Diffstat (limited to 'drivers')
774 files changed, 13089 insertions, 8723 deletions
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index 8638f43cfc3d..79d86da1c892 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c @@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node) } } + if (obj_desc->common.type == ACPI_TYPE_REGION) { + acpi_ut_remove_address_range(obj_desc->region.space_id, node); + } + /* Clear the Node entry in all cases */ node->object = NULL; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 6ecbbabf1233..eec263c9019e 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1043,9 +1043,6 @@ void __init acpi_early_init(void) acpi_permanent_mmap = true; - /* Initialize debug output. Linux does not use ACPICA defaults */ - acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR; - #ifdef CONFIG_X86 /* * If the machine falls into the DMI check table, diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 1b207fca1420..d4244e7d0e38 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) cpc_read(cpunum, nominal_reg, &nom); perf_caps->nominal_perf = nom; - cpc_read(cpunum, guaranteed_reg, &guaranteed); - perf_caps->guaranteed_perf = guaranteed; + if (guaranteed_reg->type != ACPI_TYPE_BUFFER || + IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) { + perf_caps->guaranteed_perf = 0; + } else { + cpc_read(cpunum, guaranteed_reg, &guaranteed); + perf_caps->guaranteed_perf = guaranteed; + } cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear); perf_caps->lowest_nonlinear_perf = min_nonlinear; diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 5a389a4f4f65..f1ed0befe303 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -567,6 +567,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, goto out; } + dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, + cmd_name, out_obj->buffer.length); + print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, + out_obj->buffer.pointer, + min_t(u32, 128, out_obj->buffer.length), true); + if (call_pkg) { call_pkg->nd_fw_size = out_obj->buffer.length; memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, @@ -585,12 +591,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, return 0; } - dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, - cmd_name, out_obj->buffer.length); - print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, - out_obj->buffer.pointer, - min_t(u32, 128, out_obj->buffer.length), true); - for (i = 0, offset = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, (u32 *) out_obj->buffer.pointer, diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index f70de71f79d6..cddd0fcf622c 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c @@ -122,9 +122,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm, if (!test_bit(cmd, &nfit_mem->dsm_mask)) return -ENOTTY; - if (old_data) - memcpy(nd_cmd.cmd.old_pass, old_data->data, - sizeof(nd_cmd.cmd.old_pass)); + memcpy(nd_cmd.cmd.old_pass, old_data->data, + sizeof(nd_cmd.cmd.old_pass)); memcpy(nd_cmd.cmd.new_pass, new_data->data, sizeof(nd_cmd.cmd.new_pass)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); @@ -336,9 +335,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, /* flush all cache before we erase DIMM */ nvdimm_invalidate_cache(); - if 
(nkey) - memcpy(nd_cmd.cmd.passphrase, nkey->data, - sizeof(nd_cmd.cmd.passphrase)); + memcpy(nd_cmd.cmd.passphrase, nkey->data, + sizeof(nd_cmd.cmd.passphrase)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); if (rc < 0) return rc; diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 78db97687f26..c4b06cc075f9 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) match.hrv = hrv; dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); + put_device(dev); return !!dev; } EXPORT_SYMBOL(acpi_dev_present); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 8685882da64c..4b9c7ca492e6 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc, size_t object_size = 0; read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); - if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) + if (offset > buffer->data_size || read_size < sizeof(*hdr) || + !IS_ALIGNED(offset, sizeof(u32))) return 0; binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, offset, read_size); diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 6389467670a0..195f120c4e8c 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item, index = page - alloc->pages; page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; + + mm = alloc->vma_vm_mm; + if (!mmget_not_zero(mm)) + goto err_mmget; + if (!down_write_trylock(&mm->mmap_sem)) + goto err_down_write_mmap_sem_failed; vma = binder_alloc_get_vma(alloc); - if (vma) { - if (!mmget_not_zero(alloc->vma_vm_mm)) - goto err_mmget; - mm = alloc->vma_vm_mm; - if (!down_read_trylock(&mm->mmap_sem)) - goto err_down_write_mmap_sem_failed; - } list_lru_isolate(lru, item); spin_unlock(lock); @@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item, zap_page_range(vma, page_addr, PAGE_SIZE); trace_binder_unmap_user_end(alloc, index); - - up_read(&mm->mmap_sem); - mmput(mm); } + up_write(&mm->mmap_sem); + mmput(mm); trace_binder_unmap_kernel_start(alloc, index); diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index b3ed8f9953a8..173e6f2dd9af 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c @@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev) /* Per the spec, only slot type and drawer type ODD can be supported */ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) { - char buf[16]; + char *buf; unsigned int ret; - struct rm_feature_desc *desc = (void *)(buf + 8); + struct rm_feature_desc *desc; struct ata_taskfile tf; static const char cdb[] = { GPCMD_GET_CONFIGURATION, 2, /* only 1 feature descriptor requested */ 0, 3, /* 3, removable medium feature */ 0, 0, 0,/* reserved */ - 0, sizeof(buf), + 0, 16, 0, 0, 0, }; + buf = kzalloc(16, GFP_KERNEL); + if (!buf) + return ODD_MECH_TYPE_UNSUPPORTED; + desc = (void *)(buf + 8); + ata_tf_init(dev, &tf); tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf.command = ATA_CMD_PACKET; tf.protocol = ATAPI_PROT_PIO; - tf.lbam = sizeof(buf); + tf.lbam = 16; ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, - buf, sizeof(buf), 0); - if (ret) + buf, 16, 0); + if (ret) { + kfree(buf); return ODD_MECH_TYPE_UNSUPPORTED; + } - if 
(be16_to_cpu(desc->feature_code) != 3) + if (be16_to_cpu(desc->feature_code) != 3) { + kfree(buf); return ODD_MECH_TYPE_UNSUPPORTED; + } - if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) + if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) { + kfree(buf); return ODD_MECH_TYPE_SLOT; - else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1) + } else if (desc->mech_type == 1 && desc->load == 0 && + desc->eject == 1) { + kfree(buf); return ODD_MECH_TYPE_DRAWER; - else + } else { + kfree(buf); return ODD_MECH_TYPE_UNSUPPORTED; + } } /* Test if ODD is zero power ready by sense code */ diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 11e1663bdc4d..b2c06da4f62e 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1646,7 +1646,7 @@ static irqreturn_t fs_irq (int irq, void *dev_id) } if (status & ISR_TBRQ_W) { - fs_dprintk (FS_DEBUG_IRQ, "Data tramsitted!\n"); + fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n"); process_txdone_queue (dev, &dev->tx_relq); } diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index 57410f9c5d44..c52c738e554a 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -164,9 +164,7 @@ config ARM_CHARLCD line and the Linux version on the second line, but that's still useful. -endif # AUXDISPLAY - -menuconfig PANEL +menuconfig PARPORT_PANEL tristate "Parallel port LCD/Keypad Panel support" depends on PARPORT select CHARLCD @@ -178,7 +176,7 @@ menuconfig PANEL compiled as a module, or linked into the kernel and started at boot. If you don't understand what all this is about, say N. -if PANEL +if PARPORT_PANEL config PANEL_PARPORT int "Default parallel port number (0=LPT1)" @@ -419,8 +417,11 @@ config PANEL_LCD_PIN_BL Default for the 'BL' pin in custom profile is '0' (uncontrolled). +endif # PARPORT_PANEL + config PANEL_CHANGE_MESSAGE bool "Change LCD initialization message ?" + depends on CHARLCD default "n" ---help--- This allows you to replace the boot message indicating the kernel version @@ -444,7 +445,34 @@ config PANEL_BOOT_MESSAGE An empty message will only clear the display at driver init time. Any other printf()-formatted message is valid with newline and escape codes. 
-endif # PANEL +choice + prompt "Backlight initial state" + default CHARLCD_BL_FLASH + + config CHARLCD_BL_OFF + bool "Off" + help + Backlight is initially turned off + + config CHARLCD_BL_ON + bool "On" + help + Backlight is initially turned on + + config CHARLCD_BL_FLASH + bool "Flash" + help + Backlight is flashed briefly on init + +endchoice + +endif # AUXDISPLAY + +config PANEL + tristate "Parallel port LCD/Keypad Panel support (OLD OPTION)" + depends on PARPORT + select AUXDISPLAY + select PARPORT_PANEL config CHARLCD tristate "Character LCD core support" if COMPILE_TEST diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile index 7ac6776ca3f6..cf54b5efb07e 100644 --- a/drivers/auxdisplay/Makefile +++ b/drivers/auxdisplay/Makefile @@ -10,4 +10,4 @@ obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o obj-$(CONFIG_HD44780) += hd44780.o obj-$(CONFIG_HT16K33) += ht16k33.o -obj-$(CONFIG_PANEL) += panel.o +obj-$(CONFIG_PARPORT_PANEL) += panel.o diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 60e0b772673f..92745efefb54 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -91,7 +91,7 @@ struct charlcd_priv { unsigned long long drvdata[0]; }; -#define to_priv(p) container_of(p, struct charlcd_priv, lcd) +#define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd) /* Device single-open policy control */ static atomic_t charlcd_available = ATOMIC_INIT(1); @@ -105,7 +105,7 @@ static void long_sleep(int ms) /* turn the backlight on or off */ static void charlcd_backlight(struct charlcd *lcd, int on) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); if (!lcd->ops->backlight) return; @@ -134,7 +134,7 @@ static void charlcd_bl_off(struct work_struct *work) /* turn the backlight on for a little while */ void charlcd_poke(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); if (!lcd->ops->backlight) return; @@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(charlcd_poke); static void charlcd_gotoxy(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); unsigned int addr; /* @@ -170,7 +170,7 @@ static void charlcd_gotoxy(struct charlcd *lcd) static void charlcd_home(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); priv->addr.x = 0; priv->addr.y = 0; @@ -179,7 +179,7 @@ static void charlcd_home(struct charlcd *lcd) static void charlcd_print(struct charlcd *lcd, char c) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); if (priv->addr.x < lcd->bwidth) { if (lcd->char_conv) @@ -211,7 +211,7 @@ static void charlcd_clear_fast(struct charlcd *lcd) /* clears the display and resets X/Y */ static void charlcd_clear_display(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR); priv->addr.x = 0; @@ -223,7 +223,7 @@ static void charlcd_clear_display(struct charlcd *lcd) static int charlcd_init_display(struct charlcd *lcd) { void (*write_cmd_raw)(struct charlcd *lcd, int cmd); - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); u8 init; if (lcd->ifwidth != 4 && lcd->ifwidth != 8) @@ -369,7 +369,7 @@ static bool parse_xy(const char *s, unsigned long *x, 
unsigned long *y) static inline int handle_lcd_special_code(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); /* LCD special codes */ @@ -580,7 +580,7 @@ static inline int handle_lcd_special_code(struct charlcd *lcd) static void charlcd_write_char(struct charlcd *lcd, char c) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); /* first, we'll test if we're in escape mode */ if ((c != '\n') && priv->esc_seq.len >= 0) { @@ -705,7 +705,7 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf, static int charlcd_open(struct inode *inode, struct file *file) { - struct charlcd_priv *priv = to_priv(the_charlcd); + struct charlcd_priv *priv = charlcd_to_priv(the_charlcd); int ret; ret = -EBUSY; @@ -763,10 +763,24 @@ static void charlcd_puts(struct charlcd *lcd, const char *s) } } +#ifdef CONFIG_PANEL_BOOT_MESSAGE +#define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE +#else +#define LCD_INIT_TEXT "Linux-" UTS_RELEASE "\n" +#endif + +#ifdef CONFIG_CHARLCD_BL_ON +#define LCD_INIT_BL "\x1b[L+" +#elif defined(CONFIG_CHARLCD_BL_FLASH) +#define LCD_INIT_BL "\x1b[L*" +#else +#define LCD_INIT_BL "\x1b[L-" +#endif + /* initialize the LCD driver */ static int charlcd_init(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); int ret; if (lcd->ops->backlight) { @@ -784,13 +798,8 @@ static int charlcd_init(struct charlcd *lcd) return ret; /* display a short message */ -#ifdef CONFIG_PANEL_CHANGE_MESSAGE -#ifdef CONFIG_PANEL_BOOT_MESSAGE - charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE); -#endif -#else - charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\n"); -#endif + charlcd_puts(lcd, "\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT); + /* clear the display on the next device opening */ priv->must_clear = true; charlcd_home(lcd); @@ -818,6 +827,12 @@ struct charlcd *charlcd_alloc(unsigned int drvdata_size) } EXPORT_SYMBOL_GPL(charlcd_alloc); +void charlcd_free(struct charlcd *lcd) +{ + kfree(charlcd_to_priv(lcd)); +} +EXPORT_SYMBOL_GPL(charlcd_free); + static int panel_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { @@ -866,7 +881,7 @@ EXPORT_SYMBOL_GPL(charlcd_register); int charlcd_unregister(struct charlcd *lcd) { - struct charlcd_priv *priv = to_priv(lcd); + struct charlcd_priv *priv = charlcd_to_priv(lcd); unregister_reboot_notifier(&panel_notifier); charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-"); diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c index 9ad93ea42fdc..ab15b64707ad 100644 --- a/drivers/auxdisplay/hd44780.c +++ b/drivers/auxdisplay/hd44780.c @@ -271,7 +271,7 @@ static int hd44780_probe(struct platform_device *pdev) return 0; fail: - kfree(lcd); + charlcd_free(lcd); return ret; } @@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev) struct charlcd *lcd = platform_get_drvdata(pdev); charlcd_unregister(lcd); + + charlcd_free(lcd); return 0; } diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index 21b9b2f2470a..e06de63497cf 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1620,7 +1620,7 @@ err_lcd_unreg: if (lcd.enabled) charlcd_unregister(lcd.charlcd); err_unreg_device: - kfree(lcd.charlcd); + charlcd_free(lcd.charlcd); lcd.charlcd = NULL; parport_unregister_device(pprt); pprt = NULL; @@ -1647,7 +1647,7 @@ static void panel_detach(struct 
parport *port) if (lcd.enabled) { charlcd_unregister(lcd.charlcd); lcd.initialized = false; - kfree(lcd.charlcd); + charlcd_free(lcd.charlcd); lcd.charlcd = NULL; } diff --git a/drivers/base/memory.c b/drivers/base/memory.c index cb8347500ce2..e49028a60429 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -506,7 +506,7 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr, ret = lock_device_hotplug_sysfs(); if (ret) - goto out; + return ret; nid = memory_add_physaddr_to_nid(phys_addr); ret = __add_memory(nid, phys_addr, diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 76c9969b7124..96a6dc9d305c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1469,12 +1469,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); - genpd_lock(genpd); - ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; if (ret) goto out; + genpd_lock(genpd); + dev_pm_domain_set(dev, &genpd->domain); genpd->device_count++; @@ -1482,9 +1482,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); - out: genpd_unlock(genpd); - + out: if (ret) genpd_free_dev_data(dev, gpd_data); else @@ -1533,15 +1532,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, genpd->device_count--; genpd->max_off_time_changed = true; - if (genpd->detach_dev) - genpd->detach_dev(genpd, dev); - dev_pm_domain_set(dev, NULL); list_del_init(&pdd->list_node); genpd_unlock(genpd); + if (genpd->detach_dev) + genpd->detach_dev(genpd, dev); + genpd_free_dev_data(dev, gpd_data); return 0; diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 1fad9291f6aa..7fc5a18e02ad 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -472,7 +472,7 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode, val, nval); } -struct fwnode_handle * +static struct fwnode_handle * software_node_get_parent(const struct fwnode_handle *fwnode) { struct software_node *swnode = to_software_node(fwnode); @@ -481,7 +481,7 @@ software_node_get_parent(const struct fwnode_handle *fwnode) NULL; } -struct fwnode_handle * +static struct fwnode_handle * software_node_get_next_child(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1e6edd568214..bf1c61cab8eb 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev) return -EBADF; l = f->f_mapping->host->i_bdev->bd_disk->private_data; - if (l->lo_state == Lo_unbound) { + if (l->lo_state != Lo_bound) { return -EINVAL; } f = l->lo_backing_file; diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 417a9f15c116..d7ac09c092f2 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1748,6 +1748,11 @@ static int __init null_init(void) return -EINVAL; } + if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) { + pr_err("null_blk: invalid home_node value\n"); + g_home_node = NUMA_NO_NODE; + } + if (g_queue_mode == NULL_Q_RQ) { pr_err("null_blk: legacy IO path no longer available\n"); return -EINVAL; diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 96670eefaeb2..6d415b20fb70 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -314,6 
+314,7 @@ static void pcd_init_units(void) disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops, 1, BLK_MQ_F_SHOULD_MERGE); if (IS_ERR(disk->queue)) { + put_disk(disk); disk->queue = NULL; continue; } @@ -749,8 +750,14 @@ static int pcd_detect(void) return 0; printk("%s: No CD-ROM drive found\n", name); - for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + if (!cd->disk) + continue; + blk_cleanup_queue(cd->disk->queue); + cd->disk->queue = NULL; + blk_mq_free_tag_set(&cd->tag_set); put_disk(cd->disk); + } pi_unregister_driver(par_drv); return -1; } @@ -1006,8 +1013,14 @@ static int __init pcd_init(void) pcd_probe_capabilities(); if (register_blkdev(major, name)) { - for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + if (!cd->disk) + continue; + + blk_cleanup_queue(cd->disk->queue); + blk_mq_free_tag_set(&cd->tag_set); put_disk(cd->disk); + } return -EBUSY; } @@ -1028,6 +1041,9 @@ static void __exit pcd_exit(void) int unit; for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + if (!cd->disk) + continue; + if (cd->present) { del_gendisk(cd->disk); pi_release(cd->pi); diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index e92e7a8eeeb2..35e6e271b219 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -761,8 +761,14 @@ static int pf_detect(void) return 0; printk("%s: No ATAPI disk detected\n", name); - for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { + if (!pf->disk) + continue; + blk_cleanup_queue(pf->disk->queue); + pf->disk->queue = NULL; + blk_mq_free_tag_set(&pf->tag_set); put_disk(pf->disk); + } pi_unregister_driver(par_drv); return -1; } @@ -1025,8 +1031,13 @@ static int __init pf_init(void) pf_busy = 0; if (register_blkdev(major, name)) { - for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { + if (!pf->disk) + continue; + blk_cleanup_queue(pf->disk->queue); + blk_mq_free_tag_set(&pf->tag_set); put_disk(pf->disk); + } return -EBUSY; } @@ -1047,13 +1058,18 @@ static void __exit pf_exit(void) int unit; unregister_blkdev(major, name); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { - if (!pf->present) + if (!pf->disk) continue; - del_gendisk(pf->disk); + + if (pf->present) + del_gendisk(pf->disk); + blk_cleanup_queue(pf->disk->queue); blk_mq_free_tag_set(&pf->tag_set); put_disk(pf->disk); - pi_release(pf->pi); + + if (pf->present) + pi_release(pf->pi); } } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4ba967d65cf9..2210c1b9491b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -833,7 +833,7 @@ static int parse_rbd_opts_token(char *c, void *private) pctx->opts->queue_depth = intval; break; case Opt_alloc_size: - if (intval < 1) { + if (intval < SECTOR_SIZE) { pr_err("alloc_size out of range\n"); return -EINVAL; } @@ -924,23 +924,6 @@ static void rbd_put_client(struct rbd_client *rbdc) kref_put(&rbdc->kref, rbd_client_release); } -static int wait_for_latest_osdmap(struct ceph_client *client) -{ - u64 newest_epoch; - int ret; - - ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch); - if (ret) - return ret; - - if (client->osdc.osdmap->epoch >= newest_epoch) - return 0; - - ceph_osdc_maybe_request_map(&client->osdc); - return ceph_monc_wait_osdmap(&client->monc, newest_epoch, - client->options->mount_timeout); -} - 
/* * Get a ceph client with specific addr and configuration, if one does * not exist create it. Either way, ceph_opts is consumed by this @@ -960,7 +943,8 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts) * Using an existing client. Make sure ->pg_pools is up to * date before we look up the pool id in do_rbd_add(). */ - ret = wait_for_latest_osdmap(rbdc->client); + ret = ceph_wait_for_latest_osdmap(rbdc->client, + rbdc->client->options->mount_timeout); if (ret) { rbd_warn(NULL, "failed to get latest osdmap: %d", ret); rbd_put_client(rbdc); @@ -4203,12 +4187,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) q->limits.max_sectors = queue_max_hw_sectors(q); blk_queue_max_segments(q, USHRT_MAX); blk_queue_max_segment_size(q, UINT_MAX); - blk_queue_io_min(q, objset_bytes); - blk_queue_io_opt(q, objset_bytes); + blk_queue_io_min(q, rbd_dev->opts->alloc_size); + blk_queue_io_opt(q, rbd_dev->opts->alloc_size); if (rbd_dev->opts->trim) { blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - q->limits.discard_granularity = objset_bytes; + q->limits.discard_granularity = rbd_dev->opts->alloc_size; blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 4bc083b7c9b5..2a7ca4a1e6f7 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk) if (err) num_vqs = 1; + num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs); + vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); if (!vblk->vqs) return -ENOMEM; diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 87ccef4bd69e..32a21b8d1d85 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace) return 0; err_read: + /* prevent double queue cleanup */ + ace->gd->queue = NULL; put_disk(ace->gd); err_alloc_disk: blk_cleanup_queue(ace->queue); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index e7a5f1d1c314..d58a359a6622 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev, struct zram *zram = dev_to_zram(dev); unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; int index; - char mode_buf[8]; - ssize_t sz; - sz = strscpy(mode_buf, buf, sizeof(mode_buf)); - if (sz <= 0) - return -EINVAL; - - /* ignore trailing new line */ - if (mode_buf[sz - 1] == '\n') - mode_buf[sz - 1] = 0x00; - - if (strcmp(mode_buf, "all")) + if (!sysfs_streq(buf, "all")) return -EINVAL; down_read(&zram->init_lock); @@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev, struct bio bio; struct bio_vec bio_vec; struct page *page; - ssize_t ret, sz; - char mode_buf[8]; - int mode = -1; + ssize_t ret; + int mode; unsigned long blk_idx = 0; - sz = strscpy(mode_buf, buf, sizeof(mode_buf)); - if (sz <= 0) - return -EINVAL; - - /* ignore trailing newline */ - if (mode_buf[sz - 1] == '\n') - mode_buf[sz - 1] = 0x00; - - if (!strcmp(mode_buf, "idle")) + if (sysfs_streq(buf, "idle")) mode = IDLE_WRITEBACK; - else if (!strcmp(mode_buf, "huge")) + else if (sysfs_streq(buf, "huge")) mode = HUGE_WRITEBACK; - - if (mode == -1) + else return -EINVAL; down_read(&zram->init_lock); @@ -794,18 +774,18 @@ struct zram_work { struct zram *zram; unsigned long entry; struct bio *bio; + struct bio_vec bvec; }; #if PAGE_SIZE 
!= 4096 static void zram_sync_read(struct work_struct *work) { - struct bio_vec bvec; struct zram_work *zw = container_of(work, struct zram_work, work); struct zram *zram = zw->zram; unsigned long entry = zw->entry; struct bio *bio = zw->bio; - read_from_bdev_async(zram, &bvec, entry, bio); + read_from_bdev_async(zram, &zw->bvec, entry, bio); } /* @@ -818,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, { struct zram_work work; + work.bvec = *bvec; work.zram = zram; work.entry = entry; work.bio = bio; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index ded198328f21..7db48ae65cd2 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2942,6 +2942,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) return 0; } + irq_set_status_flags(irq, IRQ_NOAUTOEN); ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler, 0, "OOB Wake-on-BT", data); if (ret) { @@ -2956,7 +2957,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) } data->oob_wake_irq = irq; - disable_irq(irq); bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq); return 0; } diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 72866a004f07..466ebd84ad17 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -348,7 +348,7 @@ config XILINX_HWICAP config R3964 tristate "Siemens R3964 line discipline" - depends on TTY + depends on TTY && BROKEN ---help--- This driver allows synchronous communication with devices using the Siemens R3964 packet protocol. Unless you are dealing with special diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c index ff0b199be472..f2411468f33f 100644 --- a/drivers/char/ipmi/ipmi_dmi.c +++ b/drivers/char/ipmi/ipmi_dmi.c @@ -66,7 +66,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr, return; } - memset(&p, 0, sizeof(p)); p.addr = base_addr; p.space = space; p.regspacing = offset; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index e8ba67834746..00bf4b17edbf 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -214,6 +214,9 @@ struct ipmi_user { /* Does this interface receive IPMI events? */ bool gets_events; + + /* Free must run in process context for RCU cleanup. */ + struct work_struct remove_work; }; static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) @@ -1157,6 +1160,15 @@ static int intf_err_seq(struct ipmi_smi *intf, return rv; } +static void free_user_work(struct work_struct *work) +{ + struct ipmi_user *user = container_of(work, struct ipmi_user, + remove_work); + + cleanup_srcu_struct(&user->release_barrier); + kfree(user); +} + int ipmi_create_user(unsigned int if_num, const struct ipmi_user_hndl *handler, void *handler_data, @@ -1200,6 +1212,8 @@ int ipmi_create_user(unsigned int if_num, goto out_kfree; found: + INIT_WORK(&new_user->remove_work, free_user_work); + rv = init_srcu_struct(&new_user->release_barrier); if (rv) goto out_kfree; @@ -1260,8 +1274,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info); static void free_user(struct kref *ref) { struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); - cleanup_srcu_struct(&user->release_barrier); - kfree(user); + + /* SRCU cleanup must happen in task context. 
*/ + schedule_work(&user->remove_work); } static void _ipmi_destroy_user(struct ipmi_user *user) diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c index 01946cad3d13..682221eebd66 100644 --- a/drivers/char/ipmi/ipmi_si_hardcode.c +++ b/drivers/char/ipmi/ipmi_si_hardcode.c @@ -118,6 +118,8 @@ void __init ipmi_hardcode_init(void) char *str; char *si_type[SI_MAX_PARMS]; + memset(si_type, 0, sizeof(si_type)); + /* Parse out the si_type string into its components. */ str = si_type_str; if (*str != '\0') { diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c index d8b77133a83a..f824563fc28d 100644 --- a/drivers/char/tpm/eventlog/tpm2.c +++ b/drivers/char/tpm/eventlog/tpm2.c @@ -37,8 +37,8 @@ * * Returns size of the event. If it is an invalid event, returns 0. */ -static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event, - struct tcg_pcr_event *event_header) +static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event, + struct tcg_pcr_event *event_header) { struct tcg_efi_specid_event_head *efispecid; struct tcg_event_field *event_field; diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 8856cce5a23b..817ae09a369e 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -233,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait) __poll_t mask = 0; poll_wait(file, &priv->async_wait, wait); + mutex_lock(&priv->buffer_mutex); - if (!priv->response_read || priv->response_length) + /* + * The response_length indicates if there is still response + * (or part of it) to be consumed. Partial reads decrease it + * by the number of bytes read, and write resets it the zero. + */ + if (priv->response_length) mask = EPOLLIN | EPOLLRDNORM; else mask = EPOLLOUT | EPOLLWRNORM; + mutex_unlock(&priv->buffer_mutex); return mask; } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 83ece5639f86..ae1030c9b086 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -402,15 +402,13 @@ int tpm_pm_suspend(struct device *dev) if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) return 0; - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - mutex_lock(&chip->tpm_mutex); - if (!tpm_chip_start(chip)) { + if (!tpm_chip_start(chip)) { + if (chip->flags & TPM_CHIP_FLAG_TPM2) tpm2_shutdown(chip, TPM2_SU_STATE); - tpm_chip_stop(chip); - } - mutex_unlock(&chip->tpm_mutex); - } else { - rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); + else + rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); + + tpm_chip_stop(chip); } return rc; diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index e705aab9e38b..03854a9d6f5e 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -219,6 +219,13 @@ config COMMON_CLK_XGENE ---help--- Sypport for the APM X-Gene SoC reference, PLL, and device clocks. +config COMMON_CLK_LOCHNAGAR + tristate "Cirrus Logic Lochnagar clock driver" + depends on MFD_LOCHNAGAR + help + This driver supports the clocking features of the Cirrus Logic + Lochnagar audio development board. 
+ config COMMON_CLK_NXP def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX) select REGMAP_MMIO if ARCH_LPC32XX @@ -310,6 +317,7 @@ source "drivers/clk/qcom/Kconfig" source "drivers/clk/renesas/Kconfig" source "drivers/clk/samsung/Kconfig" source "drivers/clk/sprd/Kconfig" +source "drivers/clk/sunxi/Kconfig" source "drivers/clk/sunxi-ng/Kconfig" source "drivers/clk/tegra/Kconfig" source "drivers/clk/ti/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 1db133652f0c..020ff8d635f2 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -32,8 +32,10 @@ obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o +obj-$(CONFIG_COMMON_CLK_LOCHNAGAR) += clk-lochnagar.o obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o +obj-$(CONFIG_ARCH_MILBEAUT_M10V) += clk-milbeaut.o obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index 89d6f3736dbf..f8edbb65eda3 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c @@ -20,8 +20,7 @@ #define PROG_ID_MAX 7 #define PROG_STATUS_MASK(id) (1 << ((id) + 8)) -#define PROG_PRES_MASK 0x7 -#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK) +#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & layout->pres_mask) #define PROG_MAX_RM9200_CSS 3 struct clk_programmable { @@ -37,20 +36,29 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_programmable *prog = to_clk_programmable(hw); + const struct clk_programmable_layout *layout = prog->layout; unsigned int pckr; + unsigned long rate; regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr); - return parent_rate >> PROG_PRES(prog->layout, pckr); + if (layout->is_pres_direct) + rate = parent_rate / (PROG_PRES(layout, pckr) + 1); + else + rate = parent_rate >> PROG_PRES(layout, pckr); + + return rate; } static int clk_programmable_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { + struct clk_programmable *prog = to_clk_programmable(hw); + const struct clk_programmable_layout *layout = prog->layout; struct clk_hw *parent; long best_rate = -EINVAL; unsigned long parent_rate; - unsigned long tmp_rate; + unsigned long tmp_rate = 0; int shift; int i; @@ -60,10 +68,18 @@ static int clk_programmable_determine_rate(struct clk_hw *hw, continue; parent_rate = clk_hw_get_rate(parent); - for (shift = 0; shift < PROG_PRES_MASK; shift++) { - tmp_rate = parent_rate >> shift; - if (tmp_rate <= req->rate) - break; + if (layout->is_pres_direct) { + for (shift = 0; shift <= layout->pres_mask; shift++) { + tmp_rate = parent_rate / (shift + 1); + if (tmp_rate <= req->rate) + break; + } + } else { + for (shift = 0; shift < layout->pres_mask; shift++) { + tmp_rate = parent_rate >> shift; + if (tmp_rate <= req->rate) + break; + } } if (tmp_rate > req->rate) @@ -137,16 +153,23 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate, if (!div) return -EINVAL; - shift = fls(div) - 1; + if (layout->is_pres_direct) { + shift = div - 1; - if (div != (1 << shift)) - return -EINVAL; + if (shift > layout->pres_mask) + return -EINVAL; + } else { + shift = fls(div) - 1; - if (shift >= PROG_PRES_MASK) - return -EINVAL; + 
if (div != (1 << shift)) + return -EINVAL; + + if (shift >= layout->pres_mask) + return -EINVAL; + } regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id), - PROG_PRES_MASK << layout->pres_shift, + layout->pres_mask << layout->pres_shift, shift << layout->pres_shift); return 0; @@ -202,19 +225,25 @@ at91_clk_register_programmable(struct regmap *regmap, } const struct clk_programmable_layout at91rm9200_programmable_layout = { + .pres_mask = 0x7, .pres_shift = 2, .css_mask = 0x3, .have_slck_mck = 0, + .is_pres_direct = 0, }; const struct clk_programmable_layout at91sam9g45_programmable_layout = { + .pres_mask = 0x7, .pres_shift = 2, .css_mask = 0x3, .have_slck_mck = 1, + .is_pres_direct = 0, }; const struct clk_programmable_layout at91sam9x5_programmable_layout = { + .pres_mask = 0x7, .pres_shift = 4, .css_mask = 0x7, .have_slck_mck = 0, + .is_pres_direct = 0, }; diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 672a79bda88c..a0e5ce9c9b9e 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -71,9 +71,11 @@ struct clk_pll_characteristics { }; struct clk_programmable_layout { + u8 pres_mask; u8 pres_shift; u8 css_mask; u8 have_slck_mck; + u8 is_pres_direct; }; extern const struct clk_programmable_layout at91rm9200_programmable_layout; diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index 1f70cb164b06..81943fac4537 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -125,6 +125,14 @@ static const struct { .pll = true }, }; +static const struct clk_programmable_layout sama5d2_programmable_layout = { + .pres_mask = 0xff, + .pres_shift = 4, + .css_mask = 0x7, + .have_slck_mck = 0, + .is_pres_direct = 1, +}; + static void __init sama5d2_pmc_setup(struct device_node *np) { struct clk_range range = CLK_RANGE(0, 0); @@ -249,7 +257,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) hw = at91_clk_register_programmable(regmap, name, parent_names, 6, i, - &at91sam9x5_programmable_layout); + &sama5d2_programmable_layout); if (IS_ERR(hw)) goto err_free; } diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c index 596136793fc4..42b4df6ba249 100644 --- a/drivers/clk/clk-aspeed.c +++ b/drivers/clk/clk-aspeed.c @@ -87,10 +87,10 @@ struct aspeed_clk_gate { /* TODO: ask Aspeed about the actual parent data */ static const struct aspeed_gate_data aspeed_gates[] = { /* clk rst name parent flags */ - [ASPEED_CLK_GATE_ECLK] = { 0, -1, "eclk-gate", "eclk", 0 }, /* Video Engine */ + [ASPEED_CLK_GATE_ECLK] = { 0, 6, "eclk-gate", "eclk", 0 }, /* Video Engine */ [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */ [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */ - [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */ + [ASPEED_CLK_GATE_VCLK] = { 3, -1, "vclk-gate", NULL, 0 }, /* Video Capture */ [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */ [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */ [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL }, @@ -113,6 +113,24 @@ static const struct aspeed_gate_data aspeed_gates[] = { [ASPEED_CLK_GATE_LHCCLK] = { 28, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */ }; +static const char * const eclk_parent_names[] = { + "mpll", + "hpll", + "dpll", +}; + +static const struct clk_div_table ast2500_eclk_div_table[] = { + { 0x0, 2 }, + { 0x1, 2 }, + { 0x2, 3 }, + { 0x3, 4 }, + { 0x4, 5 }, + { 0x5, 6 }, + { 
0x6, 7 }, + { 0x7, 8 }, + { 0 } +}; + static const struct clk_div_table ast2500_mac_div_table[] = { { 0x0, 4 }, /* Yep, really. Aspeed confirmed this is correct */ { 0x1, 4 }, @@ -192,18 +210,21 @@ static struct clk_hw *aspeed_ast2500_calc_pll(const char *name, u32 val) struct aspeed_clk_soc_data { const struct clk_div_table *div_table; + const struct clk_div_table *eclk_div_table; const struct clk_div_table *mac_div_table; struct clk_hw *(*calc_pll)(const char *name, u32 val); }; static const struct aspeed_clk_soc_data ast2500_data = { .div_table = ast2500_div_table, + .eclk_div_table = ast2500_eclk_div_table, .mac_div_table = ast2500_mac_div_table, .calc_pll = aspeed_ast2500_calc_pll, }; static const struct aspeed_clk_soc_data ast2400_data = { .div_table = ast2400_div_table, + .eclk_div_table = ast2400_div_table, .mac_div_table = ast2400_div_table, .calc_pll = aspeed_ast2400_calc_pll, }; @@ -522,6 +543,22 @@ static int aspeed_clk_probe(struct platform_device *pdev) return PTR_ERR(hw); aspeed_clk_data->hws[ASPEED_CLK_24M] = hw; + hw = clk_hw_register_mux(dev, "eclk-mux", eclk_parent_names, + ARRAY_SIZE(eclk_parent_names), 0, + scu_base + ASPEED_CLK_SELECTION, 2, 0x3, 0, + &aspeed_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_clk_data->hws[ASPEED_CLK_ECLK_MUX] = hw; + + hw = clk_hw_register_divider_table(dev, "eclk", "eclk-mux", 0, + scu_base + ASPEED_CLK_SELECTION, 28, + 3, 0, soc_data->eclk_div_table, + &aspeed_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_clk_data->hws[ASPEED_CLK_ECLK] = hw; + /* * TODO: There are a number of clocks that not included in this driver * as more information is required: @@ -531,7 +568,6 @@ static int aspeed_clk_probe(struct platform_device *pdev) * RGMII * RMII * UART[1..5] clock source mux - * Video Engine (ECLK) mux and clock divider */ for (i = 0; i < ARRAY_SIZE(aspeed_gates); i++) { diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index e5a17265cfaf..46852e9cd4b1 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -25,6 +25,22 @@ * parent - fixed parent. 
No clk_set_parent support */ +static inline u32 clk_div_readl(struct clk_divider *divider) +{ + if (divider->flags & CLK_DIVIDER_BIG_ENDIAN) + return ioread32be(divider->reg); + + return readl(divider->reg); +} + +static inline void clk_div_writel(struct clk_divider *divider, u32 val) +{ + if (divider->flags & CLK_DIVIDER_BIG_ENDIAN) + iowrite32be(val, divider->reg); + else + writel(val, divider->reg); +} + static unsigned int _get_table_maxdiv(const struct clk_div_table *table, u8 width) { @@ -135,7 +151,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, struct clk_divider *divider = to_clk_divider(hw); unsigned int val; - val = clk_readl(divider->reg) >> divider->shift; + val = clk_div_readl(divider) >> divider->shift; val &= clk_div_mask(divider->width); return divider_recalc_rate(hw, parent_rate, val, divider->table, @@ -370,7 +386,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, if (divider->flags & CLK_DIVIDER_READ_ONLY) { u32 val; - val = clk_readl(divider->reg) >> divider->shift; + val = clk_div_readl(divider) >> divider->shift; val &= clk_div_mask(divider->width); return divider_ro_round_rate(hw, rate, prate, divider->table, @@ -420,11 +436,11 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, if (divider->flags & CLK_DIVIDER_HIWORD_MASK) { val = clk_div_mask(divider->width) << (divider->shift + 16); } else { - val = clk_readl(divider->reg); + val = clk_div_readl(divider); val &= ~(clk_div_mask(divider->width) << divider->shift); } val |= (u32)value << divider->shift; - clk_writel(val, divider->reg); + clk_div_writel(divider, val); if (divider->lock) spin_unlock_irqrestore(divider->lock, flags); diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c index fdfe2e423d15..638a9bbc2ab8 100644 --- a/drivers/clk/clk-fractional-divider.c +++ b/drivers/clk/clk-fractional-divider.c @@ -13,6 +13,22 @@ #include <linux/slab.h> #include <linux/rational.h> +static inline u32 clk_fd_readl(struct clk_fractional_divider *fd) +{ + if (fd->flags & CLK_FRAC_DIVIDER_BIG_ENDIAN) + return ioread32be(fd->reg); + + return readl(fd->reg); +} + +static inline void clk_fd_writel(struct clk_fractional_divider *fd, u32 val) +{ + if (fd->flags & CLK_FRAC_DIVIDER_BIG_ENDIAN) + iowrite32be(val, fd->reg); + else + writel(val, fd->reg); +} + static unsigned long clk_fd_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -27,7 +43,7 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw, else __acquire(fd->lock); - val = clk_readl(fd->reg); + val = clk_fd_readl(fd); if (fd->lock) spin_unlock_irqrestore(fd->lock, flags); @@ -115,10 +131,10 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate, else __acquire(fd->lock); - val = clk_readl(fd->reg); + val = clk_fd_readl(fd); val &= ~(fd->mmask | fd->nmask); val |= (m << fd->mshift) | (n << fd->nshift); - clk_writel(val, fd->reg); + clk_fd_writel(fd, val); if (fd->lock) spin_unlock_irqrestore(fd->lock, flags); diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c index f05823cd9b21..0c0bb83f714e 100644 --- a/drivers/clk/clk-gate.c +++ b/drivers/clk/clk-gate.c @@ -23,6 +23,22 @@ * parent - fixed parent. 
No clk_set_parent support */ +static inline u32 clk_gate_readl(struct clk_gate *gate) +{ + if (gate->flags & CLK_GATE_BIG_ENDIAN) + return ioread32be(gate->reg); + + return readl(gate->reg); +} + +static inline void clk_gate_writel(struct clk_gate *gate, u32 val) +{ + if (gate->flags & CLK_GATE_BIG_ENDIAN) + iowrite32be(val, gate->reg); + else + writel(val, gate->reg); +} + /* * It works on following logic: * @@ -55,7 +71,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable) if (set) reg |= BIT(gate->bit_idx); } else { - reg = clk_readl(gate->reg); + reg = clk_gate_readl(gate); if (set) reg |= BIT(gate->bit_idx); @@ -63,7 +79,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable) reg &= ~BIT(gate->bit_idx); } - clk_writel(reg, gate->reg); + clk_gate_writel(gate, reg); if (gate->lock) spin_unlock_irqrestore(gate->lock, flags); @@ -88,7 +104,7 @@ int clk_gate_is_enabled(struct clk_hw *hw) u32 reg; struct clk_gate *gate = to_clk_gate(hw); - reg = clk_readl(gate->reg); + reg = clk_gate_readl(gate); /* if a set bit disables this clk, flip it before masking */ if (gate->flags & CLK_GATE_SET_TO_DISABLE) diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c index 8e4581004695..bd328b0eb243 100644 --- a/drivers/clk/clk-highbank.c +++ b/drivers/clk/clk-highbank.c @@ -17,7 +17,6 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/err.h> -#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/of.h> @@ -272,7 +271,7 @@ static const struct clk_ops periclk_ops = { .set_rate = clk_periclk_set_rate, }; -static __init struct clk *hb_clk_init(struct device_node *node, const struct clk_ops *ops) +static void __init hb_clk_init(struct device_node *node, const struct clk_ops *ops, unsigned long clkflags) { u32 reg; struct hb_clk *hb_clk; @@ -284,11 +283,11 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk rc = of_property_read_u32(node, "reg", ®); if (WARN_ON(rc)) - return NULL; + return; hb_clk = kzalloc(sizeof(*hb_clk), GFP_KERNEL); if (WARN_ON(!hb_clk)) - return NULL; + return; /* Map system registers */ srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs"); @@ -301,7 +300,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk init.name = clk_name; init.ops = ops; - init.flags = 0; + init.flags = clkflags; parent_name = of_clk_get_parent_name(node, 0); init.parent_names = &parent_name; init.num_parents = 1; @@ -311,33 +310,31 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk rc = clk_hw_register(NULL, &hb_clk->hw); if (WARN_ON(rc)) { kfree(hb_clk); - return NULL; + return; } - rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw); - return hb_clk->hw.clk; + of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw); } static void __init hb_pll_init(struct device_node *node) { - hb_clk_init(node, &clk_pll_ops); + hb_clk_init(node, &clk_pll_ops, 0); } CLK_OF_DECLARE(hb_pll, "calxeda,hb-pll-clock", hb_pll_init); static void __init hb_a9periph_init(struct device_node *node) { - hb_clk_init(node, &a9periphclk_ops); + hb_clk_init(node, &a9periphclk_ops, 0); } CLK_OF_DECLARE(hb_a9periph, "calxeda,hb-a9periph-clock", hb_a9periph_init); static void __init hb_a9bus_init(struct device_node *node) { - struct clk *clk = hb_clk_init(node, &a9bclk_ops); - clk_prepare_enable(clk); + hb_clk_init(node, &a9bclk_ops, CLK_IS_CRITICAL); } CLK_OF_DECLARE(hb_a9bus, "calxeda,hb-a9bus-clock", hb_a9bus_init); static 
void __init hb_emmc_init(struct device_node *node) { - hb_clk_init(node, &periclk_ops); + hb_clk_init(node, &periclk_ops, 0); } CLK_OF_DECLARE(hb_emmc, "calxeda,hb-emmc-clock", hb_emmc_init); diff --git a/drivers/clk/clk-lochnagar.c b/drivers/clk/clk-lochnagar.c new file mode 100644 index 000000000000..a2f31e58ee48 --- /dev/null +++ b/drivers/clk/clk-lochnagar.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lochnagar clock control + * + * Copyright (c) 2017-2018 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. + * + * Author: Charles Keepax <ckeepax@opensource.cirrus.com> + */ + +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <linux/mfd/lochnagar.h> +#include <linux/mfd/lochnagar1_regs.h> +#include <linux/mfd/lochnagar2_regs.h> + +#include <dt-bindings/clk/lochnagar.h> + +#define LOCHNAGAR_NUM_CLOCKS (LOCHNAGAR_SPDIF_CLKOUT + 1) + +struct lochnagar_clk { + const char * const name; + struct clk_hw hw; + + struct lochnagar_clk_priv *priv; + + u16 cfg_reg; + u16 ena_mask; + + u16 src_reg; + u16 src_mask; +}; + +struct lochnagar_clk_priv { + struct device *dev; + struct regmap *regmap; + enum lochnagar_type type; + + const char **parents; + unsigned int nparents; + + struct lochnagar_clk lclks[LOCHNAGAR_NUM_CLOCKS]; +}; + +static const char * const lochnagar1_clk_parents[] = { + "ln-none", + "ln-spdif-mclk", + "ln-psia1-mclk", + "ln-psia2-mclk", + "ln-cdc-clkout", + "ln-dsp-clkout", + "ln-pmic-32k", + "ln-gf-mclk1", + "ln-gf-mclk3", + "ln-gf-mclk2", + "ln-gf-mclk4", +}; + +static const char * const lochnagar2_clk_parents[] = { + "ln-none", + "ln-cdc-clkout", + "ln-dsp-clkout", + "ln-pmic-32k", + "ln-spdif-mclk", + "ln-clk-12m", + "ln-clk-11m", + "ln-clk-24m", + "ln-clk-22m", + "ln-clk-8m", + "ln-usb-clk-24m", + "ln-gf-mclk1", + "ln-gf-mclk3", + "ln-gf-mclk2", + "ln-psia1-mclk", + "ln-psia2-mclk", + "ln-spdif-clkout", + "ln-adat-mclk", + "ln-usb-clk-12m", +}; + +#define LN1_CLK(ID, NAME, REG) \ + [LOCHNAGAR_##ID] = { \ + .name = NAME, \ + .cfg_reg = LOCHNAGAR1_##REG, \ + .ena_mask = LOCHNAGAR1_##ID##_ENA_MASK, \ + .src_reg = LOCHNAGAR1_##ID##_SEL, \ + .src_mask = LOCHNAGAR1_SRC_MASK, \ + } + +#define LN2_CLK(ID, NAME) \ + [LOCHNAGAR_##ID] = { \ + .name = NAME, \ + .cfg_reg = LOCHNAGAR2_##ID##_CTRL, \ + .src_reg = LOCHNAGAR2_##ID##_CTRL, \ + .ena_mask = LOCHNAGAR2_CLK_ENA_MASK, \ + .src_mask = LOCHNAGAR2_CLK_SRC_MASK, \ + } + +static const struct lochnagar_clk lochnagar1_clks[LOCHNAGAR_NUM_CLOCKS] = { + LN1_CLK(CDC_MCLK1, "ln-cdc-mclk1", CDC_AIF_CTRL2), + LN1_CLK(CDC_MCLK2, "ln-cdc-mclk2", CDC_AIF_CTRL2), + LN1_CLK(DSP_CLKIN, "ln-dsp-clkin", DSP_AIF), + LN1_CLK(GF_CLKOUT1, "ln-gf-clkout1", GF_AIF1), +}; + +static const struct lochnagar_clk lochnagar2_clks[LOCHNAGAR_NUM_CLOCKS] = { + LN2_CLK(CDC_MCLK1, "ln-cdc-mclk1"), + LN2_CLK(CDC_MCLK2, "ln-cdc-mclk2"), + LN2_CLK(DSP_CLKIN, "ln-dsp-clkin"), + LN2_CLK(GF_CLKOUT1, "ln-gf-clkout1"), + LN2_CLK(GF_CLKOUT2, "ln-gf-clkout2"), + LN2_CLK(PSIA1_MCLK, "ln-psia1-mclk"), + LN2_CLK(PSIA2_MCLK, "ln-psia2-mclk"), + LN2_CLK(SPDIF_MCLK, "ln-spdif-mclk"), + LN2_CLK(ADAT_MCLK, "ln-adat-mclk"), + LN2_CLK(SOUNDCARD_MCLK, "ln-soundcard-mclk"), +}; + +static inline struct lochnagar_clk *lochnagar_hw_to_lclk(struct clk_hw *hw) +{ + return container_of(hw, struct lochnagar_clk, hw); +} + +static int lochnagar_clk_prepare(struct clk_hw *hw) +{ + struct lochnagar_clk 
*lclk = lochnagar_hw_to_lclk(hw); + struct lochnagar_clk_priv *priv = lclk->priv; + struct regmap *regmap = priv->regmap; + int ret; + + ret = regmap_update_bits(regmap, lclk->cfg_reg, + lclk->ena_mask, lclk->ena_mask); + if (ret < 0) + dev_dbg(priv->dev, "Failed to prepare %s: %d\n", + lclk->name, ret); + + return ret; +} + +static void lochnagar_clk_unprepare(struct clk_hw *hw) +{ + struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw); + struct lochnagar_clk_priv *priv = lclk->priv; + struct regmap *regmap = priv->regmap; + int ret; + + ret = regmap_update_bits(regmap, lclk->cfg_reg, lclk->ena_mask, 0); + if (ret < 0) + dev_dbg(priv->dev, "Failed to unprepare %s: %d\n", + lclk->name, ret); +} + +static int lochnagar_clk_set_parent(struct clk_hw *hw, u8 index) +{ + struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw); + struct lochnagar_clk_priv *priv = lclk->priv; + struct regmap *regmap = priv->regmap; + int ret; + + ret = regmap_update_bits(regmap, lclk->src_reg, lclk->src_mask, index); + if (ret < 0) + dev_dbg(priv->dev, "Failed to reparent %s: %d\n", + lclk->name, ret); + + return ret; +} + +static u8 lochnagar_clk_get_parent(struct clk_hw *hw) +{ + struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw); + struct lochnagar_clk_priv *priv = lclk->priv; + struct regmap *regmap = priv->regmap; + unsigned int val; + int ret; + + ret = regmap_read(regmap, lclk->src_reg, &val); + if (ret < 0) { + dev_dbg(priv->dev, "Failed to read parent of %s: %d\n", + lclk->name, ret); + return priv->nparents; + } + + val &= lclk->src_mask; + + return val; +} + +static const struct clk_ops lochnagar_clk_ops = { + .prepare = lochnagar_clk_prepare, + .unprepare = lochnagar_clk_unprepare, + .set_parent = lochnagar_clk_set_parent, + .get_parent = lochnagar_clk_get_parent, +}; + +static int lochnagar_init_parents(struct lochnagar_clk_priv *priv) +{ + struct device_node *np = priv->dev->of_node; + int i, j; + + switch (priv->type) { + case LOCHNAGAR1: + memcpy(priv->lclks, lochnagar1_clks, sizeof(lochnagar1_clks)); + + priv->nparents = ARRAY_SIZE(lochnagar1_clk_parents); + priv->parents = devm_kmemdup(priv->dev, lochnagar1_clk_parents, + sizeof(lochnagar1_clk_parents), + GFP_KERNEL); + break; + case LOCHNAGAR2: + memcpy(priv->lclks, lochnagar2_clks, sizeof(lochnagar2_clks)); + + priv->nparents = ARRAY_SIZE(lochnagar2_clk_parents); + priv->parents = devm_kmemdup(priv->dev, lochnagar2_clk_parents, + sizeof(lochnagar2_clk_parents), + GFP_KERNEL); + break; + default: + dev_err(priv->dev, "Unknown Lochnagar type: %d\n", priv->type); + return -EINVAL; + } + + if (!priv->parents) + return -ENOMEM; + + for (i = 0; i < priv->nparents; i++) { + j = of_property_match_string(np, "clock-names", + priv->parents[i]); + if (j >= 0) + priv->parents[i] = of_clk_get_parent_name(np, j); + } + + return 0; +} + +static struct clk_hw * +lochnagar_of_clk_hw_get(struct of_phandle_args *clkspec, void *data) +{ + struct lochnagar_clk_priv *priv = data; + unsigned int idx = clkspec->args[0]; + + if (idx >= ARRAY_SIZE(priv->lclks)) { + dev_err(priv->dev, "Invalid index %u\n", idx); + return ERR_PTR(-EINVAL); + } + + return &priv->lclks[idx].hw; +} + +static int lochnagar_init_clks(struct lochnagar_clk_priv *priv) +{ + struct clk_init_data clk_init = { + .ops = &lochnagar_clk_ops, + .parent_names = priv->parents, + .num_parents = priv->nparents, + }; + struct lochnagar_clk *lclk; + int ret, i; + + for (i = 0; i < ARRAY_SIZE(priv->lclks); i++) { + lclk = &priv->lclks[i]; + + if (!lclk->name) + continue; + + clk_init.name = lclk->name; + + 
lclk->priv = priv; + lclk->hw.init = &clk_init; + + ret = devm_clk_hw_register(priv->dev, &lclk->hw); + if (ret) { + dev_err(priv->dev, "Failed to register %s: %d\n", + lclk->name, ret); + return ret; + } + } + + ret = devm_of_clk_add_hw_provider(priv->dev, lochnagar_of_clk_hw_get, + priv); + if (ret < 0) + dev_err(priv->dev, "Failed to register provider: %d\n", ret); + + return ret; +} + +static const struct of_device_id lochnagar_of_match[] = { + { .compatible = "cirrus,lochnagar1-clk", .data = (void *)LOCHNAGAR1 }, + { .compatible = "cirrus,lochnagar2-clk", .data = (void *)LOCHNAGAR2 }, + {} +}; +MODULE_DEVICE_TABLE(of, lochnagar_of_match); + +static int lochnagar_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct lochnagar_clk_priv *priv; + const struct of_device_id *of_id; + int ret; + + of_id = of_match_device(lochnagar_of_match, dev); + if (!of_id) + return -EINVAL; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + priv->regmap = dev_get_regmap(dev->parent, NULL); + priv->type = (enum lochnagar_type)of_id->data; + + ret = lochnagar_init_parents(priv); + if (ret) + return ret; + + return lochnagar_init_clks(priv); +} + +static struct platform_driver lochnagar_clk_driver = { + .driver = { + .name = "lochnagar-clk", + .of_match_table = lochnagar_of_match, + }, + .probe = lochnagar_clk_probe, +}; +module_platform_driver(lochnagar_clk_driver); + +MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.cirrus.com>"); +MODULE_DESCRIPTION("Clock driver for Cirrus Logic Lochnagar Board"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/clk-milbeaut.c b/drivers/clk/clk-milbeaut.c new file mode 100644 index 000000000000..5fc78faf820c --- /dev/null +++ b/drivers/clk/clk-milbeaut.c @@ -0,0 +1,663 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Socionext Inc. + * Copyright (C) 2016 Linaro Ltd. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#define M10V_CLKSEL1 0x0 +#define CLKSEL(n) (((n) - 1) * 4 + M10V_CLKSEL1) + +#define M10V_PLL1 "pll1" +#define M10V_PLL1DIV2 "pll1-2" +#define M10V_PLL2 "pll2" +#define M10V_PLL2DIV2 "pll2-2" +#define M10V_PLL6 "pll6" +#define M10V_PLL6DIV2 "pll6-2" +#define M10V_PLL6DIV3 "pll6-3" +#define M10V_PLL7 "pll7" +#define M10V_PLL7DIV2 "pll7-2" +#define M10V_PLL7DIV5 "pll7-5" +#define M10V_PLL9 "pll9" +#define M10V_PLL10 "pll10" +#define M10V_PLL10DIV2 "pll10-2" +#define M10V_PLL11 "pll11" + +#define M10V_SPI_PARENT0 "spi-parent0" +#define M10V_SPI_PARENT1 "spi-parent1" +#define M10V_SPI_PARENT2 "spi-parent2" +#define M10V_UHS1CLK2_PARENT0 "uhs1clk2-parent0" +#define M10V_UHS1CLK2_PARENT1 "uhs1clk2-parent1" +#define M10V_UHS1CLK2_PARENT2 "uhs1clk2-parent2" +#define M10V_UHS1CLK1_PARENT0 "uhs1clk1-parent0" +#define M10V_UHS1CLK1_PARENT1 "uhs1clk1-parent1" +#define M10V_NFCLK_PARENT0 "nfclk-parent0" +#define M10V_NFCLK_PARENT1 "nfclk-parent1" +#define M10V_NFCLK_PARENT2 "nfclk-parent2" +#define M10V_NFCLK_PARENT3 "nfclk-parent3" +#define M10V_NFCLK_PARENT4 "nfclk-parent4" +#define M10V_NFCLK_PARENT5 "nfclk-parent5" + +#define M10V_DCHREQ 1 +#define M10V_UPOLL_RATE 1 +#define M10V_UTIMEOUT 250 + +#define M10V_EMMCCLK_ID 0 +#define M10V_ACLK_ID 1 +#define M10V_HCLK_ID 2 +#define M10V_PCLK_ID 3 +#define M10V_RCLK_ID 4 +#define M10V_SPICLK_ID 5 +#define M10V_NFCLK_ID 6 +#define M10V_UHS1CLK2_ID 7 +#define M10V_NUM_CLKS 8 + +#define to_m10v_div(_hw) container_of(_hw, struct m10v_clk_divider, hw) + +static struct clk_hw_onecell_data *m10v_clk_data; + +static DEFINE_SPINLOCK(m10v_crglock); + +struct m10v_clk_div_factors { + const char *name; + const char *parent_name; + u32 offset; + u8 shift; + u8 width; + const struct clk_div_table *table; + unsigned long div_flags; + int onecell_idx; +}; + +struct m10v_clk_div_fixed_data { + const char *name; + const char *parent_name; + u8 div; + u8 mult; + int onecell_idx; +}; + +struct m10v_clk_mux_factors { + const char *name; + const char * const *parent_names; + u8 num_parents; + u32 offset; + u8 shift; + u8 mask; + u32 *table; + unsigned long mux_flags; + int onecell_idx; +}; + +static const struct clk_div_table emmcclk_table[] = { + { .val = 0, .div = 8 }, + { .val = 1, .div = 9 }, + { .val = 2, .div = 10 }, + { .val = 3, .div = 15 }, + { .div = 0 }, +}; + +static const struct clk_div_table mclk400_table[] = { + { .val = 1, .div = 2 }, + { .val = 3, .div = 4 }, + { .div = 0 }, +}; + +static const struct clk_div_table mclk200_table[] = { + { .val = 3, .div = 4 }, + { .val = 7, .div = 8 }, + { .div = 0 }, +}; + +static const struct clk_div_table aclk400_table[] = { + { .val = 1, .div = 2 }, + { .val = 3, .div = 4 }, + { .div = 0 }, +}; + +static const struct clk_div_table aclk300_table[] = { + { .val = 0, .div = 2 }, + { .val = 1, .div = 3 }, + { .div = 0 }, +}; + +static const struct clk_div_table aclk_table[] = { + { .val = 3, .div = 4 }, + { .val = 7, .div = 8 }, + { .div = 0 }, +}; + +static const struct clk_div_table aclkexs_table[] = { + { .val = 3, .div = 4 }, + { .val = 4, .div = 5 }, + { .val = 5, .div = 6 }, + { .val = 7, .div = 8 }, + { .div = 0 }, +}; + +static const struct clk_div_table hclk_table[] = { + { .val = 7, .div = 8 }, + { .val = 15, .div = 16 }, + { .div = 0 }, +}; + +static const struct clk_div_table 
hclkbmh_table[] = { + { .val = 3, .div = 4 }, + { .val = 7, .div = 8 }, + { .div = 0 }, +}; + +static const struct clk_div_table pclk_table[] = { + { .val = 15, .div = 16 }, + { .val = 31, .div = 32 }, + { .div = 0 }, +}; + +static const struct clk_div_table rclk_table[] = { + { .val = 0, .div = 8 }, + { .val = 1, .div = 16 }, + { .val = 2, .div = 24 }, + { .val = 3, .div = 32 }, + { .div = 0 }, +}; + +static const struct clk_div_table uhs1clk0_table[] = { + { .val = 0, .div = 2 }, + { .val = 1, .div = 3 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 8 }, + { .val = 4, .div = 16 }, + { .div = 0 }, +}; + +static const struct clk_div_table uhs2clk_table[] = { + { .val = 0, .div = 9 }, + { .val = 1, .div = 10 }, + { .val = 2, .div = 11 }, + { .val = 3, .div = 12 }, + { .val = 4, .div = 13 }, + { .val = 5, .div = 14 }, + { .val = 6, .div = 16 }, + { .val = 7, .div = 18 }, + { .div = 0 }, +}; + +static u32 spi_mux_table[] = {0, 1, 2}; +static const char * const spi_mux_names[] = { + M10V_SPI_PARENT0, M10V_SPI_PARENT1, M10V_SPI_PARENT2 +}; + +static u32 uhs1clk2_mux_table[] = {2, 3, 4, 8}; +static const char * const uhs1clk2_mux_names[] = { + M10V_UHS1CLK2_PARENT0, M10V_UHS1CLK2_PARENT1, + M10V_UHS1CLK2_PARENT2, M10V_PLL6DIV2 +}; + +static u32 uhs1clk1_mux_table[] = {3, 4, 8}; +static const char * const uhs1clk1_mux_names[] = { + M10V_UHS1CLK1_PARENT0, M10V_UHS1CLK1_PARENT1, M10V_PLL6DIV2 +}; + +static u32 nfclk_mux_table[] = {0, 1, 2, 3, 4, 8}; +static const char * const nfclk_mux_names[] = { + M10V_NFCLK_PARENT0, M10V_NFCLK_PARENT1, M10V_NFCLK_PARENT2, + M10V_NFCLK_PARENT3, M10V_NFCLK_PARENT4, M10V_NFCLK_PARENT5 +}; + +static const struct m10v_clk_div_fixed_data m10v_pll_fixed_data[] = { + {M10V_PLL1, NULL, 1, 40, -1}, + {M10V_PLL2, NULL, 1, 30, -1}, + {M10V_PLL6, NULL, 1, 35, -1}, + {M10V_PLL7, NULL, 1, 40, -1}, + {M10V_PLL9, NULL, 1, 33, -1}, + {M10V_PLL10, NULL, 5, 108, -1}, + {M10V_PLL10DIV2, M10V_PLL10, 2, 1, -1}, + {M10V_PLL11, NULL, 2, 75, -1}, +}; + +static const struct m10v_clk_div_fixed_data m10v_div_fixed_data[] = { + {"usb2", NULL, 2, 1, -1}, + {"pcisuppclk", NULL, 20, 1, -1}, + {M10V_PLL1DIV2, M10V_PLL1, 2, 1, -1}, + {M10V_PLL2DIV2, M10V_PLL2, 2, 1, -1}, + {M10V_PLL6DIV2, M10V_PLL6, 2, 1, -1}, + {M10V_PLL6DIV3, M10V_PLL6, 3, 1, -1}, + {M10V_PLL7DIV2, M10V_PLL7, 2, 1, -1}, + {M10V_PLL7DIV5, M10V_PLL7, 5, 1, -1}, + {"ca7wd", M10V_PLL2DIV2, 12, 1, -1}, + {"pclkca7wd", M10V_PLL1DIV2, 16, 1, -1}, + {M10V_SPI_PARENT0, M10V_PLL10DIV2, 2, 1, -1}, + {M10V_SPI_PARENT1, M10V_PLL10DIV2, 4, 1, -1}, + {M10V_SPI_PARENT2, M10V_PLL7DIV2, 8, 1, -1}, + {M10V_UHS1CLK2_PARENT0, M10V_PLL7, 4, 1, -1}, + {M10V_UHS1CLK2_PARENT1, M10V_PLL7, 8, 1, -1}, + {M10V_UHS1CLK2_PARENT2, M10V_PLL7, 16, 1, -1}, + {M10V_UHS1CLK1_PARENT0, M10V_PLL7, 8, 1, -1}, + {M10V_UHS1CLK1_PARENT1, M10V_PLL7, 16, 1, -1}, + {M10V_NFCLK_PARENT0, M10V_PLL7DIV2, 8, 1, -1}, + {M10V_NFCLK_PARENT1, M10V_PLL7DIV2, 10, 1, -1}, + {M10V_NFCLK_PARENT2, M10V_PLL7DIV2, 13, 1, -1}, + {M10V_NFCLK_PARENT3, M10V_PLL7DIV2, 16, 1, -1}, + {M10V_NFCLK_PARENT4, M10V_PLL7DIV2, 40, 1, -1}, + {M10V_NFCLK_PARENT5, M10V_PLL7DIV5, 10, 1, -1}, +}; + +static const struct m10v_clk_div_factors m10v_div_factor_data[] = { + {"emmc", M10V_PLL11, CLKSEL(1), 28, 3, emmcclk_table, 0, + M10V_EMMCCLK_ID}, + {"mclk400", M10V_PLL1DIV2, CLKSEL(10), 7, 3, mclk400_table, 0, -1}, + {"mclk200", M10V_PLL1DIV2, CLKSEL(10), 3, 4, mclk200_table, 0, -1}, + {"aclk400", M10V_PLL1DIV2, CLKSEL(10), 0, 3, aclk400_table, 0, -1}, + {"aclk300", M10V_PLL2DIV2, CLKSEL(12), 0, 2, 
aclk300_table, 0, -1}, + {"aclk", M10V_PLL1DIV2, CLKSEL(9), 20, 4, aclk_table, 0, M10V_ACLK_ID}, + {"aclkexs", M10V_PLL1DIV2, CLKSEL(9), 16, 4, aclkexs_table, 0, -1}, + {"hclk", M10V_PLL1DIV2, CLKSEL(9), 7, 5, hclk_table, 0, M10V_HCLK_ID}, + {"hclkbmh", M10V_PLL1DIV2, CLKSEL(9), 12, 4, hclkbmh_table, 0, -1}, + {"pclk", M10V_PLL1DIV2, CLKSEL(9), 0, 7, pclk_table, 0, M10V_PCLK_ID}, + {"uhs1clk0", M10V_PLL7, CLKSEL(1), 3, 5, uhs1clk0_table, 0, -1}, + {"uhs2clk", M10V_PLL6DIV3, CLKSEL(1), 18, 4, uhs2clk_table, 0, -1}, +}; + +static const struct m10v_clk_mux_factors m10v_mux_factor_data[] = { + {"spi", spi_mux_names, ARRAY_SIZE(spi_mux_names), + CLKSEL(8), 3, 7, spi_mux_table, 0, M10V_SPICLK_ID}, + {"uhs1clk2", uhs1clk2_mux_names, ARRAY_SIZE(uhs1clk2_mux_names), + CLKSEL(1), 13, 31, uhs1clk2_mux_table, 0, M10V_UHS1CLK2_ID}, + {"uhs1clk1", uhs1clk1_mux_names, ARRAY_SIZE(uhs1clk1_mux_names), + CLKSEL(1), 8, 31, uhs1clk1_mux_table, 0, -1}, + {"nfclk", nfclk_mux_names, ARRAY_SIZE(nfclk_mux_names), + CLKSEL(1), 22, 127, nfclk_mux_table, 0, M10V_NFCLK_ID}, +}; + +static u8 m10v_mux_get_parent(struct clk_hw *hw) +{ + struct clk_mux *mux = to_clk_mux(hw); + u32 val; + + val = readl(mux->reg) >> mux->shift; + val &= mux->mask; + + return clk_mux_val_to_index(hw, mux->table, mux->flags, val); +} + +static int m10v_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_mux *mux = to_clk_mux(hw); + u32 val = clk_mux_index_to_val(mux->table, mux->flags, index); + unsigned long flags = 0; + u32 reg; + u32 write_en = BIT(fls(mux->mask) - 1); + + if (mux->lock) + spin_lock_irqsave(mux->lock, flags); + else + __acquire(mux->lock); + + reg = readl(mux->reg); + reg &= ~(mux->mask << mux->shift); + + val = (val | write_en) << mux->shift; + reg |= val; + writel(reg, mux->reg); + + if (mux->lock) + spin_unlock_irqrestore(mux->lock, flags); + else + __release(mux->lock); + + return 0; +} + +static const struct clk_ops m10v_mux_ops = { + .get_parent = m10v_mux_get_parent, + .set_parent = m10v_mux_set_parent, + .determine_rate = __clk_mux_determine_rate, +}; + +static struct clk_hw *m10v_clk_hw_register_mux(struct device *dev, + const char *name, const char * const *parent_names, + u8 num_parents, unsigned long flags, void __iomem *reg, + u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, + spinlock_t *lock) +{ + struct clk_mux *mux; + struct clk_hw *hw; + struct clk_init_data init; + int ret; + + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &m10v_mux_ops; + init.flags = flags; + init.parent_names = parent_names; + init.num_parents = num_parents; + + mux->reg = reg; + mux->shift = shift; + mux->mask = mask; + mux->flags = clk_mux_flags; + mux->lock = lock; + mux->table = table; + mux->hw.init = &init; + + hw = &mux->hw; + ret = clk_hw_register(dev, hw); + if (ret) { + kfree(mux); + hw = ERR_PTR(ret); + } + + return hw; + +} + +struct m10v_clk_divider { + struct clk_hw hw; + void __iomem *reg; + u8 shift; + u8 width; + u8 flags; + const struct clk_div_table *table; + spinlock_t *lock; + void __iomem *write_valid_reg; +}; + +static unsigned long m10v_clk_divider_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct m10v_clk_divider *divider = to_m10v_div(hw); + unsigned int val; + + val = readl(divider->reg) >> divider->shift; + val &= clk_div_mask(divider->width); + + return divider_recalc_rate(hw, parent_rate, val, divider->table, + divider->flags, divider->width); +} + +static long m10v_clk_divider_round_rate(struct clk_hw *hw, 
unsigned long rate, + unsigned long *prate) +{ + struct m10v_clk_divider *divider = to_m10v_div(hw); + + /* if read only, just return current value */ + if (divider->flags & CLK_DIVIDER_READ_ONLY) { + u32 val; + + val = readl(divider->reg) >> divider->shift; + val &= clk_div_mask(divider->width); + + return divider_ro_round_rate(hw, rate, prate, divider->table, + divider->width, divider->flags, + val); + } + + return divider_round_rate(hw, rate, prate, divider->table, + divider->width, divider->flags); +} + +static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct m10v_clk_divider *divider = to_m10v_div(hw); + int value; + unsigned long flags = 0; + u32 val; + u32 write_en = BIT(divider->width - 1); + + value = divider_get_val(rate, parent_rate, divider->table, + divider->width, divider->flags); + if (value < 0) + return value; + + if (divider->lock) + spin_lock_irqsave(divider->lock, flags); + else + __acquire(divider->lock); + + val = readl(divider->reg); + val &= ~(clk_div_mask(divider->width) << divider->shift); + + val |= ((u32)value | write_en) << divider->shift; + writel(val, divider->reg); + + if (divider->write_valid_reg) { + writel(M10V_DCHREQ, divider->write_valid_reg); + if (readl_poll_timeout(divider->write_valid_reg, val, + !val, M10V_UPOLL_RATE, M10V_UTIMEOUT)) + pr_err("%s:%s couldn't stabilize\n", + __func__, divider->hw.init->name); + } + + if (divider->lock) + spin_unlock_irqrestore(divider->lock, flags); + else + __release(divider->lock); + + return 0; +} + +static const struct clk_ops m10v_clk_divider_ops = { + .recalc_rate = m10v_clk_divider_recalc_rate, + .round_rate = m10v_clk_divider_round_rate, + .set_rate = m10v_clk_divider_set_rate, +}; + +static struct clk_hw *m10v_clk_hw_register_divider(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, const struct clk_div_table *table, + spinlock_t *lock, void __iomem *write_valid_reg) +{ + struct m10v_clk_divider *div; + struct clk_hw *hw; + struct clk_init_data init; + int ret; + + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &m10v_clk_divider_ops; + init.flags = flags; + init.parent_names = &parent_name; + init.num_parents = 1; + + div->reg = reg; + div->shift = shift; + div->width = width; + div->flags = clk_divider_flags; + div->lock = lock; + div->hw.init = &init; + div->table = table; + div->write_valid_reg = write_valid_reg; + + /* register the clock */ + hw = &div->hw; + ret = clk_hw_register(dev, hw); + if (ret) { + kfree(div); + hw = ERR_PTR(ret); + } + + return hw; +} + +static void m10v_reg_div_pre(const struct m10v_clk_div_factors *factors, + struct clk_hw_onecell_data *clk_data, + void __iomem *base) +{ + struct clk_hw *hw; + void __iomem *write_valid_reg; + + /* + * The registers on CLKSEL(9) or CLKSEL(10) need additional + * writing to become valid. 
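m10v_clk_divider_set_rate() treats the top bit of each divider field as a write-enable strobe, and fields that live in CLKSEL(9) or CLKSEL(10) additionally need a commit write that the hardware acknowledges by clearing CLKSEL(11). For the "hclk" divider declared above (CLKSEL(9), shift 7, width 5), programming divide-by-8 effectively does:

	/* table value for /8 is 7; BIT(width - 1) is the write-enable bit */
	val = readl(base + CLKSEL(9));
	val &= ~(clk_div_mask(5) << 7);
	val |= (7 | BIT(4)) << 7;
	writel(val, base + CLKSEL(9));

	/* commit for CLKSEL(9)/CLKSEL(10) fields, then wait for it to clear */
	writel(M10V_DCHREQ, base + CLKSEL(11));
	readl_poll_timeout(base + CLKSEL(11), val, !val,
			   M10V_UPOLL_RATE, M10V_UTIMEOUT);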
+ */ + if ((factors->offset == CLKSEL(9)) || (factors->offset == CLKSEL(10))) + write_valid_reg = base + CLKSEL(11); + else + write_valid_reg = NULL; + + hw = m10v_clk_hw_register_divider(NULL, factors->name, + factors->parent_name, + CLK_SET_RATE_PARENT, + base + factors->offset, + factors->shift, + factors->width, factors->div_flags, + factors->table, + &m10v_crglock, write_valid_reg); + + if (factors->onecell_idx >= 0) + clk_data->hws[factors->onecell_idx] = hw; +} + +static void m10v_reg_fixed_pre(const struct m10v_clk_div_fixed_data *factors, + struct clk_hw_onecell_data *clk_data, + const char *parent_name) +{ + struct clk_hw *hw; + const char *pn = factors->parent_name ? + factors->parent_name : parent_name; + + hw = clk_hw_register_fixed_factor(NULL, factors->name, pn, 0, + factors->mult, factors->div); + + if (factors->onecell_idx >= 0) + clk_data->hws[factors->onecell_idx] = hw; +} + +static void m10v_reg_mux_pre(const struct m10v_clk_mux_factors *factors, + struct clk_hw_onecell_data *clk_data, + void __iomem *base) +{ + struct clk_hw *hw; + + hw = m10v_clk_hw_register_mux(NULL, factors->name, + factors->parent_names, + factors->num_parents, + CLK_SET_RATE_PARENT, + base + factors->offset, factors->shift, + factors->mask, factors->mux_flags, + factors->table, &m10v_crglock); + + if (factors->onecell_idx >= 0) + clk_data->hws[factors->onecell_idx] = hw; +} + +static int m10v_clk_probe(struct platform_device *pdev) +{ + int id; + struct resource *res; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + void __iomem *base; + const char *parent_name; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + parent_name = of_clk_get_parent_name(np, 0); + + for (id = 0; id < ARRAY_SIZE(m10v_div_factor_data); ++id) + m10v_reg_div_pre(&m10v_div_factor_data[id], + m10v_clk_data, base); + + for (id = 0; id < ARRAY_SIZE(m10v_div_fixed_data); ++id) + m10v_reg_fixed_pre(&m10v_div_fixed_data[id], + m10v_clk_data, parent_name); + + for (id = 0; id < ARRAY_SIZE(m10v_mux_factor_data); ++id) + m10v_reg_mux_pre(&m10v_mux_factor_data[id], + m10v_clk_data, base); + + for (id = 0; id < M10V_NUM_CLKS; id++) { + if (IS_ERR(m10v_clk_data->hws[id])) + return PTR_ERR(m10v_clk_data->hws[id]); + } + + return 0; +} + +static const struct of_device_id m10v_clk_dt_ids[] = { + { .compatible = "socionext,milbeaut-m10v-ccu", }, + { } +}; + +static struct platform_driver m10v_clk_driver = { + .probe = m10v_clk_probe, + .driver = { + .name = "m10v-ccu", + .of_match_table = m10v_clk_dt_ids, + }, +}; +builtin_platform_driver(m10v_clk_driver); + +static void __init m10v_cc_init(struct device_node *np) +{ + int id; + void __iomem *base; + const char *parent_name; + struct clk_hw *hw; + + m10v_clk_data = kzalloc(struct_size(m10v_clk_data, hws, + M10V_NUM_CLKS), + GFP_KERNEL); + + if (!m10v_clk_data) + return; + + base = of_iomap(np, 0); + if (!base) { + kfree(m10v_clk_data); + return; + } + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) { + kfree(m10v_clk_data); + iounmap(base); + return; + } + + /* + * This way all clocks fetched before the platform device probes, + * except those we assign here for early use, will be deferred. + */ + for (id = 0; id < M10V_NUM_CLKS; id++) + m10v_clk_data->hws[id] = ERR_PTR(-EPROBE_DEFER); + + /* + * PLLs are set by bootloader so this driver registers them as the + * fixed factor. 
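The CLK_OF_DECLARE_DRIVER early path above registers the PLL fixed factors early (and, just below, the "rclk" the timer needs), and parks every other exported slot at ERR_PTR(-EPROBE_DEFER) until m10v_clk_probe() fills it in. For consumers this is ordinary probe deferral; roughly (sketch):

	/* a peripheral requesting a divider that only the platform driver
	 * registers is simply retried after m10v_clk_probe() has run */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* typically -EPROBE_DEFER at first */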
+ */ + for (id = 0; id < ARRAY_SIZE(m10v_pll_fixed_data); ++id) + m10v_reg_fixed_pre(&m10v_pll_fixed_data[id], + m10v_clk_data, parent_name); + + /* + * timer consumes "rclk" so it needs to register here. + */ + hw = m10v_clk_hw_register_divider(NULL, "rclk", M10V_PLL10DIV2, 0, + base + CLKSEL(1), 0, 3, 0, rclk_table, + &m10v_crglock, NULL); + m10v_clk_data->hws[M10V_RCLK_ID] = hw; + + m10v_clk_data->num = M10V_NUM_CLKS; + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, m10v_clk_data); +} +CLK_OF_DECLARE_DRIVER(m10v_cc, "socionext,milbeaut-m10v-ccu", m10v_cc_init); diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c index 3c86f859c199..94470b4eadf4 100644 --- a/drivers/clk/clk-multiplier.c +++ b/drivers/clk/clk-multiplier.c @@ -11,6 +11,22 @@ #include <linux/of.h> #include <linux/slab.h> +static inline u32 clk_mult_readl(struct clk_multiplier *mult) +{ + if (mult->flags & CLK_MULTIPLIER_BIG_ENDIAN) + return ioread32be(mult->reg); + + return readl(mult->reg); +} + +static inline void clk_mult_writel(struct clk_multiplier *mult, u32 val) +{ + if (mult->flags & CLK_MULTIPLIER_BIG_ENDIAN) + iowrite32be(val, mult->reg); + else + writel(val, mult->reg); +} + static unsigned long __get_mult(struct clk_multiplier *mult, unsigned long rate, unsigned long parent_rate) @@ -27,7 +43,7 @@ static unsigned long clk_multiplier_recalc_rate(struct clk_hw *hw, struct clk_multiplier *mult = to_clk_multiplier(hw); unsigned long val; - val = clk_readl(mult->reg) >> mult->shift; + val = clk_mult_readl(mult) >> mult->shift; val &= GENMASK(mult->width - 1, 0); if (!val && mult->flags & CLK_MULTIPLIER_ZERO_BYPASS) @@ -118,10 +134,10 @@ static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate, else __acquire(mult->lock); - val = clk_readl(mult->reg); + val = clk_mult_readl(mult); val &= ~GENMASK(mult->width + mult->shift - 1, mult->shift); val |= factor << mult->shift; - clk_writel(val, mult->reg); + clk_mult_writel(mult, val); if (mult->lock) spin_unlock_irqrestore(mult->lock, flags); diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index 2ad2df2e8909..893c9b285532 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -23,6 +23,22 @@ * parent - parent is adjustable through clk_set_parent */ +static inline u32 clk_mux_readl(struct clk_mux *mux) +{ + if (mux->flags & CLK_MUX_BIG_ENDIAN) + return ioread32be(mux->reg); + + return readl(mux->reg); +} + +static inline void clk_mux_writel(struct clk_mux *mux, u32 val) +{ + if (mux->flags & CLK_MUX_BIG_ENDIAN) + iowrite32be(val, mux->reg); + else + writel(val, mux->reg); +} + int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags, unsigned int val) { @@ -73,7 +89,7 @@ static u8 clk_mux_get_parent(struct clk_hw *hw) struct clk_mux *mux = to_clk_mux(hw); u32 val; - val = clk_readl(mux->reg) >> mux->shift; + val = clk_mux_readl(mux) >> mux->shift; val &= mux->mask; return clk_mux_val_to_index(hw, mux->table, mux->flags, val); @@ -94,12 +110,12 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index) if (mux->flags & CLK_MUX_HIWORD_MASK) { reg = mux->mask << (mux->shift + 16); } else { - reg = clk_readl(mux->reg); + reg = clk_mux_readl(mux); reg &= ~(mux->mask << mux->shift); } val = val << mux->shift; reg |= val; - clk_writel(reg, mux->reg); + clk_mux_writel(mux, reg); if (mux->lock) spin_unlock_irqrestore(mux->lock, flags); diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 1212a9be7e80..4739a47ec8bd 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ 
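The clk_mult_readl()/clk_mux_writel() style helpers added to clk-multiplier.c and clk-mux.c replace the old global clk_readl()/clk_writel(): byte order becomes a per-clock property selected with the new *_BIG_ENDIAN flags instead of being tied to the architecture-wide behaviour of the old helpers. A hypothetical registration on a big-endian controller (name and offset invented for illustration):

	/* 2-bit mux field at bit 0 of a big-endian register */
	hw = clk_hw_register_mux(NULL, "periph_sel", parent_names,
				 ARRAY_SIZE(parent_names), 0,
				 base + 0x10, 0, 2,
				 CLK_MUX_BIG_ENDIAN, &lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);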
-34,6 +34,7 @@ #define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */ #define CGB_PLL1 4 #define CGB_PLL2 5 +#define MAX_PLL_DIV 16 struct clockgen_pll_div { struct clk *clk; @@ -41,7 +42,7 @@ struct clockgen_pll_div { }; struct clockgen_pll { - struct clockgen_pll_div div[8]; + struct clockgen_pll_div div[MAX_PLL_DIV]; }; #define CLKSEL_VALID 1 @@ -79,7 +80,7 @@ struct clockgen_chipinfo { const struct clockgen_muxinfo *cmux_groups[2]; const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL]; void (*init_periph)(struct clockgen *cg); - int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */ + int cmux_to_group[NUM_CMUX + 1]; /* array should be -1 terminated */ u32 pll_mask; /* 1 << n bit set if PLL n is valid */ u32 flags; /* CG_xxx */ }; @@ -245,6 +246,58 @@ static const struct clockgen_muxinfo clockgen2_cmux_cgb = { }, }; +static const struct clockgen_muxinfo ls1028a_hwa1 = { + { + { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, + {}, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, + }, +}; + +static const struct clockgen_muxinfo ls1028a_hwa2 = { + { + { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, + {}, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, + }, +}; + +static const struct clockgen_muxinfo ls1028a_hwa3 = { + { + { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, + {}, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, + }, +}; + +static const struct clockgen_muxinfo ls1028a_hwa4 = { + { + { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, + { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, + {}, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, + }, +}; + static const struct clockgen_muxinfo ls1043a_hwa1 = { { {}, @@ -508,6 +561,21 @@ static const struct clockgen_chipinfo chipinfo[] = { .pll_mask = 0x03, }, { + .compat = "fsl,ls1028a-clockgen", + .cmux_groups = { + &clockgen2_cmux_cga12 + }, + .hwaccel = { + &ls1028a_hwa1, &ls1028a_hwa2, + &ls1028a_hwa3, &ls1028a_hwa4 + }, + .cmux_to_group = { + 0, 0, 0, 0, -1 + }, + .pll_mask = 0x07, + .flags = CG_VER3 | CG_LITTLE_ENDIAN, + }, + { .compat = "fsl,ls1043a-clockgen", .init_periph = t2080_init_periph, .cmux_groups = { @@ -601,7 +669,7 @@ static const struct clockgen_chipinfo chipinfo[] = { &p4080_cmux_grp1, &p4080_cmux_grp2 }, .cmux_to_group = { - 0, 0, 0, 0, 1, 1, 1, 1 + 0, 0, 0, 0, 1, 1, 1, 1, -1 }, .pll_mask = 0x1f, }, @@ -1128,7 +1196,7 @@ static void __init create_one_pll(struct clockgen *cg, int idx) int ret; /* - * For platform PLL, there are 8 divider clocks. + * For platform PLL, there are MAX_PLL_DIV divider clocks. * For core PLL, there are 4 divider clocks at most. 
*/ if (idx != PLATFORM_PLL && i >= 4) @@ -1423,6 +1491,7 @@ CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init); +CLK_OF_DECLARE(qoriq_clockgen_ls1028a, "fsl,ls1028a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init); diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c index 531b030d4d4e..d975465fe2a8 100644 --- a/drivers/clk/clk-xgene.c +++ b/drivers/clk/clk-xgene.c @@ -262,7 +262,7 @@ static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw, else __acquire(fd->lock); - val = clk_readl(fd->reg); + val = readl(fd->reg); if (fd->lock) spin_unlock_irqrestore(fd->lock, flags); @@ -333,10 +333,10 @@ static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate, else __acquire(fd->lock); - val = clk_readl(fd->reg); + val = readl(fd->reg); val &= ~fd->mask; val |= (scale << fd->shift); - clk_writel(val, fd->reg); + writel(val, fd->reg); if (fd->lock) spin_unlock_irqrestore(fd->lock, flags); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 96053a96fe2f..a72520720783 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -347,23 +347,18 @@ unsigned int __clk_get_enable_count(struct clk *clk) static unsigned long clk_core_get_rate_nolock(struct clk_core *core) { - unsigned long ret; - - if (!core) { - ret = 0; - goto out; - } - - ret = core->rate; - - if (!core->num_parents) - goto out; + if (!core) + return 0; - if (!core->parent) - ret = 0; + if (!core->num_parents || core->parent) + return core->rate; -out: - return ret; + /* + * Clk must have a parent because num_parents > 0 but the parent isn't + * known yet. Best to return 0 as the rate of this clk until we can + * properly recalc the rate based on the parent's rate. + */ + return 0; } unsigned long clk_hw_get_rate(const struct clk_hw *hw) @@ -524,9 +519,15 @@ void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, EXPORT_SYMBOL_GPL(clk_hw_set_rate_range); /* + * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk + * @hw: mux type clk to determine rate on + * @req: rate request, also used to return preferred parent and frequencies + * * Helper for finding best parent to provide a given frequency. This can be used * directly as a determine_rate callback (e.g. for a mux), or from a more * complex clock that may combine a mux with other operations. + * + * Returns: 0 on success, -EERROR value on error */ int __clk_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) @@ -3318,8 +3319,10 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, * @dev: device that is registering this clock * @hw: link to hardware-specific clock data * - * clk_register is the primary interface for populating the clock tree with new - * clock nodes. It returns a pointer to the newly allocated struct clk which + * clk_register is the *deprecated* interface for populating the clock tree with + * new clock nodes. Use clk_hw_register() instead. 
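The kernel-doc updates above steer providers from clk_register()/devm_clk_register() to the clk_hw variants; the practical difference is only in what comes back from registration (sketch, with my_hw standing in for a provider-owned struct clk_hw):

	/* legacy: the provider is handed a struct clk it mostly should not use */
	clk = clk_register(dev, &my_hw);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* preferred: the provider keeps only the clk_hw; consumers get their
	 * own struct clk through the clk_hw-based provider interfaces */
	ret = clk_hw_register(dev, &my_hw);
	if (ret)
		return ret;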
+ * + * Returns: a pointer to the newly allocated struct clk which * cannot be dereferenced by driver code but may be used in conjunction with the * rest of the clock API. In the event of an error clk_register will return an * error code; drivers must test for an error code after calling clk_register. @@ -3575,9 +3578,10 @@ static void devm_clk_hw_release(struct device *dev, void *res) * @dev: device that is registering this clock * @hw: link to hardware-specific clock data * - * Managed clk_register(). Clocks returned from this function are - * automatically clk_unregister()ed on driver detach. See clk_register() for - * more information. + * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead. + * + * Clocks returned from this function are automatically clk_unregister()ed on + * driver detach. See clk_register() for more information. */ struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) { @@ -3895,6 +3899,8 @@ EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); * @np: Device node pointer associated with clock provider * @clk_src_get: callback for decoding clock * @data: context pointer for @clk_src_get callback. + * + * This function is *deprecated*. Use of_clk_add_hw_provider() instead. */ int of_clk_add_provider(struct device_node *np, struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 8c4435c53f09..6e787cc9e5b9 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -46,6 +46,8 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) if (con_id) best_possible += 1; + lockdep_assert_held(&clocks_mutex); + list_for_each_entry(p, &clocks, node) { match = 0; if (p->dev_id) { @@ -402,7 +404,10 @@ void devm_clk_release_clkdev(struct device *dev, const char *con_id, struct clk_lookup *cl; int rval; + mutex_lock(&clocks_mutex); cl = clk_find(dev_id, con_id); + mutex_unlock(&clocks_mutex); + WARN_ON(!cl); rval = devres_release(dev, devm_clkdev_release, devm_clk_match_clkdev, cl); diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c index d1bbee19ed0f..bdc52364b421 100644 --- a/drivers/clk/davinci/da8xx-cfgchip.c +++ b/drivers/clk/davinci/da8xx-cfgchip.c @@ -160,10 +160,8 @@ static int __init da8xx_cfgchip_register_div4p5(struct device *dev, struct da8xx_cfgchip_gate_clk *gate; gate = da8xx_cfgchip_gate_clk_register(dev, &da8xx_div4p5ena_info, regmap); - if (IS_ERR(gate)) - return PTR_ERR(gate); - return 0; + return PTR_ERR_OR_ZERO(gate); } static int __init diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c index f40419959656..794eeff0d5d2 100644 --- a/drivers/clk/hisilicon/clk-hi3660.c +++ b/drivers/clk/hisilicon/clk-hi3660.c @@ -163,8 +163,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = { "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, }, { HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2", "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, }, + /* + * clk_gate_ufs_subsys is a system bus clock, mark it as critical + * clock and keep it on for system suspend and resume. 
+ */ { HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus", - CLK_SET_RATE_PARENT, 0x50, 21, 0, }, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0x50, 21, 0, }, { HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus", CLK_SET_RATE_PARENT, 0x50, 28, 0, }, { HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus", diff --git a/drivers/clk/hisilicon/clk-hisi-phase.c b/drivers/clk/hisilicon/clk-hisi-phase.c index 5fdc267bb2da..ba6afad66a2b 100644 --- a/drivers/clk/hisilicon/clk-hisi-phase.c +++ b/drivers/clk/hisilicon/clk-hisi-phase.c @@ -75,10 +75,10 @@ static int hisi_clk_set_phase(struct clk_hw *hw, int degrees) spin_lock_irqsave(phase->lock, flags); - val = clk_readl(phase->reg); + val = readl(phase->reg); val &= ~phase->mask; val |= regval << phase->shift; - clk_writel(val, phase->reg); + writel(val, phase->reg); spin_unlock_irqrestore(phase->lock, flags); diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile index 0d5180fbe988..05641c64b317 100644 --- a/drivers/clk/imx/Makefile +++ b/drivers/clk/imx/Makefile @@ -35,7 +35,7 @@ obj-$(CONFIG_SOC_IMX25) += clk-imx25.o obj-$(CONFIG_SOC_IMX27) += clk-imx27.o obj-$(CONFIG_SOC_IMX31) += clk-imx31.o obj-$(CONFIG_SOC_IMX35) += clk-imx35.o -obj-$(CONFIG_SOC_IMX5) += clk-imx51-imx53.o +obj-$(CONFIG_SOC_IMX5) += clk-imx5.o obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o obj-$(CONFIG_SOC_IMX6SLL) += clk-imx6sll.o diff --git a/drivers/clk/imx/clk-divider-gate.c b/drivers/clk/imx/clk-divider-gate.c index df1f8429fe16..2a8352a316c7 100644 --- a/drivers/clk/imx/clk-divider-gate.c +++ b/drivers/clk/imx/clk-divider-gate.c @@ -29,7 +29,7 @@ static unsigned long clk_divider_gate_recalc_rate_ro(struct clk_hw *hw, struct clk_divider *div = to_clk_divider(hw); unsigned int val; - val = clk_readl(div->reg) >> div->shift; + val = readl(div->reg) >> div->shift; val &= clk_div_mask(div->width); if (!val) return 0; @@ -51,7 +51,7 @@ static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw, if (!clk_hw_is_enabled(hw)) { val = div_gate->cached_val; } else { - val = clk_readl(div->reg) >> div->shift; + val = readl(div->reg) >> div->shift; val &= clk_div_mask(div->width); } @@ -87,10 +87,10 @@ static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate, spin_lock_irqsave(div->lock, flags); if (clk_hw_is_enabled(hw)) { - val = clk_readl(div->reg); + val = readl(div->reg); val &= ~(clk_div_mask(div->width) << div->shift); val |= (u32)value << div->shift; - clk_writel(val, div->reg); + writel(val, div->reg); } else { div_gate->cached_val = value; } @@ -114,9 +114,9 @@ static int clk_divider_enable(struct clk_hw *hw) spin_lock_irqsave(div->lock, flags); /* restore div val */ - val = clk_readl(div->reg); + val = readl(div->reg); val |= div_gate->cached_val << div->shift; - clk_writel(val, div->reg); + writel(val, div->reg); spin_unlock_irqrestore(div->lock, flags); @@ -133,10 +133,10 @@ static void clk_divider_disable(struct clk_hw *hw) spin_lock_irqsave(div->lock, flags); /* store the current div val */ - val = clk_readl(div->reg) >> div->shift; + val = readl(div->reg) >> div->shift; val &= clk_div_mask(div->width); div_gate->cached_val = val; - clk_writel(0, div->reg); + writel(0, div->reg); spin_unlock_irqrestore(div->lock, flags); } @@ -146,7 +146,7 @@ static int clk_divider_is_enabled(struct clk_hw *hw) struct clk_divider *div = to_clk_divider(hw); u32 val; - val = clk_readl(div->reg) >> div->shift; + val = readl(div->reg) >> div->shift; val &= clk_div_mask(div->width); return val ? 
1 : 0; @@ -206,7 +206,7 @@ struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name, div_gate->divider.hw.init = &init; div_gate->divider.flags = CLK_DIVIDER_ONE_BASED | clk_divider_flags; /* cache gate status */ - val = clk_readl(reg) >> shift; + val = readl(reg) >> shift; val &= clk_div_mask(width); div_gate->cached_val = val; diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx5.c index e91c826bce70..c85ebd74a8a5 100644 --- a/drivers/clk/imx/clk-imx51-imx53.c +++ b/drivers/clk/imx/clk-imx5.c @@ -164,10 +164,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base) clk[IMX5_CLK_CKIH1] = imx_obtain_fixed_clock("ckih1", 0); clk[IMX5_CLK_CKIH2] = imx_obtain_fixed_clock("ckih2", 0); - clk[IMX5_CLK_PERIPH_APM] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2, - periph_apm_sel, ARRAY_SIZE(periph_apm_sel)); - clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1, - main_bus_sel, ARRAY_SIZE(main_bus_sel)); clk[IMX5_CLK_PER_LP_APM] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1, per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel)); clk[IMX5_CLK_PER_PRED1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2); @@ -191,16 +187,10 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base) clk[IMX5_CLK_UART_PRED] = imx_clk_divider("uart_pred", "uart_sel", MXC_CCM_CSCDR1, 3, 3); clk[IMX5_CLK_UART_ROOT] = imx_clk_divider("uart_root", "uart_pred", MXC_CCM_CSCDR1, 0, 3); - clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2, - standard_pll_sel, ARRAY_SIZE(standard_pll_sel)); - clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2, - standard_pll_sel, ARRAY_SIZE(standard_pll_sel)); clk[IMX5_CLK_ESDHC_A_PRED] = imx_clk_divider("esdhc_a_pred", "esdhc_a_sel", MXC_CCM_CSCDR1, 16, 3); clk[IMX5_CLK_ESDHC_A_PODF] = imx_clk_divider("esdhc_a_podf", "esdhc_a_pred", MXC_CCM_CSCDR1, 11, 3); clk[IMX5_CLK_ESDHC_B_PRED] = imx_clk_divider("esdhc_b_pred", "esdhc_b_sel", MXC_CCM_CSCDR1, 22, 3); clk[IMX5_CLK_ESDHC_B_PODF] = imx_clk_divider("esdhc_b_podf", "esdhc_b_pred", MXC_CCM_CSCDR1, 19, 3); - clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel)); - clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel)); clk[IMX5_CLK_EMI_SEL] = imx_clk_mux("emi_sel", MXC_CCM_CBCDR, 26, 1, emi_slow_sel, ARRAY_SIZE(emi_slow_sel)); @@ -311,10 +301,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base) clk_register_clkdev(clk[IMX5_CLK_CPU_PODF], NULL, "cpu0"); clk_register_clkdev(clk[IMX5_CLK_GPC_DVFS], "gpc_dvfs", NULL); - /* Set SDHC parents to be PLL2 */ - clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]); - clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]); - /* move usb phy clk to 24MHz */ clk_set_parent(clk[IMX5_CLK_USB_PHY_SEL], clk[IMX5_CLK_OSC]); } @@ -342,8 +328,21 @@ static void __init mx50_clocks_init(struct device_node *np) mx5_clocks_common_init(ccm_base); + /* + * This clock is called periph_clk in the i.MX50 Reference Manual, but + * it comes closest in scope to the main_bus_clk of i.MX51 and i.MX53 + */ + clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 2, + standard_pll_sel, ARRAY_SIZE(standard_pll_sel)); + clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 10, 1, lp_apm_sel, ARRAY_SIZE(lp_apm_sel)); + clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 21, 2, + standard_pll_sel, 
ARRAY_SIZE(standard_pll_sel)); + clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2, + standard_pll_sel, ARRAY_SIZE(standard_pll_sel)); + clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 20, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel)); + clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel)); clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2); clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6); clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10); @@ -372,6 +371,10 @@ static void __init mx50_clocks_init(struct device_node *np) clk_data.clk_num = ARRAY_SIZE(clk); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + /* Set SDHC parents to be PLL2 */ + clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]); + clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]); + /* set SDHC root clock to 200MHZ*/ clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 200000000); clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 200000000); @@ -410,6 +413,10 @@ static void __init mx51_clocks_init(struct device_node *np) mx5_clocks_common_init(ccm_base); + clk[IMX5_CLK_PERIPH_APM] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2, + periph_apm_sel, ARRAY_SIZE(periph_apm_sel)); + clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1, + main_bus_sel, ARRAY_SIZE(main_bus_sel)); clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1, lp_apm_sel, ARRAY_SIZE(lp_apm_sel)); clk[IMX5_CLK_IPU_DI0_SEL] = imx_clk_mux_flags("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3, @@ -422,6 +429,12 @@ static void __init mx51_clocks_init(struct device_node *np) mx51_tve_sel, ARRAY_SIZE(mx51_tve_sel)); clk[IMX5_CLK_TVE_GATE] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30); clk[IMX5_CLK_TVE_PRED] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3); + clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2, + standard_pll_sel, ARRAY_SIZE(standard_pll_sel)); + clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2, + standard_pll_sel, ARRAY_SIZE(standard_pll_sel)); + clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel)); + clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel)); clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2); clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6); clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10); @@ -452,6 +465,10 @@ static void __init mx51_clocks_init(struct device_node *np) /* set the usboh3 parent to pll2_sw */ clk_set_parent(clk[IMX5_CLK_USBOH3_SEL], clk[IMX5_CLK_PLL2_SW]); + /* Set SDHC parents to be PLL2 */ + clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]); + clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]); + /* set SDHC root clock to 166.25MHZ*/ clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 166250000); clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 166250000); @@ -506,6 +523,10 @@ static void __init mx53_clocks_init(struct device_node *np) mx5_clocks_common_init(ccm_base); + clk[IMX5_CLK_PERIPH_APM] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2, + periph_apm_sel, 
ARRAY_SIZE(periph_apm_sel)); + clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1, + main_bus_sel, ARRAY_SIZE(main_bus_sel)); clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 10, 1, lp_apm_sel, ARRAY_SIZE(lp_apm_sel)); clk[IMX5_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); @@ -527,6 +548,12 @@ static void __init mx53_clocks_init(struct device_node *np) mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel), CLK_SET_RATE_PARENT); clk[IMX5_CLK_TVE_GATE] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30); clk[IMX5_CLK_TVE_PRED] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3); + clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2, + standard_pll_sel, ARRAY_SIZE(standard_pll_sel)); + clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2, + standard_pll_sel, ARRAY_SIZE(standard_pll_sel)); + clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel)); + clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel)); clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2); clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6); clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10); @@ -589,6 +616,10 @@ static void __init mx53_clocks_init(struct device_node *np) clk_data.clk_num = ARRAY_SIZE(clk); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + /* Set SDHC parents to be PLL2 */ + clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]); + clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]); + /* set SDHC root clock to 200MHZ*/ clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 200000000); clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 200000000); diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c index 3bd2044cf25c..7eea448cb9a9 100644 --- a/drivers/clk/imx/clk-imx6sll.c +++ b/drivers/clk/imx/clk-imx6sll.c @@ -76,6 +76,20 @@ static u32 share_count_ssi1; static u32 share_count_ssi2; static u32 share_count_ssi3; +static struct clk ** const uart_clks[] __initconst = { + &clks[IMX6SLL_CLK_UART1_IPG], + &clks[IMX6SLL_CLK_UART1_SERIAL], + &clks[IMX6SLL_CLK_UART2_IPG], + &clks[IMX6SLL_CLK_UART2_SERIAL], + &clks[IMX6SLL_CLK_UART3_IPG], + &clks[IMX6SLL_CLK_UART3_SERIAL], + &clks[IMX6SLL_CLK_UART4_IPG], + &clks[IMX6SLL_CLK_UART4_SERIAL], + &clks[IMX6SLL_CLK_UART5_IPG], + &clks[IMX6SLL_CLK_UART5_SERIAL], + NULL +}; + static void __init imx6sll_clocks_init(struct device_node *ccm_node) { struct device_node *np; @@ -268,7 +282,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node) clks[IMX6SLL_CLK_GPT_BUS] = imx_clk_gate2("gpt1_bus", "perclk", base + 0x6c, 20); clks[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22); clks[IMX6SLL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24); - clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24); + clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serial", "uart_podf", base + 0x6c, 24); clks[IMX6SLL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26); clks[IMX6SLL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30); @@ -334,6 +348,8 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node) clk_data.clk_num = 
ARRAY_SIZE(clks); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + imx_register_uart_clocks(uart_clks); + /* Lower the AHB clock rate before changing the clock source. */ clk_set_rate(clks[IMX6SLL_CLK_AHB], 99000000); diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index cfbd8d4edb85..5b8a0c729f90 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -417,8 +417,8 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_PLL_DRAM_MAIN] = imx_clk_pllv3(IMX_PLLV3_DDR_IMX7, "pll_dram_main", "osc", base + 0x70, 0x7f); clks[IMX7D_PLL_SYS_MAIN] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "osc", base + 0xb0, 0x1); clks[IMX7D_PLL_ENET_MAIN] = imx_clk_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "osc", base + 0xe0, 0x0); - clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_audio_main", "osc", base + 0xf0, 0x7f); - clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_video_main", "osc", base + 0x130, 0x7f); + clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV_IMX7, "pll_audio_main", "osc", base + 0xf0, 0x7f); + clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV_IMX7, "pll_video_main", "osc", base + 0x130, 0x7f); clks[IMX7D_PLL_ARM_MAIN_BYPASS] = imx_clk_mux_flags("pll_arm_main_bypass", base + 0x60, 16, 1, pll_arm_bypass_sel, ARRAY_SIZE(pll_arm_bypass_sel), CLK_SET_RATE_PARENT); clks[IMX7D_PLL_DRAM_MAIN_BYPASS] = imx_clk_mux_flags("pll_dram_main_bypass", base + 0x70, 16, 1, pll_dram_bypass_sel, ARRAY_SIZE(pll_dram_bypass_sel), CLK_SET_RATE_PARENT); diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c index ce306631e844..66682100f14c 100644 --- a/drivers/clk/imx/clk-imx7ulp.c +++ b/drivers/clk/imx/clk-imx7ulp.c @@ -151,7 +151,6 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np) clks[IMX7ULP_CLK_DMA1] = imx_clk_hw_gate("dma1", "nic1_clk", base + 0x20, 30); clks[IMX7ULP_CLK_RGPIO2P1] = imx_clk_hw_gate("rgpio2p1", "nic1_bus_clk", base + 0x3c, 30); clks[IMX7ULP_CLK_DMA_MUX1] = imx_clk_hw_gate("dma_mux1", "nic1_bus_clk", base + 0x84, 30); - clks[IMX7ULP_CLK_SNVS] = imx_clk_hw_gate("snvs", "nic1_bus_clk", base + 0x8c, 30); clks[IMX7ULP_CLK_CAAM] = imx_clk_hw_gate("caam", "nic1_clk", base + 0x90, 30); clks[IMX7ULP_CLK_LPTPM4] = imx7ulp_clk_composite("lptpm4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94); clks[IMX7ULP_CLK_LPTPM5] = imx7ulp_clk_composite("lptpm5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98); diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index a9b3888aef0c..daf1841b2adb 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c @@ -458,6 +458,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) clks[IMX8MQ_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mq_dsi_dbi_sels, base + 0xbc00); clks[IMX8MQ_CLK_DSI_ESC] = imx8m_clk_composite("dsi_esc", imx8mq_dsi_esc_sels, base + 0xbc80); clks[IMX8MQ_CLK_DSI_AHB] = imx8m_clk_composite("dsi_ahb", imx8mq_dsi_ahb_sels, base + 0x9200); + clks[IMX8MQ_CLK_DSI_IPG_DIV] = imx_clk_divider2("dsi_ipg_div", "dsi_ahb", base + 0x9280, 0, 6); clks[IMX8MQ_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mq_csi1_core_sels, base + 0xbd00); clks[IMX8MQ_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mq_csi1_phy_sels, base + 0xbd80); clks[IMX8MQ_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mq_csi1_esc_sels, base + 0xbe00); diff --git a/drivers/clk/imx/clk-pfdv2.c 
b/drivers/clk/imx/clk-pfdv2.c index 7e9134b205ab..fb567dcc2118 100644 --- a/drivers/clk/imx/clk-pfdv2.c +++ b/drivers/clk/imx/clk-pfdv2.c @@ -43,7 +43,7 @@ static int clk_pfdv2_wait(struct clk_pfdv2 *pfd) { u32 val; - return readl_poll_timeout(pfd->reg, val, val & pfd->vld_bit, + return readl_poll_timeout(pfd->reg, val, val & (1 << pfd->vld_bit), 0, LOCK_TIMEOUT_US); } @@ -55,7 +55,7 @@ static int clk_pfdv2_enable(struct clk_hw *hw) spin_lock_irqsave(&pfd_lock, flags); val = readl_relaxed(pfd->reg); - val &= ~pfd->gate_bit; + val &= ~(1 << pfd->gate_bit); writel_relaxed(val, pfd->reg); spin_unlock_irqrestore(&pfd_lock, flags); @@ -70,7 +70,7 @@ static void clk_pfdv2_disable(struct clk_hw *hw) spin_lock_irqsave(&pfd_lock, flags); val = readl_relaxed(pfd->reg); - val |= pfd->gate_bit; + val |= (1 << pfd->gate_bit); writel_relaxed(val, pfd->reg); spin_unlock_irqrestore(&pfd_lock, flags); } @@ -123,7 +123,7 @@ static int clk_pfdv2_is_enabled(struct clk_hw *hw) { struct clk_pfdv2 *pfd = to_clk_pfdv2(hw); - if (readl_relaxed(pfd->reg) & pfd->gate_bit) + if (readl_relaxed(pfd->reg) & (1 << pfd->gate_bit)) return 0; return 1; @@ -180,7 +180,7 @@ struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name, return ERR_PTR(-ENOMEM); pfd->reg = reg; - pfd->gate_bit = 1 << ((idx + 1) * 8 - 1); + pfd->gate_bit = (idx + 1) * 8 - 1; pfd->vld_bit = pfd->gate_bit - 1; pfd->frac_off = idx * 8; diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index 1acfa3e3cfb4..b7213023b238 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c @@ -74,10 +74,9 @@ static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_pll14xx *pll = to_clk_pll14xx(hw); - u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div; + u32 mdiv, pdiv, sdiv, pll_div; u64 fvco = parent_rate; - pll_gnrl = readl_relaxed(pll->base); pll_div = readl_relaxed(pll->base + 4); mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT; pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT; @@ -93,11 +92,10 @@ static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_pll14xx *pll = to_clk_pll14xx(hw); - u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div_ctl0, pll_div_ctl1; + u32 mdiv, pdiv, sdiv, pll_div_ctl0, pll_div_ctl1; short int kdiv; u64 fvco = parent_rate; - pll_gnrl = readl_relaxed(pll->base); pll_div_ctl0 = readl_relaxed(pll->base + 4); pll_div_ctl1 = readl_relaxed(pll->base + 8); mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT; @@ -362,7 +360,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name, switch (pll_clk->type) { case PLL_1416X: - if (!pll->rate_table) + if (!pll_clk->rate_table) init.ops = &clk_pll1416x_min_ops; else init.ops = &clk_pll1416x_ops; diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index 9af62ee8f347..4110e713d259 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c @@ -20,6 +20,8 @@ #define PLL_NUM_OFFSET 0x10 #define PLL_DENOM_OFFSET 0x20 +#define PLL_IMX7_NUM_OFFSET 0x20 +#define PLL_IMX7_DENOM_OFFSET 0x30 #define PLL_VF610_NUM_OFFSET 0x20 #define PLL_VF610_DENOM_OFFSET 0x30 @@ -49,6 +51,8 @@ struct clk_pllv3 { u32 div_mask; u32 div_shift; unsigned long ref_clock; + u32 num_offset; + u32 denom_offset; }; #define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw) @@ -219,8 +223,8 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_pllv3 *pll = to_clk_pllv3(hw); - u32 mfn = readl_relaxed(pll->base + 
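The clk-pfdv2 change stores gate_bit/vld_bit as bit numbers and shifts them at the point of use; the old code built gate_bit as a ready-made mask and then derived vld_bit by subtracting one from that mask, which yields 0x7f rather than the intended BIT(6) for the first PFD. With bit numbers the per-PFD layout works out as:

	/*
	 * idx 0: gate_bit = (0 + 1) * 8 - 1 = 7   -> 1 << 7  = 0x00000080
	 *        vld_bit  = 7 - 1           = 6   -> 1 << 6  = 0x00000040
	 * idx 3: gate_bit = (3 + 1) * 8 - 1 = 31  -> 1 << 31 = 0x80000000
	 *        vld_bit  = 31 - 1          = 30  -> 1 << 30 = 0x40000000
	 *
	 * i.e. each PFD owns one byte of the register, gate in the top bit,
	 * valid flag in the bit just below it.
	 */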
PLL_NUM_OFFSET); - u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET); + u32 mfn = readl_relaxed(pll->base + pll->num_offset); + u32 mfd = readl_relaxed(pll->base + pll->denom_offset); u32 div = readl_relaxed(pll->base) & pll->div_mask; u64 temp64 = (u64)parent_rate; @@ -289,8 +293,8 @@ static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate, val &= ~pll->div_mask; val |= div; writel_relaxed(val, pll->base); - writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET); - writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET); + writel_relaxed(mfn, pll->base + pll->num_offset); + writel_relaxed(mfd, pll->base + pll->denom_offset); return clk_pllv3_wait_lock(pll); } @@ -352,8 +356,8 @@ static unsigned long clk_pllv3_vf610_recalc_rate(struct clk_hw *hw, struct clk_pllv3 *pll = to_clk_pllv3(hw); struct clk_pllv3_vf610_mf mf; - mf.mfn = readl_relaxed(pll->base + PLL_VF610_NUM_OFFSET); - mf.mfd = readl_relaxed(pll->base + PLL_VF610_DENOM_OFFSET); + mf.mfn = readl_relaxed(pll->base + pll->num_offset); + mf.mfd = readl_relaxed(pll->base + pll->denom_offset); mf.mfi = (readl_relaxed(pll->base) & pll->div_mask) ? 22 : 20; return clk_pllv3_vf610_mf_to_rate(parent_rate, mf); @@ -382,8 +386,8 @@ static int clk_pllv3_vf610_set_rate(struct clk_hw *hw, unsigned long rate, val |= pll->div_mask; /* set bit for mfi=22 */ writel_relaxed(val, pll->base); - writel_relaxed(mf.mfn, pll->base + PLL_VF610_NUM_OFFSET); - writel_relaxed(mf.mfd, pll->base + PLL_VF610_DENOM_OFFSET); + writel_relaxed(mf.mfn, pll->base + pll->num_offset); + writel_relaxed(mf.mfd, pll->base + pll->denom_offset); return clk_pllv3_wait_lock(pll); } @@ -426,6 +430,8 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, return ERR_PTR(-ENOMEM); pll->power_bit = BM_PLL_POWER; + pll->num_offset = PLL_NUM_OFFSET; + pll->denom_offset = PLL_DENOM_OFFSET; switch (type) { case IMX_PLLV3_SYS: @@ -433,13 +439,20 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, break; case IMX_PLLV3_SYS_VF610: ops = &clk_pllv3_vf610_ops; + pll->num_offset = PLL_VF610_NUM_OFFSET; + pll->denom_offset = PLL_VF610_DENOM_OFFSET; break; case IMX_PLLV3_USB_VF610: pll->div_shift = 1; + /* fall through */ case IMX_PLLV3_USB: ops = &clk_pllv3_ops; pll->powerup_set = true; break; + case IMX_PLLV3_AV_IMX7: + pll->num_offset = PLL_IMX7_NUM_OFFSET; + pll->denom_offset = PLL_IMX7_DENOM_OFFSET; + /* fall through */ case IMX_PLLV3_AV: ops = &clk_pllv3_av_ops; break; @@ -454,6 +467,8 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, break; case IMX_PLLV3_DDR_IMX7: pll->power_bit = IMX7_DDR_PLL_POWER; + pll->num_offset = PLL_IMX7_NUM_OFFSET; + pll->denom_offset = PLL_IMX7_DENOM_OFFSET; ops = &clk_pllv3_av_ops; break; default: diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c index d38bc9f87c1d..d7e62c3620d3 100644 --- a/drivers/clk/imx/clk-pllv4.c +++ b/drivers/clk/imx/clk-pllv4.c @@ -30,6 +30,9 @@ /* PLL Denominator Register (xPLLDENOM) */ #define PLL_DENOM_OFFSET 0x14 +#define MAX_MFD 0x3fffffff +#define DEFAULT_MFD 1000000 + struct clk_pllv4 { struct clk_hw hw; void __iomem *base; @@ -64,13 +67,20 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_pllv4 *pll = to_clk_pllv4(hw); - u32 div; + u32 mult, mfn, mfd; + u64 temp64; + + mult = readl_relaxed(pll->base + PLL_CFG_OFFSET); + mult &= BM_PLL_MULT; + mult >>= BP_PLL_MULT; - div = readl_relaxed(pll->base + PLL_CFG_OFFSET); - div &= BM_PLL_MULT; - div >>= BP_PLL_MULT; + mfn = 
readl_relaxed(pll->base + PLL_NUM_OFFSET); + mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET); + temp64 = parent_rate; + temp64 *= mfn; + do_div(temp64, mfd); - return parent_rate * div; + return (parent_rate * mult) + (u32)temp64; } static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate, @@ -78,14 +88,46 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate, { unsigned long parent_rate = *prate; unsigned long round_rate, i; + u32 mfn, mfd = DEFAULT_MFD; + bool found = false; + u64 temp64; for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) { round_rate = parent_rate * pllv4_mult_table[i]; - if (rate >= round_rate) - return round_rate; + if (rate >= round_rate) { + found = true; + break; + } + } + + if (!found) { + pr_warn("%s: unable to round rate %lu, parent rate %lu\n", + clk_hw_get_name(hw), rate, parent_rate); + return 0; } - return round_rate; + if (parent_rate <= MAX_MFD) + mfd = parent_rate; + + temp64 = (u64)(rate - round_rate); + temp64 *= mfd; + do_div(temp64, parent_rate); + mfn = temp64; + + /* + * NOTE: The value of numerator must always be configured to be + * less than the value of the denominator. If we can't get a proper + * pair of mfn/mfd, we simply return the round_rate without using + * the frac part. + */ + if (mfn >= mfd) + return round_rate; + + temp64 = (u64)parent_rate; + temp64 *= mfn; + do_div(temp64, mfd); + + return round_rate + (u32)temp64; } static bool clk_pllv4_is_valid_mult(unsigned int mult) @@ -105,18 +147,30 @@ static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_pllv4 *pll = to_clk_pllv4(hw); - u32 val, mult; + u32 val, mult, mfn, mfd = DEFAULT_MFD; + u64 temp64; mult = rate / parent_rate; if (!clk_pllv4_is_valid_mult(mult)) return -EINVAL; + if (parent_rate <= MAX_MFD) + mfd = parent_rate; + + temp64 = (u64)(rate - mult * parent_rate); + temp64 *= mfd; + do_div(temp64, parent_rate); + mfn = temp64; + val = readl_relaxed(pll->base + PLL_CFG_OFFSET); val &= ~BM_PLL_MULT; val |= mult << BP_PLL_MULT; writel_relaxed(val, pll->base + PLL_CFG_OFFSET); + writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET); + writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET); + return 0; } diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c index 9dfd03a95557..991bbe63f156 100644 --- a/drivers/clk/imx/clk-sccg-pll.c +++ b/drivers/clk/imx/clk-sccg-pll.c @@ -348,7 +348,7 @@ static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw, temp64 = parent_rate; - val = clk_readl(pll->base + PLL_CFG0); + val = readl(pll->base + PLL_CFG0); if (val & SSCG_PLL_BYPASS2_MASK) { temp64 = parent_rate; } else if (val & SSCG_PLL_BYPASS1_MASK) { @@ -371,10 +371,10 @@ static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate, u32 val; /* set bypass here too since the parent might be the same */ - val = clk_readl(pll->base + PLL_CFG0); + val = readl(pll->base + PLL_CFG0); val &= ~SSCG_PLL_BYPASS_MASK; val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass); - clk_writel(val, pll->base + PLL_CFG0); + writel(val, pll->base + PLL_CFG0); val = readl_relaxed(pll->base + PLL_CFG2); val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK); @@ -395,7 +395,7 @@ static u8 clk_sccg_pll_get_parent(struct clk_hw *hw) u32 val; u8 ret = pll->parent; - val = clk_readl(pll->base + PLL_CFG0); + val = readl(pll->base + PLL_CFG0); if (val & SSCG_PLL_BYPASS2_MASK) ret = pll->bypass2; else if (val & SSCG_PLL_BYPASS1_MASK) @@ -408,10 +408,10 @@ static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 
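With the numerator/denominator registers in play, the pllv4 output becomes parent_rate * mult + parent_rate * mfn / mfd. A quick numeric check of the round_rate()/recalc_rate() math, using a 24 MHz parent and an integer multiplier of 20 chosen purely for illustration (the multiplier still has to come from pllv4_mult_table):

	/*
	 * base rate: 24 MHz * 20 = 480 MHz.  Requesting 486 MHz:
	 *
	 *   parent_rate <= MAX_MFD, so mfd = 24000000
	 *   mfn = (486000000 - 480000000) * 24000000 / 24000000 = 6000000
	 *
	 * recalc_rate() then yields 480 MHz + 24 MHz * 6000000 / 24000000
	 * = 486 MHz, and mfn < mfd so the fractional part is accepted.
	 */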
index) struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); u32 val; - val = clk_readl(pll->base + PLL_CFG0); + val = readl(pll->base + PLL_CFG0); val &= ~SSCG_PLL_BYPASS_MASK; val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass); - clk_writel(val, pll->base + PLL_CFG0); + writel(val, pll->base + PLL_CFG0); return clk_sccg_pll_wait_lock(pll); } diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 5748ec8673e4..8639a8f2153e 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -77,6 +77,7 @@ enum imx_pllv3_type { IMX_PLLV3_ENET_IMX7, IMX_PLLV3_SYS_VF610, IMX_PLLV3_DDR_IMX7, + IMX_PLLV3_AV_IMX7, }; struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, @@ -138,11 +139,6 @@ static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate) return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate); } -static inline struct clk_hw *imx_get_clk_hw_fixed(const char *name, int rate) -{ - return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate); -} - static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg, u8 shift, u8 width, const char * const *parents, int num_parents) diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 584ff4ff81c7..8901ea0295b7 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -205,6 +205,12 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { .parents = { JZ4725B_CLK_EXT512, JZ4725B_CLK_OSC32K, -1, -1 }, .mux = { CGU_REG_OPCR, 2, 1}, }, + + [JZ4725B_CLK_UDC_PHY] = { + "udc_phy", CGU_CLK_GATE, + .parents = { JZ4725B_CLK_EXT, -1, -1, -1 }, + .gate = { CGU_REG_OPCR, 6, true }, + }, }; static void __init jz4725b_cgu_init(struct device_node *np) diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig index 53edade25a1d..4d8a9aef95f6 100644 --- a/drivers/clk/mediatek/Kconfig +++ b/drivers/clk/mediatek/Kconfig @@ -216,4 +216,87 @@ config COMMON_CLK_MT8173 default ARCH_MEDIATEK ---help--- This driver supports MediaTek MT8173 clocks. + +config COMMON_CLK_MT8183 + bool "Clock driver for MediaTek MT8183" + depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST + select COMMON_CLK_MEDIATEK + default ARCH_MEDIATEK && ARM64 + help + This driver supports MediaTek MT8183 basic clocks. + +config COMMON_CLK_MT8183_AUDIOSYS + bool "Clock driver for MediaTek MT8183 audiosys" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 audiosys clocks. + +config COMMON_CLK_MT8183_CAMSYS + bool "Clock driver for MediaTek MT8183 camsys" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 camsys clocks. + +config COMMON_CLK_MT8183_IMGSYS + bool "Clock driver for MediaTek MT8183 imgsys" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 imgsys clocks. + +config COMMON_CLK_MT8183_IPU_CORE0 + bool "Clock driver for MediaTek MT8183 ipu_core0" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 ipu_core0 clocks. + +config COMMON_CLK_MT8183_IPU_CORE1 + bool "Clock driver for MediaTek MT8183 ipu_core1" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 ipu_core1 clocks. + +config COMMON_CLK_MT8183_IPU_ADL + bool "Clock driver for MediaTek MT8183 ipu_adl" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 ipu_adl clocks. 
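As a standalone illustration of the fractional-divider arithmetic that the clk-pllv4 hunk earlier in this diff introduces, the sketch below (an editor's userspace example, not part of the commit) reproduces the mult/mfn/mfd computation from clk_pllv4_set_rate() and the rate reconstruction from clk_pllv4_recalc_rate(). do_div() is replaced by plain 64-bit division, register I/O is omitted, and the 24 MHz parent / 491.52 MHz target values are made-up inputs chosen so the fraction resolves exactly.

/* Userspace sketch of the clk-pllv4 mult/mfn/mfd arithmetic (illustrative). */
#include <stdint.h>
#include <stdio.h>

#define MAX_MFD		0x3fffffff	/* same limits as the diff above */
#define DEFAULT_MFD	1000000

int main(void)
{
	uint64_t parent_rate = 24000000;	/* example parent clock */
	uint64_t rate = 491520000;		/* example target rate */
	uint32_t mult, mfn, mfd = DEFAULT_MFD;
	uint64_t temp64, out;

	mult = rate / parent_rate;		/* integer multiplier */

	if (parent_rate <= MAX_MFD)
		mfd = parent_rate;		/* as in clk_pllv4_set_rate() */

	/* fractional numerator: (rate - mult * parent) scaled by mfd */
	temp64 = rate - (uint64_t)mult * parent_rate;
	temp64 *= mfd;
	mfn = temp64 / parent_rate;

	/* reconstruct the rate the way clk_pllv4_recalc_rate() does;
	 * mfn must stay below mfd, as the NOTE in the hunk explains */
	out = parent_rate * mult + (parent_rate * (uint64_t)mfn) / mfd;
	printf("mult=%u mfn=%u mfd=%u -> %llu Hz\n",
	       mult, mfn, mfd, (unsigned long long)out);
	return 0;
}

With the example inputs this prints mult=20 mfn=11520000 mfd=24000000 -> 491520000 Hz, matching the round_rate path that falls back to the integer rate whenever mfn would reach mfd.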
+ +config COMMON_CLK_MT8183_IPU_CONN + bool "Clock driver for MediaTek MT8183 ipu_conn" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 ipu_conn clocks. + +config COMMON_CLK_MT8183_MFGCFG + bool "Clock driver for MediaTek MT8183 mfgcfg" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 mfgcfg clocks. + +config COMMON_CLK_MT8183_MMSYS + bool "Clock driver for MediaTek MT8183 mmsys" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 mmsys clocks. + +config COMMON_CLK_MT8183_VDECSYS + bool "Clock driver for MediaTek MT8183 vdecsys" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 vdecsys clocks. + +config COMMON_CLK_MT8183_VENCSYS + bool "Clock driver for MediaTek MT8183 vencsys" + depends on COMMON_CLK_MT8183 + help + This driver supports MediaTek MT8183 vencsys clocks. + +config COMMON_CLK_MT8516 + bool "Clock driver for MediaTek MT8516" + depends on ARCH_MEDIATEK || COMPILE_TEST + select COMMON_CLK_MEDIATEK + default ARCH_MEDIATEK + help + This driver supports MediaTek MT8516 clocks. + endmenu diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile index ee4410ff43ab..f74937b35f68 100644 --- a/drivers/clk/mediatek/Makefile +++ b/drivers/clk/mediatek/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o +obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o + obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o obj-$(CONFIG_COMMON_CLK_MT6797_IMGSYS) += clk-mt6797-img.o obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o @@ -31,3 +32,16 @@ obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o +obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o +obj-$(CONFIG_COMMON_CLK_MT8183_AUDIOSYS) += clk-mt8183-audio.o +obj-$(CONFIG_COMMON_CLK_MT8183_CAMSYS) += clk-mt8183-cam.o +obj-$(CONFIG_COMMON_CLK_MT8183_IMGSYS) += clk-mt8183-img.o +obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CORE0) += clk-mt8183-ipu0.o +obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CORE1) += clk-mt8183-ipu1.o +obj-$(CONFIG_COMMON_CLK_MT8183_IPU_ADL) += clk-mt8183-ipu_adl.o +obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CONN) += clk-mt8183-ipu_conn.o +obj-$(CONFIG_COMMON_CLK_MT8183_MFGCFG) += clk-mt8183-mfgcfg.o +obj-$(CONFIG_COMMON_CLK_MT8183_MMSYS) += clk-mt8183-mm.o +obj-$(CONFIG_COMMON_CLK_MT8183_VDECSYS) += clk-mt8183-vdec.o +obj-$(CONFIG_COMMON_CLK_MT8183_VENCSYS) += clk-mt8183-venc.o +obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c index 9628d4e7690b..85daf826619a 100644 --- a/drivers/clk/mediatek/clk-gate.c +++ b/drivers/clk/mediatek/clk-gate.c @@ -169,11 +169,10 @@ struct clk *mtk_clk_register_gate( return ERR_PTR(-ENOMEM); init.name = name; - init.flags = CLK_SET_RATE_PARENT; + init.flags = flags | CLK_SET_RATE_PARENT; init.parent_names = parent_name ? &parent_name : NULL; init.num_parents = parent_name ? 
1 : 0; init.ops = ops; - init.flags = flags; cg->regmap = regmap; cg->set_ofs = set_ofs; diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h index 9f766dfe1d57..ab240163f9f8 100644 --- a/drivers/clk/mediatek/clk-gate.h +++ b/drivers/clk/mediatek/clk-gate.h @@ -50,4 +50,18 @@ struct clk *mtk_clk_register_gate( const struct clk_ops *ops, unsigned long flags); +#define GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, \ + _ops, _flags) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = _regs, \ + .shift = _shift, \ + .ops = _ops, \ + .flags = _flags, \ + } + +#define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops) \ + GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0) + #endif /* __DRV_CLK_GATE_H */ diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c new file mode 100644 index 000000000000..c87450180b7b --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-audio.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. +// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs audio0_cg_regs = { + .set_ofs = 0x0, + .clr_ofs = 0x0, + .sta_ofs = 0x0, +}; + +static const struct mtk_gate_regs audio1_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x4, + .sta_ofs = 0x4, +}; + +#define GATE_AUDIO0(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr) + +#define GATE_AUDIO1(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr) + +static const struct mtk_gate audio_clks[] = { + /* AUDIO0 */ + GATE_AUDIO0(CLK_AUDIO_AFE, "aud_afe", "audio_sel", + 2), + GATE_AUDIO0(CLK_AUDIO_22M, "aud_22m", "aud_eng1_sel", + 8), + GATE_AUDIO0(CLK_AUDIO_24M, "aud_24m", "aud_eng2_sel", + 9), + GATE_AUDIO0(CLK_AUDIO_APLL2_TUNER, "aud_apll2_tuner", "aud_eng2_sel", + 18), + GATE_AUDIO0(CLK_AUDIO_APLL_TUNER, "aud_apll_tuner", "aud_eng1_sel", + 19), + GATE_AUDIO0(CLK_AUDIO_TDM, "aud_tdm", "apll12_divb", + 20), + GATE_AUDIO0(CLK_AUDIO_ADC, "aud_adc", "audio_sel", + 24), + GATE_AUDIO0(CLK_AUDIO_DAC, "aud_dac", "audio_sel", + 25), + GATE_AUDIO0(CLK_AUDIO_DAC_PREDIS, "aud_dac_predis", "audio_sel", + 26), + GATE_AUDIO0(CLK_AUDIO_TML, "aud_tml", "audio_sel", + 27), + /* AUDIO1 */ + GATE_AUDIO1(CLK_AUDIO_I2S1, "aud_i2s1", "audio_sel", + 4), + GATE_AUDIO1(CLK_AUDIO_I2S2, "aud_i2s2", "audio_sel", + 5), + GATE_AUDIO1(CLK_AUDIO_I2S3, "aud_i2s3", "audio_sel", + 6), + GATE_AUDIO1(CLK_AUDIO_I2S4, "aud_i2s4", "audio_sel", + 7), + GATE_AUDIO1(CLK_AUDIO_PDN_ADDA6_ADC, "aud_pdn_adda6_adc", "audio_sel", + 20), +}; + +static int clk_mt8183_audio_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + int r; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK); + + mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks), + clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + return r; + + r = devm_of_platform_populate(&pdev->dev); + if (r) + of_clk_del_provider(node); + + return r; +} + +static const struct of_device_id of_match_clk_mt8183_audio[] = { + { .compatible = "mediatek,mt8183-audiosys", }, + {} +}; + +static struct platform_driver 
clk_mt8183_audio_drv = { + .probe = clk_mt8183_audio_probe, + .driver = { + .name = "clk-mt8183-audio", + .of_match_table = of_match_clk_mt8183_audio, + }, +}; + +builtin_platform_driver(clk_mt8183_audio_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c new file mode 100644 index 000000000000..8643802c4471 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-cam.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. +// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs cam_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_CAM(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate cam_clks[] = { + GATE_CAM(CLK_CAM_LARB6, "cam_larb6", "cam_sel", 0), + GATE_CAM(CLK_CAM_DFP_VAD, "cam_dfp_vad", "cam_sel", 1), + GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "cam_sel", 2), + GATE_CAM(CLK_CAM_CAM, "cam_cam", "cam_sel", 6), + GATE_CAM(CLK_CAM_CAMTG, "cam_camtg", "cam_sel", 7), + GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "cam_sel", 8), + GATE_CAM(CLK_CAM_CAMSV0, "cam_camsv0", "cam_sel", 9), + GATE_CAM(CLK_CAM_CAMSV1, "cam_camsv1", "cam_sel", 10), + GATE_CAM(CLK_CAM_CAMSV2, "cam_camsv2", "cam_sel", 11), + GATE_CAM(CLK_CAM_CCU, "cam_ccu", "cam_sel", 12), +}; + +static int clk_mt8183_cam_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK); + + mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183_cam[] = { + { .compatible = "mediatek,mt8183-camsys", }, + {} +}; + +static struct platform_driver clk_mt8183_cam_drv = { + .probe = clk_mt8183_cam_probe, + .driver = { + .name = "clk-mt8183-cam", + .of_match_table = of_match_clk_mt8183_cam, + }, +}; + +builtin_platform_driver(clk_mt8183_cam_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c new file mode 100644 index 000000000000..470d676a4a10 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-img.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. 
+// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs img_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_IMG(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate img_clks[] = { + GATE_IMG(CLK_IMG_LARB5, "img_larb5", "img_sel", 0), + GATE_IMG(CLK_IMG_LARB2, "img_larb2", "img_sel", 1), + GATE_IMG(CLK_IMG_DIP, "img_dip", "img_sel", 2), + GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "img_sel", 3), + GATE_IMG(CLK_IMG_DPE, "img_dpe", "img_sel", 4), + GATE_IMG(CLK_IMG_RSC, "img_rsc", "img_sel", 5), + GATE_IMG(CLK_IMG_MFB, "img_mfb", "img_sel", 6), + GATE_IMG(CLK_IMG_WPE_A, "img_wpe_a", "img_sel", 7), + GATE_IMG(CLK_IMG_WPE_B, "img_wpe_b", "img_sel", 8), + GATE_IMG(CLK_IMG_OWE, "img_owe", "img_sel", 9), +}; + +static int clk_mt8183_img_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK); + + mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183_img[] = { + { .compatible = "mediatek,mt8183-imgsys", }, + {} +}; + +static struct platform_driver clk_mt8183_img_drv = { + .probe = clk_mt8183_img_probe, + .driver = { + .name = "clk-mt8183-img", + .of_match_table = of_match_clk_mt8183_img, + }, +}; + +builtin_platform_driver(clk_mt8183_img_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c new file mode 100644 index 000000000000..c5cb76fc9e5e --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. 
+// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs ipu_core0_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_IPU_CORE0(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &ipu_core0_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate ipu_core0_clks[] = { + GATE_IPU_CORE0(CLK_IPU_CORE0_JTAG, "ipu_core0_jtag", "dsp_sel", 0), + GATE_IPU_CORE0(CLK_IPU_CORE0_AXI, "ipu_core0_axi", "dsp_sel", 1), + GATE_IPU_CORE0(CLK_IPU_CORE0_IPU, "ipu_core0_ipu", "dsp_sel", 2), +}; + +static int clk_mt8183_ipu_core0_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_IPU_CORE0_NR_CLK); + + mtk_clk_register_gates(node, ipu_core0_clks, ARRAY_SIZE(ipu_core0_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183_ipu_core0[] = { + { .compatible = "mediatek,mt8183-ipu_core0", }, + {} +}; + +static struct platform_driver clk_mt8183_ipu_core0_drv = { + .probe = clk_mt8183_ipu_core0_probe, + .driver = { + .name = "clk-mt8183-ipu_core0", + .of_match_table = of_match_clk_mt8183_ipu_core0, + }, +}; + +builtin_platform_driver(clk_mt8183_ipu_core0_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c new file mode 100644 index 000000000000..8fd5fe002890 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. 
+// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs ipu_core1_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_IPU_CORE1(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &ipu_core1_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate ipu_core1_clks[] = { + GATE_IPU_CORE1(CLK_IPU_CORE1_JTAG, "ipu_core1_jtag", "dsp_sel", 0), + GATE_IPU_CORE1(CLK_IPU_CORE1_AXI, "ipu_core1_axi", "dsp_sel", 1), + GATE_IPU_CORE1(CLK_IPU_CORE1_IPU, "ipu_core1_ipu", "dsp_sel", 2), +}; + +static int clk_mt8183_ipu_core1_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_IPU_CORE1_NR_CLK); + + mtk_clk_register_gates(node, ipu_core1_clks, ARRAY_SIZE(ipu_core1_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183_ipu_core1[] = { + { .compatible = "mediatek,mt8183-ipu_core1", }, + {} +}; + +static struct platform_driver clk_mt8183_ipu_core1_drv = { + .probe = clk_mt8183_ipu_core1_probe, + .driver = { + .name = "clk-mt8183-ipu_core1", + .of_match_table = of_match_clk_mt8183_ipu_core1, + }, +}; + +builtin_platform_driver(clk_mt8183_ipu_core1_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c new file mode 100644 index 000000000000..3f37d0ef1df1 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. +// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs ipu_adl_cg_regs = { + .set_ofs = 0x204, + .clr_ofs = 0x204, + .sta_ofs = 0x204, +}; + +#define GATE_IPU_ADL_I(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &ipu_adl_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr_inv) + +static const struct mtk_gate ipu_adl_clks[] = { + GATE_IPU_ADL_I(CLK_IPU_ADL_CABGEN, "ipu_adl_cabgen", "dsp_sel", 24), +}; + +static int clk_mt8183_ipu_adl_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_IPU_ADL_NR_CLK); + + mtk_clk_register_gates(node, ipu_adl_clks, ARRAY_SIZE(ipu_adl_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183_ipu_adl[] = { + { .compatible = "mediatek,mt8183-ipu_adl", }, + {} +}; + +static struct platform_driver clk_mt8183_ipu_adl_drv = { + .probe = clk_mt8183_ipu_adl_probe, + .driver = { + .name = "clk-mt8183-ipu_adl", + .of_match_table = of_match_clk_mt8183_ipu_adl, + }, +}; + +builtin_platform_driver(clk_mt8183_ipu_adl_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c new file mode 100644 index 000000000000..7e0eef79c461 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. 
+// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs ipu_conn_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +static const struct mtk_gate_regs ipu_conn_apb_cg_regs = { + .set_ofs = 0x10, + .clr_ofs = 0x10, + .sta_ofs = 0x10, +}; + +static const struct mtk_gate_regs ipu_conn_axi_cg_regs = { + .set_ofs = 0x18, + .clr_ofs = 0x18, + .sta_ofs = 0x18, +}; + +static const struct mtk_gate_regs ipu_conn_axi1_cg_regs = { + .set_ofs = 0x1c, + .clr_ofs = 0x1c, + .sta_ofs = 0x1c, +}; + +static const struct mtk_gate_regs ipu_conn_axi2_cg_regs = { + .set_ofs = 0x20, + .clr_ofs = 0x20, + .sta_ofs = 0x20, +}; + +#define GATE_IPU_CONN(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &ipu_conn_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +#define GATE_IPU_CONN_APB(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &ipu_conn_apb_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr) + +#define GATE_IPU_CONN_AXI_I(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &ipu_conn_axi_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr_inv) + +#define GATE_IPU_CONN_AXI1_I(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &ipu_conn_axi1_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr_inv) + +#define GATE_IPU_CONN_AXI2_I(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &ipu_conn_axi2_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr_inv) + +static const struct mtk_gate ipu_conn_clks[] = { + GATE_IPU_CONN(CLK_IPU_CONN_IPU, + "ipu_conn_ipu", "dsp_sel", 0), + GATE_IPU_CONN(CLK_IPU_CONN_AHB, + "ipu_conn_ahb", "dsp_sel", 1), + GATE_IPU_CONN(CLK_IPU_CONN_AXI, + "ipu_conn_axi", "dsp_sel", 2), + GATE_IPU_CONN(CLK_IPU_CONN_ISP, + "ipu_conn_isp", "dsp_sel", 3), + GATE_IPU_CONN(CLK_IPU_CONN_CAM_ADL, + "ipu_conn_cam_adl", "dsp_sel", 4), + GATE_IPU_CONN(CLK_IPU_CONN_IMG_ADL, + "ipu_conn_img_adl", "dsp_sel", 5), + GATE_IPU_CONN_APB(CLK_IPU_CONN_DAP_RX, + "ipu_conn_dap_rx", "dsp1_sel", 0), + GATE_IPU_CONN_APB(CLK_IPU_CONN_APB2AXI, + "ipu_conn_apb2axi", "dsp1_sel", 3), + GATE_IPU_CONN_APB(CLK_IPU_CONN_APB2AHB, + "ipu_conn_apb2ahb", "dsp1_sel", 20), + GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU_CAB1TO2, + "ipu_conn_ipu_cab1to2", "dsp1_sel", 6), + GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU1_CAB1TO2, + "ipu_conn_ipu1_cab1to2", "dsp1_sel", 13), + GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU2_CAB1TO2, + "ipu_conn_ipu2_cab1to2", "dsp1_sel", 20), + GATE_IPU_CONN_AXI1_I(CLK_IPU_CONN_CAB3TO3, + "ipu_conn_cab3to3", "dsp1_sel", 0), + GATE_IPU_CONN_AXI2_I(CLK_IPU_CONN_CAB2TO1, + "ipu_conn_cab2to1", "dsp1_sel", 14), + GATE_IPU_CONN_AXI2_I(CLK_IPU_CONN_CAB3TO1_SLICE, + "ipu_conn_cab3to1_slice", "dsp1_sel", 17), +}; + +static int clk_mt8183_ipu_conn_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_IPU_CONN_NR_CLK); + + mtk_clk_register_gates(node, ipu_conn_clks, ARRAY_SIZE(ipu_conn_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183_ipu_conn[] = { + { .compatible = "mediatek,mt8183-ipu_conn", }, + {} +}; + +static struct platform_driver clk_mt8183_ipu_conn_drv = { + .probe = clk_mt8183_ipu_conn_probe, + .driver = { + .name = "clk-mt8183-ipu_conn", + .of_match_table = 
of_match_clk_mt8183_ipu_conn, + }, +}; + +builtin_platform_driver(clk_mt8183_ipu_conn_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c new file mode 100644 index 000000000000..99a6b020833e --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. +// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs mfg_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_MFG(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate mfg_clks[] = { + GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0) +}; + +static int clk_mt8183_mfg_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK); + + mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183_mfg[] = { + { .compatible = "mediatek,mt8183-mfgcfg", }, + {} +}; + +static struct platform_driver clk_mt8183_mfg_drv = { + .probe = clk_mt8183_mfg_probe, + .driver = { + .name = "clk-mt8183-mfg", + .of_match_table = of_match_clk_mt8183_mfg, + }, +}; + +builtin_platform_driver(clk_mt8183_mfg_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c new file mode 100644 index 000000000000..720c696b506d --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-mm.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. 
+// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs mm0_cg_regs = { + .set_ofs = 0x104, + .clr_ofs = 0x108, + .sta_ofs = 0x100, +}; + +static const struct mtk_gate_regs mm1_cg_regs = { + .set_ofs = 0x114, + .clr_ofs = 0x118, + .sta_ofs = 0x110, +}; + +#define GATE_MM0(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +#define GATE_MM1(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate mm_clks[] = { + /* MM0 */ + GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0), + GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1), + GATE_MM0(CLK_MM_SMI_LARB1, "mm_smi_larb1", "mm_sel", 2), + GATE_MM0(CLK_MM_GALS_COMM0, "mm_gals_comm0", "mm_sel", 3), + GATE_MM0(CLK_MM_GALS_COMM1, "mm_gals_comm1", "mm_sel", 4), + GATE_MM0(CLK_MM_GALS_CCU2MM, "mm_gals_ccu2mm", "mm_sel", 5), + GATE_MM0(CLK_MM_GALS_IPU12MM, "mm_gals_ipu12mm", "mm_sel", 6), + GATE_MM0(CLK_MM_GALS_IMG2MM, "mm_gals_img2mm", "mm_sel", 7), + GATE_MM0(CLK_MM_GALS_CAM2MM, "mm_gals_cam2mm", "mm_sel", 8), + GATE_MM0(CLK_MM_GALS_IPU2MM, "mm_gals_ipu2mm", "mm_sel", 9), + GATE_MM0(CLK_MM_MDP_DL_TXCK, "mm_mdp_dl_txck", "mm_sel", 10), + GATE_MM0(CLK_MM_IPU_DL_TXCK, "mm_ipu_dl_txck", "mm_sel", 11), + GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 12), + GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 13), + GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 14), + GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 15), + GATE_MM0(CLK_MM_MDP_TDSHP, "mm_mdp_tdshp", "mm_sel", 16), + GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 17), + GATE_MM0(CLK_MM_MDP_WDMA0, "mm_mdp_wdma0", "mm_sel", 18), + GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 19), + GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 20), + GATE_MM0(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_sel", 21), + GATE_MM0(CLK_MM_DISP_OVL1_2L, "mm_disp_ovl1_2l", "mm_sel", 22), + GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 23), + GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 24), + GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 25), + GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 26), + GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "mm_sel", 27), + GATE_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "mm_sel", 28), + GATE_MM0(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "mm_sel", 29), + GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "mm_sel", 30), + GATE_MM0(CLK_MM_DISP_SPLIT, "mm_disp_split", "mm_sel", 31), + /* MM1 */ + GATE_MM1(CLK_MM_DSI0_MM, "mm_dsi0_mm", "mm_sel", 0), + GATE_MM1(CLK_MM_DSI0_IF, "mm_dsi0_if", "mm_sel", 1), + GATE_MM1(CLK_MM_DPI_MM, "mm_dpi_mm", "mm_sel", 2), + GATE_MM1(CLK_MM_DPI_IF, "mm_dpi_if", "dpi0_sel", 3), + GATE_MM1(CLK_MM_FAKE_ENG2, "mm_fake_eng2", "mm_sel", 4), + GATE_MM1(CLK_MM_MDP_DL_RX, "mm_mdp_dl_rx", "mm_sel", 5), + GATE_MM1(CLK_MM_IPU_DL_RX, "mm_ipu_dl_rx", "mm_sel", 6), + GATE_MM1(CLK_MM_26M, "mm_26m", "f_f26m_ck", 7), + GATE_MM1(CLK_MM_MMSYS_R2Y, "mm_mmsys_r2y", "mm_sel", 8), + GATE_MM1(CLK_MM_DISP_RSZ, "mm_disp_rsz", "mm_sel", 9), + GATE_MM1(CLK_MM_MDP_AAL, "mm_mdp_aal", "mm_sel", 10), + GATE_MM1(CLK_MM_MDP_CCORR, "mm_mdp_ccorr", "mm_sel", 11), + GATE_MM1(CLK_MM_DBI_MM, "mm_dbi_mm", "mm_sel", 12), + GATE_MM1(CLK_MM_DBI_IF, "mm_dbi_if", "dpi0_sel", 13), +}; + 
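The MT8183 gate tables above all hang off small mtk_gate_regs descriptors (set/clr/status offsets) plus one of the mtk_clk_gate_ops variants. As a rough userspace model of the non-inverted "setclr" flavour used by GATE_MM0/GATE_MM1 (an editor's illustration, not MediaTek code): a 1 in the status register means the clock is gated, writing the bit to set_ofs gates it, writing it to clr_ofs ungates it, and the *_inv variants flip that polarity. MMIO/regmap access is replaced by a plain array, and the status mirroring is a simulation shortcut for what the hardware does on its own.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cg_regs { uint32_t set_ofs, clr_ofs, sta_ofs; };

static uint32_t io[0x200 / 4];			/* stand-in for the MMIO block */

static const struct cg_regs mm0_cg_regs = {	/* offsets from the hunk above */
	.set_ofs = 0x104, .clr_ofs = 0x108, .sta_ofs = 0x100,
};

static void gate_disable(const struct cg_regs *r, unsigned int shift)
{
	io[r->set_ofs / 4] = 1u << shift;	/* write-1-to-set: gate the clock */
	io[r->sta_ofs / 4] |= 1u << shift;	/* hardware reflects it in STA */
}

static void gate_enable(const struct cg_regs *r, unsigned int shift)
{
	io[r->clr_ofs / 4] = 1u << shift;	/* write-1-to-clear: ungate */
	io[r->sta_ofs / 4] &= ~(1u << shift);
}

static bool gate_is_enabled(const struct cg_regs *r, unsigned int shift)
{
	return !(io[r->sta_ofs / 4] & (1u << shift));
}

int main(void)
{
	unsigned int shift = 0;			/* e.g. the CLK_MM_SMI_COMMON bit */

	gate_disable(&mm0_cg_regs, shift);
	printf("after disable: enabled=%d\n", gate_is_enabled(&mm0_cg_regs, shift));
	gate_enable(&mm0_cg_regs, shift);
	printf("after enable:  enabled=%d\n", gate_is_enabled(&mm0_cg_regs, shift));
	return 0;
}

The dedicated set/clear registers are what let the real driver toggle a single gate without a read-modify-write, which is why the "no_setclr" variants (single shared register, as in the audio and ipu_adl files above) need the regmap update path instead.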
+static int clk_mt8183_mm_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK); + + mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183_mm[] = { + { .compatible = "mediatek,mt8183-mmsys", }, + {} +}; + +static struct platform_driver clk_mt8183_mm_drv = { + .probe = clk_mt8183_mm_probe, + .driver = { + .name = "clk-mt8183-mm", + .of_match_table = of_match_clk_mt8183_mm, + }, +}; + +builtin_platform_driver(clk_mt8183_mm_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c new file mode 100644 index 000000000000..6250fd1e0edc --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-vdec.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. +// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs vdec0_cg_regs = { + .set_ofs = 0x0, + .clr_ofs = 0x4, + .sta_ofs = 0x0, +}; + +static const struct mtk_gate_regs vdec1_cg_regs = { + .set_ofs = 0x8, + .clr_ofs = 0xc, + .sta_ofs = 0x8, +}; + +#define GATE_VDEC0_I(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr_inv) + +#define GATE_VDEC1_I(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr_inv) + +static const struct mtk_gate vdec_clks[] = { + /* VDEC0 */ + GATE_VDEC0_I(CLK_VDEC_VDEC, "vdec_vdec", "mm_sel", 0), + /* VDEC1 */ + GATE_VDEC1_I(CLK_VDEC_LARB1, "vdec_larb1", "mm_sel", 0), +}; + +static int clk_mt8183_vdec_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK); + + mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183_vdec[] = { + { .compatible = "mediatek,mt8183-vdecsys", }, + {} +}; + +static struct platform_driver clk_mt8183_vdec_drv = { + .probe = clk_mt8183_vdec_probe, + .driver = { + .name = "clk-mt8183-vdec", + .of_match_table = of_match_clk_mt8183_vdec, + }, +}; + +builtin_platform_driver(clk_mt8183_vdec_drv); diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c new file mode 100644 index 000000000000..6678ef03fab2 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183-venc.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. 
+// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static const struct mtk_gate_regs venc_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_VENC_I(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr_inv) + +static const struct mtk_gate venc_clks[] = { + GATE_VENC_I(CLK_VENC_LARB, "venc_larb", + "mm_sel", 0), + GATE_VENC_I(CLK_VENC_VENC, "venc_venc", + "mm_sel", 4), + GATE_VENC_I(CLK_VENC_JPGENC, "venc_jpgenc", + "mm_sel", 8), +}; + +static int clk_mt8183_venc_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK); + + mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183_venc[] = { + { .compatible = "mediatek,mt8183-vencsys", }, + {} +}; + +static struct platform_driver clk_mt8183_venc_drv = { + .probe = clk_mt8183_venc_probe, + .driver = { + .name = "clk-mt8183-venc", + .of_match_table = of_match_clk_mt8183_venc, + }, +}; + +builtin_platform_driver(clk_mt8183_venc_drv); diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c new file mode 100644 index 000000000000..9d8651033ae9 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8183.c @@ -0,0 +1,1284 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2018 MediaTek Inc. +// Author: Weiyi Lu <weiyi.lu@mediatek.com> + +#include <linux/delay.h> +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "clk-mtk.h" +#include "clk-mux.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8183-clk.h> + +static DEFINE_SPINLOCK(mt8183_clk_lock); + +static const struct mtk_fixed_clk top_fixed_clks[] = { + FIXED_CLK(CLK_TOP_CLK26M, "f_f26m_ck", "clk26m", 26000000), + FIXED_CLK(CLK_TOP_ULPOSC, "osc", NULL, 250000), + FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000), +}; + +static const struct mtk_fixed_factor top_divs[] = { + FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, + 2), + FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1, + 2), + FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1, + 1), + FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "syspll_ck", 1, + 2), + FACTOR(CLK_TOP_SYSPLL_D2_D2, "syspll_d2_d2", "syspll_d2", 1, + 2), + FACTOR(CLK_TOP_SYSPLL_D2_D4, "syspll_d2_d4", "syspll_d2", 1, + 4), + FACTOR(CLK_TOP_SYSPLL_D2_D8, "syspll_d2_d8", "syspll_d2", 1, + 8), + FACTOR(CLK_TOP_SYSPLL_D2_D16, "syspll_d2_d16", "syspll_d2", 1, + 16), + FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, + 3), + FACTOR(CLK_TOP_SYSPLL_D3_D2, "syspll_d3_d2", "syspll_d3", 1, + 2), + FACTOR(CLK_TOP_SYSPLL_D3_D4, "syspll_d3_d4", "syspll_d3", 1, + 4), + FACTOR(CLK_TOP_SYSPLL_D3_D8, "syspll_d3_d8", "syspll_d3", 1, + 8), + FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, + 5), + FACTOR(CLK_TOP_SYSPLL_D5_D2, "syspll_d5_d2", "syspll_d5", 1, + 2), + FACTOR(CLK_TOP_SYSPLL_D5_D4, "syspll_d5_d4", "syspll_d5", 1, + 4), + FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1, + 7), + FACTOR(CLK_TOP_SYSPLL_D7_D2, "syspll_d7_d2", "syspll_d7", 1, + 2), + 
FACTOR(CLK_TOP_SYSPLL_D7_D4, "syspll_d7_d4", "syspll_d7", 1, + 4), + FACTOR(CLK_TOP_UNIVPLL_CK, "univpll_ck", "univpll", 1, + 1), + FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll_ck", 1, + 2), + FACTOR(CLK_TOP_UNIVPLL_D2_D2, "univpll_d2_d2", "univpll_d2", 1, + 2), + FACTOR(CLK_TOP_UNIVPLL_D2_D4, "univpll_d2_d4", "univpll_d2", 1, + 4), + FACTOR(CLK_TOP_UNIVPLL_D2_D8, "univpll_d2_d8", "univpll_d2", 1, + 8), + FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, + 3), + FACTOR(CLK_TOP_UNIVPLL_D3_D2, "univpll_d3_d2", "univpll_d3", 1, + 2), + FACTOR(CLK_TOP_UNIVPLL_D3_D4, "univpll_d3_d4", "univpll_d3", 1, + 4), + FACTOR(CLK_TOP_UNIVPLL_D3_D8, "univpll_d3_d8", "univpll_d3", 1, + 8), + FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, + 5), + FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1, + 2), + FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1, + 4), + FACTOR(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1, + 8), + FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, + 7), + FACTOR(CLK_TOP_UNIVP_192M_CK, "univ_192m_ck", "univpll_192m", 1, + 1), + FACTOR(CLK_TOP_UNIVP_192M_D2, "univ_192m_d2", "univ_192m_ck", 1, + 2), + FACTOR(CLK_TOP_UNIVP_192M_D4, "univ_192m_d4", "univ_192m_ck", 1, + 4), + FACTOR(CLK_TOP_UNIVP_192M_D8, "univ_192m_d8", "univ_192m_ck", 1, + 8), + FACTOR(CLK_TOP_UNIVP_192M_D16, "univ_192m_d16", "univ_192m_ck", 1, + 16), + FACTOR(CLK_TOP_UNIVP_192M_D32, "univ_192m_d32", "univ_192m_ck", 1, + 32), + FACTOR(CLK_TOP_APLL1_CK, "apll1_ck", "apll1", 1, + 1), + FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1", 1, + 2), + FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1, + 4), + FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1", 1, + 8), + FACTOR(CLK_TOP_APLL2_CK, "apll2_ck", "apll2", 1, + 1), + FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2", 1, + 2), + FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1, + 4), + FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2", 1, + 8), + FACTOR(CLK_TOP_TVDPLL_CK, "tvdpll_ck", "tvdpll", 1, + 1), + FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1, + 2), + FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, + 4), + FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll", 1, + 8), + FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll", 1, + 16), + FACTOR(CLK_TOP_MMPLL_CK, "mmpll_ck", "mmpll", 1, + 1), + FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll", 1, + 4), + FACTOR(CLK_TOP_MMPLL_D4_D2, "mmpll_d4_d2", "mmpll_d4", 1, + 2), + FACTOR(CLK_TOP_MMPLL_D4_D4, "mmpll_d4_d4", "mmpll_d4", 1, + 4), + FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1, + 5), + FACTOR(CLK_TOP_MMPLL_D5_D2, "mmpll_d5_d2", "mmpll_d5", 1, + 2), + FACTOR(CLK_TOP_MMPLL_D5_D4, "mmpll_d5_d4", "mmpll_d5", 1, + 4), + FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll", 1, + 6), + FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1, + 7), + FACTOR(CLK_TOP_MFGPLL_CK, "mfgpll_ck", "mfgpll", 1, + 1), + FACTOR(CLK_TOP_MSDCPLL_CK, "msdcpll_ck", "msdcpll", 1, + 1), + FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, + 2), + FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, + 4), + FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1, + 8), + FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1, + 16), + FACTOR(CLK_TOP_AD_OSC_CK, "ad_osc_ck", "osc", 1, + 1), + FACTOR(CLK_TOP_OSC_D2, "osc_d2", "osc", 1, + 2), + FACTOR(CLK_TOP_OSC_D4, "osc_d4", "osc", 1, + 4), + FACTOR(CLK_TOP_OSC_D8, "osc_d8", "osc", 1, + 8), + FACTOR(CLK_TOP_OSC_D16, "osc_d16", "osc", 1, + 16), + FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1, + 2), + FACTOR(CLK_TOP_UNIVPLL_D3_D16, "univpll_d3_d16", "univpll_d3", 
1, + 16), +}; + +static const char * const axi_parents[] = { + "clk26m", + "syspll_d2_d4", + "syspll_d7", + "osc_d4" +}; + +static const char * const mm_parents[] = { + "clk26m", + "mmpll_d7", + "syspll_d3", + "univpll_d2_d2", + "syspll_d2_d2", + "syspll_d3_d2" +}; + +static const char * const img_parents[] = { + "clk26m", + "mmpll_d6", + "univpll_d3", + "syspll_d3", + "univpll_d2_d2", + "syspll_d2_d2", + "univpll_d3_d2", + "syspll_d3_d2" +}; + +static const char * const cam_parents[] = { + "clk26m", + "syspll_d2", + "mmpll_d6", + "syspll_d3", + "mmpll_d7", + "univpll_d3", + "univpll_d2_d2", + "syspll_d2_d2", + "syspll_d3_d2", + "univpll_d3_d2" +}; + +static const char * const dsp_parents[] = { + "clk26m", + "mmpll_d6", + "mmpll_d7", + "univpll_d3", + "syspll_d3", + "univpll_d2_d2", + "syspll_d2_d2", + "univpll_d3_d2", + "syspll_d3_d2" +}; + +static const char * const dsp1_parents[] = { + "clk26m", + "mmpll_d6", + "mmpll_d7", + "univpll_d3", + "syspll_d3", + "univpll_d2_d2", + "syspll_d2_d2", + "univpll_d3_d2", + "syspll_d3_d2" +}; + +static const char * const dsp2_parents[] = { + "clk26m", + "mmpll_d6", + "mmpll_d7", + "univpll_d3", + "syspll_d3", + "univpll_d2_d2", + "syspll_d2_d2", + "univpll_d3_d2", + "syspll_d3_d2" +}; + +static const char * const ipu_if_parents[] = { + "clk26m", + "mmpll_d6", + "mmpll_d7", + "univpll_d3", + "syspll_d3", + "univpll_d2_d2", + "syspll_d2_d2", + "univpll_d3_d2", + "syspll_d3_d2" +}; + +static const char * const mfg_parents[] = { + "clk26m", + "mfgpll_ck", + "univpll_d3", + "syspll_d3" +}; + +static const char * const f52m_mfg_parents[] = { + "clk26m", + "univpll_d3_d2", + "univpll_d3_d4", + "univpll_d3_d8" +}; + +static const char * const camtg_parents[] = { + "clk26m", + "univ_192m_d8", + "univpll_d3_d8", + "univ_192m_d4", + "univpll_d3_d16", + "csw_f26m_ck_d2", + "univ_192m_d16", + "univ_192m_d32" +}; + +static const char * const camtg2_parents[] = { + "clk26m", + "univ_192m_d8", + "univpll_d3_d8", + "univ_192m_d4", + "univpll_d3_d16", + "csw_f26m_ck_d2", + "univ_192m_d16", + "univ_192m_d32" +}; + +static const char * const camtg3_parents[] = { + "clk26m", + "univ_192m_d8", + "univpll_d3_d8", + "univ_192m_d4", + "univpll_d3_d16", + "csw_f26m_ck_d2", + "univ_192m_d16", + "univ_192m_d32" +}; + +static const char * const camtg4_parents[] = { + "clk26m", + "univ_192m_d8", + "univpll_d3_d8", + "univ_192m_d4", + "univpll_d3_d16", + "csw_f26m_ck_d2", + "univ_192m_d16", + "univ_192m_d32" +}; + +static const char * const uart_parents[] = { + "clk26m", + "univpll_d3_d8" +}; + +static const char * const spi_parents[] = { + "clk26m", + "syspll_d5_d2", + "syspll_d3_d4", + "msdcpll_d4" +}; + +static const char * const msdc50_hclk_parents[] = { + "clk26m", + "syspll_d2_d2", + "syspll_d3_d2" +}; + +static const char * const msdc50_0_parents[] = { + "clk26m", + "msdcpll_ck", + "msdcpll_d2", + "univpll_d2_d4", + "syspll_d3_d2", + "univpll_d2_d2" +}; + +static const char * const msdc30_1_parents[] = { + "clk26m", + "univpll_d3_d2", + "syspll_d3_d2", + "syspll_d7", + "msdcpll_d2" +}; + +static const char * const msdc30_2_parents[] = { + "clk26m", + "univpll_d3_d2", + "syspll_d3_d2", + "syspll_d7", + "msdcpll_d2" +}; + +static const char * const audio_parents[] = { + "clk26m", + "syspll_d5_d4", + "syspll_d7_d4", + "syspll_d2_d16" +}; + +static const char * const aud_intbus_parents[] = { + "clk26m", + "syspll_d2_d4", + "syspll_d7_d2" +}; + +static const char * const pmicspi_parents[] = { + "clk26m", + "syspll_d2_d8", + "osc_d8" +}; + +static const char * const 
fpwrap_ulposc_parents[] = { + "clk26m", + "osc_d16", + "osc_d4", + "osc_d8" +}; + +static const char * const atb_parents[] = { + "clk26m", + "syspll_d2_d2", + "syspll_d5" +}; + +static const char * const sspm_parents[] = { + "clk26m", + "univpll_d2_d4", + "syspll_d2_d2", + "univpll_d2_d2", + "syspll_d3" +}; + +static const char * const dpi0_parents[] = { + "clk26m", + "tvdpll_d2", + "tvdpll_d4", + "tvdpll_d8", + "tvdpll_d16", + "univpll_d5_d2", + "univpll_d3_d4", + "syspll_d3_d4", + "univpll_d3_d8" +}; + +static const char * const scam_parents[] = { + "clk26m", + "syspll_d5_d2" +}; + +static const char * const disppwm_parents[] = { + "clk26m", + "univpll_d3_d4", + "osc_d2", + "osc_d4", + "osc_d16" +}; + +static const char * const usb_top_parents[] = { + "clk26m", + "univpll_d5_d4", + "univpll_d3_d4", + "univpll_d5_d2" +}; + + +static const char * const ssusb_top_xhci_parents[] = { + "clk26m", + "univpll_d5_d4", + "univpll_d3_d4", + "univpll_d5_d2" +}; + +static const char * const spm_parents[] = { + "clk26m", + "syspll_d2_d8" +}; + +static const char * const i2c_parents[] = { + "clk26m", + "syspll_d2_d8", + "univpll_d5_d2" +}; + +static const char * const scp_parents[] = { + "clk26m", + "univpll_d2_d8", + "syspll_d5", + "syspll_d2_d2", + "univpll_d2_d2", + "syspll_d3", + "univpll_d3" +}; + +static const char * const seninf_parents[] = { + "clk26m", + "univpll_d2_d2", + "univpll_d3_d2", + "univpll_d2_d4" +}; + +static const char * const dxcc_parents[] = { + "clk26m", + "syspll_d2_d2", + "syspll_d2_d4", + "syspll_d2_d8" +}; + +static const char * const aud_engen1_parents[] = { + "clk26m", + "apll1_d2", + "apll1_d4", + "apll1_d8" +}; + +static const char * const aud_engen2_parents[] = { + "clk26m", + "apll2_d2", + "apll2_d4", + "apll2_d8" +}; + +static const char * const faes_ufsfde_parents[] = { + "clk26m", + "syspll_d2", + "syspll_d2_d2", + "syspll_d3", + "syspll_d2_d4", + "univpll_d3" +}; + +static const char * const fufs_parents[] = { + "clk26m", + "syspll_d2_d4", + "syspll_d2_d8", + "syspll_d2_d16" +}; + +static const char * const aud_1_parents[] = { + "clk26m", + "apll1_ck" +}; + +static const char * const aud_2_parents[] = { + "clk26m", + "apll2_ck" +}; + +/* + * CRITICAL CLOCK: + * axi_sel is the main bus clock of whole SOC. + * spm_sel is the clock of the always-on co-processor. 
+ */ +static const struct mtk_mux top_muxes[] = { + /* CLK_CFG_0 */ + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_AXI, "axi_sel", + axi_parents, 0x40, + 0x44, 0x48, 0, 2, 7, 0x004, 0, CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MM, "mm_sel", + mm_parents, 0x40, + 0x44, 0x48, 8, 3, 15, 0x004, 1), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_IMG, "img_sel", + img_parents, 0x40, + 0x44, 0x48, 16, 3, 23, 0x004, 2), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAM, "cam_sel", + cam_parents, 0x40, + 0x44, 0x48, 24, 4, 31, 0x004, 3), + /* CLK_CFG_1 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP, "dsp_sel", + dsp_parents, 0x50, + 0x54, 0x58, 0, 4, 7, 0x004, 4), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP1, "dsp1_sel", + dsp1_parents, 0x50, + 0x54, 0x58, 8, 4, 15, 0x004, 5), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP2, "dsp2_sel", + dsp2_parents, 0x50, + 0x54, 0x58, 16, 4, 23, 0x004, 6), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_IPU_IF, "ipu_if_sel", + ipu_if_parents, 0x50, + 0x54, 0x58, 24, 4, 31, 0x004, 7), + /* CLK_CFG_2 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MFG, "mfg_sel", + mfg_parents, 0x60, + 0x64, 0x68, 0, 2, 7, 0x004, 8), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_F52M_MFG, "f52m_mfg_sel", + f52m_mfg_parents, 0x60, + 0x64, 0x68, 8, 2, 15, 0x004, 9), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG, "camtg_sel", + camtg_parents, 0x60, + 0x64, 0x68, 16, 3, 23, 0x004, 10), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG2, "camtg2_sel", + camtg2_parents, 0x60, + 0x64, 0x68, 24, 3, 31, 0x004, 11), + /* CLK_CFG_3 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG3, "camtg3_sel", + camtg3_parents, 0x70, + 0x74, 0x78, 0, 3, 7, 0x004, 12), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG4, "camtg4_sel", + camtg4_parents, 0x70, + 0x74, 0x78, 8, 3, 15, 0x004, 13), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_UART, "uart_sel", + uart_parents, 0x70, + 0x74, 0x78, 16, 1, 23, 0x004, 14), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SPI, "spi_sel", + spi_parents, 0x70, + 0x74, 0x78, 24, 2, 31, 0x004, 15), + /* CLK_CFG_4 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC50_0_HCLK, "msdc50_hclk_sel", + msdc50_hclk_parents, 0x80, + 0x84, 0x88, 0, 2, 7, 0x004, 16), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC50_0, "msdc50_0_sel", + msdc50_0_parents, 0x80, + 0x84, 0x88, 8, 3, 15, 0x004, 17), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC30_1, "msdc30_1_sel", + msdc30_1_parents, 0x80, + 0x84, 0x88, 16, 3, 23, 0x004, 18), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC30_2, "msdc30_2_sel", + msdc30_2_parents, 0x80, + 0x84, 0x88, 24, 3, 31, 0x004, 19), + /* CLK_CFG_5 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUDIO, "audio_sel", + audio_parents, 0x90, + 0x94, 0x98, 0, 2, 7, 0x004, 20), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_INTBUS, "aud_intbus_sel", + aud_intbus_parents, 0x90, + 0x94, 0x98, 8, 2, 15, 0x004, 21), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_PMICSPI, "pmicspi_sel", + pmicspi_parents, 0x90, + 0x94, 0x98, 16, 2, 23, 0x004, 22), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FPWRAP_ULPOSC, "fpwrap_ulposc_sel", + fpwrap_ulposc_parents, 0x90, + 0x94, 0x98, 24, 2, 31, 0x004, 23), + /* CLK_CFG_6 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_ATB, "atb_sel", + atb_parents, 0xa0, + 0xa4, 0xa8, 0, 2, 7, 0x004, 24), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SSPM, "sspm_sel", + sspm_parents, 0xa0, + 0xa4, 0xa8, 8, 3, 15, 0x004, 25), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DPI0, "dpi0_sel", + dpi0_parents, 0xa0, + 0xa4, 0xa8, 16, 4, 23, 0x004, 26), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCAM, "scam_sel", + scam_parents, 0xa0, + 0xa4, 0xa8, 24, 1, 31, 0x004, 27), + /* CLK_CFG_7 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DISP_PWM, "disppwm_sel", + disppwm_parents, 0xb0, + 0xb4, 0xb8, 0, 3, 
7, 0x004, 28), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_USB_TOP, "usb_top_sel", + usb_top_parents, 0xb0, + 0xb4, 0xb8, 8, 2, 15, 0x004, 29), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SSUSB_TOP_XHCI, "ssusb_top_xhci_sel", + ssusb_top_xhci_parents, 0xb0, + 0xb4, 0xb8, 16, 2, 23, 0x004, 30), + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_SPM, "spm_sel", + spm_parents, 0xb0, + 0xb4, 0xb8, 24, 1, 31, 0x008, 0, CLK_IS_CRITICAL), + /* CLK_CFG_8 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_I2C, "i2c_sel", + i2c_parents, 0xc0, + 0xc4, 0xc8, 0, 2, 7, 0x008, 1), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCP, "scp_sel", + scp_parents, 0xc0, + 0xc4, 0xc8, 8, 3, 15, 0x008, 2), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SENINF, "seninf_sel", + seninf_parents, 0xc0, + 0xc4, 0xc8, 16, 2, 23, 0x008, 3), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DXCC, "dxcc_sel", + dxcc_parents, 0xc0, + 0xc4, 0xc8, 24, 2, 31, 0x008, 4), + /* CLK_CFG_9 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_ENG1, "aud_eng1_sel", + aud_engen1_parents, 0xd0, + 0xd4, 0xd8, 0, 2, 7, 0x008, 5), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_ENG2, "aud_eng2_sel", + aud_engen2_parents, 0xd0, + 0xd4, 0xd8, 8, 2, 15, 0x008, 6), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FAES_UFSFDE, "faes_ufsfde_sel", + faes_ufsfde_parents, 0xd0, + 0xd4, 0xd8, 16, 3, 23, 0x008, 7), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FUFS, "fufs_sel", + fufs_parents, 0xd0, + 0xd4, 0xd8, 24, 2, 31, 0x008, 8), + /* CLK_CFG_10 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_1, "aud_1_sel", + aud_1_parents, 0xe0, + 0xe4, 0xe8, 0, 1, 7, 0x008, 9), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_2, "aud_2_sel", + aud_2_parents, 0xe0, + 0xe4, 0xe8, 8, 1, 15, 0x008, 10), +}; + +static const char * const apll_i2s0_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const char * const apll_i2s1_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const char * const apll_i2s2_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const char * const apll_i2s3_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const char * const apll_i2s4_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static const char * const apll_i2s5_parents[] = { + "aud_1_sel", + "aud_2_sel" +}; + +static struct mtk_composite top_aud_muxes[] = { + MUX(CLK_TOP_MUX_APLL_I2S0, "apll_i2s0_sel", apll_i2s0_parents, + 0x320, 8, 1), + MUX(CLK_TOP_MUX_APLL_I2S1, "apll_i2s1_sel", apll_i2s1_parents, + 0x320, 9, 1), + MUX(CLK_TOP_MUX_APLL_I2S2, "apll_i2s2_sel", apll_i2s2_parents, + 0x320, 10, 1), + MUX(CLK_TOP_MUX_APLL_I2S3, "apll_i2s3_sel", apll_i2s3_parents, + 0x320, 11, 1), + MUX(CLK_TOP_MUX_APLL_I2S4, "apll_i2s4_sel", apll_i2s4_parents, + 0x320, 12, 1), + MUX(CLK_TOP_MUX_APLL_I2S5, "apll_i2s5_sel", apll_i2s5_parents, + 0x328, 20, 1), +}; + +static const char * const mcu_mp0_parents[] = { + "clk26m", + "armpll_ll", + "armpll_div_pll1", + "armpll_div_pll2" +}; + +static const char * const mcu_mp2_parents[] = { + "clk26m", + "armpll_l", + "armpll_div_pll1", + "armpll_div_pll2" +}; + +static const char * const mcu_bus_parents[] = { + "clk26m", + "ccipll", + "armpll_div_pll1", + "armpll_div_pll2" +}; + +static struct mtk_composite mcu_muxes[] = { + /* mp0_pll_divider_cfg */ + MUX(CLK_MCU_MP0_SEL, "mcu_mp0_sel", mcu_mp0_parents, 0x7A0, 9, 2), + /* mp2_pll_divider_cfg */ + MUX(CLK_MCU_MP2_SEL, "mcu_mp2_sel", mcu_mp2_parents, 0x7A8, 9, 2), + /* bus_pll_divider_cfg */ + MUX(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0, 9, 2), +}; + +static struct mtk_composite top_aud_divs[] = { + DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll_i2s0_sel", + 0x320, 2, 0x324, 8, 0), + 
DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll_i2s1_sel", + 0x320, 3, 0x324, 8, 8), + DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll_i2s2_sel", + 0x320, 4, 0x324, 8, 16), + DIV_GATE(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll_i2s3_sel", + 0x320, 5, 0x324, 8, 24), + DIV_GATE(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll_i2s4_sel", + 0x320, 6, 0x328, 8, 0), + DIV_GATE(CLK_TOP_APLL12_DIVB, "apll12_divb", "apll12_div4", + 0x320, 7, 0x328, 8, 8), +}; + +static const struct mtk_gate_regs top_cg_regs = { + .set_ofs = 0x104, + .clr_ofs = 0x104, + .sta_ofs = 0x104, +}; + +#define GATE_TOP(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &top_cg_regs, _shift, \ + &mtk_clk_gate_ops_no_setclr_inv) + +static const struct mtk_gate top_clks[] = { + /* TOP */ + GATE_TOP(CLK_TOP_ARMPLL_DIV_PLL1, "armpll_div_pll1", "mainpll", 4), + GATE_TOP(CLK_TOP_ARMPLL_DIV_PLL2, "armpll_div_pll2", "univpll", 5), +}; + +static const struct mtk_gate_regs infra0_cg_regs = { + .set_ofs = 0x80, + .clr_ofs = 0x84, + .sta_ofs = 0x90, +}; + +static const struct mtk_gate_regs infra1_cg_regs = { + .set_ofs = 0x88, + .clr_ofs = 0x8c, + .sta_ofs = 0x94, +}; + +static const struct mtk_gate_regs infra2_cg_regs = { + .set_ofs = 0xa4, + .clr_ofs = 0xa8, + .sta_ofs = 0xac, +}; + +static const struct mtk_gate_regs infra3_cg_regs = { + .set_ofs = 0xc0, + .clr_ofs = 0xc4, + .sta_ofs = 0xc8, +}; + +#define GATE_INFRA0(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +#define GATE_INFRA1(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +#define GATE_INFRA2(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +#define GATE_INFRA3(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &infra3_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate infra_clks[] = { + /* INFRA0 */ + GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", + "axi_sel", 0), + GATE_INFRA0(CLK_INFRA_PMIC_AP, "infra_pmic_ap", + "axi_sel", 1), + GATE_INFRA0(CLK_INFRA_PMIC_MD, "infra_pmic_md", + "axi_sel", 2), + GATE_INFRA0(CLK_INFRA_PMIC_CONN, "infra_pmic_conn", + "axi_sel", 3), + GATE_INFRA0(CLK_INFRA_SCPSYS, "infra_scp", + "scp_sel", 4), + GATE_INFRA0(CLK_INFRA_SEJ, "infra_sej", + "f_f26m_ck", 5), + GATE_INFRA0(CLK_INFRA_APXGPT, "infra_apxgpt", + "axi_sel", 6), + GATE_INFRA0(CLK_INFRA_ICUSB, "infra_icusb", + "axi_sel", 8), + GATE_INFRA0(CLK_INFRA_GCE, "infra_gce", + "axi_sel", 9), + GATE_INFRA0(CLK_INFRA_THERM, "infra_therm", + "axi_sel", 10), + GATE_INFRA0(CLK_INFRA_I2C0, "infra_i2c0", + "i2c_sel", 11), + GATE_INFRA0(CLK_INFRA_I2C1, "infra_i2c1", + "i2c_sel", 12), + GATE_INFRA0(CLK_INFRA_I2C2, "infra_i2c2", + "i2c_sel", 13), + GATE_INFRA0(CLK_INFRA_I2C3, "infra_i2c3", + "i2c_sel", 14), + GATE_INFRA0(CLK_INFRA_PWM_HCLK, "infra_pwm_hclk", + "axi_sel", 15), + GATE_INFRA0(CLK_INFRA_PWM1, "infra_pwm1", + "i2c_sel", 16), + GATE_INFRA0(CLK_INFRA_PWM2, "infra_pwm2", + "i2c_sel", 17), + GATE_INFRA0(CLK_INFRA_PWM3, "infra_pwm3", + "i2c_sel", 18), + GATE_INFRA0(CLK_INFRA_PWM4, "infra_pwm4", + "i2c_sel", 19), + GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm", + "i2c_sel", 21), + GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0", + "uart_sel", 22), + GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1", + "uart_sel", 23), + GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2", + "uart_sel", 24), + GATE_INFRA0(CLK_INFRA_UART3, "infra_uart3", + "uart_sel", 25), + 
GATE_INFRA0(CLK_INFRA_GCE_26M, "infra_gce_26m", + "axi_sel", 27), + GATE_INFRA0(CLK_INFRA_CQ_DMA_FPC, "infra_cqdma_fpc", + "axi_sel", 28), + GATE_INFRA0(CLK_INFRA_BTIF, "infra_btif", + "axi_sel", 31), + /* INFRA1 */ + GATE_INFRA1(CLK_INFRA_SPI0, "infra_spi0", + "spi_sel", 1), + GATE_INFRA1(CLK_INFRA_MSDC0, "infra_msdc0", + "msdc50_hclk_sel", 2), + GATE_INFRA1(CLK_INFRA_MSDC1, "infra_msdc1", + "axi_sel", 4), + GATE_INFRA1(CLK_INFRA_MSDC2, "infra_msdc2", + "axi_sel", 5), + GATE_INFRA1(CLK_INFRA_MSDC0_SCK, "infra_msdc0_sck", + "msdc50_0_sel", 6), + GATE_INFRA1(CLK_INFRA_DVFSRC, "infra_dvfsrc", + "f_f26m_ck", 7), + GATE_INFRA1(CLK_INFRA_GCPU, "infra_gcpu", + "axi_sel", 8), + GATE_INFRA1(CLK_INFRA_TRNG, "infra_trng", + "axi_sel", 9), + GATE_INFRA1(CLK_INFRA_AUXADC, "infra_auxadc", + "f_f26m_ck", 10), + GATE_INFRA1(CLK_INFRA_CPUM, "infra_cpum", + "axi_sel", 11), + GATE_INFRA1(CLK_INFRA_CCIF1_AP, "infra_ccif1_ap", + "axi_sel", 12), + GATE_INFRA1(CLK_INFRA_CCIF1_MD, "infra_ccif1_md", + "axi_sel", 13), + GATE_INFRA1(CLK_INFRA_AUXADC_MD, "infra_auxadc_md", + "f_f26m_ck", 14), + GATE_INFRA1(CLK_INFRA_MSDC1_SCK, "infra_msdc1_sck", + "msdc30_1_sel", 16), + GATE_INFRA1(CLK_INFRA_MSDC2_SCK, "infra_msdc2_sck", + "msdc30_2_sel", 17), + GATE_INFRA1(CLK_INFRA_AP_DMA, "infra_apdma", + "axi_sel", 18), + GATE_INFRA1(CLK_INFRA_XIU, "infra_xiu", + "axi_sel", 19), + GATE_INFRA1(CLK_INFRA_DEVICE_APC, "infra_device_apc", + "axi_sel", 20), + GATE_INFRA1(CLK_INFRA_CCIF_AP, "infra_ccif_ap", + "axi_sel", 23), + GATE_INFRA1(CLK_INFRA_DEBUGSYS, "infra_debugsys", + "axi_sel", 24), + GATE_INFRA1(CLK_INFRA_AUDIO, "infra_audio", + "axi_sel", 25), + GATE_INFRA1(CLK_INFRA_CCIF_MD, "infra_ccif_md", + "axi_sel", 26), + GATE_INFRA1(CLK_INFRA_DXCC_SEC_CORE, "infra_dxcc_sec_core", + "dxcc_sel", 27), + GATE_INFRA1(CLK_INFRA_DXCC_AO, "infra_dxcc_ao", + "dxcc_sel", 28), + GATE_INFRA1(CLK_INFRA_DEVMPU_BCLK, "infra_devmpu_bclk", + "axi_sel", 30), + GATE_INFRA1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m", + "f_f26m_ck", 31), + /* INFRA2 */ + GATE_INFRA2(CLK_INFRA_IRTX, "infra_irtx", + "f_f26m_ck", 0), + GATE_INFRA2(CLK_INFRA_USB, "infra_usb", + "usb_top_sel", 1), + GATE_INFRA2(CLK_INFRA_DISP_PWM, "infra_disppwm", + "axi_sel", 2), + GATE_INFRA2(CLK_INFRA_CLDMA_BCLK, "infra_cldma_bclk", + "axi_sel", 3), + GATE_INFRA2(CLK_INFRA_AUDIO_26M_BCLK, "infra_audio_26m_bclk", + "f_f26m_ck", 4), + GATE_INFRA2(CLK_INFRA_SPI1, "infra_spi1", + "spi_sel", 6), + GATE_INFRA2(CLK_INFRA_I2C4, "infra_i2c4", + "i2c_sel", 7), + GATE_INFRA2(CLK_INFRA_MODEM_TEMP_SHARE, "infra_md_tmp_share", + "f_f26m_ck", 8), + GATE_INFRA2(CLK_INFRA_SPI2, "infra_spi2", + "spi_sel", 9), + GATE_INFRA2(CLK_INFRA_SPI3, "infra_spi3", + "spi_sel", 10), + GATE_INFRA2(CLK_INFRA_UNIPRO_SCK, "infra_unipro_sck", + "ssusb_top_xhci_sel", 11), + GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick", + "fufs_sel", 12), + GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck", + "fufs_sel", 13), + GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk", + "axi_sel", 14), + GATE_INFRA2(CLK_INFRA_SSPM, "infra_sspm", + "sspm_sel", 15), + GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist", + "axi_sel", 16), + GATE_INFRA2(CLK_INFRA_SSPM_BUS_HCLK, "infra_sspm_bus_hclk", + "axi_sel", 17), + GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5", + "i2c_sel", 18), + GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter", + "i2c_sel", 19), + GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm", + "i2c_sel", 20), + GATE_INFRA2(CLK_INFRA_I2C1_ARBITER, "infra_i2c1_arbiter", + "i2c_sel", 21), + 
GATE_INFRA2(CLK_INFRA_I2C1_IMM, "infra_i2c1_imm", + "i2c_sel", 22), + GATE_INFRA2(CLK_INFRA_I2C2_ARBITER, "infra_i2c2_arbiter", + "i2c_sel", 23), + GATE_INFRA2(CLK_INFRA_I2C2_IMM, "infra_i2c2_imm", + "i2c_sel", 24), + GATE_INFRA2(CLK_INFRA_SPI4, "infra_spi4", + "spi_sel", 25), + GATE_INFRA2(CLK_INFRA_SPI5, "infra_spi5", + "spi_sel", 26), + GATE_INFRA2(CLK_INFRA_CQ_DMA, "infra_cqdma", + "axi_sel", 27), + GATE_INFRA2(CLK_INFRA_UFS, "infra_ufs", + "fufs_sel", 28), + GATE_INFRA2(CLK_INFRA_AES_UFSFDE, "infra_aes_ufsfde", + "faes_ufsfde_sel", 29), + GATE_INFRA2(CLK_INFRA_UFS_TICK, "infra_ufs_tick", + "fufs_sel", 30), + /* INFRA3 */ + GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self", + "msdc50_0_sel", 0), + GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self", + "msdc50_0_sel", 1), + GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self", + "msdc50_0_sel", 2), + GATE_INFRA3(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self", + "f_f26m_ck", 3), + GATE_INFRA3(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", + "f_f26m_ck", 4), + GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", + "axi_sel", 5), + GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", + "i2c_sel", 6), + GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", + "msdc50_hclk_sel", 7), + GATE_INFRA3(CLK_INFRA_MD_MSDC0, "infra_md_msdc0", + "msdc50_hclk_sel", 8), + GATE_INFRA3(CLK_INFRA_CCIF2_AP, "infra_ccif2_ap", + "axi_sel", 16), + GATE_INFRA3(CLK_INFRA_CCIF2_MD, "infra_ccif2_md", + "axi_sel", 17), + GATE_INFRA3(CLK_INFRA_CCIF3_AP, "infra_ccif3_ap", + "axi_sel", 18), + GATE_INFRA3(CLK_INFRA_CCIF3_MD, "infra_ccif3_md", + "axi_sel", 19), + GATE_INFRA3(CLK_INFRA_SEJ_F13M, "infra_sej_f13m", + "f_f26m_ck", 20), + GATE_INFRA3(CLK_INFRA_AES_BCLK, "infra_aes_bclk", + "axi_sel", 21), + GATE_INFRA3(CLK_INFRA_I2C7, "infra_i2c7", + "i2c_sel", 22), + GATE_INFRA3(CLK_INFRA_I2C8, "infra_i2c8", + "i2c_sel", 23), + GATE_INFRA3(CLK_INFRA_FBIST2FPC, "infra_fbist2fpc", + "msdc50_0_sel", 24), +}; + +static const struct mtk_gate_regs apmixed_cg_regs = { + .set_ofs = 0x20, + .clr_ofs = 0x20, + .sta_ofs = 0x20, +}; + +#define GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, _flags) \ + GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs, \ + _shift, &mtk_clk_gate_ops_no_setclr_inv, _flags) + +#define GATE_APMIXED(_id, _name, _parent, _shift) \ + GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, 0) + +/* + * CRITICAL CLOCK: + * apmixed_appll26m is the topmost clock gate of all PLLs. 
+ */ +static const struct mtk_gate apmixed_clks[] = { + /* AUDIO0 */ + GATE_APMIXED(CLK_APMIXED_SSUSB_26M, "apmixed_ssusb26m", + "f_f26m_ck", 4), + GATE_APMIXED_FLAGS(CLK_APMIXED_APPLL_26M, "apmixed_appll26m", + "f_f26m_ck", 5, CLK_IS_CRITICAL), + GATE_APMIXED(CLK_APMIXED_MIPIC0_26M, "apmixed_mipic026m", + "f_f26m_ck", 6), + GATE_APMIXED(CLK_APMIXED_MDPLLGP_26M, "apmixed_mdpll26m", + "f_f26m_ck", 7), + GATE_APMIXED(CLK_APMIXED_MMSYS_26M, "apmixed_mmsys26m", + "f_f26m_ck", 8), + GATE_APMIXED(CLK_APMIXED_UFS_26M, "apmixed_ufs26m", + "f_f26m_ck", 9), + GATE_APMIXED(CLK_APMIXED_MIPIC1_26M, "apmixed_mipic126m", + "f_f26m_ck", 11), + GATE_APMIXED(CLK_APMIXED_MEMPLL_26M, "apmixed_mempll26m", + "f_f26m_ck", 13), + GATE_APMIXED(CLK_APMIXED_CLKSQ_LVPLL_26M, "apmixed_lvpll26m", + "f_f26m_ck", 14), + GATE_APMIXED(CLK_APMIXED_MIPID0_26M, "apmixed_mipid026m", + "f_f26m_ck", 16), + GATE_APMIXED(CLK_APMIXED_MIPID1_26M, "apmixed_mipid126m", + "f_f26m_ck", 17), +}; + +#define MT8183_PLL_FMAX (3800UL * MHZ) +#define MT8183_PLL_FMIN (1500UL * MHZ) + +#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \ + _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \ + _pd_shift, _tuner_reg, _tuner_en_reg, \ + _tuner_en_bit, _pcw_reg, _pcw_shift, \ + _pcw_chg_reg, _div_table) { \ + .id = _id, \ + .name = _name, \ + .reg = _reg, \ + .pwr_reg = _pwr_reg, \ + .en_mask = _en_mask, \ + .flags = _flags, \ + .rst_bar_mask = _rst_bar_mask, \ + .fmax = MT8183_PLL_FMAX, \ + .fmin = MT8183_PLL_FMIN, \ + .pcwbits = _pcwbits, \ + .pcwibits = _pcwibits, \ + .pd_reg = _pd_reg, \ + .pd_shift = _pd_shift, \ + .tuner_reg = _tuner_reg, \ + .tuner_en_reg = _tuner_en_reg, \ + .tuner_en_bit = _tuner_en_bit, \ + .pcw_reg = _pcw_reg, \ + .pcw_shift = _pcw_shift, \ + .pcw_chg_reg = _pcw_chg_reg, \ + .div_table = _div_table, \ + } + +#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \ + _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \ + _pd_shift, _tuner_reg, _tuner_en_reg, \ + _tuner_en_bit, _pcw_reg, _pcw_shift, \ + _pcw_chg_reg) \ + PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \ + _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \ + _pd_shift, _tuner_reg, _tuner_en_reg, \ + _tuner_en_bit, _pcw_reg, _pcw_shift, \ + _pcw_chg_reg, NULL) + +static const struct mtk_pll_div_table armpll_div_table[] = { + { .div = 0, .freq = MT8183_PLL_FMAX }, + { .div = 1, .freq = 1500 * MHZ }, + { .div = 2, .freq = 750 * MHZ }, + { .div = 3, .freq = 375 * MHZ }, + { .div = 4, .freq = 187500000 }, + { } /* sentinel */ +}; + +static const struct mtk_pll_div_table mfgpll_div_table[] = { + { .div = 0, .freq = MT8183_PLL_FMAX }, + { .div = 1, .freq = 1600 * MHZ }, + { .div = 2, .freq = 800 * MHZ }, + { .div = 3, .freq = 400 * MHZ }, + { .div = 4, .freq = 200 * MHZ }, + { } /* sentinel */ +}; + +static const struct mtk_pll_data plls[] = { + PLL_B(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0x00000001, + HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0204, 24, 0x0, 0x0, 0, + 0x0204, 0, 0, armpll_div_table), + PLL_B(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x0210, 0x021C, 0x00000001, + HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0214, 24, 0x0, 0x0, 0, + 0x0214, 0, 0, armpll_div_table), + PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0290, 0x029C, 0x00000001, + HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0294, 24, 0x0, 0x0, 0, + 0x0294, 0, 0), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0x00000001, + HAVE_RST_BAR, BIT(24), 22, 8, 0x0224, 24, 0x0, 0x0, 0, + 0x0224, 0, 0), + PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0230, 0x023C, 0x00000001, + HAVE_RST_BAR, BIT(24), 22, 8, 
0x0234, 24, 0x0, 0x0, 0, + 0x0234, 0, 0), + PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0x00000001, + 0, 0, 22, 8, 0x0244, 24, 0x0, 0x0, 0, 0x0244, 0, 0, + mfgpll_div_table), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000001, + 0, 0, 22, 8, 0x0254, 24, 0x0, 0x0, 0, 0x0254, 0, 0), + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0260, 0x026C, 0x00000001, + 0, 0, 22, 8, 0x0264, 24, 0x0, 0x0, 0, 0x0264, 0, 0), + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0270, 0x027C, 0x00000001, + HAVE_RST_BAR, BIT(23), 22, 8, 0x0274, 24, 0x0, 0x0, 0, + 0x0274, 0, 0), + PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000001, + 0, 0, 32, 8, 0x02A0, 1, 0x02A8, 0x0014, 0, 0x02A4, 0, 0x02A0), + PLL(CLK_APMIXED_APLL2, "apll2", 0x02b4, 0x02c4, 0x00000001, + 0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4), +}; + +static int clk_mt8183_apmixed_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); + + mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); + + mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static int clk_mt8183_top_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + void __iomem *base; + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + + mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), + clk_data); + + mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); + + mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), + node, &mt8183_clk_lock, clk_data); + + mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes), + base, &mt8183_clk_lock, clk_data); + + mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), + base, &mt8183_clk_lock, clk_data); + + mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static int clk_mt8183_infra_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + + clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); + + mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), + clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static int clk_mt8183_mcu_probe(struct platform_device *pdev) +{ + struct clk_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + void __iomem *base; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK); + + mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base, + &mt8183_clk_lock, clk_data); + + return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +static const struct of_device_id of_match_clk_mt8183[] = { + { + .compatible = "mediatek,mt8183-apmixedsys", + .data = clk_mt8183_apmixed_probe, + }, { + .compatible = "mediatek,mt8183-topckgen", + .data = clk_mt8183_top_probe, + }, { + .compatible = "mediatek,mt8183-infracfg", + .data = clk_mt8183_infra_probe, + }, { + 
.compatible = "mediatek,mt8183-mcucfg", + .data = clk_mt8183_mcu_probe, + }, { + /* sentinel */ + } +}; + +static int clk_mt8183_probe(struct platform_device *pdev) +{ + int (*clk_probe)(struct platform_device *pdev); + int r; + + clk_probe = of_device_get_match_data(&pdev->dev); + if (!clk_probe) + return -EINVAL; + + r = clk_probe(pdev); + if (r) + dev_err(&pdev->dev, + "could not register clock provider: %s: %d\n", + pdev->name, r); + + return r; +} + +static struct platform_driver clk_mt8183_drv = { + .probe = clk_mt8183_probe, + .driver = { + .name = "clk-mt8183", + .of_match_table = of_match_clk_mt8183, + }, +}; + +static int __init clk_mt8183_init(void) +{ + return platform_driver_register(&clk_mt8183_drv); +} + +arch_initcall(clk_mt8183_init); diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c new file mode 100644 index 000000000000..26fe43cc9ea2 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8516.c @@ -0,0 +1,815 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: James Liao <jamesjj.liao@mediatek.com> + * Fabien Parent <fparent@baylibre.com> + */ + +#include <linux/delay.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/mfd/syscon.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8516-clk.h> + +static DEFINE_SPINLOCK(mt8516_clk_lock); + +static const struct mtk_fixed_clk fixed_clks[] __initconst = { + FIXED_CLK(CLK_TOP_CLK_NULL, "clk_null", NULL, 0), + FIXED_CLK(CLK_TOP_I2S_INFRA_BCK, "i2s_infra_bck", "clk_null", 26000000), + FIXED_CLK(CLK_TOP_MEMPLL, "mempll", "clk26m", 800000000), +}; + +static const struct mtk_fixed_factor top_divs[] __initconst = { + FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1), + FACTOR(CLK_TOP_MAINPLL_D2, "mainpll_d2", "mainpll", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4), + FACTOR(CLK_TOP_MAINPLL_D8, "mainpll_d8", "mainpll", 1, 8), + FACTOR(CLK_TOP_MAINPLL_D16, "mainpll_d16", "mainpll", 1, 16), + FACTOR(CLK_TOP_MAINPLL_D11, "mainpll_d11", "mainpll", 1, 11), + FACTOR(CLK_TOP_MAINPLL_D22, "mainpll_d22", "mainpll", 1, 22), + FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3), + FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6), + FACTOR(CLK_TOP_MAINPLL_D12, "mainpll_d12", "mainpll", 1, 12), + FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5), + FACTOR(CLK_TOP_MAINPLL_D10, "mainpll_d10", "mainpll", 1, 10), + FACTOR(CLK_TOP_MAINPLL_D20, "mainpll_d20", "mainpll", 1, 20), + FACTOR(CLK_TOP_MAINPLL_D40, "mainpll_d40", "mainpll", 1, 40), + FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7), + FACTOR(CLK_TOP_MAINPLL_D14, "mainpll_d14", "mainpll", 1, 14), + FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_D8, "univpll_d8", "univpll", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_D16, "univpll_d16", "univpll", 1, 16), + FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3), + FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6), + FACTOR(CLK_TOP_UNIVPLL_D12, "univpll_d12", "univpll", 1, 12), + FACTOR(CLK_TOP_UNIVPLL_D24, "univpll_d24", "univpll", 1, 24), + FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5), + FACTOR(CLK_TOP_UNIVPLL_D20, "univpll_d20", "univpll", 1, 20), + FACTOR(CLK_TOP_MMPLL380M, "mmpll380m", "mmpll", 1, 1), + FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2), + FACTOR(CLK_TOP_MMPLL_200M, "mmpll_200m", "mmpll", 1, 3), + 
FACTOR(CLK_TOP_USB_PHY48M, "usb_phy48m_ck", "univpll", 1, 26), + FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1), + FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2), + FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "rg_apll1_d2_en", 1, 2), + FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "rg_apll1_d4_en", 1, 2), + FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1), + FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1, 2), + FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "rg_apll2_d2_en", 1, 2), + FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "rg_apll2_d4_en", 1, 2), + FACTOR(CLK_TOP_CLK26M, "clk26m_ck", "clk26m", 1, 1), + FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "clk26m", 1, 2), + FACTOR(CLK_TOP_AHB_INFRA_D2, "ahb_infra_d2", "ahb_infra_sel", 1, 2), + FACTOR(CLK_TOP_NFI1X, "nfi1x_ck", "nfi2x_pad_sel", 1, 2), + FACTOR(CLK_TOP_ETH_D2, "eth_d2_ck", "eth_sel", 1, 2), +}; + +static const char * const uart0_parents[] __initconst = { + "clk26m_ck", + "univpll_d24" +}; + +static const char * const ahb_infra_parents[] __initconst = { + "clk_null", + "clk26m_ck", + "mainpll_d11", + "clk_null", + "mainpll_d12", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "mainpll_d10" +}; + +static const char * const msdc0_parents[] __initconst = { + "clk26m_ck", + "univpll_d6", + "mainpll_d8", + "univpll_d8", + "mainpll_d16", + "mmpll_200m", + "mainpll_d12", + "mmpll_d2" +}; + +static const char * const uart1_parents[] __initconst = { + "clk26m_ck", + "univpll_d24" +}; + +static const char * const msdc1_parents[] __initconst = { + "clk26m_ck", + "univpll_d6", + "mainpll_d8", + "univpll_d8", + "mainpll_d16", + "mmpll_200m", + "mainpll_d12", + "mmpll_d2" +}; + +static const char * const pmicspi_parents[] __initconst = { + "univpll_d20", + "usb_phy48m_ck", + "univpll_d16", + "clk26m_ck" +}; + +static const char * const qaxi_aud26m_parents[] __initconst = { + "clk26m_ck", + "ahb_infra_sel" +}; + +static const char * const aud_intbus_parents[] __initconst = { + "clk_null", + "clk26m_ck", + "mainpll_d22", + "clk_null", + "mainpll_d11" +}; + +static const char * const nfi2x_pad_parents[] __initconst = { + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk26m_ck", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "mainpll_d12", + "mainpll_d8", + "clk_null", + "mainpll_d6", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "mainpll_d4", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "clk_null", + "mainpll_d10", + "mainpll_d7", + "clk_null", + "mainpll_d5" +}; + +static const char * const nfi1x_pad_parents[] __initconst = { + "ahb_infra_sel", + "nfi1x_ck" +}; + +static const char * const ddrphycfg_parents[] __initconst = { + "clk26m_ck", + "mainpll_d16" +}; + +static const 
char * const usb_78m_parents[] __initconst = { + "clk_null", + "clk26m_ck", + "univpll_d16", + "clk_null", + "mainpll_d20" +}; + +static const char * const spinor_parents[] __initconst = { + "clk26m_d2", + "clk26m_ck", + "mainpll_d40", + "univpll_d24", + "univpll_d20", + "mainpll_d20", + "mainpll_d16", + "univpll_d12" +}; + +static const char * const msdc2_parents[] __initconst = { + "clk26m_ck", + "univpll_d6", + "mainpll_d8", + "univpll_d8", + "mainpll_d16", + "mmpll_200m", + "mainpll_d12", + "mmpll_d2" +}; + +static const char * const eth_parents[] __initconst = { + "clk26m_ck", + "mainpll_d40", + "univpll_d24", + "univpll_d20", + "mainpll_d20" +}; + +static const char * const aud1_parents[] __initconst = { + "clk26m_ck", + "apll1_ck" +}; + +static const char * const aud2_parents[] __initconst = { + "clk26m_ck", + "apll2_ck" +}; + +static const char * const aud_engen1_parents[] __initconst = { + "clk26m_ck", + "rg_apll1_d2_en", + "rg_apll1_d4_en", + "rg_apll1_d8_en" +}; + +static const char * const aud_engen2_parents[] __initconst = { + "clk26m_ck", + "rg_apll2_d2_en", + "rg_apll2_d4_en", + "rg_apll2_d8_en" +}; + +static const char * const i2c_parents[] __initconst = { + "clk26m_ck", + "univpll_d20", + "univpll_d16", + "univpll_d12" +}; + +static const char * const aud_i2s0_m_parents[] __initconst = { + "rg_aud1", + "rg_aud2" +}; + +static const char * const pwm_parents[] __initconst = { + "clk26m_ck", + "univpll_d12" +}; + +static const char * const spi_parents[] __initconst = { + "clk26m_ck", + "univpll_d12", + "univpll_d8", + "univpll_d6" +}; + +static const char * const aud_spdifin_parents[] __initconst = { + "clk26m_ck", + "univpll_d2" +}; + +static const char * const uart2_parents[] __initconst = { + "clk26m_ck", + "univpll_d24" +}; + +static const char * const bsi_parents[] __initconst = { + "clk26m_ck", + "mainpll_d10", + "mainpll_d12", + "mainpll_d20" +}; + +static const char * const dbg_atclk_parents[] __initconst = { + "clk_null", + "clk26m_ck", + "mainpll_d5", + "clk_null", + "univpll_d5" +}; + +static const char * const csw_nfiecc_parents[] __initconst = { + "clk_null", + "mainpll_d7", + "mainpll_d6", + "clk_null", + "mainpll_d5" +}; + +static const char * const nfiecc_parents[] __initconst = { + "clk_null", + "nfi2x_pad_sel", + "mainpll_d4", + "clk_null", + "csw_nfiecc_sel" +}; + +static struct mtk_composite top_muxes[] __initdata = { + /* CLK_MUX_SEL0 */ + MUX(CLK_TOP_UART0_SEL, "uart0_sel", uart0_parents, + 0x000, 0, 1), + MUX(CLK_TOP_AHB_INFRA_SEL, "ahb_infra_sel", ahb_infra_parents, + 0x000, 4, 4), + MUX(CLK_TOP_MSDC0_SEL, "msdc0_sel", msdc0_parents, + 0x000, 11, 3), + MUX(CLK_TOP_UART1_SEL, "uart1_sel", uart1_parents, + 0x000, 19, 1), + MUX(CLK_TOP_MSDC1_SEL, "msdc1_sel", msdc1_parents, + 0x000, 20, 3), + MUX(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents, + 0x000, 24, 2), + MUX(CLK_TOP_QAXI_AUD26M_SEL, "qaxi_aud26m_sel", qaxi_aud26m_parents, + 0x000, 26, 1), + MUX(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents, + 0x000, 27, 3), + /* CLK_MUX_SEL1 */ + MUX(CLK_TOP_NFI2X_PAD_SEL, "nfi2x_pad_sel", nfi2x_pad_parents, + 0x004, 0, 7), + MUX(CLK_TOP_NFI1X_PAD_SEL, "nfi1x_pad_sel", nfi1x_pad_parents, + 0x004, 7, 1), + MUX(CLK_TOP_USB_78M_SEL, "usb_78m_sel", usb_78m_parents, + 0x004, 20, 3), + /* CLK_MUX_SEL8 */ + MUX(CLK_TOP_SPINOR_SEL, "spinor_sel", spinor_parents, + 0x040, 0, 3), + MUX(CLK_TOP_MSDC2_SEL, "msdc2_sel", msdc2_parents, + 0x040, 3, 3), + MUX(CLK_TOP_ETH_SEL, "eth_sel", eth_parents, + 0x040, 6, 3), + MUX(CLK_TOP_AUD1_SEL, "aud1_sel", 
aud1_parents, + 0x040, 22, 1), + MUX(CLK_TOP_AUD2_SEL, "aud2_sel", aud2_parents, + 0x040, 23, 1), + MUX(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel", aud_engen1_parents, + 0x040, 24, 2), + MUX(CLK_TOP_AUD_ENGEN2_SEL, "aud_engen2_sel", aud_engen2_parents, + 0x040, 26, 2), + MUX(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents, + 0x040, 28, 2), + /* CLK_SEL_9 */ + MUX(CLK_TOP_AUD_I2S0_M_SEL, "aud_i2s0_m_sel", aud_i2s0_m_parents, + 0x044, 12, 1), + MUX(CLK_TOP_AUD_I2S1_M_SEL, "aud_i2s1_m_sel", aud_i2s0_m_parents, + 0x044, 13, 1), + MUX(CLK_TOP_AUD_I2S2_M_SEL, "aud_i2s2_m_sel", aud_i2s0_m_parents, + 0x044, 14, 1), + MUX(CLK_TOP_AUD_I2S3_M_SEL, "aud_i2s3_m_sel", aud_i2s0_m_parents, + 0x044, 15, 1), + MUX(CLK_TOP_AUD_I2S4_M_SEL, "aud_i2s4_m_sel", aud_i2s0_m_parents, + 0x044, 16, 1), + MUX(CLK_TOP_AUD_I2S5_M_SEL, "aud_i2s5_m_sel", aud_i2s0_m_parents, + 0x044, 17, 1), + MUX(CLK_TOP_AUD_SPDIF_B_SEL, "aud_spdif_b_sel", aud_i2s0_m_parents, + 0x044, 18, 1), + /* CLK_MUX_SEL13 */ + MUX(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, + 0x07c, 0, 1), + MUX(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, + 0x07c, 1, 2), + MUX(CLK_TOP_AUD_SPDIFIN_SEL, "aud_spdifin_sel", aud_spdifin_parents, + 0x07c, 3, 1), + MUX(CLK_TOP_UART2_SEL, "uart2_sel", uart2_parents, + 0x07c, 4, 1), + MUX(CLK_TOP_BSI_SEL, "bsi_sel", bsi_parents, + 0x07c, 5, 2), + MUX(CLK_TOP_DBG_ATCLK_SEL, "dbg_atclk_sel", dbg_atclk_parents, + 0x07c, 7, 3), + MUX(CLK_TOP_CSW_NFIECC_SEL, "csw_nfiecc_sel", csw_nfiecc_parents, + 0x07c, 10, 3), + MUX(CLK_TOP_NFIECC_SEL, "nfiecc_sel", nfiecc_parents, + 0x07c, 13, 3), +}; + +static const char * const ifr_mux1_parents[] __initconst = { + "clk26m_ck", + "armpll", + "univpll", + "mainpll_d2" +}; + +static const char * const ifr_eth_25m_parents[] __initconst = { + "eth_d2_ck", + "rg_eth" +}; + +static const char * const ifr_i2c0_parents[] __initconst = { + "ahb_infra_d2", + "rg_i2c" +}; + +static const struct mtk_composite ifr_muxes[] __initconst = { + MUX(CLK_IFR_MUX1_SEL, "ifr_mux1_sel", ifr_mux1_parents, 0x000, + 2, 2), + MUX(CLK_IFR_ETH_25M_SEL, "ifr_eth_25m_sel", ifr_eth_25m_parents, 0x080, + 0, 1), + MUX(CLK_IFR_I2C0_SEL, "ifr_i2c0_sel", ifr_i2c0_parents, 0x080, + 1, 1), + MUX(CLK_IFR_I2C1_SEL, "ifr_i2c1_sel", ifr_i2c0_parents, 0x080, + 2, 1), + MUX(CLK_IFR_I2C2_SEL, "ifr_i2c2_sel", ifr_i2c0_parents, 0x080, + 3, 1), +}; + +#define DIV_ADJ(_id, _name, _parent, _reg, _shift, _width) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .div_reg = _reg, \ + .div_shift = _shift, \ + .div_width = _width, \ +} + +static const struct mtk_clk_divider top_adj_divs[] = { + DIV_ADJ(CLK_TOP_APLL12_CK_DIV0, "apll12_ck_div0", "aud_i2s0_m_sel", + 0x0048, 0, 8), + DIV_ADJ(CLK_TOP_APLL12_CK_DIV1, "apll12_ck_div1", "aud_i2s1_m_sel", + 0x0048, 8, 8), + DIV_ADJ(CLK_TOP_APLL12_CK_DIV2, "apll12_ck_div2", "aud_i2s2_m_sel", + 0x0048, 16, 8), + DIV_ADJ(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "aud_i2s3_m_sel", + 0x0048, 24, 8), + DIV_ADJ(CLK_TOP_APLL12_CK_DIV4, "apll12_ck_div4", "aud_i2s4_m_sel", + 0x004c, 0, 8), + DIV_ADJ(CLK_TOP_APLL12_CK_DIV4B, "apll12_ck_div4b", "apll12_div4", + 0x004c, 8, 8), + DIV_ADJ(CLK_TOP_APLL12_CK_DIV5, "apll12_ck_div5", "aud_i2s5_m_sel", + 0x004c, 16, 8), + DIV_ADJ(CLK_TOP_APLL12_CK_DIV5B, "apll12_ck_div5b", "apll12_div5", + 0x004c, 24, 8), + DIV_ADJ(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "aud_spdif_b_sel", + 0x0078, 0, 8), +}; + +static const struct mtk_gate_regs top1_cg_regs = { + .set_ofs = 0x54, + .clr_ofs = 0x84, + .sta_ofs = 0x24, +}; + +static const struct mtk_gate_regs top2_cg_regs = { + .set_ofs = 
0x6c, + .clr_ofs = 0x9c, + .sta_ofs = 0x3c, +}; + +static const struct mtk_gate_regs top3_cg_regs = { + .set_ofs = 0xa0, + .clr_ofs = 0xb0, + .sta_ofs = 0x70, +}; + +static const struct mtk_gate_regs top4_cg_regs = { + .set_ofs = 0xa4, + .clr_ofs = 0xb4, + .sta_ofs = 0x74, +}; + +static const struct mtk_gate_regs top5_cg_regs = { + .set_ofs = 0x44, + .clr_ofs = 0x44, + .sta_ofs = 0x44, +}; + +#define GATE_TOP1(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &top1_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +#define GATE_TOP2(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &top2_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +#define GATE_TOP2_I(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &top2_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr_inv, \ + } + +#define GATE_TOP3(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &top3_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +#define GATE_TOP4_I(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &top4_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr_inv, \ + } + +#define GATE_TOP5(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &top5_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr, \ + } + +static const struct mtk_gate top_clks[] __initconst = { + /* TOP1 */ + GATE_TOP1(CLK_TOP_THEM, "them", "ahb_infra_sel", 1), + GATE_TOP1(CLK_TOP_APDMA, "apdma", "ahb_infra_sel", 2), + GATE_TOP1(CLK_TOP_I2C0, "i2c0", "ifr_i2c0_sel", 3), + GATE_TOP1(CLK_TOP_I2C1, "i2c1", "ifr_i2c1_sel", 4), + GATE_TOP1(CLK_TOP_AUXADC1, "auxadc1", "ahb_infra_sel", 5), + GATE_TOP1(CLK_TOP_NFI, "nfi", "nfi1x_pad_sel", 6), + GATE_TOP1(CLK_TOP_NFIECC, "nfiecc", "rg_nfiecc", 7), + GATE_TOP1(CLK_TOP_DEBUGSYS, "debugsys", "rg_dbg_atclk", 8), + GATE_TOP1(CLK_TOP_PWM, "pwm", "ahb_infra_sel", 9), + GATE_TOP1(CLK_TOP_UART0, "uart0", "uart0_sel", 10), + GATE_TOP1(CLK_TOP_UART1, "uart1", "uart1_sel", 11), + GATE_TOP1(CLK_TOP_BTIF, "btif", "ahb_infra_sel", 12), + GATE_TOP1(CLK_TOP_USB, "usb", "usb_78m", 13), + GATE_TOP1(CLK_TOP_FLASHIF_26M, "flashif_26m", "clk26m_ck", 14), + GATE_TOP1(CLK_TOP_AUXADC2, "auxadc2", "ahb_infra_sel", 15), + GATE_TOP1(CLK_TOP_I2C2, "i2c2", "ifr_i2c2_sel", 16), + GATE_TOP1(CLK_TOP_MSDC0, "msdc0", "msdc0_sel", 17), + GATE_TOP1(CLK_TOP_MSDC1, "msdc1", "msdc1_sel", 18), + GATE_TOP1(CLK_TOP_NFI2X, "nfi2x", "nfi2x_pad_sel", 19), + GATE_TOP1(CLK_TOP_PMICWRAP_AP, "pwrap_ap", "clk26m_ck", 20), + GATE_TOP1(CLK_TOP_SEJ, "sej", "ahb_infra_sel", 21), + GATE_TOP1(CLK_TOP_MEMSLP_DLYER, "memslp_dlyer", "clk26m_ck", 22), + GATE_TOP1(CLK_TOP_SPI, "spi", "spi_sel", 23), + GATE_TOP1(CLK_TOP_APXGPT, "apxgpt", "clk26m_ck", 24), + GATE_TOP1(CLK_TOP_AUDIO, "audio", "clk26m_ck", 25), + GATE_TOP1(CLK_TOP_PMICWRAP_MD, "pwrap_md", "clk26m_ck", 27), + GATE_TOP1(CLK_TOP_PMICWRAP_CONN, "pwrap_conn", "clk26m_ck", 28), + GATE_TOP1(CLK_TOP_PMICWRAP_26M, "pwrap_26m", "clk26m_ck", 29), + GATE_TOP1(CLK_TOP_AUX_ADC, "aux_adc", "clk26m_ck", 30), + GATE_TOP1(CLK_TOP_AUX_TP, "aux_tp", "clk26m_ck", 31), + /* TOP2 */ + GATE_TOP2(CLK_TOP_MSDC2, "msdc2", "ahb_infra_sel", 0), + GATE_TOP2(CLK_TOP_RBIST, "rbist", "univpll_d12", 1), + 
GATE_TOP2(CLK_TOP_NFI_BUS, "nfi_bus", "ahb_infra_sel", 2), + GATE_TOP2(CLK_TOP_GCE, "gce", "ahb_infra_sel", 4), + GATE_TOP2(CLK_TOP_TRNG, "trng", "ahb_infra_sel", 5), + GATE_TOP2(CLK_TOP_SEJ_13M, "sej_13m", "clk26m_ck", 6), + GATE_TOP2(CLK_TOP_AES, "aes", "ahb_infra_sel", 7), + GATE_TOP2(CLK_TOP_PWM_B, "pwm_b", "rg_pwm_infra", 8), + GATE_TOP2(CLK_TOP_PWM1_FB, "pwm1_fb", "rg_pwm_infra", 9), + GATE_TOP2(CLK_TOP_PWM2_FB, "pwm2_fb", "rg_pwm_infra", 10), + GATE_TOP2(CLK_TOP_PWM3_FB, "pwm3_fb", "rg_pwm_infra", 11), + GATE_TOP2(CLK_TOP_PWM4_FB, "pwm4_fb", "rg_pwm_infra", 12), + GATE_TOP2(CLK_TOP_PWM5_FB, "pwm5_fb", "rg_pwm_infra", 13), + GATE_TOP2(CLK_TOP_USB_1P, "usb_1p", "usb_78m", 14), + GATE_TOP2(CLK_TOP_FLASHIF_FREERUN, "flashif_freerun", "ahb_infra_sel", + 15), + GATE_TOP2(CLK_TOP_66M_ETH, "eth_66m", "ahb_infra_d2", 19), + GATE_TOP2(CLK_TOP_133M_ETH, "eth_133m", "ahb_infra_sel", 20), + GATE_TOP2(CLK_TOP_FETH_25M, "feth_25m", "ifr_eth_25m_sel", 21), + GATE_TOP2(CLK_TOP_FETH_50M, "feth_50m", "rg_eth", 22), + GATE_TOP2(CLK_TOP_FLASHIF_AXI, "flashif_axi", "ahb_infra_sel", 23), + GATE_TOP2(CLK_TOP_USBIF, "usbif", "ahb_infra_sel", 24), + GATE_TOP2(CLK_TOP_UART2, "uart2", "rg_uart2", 25), + GATE_TOP2(CLK_TOP_BSI, "bsi", "ahb_infra_sel", 26), + GATE_TOP2_I(CLK_TOP_MSDC0_INFRA, "msdc0_infra", "msdc0", 28), + GATE_TOP2_I(CLK_TOP_MSDC1_INFRA, "msdc1_infra", "msdc1", 29), + GATE_TOP2_I(CLK_TOP_MSDC2_INFRA, "msdc2_infra", "rg_msdc2", 30), + GATE_TOP2(CLK_TOP_USB_78M, "usb_78m", "usb_78m_sel", 31), + /* TOP3 */ + GATE_TOP3(CLK_TOP_RG_SPINOR, "rg_spinor", "spinor_sel", 0), + GATE_TOP3(CLK_TOP_RG_MSDC2, "rg_msdc2", "msdc2_sel", 1), + GATE_TOP3(CLK_TOP_RG_ETH, "rg_eth", "eth_sel", 2), + GATE_TOP3(CLK_TOP_RG_AUD1, "rg_aud1", "aud1_sel", 8), + GATE_TOP3(CLK_TOP_RG_AUD2, "rg_aud2", "aud2_sel", 9), + GATE_TOP3(CLK_TOP_RG_AUD_ENGEN1, "rg_aud_engen1", "aud_engen1_sel", 10), + GATE_TOP3(CLK_TOP_RG_AUD_ENGEN2, "rg_aud_engen2", "aud_engen2_sel", 11), + GATE_TOP3(CLK_TOP_RG_I2C, "rg_i2c", "i2c_sel", 12), + GATE_TOP3(CLK_TOP_RG_PWM_INFRA, "rg_pwm_infra", "pwm_sel", 13), + GATE_TOP3(CLK_TOP_RG_AUD_SPDIF_IN, "rg_aud_spdif_in", "aud_spdifin_sel", + 14), + GATE_TOP3(CLK_TOP_RG_UART2, "rg_uart2", "uart2_sel", 15), + GATE_TOP3(CLK_TOP_RG_BSI, "rg_bsi", "bsi_sel", 16), + GATE_TOP3(CLK_TOP_RG_DBG_ATCLK, "rg_dbg_atclk", "dbg_atclk_sel", 17), + GATE_TOP3(CLK_TOP_RG_NFIECC, "rg_nfiecc", "nfiecc_sel", 18), + /* TOP4 */ + GATE_TOP4_I(CLK_TOP_RG_APLL1_D2_EN, "rg_apll1_d2_en", "apll1_d2", 8), + GATE_TOP4_I(CLK_TOP_RG_APLL1_D4_EN, "rg_apll1_d4_en", "apll1_d4", 9), + GATE_TOP4_I(CLK_TOP_RG_APLL1_D8_EN, "rg_apll1_d8_en", "apll1_d8", 10), + GATE_TOP4_I(CLK_TOP_RG_APLL2_D2_EN, "rg_apll2_d2_en", "apll2_d2", 11), + GATE_TOP4_I(CLK_TOP_RG_APLL2_D4_EN, "rg_apll2_d4_en", "apll2_d4", 12), + GATE_TOP4_I(CLK_TOP_RG_APLL2_D8_EN, "rg_apll2_d8_en", "apll2_d8", 13), + /* TOP5 */ + GATE_TOP5(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll12_ck_div0", 0), + GATE_TOP5(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll12_ck_div1", 1), + GATE_TOP5(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll12_ck_div2", 2), + GATE_TOP5(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll12_ck_div3", 3), + GATE_TOP5(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll12_ck_div4", 4), + GATE_TOP5(CLK_TOP_APLL12_DIV4B, "apll12_div4b", "apll12_ck_div4b", 5), + GATE_TOP5(CLK_TOP_APLL12_DIV5, "apll12_div5", "apll12_ck_div5", 6), + GATE_TOP5(CLK_TOP_APLL12_DIV5B, "apll12_div5b", "apll12_ck_div5b", 7), + GATE_TOP5(CLK_TOP_APLL12_DIV6, "apll12_div6", "apll12_ck_div6", 8), +}; + +static void __init 
mtk_topckgen_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + int r; + void __iomem *base; + + base = of_iomap(node, 0); + if (!base) { + pr_err("%s(): ioremap failed\n", __func__); + return; + } + + clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + + mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), + clk_data); + mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data); + + mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); + mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, + &mt8516_clk_lock, clk_data); + mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs), + base, &mt8516_clk_lock, clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); +} +CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8516-topckgen", mtk_topckgen_init); + +static void __init mtk_infracfg_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + int r; + void __iomem *base; + + base = of_iomap(node, 0); + if (!base) { + pr_err("%s(): ioremap failed\n", __func__); + return; + } + + clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK); + + mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base, + &mt8516_clk_lock, clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); +} +CLK_OF_DECLARE(mtk_infracfg, "mediatek,mt8516-infracfg", mtk_infracfg_init); + +#define MT8516_PLL_FMAX (1502UL * MHZ) + +#define CON0_MT8516_RST_BAR BIT(27) + +#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \ + _pcw_shift, _div_table) { \ + .id = _id, \ + .name = _name, \ + .reg = _reg, \ + .pwr_reg = _pwr_reg, \ + .en_mask = _en_mask, \ + .flags = _flags, \ + .rst_bar_mask = CON0_MT8516_RST_BAR, \ + .fmax = MT8516_PLL_FMAX, \ + .pcwbits = _pcwbits, \ + .pd_reg = _pd_reg, \ + .pd_shift = _pd_shift, \ + .tuner_reg = _tuner_reg, \ + .pcw_reg = _pcw_reg, \ + .pcw_shift = _pcw_shift, \ + .div_table = _div_table, \ + } + +#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \ + _pcw_shift) \ + PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \ + _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \ + NULL) + +static const struct mtk_pll_div_table mmpll_div_table[] = { + { .div = 0, .freq = MT8516_PLL_FMAX }, + { .div = 1, .freq = 1000000000 }, + { .div = 2, .freq = 604500000 }, + { .div = 3, .freq = 253500000 }, + { .div = 4, .freq = 126750000 }, + { } /* sentinel */ +}; + +static const struct mtk_pll_data plls[] = { + PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0x00000001, 0, + 21, 0x0104, 24, 0, 0x0104, 0), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0x00000001, + HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0), + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000001, + HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0), + PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0x00000001, 0, + 21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table), + PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0x00000001, 0, + 31, 0x0180, 1, 0x0194, 0x0184, 0), + PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0x00000001, 0, + 31, 0x01A0, 1, 0x01B4, 0x01A4, 0), +}; + +static void __init mtk_apmixedsys_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + void __iomem *base; + int r; 
+ + base = of_iomap(node, 0); + if (!base) { + pr_err("%s(): ioremap failed\n", __func__); + return; + } + + clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); + + mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + +} +CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8516-apmixedsys", + mtk_apmixedsys_init); diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h index fb27b5bf30d9..33ab1731482f 100644 --- a/drivers/clk/mediatek/clk-mtk.h +++ b/drivers/clk/mediatek/clk-mtk.h @@ -227,10 +227,13 @@ struct mtk_pll_data { unsigned int flags; const struct clk_ops *ops; u32 rst_bar_mask; + unsigned long fmin; unsigned long fmax; int pcwbits; + int pcwibits; uint32_t pcw_reg; int pcw_shift; + uint32_t pcw_chg_reg; const struct mtk_pll_div_table *div_table; const char *parent_name; }; diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c new file mode 100644 index 000000000000..76f9cd039195 --- /dev/null +++ b/drivers/clk/mediatek/clk-mux.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. + * Author: Owen Chen <owen.chen@mediatek.com> + */ + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/mfd/syscon.h> + +#include "clk-mtk.h" +#include "clk-mux.h" + +static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw) +{ + return container_of(hw, struct mtk_clk_mux, hw); +} + +static int mtk_clk_mux_enable(struct clk_hw *hw) +{ + struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); + u32 mask = BIT(mux->data->gate_shift); + + return regmap_update_bits(mux->regmap, mux->data->mux_ofs, + mask, ~mask); +} + +static void mtk_clk_mux_disable(struct clk_hw *hw) +{ + struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); + u32 mask = BIT(mux->data->gate_shift); + + regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask, mask); +} + +static int mtk_clk_mux_enable_setclr(struct clk_hw *hw) +{ + struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); + + return regmap_write(mux->regmap, mux->data->clr_ofs, + BIT(mux->data->gate_shift)); +} + +static void mtk_clk_mux_disable_setclr(struct clk_hw *hw) +{ + struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); + + regmap_write(mux->regmap, mux->data->set_ofs, + BIT(mux->data->gate_shift)); +} + +static int mtk_clk_mux_is_enabled(struct clk_hw *hw) +{ + struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); + u32 val; + + regmap_read(mux->regmap, mux->data->mux_ofs, &val); + + return (val & BIT(mux->data->gate_shift)) == 0; +} + +static u8 mtk_clk_mux_get_parent(struct clk_hw *hw) +{ + struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); + u32 mask = GENMASK(mux->data->mux_width - 1, 0); + u32 val; + + regmap_read(mux->regmap, mux->data->mux_ofs, &val); + val = (val >> mux->data->mux_shift) & mask; + + return val; +} + +static int mtk_clk_mux_set_parent_lock(struct clk_hw *hw, u8 index) +{ + struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); + u32 mask = GENMASK(mux->data->mux_width - 1, 0); + unsigned long flags = 0; + + if (mux->lock) + spin_lock_irqsave(mux->lock, flags); + else + __acquire(mux->lock); + + regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask, + index << mux->data->mux_shift); + + if (mux->lock) + spin_unlock_irqrestore(mux->lock, flags); + else + __release(mux->lock); + + return 0; +} + +static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index) +{ + struct mtk_clk_mux *mux = 
to_mtk_clk_mux(hw); + u32 mask = GENMASK(mux->data->mux_width - 1, 0); + u32 val, orig; + unsigned long flags = 0; + + if (mux->lock) + spin_lock_irqsave(mux->lock, flags); + else + __acquire(mux->lock); + + regmap_read(mux->regmap, mux->data->mux_ofs, &orig); + val = (orig & ~(mask << mux->data->mux_shift)) + | (index << mux->data->mux_shift); + + if (val != orig) { + regmap_write(mux->regmap, mux->data->clr_ofs, + mask << mux->data->mux_shift); + regmap_write(mux->regmap, mux->data->set_ofs, + index << mux->data->mux_shift); + + if (mux->data->upd_shift >= 0) + regmap_write(mux->regmap, mux->data->upd_ofs, + BIT(mux->data->upd_shift)); + } + + if (mux->lock) + spin_unlock_irqrestore(mux->lock, flags); + else + __release(mux->lock); + + return 0; +} + +const struct clk_ops mtk_mux_ops = { + .get_parent = mtk_clk_mux_get_parent, + .set_parent = mtk_clk_mux_set_parent_lock, +}; + +const struct clk_ops mtk_mux_clr_set_upd_ops = { + .get_parent = mtk_clk_mux_get_parent, + .set_parent = mtk_clk_mux_set_parent_setclr_lock, +}; + +const struct clk_ops mtk_mux_gate_ops = { + .enable = mtk_clk_mux_enable, + .disable = mtk_clk_mux_disable, + .is_enabled = mtk_clk_mux_is_enabled, + .get_parent = mtk_clk_mux_get_parent, + .set_parent = mtk_clk_mux_set_parent_lock, +}; + +const struct clk_ops mtk_mux_gate_clr_set_upd_ops = { + .enable = mtk_clk_mux_enable_setclr, + .disable = mtk_clk_mux_disable_setclr, + .is_enabled = mtk_clk_mux_is_enabled, + .get_parent = mtk_clk_mux_get_parent, + .set_parent = mtk_clk_mux_set_parent_setclr_lock, +}; + +struct clk *mtk_clk_register_mux(const struct mtk_mux *mux, + struct regmap *regmap, + spinlock_t *lock) +{ + struct mtk_clk_mux *clk_mux; + struct clk_init_data init; + struct clk *clk; + + clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL); + if (!clk_mux) + return ERR_PTR(-ENOMEM); + + init.name = mux->name; + init.flags = mux->flags | CLK_SET_RATE_PARENT; + init.parent_names = mux->parent_names; + init.num_parents = mux->num_parents; + init.ops = mux->ops; + + clk_mux->regmap = regmap; + clk_mux->data = mux; + clk_mux->lock = lock; + clk_mux->hw.init = &init; + + clk = clk_register(NULL, &clk_mux->hw); + if (IS_ERR(clk)) { + kfree(clk_mux); + return clk; + } + + return clk; +} + +int mtk_clk_register_muxes(const struct mtk_mux *muxes, + int num, struct device_node *node, + spinlock_t *lock, + struct clk_onecell_data *clk_data) +{ + struct regmap *regmap; + struct clk *clk; + int i; + + regmap = syscon_node_to_regmap(node); + if (IS_ERR(regmap)) { + pr_err("Cannot find regmap for %pOF: %ld\n", node, + PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + + for (i = 0; i < num; i++) { + const struct mtk_mux *mux = &muxes[i]; + + if (IS_ERR_OR_NULL(clk_data->clks[mux->id])) { + clk = mtk_clk_register_mux(mux, regmap, lock); + + if (IS_ERR(clk)) { + pr_err("Failed to register clk %s: %ld\n", + mux->name, PTR_ERR(clk)); + continue; + } + + clk_data->clks[mux->id] = clk; + } + } + + return 0; +} diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h new file mode 100644 index 000000000000..f5625f4d9e6c --- /dev/null +++ b/drivers/clk/mediatek/clk-mux.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Owen Chen <owen.chen@mediatek.com> + */ + +#ifndef __DRV_CLK_MTK_MUX_H +#define __DRV_CLK_MTK_MUX_H + +#include <linux/clk-provider.h> + +struct mtk_clk_mux { + struct clk_hw hw; + struct regmap *regmap; + const struct mtk_mux *data; + spinlock_t *lock; +}; + +struct mtk_mux { + int id; + const char *name; + const char * const *parent_names; + unsigned int flags; + + u32 mux_ofs; + u32 set_ofs; + u32 clr_ofs; + u32 upd_ofs; + + u8 mux_shift; + u8 mux_width; + u8 gate_shift; + s8 upd_shift; + + const struct clk_ops *ops; + + signed char num_parents; +}; + +extern const struct clk_ops mtk_mux_ops; +extern const struct clk_ops mtk_mux_clr_set_upd_ops; +extern const struct clk_ops mtk_mux_gate_ops; +extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops; + +#define GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \ + _mux_set_ofs, _mux_clr_ofs, _shift, _width, \ + _gate, _upd_ofs, _upd, _flags, _ops) { \ + .id = _id, \ + .name = _name, \ + .mux_ofs = _mux_ofs, \ + .set_ofs = _mux_set_ofs, \ + .clr_ofs = _mux_clr_ofs, \ + .upd_ofs = _upd_ofs, \ + .mux_shift = _shift, \ + .mux_width = _width, \ + .gate_shift = _gate, \ + .upd_shift = _upd, \ + .parent_names = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .flags = _flags, \ + .ops = &_ops, \ + } + +#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \ + _mux_set_ofs, _mux_clr_ofs, _shift, _width, \ + _gate, _upd_ofs, _upd, _flags) \ + GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \ + _mux_set_ofs, _mux_clr_ofs, _shift, _width, \ + _gate, _upd_ofs, _upd, _flags, \ + mtk_mux_gate_clr_set_upd_ops) + +#define MUX_GATE_CLR_SET_UPD(_id, _name, _parents, _mux_ofs, \ + _mux_set_ofs, _mux_clr_ofs, _shift, _width, \ + _gate, _upd_ofs, _upd) \ + MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, \ + _mux_ofs, _mux_set_ofs, _mux_clr_ofs, _shift, \ + _width, _gate, _upd_ofs, _upd, \ + CLK_SET_RATE_PARENT) + +struct clk *mtk_clk_register_mux(const struct mtk_mux *mux, + struct regmap *regmap, + spinlock_t *lock); + +int mtk_clk_register_muxes(const struct mtk_mux *muxes, + int num, struct device_node *node, + spinlock_t *lock, + struct clk_onecell_data *clk_data); + +#endif /* __DRV_CLK_MTK_MUX_H */ diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c index f54e4015b0b1..8d556fc99fed 100644 --- a/drivers/clk/mediatek/clk-pll.c +++ b/drivers/clk/mediatek/clk-pll.c @@ -27,11 +27,13 @@ #define CON0_BASE_EN BIT(0) #define CON0_PWR_ON BIT(0) #define CON0_ISO_EN BIT(1) -#define CON0_PCW_CHG BIT(31) +#define PCW_CHG_MASK BIT(31) #define AUDPLL_TUNER_EN BIT(31) #define POSTDIV_MASK 0x7 + +/* default 7 bits integer, can be overridden with pcwibits. */ #define INTEGER_BITS 7 /* @@ -49,6 +51,7 @@ struct mtk_clk_pll { void __iomem *tuner_addr; void __iomem *tuner_en_addr; void __iomem *pcw_addr; + void __iomem *pcw_chg_addr; const struct mtk_pll_data *data; }; @@ -68,12 +71,15 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin, u32 pcw, int postdiv) { int pcwbits = pll->data->pcwbits; - int pcwfbits; + int pcwfbits = 0; + int ibits; u64 vco; u8 c = 0; /* The fractional part of the PLL divider. */ - pcwfbits = pcwbits > INTEGER_BITS ? pcwbits - INTEGER_BITS : 0; + ibits = pll->data->pcwibits ? 
pll->data->pcwibits : INTEGER_BITS; + if (pcwbits > ibits) + pcwfbits = pcwbits - ibits; vco = (u64)fin * pcw; @@ -88,13 +94,39 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin, return ((unsigned long)vco + postdiv - 1) / postdiv; } +static void __mtk_pll_tuner_enable(struct mtk_clk_pll *pll) +{ + u32 r; + + if (pll->tuner_en_addr) { + r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit); + writel(r, pll->tuner_en_addr); + } else if (pll->tuner_addr) { + r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN; + writel(r, pll->tuner_addr); + } +} + +static void __mtk_pll_tuner_disable(struct mtk_clk_pll *pll) +{ + u32 r; + + if (pll->tuner_en_addr) { + r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit); + writel(r, pll->tuner_en_addr); + } else if (pll->tuner_addr) { + r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN; + writel(r, pll->tuner_addr); + } +} + static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, int postdiv) { - u32 con1, val; - int pll_en; + u32 chg, val; - pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN; + /* disable tuner */ + __mtk_pll_tuner_disable(pll); /* set postdiv */ val = readl(pll->pd_addr); @@ -112,18 +144,15 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, pll->data->pcw_shift); val |= pcw << pll->data->pcw_shift; writel(val, pll->pcw_addr); - - con1 = readl(pll->base_addr + REG_CON1); - - if (pll_en) - con1 |= CON0_PCW_CHG; - - writel(con1, pll->base_addr + REG_CON1); + chg = readl(pll->pcw_chg_addr) | PCW_CHG_MASK; + writel(chg, pll->pcw_chg_addr); if (pll->tuner_addr) - writel(con1 + 1, pll->tuner_addr); + writel(val + 1, pll->tuner_addr); + + /* restore tuner_en */ + __mtk_pll_tuner_enable(pll); - if (pll_en) - udelay(20); + udelay(20); } /* @@ -138,9 +167,10 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv, u32 freq, u32 fin) { - unsigned long fmin = 1000 * MHZ; + unsigned long fmin = pll->data->fmin ? pll->data->fmin : (1000 * MHZ); const struct mtk_pll_div_table *div_table = pll->data->div_table; u64 _pcw; + int ibits; u32 val; if (freq > pll->data->fmax) @@ -164,7 +194,8 @@ static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv, } /* _pcw = freq * postdiv / fin * 2^pcwfbits */ - _pcw = ((u64)freq << val) << (pll->data->pcwbits - INTEGER_BITS); + ibits = pll->data->pcwibits ? 
pll->data->pcwibits : INTEGER_BITS; + _pcw = ((u64)freq << val) << (pll->data->pcwbits - ibits); do_div(_pcw, fin); *pcw = (u32)_pcw; @@ -228,13 +259,7 @@ static int mtk_pll_prepare(struct clk_hw *hw) r |= pll->data->en_mask; writel(r, pll->base_addr + REG_CON0); - if (pll->tuner_en_addr) { - r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit); - writel(r, pll->tuner_en_addr); - } else if (pll->tuner_addr) { - r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN; - writel(r, pll->tuner_addr); - } + __mtk_pll_tuner_enable(pll); udelay(20); @@ -258,13 +283,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw) writel(r, pll->base_addr + REG_CON0); } - if (pll->tuner_en_addr) { - r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit); - writel(r, pll->tuner_en_addr); - } else if (pll->tuner_addr) { - r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN; - writel(r, pll->tuner_addr); - } + __mtk_pll_tuner_disable(pll); r = readl(pll->base_addr + REG_CON0); r &= ~CON0_BASE_EN; @@ -302,6 +321,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data, pll->pwr_addr = base + data->pwr_reg; pll->pd_addr = base + data->pd_reg; pll->pcw_addr = base + data->pcw_reg; + if (data->pcw_chg_reg) + pll->pcw_chg_addr = base + data->pcw_chg_reg; + else + pll->pcw_chg_addr = pll->base_addr + REG_CON1; if (data->tuner_reg) pll->tuner_addr = base + data->tuner_reg; if (data->tuner_en_reg) diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c index 7ab200b6c3bf..8028ff6f6610 100644 --- a/drivers/clk/meson/axg-audio.c +++ b/drivers/clk/meson/axg-audio.c @@ -20,18 +20,18 @@ #include "clk-phase.h" #include "sclk-div.h" -#define AXG_MST_IN_COUNT 8 -#define AXG_SLV_SCLK_COUNT 10 -#define AXG_SLV_LRCLK_COUNT 10 +#define AUD_MST_IN_COUNT 8 +#define AUD_SLV_SCLK_COUNT 10 +#define AUD_SLV_LRCLK_COUNT 10 -#define AXG_AUD_GATE(_name, _reg, _bit, _pname, _iflags) \ -struct clk_regmap axg_##_name = { \ +#define AUD_GATE(_name, _reg, _bit, _pname, _iflags) \ +struct clk_regmap aud_##_name = { \ .data = &(struct clk_regmap_gate_data){ \ .offset = (_reg), \ .bit_idx = (_bit), \ }, \ .hw.init = &(struct clk_init_data) { \ - .name = "axg_"#_name, \ + .name = "aud_"#_name, \ .ops = &clk_regmap_gate_ops, \ .parent_names = (const char *[]){ _pname }, \ .num_parents = 1, \ @@ -39,8 +39,8 @@ struct clk_regmap axg_##_name = { \ }, \ } -#define AXG_AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pnames, _iflags) \ -struct clk_regmap axg_##_name = { \ +#define AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pnames, _iflags) \ +struct clk_regmap aud_##_name = { \ .data = &(struct clk_regmap_mux_data){ \ .offset = (_reg), \ .mask = (_mask), \ @@ -48,7 +48,7 @@ struct clk_regmap axg_##_name = { \ .flags = (_dflags), \ }, \ .hw.init = &(struct clk_init_data){ \ - .name = "axg_"#_name, \ + .name = "aud_"#_name, \ .ops = &clk_regmap_mux_ops, \ .parent_names = (_pnames), \ .num_parents = ARRAY_SIZE(_pnames), \ @@ -56,8 +56,8 @@ struct clk_regmap axg_##_name = { \ }, \ } -#define AXG_AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) \ -struct clk_regmap axg_##_name = { \ +#define AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) \ +struct clk_regmap aud_##_name = { \ .data = &(struct clk_regmap_div_data){ \ .offset = (_reg), \ .shift = (_shift), \ @@ -65,7 +65,7 @@ struct clk_regmap axg_##_name = { \ .flags = (_dflags), \ }, \ .hw.init = &(struct clk_init_data){ \ - .name = "axg_"#_name, \ + .name = "aud_"#_name, \ .ops = &clk_regmap_divider_ops, \ .parent_names = (const char *[]) { 
_pname }, \ .num_parents = 1, \ @@ -73,109 +73,113 @@ struct clk_regmap axg_##_name = { \ }, \ } -#define AXG_PCLK_GATE(_name, _bit) \ - AXG_AUD_GATE(_name, AUDIO_CLK_GATE_EN, _bit, "axg_audio_pclk", 0) +#define AUD_PCLK_GATE(_name, _bit) \ + AUD_GATE(_name, AUDIO_CLK_GATE_EN, _bit, "audio_pclk", 0) /* Audio peripheral clocks */ -static AXG_PCLK_GATE(ddr_arb, 0); -static AXG_PCLK_GATE(pdm, 1); -static AXG_PCLK_GATE(tdmin_a, 2); -static AXG_PCLK_GATE(tdmin_b, 3); -static AXG_PCLK_GATE(tdmin_c, 4); -static AXG_PCLK_GATE(tdmin_lb, 5); -static AXG_PCLK_GATE(tdmout_a, 6); -static AXG_PCLK_GATE(tdmout_b, 7); -static AXG_PCLK_GATE(tdmout_c, 8); -static AXG_PCLK_GATE(frddr_a, 9); -static AXG_PCLK_GATE(frddr_b, 10); -static AXG_PCLK_GATE(frddr_c, 11); -static AXG_PCLK_GATE(toddr_a, 12); -static AXG_PCLK_GATE(toddr_b, 13); -static AXG_PCLK_GATE(toddr_c, 14); -static AXG_PCLK_GATE(loopback, 15); -static AXG_PCLK_GATE(spdifin, 16); -static AXG_PCLK_GATE(spdifout, 17); -static AXG_PCLK_GATE(resample, 18); -static AXG_PCLK_GATE(power_detect, 19); +static AUD_PCLK_GATE(ddr_arb, 0); +static AUD_PCLK_GATE(pdm, 1); +static AUD_PCLK_GATE(tdmin_a, 2); +static AUD_PCLK_GATE(tdmin_b, 3); +static AUD_PCLK_GATE(tdmin_c, 4); +static AUD_PCLK_GATE(tdmin_lb, 5); +static AUD_PCLK_GATE(tdmout_a, 6); +static AUD_PCLK_GATE(tdmout_b, 7); +static AUD_PCLK_GATE(tdmout_c, 8); +static AUD_PCLK_GATE(frddr_a, 9); +static AUD_PCLK_GATE(frddr_b, 10); +static AUD_PCLK_GATE(frddr_c, 11); +static AUD_PCLK_GATE(toddr_a, 12); +static AUD_PCLK_GATE(toddr_b, 13); +static AUD_PCLK_GATE(toddr_c, 14); +static AUD_PCLK_GATE(loopback, 15); +static AUD_PCLK_GATE(spdifin, 16); +static AUD_PCLK_GATE(spdifout, 17); +static AUD_PCLK_GATE(resample, 18); +static AUD_PCLK_GATE(power_detect, 19); +static AUD_PCLK_GATE(spdifout_b, 21); /* Audio Master Clocks */ static const char * const mst_mux_parent_names[] = { - "axg_mst_in0", "axg_mst_in1", "axg_mst_in2", "axg_mst_in3", - "axg_mst_in4", "axg_mst_in5", "axg_mst_in6", "axg_mst_in7", + "aud_mst_in0", "aud_mst_in1", "aud_mst_in2", "aud_mst_in3", + "aud_mst_in4", "aud_mst_in5", "aud_mst_in6", "aud_mst_in7", }; -#define AXG_MST_MUX(_name, _reg, _flag) \ - AXG_AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \ - mst_mux_parent_names, CLK_SET_RATE_PARENT) - -#define AXG_MST_MCLK_MUX(_name, _reg) \ - AXG_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST) - -#define AXG_MST_SYS_MUX(_name, _reg) \ - AXG_MST_MUX(_name, _reg, 0) - -static AXG_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL); -static AXG_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL); -static AXG_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL); -static AXG_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL); -static AXG_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL); -static AXG_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL); -static AXG_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL); -static AXG_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0); -static AXG_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL); -static AXG_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1); - -#define AXG_MST_DIV(_name, _reg, _flag) \ - AXG_AUD_DIV(_name##_div, _reg, 0, 16, _flag, \ - "axg_"#_name"_sel", CLK_SET_RATE_PARENT) \ - -#define AXG_MST_MCLK_DIV(_name, _reg) \ - AXG_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST) - -#define AXG_MST_SYS_DIV(_name, _reg) \ - AXG_MST_DIV(_name, _reg, 0) - -static AXG_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL); -static AXG_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL); -static AXG_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL); -static AXG_MST_MCLK_DIV(mst_d_mclk, 
AUDIO_MCLK_D_CTRL); -static AXG_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL); -static AXG_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL); -static AXG_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL); -static AXG_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0); -static AXG_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL); -static AXG_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1); - -#define AXG_MST_MCLK_GATE(_name, _reg) \ - AXG_AUD_GATE(_name, _reg, 31, "axg_"#_name"_div", \ - CLK_SET_RATE_PARENT) - -static AXG_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL); -static AXG_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL); -static AXG_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL); -static AXG_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL); -static AXG_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL); -static AXG_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL); -static AXG_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL); -static AXG_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL); -static AXG_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0); -static AXG_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1); +#define AUD_MST_MUX(_name, _reg, _flag) \ + AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \ + mst_mux_parent_names, CLK_SET_RATE_PARENT) + +#define AUD_MST_MCLK_MUX(_name, _reg) \ + AUD_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST) + +#define AUD_MST_SYS_MUX(_name, _reg) \ + AUD_MST_MUX(_name, _reg, 0) + +static AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL); +static AUD_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL); +static AUD_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL); +static AUD_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL); +static AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL); +static AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL); +static AUD_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL); +static AUD_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0); +static AUD_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL); +static AUD_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1); +static AUD_MST_MCLK_MUX(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL); + +#define AUD_MST_DIV(_name, _reg, _flag) \ + AUD_DIV(_name##_div, _reg, 0, 16, _flag, \ + "aud_"#_name"_sel", CLK_SET_RATE_PARENT) \ + +#define AUD_MST_MCLK_DIV(_name, _reg) \ + AUD_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST) + +#define AUD_MST_SYS_DIV(_name, _reg) \ + AUD_MST_DIV(_name, _reg, 0) + +static AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL); +static AUD_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL); +static AUD_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL); +static AUD_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL); +static AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL); +static AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL); +static AUD_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL); +static AUD_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0); +static AUD_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL); +static AUD_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1); +static AUD_MST_MCLK_DIV(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL); + +#define AUD_MST_MCLK_GATE(_name, _reg) \ + AUD_GATE(_name, _reg, 31, "aud_"#_name"_div", \ + CLK_SET_RATE_PARENT) + +static AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL); +static AUD_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL); +static AUD_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL); +static AUD_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL); +static AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL); +static AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL); +static AUD_MST_MCLK_GATE(spdifout_clk, 
AUDIO_CLK_SPDIFOUT_CTRL); +static AUD_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL); +static AUD_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0); +static AUD_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1); +static AUD_MST_MCLK_GATE(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL); /* Sample Clocks */ -#define AXG_MST_SCLK_PRE_EN(_name, _reg) \ - AXG_AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \ - "axg_mst_"#_name"_mclk", 0) - -static AXG_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0); -static AXG_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0); -static AXG_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0); -static AXG_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0); -static AXG_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0); -static AXG_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0); - -#define AXG_AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \ +#define AUD_MST_SCLK_PRE_EN(_name, _reg) \ + AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \ + "aud_mst_"#_name"_mclk", 0) + +static AUD_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0); +static AUD_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0); +static AUD_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0); +static AUD_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0); +static AUD_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0); +static AUD_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0); + +#define AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \ _hi_shift, _hi_width, _pname, _iflags) \ -struct clk_regmap axg_##_name = { \ +struct clk_regmap aud_##_name = { \ .data = &(struct meson_sclk_div_data) { \ .div = { \ .reg_off = (_reg), \ @@ -189,7 +193,7 @@ struct clk_regmap axg_##_name = { \ }, \ }, \ .hw.init = &(struct clk_init_data) { \ - .name = "axg_"#_name, \ + .name = "aud_"#_name, \ .ops = &meson_sclk_div_ops, \ .parent_names = (const char *[]) { _pname }, \ .num_parents = 1, \ @@ -197,32 +201,32 @@ struct clk_regmap axg_##_name = { \ }, \ } -#define AXG_MST_SCLK_DIV(_name, _reg) \ - AXG_AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \ - "axg_mst_"#_name"_sclk_pre_en", \ - CLK_SET_RATE_PARENT) - -static AXG_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0); -static AXG_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0); -static AXG_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0); -static AXG_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0); -static AXG_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0); -static AXG_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0); - -#define AXG_MST_SCLK_POST_EN(_name, _reg) \ - AXG_AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \ - "axg_mst_"#_name"_sclk_div", CLK_SET_RATE_PARENT) - -static AXG_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0); -static AXG_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0); -static AXG_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0); -static AXG_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0); -static AXG_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0); -static AXG_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0); - -#define AXG_AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \ +#define AUD_MST_SCLK_DIV(_name, _reg) \ + AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \ + "aud_mst_"#_name"_sclk_pre_en", \ + CLK_SET_RATE_PARENT) + +static AUD_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0); +static AUD_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0); +static AUD_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0); +static AUD_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0); +static AUD_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0); +static AUD_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0); + +#define AUD_MST_SCLK_POST_EN(_name, _reg) \ + AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \ + "aud_mst_"#_name"_sclk_div", 
CLK_SET_RATE_PARENT) + +static AUD_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0); +static AUD_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0); +static AUD_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0); +static AUD_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0); +static AUD_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0); +static AUD_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0); + +#define AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \ _pname, _iflags) \ -struct clk_regmap axg_##_name = { \ +struct clk_regmap aud_##_name = { \ .data = &(struct meson_clk_triphase_data) { \ .ph0 = { \ .reg_off = (_reg), \ @@ -241,7 +245,7 @@ struct clk_regmap axg_##_name = { \ }, \ }, \ .hw.init = &(struct clk_init_data) { \ - .name = "axg_"#_name, \ + .name = "aud_"#_name, \ .ops = &meson_clk_triphase_ops, \ .parent_names = (const char *[]) { _pname }, \ .num_parents = 1, \ @@ -249,87 +253,87 @@ struct clk_regmap axg_##_name = { \ }, \ } -#define AXG_MST_SCLK(_name, _reg) \ - AXG_AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \ - "axg_mst_"#_name"_sclk_post_en", CLK_SET_RATE_PARENT) - -static AXG_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1); -static AXG_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1); -static AXG_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1); -static AXG_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1); -static AXG_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1); -static AXG_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1); - -#define AXG_MST_LRCLK_DIV(_name, _reg) \ - AXG_AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \ - "axg_mst_"#_name"_sclk_post_en", 0) \ - -static AXG_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0); -static AXG_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0); -static AXG_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0); -static AXG_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0); -static AXG_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0); -static AXG_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0); - -#define AXG_MST_LRCLK(_name, _reg) \ - AXG_AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \ - "axg_mst_"#_name"_lrclk_div", CLK_SET_RATE_PARENT) - -static AXG_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1); -static AXG_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1); -static AXG_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1); -static AXG_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1); -static AXG_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1); -static AXG_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1); +#define AUD_MST_SCLK(_name, _reg) \ + AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \ + "aud_mst_"#_name"_sclk_post_en", CLK_SET_RATE_PARENT) + +static AUD_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1); +static AUD_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1); +static AUD_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1); +static AUD_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1); +static AUD_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1); +static AUD_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1); + +#define AUD_MST_LRCLK_DIV(_name, _reg) \ + AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \ + "aud_mst_"#_name"_sclk_post_en", 0) \ + +static AUD_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0); +static AUD_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0); +static AUD_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0); +static AUD_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0); +static AUD_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0); +static AUD_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0); + +#define AUD_MST_LRCLK(_name, _reg) \ + AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \ + "aud_mst_"#_name"_lrclk_div", CLK_SET_RATE_PARENT) + +static AUD_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1); +static AUD_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1); +static AUD_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1); +static 
AUD_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1); +static AUD_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1); +static AUD_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1); static const char * const tdm_sclk_parent_names[] = { - "axg_mst_a_sclk", "axg_mst_b_sclk", "axg_mst_c_sclk", - "axg_mst_d_sclk", "axg_mst_e_sclk", "axg_mst_f_sclk", - "axg_slv_sclk0", "axg_slv_sclk1", "axg_slv_sclk2", - "axg_slv_sclk3", "axg_slv_sclk4", "axg_slv_sclk5", - "axg_slv_sclk6", "axg_slv_sclk7", "axg_slv_sclk8", - "axg_slv_sclk9" + "aud_mst_a_sclk", "aud_mst_b_sclk", "aud_mst_c_sclk", + "aud_mst_d_sclk", "aud_mst_e_sclk", "aud_mst_f_sclk", + "aud_slv_sclk0", "aud_slv_sclk1", "aud_slv_sclk2", + "aud_slv_sclk3", "aud_slv_sclk4", "aud_slv_sclk5", + "aud_slv_sclk6", "aud_slv_sclk7", "aud_slv_sclk8", + "aud_slv_sclk9" }; -#define AXG_TDM_SCLK_MUX(_name, _reg) \ - AXG_AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \ +#define AUD_TDM_SCLK_MUX(_name, _reg) \ + AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \ CLK_MUX_ROUND_CLOSEST, \ tdm_sclk_parent_names, 0) -static AXG_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL); -static AXG_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL); -static AXG_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL); -static AXG_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL); -static AXG_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL); -static AXG_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL); -static AXG_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL); - -#define AXG_TDM_SCLK_PRE_EN(_name, _reg) \ - AXG_AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \ - "axg_tdm"#_name"_sclk_sel", CLK_SET_RATE_PARENT) - -static AXG_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL); -static AXG_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL); -static AXG_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL); -static AXG_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL); -static AXG_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL); -static AXG_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL); -static AXG_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL); - -#define AXG_TDM_SCLK_POST_EN(_name, _reg) \ - AXG_AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \ - "axg_tdm"#_name"_sclk_pre_en", CLK_SET_RATE_PARENT) - -static AXG_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL); -static AXG_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL); -static AXG_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL); -static AXG_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL); -static AXG_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL); -static AXG_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL); -static AXG_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL); - -#define AXG_TDM_SCLK(_name, _reg) \ - struct clk_regmap axg_tdm##_name##_sclk = { \ +static AUD_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL); +static AUD_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL); +static AUD_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL); +static AUD_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL); +static AUD_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL); +static AUD_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL); +static AUD_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL); + +#define AUD_TDM_SCLK_PRE_EN(_name, _reg) \ + AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \ + "aud_tdm"#_name"_sclk_sel", CLK_SET_RATE_PARENT) + +static AUD_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL); +static AUD_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL); +static AUD_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL); +static AUD_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL); +static AUD_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL); +static AUD_TDM_SCLK_PRE_EN(out_b, 
AUDIO_CLK_TDMOUT_B_CTRL); +static AUD_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL); + +#define AUD_TDM_SCLK_POST_EN(_name, _reg) \ + AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \ + "aud_tdm"#_name"_sclk_pre_en", CLK_SET_RATE_PARENT) + +static AUD_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL); +static AUD_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL); +static AUD_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL); +static AUD_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL); +static AUD_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL); +static AUD_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL); +static AUD_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL); + +#define AUD_TDM_SCLK(_name, _reg) \ + struct clk_regmap aud_tdm##_name##_sclk = { \ .data = &(struct meson_clk_phase_data) { \ .ph = { \ .reg_off = (_reg), \ @@ -338,44 +342,83 @@ static AXG_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL); }, \ }, \ .hw.init = &(struct clk_init_data) { \ - .name = "axg_tdm"#_name"_sclk", \ + .name = "aud_tdm"#_name"_sclk", \ .ops = &meson_clk_phase_ops, \ .parent_names = (const char *[]) \ - { "axg_tdm"#_name"_sclk_post_en" }, \ + { "aud_tdm"#_name"_sclk_post_en" }, \ .num_parents = 1, \ .flags = CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT, \ }, \ } -static AXG_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL); -static AXG_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL); -static AXG_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL); -static AXG_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL); -static AXG_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL); -static AXG_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL); -static AXG_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL); +static AUD_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL); +static AUD_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL); +static AUD_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL); +static AUD_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL); +static AUD_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL); +static AUD_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL); +static AUD_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL); static const char * const tdm_lrclk_parent_names[] = { - "axg_mst_a_lrclk", "axg_mst_b_lrclk", "axg_mst_c_lrclk", - "axg_mst_d_lrclk", "axg_mst_e_lrclk", "axg_mst_f_lrclk", - "axg_slv_lrclk0", "axg_slv_lrclk1", "axg_slv_lrclk2", - "axg_slv_lrclk3", "axg_slv_lrclk4", "axg_slv_lrclk5", - "axg_slv_lrclk6", "axg_slv_lrclk7", "axg_slv_lrclk8", - "axg_slv_lrclk9" + "aud_mst_a_lrclk", "aud_mst_b_lrclk", "aud_mst_c_lrclk", + "aud_mst_d_lrclk", "aud_mst_e_lrclk", "aud_mst_f_lrclk", + "aud_slv_lrclk0", "aud_slv_lrclk1", "aud_slv_lrclk2", + "aud_slv_lrclk3", "aud_slv_lrclk4", "aud_slv_lrclk5", + "aud_slv_lrclk6", "aud_slv_lrclk7", "aud_slv_lrclk8", + "aud_slv_lrclk9" }; -#define AXG_TDM_LRLCK(_name, _reg) \ - AXG_AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \ - CLK_MUX_ROUND_CLOSEST, \ - tdm_lrclk_parent_names, 0) +#define AUD_TDM_LRLCK(_name, _reg) \ + AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \ + CLK_MUX_ROUND_CLOSEST, \ + tdm_lrclk_parent_names, 0) + +static AUD_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL); +static AUD_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL); +static AUD_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL); +static AUD_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL); +static AUD_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL); +static AUD_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL); +static AUD_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL); + +/* G12a Pad control */ +#define AUD_TDM_PAD_CTRL(_name, _reg, _shift, _parents) \ + AUD_MUX(tdm_##_name, _reg, 0x7, _shift, 0, _parents, \ + CLK_SET_RATE_NO_REPARENT) + +static const char * const 
mclk_pad_ctrl_parent_names[] = { + "aud_mst_a_mclk", "aud_mst_b_mclk", "aud_mst_c_mclk", + "aud_mst_d_mclk", "aud_mst_e_mclk", "aud_mst_f_mclk", +}; + +static AUD_TDM_PAD_CTRL(mclk_pad_0, AUDIO_MST_PAD_CTRL0, 0, + mclk_pad_ctrl_parent_names); +static AUD_TDM_PAD_CTRL(mclk_pad_1, AUDIO_MST_PAD_CTRL0, 4, + mclk_pad_ctrl_parent_names); + +static const char * const lrclk_pad_ctrl_parent_names[] = { + "aud_mst_a_lrclk", "aud_mst_b_lrclk", "aud_mst_c_lrclk", + "aud_mst_d_lrclk", "aud_mst_e_lrclk", "aud_mst_f_lrclk", +}; -static AXG_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL); -static AXG_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL); -static AXG_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL); -static AXG_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL); -static AXG_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL); -static AXG_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL); -static AXG_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL); +static AUD_TDM_PAD_CTRL(lrclk_pad_0, AUDIO_MST_PAD_CTRL1, 16, + lrclk_pad_ctrl_parent_names); +static AUD_TDM_PAD_CTRL(lrclk_pad_1, AUDIO_MST_PAD_CTRL1, 20, + lrclk_pad_ctrl_parent_names); +static AUD_TDM_PAD_CTRL(lrclk_pad_2, AUDIO_MST_PAD_CTRL1, 24, + lrclk_pad_ctrl_parent_names); + +static const char * const sclk_pad_ctrl_parent_names[] = { + "aud_mst_a_sclk", "aud_mst_b_sclk", "aud_mst_c_sclk", + "aud_mst_d_sclk", "aud_mst_e_sclk", "aud_mst_f_sclk", +}; + +static AUD_TDM_PAD_CTRL(sclk_pad_0, AUDIO_MST_PAD_CTRL1, 0, + sclk_pad_ctrl_parent_names); +static AUD_TDM_PAD_CTRL(sclk_pad_1, AUDIO_MST_PAD_CTRL1, 4, + sclk_pad_ctrl_parent_names); +static AUD_TDM_PAD_CTRL(sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8, + sclk_pad_ctrl_parent_names); /* * Array of all clocks provided by this provider @@ -383,255 +426,416 @@ static AXG_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL); */ static struct clk_hw_onecell_data axg_audio_hw_onecell_data = { .hws = { - [AUD_CLKID_DDR_ARB] = &axg_ddr_arb.hw, - [AUD_CLKID_PDM] = &axg_pdm.hw, - [AUD_CLKID_TDMIN_A] = &axg_tdmin_a.hw, - [AUD_CLKID_TDMIN_B] = &axg_tdmin_b.hw, - [AUD_CLKID_TDMIN_C] = &axg_tdmin_c.hw, - [AUD_CLKID_TDMIN_LB] = &axg_tdmin_lb.hw, - [AUD_CLKID_TDMOUT_A] = &axg_tdmout_a.hw, - [AUD_CLKID_TDMOUT_B] = &axg_tdmout_b.hw, - [AUD_CLKID_TDMOUT_C] = &axg_tdmout_c.hw, - [AUD_CLKID_FRDDR_A] = &axg_frddr_a.hw, - [AUD_CLKID_FRDDR_B] = &axg_frddr_b.hw, - [AUD_CLKID_FRDDR_C] = &axg_frddr_c.hw, - [AUD_CLKID_TODDR_A] = &axg_toddr_a.hw, - [AUD_CLKID_TODDR_B] = &axg_toddr_b.hw, - [AUD_CLKID_TODDR_C] = &axg_toddr_c.hw, - [AUD_CLKID_LOOPBACK] = &axg_loopback.hw, - [AUD_CLKID_SPDIFIN] = &axg_spdifin.hw, - [AUD_CLKID_SPDIFOUT] = &axg_spdifout.hw, - [AUD_CLKID_RESAMPLE] = &axg_resample.hw, - [AUD_CLKID_POWER_DETECT] = &axg_power_detect.hw, - [AUD_CLKID_MST_A_MCLK_SEL] = &axg_mst_a_mclk_sel.hw, - [AUD_CLKID_MST_B_MCLK_SEL] = &axg_mst_b_mclk_sel.hw, - [AUD_CLKID_MST_C_MCLK_SEL] = &axg_mst_c_mclk_sel.hw, - [AUD_CLKID_MST_D_MCLK_SEL] = &axg_mst_d_mclk_sel.hw, - [AUD_CLKID_MST_E_MCLK_SEL] = &axg_mst_e_mclk_sel.hw, - [AUD_CLKID_MST_F_MCLK_SEL] = &axg_mst_f_mclk_sel.hw, - [AUD_CLKID_MST_A_MCLK_DIV] = &axg_mst_a_mclk_div.hw, - [AUD_CLKID_MST_B_MCLK_DIV] = &axg_mst_b_mclk_div.hw, - [AUD_CLKID_MST_C_MCLK_DIV] = &axg_mst_c_mclk_div.hw, - [AUD_CLKID_MST_D_MCLK_DIV] = &axg_mst_d_mclk_div.hw, - [AUD_CLKID_MST_E_MCLK_DIV] = &axg_mst_e_mclk_div.hw, - [AUD_CLKID_MST_F_MCLK_DIV] = &axg_mst_f_mclk_div.hw, - [AUD_CLKID_MST_A_MCLK] = &axg_mst_a_mclk.hw, - [AUD_CLKID_MST_B_MCLK] = &axg_mst_b_mclk.hw, - [AUD_CLKID_MST_C_MCLK] = &axg_mst_c_mclk.hw, - [AUD_CLKID_MST_D_MCLK] = &axg_mst_d_mclk.hw, - [AUD_CLKID_MST_E_MCLK] 
= &axg_mst_e_mclk.hw, - [AUD_CLKID_MST_F_MCLK] = &axg_mst_f_mclk.hw, - [AUD_CLKID_SPDIFOUT_CLK_SEL] = &axg_spdifout_clk_sel.hw, - [AUD_CLKID_SPDIFOUT_CLK_DIV] = &axg_spdifout_clk_div.hw, - [AUD_CLKID_SPDIFOUT_CLK] = &axg_spdifout_clk.hw, - [AUD_CLKID_SPDIFIN_CLK_SEL] = &axg_spdifin_clk_sel.hw, - [AUD_CLKID_SPDIFIN_CLK_DIV] = &axg_spdifin_clk_div.hw, - [AUD_CLKID_SPDIFIN_CLK] = &axg_spdifin_clk.hw, - [AUD_CLKID_PDM_DCLK_SEL] = &axg_pdm_dclk_sel.hw, - [AUD_CLKID_PDM_DCLK_DIV] = &axg_pdm_dclk_div.hw, - [AUD_CLKID_PDM_DCLK] = &axg_pdm_dclk.hw, - [AUD_CLKID_PDM_SYSCLK_SEL] = &axg_pdm_sysclk_sel.hw, - [AUD_CLKID_PDM_SYSCLK_DIV] = &axg_pdm_sysclk_div.hw, - [AUD_CLKID_PDM_SYSCLK] = &axg_pdm_sysclk.hw, - [AUD_CLKID_MST_A_SCLK_PRE_EN] = &axg_mst_a_sclk_pre_en.hw, - [AUD_CLKID_MST_B_SCLK_PRE_EN] = &axg_mst_b_sclk_pre_en.hw, - [AUD_CLKID_MST_C_SCLK_PRE_EN] = &axg_mst_c_sclk_pre_en.hw, - [AUD_CLKID_MST_D_SCLK_PRE_EN] = &axg_mst_d_sclk_pre_en.hw, - [AUD_CLKID_MST_E_SCLK_PRE_EN] = &axg_mst_e_sclk_pre_en.hw, - [AUD_CLKID_MST_F_SCLK_PRE_EN] = &axg_mst_f_sclk_pre_en.hw, - [AUD_CLKID_MST_A_SCLK_DIV] = &axg_mst_a_sclk_div.hw, - [AUD_CLKID_MST_B_SCLK_DIV] = &axg_mst_b_sclk_div.hw, - [AUD_CLKID_MST_C_SCLK_DIV] = &axg_mst_c_sclk_div.hw, - [AUD_CLKID_MST_D_SCLK_DIV] = &axg_mst_d_sclk_div.hw, - [AUD_CLKID_MST_E_SCLK_DIV] = &axg_mst_e_sclk_div.hw, - [AUD_CLKID_MST_F_SCLK_DIV] = &axg_mst_f_sclk_div.hw, - [AUD_CLKID_MST_A_SCLK_POST_EN] = &axg_mst_a_sclk_post_en.hw, - [AUD_CLKID_MST_B_SCLK_POST_EN] = &axg_mst_b_sclk_post_en.hw, - [AUD_CLKID_MST_C_SCLK_POST_EN] = &axg_mst_c_sclk_post_en.hw, - [AUD_CLKID_MST_D_SCLK_POST_EN] = &axg_mst_d_sclk_post_en.hw, - [AUD_CLKID_MST_E_SCLK_POST_EN] = &axg_mst_e_sclk_post_en.hw, - [AUD_CLKID_MST_F_SCLK_POST_EN] = &axg_mst_f_sclk_post_en.hw, - [AUD_CLKID_MST_A_SCLK] = &axg_mst_a_sclk.hw, - [AUD_CLKID_MST_B_SCLK] = &axg_mst_b_sclk.hw, - [AUD_CLKID_MST_C_SCLK] = &axg_mst_c_sclk.hw, - [AUD_CLKID_MST_D_SCLK] = &axg_mst_d_sclk.hw, - [AUD_CLKID_MST_E_SCLK] = &axg_mst_e_sclk.hw, - [AUD_CLKID_MST_F_SCLK] = &axg_mst_f_sclk.hw, - [AUD_CLKID_MST_A_LRCLK_DIV] = &axg_mst_a_lrclk_div.hw, - [AUD_CLKID_MST_B_LRCLK_DIV] = &axg_mst_b_lrclk_div.hw, - [AUD_CLKID_MST_C_LRCLK_DIV] = &axg_mst_c_lrclk_div.hw, - [AUD_CLKID_MST_D_LRCLK_DIV] = &axg_mst_d_lrclk_div.hw, - [AUD_CLKID_MST_E_LRCLK_DIV] = &axg_mst_e_lrclk_div.hw, - [AUD_CLKID_MST_F_LRCLK_DIV] = &axg_mst_f_lrclk_div.hw, - [AUD_CLKID_MST_A_LRCLK] = &axg_mst_a_lrclk.hw, - [AUD_CLKID_MST_B_LRCLK] = &axg_mst_b_lrclk.hw, - [AUD_CLKID_MST_C_LRCLK] = &axg_mst_c_lrclk.hw, - [AUD_CLKID_MST_D_LRCLK] = &axg_mst_d_lrclk.hw, - [AUD_CLKID_MST_E_LRCLK] = &axg_mst_e_lrclk.hw, - [AUD_CLKID_MST_F_LRCLK] = &axg_mst_f_lrclk.hw, - [AUD_CLKID_TDMIN_A_SCLK_SEL] = &axg_tdmin_a_sclk_sel.hw, - [AUD_CLKID_TDMIN_B_SCLK_SEL] = &axg_tdmin_b_sclk_sel.hw, - [AUD_CLKID_TDMIN_C_SCLK_SEL] = &axg_tdmin_c_sclk_sel.hw, - [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &axg_tdmin_lb_sclk_sel.hw, - [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &axg_tdmout_a_sclk_sel.hw, - [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &axg_tdmout_b_sclk_sel.hw, - [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &axg_tdmout_c_sclk_sel.hw, - [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &axg_tdmin_a_sclk_pre_en.hw, - [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &axg_tdmin_b_sclk_pre_en.hw, - [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &axg_tdmin_c_sclk_pre_en.hw, - [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &axg_tdmin_lb_sclk_pre_en.hw, - [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &axg_tdmout_a_sclk_pre_en.hw, - [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &axg_tdmout_b_sclk_pre_en.hw, - [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = 
&axg_tdmout_c_sclk_pre_en.hw, - [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &axg_tdmin_a_sclk_post_en.hw, - [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &axg_tdmin_b_sclk_post_en.hw, - [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &axg_tdmin_c_sclk_post_en.hw, - [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &axg_tdmin_lb_sclk_post_en.hw, - [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &axg_tdmout_a_sclk_post_en.hw, - [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &axg_tdmout_b_sclk_post_en.hw, - [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &axg_tdmout_c_sclk_post_en.hw, - [AUD_CLKID_TDMIN_A_SCLK] = &axg_tdmin_a_sclk.hw, - [AUD_CLKID_TDMIN_B_SCLK] = &axg_tdmin_b_sclk.hw, - [AUD_CLKID_TDMIN_C_SCLK] = &axg_tdmin_c_sclk.hw, - [AUD_CLKID_TDMIN_LB_SCLK] = &axg_tdmin_lb_sclk.hw, - [AUD_CLKID_TDMOUT_A_SCLK] = &axg_tdmout_a_sclk.hw, - [AUD_CLKID_TDMOUT_B_SCLK] = &axg_tdmout_b_sclk.hw, - [AUD_CLKID_TDMOUT_C_SCLK] = &axg_tdmout_c_sclk.hw, - [AUD_CLKID_TDMIN_A_LRCLK] = &axg_tdmin_a_lrclk.hw, - [AUD_CLKID_TDMIN_B_LRCLK] = &axg_tdmin_b_lrclk.hw, - [AUD_CLKID_TDMIN_C_LRCLK] = &axg_tdmin_c_lrclk.hw, - [AUD_CLKID_TDMIN_LB_LRCLK] = &axg_tdmin_lb_lrclk.hw, - [AUD_CLKID_TDMOUT_A_LRCLK] = &axg_tdmout_a_lrclk.hw, - [AUD_CLKID_TDMOUT_B_LRCLK] = &axg_tdmout_b_lrclk.hw, - [AUD_CLKID_TDMOUT_C_LRCLK] = &axg_tdmout_c_lrclk.hw, + [AUD_CLKID_DDR_ARB] = &aud_ddr_arb.hw, + [AUD_CLKID_PDM] = &aud_pdm.hw, + [AUD_CLKID_TDMIN_A] = &aud_tdmin_a.hw, + [AUD_CLKID_TDMIN_B] = &aud_tdmin_b.hw, + [AUD_CLKID_TDMIN_C] = &aud_tdmin_c.hw, + [AUD_CLKID_TDMIN_LB] = &aud_tdmin_lb.hw, + [AUD_CLKID_TDMOUT_A] = &aud_tdmout_a.hw, + [AUD_CLKID_TDMOUT_B] = &aud_tdmout_b.hw, + [AUD_CLKID_TDMOUT_C] = &aud_tdmout_c.hw, + [AUD_CLKID_FRDDR_A] = &aud_frddr_a.hw, + [AUD_CLKID_FRDDR_B] = &aud_frddr_b.hw, + [AUD_CLKID_FRDDR_C] = &aud_frddr_c.hw, + [AUD_CLKID_TODDR_A] = &aud_toddr_a.hw, + [AUD_CLKID_TODDR_B] = &aud_toddr_b.hw, + [AUD_CLKID_TODDR_C] = &aud_toddr_c.hw, + [AUD_CLKID_LOOPBACK] = &aud_loopback.hw, + [AUD_CLKID_SPDIFIN] = &aud_spdifin.hw, + [AUD_CLKID_SPDIFOUT] = &aud_spdifout.hw, + [AUD_CLKID_RESAMPLE] = &aud_resample.hw, + [AUD_CLKID_POWER_DETECT] = &aud_power_detect.hw, + [AUD_CLKID_MST_A_MCLK_SEL] = &aud_mst_a_mclk_sel.hw, + [AUD_CLKID_MST_B_MCLK_SEL] = &aud_mst_b_mclk_sel.hw, + [AUD_CLKID_MST_C_MCLK_SEL] = &aud_mst_c_mclk_sel.hw, + [AUD_CLKID_MST_D_MCLK_SEL] = &aud_mst_d_mclk_sel.hw, + [AUD_CLKID_MST_E_MCLK_SEL] = &aud_mst_e_mclk_sel.hw, + [AUD_CLKID_MST_F_MCLK_SEL] = &aud_mst_f_mclk_sel.hw, + [AUD_CLKID_MST_A_MCLK_DIV] = &aud_mst_a_mclk_div.hw, + [AUD_CLKID_MST_B_MCLK_DIV] = &aud_mst_b_mclk_div.hw, + [AUD_CLKID_MST_C_MCLK_DIV] = &aud_mst_c_mclk_div.hw, + [AUD_CLKID_MST_D_MCLK_DIV] = &aud_mst_d_mclk_div.hw, + [AUD_CLKID_MST_E_MCLK_DIV] = &aud_mst_e_mclk_div.hw, + [AUD_CLKID_MST_F_MCLK_DIV] = &aud_mst_f_mclk_div.hw, + [AUD_CLKID_MST_A_MCLK] = &aud_mst_a_mclk.hw, + [AUD_CLKID_MST_B_MCLK] = &aud_mst_b_mclk.hw, + [AUD_CLKID_MST_C_MCLK] = &aud_mst_c_mclk.hw, + [AUD_CLKID_MST_D_MCLK] = &aud_mst_d_mclk.hw, + [AUD_CLKID_MST_E_MCLK] = &aud_mst_e_mclk.hw, + [AUD_CLKID_MST_F_MCLK] = &aud_mst_f_mclk.hw, + [AUD_CLKID_SPDIFOUT_CLK_SEL] = &aud_spdifout_clk_sel.hw, + [AUD_CLKID_SPDIFOUT_CLK_DIV] = &aud_spdifout_clk_div.hw, + [AUD_CLKID_SPDIFOUT_CLK] = &aud_spdifout_clk.hw, + [AUD_CLKID_SPDIFIN_CLK_SEL] = &aud_spdifin_clk_sel.hw, + [AUD_CLKID_SPDIFIN_CLK_DIV] = &aud_spdifin_clk_div.hw, + [AUD_CLKID_SPDIFIN_CLK] = &aud_spdifin_clk.hw, + [AUD_CLKID_PDM_DCLK_SEL] = &aud_pdm_dclk_sel.hw, + [AUD_CLKID_PDM_DCLK_DIV] = &aud_pdm_dclk_div.hw, + [AUD_CLKID_PDM_DCLK] = &aud_pdm_dclk.hw, + [AUD_CLKID_PDM_SYSCLK_SEL] = 
&aud_pdm_sysclk_sel.hw, + [AUD_CLKID_PDM_SYSCLK_DIV] = &aud_pdm_sysclk_div.hw, + [AUD_CLKID_PDM_SYSCLK] = &aud_pdm_sysclk.hw, + [AUD_CLKID_MST_A_SCLK_PRE_EN] = &aud_mst_a_sclk_pre_en.hw, + [AUD_CLKID_MST_B_SCLK_PRE_EN] = &aud_mst_b_sclk_pre_en.hw, + [AUD_CLKID_MST_C_SCLK_PRE_EN] = &aud_mst_c_sclk_pre_en.hw, + [AUD_CLKID_MST_D_SCLK_PRE_EN] = &aud_mst_d_sclk_pre_en.hw, + [AUD_CLKID_MST_E_SCLK_PRE_EN] = &aud_mst_e_sclk_pre_en.hw, + [AUD_CLKID_MST_F_SCLK_PRE_EN] = &aud_mst_f_sclk_pre_en.hw, + [AUD_CLKID_MST_A_SCLK_DIV] = &aud_mst_a_sclk_div.hw, + [AUD_CLKID_MST_B_SCLK_DIV] = &aud_mst_b_sclk_div.hw, + [AUD_CLKID_MST_C_SCLK_DIV] = &aud_mst_c_sclk_div.hw, + [AUD_CLKID_MST_D_SCLK_DIV] = &aud_mst_d_sclk_div.hw, + [AUD_CLKID_MST_E_SCLK_DIV] = &aud_mst_e_sclk_div.hw, + [AUD_CLKID_MST_F_SCLK_DIV] = &aud_mst_f_sclk_div.hw, + [AUD_CLKID_MST_A_SCLK_POST_EN] = &aud_mst_a_sclk_post_en.hw, + [AUD_CLKID_MST_B_SCLK_POST_EN] = &aud_mst_b_sclk_post_en.hw, + [AUD_CLKID_MST_C_SCLK_POST_EN] = &aud_mst_c_sclk_post_en.hw, + [AUD_CLKID_MST_D_SCLK_POST_EN] = &aud_mst_d_sclk_post_en.hw, + [AUD_CLKID_MST_E_SCLK_POST_EN] = &aud_mst_e_sclk_post_en.hw, + [AUD_CLKID_MST_F_SCLK_POST_EN] = &aud_mst_f_sclk_post_en.hw, + [AUD_CLKID_MST_A_SCLK] = &aud_mst_a_sclk.hw, + [AUD_CLKID_MST_B_SCLK] = &aud_mst_b_sclk.hw, + [AUD_CLKID_MST_C_SCLK] = &aud_mst_c_sclk.hw, + [AUD_CLKID_MST_D_SCLK] = &aud_mst_d_sclk.hw, + [AUD_CLKID_MST_E_SCLK] = &aud_mst_e_sclk.hw, + [AUD_CLKID_MST_F_SCLK] = &aud_mst_f_sclk.hw, + [AUD_CLKID_MST_A_LRCLK_DIV] = &aud_mst_a_lrclk_div.hw, + [AUD_CLKID_MST_B_LRCLK_DIV] = &aud_mst_b_lrclk_div.hw, + [AUD_CLKID_MST_C_LRCLK_DIV] = &aud_mst_c_lrclk_div.hw, + [AUD_CLKID_MST_D_LRCLK_DIV] = &aud_mst_d_lrclk_div.hw, + [AUD_CLKID_MST_E_LRCLK_DIV] = &aud_mst_e_lrclk_div.hw, + [AUD_CLKID_MST_F_LRCLK_DIV] = &aud_mst_f_lrclk_div.hw, + [AUD_CLKID_MST_A_LRCLK] = &aud_mst_a_lrclk.hw, + [AUD_CLKID_MST_B_LRCLK] = &aud_mst_b_lrclk.hw, + [AUD_CLKID_MST_C_LRCLK] = &aud_mst_c_lrclk.hw, + [AUD_CLKID_MST_D_LRCLK] = &aud_mst_d_lrclk.hw, + [AUD_CLKID_MST_E_LRCLK] = &aud_mst_e_lrclk.hw, + [AUD_CLKID_MST_F_LRCLK] = &aud_mst_f_lrclk.hw, + [AUD_CLKID_TDMIN_A_SCLK_SEL] = &aud_tdmin_a_sclk_sel.hw, + [AUD_CLKID_TDMIN_B_SCLK_SEL] = &aud_tdmin_b_sclk_sel.hw, + [AUD_CLKID_TDMIN_C_SCLK_SEL] = &aud_tdmin_c_sclk_sel.hw, + [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &aud_tdmin_lb_sclk_sel.hw, + [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &aud_tdmout_a_sclk_sel.hw, + [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &aud_tdmout_b_sclk_sel.hw, + [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &aud_tdmout_c_sclk_sel.hw, + [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &aud_tdmin_a_sclk_pre_en.hw, + [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &aud_tdmin_b_sclk_pre_en.hw, + [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &aud_tdmin_c_sclk_pre_en.hw, + [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &aud_tdmin_lb_sclk_pre_en.hw, + [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &aud_tdmout_a_sclk_pre_en.hw, + [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &aud_tdmout_b_sclk_pre_en.hw, + [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &aud_tdmout_c_sclk_pre_en.hw, + [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &aud_tdmin_a_sclk_post_en.hw, + [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &aud_tdmin_b_sclk_post_en.hw, + [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &aud_tdmin_c_sclk_post_en.hw, + [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &aud_tdmin_lb_sclk_post_en.hw, + [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &aud_tdmout_a_sclk_post_en.hw, + [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &aud_tdmout_b_sclk_post_en.hw, + [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &aud_tdmout_c_sclk_post_en.hw, + [AUD_CLKID_TDMIN_A_SCLK] = &aud_tdmin_a_sclk.hw, + 
[AUD_CLKID_TDMIN_B_SCLK] = &aud_tdmin_b_sclk.hw, + [AUD_CLKID_TDMIN_C_SCLK] = &aud_tdmin_c_sclk.hw, + [AUD_CLKID_TDMIN_LB_SCLK] = &aud_tdmin_lb_sclk.hw, + [AUD_CLKID_TDMOUT_A_SCLK] = &aud_tdmout_a_sclk.hw, + [AUD_CLKID_TDMOUT_B_SCLK] = &aud_tdmout_b_sclk.hw, + [AUD_CLKID_TDMOUT_C_SCLK] = &aud_tdmout_c_sclk.hw, + [AUD_CLKID_TDMIN_A_LRCLK] = &aud_tdmin_a_lrclk.hw, + [AUD_CLKID_TDMIN_B_LRCLK] = &aud_tdmin_b_lrclk.hw, + [AUD_CLKID_TDMIN_C_LRCLK] = &aud_tdmin_c_lrclk.hw, + [AUD_CLKID_TDMIN_LB_LRCLK] = &aud_tdmin_lb_lrclk.hw, + [AUD_CLKID_TDMOUT_A_LRCLK] = &aud_tdmout_a_lrclk.hw, + [AUD_CLKID_TDMOUT_B_LRCLK] = &aud_tdmout_b_lrclk.hw, + [AUD_CLKID_TDMOUT_C_LRCLK] = &aud_tdmout_c_lrclk.hw, [NR_CLKS] = NULL, }, .num = NR_CLKS, }; -/* Convenience table to populate regmap in .probe() */ -static struct clk_regmap *const axg_audio_clk_regmaps[] = { - &axg_ddr_arb, - &axg_pdm, - &axg_tdmin_a, - &axg_tdmin_b, - &axg_tdmin_c, - &axg_tdmin_lb, - &axg_tdmout_a, - &axg_tdmout_b, - &axg_tdmout_c, - &axg_frddr_a, - &axg_frddr_b, - &axg_frddr_c, - &axg_toddr_a, - &axg_toddr_b, - &axg_toddr_c, - &axg_loopback, - &axg_spdifin, - &axg_spdifout, - &axg_resample, - &axg_power_detect, - &axg_mst_a_mclk_sel, - &axg_mst_b_mclk_sel, - &axg_mst_c_mclk_sel, - &axg_mst_d_mclk_sel, - &axg_mst_e_mclk_sel, - &axg_mst_f_mclk_sel, - &axg_mst_a_mclk_div, - &axg_mst_b_mclk_div, - &axg_mst_c_mclk_div, - &axg_mst_d_mclk_div, - &axg_mst_e_mclk_div, - &axg_mst_f_mclk_div, - &axg_mst_a_mclk, - &axg_mst_b_mclk, - &axg_mst_c_mclk, - &axg_mst_d_mclk, - &axg_mst_e_mclk, - &axg_mst_f_mclk, - &axg_spdifout_clk_sel, - &axg_spdifout_clk_div, - &axg_spdifout_clk, - &axg_spdifin_clk_sel, - &axg_spdifin_clk_div, - &axg_spdifin_clk, - &axg_pdm_dclk_sel, - &axg_pdm_dclk_div, - &axg_pdm_dclk, - &axg_pdm_sysclk_sel, - &axg_pdm_sysclk_div, - &axg_pdm_sysclk, - &axg_mst_a_sclk_pre_en, - &axg_mst_b_sclk_pre_en, - &axg_mst_c_sclk_pre_en, - &axg_mst_d_sclk_pre_en, - &axg_mst_e_sclk_pre_en, - &axg_mst_f_sclk_pre_en, - &axg_mst_a_sclk_div, - &axg_mst_b_sclk_div, - &axg_mst_c_sclk_div, - &axg_mst_d_sclk_div, - &axg_mst_e_sclk_div, - &axg_mst_f_sclk_div, - &axg_mst_a_sclk_post_en, - &axg_mst_b_sclk_post_en, - &axg_mst_c_sclk_post_en, - &axg_mst_d_sclk_post_en, - &axg_mst_e_sclk_post_en, - &axg_mst_f_sclk_post_en, - &axg_mst_a_sclk, - &axg_mst_b_sclk, - &axg_mst_c_sclk, - &axg_mst_d_sclk, - &axg_mst_e_sclk, - &axg_mst_f_sclk, - &axg_mst_a_lrclk_div, - &axg_mst_b_lrclk_div, - &axg_mst_c_lrclk_div, - &axg_mst_d_lrclk_div, - &axg_mst_e_lrclk_div, - &axg_mst_f_lrclk_div, - &axg_mst_a_lrclk, - &axg_mst_b_lrclk, - &axg_mst_c_lrclk, - &axg_mst_d_lrclk, - &axg_mst_e_lrclk, - &axg_mst_f_lrclk, - &axg_tdmin_a_sclk_sel, - &axg_tdmin_b_sclk_sel, - &axg_tdmin_c_sclk_sel, - &axg_tdmin_lb_sclk_sel, - &axg_tdmout_a_sclk_sel, - &axg_tdmout_b_sclk_sel, - &axg_tdmout_c_sclk_sel, - &axg_tdmin_a_sclk_pre_en, - &axg_tdmin_b_sclk_pre_en, - &axg_tdmin_c_sclk_pre_en, - &axg_tdmin_lb_sclk_pre_en, - &axg_tdmout_a_sclk_pre_en, - &axg_tdmout_b_sclk_pre_en, - &axg_tdmout_c_sclk_pre_en, - &axg_tdmin_a_sclk_post_en, - &axg_tdmin_b_sclk_post_en, - &axg_tdmin_c_sclk_post_en, - &axg_tdmin_lb_sclk_post_en, - &axg_tdmout_a_sclk_post_en, - &axg_tdmout_b_sclk_post_en, - &axg_tdmout_c_sclk_post_en, - &axg_tdmin_a_sclk, - &axg_tdmin_b_sclk, - &axg_tdmin_c_sclk, - &axg_tdmin_lb_sclk, - &axg_tdmout_a_sclk, - &axg_tdmout_b_sclk, - &axg_tdmout_c_sclk, - &axg_tdmin_a_lrclk, - &axg_tdmin_b_lrclk, - &axg_tdmin_c_lrclk, - &axg_tdmin_lb_lrclk, - &axg_tdmout_a_lrclk, - &axg_tdmout_b_lrclk, - 
&axg_tdmout_c_lrclk, +/* + * Array of all G12A clocks provided by this provider + * The input clocks of the controller will be populated at runtime + */ +static struct clk_hw_onecell_data g12a_audio_hw_onecell_data = { + .hws = { + [AUD_CLKID_DDR_ARB] = &aud_ddr_arb.hw, + [AUD_CLKID_PDM] = &aud_pdm.hw, + [AUD_CLKID_TDMIN_A] = &aud_tdmin_a.hw, + [AUD_CLKID_TDMIN_B] = &aud_tdmin_b.hw, + [AUD_CLKID_TDMIN_C] = &aud_tdmin_c.hw, + [AUD_CLKID_TDMIN_LB] = &aud_tdmin_lb.hw, + [AUD_CLKID_TDMOUT_A] = &aud_tdmout_a.hw, + [AUD_CLKID_TDMOUT_B] = &aud_tdmout_b.hw, + [AUD_CLKID_TDMOUT_C] = &aud_tdmout_c.hw, + [AUD_CLKID_FRDDR_A] = &aud_frddr_a.hw, + [AUD_CLKID_FRDDR_B] = &aud_frddr_b.hw, + [AUD_CLKID_FRDDR_C] = &aud_frddr_c.hw, + [AUD_CLKID_TODDR_A] = &aud_toddr_a.hw, + [AUD_CLKID_TODDR_B] = &aud_toddr_b.hw, + [AUD_CLKID_TODDR_C] = &aud_toddr_c.hw, + [AUD_CLKID_LOOPBACK] = &aud_loopback.hw, + [AUD_CLKID_SPDIFIN] = &aud_spdifin.hw, + [AUD_CLKID_SPDIFOUT] = &aud_spdifout.hw, + [AUD_CLKID_RESAMPLE] = &aud_resample.hw, + [AUD_CLKID_POWER_DETECT] = &aud_power_detect.hw, + [AUD_CLKID_SPDIFOUT_B] = &aud_spdifout_b.hw, + [AUD_CLKID_MST_A_MCLK_SEL] = &aud_mst_a_mclk_sel.hw, + [AUD_CLKID_MST_B_MCLK_SEL] = &aud_mst_b_mclk_sel.hw, + [AUD_CLKID_MST_C_MCLK_SEL] = &aud_mst_c_mclk_sel.hw, + [AUD_CLKID_MST_D_MCLK_SEL] = &aud_mst_d_mclk_sel.hw, + [AUD_CLKID_MST_E_MCLK_SEL] = &aud_mst_e_mclk_sel.hw, + [AUD_CLKID_MST_F_MCLK_SEL] = &aud_mst_f_mclk_sel.hw, + [AUD_CLKID_MST_A_MCLK_DIV] = &aud_mst_a_mclk_div.hw, + [AUD_CLKID_MST_B_MCLK_DIV] = &aud_mst_b_mclk_div.hw, + [AUD_CLKID_MST_C_MCLK_DIV] = &aud_mst_c_mclk_div.hw, + [AUD_CLKID_MST_D_MCLK_DIV] = &aud_mst_d_mclk_div.hw, + [AUD_CLKID_MST_E_MCLK_DIV] = &aud_mst_e_mclk_div.hw, + [AUD_CLKID_MST_F_MCLK_DIV] = &aud_mst_f_mclk_div.hw, + [AUD_CLKID_MST_A_MCLK] = &aud_mst_a_mclk.hw, + [AUD_CLKID_MST_B_MCLK] = &aud_mst_b_mclk.hw, + [AUD_CLKID_MST_C_MCLK] = &aud_mst_c_mclk.hw, + [AUD_CLKID_MST_D_MCLK] = &aud_mst_d_mclk.hw, + [AUD_CLKID_MST_E_MCLK] = &aud_mst_e_mclk.hw, + [AUD_CLKID_MST_F_MCLK] = &aud_mst_f_mclk.hw, + [AUD_CLKID_SPDIFOUT_CLK_SEL] = &aud_spdifout_clk_sel.hw, + [AUD_CLKID_SPDIFOUT_CLK_DIV] = &aud_spdifout_clk_div.hw, + [AUD_CLKID_SPDIFOUT_CLK] = &aud_spdifout_clk.hw, + [AUD_CLKID_SPDIFOUT_B_CLK_SEL] = &aud_spdifout_b_clk_sel.hw, + [AUD_CLKID_SPDIFOUT_B_CLK_DIV] = &aud_spdifout_b_clk_div.hw, + [AUD_CLKID_SPDIFOUT_B_CLK] = &aud_spdifout_b_clk.hw, + [AUD_CLKID_SPDIFIN_CLK_SEL] = &aud_spdifin_clk_sel.hw, + [AUD_CLKID_SPDIFIN_CLK_DIV] = &aud_spdifin_clk_div.hw, + [AUD_CLKID_SPDIFIN_CLK] = &aud_spdifin_clk.hw, + [AUD_CLKID_PDM_DCLK_SEL] = &aud_pdm_dclk_sel.hw, + [AUD_CLKID_PDM_DCLK_DIV] = &aud_pdm_dclk_div.hw, + [AUD_CLKID_PDM_DCLK] = &aud_pdm_dclk.hw, + [AUD_CLKID_PDM_SYSCLK_SEL] = &aud_pdm_sysclk_sel.hw, + [AUD_CLKID_PDM_SYSCLK_DIV] = &aud_pdm_sysclk_div.hw, + [AUD_CLKID_PDM_SYSCLK] = &aud_pdm_sysclk.hw, + [AUD_CLKID_MST_A_SCLK_PRE_EN] = &aud_mst_a_sclk_pre_en.hw, + [AUD_CLKID_MST_B_SCLK_PRE_EN] = &aud_mst_b_sclk_pre_en.hw, + [AUD_CLKID_MST_C_SCLK_PRE_EN] = &aud_mst_c_sclk_pre_en.hw, + [AUD_CLKID_MST_D_SCLK_PRE_EN] = &aud_mst_d_sclk_pre_en.hw, + [AUD_CLKID_MST_E_SCLK_PRE_EN] = &aud_mst_e_sclk_pre_en.hw, + [AUD_CLKID_MST_F_SCLK_PRE_EN] = &aud_mst_f_sclk_pre_en.hw, + [AUD_CLKID_MST_A_SCLK_DIV] = &aud_mst_a_sclk_div.hw, + [AUD_CLKID_MST_B_SCLK_DIV] = &aud_mst_b_sclk_div.hw, + [AUD_CLKID_MST_C_SCLK_DIV] = &aud_mst_c_sclk_div.hw, + [AUD_CLKID_MST_D_SCLK_DIV] = &aud_mst_d_sclk_div.hw, + [AUD_CLKID_MST_E_SCLK_DIV] = &aud_mst_e_sclk_div.hw, + [AUD_CLKID_MST_F_SCLK_DIV] = 
&aud_mst_f_sclk_div.hw, + [AUD_CLKID_MST_A_SCLK_POST_EN] = &aud_mst_a_sclk_post_en.hw, + [AUD_CLKID_MST_B_SCLK_POST_EN] = &aud_mst_b_sclk_post_en.hw, + [AUD_CLKID_MST_C_SCLK_POST_EN] = &aud_mst_c_sclk_post_en.hw, + [AUD_CLKID_MST_D_SCLK_POST_EN] = &aud_mst_d_sclk_post_en.hw, + [AUD_CLKID_MST_E_SCLK_POST_EN] = &aud_mst_e_sclk_post_en.hw, + [AUD_CLKID_MST_F_SCLK_POST_EN] = &aud_mst_f_sclk_post_en.hw, + [AUD_CLKID_MST_A_SCLK] = &aud_mst_a_sclk.hw, + [AUD_CLKID_MST_B_SCLK] = &aud_mst_b_sclk.hw, + [AUD_CLKID_MST_C_SCLK] = &aud_mst_c_sclk.hw, + [AUD_CLKID_MST_D_SCLK] = &aud_mst_d_sclk.hw, + [AUD_CLKID_MST_E_SCLK] = &aud_mst_e_sclk.hw, + [AUD_CLKID_MST_F_SCLK] = &aud_mst_f_sclk.hw, + [AUD_CLKID_MST_A_LRCLK_DIV] = &aud_mst_a_lrclk_div.hw, + [AUD_CLKID_MST_B_LRCLK_DIV] = &aud_mst_b_lrclk_div.hw, + [AUD_CLKID_MST_C_LRCLK_DIV] = &aud_mst_c_lrclk_div.hw, + [AUD_CLKID_MST_D_LRCLK_DIV] = &aud_mst_d_lrclk_div.hw, + [AUD_CLKID_MST_E_LRCLK_DIV] = &aud_mst_e_lrclk_div.hw, + [AUD_CLKID_MST_F_LRCLK_DIV] = &aud_mst_f_lrclk_div.hw, + [AUD_CLKID_MST_A_LRCLK] = &aud_mst_a_lrclk.hw, + [AUD_CLKID_MST_B_LRCLK] = &aud_mst_b_lrclk.hw, + [AUD_CLKID_MST_C_LRCLK] = &aud_mst_c_lrclk.hw, + [AUD_CLKID_MST_D_LRCLK] = &aud_mst_d_lrclk.hw, + [AUD_CLKID_MST_E_LRCLK] = &aud_mst_e_lrclk.hw, + [AUD_CLKID_MST_F_LRCLK] = &aud_mst_f_lrclk.hw, + [AUD_CLKID_TDMIN_A_SCLK_SEL] = &aud_tdmin_a_sclk_sel.hw, + [AUD_CLKID_TDMIN_B_SCLK_SEL] = &aud_tdmin_b_sclk_sel.hw, + [AUD_CLKID_TDMIN_C_SCLK_SEL] = &aud_tdmin_c_sclk_sel.hw, + [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &aud_tdmin_lb_sclk_sel.hw, + [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &aud_tdmout_a_sclk_sel.hw, + [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &aud_tdmout_b_sclk_sel.hw, + [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &aud_tdmout_c_sclk_sel.hw, + [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &aud_tdmin_a_sclk_pre_en.hw, + [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &aud_tdmin_b_sclk_pre_en.hw, + [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &aud_tdmin_c_sclk_pre_en.hw, + [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &aud_tdmin_lb_sclk_pre_en.hw, + [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &aud_tdmout_a_sclk_pre_en.hw, + [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &aud_tdmout_b_sclk_pre_en.hw, + [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &aud_tdmout_c_sclk_pre_en.hw, + [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &aud_tdmin_a_sclk_post_en.hw, + [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &aud_tdmin_b_sclk_post_en.hw, + [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &aud_tdmin_c_sclk_post_en.hw, + [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &aud_tdmin_lb_sclk_post_en.hw, + [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &aud_tdmout_a_sclk_post_en.hw, + [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &aud_tdmout_b_sclk_post_en.hw, + [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &aud_tdmout_c_sclk_post_en.hw, + [AUD_CLKID_TDMIN_A_SCLK] = &aud_tdmin_a_sclk.hw, + [AUD_CLKID_TDMIN_B_SCLK] = &aud_tdmin_b_sclk.hw, + [AUD_CLKID_TDMIN_C_SCLK] = &aud_tdmin_c_sclk.hw, + [AUD_CLKID_TDMIN_LB_SCLK] = &aud_tdmin_lb_sclk.hw, + [AUD_CLKID_TDMOUT_A_SCLK] = &aud_tdmout_a_sclk.hw, + [AUD_CLKID_TDMOUT_B_SCLK] = &aud_tdmout_b_sclk.hw, + [AUD_CLKID_TDMOUT_C_SCLK] = &aud_tdmout_c_sclk.hw, + [AUD_CLKID_TDMIN_A_LRCLK] = &aud_tdmin_a_lrclk.hw, + [AUD_CLKID_TDMIN_B_LRCLK] = &aud_tdmin_b_lrclk.hw, + [AUD_CLKID_TDMIN_C_LRCLK] = &aud_tdmin_c_lrclk.hw, + [AUD_CLKID_TDMIN_LB_LRCLK] = &aud_tdmin_lb_lrclk.hw, + [AUD_CLKID_TDMOUT_A_LRCLK] = &aud_tdmout_a_lrclk.hw, + [AUD_CLKID_TDMOUT_B_LRCLK] = &aud_tdmout_b_lrclk.hw, + [AUD_CLKID_TDMOUT_C_LRCLK] = &aud_tdmout_c_lrclk.hw, + [AUD_CLKID_TDM_MCLK_PAD0] = &aud_tdm_mclk_pad_0.hw, + [AUD_CLKID_TDM_MCLK_PAD1] = &aud_tdm_mclk_pad_1.hw, + 
[AUD_CLKID_TDM_LRCLK_PAD0] = &aud_tdm_lrclk_pad_0.hw, + [AUD_CLKID_TDM_LRCLK_PAD1] = &aud_tdm_lrclk_pad_1.hw, + [AUD_CLKID_TDM_LRCLK_PAD2] = &aud_tdm_lrclk_pad_2.hw, + [AUD_CLKID_TDM_SCLK_PAD0] = &aud_tdm_sclk_pad_0.hw, + [AUD_CLKID_TDM_SCLK_PAD1] = &aud_tdm_sclk_pad_1.hw, + [AUD_CLKID_TDM_SCLK_PAD2] = &aud_tdm_sclk_pad_2.hw, + [NR_CLKS] = NULL, + }, + .num = NR_CLKS, +}; + +/* Convenience table to populate regmap in .probe() + * Note that this table is shared between both AXG and G12A, + * with spdifout_b clocks being exclusive to G12A. Since those + * clocks are not declared within the AXG onecell table, we do not + * feel the need to have separate AXG/G12A regmap tables. + */ +static struct clk_regmap *const aud_clk_regmaps[] = { + &aud_ddr_arb, + &aud_pdm, + &aud_tdmin_a, + &aud_tdmin_b, + &aud_tdmin_c, + &aud_tdmin_lb, + &aud_tdmout_a, + &aud_tdmout_b, + &aud_tdmout_c, + &aud_frddr_a, + &aud_frddr_b, + &aud_frddr_c, + &aud_toddr_a, + &aud_toddr_b, + &aud_toddr_c, + &aud_loopback, + &aud_spdifin, + &aud_spdifout, + &aud_resample, + &aud_power_detect, + &aud_spdifout_b, + &aud_mst_a_mclk_sel, + &aud_mst_b_mclk_sel, + &aud_mst_c_mclk_sel, + &aud_mst_d_mclk_sel, + &aud_mst_e_mclk_sel, + &aud_mst_f_mclk_sel, + &aud_mst_a_mclk_div, + &aud_mst_b_mclk_div, + &aud_mst_c_mclk_div, + &aud_mst_d_mclk_div, + &aud_mst_e_mclk_div, + &aud_mst_f_mclk_div, + &aud_mst_a_mclk, + &aud_mst_b_mclk, + &aud_mst_c_mclk, + &aud_mst_d_mclk, + &aud_mst_e_mclk, + &aud_mst_f_mclk, + &aud_spdifout_clk_sel, + &aud_spdifout_clk_div, + &aud_spdifout_clk, + &aud_spdifin_clk_sel, + &aud_spdifin_clk_div, + &aud_spdifin_clk, + &aud_pdm_dclk_sel, + &aud_pdm_dclk_div, + &aud_pdm_dclk, + &aud_pdm_sysclk_sel, + &aud_pdm_sysclk_div, + &aud_pdm_sysclk, + &aud_mst_a_sclk_pre_en, + &aud_mst_b_sclk_pre_en, + &aud_mst_c_sclk_pre_en, + &aud_mst_d_sclk_pre_en, + &aud_mst_e_sclk_pre_en, + &aud_mst_f_sclk_pre_en, + &aud_mst_a_sclk_div, + &aud_mst_b_sclk_div, + &aud_mst_c_sclk_div, + &aud_mst_d_sclk_div, + &aud_mst_e_sclk_div, + &aud_mst_f_sclk_div, + &aud_mst_a_sclk_post_en, + &aud_mst_b_sclk_post_en, + &aud_mst_c_sclk_post_en, + &aud_mst_d_sclk_post_en, + &aud_mst_e_sclk_post_en, + &aud_mst_f_sclk_post_en, + &aud_mst_a_sclk, + &aud_mst_b_sclk, + &aud_mst_c_sclk, + &aud_mst_d_sclk, + &aud_mst_e_sclk, + &aud_mst_f_sclk, + &aud_mst_a_lrclk_div, + &aud_mst_b_lrclk_div, + &aud_mst_c_lrclk_div, + &aud_mst_d_lrclk_div, + &aud_mst_e_lrclk_div, + &aud_mst_f_lrclk_div, + &aud_mst_a_lrclk, + &aud_mst_b_lrclk, + &aud_mst_c_lrclk, + &aud_mst_d_lrclk, + &aud_mst_e_lrclk, + &aud_mst_f_lrclk, + &aud_tdmin_a_sclk_sel, + &aud_tdmin_b_sclk_sel, + &aud_tdmin_c_sclk_sel, + &aud_tdmin_lb_sclk_sel, + &aud_tdmout_a_sclk_sel, + &aud_tdmout_b_sclk_sel, + &aud_tdmout_c_sclk_sel, + &aud_tdmin_a_sclk_pre_en, + &aud_tdmin_b_sclk_pre_en, + &aud_tdmin_c_sclk_pre_en, + &aud_tdmin_lb_sclk_pre_en, + &aud_tdmout_a_sclk_pre_en, + &aud_tdmout_b_sclk_pre_en, + &aud_tdmout_c_sclk_pre_en, + &aud_tdmin_a_sclk_post_en, + &aud_tdmin_b_sclk_post_en, + &aud_tdmin_c_sclk_post_en, + &aud_tdmin_lb_sclk_post_en, + &aud_tdmout_a_sclk_post_en, + &aud_tdmout_b_sclk_post_en, + &aud_tdmout_c_sclk_post_en, + &aud_tdmin_a_sclk, + &aud_tdmin_b_sclk, + &aud_tdmin_c_sclk, + &aud_tdmin_lb_sclk, + &aud_tdmout_a_sclk, + &aud_tdmout_b_sclk, + &aud_tdmout_c_sclk, + &aud_tdmin_a_lrclk, + &aud_tdmin_b_lrclk, + &aud_tdmin_c_lrclk, + &aud_tdmin_lb_lrclk, + &aud_tdmout_a_lrclk, + &aud_tdmout_b_lrclk, + &aud_tdmout_c_lrclk, + &aud_spdifout_b_clk_sel, + &aud_spdifout_b_clk_div, + &aud_spdifout_b_clk, + 
&aud_tdm_mclk_pad_0, + &aud_tdm_mclk_pad_1, + &aud_tdm_lrclk_pad_0, + &aud_tdm_lrclk_pad_1, + &aud_tdm_lrclk_pad_2, + &aud_tdm_sclk_pad_0, + &aud_tdm_sclk_pad_1, + &aud_tdm_sclk_pad_2, }; static int devm_clk_get_enable(struct device *dev, char *id) @@ -665,14 +869,13 @@ static int devm_clk_get_enable(struct device *dev, char *id) } static int axg_register_clk_hw_input(struct device *dev, - const char *name, - unsigned int clkid) + const char *name) { char *clk_name; struct clk_hw *hw; int err = 0; - clk_name = kasprintf(GFP_KERNEL, "axg_%s", name); + clk_name = kasprintf(GFP_KERNEL, "aud_%s", name); if (!clk_name) return -ENOMEM; @@ -686,8 +889,6 @@ static int axg_register_clk_hw_input(struct device *dev, if (err != -EPROBE_DEFER) dev_err(dev, "failed to get %s clock", name); } - } else { - axg_audio_hw_onecell_data.hws[clkid] = hw; } kfree(clk_name); @@ -696,8 +897,7 @@ static int axg_register_clk_hw_input(struct device *dev, static int axg_register_clk_hw_inputs(struct device *dev, const char *basename, - unsigned int count, - unsigned int clkid) + unsigned int count) { char *name; int i, ret; @@ -707,7 +907,7 @@ static int axg_register_clk_hw_inputs(struct device *dev, if (!name) return -ENOMEM; - ret = axg_register_clk_hw_input(dev, name, clkid + i); + ret = axg_register_clk_hw_input(dev, name); kfree(name); if (ret) return ret; @@ -723,15 +923,24 @@ static const struct regmap_config axg_audio_regmap_cfg = { .max_register = AUDIO_CLK_PDMIN_CTRL1, }; +struct audioclk_data { + struct clk_hw_onecell_data *hw_onecell_data; +}; + static int axg_audio_clkc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + const struct audioclk_data *data; struct regmap *map; struct resource *res; void __iomem *regs; struct clk_hw *hw; int ret, i; + data = of_device_get_match_data(dev); + if (!data) + return -EINVAL; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(dev, res); if (IS_ERR(regs)) @@ -755,40 +964,35 @@ static int axg_audio_clkc_probe(struct platform_device *pdev) } /* Register the peripheral input clock */ - hw = meson_clk_hw_register_input(dev, "pclk", "axg_audio_pclk", 0); + hw = meson_clk_hw_register_input(dev, "pclk", "audio_pclk", 0); if (IS_ERR(hw)) return PTR_ERR(hw); - axg_audio_hw_onecell_data.hws[AUD_CLKID_PCLK] = hw; - /* Register optional input master clocks */ ret = axg_register_clk_hw_inputs(dev, "mst_in", - AXG_MST_IN_COUNT, - AUD_CLKID_MST0); + AUD_MST_IN_COUNT); if (ret) return ret; /* Register optional input slave sclks */ ret = axg_register_clk_hw_inputs(dev, "slv_sclk", - AXG_SLV_SCLK_COUNT, - AUD_CLKID_SLV_SCLK0); + AUD_SLV_SCLK_COUNT); if (ret) return ret; /* Register optional input slave lrclks */ ret = axg_register_clk_hw_inputs(dev, "slv_lrclk", - AXG_SLV_LRCLK_COUNT, - AUD_CLKID_SLV_LRCLK0); + AUD_SLV_LRCLK_COUNT); if (ret) return ret; /* Populate regmap for the regmap backed clocks */ - for (i = 0; i < ARRAY_SIZE(axg_audio_clk_regmaps); i++) - axg_audio_clk_regmaps[i]->map = map; + for (i = 0; i < ARRAY_SIZE(aud_clk_regmaps); i++) + aud_clk_regmaps[i]->map = map; /* Take care to skip the registered input clocks */ - for (i = AUD_CLKID_DDR_ARB; i < axg_audio_hw_onecell_data.num; i++) { - hw = axg_audio_hw_onecell_data.hws[i]; + for (i = AUD_CLKID_DDR_ARB; i < data->hw_onecell_data->num; i++) { + hw = data->hw_onecell_data->hws[i]; /* array might be sparse */ if (!hw) continue; @@ -802,12 +1006,25 @@ static int axg_audio_clkc_probe(struct platform_device *pdev) } return devm_of_clk_add_hw_provider(dev, 
of_clk_hw_onecell_get, - &axg_audio_hw_onecell_data); + data->hw_onecell_data); } +static const struct audioclk_data axg_audioclk_data = { + .hw_onecell_data = &axg_audio_hw_onecell_data, +}; + +static const struct audioclk_data g12a_audioclk_data = { + .hw_onecell_data = &g12a_audio_hw_onecell_data, +}; + static const struct of_device_id clkc_match_table[] = { - { .compatible = "amlogic,axg-audio-clkc" }, - {} + { + .compatible = "amlogic,axg-audio-clkc", + .data = &axg_audioclk_data + }, { + .compatible = "amlogic,g12a-audio-clkc", + .data = &g12a_audioclk_data + }, {} }; MODULE_DEVICE_TABLE(of, clkc_match_table); @@ -820,6 +1037,6 @@ static struct platform_driver axg_audio_driver = { }; module_platform_driver(axg_audio_driver); -MODULE_DESCRIPTION("Amlogic A113x Audio Clock driver"); +MODULE_DESCRIPTION("Amlogic AXG/G12A Audio Clock driver"); MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h index 7191b39c9d65..5d972d55d6c7 100644 --- a/drivers/clk/meson/axg-audio.h +++ b/drivers/clk/meson/axg-audio.h @@ -20,6 +20,8 @@ #define AUDIO_MCLK_D_CTRL 0x010 #define AUDIO_MCLK_E_CTRL 0x014 #define AUDIO_MCLK_F_CTRL 0x018 +#define AUDIO_MST_PAD_CTRL0 0x01c +#define AUDIO_MST_PAD_CTRL1 0x020 #define AUDIO_MST_A_SCLK_CTRL0 0x040 #define AUDIO_MST_A_SCLK_CTRL1 0x044 #define AUDIO_MST_B_SCLK_CTRL0 0x048 @@ -45,21 +47,13 @@ #define AUDIO_CLK_LOCKER_CTRL 0x0A8 #define AUDIO_CLK_PDMIN_CTRL0 0x0AC #define AUDIO_CLK_PDMIN_CTRL1 0x0B0 +#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4 /* * CLKID index values * These indices are entirely contrived and do not map onto the hardware. */ -#define AUD_CLKID_PCLK 0 -#define AUD_CLKID_MST0 1 -#define AUD_CLKID_MST1 2 -#define AUD_CLKID_MST2 3 -#define AUD_CLKID_MST3 4 -#define AUD_CLKID_MST4 5 -#define AUD_CLKID_MST5 6 -#define AUD_CLKID_MST6 7 -#define AUD_CLKID_MST7 8 #define AUD_CLKID_MST_A_MCLK_SEL 59 #define AUD_CLKID_MST_B_MCLK_SEL 60 #define AUD_CLKID_MST_C_MCLK_SEL 61 @@ -118,10 +112,12 @@ #define AUD_CLKID_TDMOUT_A_SCLK_POST_EN 148 #define AUD_CLKID_TDMOUT_B_SCLK_POST_EN 149 #define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150 +#define AUD_CLKID_SPDIFOUT_B_CLK_SEL 153 +#define AUD_CLKID_SPDIFOUT_B_CLK_DIV 154 /* include the CLKIDs which are part of the DT bindings */ #include <dt-bindings/clock/axg-audio-clkc.h> -#define NR_CLKS 151 +#define NR_CLKS 163 #endif /*__AXG_AUDIO_CLKC_H */ diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 41e16dd7272a..ddb1e5634739 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -120,7 +120,7 @@ static bool meson_clk_pll_is_better(unsigned long rate, return true; } else { /* Round down */ - if (now < rate && best < now) + if (now <= rate && best < now) return true; } @@ -303,6 +303,16 @@ static int meson_clk_pll_is_enabled(struct clk_hw *hw) return 1; } +static int meson_clk_pcie_pll_enable(struct clk_hw *hw) +{ + meson_clk_pll_init(hw); + + if (meson_clk_pll_wait_lock(hw)) + return -EIO; + + return 0; +} + static int meson_clk_pll_enable(struct clk_hw *hw) { struct clk_regmap *clk = to_clk_regmap(hw); @@ -387,6 +397,22 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } +/* + * The Meson G12A PCIE PLL is fined tuned to deliver a very precise + * 100MHz reference clock for the PCIe Analog PHY, and thus requires + * a strict register sequence to enable the PLL. 
+ * To simplify, re-use the _init() op to enable the PLL and keep + * the other ops except set_rate since the rate is fixed. + */ +const struct clk_ops meson_clk_pcie_pll_ops = { + .recalc_rate = meson_clk_pll_recalc_rate, + .round_rate = meson_clk_pll_round_rate, + .is_enabled = meson_clk_pll_is_enabled, + .enable = meson_clk_pcie_pll_enable, + .disable = meson_clk_pll_disable +}; +EXPORT_SYMBOL_GPL(meson_clk_pcie_pll_ops); + const struct clk_ops meson_clk_pll_ops = { .init = meson_clk_pll_init, .recalc_rate = meson_clk_pll_recalc_rate, diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h index 55af2e285b1b..367efd0f6410 100644 --- a/drivers/clk/meson/clk-pll.h +++ b/drivers/clk/meson/clk-pll.h @@ -45,5 +45,6 @@ struct meson_clk_pll_data { extern const struct clk_ops meson_clk_pll_ro_ops; extern const struct clk_ops meson_clk_pll_ops; +extern const struct clk_ops meson_clk_pcie_pll_ops; #endif /* __MESON_CLK_PLL_H */ diff --git a/drivers/clk/meson/g12a-aoclk.h b/drivers/clk/meson/g12a-aoclk.h index 04b0d5506641..a67c8a7cd7c4 100644 --- a/drivers/clk/meson/g12a-aoclk.h +++ b/drivers/clk/meson/g12a-aoclk.h @@ -16,9 +16,7 @@ * to expose, such as the internal muxes and dividers of composite clocks, * will remain defined here. */ -#define CLKID_AO_SAR_ADC_SEL 16 #define CLKID_AO_SAR_ADC_DIV 17 -#define CLKID_AO_CTS_OSCIN 19 #define CLKID_AO_32K_PRE 20 #define CLKID_AO_32K_DIV 21 #define CLKID_AO_32K_SEL 22 diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 0e1ce8c03259..739f64fdf1e3 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -150,6 +150,318 @@ static struct clk_regmap g12a_sys_pll = { }, }; +static struct clk_regmap g12a_sys_pll_div16_en = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPU_CLK_CNTL1, + .bit_idx = 24, + }, + .hw.init = &(struct clk_init_data) { + .name = "sys_pll_div16_en", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "sys_pll" }, + .num_parents = 1, + /* + * This clock is used to debug the sys_pll range + * Linux should not change it at runtime + */ + }, +}; + +static struct clk_fixed_factor g12a_sys_pll_div16 = { + .mult = 1, + .div = 16, + .hw.init = &(struct clk_init_data){ + .name = "sys_pll_div16", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "sys_pll_div16_en" }, + .num_parents = 1, + }, +}; + +/* Datasheet names this field as "premux0" */ +static struct clk_regmap g12a_cpu_clk_premux0 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL0, + .mask = 0x3, + .shift = 0, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_dyn0_sel", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ IN_PREFIX "xtal", + "fclk_div2", + "fclk_div3" }, + .num_parents = 3, + }, +}; + +/* Datasheet names this field as "mux0_divn_tcnt" */ +static struct clk_regmap g12a_cpu_clk_mux0_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_CPU_CLK_CNTL0, + .shift = 4, + .width = 6, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_dyn0_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "cpu_clk_dyn0_sel" }, + .num_parents = 1, + }, +}; + +/* Datasheet names this field as "postmux0" */ +static struct clk_regmap g12a_cpu_clk_postmux0 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL0, + .mask = 0x1, + .shift = 2, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_dyn0", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const 
char *[]){ "cpu_clk_dyn0_sel", + "cpu_clk_dyn0_div" }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "premux1" */ +static struct clk_regmap g12a_cpu_clk_premux1 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL0, + .mask = 0x3, + .shift = 16, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_dyn1_sel", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ IN_PREFIX "xtal", + "fclk_div2", + "fclk_div3" }, + .num_parents = 3, + }, +}; + +/* Datasheet names this field as "Mux1_divn_tcnt" */ +static struct clk_regmap g12a_cpu_clk_mux1_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_CPU_CLK_CNTL0, + .shift = 20, + .width = 6, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_dyn1_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "cpu_clk_dyn1_sel" }, + .num_parents = 1, + }, +}; + +/* Datasheet names this field as "postmux1" */ +static struct clk_regmap g12a_cpu_clk_postmux1 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL0, + .mask = 0x1, + .shift = 18, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_dyn1", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpu_clk_dyn1_sel", + "cpu_clk_dyn1_div" }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "Final_dyn_mux_sel" */ +static struct clk_regmap g12a_cpu_clk_dyn = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL0, + .mask = 0x1, + .shift = 10, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_dyn", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpu_clk_dyn0", + "cpu_clk_dyn1" }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "Final_mux_sel" */ +static struct clk_regmap g12a_cpu_clk = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL0, + .mask = 0x1, + .shift = 11, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpu_clk_dyn", + "sys_pll" }, + .num_parents = 2, + }, +}; + +static struct clk_regmap g12a_cpu_clk_div16_en = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPU_CLK_CNTL1, + .bit_idx = 1, + }, + .hw.init = &(struct clk_init_data) { + .name = "cpu_clk_div16_en", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "cpu_clk" }, + .num_parents = 1, + /* + * This clock is used to debug the cpu_clk range + * Linux should not change it at runtime + */ + }, +}; + +static struct clk_fixed_factor g12a_cpu_clk_div16 = { + .mult = 1, + .div = 16, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_div16", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "cpu_clk_div16_en" }, + .num_parents = 1, + }, +}; + +static struct clk_regmap g12a_cpu_clk_apb_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_CPU_CLK_CNTL1, + .shift = 3, + .width = 3, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_apb_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "cpu_clk" }, + .num_parents = 1, + }, +}; + +static struct clk_regmap g12a_cpu_clk_apb = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPU_CLK_CNTL1, + .bit_idx = 1, + }, + .hw.init = &(struct clk_init_data) { + .name = "cpu_clk_apb", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "cpu_clk_apb_div" }, + .num_parents = 1, 
+ /* + * This clock is set by the ROM monitor code, + * Linux should not change it at runtime + */ + }, +}; + +static struct clk_regmap g12a_cpu_clk_atb_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_CPU_CLK_CNTL1, + .shift = 6, + .width = 3, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_atb_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "cpu_clk" }, + .num_parents = 1, + }, +}; + +static struct clk_regmap g12a_cpu_clk_atb = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPU_CLK_CNTL1, + .bit_idx = 17, + }, + .hw.init = &(struct clk_init_data) { + .name = "cpu_clk_atb", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "cpu_clk_atb_div" }, + .num_parents = 1, + /* + * This clock is set by the ROM monitor code, + * Linux should not change it at runtime + */ + }, +}; + +static struct clk_regmap g12a_cpu_clk_axi_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_CPU_CLK_CNTL1, + .shift = 9, + .width = 3, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_axi_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "cpu_clk" }, + .num_parents = 1, + }, +}; + +static struct clk_regmap g12a_cpu_clk_axi = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPU_CLK_CNTL1, + .bit_idx = 18, + }, + .hw.init = &(struct clk_init_data) { + .name = "cpu_clk_axi", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "cpu_clk_axi_div" }, + .num_parents = 1, + /* + * This clock is set by the ROM monitor code, + * Linux should not change it at runtime + */ + }, +}; + +static struct clk_regmap g12a_cpu_clk_trace_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_CPU_CLK_CNTL1, + .shift = 20, + .width = 3, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk_trace_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "cpu_clk" }, + .num_parents = 1, + }, +}; + +static struct clk_regmap g12a_cpu_clk_trace = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPU_CLK_CNTL1, + .bit_idx = 23, + }, + .hw.init = &(struct clk_init_data) { + .name = "cpu_clk_trace", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "cpu_clk_trace_div" }, + .num_parents = 1, + /* + * This clock is set by the ROM monitor code, + * Linux should not change it at runtime + */ + }, +}; + static const struct pll_mult_range g12a_gp0_pll_mult_range = { .min = 55, .max = 255, @@ -302,6 +614,118 @@ static struct clk_regmap g12a_hifi_pll = { }, }; +/* + * The Meson G12A PCIE PLL is fined tuned to deliver a very precise + * 100MHz reference clock for the PCIe Analog PHY, and thus requires + * a strict register sequence to enable the PLL. 
+ */ +static const struct reg_sequence g12a_pcie_pll_init_regs[] = { + { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 }, + { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 }, + { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 }, + { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 }, + { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 }, + { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 }, + { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 }, + { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 }, + { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 }, + { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 }, + { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 }, + { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 }, +}; + +/* Keep a single entry table for recalc/round_rate() ops */ +static const struct pll_params_table g12a_pcie_pll_table[] = { + PLL_PARAMS(150, 1), + {0, 0}, +}; + +static struct clk_regmap g12a_pcie_pll_dco = { + .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_PCIE_PLL_CNTL0, + .shift = 28, + .width = 1, + }, + .m = { + .reg_off = HHI_PCIE_PLL_CNTL0, + .shift = 0, + .width = 8, + }, + .n = { + .reg_off = HHI_PCIE_PLL_CNTL0, + .shift = 10, + .width = 5, + }, + .frac = { + .reg_off = HHI_PCIE_PLL_CNTL1, + .shift = 0, + .width = 12, + }, + .l = { + .reg_off = HHI_PCIE_PLL_CNTL0, + .shift = 31, + .width = 1, + }, + .rst = { + .reg_off = HHI_PCIE_PLL_CNTL0, + .shift = 29, + .width = 1, + }, + .table = g12a_pcie_pll_table, + .init_regs = g12a_pcie_pll_init_regs, + .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs), + }, + .hw.init = &(struct clk_init_data){ + .name = "pcie_pll_dco", + .ops = &meson_clk_pcie_pll_ops, + .parent_names = (const char *[]){ IN_PREFIX "xtal" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "pcie_pll_dco_div2", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "pcie_pll_dco" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_pcie_pll_od = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_PCIE_PLL_CNTL0, + .shift = 16, + .width = 5, + .flags = CLK_DIVIDER_ROUND_CLOSEST | + CLK_DIVIDER_ONE_BASED | + CLK_DIVIDER_ALLOW_ZERO, + }, + .hw.init = &(struct clk_init_data){ + .name = "pcie_pll_od", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "pcie_pll_dco_div2" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_fixed_factor g12a_pcie_pll = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "pcie_pll_pll", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "pcie_pll_od" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + static struct clk_regmap g12a_hdmi_pll_dco = { .data = &(struct meson_clk_pll_data){ .en = { @@ -960,14 +1384,14 @@ static struct clk_regmap g12a_sd_emmc_c_clk0 = { /* VPU Clock */ static const char * const g12a_vpu_parent_names[] = { - "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", + "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7", "mpll1", "vid_pll", "hifi_pll", "gp0_pll", }; static struct clk_regmap g12a_vpu_0_sel = { .data = &(struct clk_regmap_mux_data){ .offset = HHI_VPU_CLK_CNTL, - .mask = 0x3, + .mask = 0x7, .shift = 9, }, .hw.init = &(struct clk_init_data){ @@ -1011,7 +1435,7 @@ static struct clk_regmap g12a_vpu_0 = { static struct clk_regmap g12a_vpu_1_sel = { .data = &(struct clk_regmap_mux_data){ 
.offset = HHI_VPU_CLK_CNTL, - .mask = 0x3, + .mask = 0x7, .shift = 25, }, .hw.init = &(struct clk_init_data){ @@ -1071,6 +1495,151 @@ static struct clk_regmap g12a_vpu = { }, }; +/* VDEC clocks */ + +static const char * const g12a_vdec_parent_names[] = { + "fclk_div2p5", "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7", + "hifi_pll", "gp0_pll", +}; + +static struct clk_regmap g12a_vdec_1_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VDEC_CLK_CNTL, + .mask = 0x7, + .shift = 9, + .flags = CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_1_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = g12a_vdec_parent_names, + .num_parents = ARRAY_SIZE(g12a_vdec_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_vdec_1_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VDEC_CLK_CNTL, + .shift = 0, + .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_1_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "vdec_1_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_vdec_1 = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VDEC_CLK_CNTL, + .bit_idx = 8, + }, + .hw.init = &(struct clk_init_data) { + .name = "vdec_1", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "vdec_1_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_vdec_hevcf_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .mask = 0x7, + .shift = 9, + .flags = CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_hevcf_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = g12a_vdec_parent_names, + .num_parents = ARRAY_SIZE(g12a_vdec_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_vdec_hevcf_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .shift = 0, + .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_hevcf_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "vdec_hevcf_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_vdec_hevcf = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .bit_idx = 8, + }, + .hw.init = &(struct clk_init_data) { + .name = "vdec_hevcf", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "vdec_hevcf_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_vdec_hevc_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .mask = 0x7, + .shift = 25, + .flags = CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_hevc_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = g12a_vdec_parent_names, + .num_parents = ARRAY_SIZE(g12a_vdec_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_vdec_hevc_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .shift = 16, + .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_hevc_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "vdec_hevc_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct 
clk_regmap g12a_vdec_hevc = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .bit_idx = 24, + }, + .hw.init = &(struct clk_init_data) { + .name = "vdec_hevc", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "vdec_hevc_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + /* VAPB Clock */ static const char * const g12a_vapb_parent_names[] = { @@ -2167,6 +2736,39 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = { [CLKID_MALI] = &g12a_mali.hw, [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw, [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw, + [CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw, + [CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw, + [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw, + [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw, + [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw, + [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw, + [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw, + [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw, + [CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw, + [CLKID_CPU_CLK] = &g12a_cpu_clk.hw, + [CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw, + [CLKID_CPU_CLK_DIV16] = &g12a_cpu_clk_div16.hw, + [CLKID_CPU_CLK_APB_DIV] = &g12a_cpu_clk_apb_div.hw, + [CLKID_CPU_CLK_APB] = &g12a_cpu_clk_apb.hw, + [CLKID_CPU_CLK_ATB_DIV] = &g12a_cpu_clk_atb_div.hw, + [CLKID_CPU_CLK_ATB] = &g12a_cpu_clk_atb.hw, + [CLKID_CPU_CLK_AXI_DIV] = &g12a_cpu_clk_axi_div.hw, + [CLKID_CPU_CLK_AXI] = &g12a_cpu_clk_axi.hw, + [CLKID_CPU_CLK_TRACE_DIV] = &g12a_cpu_clk_trace_div.hw, + [CLKID_CPU_CLK_TRACE] = &g12a_cpu_clk_trace.hw, + [CLKID_PCIE_PLL_DCO] = &g12a_pcie_pll_dco.hw, + [CLKID_PCIE_PLL_DCO_DIV2] = &g12a_pcie_pll_dco_div2.hw, + [CLKID_PCIE_PLL_OD] = &g12a_pcie_pll_od.hw, + [CLKID_PCIE_PLL] = &g12a_pcie_pll.hw, + [CLKID_VDEC_1_SEL] = &g12a_vdec_1_sel.hw, + [CLKID_VDEC_1_DIV] = &g12a_vdec_1_div.hw, + [CLKID_VDEC_1] = &g12a_vdec_1.hw, + [CLKID_VDEC_HEVC_SEL] = &g12a_vdec_hevc_sel.hw, + [CLKID_VDEC_HEVC_DIV] = &g12a_vdec_hevc_div.hw, + [CLKID_VDEC_HEVC] = &g12a_vdec_hevc.hw, + [CLKID_VDEC_HEVCF_SEL] = &g12a_vdec_hevcf_sel.hw, + [CLKID_VDEC_HEVCF_DIV] = &g12a_vdec_hevcf_div.hw, + [CLKID_VDEC_HEVCF] = &g12a_vdec_hevcf.hw, [NR_CLKS] = NULL, }, .num = NR_CLKS, @@ -2335,6 +2937,35 @@ static struct clk_regmap *const g12a_clk_regmaps[] = { &g12a_mali_1, &g12a_mali, &g12a_mpll_50m, + &g12a_sys_pll_div16_en, + &g12a_cpu_clk_premux0, + &g12a_cpu_clk_mux0_div, + &g12a_cpu_clk_postmux0, + &g12a_cpu_clk_premux1, + &g12a_cpu_clk_mux1_div, + &g12a_cpu_clk_postmux1, + &g12a_cpu_clk_dyn, + &g12a_cpu_clk, + &g12a_cpu_clk_div16_en, + &g12a_cpu_clk_apb_div, + &g12a_cpu_clk_apb, + &g12a_cpu_clk_atb_div, + &g12a_cpu_clk_atb, + &g12a_cpu_clk_axi_div, + &g12a_cpu_clk_axi, + &g12a_cpu_clk_trace_div, + &g12a_cpu_clk_trace, + &g12a_pcie_pll_od, + &g12a_pcie_pll_dco, + &g12a_vdec_1_sel, + &g12a_vdec_1_div, + &g12a_vdec_1, + &g12a_vdec_hevc_sel, + &g12a_vdec_hevc_div, + &g12a_vdec_hevc, + &g12a_vdec_hevcf_sel, + &g12a_vdec_hevcf_div, + &g12a_vdec_hevcf, }; static const struct meson_eeclkc_data g12a_clkc_data = { diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h index f399dfe1401c..39c41af70804 100644 --- a/drivers/clk/meson/g12a.h +++ b/drivers/clk/meson/g12a.h @@ -50,6 +50,7 @@ #define HHI_GCLK_MPEG2 0x148 #define HHI_GCLK_OTHER 0x150 #define HHI_GCLK_OTHER2 0x154 +#define HHI_SYS_CPU_CLK_CNTL1 0x15c #define HHI_VID_CLK_DIV 0x164 #define HHI_MPEG_CLK_CNTL 0x174 #define HHI_AUD_CLK_CNTL 0x178 @@ -166,8 +167,36 @@ #define 
CLKID_MALI_0_DIV 170 #define CLKID_MALI_1_DIV 173 #define CLKID_MPLL_5OM_DIV 176 +#define CLKID_SYS_PLL_DIV16_EN 178 +#define CLKID_SYS_PLL_DIV16 179 +#define CLKID_CPU_CLK_DYN0_SEL 180 +#define CLKID_CPU_CLK_DYN0_DIV 181 +#define CLKID_CPU_CLK_DYN0 182 +#define CLKID_CPU_CLK_DYN1_SEL 183 +#define CLKID_CPU_CLK_DYN1_DIV 184 +#define CLKID_CPU_CLK_DYN1 185 +#define CLKID_CPU_CLK_DYN 186 +#define CLKID_CPU_CLK_DIV16_EN 188 +#define CLKID_CPU_CLK_DIV16 189 +#define CLKID_CPU_CLK_APB_DIV 190 +#define CLKID_CPU_CLK_APB 191 +#define CLKID_CPU_CLK_ATB_DIV 192 +#define CLKID_CPU_CLK_ATB 193 +#define CLKID_CPU_CLK_AXI_DIV 194 +#define CLKID_CPU_CLK_AXI 195 +#define CLKID_CPU_CLK_TRACE_DIV 196 +#define CLKID_CPU_CLK_TRACE 197 +#define CLKID_PCIE_PLL_DCO 198 +#define CLKID_PCIE_PLL_DCO_DIV2 199 +#define CLKID_PCIE_PLL_OD 200 +#define CLKID_VDEC_1_SEL 202 +#define CLKID_VDEC_1_DIV 203 +#define CLKID_VDEC_HEVC_SEL 205 +#define CLKID_VDEC_HEVC_DIV 206 +#define CLKID_VDEC_HEVCF_SEL 208 +#define CLKID_VDEC_HEVCF_DIV 209 -#define NR_CLKS 178 +#define NR_CLKS 211 /* include the CLKIDs that have been made part of the DT binding */ #include <dt-bindings/clock/g12a-clkc.h> diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 04df2e208ed6..29ffb4fde714 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -2216,6 +2216,7 @@ static struct clk_regmap gxbb_vdec_1_div = { .offset = HHI_VDEC_CLK_CNTL, .shift = 0, .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ .name = "vdec_1_div", @@ -2261,6 +2262,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = { .offset = HHI_VDEC2_CLK_CNTL, .shift = 16, .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ .name = "vdec_hevc_div", diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 576ad42252d0..37cf0f01bb5d 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -1703,6 +1703,456 @@ static struct clk_regmap meson8b_mali = { }, }; +static const struct pll_params_table meson8m2_gp_pll_params_table[] = { + PLL_PARAMS(182, 3), + { /* sentinel */ }, +}; + +static struct clk_regmap meson8m2_gp_pll_dco = { + .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_GP_PLL_CNTL, + .shift = 30, + .width = 1, + }, + .m = { + .reg_off = HHI_GP_PLL_CNTL, + .shift = 0, + .width = 9, + }, + .n = { + .reg_off = HHI_GP_PLL_CNTL, + .shift = 9, + .width = 5, + }, + .l = { + .reg_off = HHI_GP_PLL_CNTL, + .shift = 31, + .width = 1, + }, + .rst = { + .reg_off = HHI_GP_PLL_CNTL, + .shift = 29, + .width = 1, + }, + .table = meson8m2_gp_pll_params_table, + }, + .hw.init = &(struct clk_init_data){ + .name = "gp_pll_dco", + .ops = &meson_clk_pll_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + }, +}; + +static struct clk_regmap meson8m2_gp_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_GP_PLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "gp_pll", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "gp_pll_dco" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static const char * const mmeson8b_vpu_0_1_parent_names[] = { + "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7" +}; + +static const char * const mmeson8m2_vpu_0_1_parent_names[] = { + "fclk_div4", "fclk_div3", "fclk_div5", "gp_pll" +}; + +static struct clk_regmap meson8b_vpu_0_sel = { + .data = &(struct clk_regmap_mux_data){ + 
.offset = HHI_VPU_CLK_CNTL, + .mask = 0x3, + .shift = 9, + }, + .hw.init = &(struct clk_init_data){ + .name = "vpu_0_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = mmeson8b_vpu_0_1_parent_names, + .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8m2_vpu_0_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VPU_CLK_CNTL, + .mask = 0x3, + .shift = 9, + }, + .hw.init = &(struct clk_init_data){ + .name = "vpu_0_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = mmeson8m2_vpu_0_1_parent_names, + .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vpu_0_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VPU_CLK_CNTL, + .shift = 0, + .width = 7, + }, + .hw.init = &(struct clk_init_data){ + .name = "vpu_0_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "vpu_0_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vpu_0 = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VPU_CLK_CNTL, + .bit_idx = 8, + }, + .hw.init = &(struct clk_init_data) { + .name = "vpu_0", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "vpu_0_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vpu_1_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VPU_CLK_CNTL, + .mask = 0x3, + .shift = 25, + }, + .hw.init = &(struct clk_init_data){ + .name = "vpu_1_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = mmeson8b_vpu_0_1_parent_names, + .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8m2_vpu_1_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VPU_CLK_CNTL, + .mask = 0x3, + .shift = 25, + }, + .hw.init = &(struct clk_init_data){ + .name = "vpu_1_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = mmeson8m2_vpu_0_1_parent_names, + .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vpu_1_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VPU_CLK_CNTL, + .shift = 16, + .width = 7, + }, + .hw.init = &(struct clk_init_data){ + .name = "vpu_1_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "vpu_1_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vpu_1 = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VPU_CLK_CNTL, + .bit_idx = 24, + }, + .hw.init = &(struct clk_init_data) { + .name = "vpu_1", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "vpu_1_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vpu = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VPU_CLK_CNTL, + .mask = 1, + .shift = 31, + }, + .hw.init = &(struct clk_init_data){ + .name = "vpu", + .ops = &clk_regmap_mux_ops, + .parent_names = (const char *[]){ "vpu_0", "vpu_1" }, + .num_parents = 2, + .flags = CLK_SET_RATE_NO_REPARENT, + }, +}; + +static const char * const meson8b_vdec_parent_names[] = { + "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "mpll2", "mpll1" +}; + +static struct clk_regmap meson8b_vdec_1_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VDEC_CLK_CNTL, + .mask = 0x3, + .shift = 9, + .flags 
= CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_1_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = meson8b_vdec_parent_names, + .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_1_1_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VDEC_CLK_CNTL, + .shift = 0, + .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_1_1_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "vdec_1_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_1_1 = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VDEC_CLK_CNTL, + .bit_idx = 8, + }, + .hw.init = &(struct clk_init_data) { + .name = "vdec_1_1", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "vdec_1_1_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_1_2_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VDEC3_CLK_CNTL, + .shift = 0, + .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_1_2_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "vdec_1_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_1_2 = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VDEC3_CLK_CNTL, + .bit_idx = 8, + }, + .hw.init = &(struct clk_init_data) { + .name = "vdec_1_2", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "vdec_1_2_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_1 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VDEC3_CLK_CNTL, + .mask = 0x1, + .shift = 15, + .flags = CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_1", + .ops = &clk_regmap_mux_ops, + .parent_names = (const char *[]){ "vdec_1_1", "vdec_1_2" }, + .num_parents = 2, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_hcodec_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VDEC_CLK_CNTL, + .mask = 0x3, + .shift = 25, + .flags = CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_hcodec_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = meson8b_vdec_parent_names, + .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_hcodec_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VDEC_CLK_CNTL, + .shift = 16, + .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_hcodec_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "vdec_hcodec_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_hcodec = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VDEC_CLK_CNTL, + .bit_idx = 24, + }, + .hw.init = &(struct clk_init_data) { + .name = "vdec_hcodec", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "vdec_hcodec_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_2_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .mask = 0x3, + .shift = 9, + 
.flags = CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_2_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = meson8b_vdec_parent_names, + .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_2_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .shift = 0, + .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_2_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "vdec_2_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_2 = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .bit_idx = 8, + }, + .hw.init = &(struct clk_init_data) { + .name = "vdec_2", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "vdec_2_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_hevc_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .mask = 0x3, + .shift = 25, + .flags = CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_hevc_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = meson8b_vdec_parent_names, + .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names), + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_hevc_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .shift = 16, + .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_hevc_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "vdec_hevc_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_hevc_en = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .bit_idx = 24, + }, + .hw.init = &(struct clk_init_data) { + .name = "vdec_hevc_en", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "vdec_hevc_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_vdec_hevc = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_VDEC2_CLK_CNTL, + .mask = 0x1, + .shift = 31, + .flags = CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "vdec_hevc", + .ops = &clk_regmap_mux_ops, + /* TODO: The second parent is currently unknown */ + .parent_names = (const char *[]){ "vdec_hevc_en" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + /* Everything Else (EE) domain gates */ static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0); @@ -1966,6 +2416,22 @@ static struct clk_hw_onecell_data meson8_hw_onecell_data = { [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw, [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw, [CLKID_MALI] = &meson8b_mali_0.hw, + [CLKID_VPU_0_SEL] = &meson8b_vpu_0_sel.hw, + [CLKID_VPU_0_DIV] = &meson8b_vpu_0_div.hw, + [CLKID_VPU] = &meson8b_vpu_0.hw, + [CLKID_VDEC_1_SEL] = &meson8b_vdec_1_sel.hw, + [CLKID_VDEC_1_1_DIV] = &meson8b_vdec_1_1_div.hw, + [CLKID_VDEC_1] = &meson8b_vdec_1_1.hw, + [CLKID_VDEC_HCODEC_SEL] = &meson8b_vdec_hcodec_sel.hw, + [CLKID_VDEC_HCODEC_DIV] = &meson8b_vdec_hcodec_div.hw, + [CLKID_VDEC_HCODEC] = &meson8b_vdec_hcodec.hw, + [CLKID_VDEC_2_SEL] = &meson8b_vdec_2_sel.hw, + [CLKID_VDEC_2_DIV] = &meson8b_vdec_2_div.hw, + [CLKID_VDEC_2] = &meson8b_vdec_2.hw, + 
[CLKID_VDEC_HEVC_SEL] = &meson8b_vdec_hevc_sel.hw, + [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw, + [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw, + [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw, [CLK_NR_CLKS] = NULL, }, .num = CLK_NR_CLKS, @@ -2152,6 +2618,240 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = { [CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw, [CLKID_MALI_1] = &meson8b_mali_1.hw, [CLKID_MALI] = &meson8b_mali.hw, + [CLKID_VPU_0_SEL] = &meson8b_vpu_0_sel.hw, + [CLKID_VPU_0_DIV] = &meson8b_vpu_0_div.hw, + [CLKID_VPU_0] = &meson8b_vpu_0.hw, + [CLKID_VPU_1_SEL] = &meson8b_vpu_1_sel.hw, + [CLKID_VPU_1_DIV] = &meson8b_vpu_1_div.hw, + [CLKID_VPU_1] = &meson8b_vpu_1.hw, + [CLKID_VPU] = &meson8b_vpu.hw, + [CLKID_VDEC_1_SEL] = &meson8b_vdec_1_sel.hw, + [CLKID_VDEC_1_1_DIV] = &meson8b_vdec_1_1_div.hw, + [CLKID_VDEC_1_1] = &meson8b_vdec_1_1.hw, + [CLKID_VDEC_1_2_DIV] = &meson8b_vdec_1_2_div.hw, + [CLKID_VDEC_1_2] = &meson8b_vdec_1_2.hw, + [CLKID_VDEC_1] = &meson8b_vdec_1.hw, + [CLKID_VDEC_HCODEC_SEL] = &meson8b_vdec_hcodec_sel.hw, + [CLKID_VDEC_HCODEC_DIV] = &meson8b_vdec_hcodec_div.hw, + [CLKID_VDEC_HCODEC] = &meson8b_vdec_hcodec.hw, + [CLKID_VDEC_2_SEL] = &meson8b_vdec_2_sel.hw, + [CLKID_VDEC_2_DIV] = &meson8b_vdec_2_div.hw, + [CLKID_VDEC_2] = &meson8b_vdec_2.hw, + [CLKID_VDEC_HEVC_SEL] = &meson8b_vdec_hevc_sel.hw, + [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw, + [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw, + [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw, + [CLK_NR_CLKS] = NULL, + }, + .num = CLK_NR_CLKS, +}; + +static struct clk_hw_onecell_data meson8m2_hw_onecell_data = { + .hws = { + [CLKID_XTAL] = &meson8b_xtal.hw, + [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw, + [CLKID_PLL_VID] = &meson8b_vid_pll.hw, + [CLKID_PLL_SYS] = &meson8b_sys_pll.hw, + [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw, + [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw, + [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw, + [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw, + [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw, + [CLKID_CPUCLK] = &meson8b_cpu_clk.hw, + [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw, + [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw, + [CLKID_CLK81] = &meson8b_clk81.hw, + [CLKID_DDR] = &meson8b_ddr.hw, + [CLKID_DOS] = &meson8b_dos.hw, + [CLKID_ISA] = &meson8b_isa.hw, + [CLKID_PL301] = &meson8b_pl301.hw, + [CLKID_PERIPHS] = &meson8b_periphs.hw, + [CLKID_SPICC] = &meson8b_spicc.hw, + [CLKID_I2C] = &meson8b_i2c.hw, + [CLKID_SAR_ADC] = &meson8b_sar_adc.hw, + [CLKID_SMART_CARD] = &meson8b_smart_card.hw, + [CLKID_RNG0] = &meson8b_rng0.hw, + [CLKID_UART0] = &meson8b_uart0.hw, + [CLKID_SDHC] = &meson8b_sdhc.hw, + [CLKID_STREAM] = &meson8b_stream.hw, + [CLKID_ASYNC_FIFO] = &meson8b_async_fifo.hw, + [CLKID_SDIO] = &meson8b_sdio.hw, + [CLKID_ABUF] = &meson8b_abuf.hw, + [CLKID_HIU_IFACE] = &meson8b_hiu_iface.hw, + [CLKID_ASSIST_MISC] = &meson8b_assist_misc.hw, + [CLKID_SPI] = &meson8b_spi.hw, + [CLKID_I2S_SPDIF] = &meson8b_i2s_spdif.hw, + [CLKID_ETH] = &meson8b_eth.hw, + [CLKID_DEMUX] = &meson8b_demux.hw, + [CLKID_AIU_GLUE] = &meson8b_aiu_glue.hw, + [CLKID_IEC958] = &meson8b_iec958.hw, + [CLKID_I2S_OUT] = &meson8b_i2s_out.hw, + [CLKID_AMCLK] = &meson8b_amclk.hw, + [CLKID_AIFIFO2] = &meson8b_aififo2.hw, + [CLKID_MIXER] = &meson8b_mixer.hw, + [CLKID_MIXER_IFACE] = &meson8b_mixer_iface.hw, + [CLKID_ADC] = &meson8b_adc.hw, + [CLKID_BLKMV] = &meson8b_blkmv.hw, + [CLKID_AIU] = &meson8b_aiu.hw, + [CLKID_UART1] = &meson8b_uart1.hw, + [CLKID_G2D] = &meson8b_g2d.hw, + [CLKID_USB0] = &meson8b_usb0.hw, + [CLKID_USB1] = 
&meson8b_usb1.hw, + [CLKID_RESET] = &meson8b_reset.hw, + [CLKID_NAND] = &meson8b_nand.hw, + [CLKID_DOS_PARSER] = &meson8b_dos_parser.hw, + [CLKID_USB] = &meson8b_usb.hw, + [CLKID_VDIN1] = &meson8b_vdin1.hw, + [CLKID_AHB_ARB0] = &meson8b_ahb_arb0.hw, + [CLKID_EFUSE] = &meson8b_efuse.hw, + [CLKID_BOOT_ROM] = &meson8b_boot_rom.hw, + [CLKID_AHB_DATA_BUS] = &meson8b_ahb_data_bus.hw, + [CLKID_AHB_CTRL_BUS] = &meson8b_ahb_ctrl_bus.hw, + [CLKID_HDMI_INTR_SYNC] = &meson8b_hdmi_intr_sync.hw, + [CLKID_HDMI_PCLK] = &meson8b_hdmi_pclk.hw, + [CLKID_USB1_DDR_BRIDGE] = &meson8b_usb1_ddr_bridge.hw, + [CLKID_USB0_DDR_BRIDGE] = &meson8b_usb0_ddr_bridge.hw, + [CLKID_MMC_PCLK] = &meson8b_mmc_pclk.hw, + [CLKID_DVIN] = &meson8b_dvin.hw, + [CLKID_UART2] = &meson8b_uart2.hw, + [CLKID_SANA] = &meson8b_sana.hw, + [CLKID_VPU_INTR] = &meson8b_vpu_intr.hw, + [CLKID_SEC_AHB_AHB3_BRIDGE] = &meson8b_sec_ahb_ahb3_bridge.hw, + [CLKID_CLK81_A9] = &meson8b_clk81_a9.hw, + [CLKID_VCLK2_VENCI0] = &meson8b_vclk2_venci0.hw, + [CLKID_VCLK2_VENCI1] = &meson8b_vclk2_venci1.hw, + [CLKID_VCLK2_VENCP0] = &meson8b_vclk2_vencp0.hw, + [CLKID_VCLK2_VENCP1] = &meson8b_vclk2_vencp1.hw, + [CLKID_GCLK_VENCI_INT] = &meson8b_gclk_venci_int.hw, + [CLKID_GCLK_VENCP_INT] = &meson8b_gclk_vencp_int.hw, + [CLKID_DAC_CLK] = &meson8b_dac_clk.hw, + [CLKID_AOCLK_GATE] = &meson8b_aoclk_gate.hw, + [CLKID_IEC958_GATE] = &meson8b_iec958_gate.hw, + [CLKID_ENC480P] = &meson8b_enc480p.hw, + [CLKID_RNG1] = &meson8b_rng1.hw, + [CLKID_GCLK_VENCL_INT] = &meson8b_gclk_vencl_int.hw, + [CLKID_VCLK2_VENCLMCC] = &meson8b_vclk2_venclmcc.hw, + [CLKID_VCLK2_VENCL] = &meson8b_vclk2_vencl.hw, + [CLKID_VCLK2_OTHER] = &meson8b_vclk2_other.hw, + [CLKID_EDP] = &meson8b_edp.hw, + [CLKID_AO_MEDIA_CPU] = &meson8b_ao_media_cpu.hw, + [CLKID_AO_AHB_SRAM] = &meson8b_ao_ahb_sram.hw, + [CLKID_AO_AHB_BUS] = &meson8b_ao_ahb_bus.hw, + [CLKID_AO_IFACE] = &meson8b_ao_iface.hw, + [CLKID_MPLL0] = &meson8b_mpll0.hw, + [CLKID_MPLL1] = &meson8b_mpll1.hw, + [CLKID_MPLL2] = &meson8b_mpll2.hw, + [CLKID_MPLL0_DIV] = &meson8b_mpll0_div.hw, + [CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw, + [CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw, + [CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw, + [CLKID_CPU_IN_DIV2] = &meson8b_cpu_in_div2.hw, + [CLKID_CPU_IN_DIV3] = &meson8b_cpu_in_div3.hw, + [CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw, + [CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw, + [CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw, + [CLKID_FCLK_DIV2_DIV] = &meson8b_fclk_div2_div.hw, + [CLKID_FCLK_DIV3_DIV] = &meson8b_fclk_div3_div.hw, + [CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw, + [CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw, + [CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw, + [CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw, + [CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw, + [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw, + [CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw, + [CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw, + [CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw, + [CLKID_CPU_CLK_DIV2] = &meson8b_cpu_clk_div2.hw, + [CLKID_CPU_CLK_DIV3] = &meson8b_cpu_clk_div3.hw, + [CLKID_CPU_CLK_DIV4] = &meson8b_cpu_clk_div4.hw, + [CLKID_CPU_CLK_DIV5] = &meson8b_cpu_clk_div5.hw, + [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw, + [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw, + [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw, + [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw, + [CLKID_APB] = &meson8b_apb_clk_gate.hw, + [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw, + [CLKID_PERIPH] = 
&meson8b_periph_clk_gate.hw, + [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw, + [CLKID_AXI] = &meson8b_axi_clk_gate.hw, + [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw, + [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw, + [CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw, + [CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw, + [CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw, + [CLKID_VID_PLL_IN_EN] = &meson8b_vid_pll_in_en.hw, + [CLKID_VID_PLL_PRE_DIV] = &meson8b_vid_pll_pre_div.hw, + [CLKID_VID_PLL_POST_DIV] = &meson8b_vid_pll_post_div.hw, + [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw, + [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw, + [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw, + [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw, + [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw, + [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw, + [CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw, + [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw, + [CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw, + [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw, + [CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw, + [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw, + [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw, + [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw, + [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw, + [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw, + [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw, + [CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw, + [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw, + [CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw, + [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw, + [CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw, + [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw, + [CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw, + [CLKID_CTS_ENCT] = &meson8b_cts_enct.hw, + [CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw, + [CLKID_CTS_ENCP] = &meson8b_cts_encp.hw, + [CLKID_CTS_ENCI_SEL] = &meson8b_cts_enci_sel.hw, + [CLKID_CTS_ENCI] = &meson8b_cts_enci.hw, + [CLKID_HDMI_TX_PIXEL_SEL] = &meson8b_hdmi_tx_pixel_sel.hw, + [CLKID_HDMI_TX_PIXEL] = &meson8b_hdmi_tx_pixel.hw, + [CLKID_CTS_ENCL_SEL] = &meson8b_cts_encl_sel.hw, + [CLKID_CTS_ENCL] = &meson8b_cts_encl.hw, + [CLKID_CTS_VDAC0_SEL] = &meson8b_cts_vdac0_sel.hw, + [CLKID_CTS_VDAC0] = &meson8b_cts_vdac0.hw, + [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw, + [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw, + [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw, + [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw, + [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw, + [CLKID_MALI_0] = &meson8b_mali_0.hw, + [CLKID_MALI_1_SEL] = &meson8b_mali_1_sel.hw, + [CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw, + [CLKID_MALI_1] = &meson8b_mali_1.hw, + [CLKID_MALI] = &meson8b_mali.hw, + [CLKID_GP_PLL_DCO] = &meson8m2_gp_pll_dco.hw, + [CLKID_GP_PLL] = &meson8m2_gp_pll.hw, + [CLKID_VPU_0_SEL] = &meson8m2_vpu_0_sel.hw, + [CLKID_VPU_0_DIV] = &meson8b_vpu_0_div.hw, + [CLKID_VPU_0] = &meson8b_vpu_0.hw, + [CLKID_VPU_1_SEL] = &meson8m2_vpu_1_sel.hw, + [CLKID_VPU_1_DIV] = &meson8b_vpu_1_div.hw, + [CLKID_VPU_1] = &meson8b_vpu_1.hw, + [CLKID_VPU] = &meson8b_vpu.hw, + [CLKID_VDEC_1_SEL] = &meson8b_vdec_1_sel.hw, + [CLKID_VDEC_1_1_DIV] = &meson8b_vdec_1_1_div.hw, + [CLKID_VDEC_1_1] = &meson8b_vdec_1_1.hw, + [CLKID_VDEC_1_2_DIV] = &meson8b_vdec_1_2_div.hw, + [CLKID_VDEC_1_2] = &meson8b_vdec_1_2.hw, + [CLKID_VDEC_1] = &meson8b_vdec_1.hw, + [CLKID_VDEC_HCODEC_SEL] = &meson8b_vdec_hcodec_sel.hw, + 
[CLKID_VDEC_HCODEC_DIV] = &meson8b_vdec_hcodec_div.hw, + [CLKID_VDEC_HCODEC] = &meson8b_vdec_hcodec.hw, + [CLKID_VDEC_2_SEL] = &meson8b_vdec_2_sel.hw, + [CLKID_VDEC_2_DIV] = &meson8b_vdec_2_div.hw, + [CLKID_VDEC_2] = &meson8b_vdec_2.hw, + [CLKID_VDEC_HEVC_SEL] = &meson8b_vdec_hevc_sel.hw, + [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw, + [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw, + [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw, [CLK_NR_CLKS] = NULL, }, .num = CLK_NR_CLKS, @@ -2314,6 +3014,33 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = { &meson8b_mali_1_div, &meson8b_mali_1, &meson8b_mali, + &meson8m2_gp_pll_dco, + &meson8m2_gp_pll, + &meson8b_vpu_0_sel, + &meson8m2_vpu_0_sel, + &meson8b_vpu_0_div, + &meson8b_vpu_0, + &meson8b_vpu_1_sel, + &meson8m2_vpu_1_sel, + &meson8b_vpu_1_div, + &meson8b_vpu_1, + &meson8b_vpu, + &meson8b_vdec_1_sel, + &meson8b_vdec_1_1_div, + &meson8b_vdec_1_1, + &meson8b_vdec_1_2_div, + &meson8b_vdec_1_2, + &meson8b_vdec_1, + &meson8b_vdec_hcodec_sel, + &meson8b_vdec_hcodec_div, + &meson8b_vdec_hcodec, + &meson8b_vdec_2_sel, + &meson8b_vdec_2_div, + &meson8b_vdec_2, + &meson8b_vdec_hevc_sel, + &meson8b_vdec_hevc_div, + &meson8b_vdec_hevc_en, + &meson8b_vdec_hevc, }; static const struct meson8b_clk_reset_line { @@ -2558,9 +3285,14 @@ static void __init meson8b_clkc_init(struct device_node *np) return meson8b_clkc_init_common(np, &meson8b_hw_onecell_data); } +static void __init meson8m2_clkc_init(struct device_node *np) +{ + return meson8b_clkc_init_common(np, &meson8m2_hw_onecell_data); +} + CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc", meson8_clkc_init); CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc", meson8b_clkc_init); CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc", - meson8b_clkc_init); + meson8m2_clkc_init); diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index b8c58faeae52..ed37196187e6 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h @@ -19,6 +19,7 @@ * * [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf */ +#define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */ #define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */ #define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */ #define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */ @@ -34,7 +35,11 @@ #define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */ #define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */ #define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */ +#define HHI_VPU_CLK_CNTL 0x1bc /* 0x6f offset in data sheet */ #define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */ +#define HHI_VDEC_CLK_CNTL 0x1e0 /* 0x78 offset in data sheet */ +#define HHI_VDEC2_CLK_CNTL 0x1e4 /* 0x79 offset in data sheet */ +#define HHI_VDEC3_CLK_CNTL 0x1e8 /* 0x7a offset in data sheet */ #define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */ #define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */ #define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */ @@ -146,8 +151,28 @@ #define CLKID_MALI_1_SEL 178 #define CLKID_MALI_1_DIV 179 #define CLKID_MALI_1 180 +#define CLKID_GP_PLL_DCO 181 +#define CLKID_GP_PLL 182 +#define CLKID_VPU_0_SEL 183 +#define CLKID_VPU_0_DIV 184 +#define CLKID_VPU_0 185 +#define CLKID_VPU_1_SEL 186 +#define CLKID_VPU_1_DIV 187 +#define CLKID_VPU_1 189 +#define CLKID_VDEC_1_SEL 191 +#define CLKID_VDEC_1_1_DIV 192 +#define CLKID_VDEC_1_1 193 +#define CLKID_VDEC_1_2_DIV 194 +#define 
CLKID_VDEC_1_2 195 +#define CLKID_VDEC_HCODEC_SEL 197 +#define CLKID_VDEC_HCODEC_DIV 198 +#define CLKID_VDEC_2_SEL 200 +#define CLKID_VDEC_2_DIV 201 +#define CLKID_VDEC_HEVC_SEL 203 +#define CLKID_VDEC_HEVC_DIV 204 +#define CLKID_VDEC_HEVC_EN 205 -#define CLK_NR_CLKS 181 +#define CLK_NR_CLKS 207 /* * include the CLKID and RESETID that have diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c index 08bcc01c0923..daff235bc763 100644 --- a/drivers/clk/meson/vid-pll-div.c +++ b/drivers/clk/meson/vid-pll-div.c @@ -82,8 +82,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw, div = _get_table_val(meson_parm_read(clk->map, &pll_div->val), meson_parm_read(clk->map, &pll_div->sel)); if (!div || !div->divider) { - pr_info("%s: Invalid config value for vid_pll_div\n", __func__); - return parent_rate; + pr_debug("%s: Invalid config value for vid_pll_div\n", __func__); + return 0; } return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider); diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c index 6ab3c2e627c7..785dbede4835 100644 --- a/drivers/clk/mvebu/common.c +++ b/drivers/clk/mvebu/common.c @@ -240,7 +240,7 @@ void __init mvebu_clk_gating_setup(struct device_node *np, int n; if (ctrl) { - pr_err("mvebu-clk-gating: cannot instantiate more than one gatable clock device\n"); + pr_err("mvebu-clk-gating: cannot instantiate more than one gateable clock device\n"); return; } diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c index 9235a331b588..b6de283f45e3 100644 --- a/drivers/clk/mvebu/cp110-system-controller.c +++ b/drivers/clk/mvebu/cp110-system-controller.c @@ -21,7 +21,7 @@ * - Equal to SDIO clock * - 2/5 PLL0 * - * CP110 has 32 gatable clocks, for the various peripherals in the IP. + * CP110 has 32 gateable clocks, for the various peripherals in the IP. */ #define pr_fmt(fmt) "cp110-system-controller: " fmt @@ -57,7 +57,7 @@ enum { #define CP110_CORE_NAND 4 #define CP110_CORE_SDIO 5 -/* A number of gatable clocks need special handling */ +/* A number of gateable clocks need special handling */ #define CP110_GATE_AUDIO 0 #define CP110_GATE_COMM_UNIT 1 #define CP110_GATE_NAND 2 diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c index 27781b49eb82..5969f620607a 100644 --- a/drivers/clk/nxp/clk-lpc18xx-ccu.c +++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c @@ -142,7 +142,7 @@ static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable) * Divider field is write only, so divider stat field must * be read so divider field can be set accordingly. */ - val = clk_readl(gate->reg); + val = readl(gate->reg); if (val & LPC18XX_CCU_DIVSTAT) val |= LPC18XX_CCU_DIV; @@ -155,12 +155,12 @@ static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable) * and the next write should clear the RUN bit. 
*/ val |= LPC18XX_CCU_AUTO; - clk_writel(val, gate->reg); + writel(val, gate->reg); val &= ~LPC18XX_CCU_RUN; } - clk_writel(val, gate->reg); + writel(val, gate->reg); return 0; } diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c index 2531174b399e..f5bc8bd192b7 100644 --- a/drivers/clk/nxp/clk-lpc18xx-cgu.c +++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c @@ -352,9 +352,9 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw, struct lpc18xx_pll *pll = to_lpc_pll(hw); u32 ctrl, mdiv, msel, npdiv; - ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL); - mdiv = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_MDIV); - npdiv = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV); + ctrl = readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL); + mdiv = readl(pll->reg + LPC18XX_CGU_PLL0USB_MDIV); + npdiv = readl(pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV); if (ctrl & LPC18XX_PLL0_CTRL_BYPASS) return parent_rate; @@ -415,25 +415,25 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, m |= lpc18xx_pll0_msel2seli(m) << LPC18XX_PLL0_MDIV_SELI_SHIFT; /* Power down PLL, disable clk output and dividers */ - ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL); + ctrl = readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL); ctrl |= LPC18XX_PLL0_CTRL_PD; ctrl &= ~(LPC18XX_PLL0_CTRL_BYPASS | LPC18XX_PLL0_CTRL_DIRECTI | LPC18XX_PLL0_CTRL_DIRECTO | LPC18XX_PLL0_CTRL_CLKEN); - clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL); + writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL); /* Configure new PLL settings */ - clk_writel(m, pll->reg + LPC18XX_CGU_PLL0USB_MDIV); - clk_writel(LPC18XX_PLL0_NP_DIVS_1, pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV); + writel(m, pll->reg + LPC18XX_CGU_PLL0USB_MDIV); + writel(LPC18XX_PLL0_NP_DIVS_1, pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV); /* Power up PLL and wait for lock */ ctrl &= ~LPC18XX_PLL0_CTRL_PD; - clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL); + writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL); do { udelay(10); - stat = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_STAT); + stat = readl(pll->reg + LPC18XX_CGU_PLL0USB_STAT); if (stat & LPC18XX_PLL0_STAT_LOCK) { ctrl |= LPC18XX_PLL0_CTRL_CLKEN; - clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL); + writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL); return 0; } @@ -458,8 +458,8 @@ static unsigned long lpc18xx_pll1_recalc_rate(struct clk_hw *hw, bool direct, fbsel; u32 stat, ctrl; - stat = clk_readl(pll->reg + LPC18XX_CGU_PLL1_STAT); - ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL1_CTRL); + stat = readl(pll->reg + LPC18XX_CGU_PLL1_STAT); + ctrl = readl(pll->reg + LPC18XX_CGU_PLL1_CTRL); direct = (ctrl & LPC18XX_PLL1_CTRL_DIRECT) ? true : false; fbsel = (ctrl & LPC18XX_PLL1_CTRL_FBSEL) ? 
true : false; diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c index 5eeecee17b69..7524d19fe60b 100644 --- a/drivers/clk/nxp/clk-lpc32xx.c +++ b/drivers/clk/nxp/clk-lpc32xx.c @@ -1085,13 +1085,12 @@ struct clk_hw_proto { }; }; -#define LPC32XX_DEFINE_FIXED(_idx, _rate, _flags) \ +#define LPC32XX_DEFINE_FIXED(_idx, _rate) \ [CLK_PREFIX(_idx)] = { \ .type = CLK_FIXED, \ { \ .f = { \ .fixed_rate = (_rate), \ - .flags = (_flags), \ }, \ }, \ } @@ -1225,7 +1224,7 @@ struct clk_hw_proto { } static struct clk_hw_proto clk_hw_proto[LPC32XX_CLK_HW_MAX] = { - LPC32XX_DEFINE_FIXED(RTC, 32768, 0), + LPC32XX_DEFINE_FIXED(RTC, 32768), LPC32XX_DEFINE_PLL(PLL397X, pll_397x, HCLKPLL_CTRL, BIT(1)), LPC32XX_DEFINE_PLL(HCLK_PLL, hclk_pll, HCLKPLL_CTRL, PLL_CTRL_ENABLE), LPC32XX_DEFINE_PLL(USB_PLL, usb_pll, USB_CTRL, PLL_CTRL_ENABLE), @@ -1468,7 +1467,7 @@ static struct clk * __init lpc32xx_clk_register(u32 id) struct clk_fixed_rate *fixed = &clk_hw->f; clk = clk_register_fixed_rate(NULL, lpc32xx_clk->name, - parents[0], fixed->flags, fixed->fixed_rate); + parents[0], 0, fixed->fixed_rate); break; } default: diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 1c04575c118f..18bdf34d5e64 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -243,6 +243,12 @@ config SDM_GCC_660 Say Y if you want to use peripheral devices such as UART, SPI, i2C, USB, UFS, SDDC, PCIe, etc. +config QCS_TURING_404 + tristate "QCS404 Turing Clock Controller" + help + Support for the Turing Clock Controller on QCS404, provides clocks + and resets for the Turing subsystem. + config SDM_GCC_845 tristate "SDM845 Global Clock Controller" select QCOM_GDSC diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index ee8d0698e370..f0768fb1f037 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o +obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c index 99446bf630aa..f869fc6aaed6 100644 --- a/drivers/clk/qcom/clk-branch.c +++ b/drivers/clk/qcom/clk-branch.c @@ -146,6 +146,12 @@ const struct clk_ops clk_branch2_ops = { }; EXPORT_SYMBOL_GPL(clk_branch2_ops); +const struct clk_ops clk_branch2_aon_ops = { + .enable = clk_branch2_enable, + .is_enabled = clk_is_enabled_regmap, +}; +EXPORT_SYMBOL_GPL(clk_branch2_aon_ops); + const struct clk_ops clk_branch_simple_ops = { .enable = clk_enable_regmap, .disable = clk_disable_regmap, diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h index b3561e0a3984..17a58119165e 100644 --- a/drivers/clk/qcom/clk-branch.h +++ b/drivers/clk/qcom/clk-branch.h @@ -40,6 +40,7 @@ struct clk_branch { extern const struct clk_ops clk_branch_ops; extern const struct clk_ops clk_branch2_ops; extern const struct clk_ops clk_branch_simple_ops; +extern const struct clk_ops clk_branch2_aon_ops; #define to_clk_branch(_hw) \ container_of(to_clk_regmap(_hw), struct clk_branch, clkr) diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c index c240fba794c7..033688264c7b 100644 --- a/drivers/clk/qcom/gcc-msm8998.c +++ b/drivers/clk/qcom/gcc-msm8998.c @@ -2161,7 +2161,7 @@ static struct clk_branch 
gcc_pcie_0_mstr_axi_clk = { static struct clk_branch gcc_pcie_0_pipe_clk = { .halt_reg = 0x6b018, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x6b018, .enable_mask = BIT(0), diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c index 5a62f64ada93..a54807eb3b28 100644 --- a/drivers/clk/qcom/gcc-qcs404.c +++ b/drivers/clk/qcom/gcc-qcs404.c @@ -260,6 +260,20 @@ static const char * const gcc_parent_names_15[] = { "core_bi_pll_test_se", }; +static const struct parent_map gcc_parent_map_16[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_AUX, 2 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_16[] = { + "cxo", + "gpll0_out_main", + "gpll0_out_aux", + "core_bi_pll_test_se", +}; + static struct clk_fixed_factor cxo = { .mult = 1, .div = 1, @@ -1194,6 +1208,28 @@ static struct clk_rcg2 vsync_clk_src = { }, }; +static const struct freq_tbl ftbl_cdsp_bimc_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 cdsp_bimc_clk_src = { + .cmd_rcgr = 0x5e010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_16, + .freq_tbl = ftbl_cdsp_bimc_clk_src, + .clkr.hw.init = &(struct clk_init_data) { + .name = "cdsp_bimc_clk_src", + .parent_names = gcc_parent_names_16, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + static struct clk_branch gcc_apss_ahb_clk = { .halt_reg = 0x4601c, .halt_check = BRANCH_HALT_VOTED, @@ -1255,6 +1291,24 @@ static struct clk_branch gcc_bimc_gpu_clk = { }, }; +static struct clk_branch gcc_bimc_cdsp_clk = { + .halt_reg = 0x31030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x31030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) { + .name = "gcc_bimc_cdsp_clk", + .parent_names = (const char *[]) { + "cdsp_bimc_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_bimc_mdss_clk = { .halt_reg = 0x31038, .halt_check = BRANCH_HALT, @@ -1792,6 +1846,24 @@ static struct clk_branch gcc_gfx_tbu_clk = { }, }; +static struct clk_branch gcc_cdsp_tbu_clk = { + .halt_reg = 0x1203c, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x13020, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data) { + .name = "gcc_cdsp_tbu_clk", + .parent_names = (const char *[]) { + "cdsp_bimc_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_gp1_clk = { .halt_reg = 0x8000, .halt_check = BRANCH_HALT, @@ -2304,6 +2376,19 @@ static struct clk_branch gcc_sdcc1_ice_core_clk = { }, }; +static struct clk_branch gcc_cdsp_cfg_ahb_clk = { + .halt_reg = 0x5e004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5e004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) { + .name = "gcc_cdsp_cfg_ahb_cbcr", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_sdcc2_ahb_clk = { .halt_reg = 0x4301c, .halt_check = BRANCH_HALT, @@ -2548,6 +2633,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = { [GCC_ESC0_CLK_SRC] = &esc0_clk_src.clkr, [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr, [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr, + [GCC_BIMC_CDSP_CLK] = &gcc_bimc_cdsp_clk.clkr, [GCC_BIMC_MDSS_CLK] = &gcc_bimc_mdss_clk.clkr, [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr, [GCC_BLSP1_QUP0_I2C_APPS_CLK] 
= &gcc_blsp1_qup0_i2c_apps_clk.clkr, @@ -2605,6 +2691,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = { [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr, + [GCC_CDSP_CFG_AHB_CLK] = &gcc_cdsp_cfg_ahb_clk.clkr, [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr, [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, [GCC_SYS_NOC_USB3_CLK] = &gcc_sys_noc_usb3_clk.clkr, @@ -2645,6 +2732,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = { [GCC_USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr, [GCC_USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr, [GCC_VSYNC_CLK_SRC] = &vsync_clk_src.clkr, + [GCC_CDSP_BIMC_CLK_SRC] = &cdsp_bimc_clk_src.clkr, [GCC_USB_HS_INACTIVITY_TIMERS_CLK] = &gcc_usb_hs_inactivity_timers_clk.clkr, [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr, @@ -2653,6 +2741,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = { [GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr, [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr, [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr, + [GCC_CDSP_TBU_CLK] = &gcc_cdsp_tbu_clk.clkr, [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr, [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr, [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr, @@ -2664,6 +2753,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = { static const struct qcom_reset_map gcc_qcs404_resets[] = { [GCC_GENI_IR_BCR] = { 0x0F000 }, + [GCC_CDSP_RESTART] = { 0x18000 }, [GCC_USB_HS_BCR] = { 0x41000 }, [GCC_USB2_HS_PHY_ONLY_BCR] = { 0x41034 }, [GCC_QUSB2_PHY_BCR] = { 0x4103c }, diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c new file mode 100644 index 000000000000..aa859e6ec9bd --- /dev/null +++ b/drivers/clk/qcom/turingcc-qcs404.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019, Linaro Ltd. 
+ */ + +#include <linux/bitops.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/pm_clock.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,turingcc-qcs404.h> + +#include "clk-regmap.h" +#include "clk-branch.h" +#include "common.h" +#include "reset.h" + +static struct clk_branch turing_wrapper_aon_cbcr = { + .halt_reg = 0x5098, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5098, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) { + .name = "turing_wrapper_aon_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch turing_q6ss_ahbm_aon_cbcr = { + .halt_reg = 0x9000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) { + .name = "turing_q6ss_ahbm_aon_cbcr", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch turing_q6ss_q6_axim_clk = { + .halt_reg = 0xb000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) { + .name = "turing_q6ss_q6_axim_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch turing_q6ss_ahbs_aon_cbcr = { + .halt_reg = 0x10000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) { + .name = "turing_q6ss_ahbs_aon_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch turing_wrapper_qos_ahbs_aon_cbcr = { + .halt_reg = 0x11014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) { + .name = "turing_wrapper_qos_ahbs_aon_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_regmap *turingcc_clocks[] = { + [TURING_WRAPPER_AON_CLK] = &turing_wrapper_aon_cbcr.clkr, + [TURING_Q6SS_AHBM_AON_CLK] = &turing_q6ss_ahbm_aon_cbcr.clkr, + [TURING_Q6SS_Q6_AXIM_CLK] = &turing_q6ss_q6_axim_clk.clkr, + [TURING_Q6SS_AHBS_AON_CLK] = &turing_q6ss_ahbs_aon_cbcr.clkr, + [TURING_WRAPPER_QOS_AHBS_AON_CLK] = &turing_wrapper_qos_ahbs_aon_cbcr.clkr, +}; + +static const struct regmap_config turingcc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x30000, + .fast_io = true, +}; + +static const struct qcom_cc_desc turingcc_desc = { + .config = &turingcc_regmap_config, + .clks = turingcc_clocks, + .num_clks = ARRAY_SIZE(turingcc_clocks), +}; + +static int turingcc_probe(struct platform_device *pdev) +{ + int ret; + + pm_runtime_enable(&pdev->dev); + ret = pm_clk_create(&pdev->dev); + if (ret) + goto disable_pm_runtime; + + ret = pm_clk_add(&pdev->dev, NULL); + if (ret < 0) { + dev_err(&pdev->dev, "failed to acquire iface clock\n"); + goto destroy_pm_clk; + } + + ret = qcom_cc_probe(pdev, &turingcc_desc); + if (ret < 0) + goto destroy_pm_clk; + + return 0; + +destroy_pm_clk: + pm_clk_destroy(&pdev->dev); + +disable_pm_runtime: + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static int turingcc_remove(struct platform_device *pdev) +{ + pm_clk_destroy(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct dev_pm_ops turingcc_pm_ops = { + SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL) +}; + +static const struct of_device_id turingcc_match_table[] = { + { .compatible = "qcom,qcs404-turingcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, turingcc_match_table); + +static 
struct platform_driver turingcc_driver = { + .probe = turingcc_probe, + .remove = turingcc_remove, + .driver = { + .name = "qcs404-turingcc", + .of_match_table = turingcc_match_table, + .pm = &turingcc_pm_ops, + }, +}; + +module_platform_driver(turingcc_driver); + +MODULE_DESCRIPTION("Qualcomm QCS404 Turing Clock Controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c index 57c49fe88295..cf65d4e0e116 100644 --- a/drivers/clk/renesas/r7s9210-cpg-mssr.c +++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c @@ -11,6 +11,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> +#include <linux/io.h> #include <dt-bindings/clock/r7s9210-cpg-mssr.h> #include "renesas-cpg-mssr.h" @@ -119,7 +120,7 @@ static void __init r7s9210_update_clk_table(struct clk *extal_clk, if (clk_get_rate(extal_clk) > 12000000) cpg_mode = 1; - frqcr = clk_readl(base + CPG_FRQCR) & 0xFFF; + frqcr = readl(base + CPG_FRQCR) & 0xFFF; if (frqcr == 0x012) index = 0; else if (frqcr == 0x112) diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c index 4d92b27a6153..76ed7d1bae36 100644 --- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c @@ -71,8 +71,8 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = { DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), /* Core Clock Outputs */ - DEF_BASE("z", R8A774A1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0), - DEF_BASE("z2", R8A774A1_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2), + DEF_GEN3_Z("z", R8A774A1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8), + DEF_GEN3_Z("z2", R8A774A1_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0), DEF_FIXED("ztr", R8A774A1_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), DEF_FIXED("ztrd2", R8A774A1_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), DEF_FIXED("zt", R8A774A1_CLK_ZT, CLK_PLL1_DIV2, 4, 1), @@ -123,8 +123,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = { DEF_MOD("msiof2", 209, R8A774A1_CLK_MSO), DEF_MOD("msiof1", 210, R8A774A1_CLK_MSO), DEF_MOD("msiof0", 211, R8A774A1_CLK_MSO), - DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S0D3), - DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S0D3), + DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S3D1), + DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S3D1), DEF_MOD("sys-dmac0", 219, R8A774A1_CLK_S0D3), DEF_MOD("cmt3", 300, R8A774A1_CLK_R), DEF_MOD("cmt2", 301, R8A774A1_CLK_R), @@ -143,8 +143,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = { DEF_MOD("rwdt", 402, R8A774A1_CLK_R), DEF_MOD("intc-ex", 407, R8A774A1_CLK_CP), DEF_MOD("intc-ap", 408, R8A774A1_CLK_S0D3), - DEF_MOD("audmac1", 501, R8A774A1_CLK_S0D3), - DEF_MOD("audmac0", 502, R8A774A1_CLK_S0D3), + DEF_MOD("audmac1", 501, R8A774A1_CLK_S1D2), + DEF_MOD("audmac0", 502, R8A774A1_CLK_S1D2), DEF_MOD("hscif4", 516, R8A774A1_CLK_S3D1), DEF_MOD("hscif3", 517, R8A774A1_CLK_S3D1), DEF_MOD("hscif2", 518, R8A774A1_CLK_S3D1), @@ -165,9 +165,9 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = { DEF_MOD("vspd0", 623, R8A774A1_CLK_S0D2), DEF_MOD("vspb", 626, R8A774A1_CLK_S0D1), DEF_MOD("vspi0", 631, R8A774A1_CLK_S0D1), - DEF_MOD("ehci1", 702, R8A774A1_CLK_S3D4), - DEF_MOD("ehci0", 703, R8A774A1_CLK_S3D4), - DEF_MOD("hsusb", 704, R8A774A1_CLK_S3D4), + DEF_MOD("ehci1", 702, R8A774A1_CLK_S3D2), + DEF_MOD("ehci0", 703, R8A774A1_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A774A1_CLK_S3D2), DEF_MOD("csi20", 714, R8A774A1_CLK_CSI0), DEF_MOD("csi40", 716, R8A774A1_CLK_CSI0), DEF_MOD("du2", 722, R8A774A1_CLK_S2D1), diff --git 
a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c index 34e274f2a273..f91e7a484753 100644 --- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c @@ -81,6 +81,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = { /* Core Clock Outputs */ DEF_FIXED("za2", R8A774C0_CLK_ZA2, CLK_PLL0D24, 1, 1), DEF_FIXED("za8", R8A774C0_CLK_ZA8, CLK_PLL0D8, 1, 1), + DEF_GEN3_Z("z2", R8A774C0_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL0, 4, 8), DEF_FIXED("ztr", R8A774C0_CLK_ZTR, CLK_PLL1, 6, 1), DEF_FIXED("zt", R8A774C0_CLK_ZT, CLK_PLL1, 4, 1), DEF_FIXED("zx", R8A774C0_CLK_ZX, CLK_PLL1, 3, 1), @@ -157,7 +158,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = { DEF_MOD("intc-ex", 407, R8A774C0_CLK_CP), DEF_MOD("intc-ap", 408, R8A774C0_CLK_S0D3), - DEF_MOD("audmac0", 502, R8A774C0_CLK_S3D4), + DEF_MOD("audmac0", 502, R8A774C0_CLK_S1D2), DEF_MOD("hscif4", 516, R8A774C0_CLK_S3D1C), DEF_MOD("hscif3", 517, R8A774C0_CLK_S3D1C), DEF_MOD("hscif2", 518, R8A774C0_CLK_S3D1C), @@ -177,8 +178,8 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = { DEF_MOD("vspb", 626, R8A774C0_CLK_S0D1), DEF_MOD("vspi0", 631, R8A774C0_CLK_S0D1), - DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4), - DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4), + DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D2), DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0), DEF_MOD("du1", 723, R8A774C0_CLK_S1D1), DEF_MOD("du0", 724, R8A774C0_CLK_S1D1), diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index 86842c9fd314..9e9a6f2c31e8 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -3,6 +3,7 @@ * r8a7795 Clock Pulse Generator / Module Standby and Software Reset * * Copyright (C) 2015 Glider bvba + * Copyright (C) 2018-2019 Renesas Electronics Corp. 
* * Based on clk-rcar-gen3.c * @@ -73,8 +74,8 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = { DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), /* Core Clock Outputs */ - DEF_BASE("z", R8A7795_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0), - DEF_BASE("z2", R8A7795_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2), + DEF_GEN3_Z("z", R8A7795_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8), + DEF_GEN3_Z("z2", R8A7795_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0), DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), DEF_FIXED("ztrd2", R8A7795_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), DEF_FIXED("zt", R8A7795_CLK_ZT, CLK_PLL1_DIV2, 4, 1), @@ -129,8 +130,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { DEF_MOD("msiof2", 209, R8A7795_CLK_MSO), DEF_MOD("msiof1", 210, R8A7795_CLK_MSO), DEF_MOD("msiof0", 211, R8A7795_CLK_MSO), - DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3), - DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3), + DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S3D1), + DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1), DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3), DEF_MOD("sceg-pub", 229, R8A7795_CLK_CR), DEF_MOD("cmt3", 300, R8A7795_CLK_R), @@ -153,16 +154,16 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { DEF_MOD("rwdt", 402, R8A7795_CLK_R), DEF_MOD("intc-ex", 407, R8A7795_CLK_CP), DEF_MOD("intc-ap", 408, R8A7795_CLK_S0D3), - DEF_MOD("audmac1", 501, R8A7795_CLK_S0D3), - DEF_MOD("audmac0", 502, R8A7795_CLK_S0D3), - DEF_MOD("drif7", 508, R8A7795_CLK_S3D2), - DEF_MOD("drif6", 509, R8A7795_CLK_S3D2), - DEF_MOD("drif5", 510, R8A7795_CLK_S3D2), - DEF_MOD("drif4", 511, R8A7795_CLK_S3D2), - DEF_MOD("drif3", 512, R8A7795_CLK_S3D2), - DEF_MOD("drif2", 513, R8A7795_CLK_S3D2), - DEF_MOD("drif1", 514, R8A7795_CLK_S3D2), - DEF_MOD("drif0", 515, R8A7795_CLK_S3D2), + DEF_MOD("audmac1", 501, R8A7795_CLK_S1D2), + DEF_MOD("audmac0", 502, R8A7795_CLK_S1D2), + DEF_MOD("drif31", 508, R8A7795_CLK_S3D2), + DEF_MOD("drif30", 509, R8A7795_CLK_S3D2), + DEF_MOD("drif21", 510, R8A7795_CLK_S3D2), + DEF_MOD("drif20", 511, R8A7795_CLK_S3D2), + DEF_MOD("drif11", 512, R8A7795_CLK_S3D2), + DEF_MOD("drif10", 513, R8A7795_CLK_S3D2), + DEF_MOD("drif01", 514, R8A7795_CLK_S3D2), + DEF_MOD("drif00", 515, R8A7795_CLK_S3D2), DEF_MOD("hscif4", 516, R8A7795_CLK_S3D1), DEF_MOD("hscif3", 517, R8A7795_CLK_S3D1), DEF_MOD("hscif2", 518, R8A7795_CLK_S3D1), @@ -194,12 +195,12 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1), /* ES1.x */ DEF_MOD("vspi1", 630, R8A7795_CLK_S0D1), DEF_MOD("vspi0", 631, R8A7795_CLK_S0D1), - DEF_MOD("ehci3", 700, R8A7795_CLK_S3D4), - DEF_MOD("ehci2", 701, R8A7795_CLK_S3D4), - DEF_MOD("ehci1", 702, R8A7795_CLK_S3D4), - DEF_MOD("ehci0", 703, R8A7795_CLK_S3D4), - DEF_MOD("hsusb", 704, R8A7795_CLK_S3D4), - DEF_MOD("hsusb3", 705, R8A7795_CLK_S3D4), + DEF_MOD("ehci3", 700, R8A7795_CLK_S3D2), + DEF_MOD("ehci2", 701, R8A7795_CLK_S3D2), + DEF_MOD("ehci1", 702, R8A7795_CLK_S3D2), + DEF_MOD("ehci0", 703, R8A7795_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A7795_CLK_S3D2), + DEF_MOD("hsusb3", 705, R8A7795_CLK_S3D2), DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), /* ES1.x */ DEF_MOD("csi20", 714, R8A7795_CLK_CSI0), DEF_MOD("csi41", 715, R8A7795_CLK_CSI0), diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c index 12c455859f2c..d8e9af5d9ae9 100644 --- a/drivers/clk/renesas/r8a7796-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c @@ -3,6 +3,7 @@ * r8a7796 Clock Pulse Generator / Module Standby and Software Reset * * Copyright (C) 2016 Glider bvba + * 
Copyright (C) 2018 Renesas Electronics Corp. * * Based on r8a7795-cpg-mssr.c * @@ -73,8 +74,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), /* Core Clock Outputs */ - DEF_BASE("z", R8A7796_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0), - DEF_BASE("z2", R8A7796_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2), + DEF_GEN3_Z("z", R8A7796_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8), + DEF_GEN3_Z("z2", R8A7796_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0), DEF_FIXED("ztr", R8A7796_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), DEF_FIXED("ztrd2", R8A7796_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), DEF_FIXED("zt", R8A7796_CLK_ZT, CLK_PLL1_DIV2, 4, 1), @@ -126,8 +127,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { DEF_MOD("msiof2", 209, R8A7796_CLK_MSO), DEF_MOD("msiof1", 210, R8A7796_CLK_MSO), DEF_MOD("msiof0", 211, R8A7796_CLK_MSO), - DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S0D3), - DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S0D3), + DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S3D1), + DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S3D1), DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3), DEF_MOD("cmt3", 300, R8A7796_CLK_R), DEF_MOD("cmt2", 301, R8A7796_CLK_R), @@ -146,16 +147,16 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { DEF_MOD("rwdt", 402, R8A7796_CLK_R), DEF_MOD("intc-ex", 407, R8A7796_CLK_CP), DEF_MOD("intc-ap", 408, R8A7796_CLK_S0D3), - DEF_MOD("audmac1", 501, R8A7796_CLK_S0D3), - DEF_MOD("audmac0", 502, R8A7796_CLK_S0D3), - DEF_MOD("drif7", 508, R8A7796_CLK_S3D2), - DEF_MOD("drif6", 509, R8A7796_CLK_S3D2), - DEF_MOD("drif5", 510, R8A7796_CLK_S3D2), - DEF_MOD("drif4", 511, R8A7796_CLK_S3D2), - DEF_MOD("drif3", 512, R8A7796_CLK_S3D2), - DEF_MOD("drif2", 513, R8A7796_CLK_S3D2), - DEF_MOD("drif1", 514, R8A7796_CLK_S3D2), - DEF_MOD("drif0", 515, R8A7796_CLK_S3D2), + DEF_MOD("audmac1", 501, R8A7796_CLK_S1D2), + DEF_MOD("audmac0", 502, R8A7796_CLK_S1D2), + DEF_MOD("drif31", 508, R8A7796_CLK_S3D2), + DEF_MOD("drif30", 509, R8A7796_CLK_S3D2), + DEF_MOD("drif21", 510, R8A7796_CLK_S3D2), + DEF_MOD("drif20", 511, R8A7796_CLK_S3D2), + DEF_MOD("drif11", 512, R8A7796_CLK_S3D2), + DEF_MOD("drif10", 513, R8A7796_CLK_S3D2), + DEF_MOD("drif01", 514, R8A7796_CLK_S3D2), + DEF_MOD("drif00", 515, R8A7796_CLK_S3D2), DEF_MOD("hscif4", 516, R8A7796_CLK_S3D1), DEF_MOD("hscif3", 517, R8A7796_CLK_S3D1), DEF_MOD("hscif2", 518, R8A7796_CLK_S3D1), @@ -176,9 +177,9 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { DEF_MOD("vspd0", 623, R8A7796_CLK_S0D2), DEF_MOD("vspb", 626, R8A7796_CLK_S0D1), DEF_MOD("vspi0", 631, R8A7796_CLK_S0D1), - DEF_MOD("ehci1", 702, R8A7796_CLK_S3D4), - DEF_MOD("ehci0", 703, R8A7796_CLK_S3D4), - DEF_MOD("hsusb", 704, R8A7796_CLK_S3D4), + DEF_MOD("ehci1", 702, R8A7796_CLK_S3D2), + DEF_MOD("ehci0", 703, R8A7796_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A7796_CLK_S3D2), DEF_MOD("csi20", 714, R8A7796_CLK_CSI0), DEF_MOD("csi40", 716, R8A7796_CLK_CSI0), DEF_MOD("du2", 722, R8A7796_CLK_S2D1), diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c index eb1cca58a1e1..8f87e314d949 100644 --- a/drivers/clk/renesas/r8a77965-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c @@ -3,6 +3,7 @@ * r8a77965 Clock Pulse Generator / Module Standby and Software Reset * * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org> + * Copyright (C) 2019 Renesas Electronics Corp. 
* * Based on r8a7795-cpg-mssr.c * @@ -71,7 +72,7 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = { DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), /* Core Clock Outputs */ - DEF_BASE("z", R8A77965_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0), + DEF_GEN3_Z("z", R8A77965_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8), DEF_FIXED("ztr", R8A77965_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), DEF_FIXED("ztrd2", R8A77965_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), DEF_FIXED("zt", R8A77965_CLK_ZT, CLK_PLL1_DIV2, 4, 1), @@ -123,8 +124,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = { DEF_MOD("msiof2", 209, R8A77965_CLK_MSO), DEF_MOD("msiof1", 210, R8A77965_CLK_MSO), DEF_MOD("msiof0", 211, R8A77965_CLK_MSO), - DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S0D3), - DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S0D3), + DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S3D1), + DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S3D1), DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3), DEF_MOD("cmt3", 300, R8A77965_CLK_R), @@ -146,16 +147,16 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = { DEF_MOD("intc-ex", 407, R8A77965_CLK_CP), DEF_MOD("intc-ap", 408, R8A77965_CLK_S0D3), - DEF_MOD("audmac1", 501, R8A77965_CLK_S0D3), - DEF_MOD("audmac0", 502, R8A77965_CLK_S0D3), - DEF_MOD("drif7", 508, R8A77965_CLK_S3D2), - DEF_MOD("drif6", 509, R8A77965_CLK_S3D2), - DEF_MOD("drif5", 510, R8A77965_CLK_S3D2), - DEF_MOD("drif4", 511, R8A77965_CLK_S3D2), - DEF_MOD("drif3", 512, R8A77965_CLK_S3D2), - DEF_MOD("drif2", 513, R8A77965_CLK_S3D2), - DEF_MOD("drif1", 514, R8A77965_CLK_S3D2), - DEF_MOD("drif0", 515, R8A77965_CLK_S3D2), + DEF_MOD("audmac1", 501, R8A77965_CLK_S1D2), + DEF_MOD("audmac0", 502, R8A77965_CLK_S1D2), + DEF_MOD("drif31", 508, R8A77965_CLK_S3D2), + DEF_MOD("drif30", 509, R8A77965_CLK_S3D2), + DEF_MOD("drif21", 510, R8A77965_CLK_S3D2), + DEF_MOD("drif20", 511, R8A77965_CLK_S3D2), + DEF_MOD("drif11", 512, R8A77965_CLK_S3D2), + DEF_MOD("drif10", 513, R8A77965_CLK_S3D2), + DEF_MOD("drif01", 514, R8A77965_CLK_S3D2), + DEF_MOD("drif00", 515, R8A77965_CLK_S3D2), DEF_MOD("hscif4", 516, R8A77965_CLK_S3D1), DEF_MOD("hscif3", 517, R8A77965_CLK_S3D1), DEF_MOD("hscif2", 518, R8A77965_CLK_S3D1), @@ -175,9 +176,9 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = { DEF_MOD("vspb", 626, R8A77965_CLK_S0D1), DEF_MOD("vspi0", 631, R8A77965_CLK_S0D1), - DEF_MOD("ehci1", 702, R8A77965_CLK_S3D4), - DEF_MOD("ehci0", 703, R8A77965_CLK_S3D4), - DEF_MOD("hsusb", 704, R8A77965_CLK_S3D4), + DEF_MOD("ehci1", 702, R8A77965_CLK_S3D2), + DEF_MOD("ehci0", 703, R8A77965_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A77965_CLK_S3D2), DEF_MOD("csi20", 714, R8A77965_CLK_CSI0), DEF_MOD("csi40", 716, R8A77965_CLK_CSI0), DEF_MOD("du3", 721, R8A77965_CLK_S2D1), diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c index f9e07fcc0d96..7227f675e61f 100644 --- a/drivers/clk/renesas/r8a77980-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c @@ -171,7 +171,7 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = { DEF_MOD("gpio1", 911, R8A77980_CLK_CP), DEF_MOD("gpio0", 912, R8A77980_CLK_CP), DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2), - DEF_MOD("rpc-if", 917, R8A77980_CLK_RPC), + DEF_MOD("rpc-if", 917, R8A77980_CLK_RPCD2), DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6), DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6), DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2), diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c index 9a278c75c918..9570404baa58 100644 --- 
a/drivers/clk/renesas/r8a77990-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c @@ -2,7 +2,7 @@ /* * r8a77990 Clock Pulse Generator / Module Standby and Software Reset * - * Copyright (C) 2018 Renesas Electronics Corp. + * Copyright (C) 2018-2019 Renesas Electronics Corp. * * Based on r8a7795-cpg-mssr.c * @@ -81,6 +81,7 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = { /* Core Clock Outputs */ DEF_FIXED("za2", R8A77990_CLK_ZA2, CLK_PLL0D24, 1, 1), DEF_FIXED("za8", R8A77990_CLK_ZA8, CLK_PLL0D8, 1, 1), + DEF_GEN3_Z("z2", R8A77990_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL0, 4, 8), DEF_FIXED("ztr", R8A77990_CLK_ZTR, CLK_PLL1, 6, 1), DEF_FIXED("zt", R8A77990_CLK_ZT, CLK_PLL1, 4, 1), DEF_FIXED("zx", R8A77990_CLK_ZX, CLK_PLL1, 3, 1), @@ -152,15 +153,15 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = { DEF_MOD("intc-ex", 407, R8A77990_CLK_CP), DEF_MOD("intc-ap", 408, R8A77990_CLK_S0D3), - DEF_MOD("audmac0", 502, R8A77990_CLK_S3D4), - DEF_MOD("drif7", 508, R8A77990_CLK_S3D2), - DEF_MOD("drif6", 509, R8A77990_CLK_S3D2), - DEF_MOD("drif5", 510, R8A77990_CLK_S3D2), - DEF_MOD("drif4", 511, R8A77990_CLK_S3D2), - DEF_MOD("drif3", 512, R8A77990_CLK_S3D2), - DEF_MOD("drif2", 513, R8A77990_CLK_S3D2), - DEF_MOD("drif1", 514, R8A77990_CLK_S3D2), - DEF_MOD("drif0", 515, R8A77990_CLK_S3D2), + DEF_MOD("audmac0", 502, R8A77990_CLK_S1D2), + DEF_MOD("drif31", 508, R8A77990_CLK_S3D2), + DEF_MOD("drif30", 509, R8A77990_CLK_S3D2), + DEF_MOD("drif21", 510, R8A77990_CLK_S3D2), + DEF_MOD("drif20", 511, R8A77990_CLK_S3D2), + DEF_MOD("drif11", 512, R8A77990_CLK_S3D2), + DEF_MOD("drif10", 513, R8A77990_CLK_S3D2), + DEF_MOD("drif01", 514, R8A77990_CLK_S3D2), + DEF_MOD("drif00", 515, R8A77990_CLK_S3D2), DEF_MOD("hscif4", 516, R8A77990_CLK_S3D1C), DEF_MOD("hscif3", 517, R8A77990_CLK_S3D1C), DEF_MOD("hscif2", 518, R8A77990_CLK_S3D1C), @@ -180,8 +181,8 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = { DEF_MOD("vspb", 626, R8A77990_CLK_S0D1), DEF_MOD("vspi0", 631, R8A77990_CLK_S0D1), - DEF_MOD("ehci0", 703, R8A77990_CLK_S3D4), - DEF_MOD("hsusb", 704, R8A77990_CLK_S3D4), + DEF_MOD("ehci0", 703, R8A77990_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A77990_CLK_S3D2), DEF_MOD("csi40", 716, R8A77990_CLK_CSI0), DEF_MOD("du1", 723, R8A77990_CLK_S1D1), DEF_MOD("du0", 724, R8A77990_CLK_S1D1), diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c index eee3874865a9..68707277b17b 100644 --- a/drivers/clk/renesas/r8a77995-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c @@ -133,7 +133,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = { DEF_MOD("rwdt", 402, R8A77995_CLK_R), DEF_MOD("intc-ex", 407, R8A77995_CLK_CP), DEF_MOD("intc-ap", 408, R8A77995_CLK_S1D2), - DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1), + DEF_MOD("audmac0", 502, R8A77995_CLK_S1D2), DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C), DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C), DEF_MOD("thermal", 522, R8A77995_CLK_CP), diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index 658cb11b6f55..97c72477cd54 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -170,6 +170,7 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = { D_GATE(CLK_P6_PG2, "clk_p6_pg2", DIV_P6_PG, 0x8a3, 0x8a4, 0x8a5, 0, 0xb61, 0, 0), D_GATE(CLK_P6_PG3, "clk_p6_pg3", DIV_P6_PG, 0x8a6, 0x8a7, 0x8a8, 0, 0xb62, 0, 0), D_GATE(CLK_P6_PG4, "clk_p6_pg4", DIV_P6_PG, 0x8a9, 0x8aa, 0x8ab, 0, 
0xb63, 0, 0), + D_GATE(CLK_PCI_USB, "clk_pci_usb", CLKOUT_D40, 0xe6, 0, 0, 0, 0, 0, 0), D_GATE(CLK_QSPI0, "clk_qspi0", DIV_QSPI0, 0x2a4, 0x2a5, 0, 0, 0, 0, 0), D_GATE(CLK_QSPI1, "clk_qspi1", DIV_QSPI1, 0x484, 0x485, 0, 0, 0, 0, 0), D_GATE(CLK_RGMII_REF, "clk_rgmii_ref", CLKOUT_D8, 0x340, 0, 0, 0, 0, 0, 0), diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c index 9a8071a8114d..d25c8ba00a65 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.c +++ b/drivers/clk/renesas/rcar-gen3-cpg.c @@ -3,6 +3,7 @@ * R-Car Gen3 Clock Pulse Generator * * Copyright (C) 2015-2018 Glider bvba + * Copyright (C) 2019 Renesas Electronics Corp. * * Based on clk-rcar-gen3.c * @@ -88,14 +89,13 @@ static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers, #define CPG_FRQCRB 0x00000004 #define CPG_FRQCRB_KICK BIT(31) #define CPG_FRQCRC 0x000000e0 -#define CPG_FRQCRC_ZFC_MASK GENMASK(12, 8) -#define CPG_FRQCRC_Z2FC_MASK GENMASK(4, 0) struct cpg_z_clk { struct clk_hw hw; void __iomem *reg; void __iomem *kick_reg; unsigned long mask; + unsigned int fixed_div; }; #define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw) @@ -110,17 +110,18 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw, val = readl(zclk->reg) & zclk->mask; mult = 32 - (val >> __ffs(zclk->mask)); - /* Factor of 2 is for fixed divider */ - return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, 32 * 2); + return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, + 32 * zclk->fixed_div); } static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { - /* Factor of 2 is for fixed divider */ - unsigned long prate = *parent_rate / 2; + struct cpg_z_clk *zclk = to_z_clk(hw); + unsigned long prate; unsigned int mult; + prate = *parent_rate / zclk->fixed_div; mult = div_u64(rate * 32ULL, prate); mult = clamp(mult, 1U, 32U); @@ -134,8 +135,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned int mult; unsigned int i; - /* Factor of 2 is for fixed divider */ - mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate); + mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div, + parent_rate); mult = clamp(mult, 1U, 32U); if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK) @@ -178,7 +179,8 @@ static const struct clk_ops cpg_z_clk_ops = { static struct clk * __init cpg_z_clk_register(const char *name, const char *parent_name, void __iomem *reg, - unsigned long mask) + unsigned int div, + unsigned int offset) { struct clk_init_data init; struct cpg_z_clk *zclk; @@ -197,7 +199,8 @@ static struct clk * __init cpg_z_clk_register(const char *name, zclk->reg = reg + CPG_FRQCRC; zclk->kick_reg = reg + CPG_FRQCRB; zclk->hw.init = &init; - zclk->mask = mask; + zclk->mask = GENMASK(offset + 4, offset); + zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */ clk = clk_register(NULL, &zclk->hw); if (IS_ERR(clk)) @@ -234,8 +237,6 @@ struct sd_clock { const struct sd_div_table *div_table; struct cpg_simple_notifier csn; unsigned int div_num; - unsigned int div_min; - unsigned int div_max; unsigned int cur_div_idx; }; @@ -312,14 +313,20 @@ static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock, unsigned long rate, unsigned long parent_rate) { - unsigned int div; - - if (!rate) - rate = 1; - - div = DIV_ROUND_CLOSEST(parent_rate, rate); + unsigned long calc_rate, diff, diff_min = ULONG_MAX; + unsigned int i, best_div = 0; + + for (i = 0; i < clock->div_num; i++) { + calc_rate = DIV_ROUND_CLOSEST(parent_rate, + 
clock->div_table[i].div); + diff = calc_rate > rate ? calc_rate - rate : rate - calc_rate; + if (diff < diff_min) { + best_div = clock->div_table[i].div; + diff_min = diff; + } + } - return clamp_t(unsigned int, div, clock->div_min, clock->div_max); + return best_div; } static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate, @@ -369,27 +376,26 @@ static u32 cpg_quirks __initdata; #define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */ #define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */ -static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core, - void __iomem *base, const char *parent_name, +static struct clk * __init cpg_sd_clk_register(const char *name, + void __iomem *base, unsigned int offset, const char *parent_name, struct raw_notifier_head *notifiers) { struct clk_init_data init; struct sd_clock *clock; struct clk *clk; - unsigned int i; u32 val; clock = kzalloc(sizeof(*clock), GFP_KERNEL); if (!clock) return ERR_PTR(-ENOMEM); - init.name = core->name; + init.name = name; init.ops = &cpg_sd_clock_ops; init.flags = CLK_SET_RATE_PARENT; init.parent_names = &parent_name; init.num_parents = 1; - clock->csn.reg = base + core->offset; + clock->csn.reg = base + offset; clock->hw.init = &init; clock->div_table = cpg_sd_div_table; clock->div_num = ARRAY_SIZE(cpg_sd_div_table); @@ -403,13 +409,6 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core, val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK); writel(val, clock->csn.reg); - clock->div_max = clock->div_table[0].div; - clock->div_min = clock->div_max; - for (i = 1; i < clock->div_num; i++) { - clock->div_max = max(clock->div_max, clock->div_table[i].div); - clock->div_min = min(clock->div_min, clock->div_table[i].div); - } - clk = clk_register(NULL, &clock->hw); if (IS_ERR(clk)) goto free_clock; @@ -606,8 +605,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, break; case CLK_TYPE_GEN3_SD: - return cpg_sd_clk_register(core, base, __clk_get_name(parent), - notifiers); + return cpg_sd_clk_register(core->name, base, core->offset, + __clk_get_name(parent), notifiers); case CLK_TYPE_GEN3_R: if (cpg_quirks & RCKCR_CKSEL) { @@ -658,11 +657,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, case CLK_TYPE_GEN3_Z: return cpg_z_clk_register(core->name, __clk_get_name(parent), - base, CPG_FRQCRC_ZFC_MASK); - - case CLK_TYPE_GEN3_Z2: - return cpg_z_clk_register(core->name, __clk_get_name(parent), - base, CPG_FRQCRC_Z2FC_MASK); + base, core->div, core->offset); case CLK_TYPE_GEN3_OSC: /* diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h index eac1b057455a..15700d219a05 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.h +++ b/drivers/clk/renesas/rcar-gen3-cpg.h @@ -3,6 +3,7 @@ * R-Car Gen3 Clock Pulse Generator * * Copyright (C) 2015-2018 Glider bvba + * Copyright (C) 2018 Renesas Electronics Corp. 
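The DEF_GEN3_Z()/cpg_z_clk changes above parameterize the Z and Z2 clocks by a fixed divider and a FRQCRC field offset instead of two hard-coded masks: the 5-bit field stores 32 minus the multiplier, and the output rate is parent * mult / (32 * fixed_div). The standalone arithmetic sketch below mirrors that math with plain integers; the 3 GHz PLL value and the function names are illustrative only, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified model of the Gen3 Z clock math: the 5-bit FRQCRC field
 * encodes (32 - mult), and the output is parent * mult / (32 * fixed_div).
 * Plain integers, no MMIO.
 */
static unsigned long z_recalc(unsigned long parent, unsigned int field,
                              unsigned int fixed_div)
{
        unsigned int mult = 32 - field;

        /* DIV_ROUND_CLOSEST-style rounding */
        return (unsigned long)(((uint64_t)parent * mult +
                                (32ULL * fixed_div) / 2) /
                               (32ULL * fixed_div));
}

static unsigned int z_field_for_rate(unsigned long rate, unsigned long parent,
                                     unsigned int fixed_div)
{
        uint64_t tmp = (uint64_t)rate * 32 * fixed_div;
        unsigned int mult = (unsigned int)((tmp + parent / 2) / parent);

        if (mult < 1)
                mult = 1;
        if (mult > 32)
                mult = 32;
        return 32 - mult;       /* value to program into the register field */
}

int main(void)
{
        unsigned long pll0 = 3000000000UL;      /* hypothetical 3 GHz PLL0 */

        /* e.g. request 1.2 GHz on a "z" clock with fixed divider 2 */
        unsigned int field = z_field_for_rate(1200000000UL, pll0, 2);

        printf("field=%u rate=%lu\n", field, z_recalc(pll0, field, 2));
        return 0;
}

Passing div and offset through DEF_GEN3_Z() is what lets one clock type cover Z, Z2 and the E3/RZ/G2E variants that divide PLL0 by 4 instead of 2.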
* */ @@ -20,7 +21,6 @@ enum rcar_gen3_clk_types { CLK_TYPE_GEN3_R, CLK_TYPE_GEN3_MDSEL, /* Select parent/divider using mode pin */ CLK_TYPE_GEN3_Z, - CLK_TYPE_GEN3_Z2, CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */ CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */ CLK_TYPE_GEN3_RPCSRC, @@ -51,6 +51,9 @@ enum rcar_gen3_clk_types { DEF_BASE(_name, _id, CLK_TYPE_GEN3_RCKSEL, \ (_parent0) << 16 | (_parent1), .div = (_div0) << 16 | (_div1)) +#define DEF_GEN3_Z(_name, _id, _type, _parent, _div, _offset) \ + DEF_BASE(_name, _id, _type, _parent, .div = _div, .offset = _offset) + struct rcar_gen3_cpg_pll_config { u8 extal_div; u8 pll1_mult; diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c index ebce5260068b..09ede6920593 100644 --- a/drivers/clk/rockchip/clk-ddr.c +++ b/drivers/clk/rockchip/clk-ddr.c @@ -82,7 +82,7 @@ static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw) struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw); u32 val; - val = clk_readl(ddrclk->reg_base + + val = readl(ddrclk->reg_base + ddrclk->mux_offset) >> ddrclk->mux_shift; val &= GENMASK(ddrclk->mux_width - 1, 0); diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c index b8da6e799423..784b81e1ea7c 100644 --- a/drivers/clk/rockchip/clk-half-divider.c +++ b/drivers/clk/rockchip/clk-half-divider.c @@ -24,7 +24,7 @@ static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw, struct clk_divider *divider = to_clk_divider(hw); unsigned int val; - val = clk_readl(divider->reg) >> divider->shift; + val = readl(divider->reg) >> divider->shift; val &= div_mask(divider->width); val = val * 2 + 3; @@ -124,11 +124,11 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate, if (divider->flags & CLK_DIVIDER_HIWORD_MASK) { val = div_mask(divider->width) << (divider->shift + 16); } else { - val = clk_readl(divider->reg); + val = readl(divider->reg); val &= ~(div_mask(divider->width) << divider->shift); } val |= value << divider->shift; - clk_writel(val, divider->reg); + writel(val, divider->reg); if (divider->lock) spin_unlock_irqrestore(divider->lock, flags); diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c index 0a0b09591e6f..b2da2c8fa0c7 100644 --- a/drivers/clk/samsung/clk-exynos5410.c +++ b/drivers/clk/samsung/clk-exynos5410.c @@ -209,6 +209,7 @@ static const struct samsung_gate_clock exynos5410_gate_clks[] __initconst = { GATE(CLK_USI1, "usi1", "aclk66", GATE_IP_PERIC, 11, 0, 0), GATE(CLK_USI2, "usi2", "aclk66", GATE_IP_PERIC, 12, 0, 0), GATE(CLK_USI3, "usi3", "aclk66", GATE_IP_PERIC, 13, 0, 0), + GATE(CLK_TSADC, "tsadc", "aclk66", GATE_IP_PERIC, 15, 0, 0), GATE(CLK_PWM, "pwm", "aclk66", GATE_IP_PERIC, 24, 0, 0), GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0", diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index 932836d26e2b..be0deee70182 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -531,7 +531,8 @@ static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram", static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" }; static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, - 0x104, 0, 4, 24, 3, BIT(31), 0); + 0x104, 0, 4, 24, 3, BIT(31), + CLK_SET_RATE_PARENT); static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" }; static const u8 tcon0_table[] = { 0, 2, }; diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c 
b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index 139e8389615c..3c32d7798f27 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -266,7 +266,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, 0x600, 0, 4, /* M */ 24, 1, /* mux */ BIT(31), /* gate */ - 0); + CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "psi-ahb1-ahb2", 0x60c, BIT(0), 0); @@ -311,7 +311,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690, 0, 3, /* M */ 24, 1, /* mux */ BIT(31), /* gate */ - 0); + CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "psi-ahb1-ahb2", 0x69c, BIT(0), 0); @@ -656,6 +656,8 @@ static const char * const hdmi_cec_parents[] = { "osc32k", "pll-periph0-2x" }; static const struct ccu_mux_fixed_prediv hdmi_cec_predivs[] = { { .index = 1, .div = 36621 }, }; + +#define SUN50I_H6_HDMI_CEC_CLK_REG 0xb10 static struct ccu_mux hdmi_cec_clk = { .enable = BIT(31), @@ -689,7 +691,7 @@ static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0", tcon_lcd0_parents, 0xb60, 24, 3, /* mux */ BIT(31), /* gate */ - 0); + CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(bus_tcon_lcd0_clk, "bus-tcon-lcd0", "ahb3", 0xb7c, BIT(0), 0); @@ -704,7 +706,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0", 8, 2, /* P */ 24, 3, /* mux */ BIT(31), /* gate */ - 0); + CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(bus_tcon_tv0_clk, "bus-tcon-tv0", "ahb3", 0xb9c, BIT(0), 0); @@ -1200,6 +1202,15 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev) val &= ~(GENMASK(21, 16) | BIT(0)); writel(val | (7 << 16), reg + SUN50I_H6_PLL_AUDIO_REG); + /* + * First clock parent (osc32K) is unusable for CEC. But since there + * is no good way to force parent switch (both run with same frequency), + * just set second clock parent here. 
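For the H6 HDMI CEC clock, the probe hunk above works around the unusable osc32k parent by setting the mux bit (BIT(24)) of the register at offset 0xb10 before the CCU is registered. The sketch below models that read-modify-write with an ordinary array standing in for the ioremapped CCU block; in the driver the accesses are readl()/writel() on reg + SUN50I_H6_HDMI_CEC_CLK_REG.

#include <stdint.h>
#include <stdio.h>

#define HDMI_CEC_CLK_REG_OFFSET 0xb10           /* SUN50I_H6_HDMI_CEC_CLK_REG */
#define HDMI_CEC_PARENT_BIT     (1u << 24)      /* select second mux parent */

static uint32_t fake_ccu[0x1000];   /* word-indexed stand-in for the MMIO block */

static void select_cec_parent(void)
{
        uint32_t *reg = &fake_ccu[HDMI_CEC_CLK_REG_OFFSET / 4];
        uint32_t val = *reg;            /* readl() in the driver */

        val |= HDMI_CEC_PARENT_BIT;     /* parent index 1: pll-periph0-2x / 36621 */
        *reg = val;                     /* writel() in the driver */
}

int main(void)
{
        select_cec_parent();
        printf("reg 0xb10 = 0x%08x\n",
               (unsigned int)fake_ccu[HDMI_CEC_CLK_REG_OFFSET / 4]);
        return 0;
}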
+ */ + val = readl(reg + SUN50I_H6_HDMI_CEC_CLK_REG); + val |= BIT(24); + writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG); + return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_h6_ccu_desc); } diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.h b/drivers/clk/sunxi-ng/ccu-sun5i.h index 93a275fbd9a9..b66abd4fd0bf 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.h +++ b/drivers/clk/sunxi-ng/ccu-sun5i.h @@ -60,10 +60,6 @@ /* The rest of the module clocks are exported */ -#define CLK_MBUS 99 - -/* And finally the IEP clock */ - #define CLK_NUMBER (CLK_IEP + 1) #endif /* _CCU_SUN5I_H_ */ diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c index 2d6555d73170..5f714b4d8ee4 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c @@ -513,8 +513,9 @@ static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", 0x130, BIT(16), 0); static SUNXI_CCU_GATE(mipi_csi_clk, "mipi-csi", "osc24M", 0x130, BIT(31), 0); -static const char * const csi_mclk_parents[] = { "pll-de", "osc24M" }; -static const u8 csi_mclk_table[] = { 3, 5 }; +static const char * const csi_mclk_parents[] = { "pll-video0", "pll-de", + "osc24M" }; +static const u8 csi_mclk_table[] = { 0, 3, 5 }; static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents, csi_mclk_table, 0x134, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index ac12f261f8ca..eada0e291859 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -325,7 +325,8 @@ static SUNXI_CCU_GATE(dram_ohci_clk, "dram-ohci", "dram", static const char * const de_parents[] = { "pll-video", "pll-periph0" }; static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, - 0x104, 0, 4, 24, 2, BIT(31), 0); + 0x104, 0, 4, 24, 2, BIT(31), + CLK_SET_RATE_PARENT); static const char * const tcon_parents[] = { "pll-video" }; static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents, diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c index a09dfbe36402..dc9f0a365664 100644 --- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c +++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c @@ -240,7 +240,7 @@ static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", i2s_spdif_parents, /* The BSP header file has a CIR_CFG, but no mod clock uses this definition */ static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", - 0x0cc, BIT(8), 0); + 0x0cc, BIT(1), 0); static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "pll-ddr", 0x100, BIT(0), 0); diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c index 9b49adb20d07..cbcdf664f336 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.c +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c @@ -167,7 +167,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); - u32 n_mask, k_mask, m_mask, p_mask; + u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0; struct _ccu_nkmp _nkmp; unsigned long flags; u32 reg; @@ -186,10 +186,24 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, ccu_nkmp_find_best(parent_rate, rate, &_nkmp); - n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift); - k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift); - m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift); - p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift); + /* + * If width is 0, GENMASK() macro may not generate expected mask (0) + * as it 
falls under undefined behaviour by C standard due to shifts + * which are equal or greater than width of left operand. This can + * be easily avoided by explicitly checking if width is 0. + */ + if (nkmp->n.width) + n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, + nkmp->n.shift); + if (nkmp->k.width) + k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, + nkmp->k.shift); + if (nkmp->m.width) + m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, + nkmp->m.shift); + if (nkmp->p.width) + p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, + nkmp->p.shift); spin_lock_irqsave(nkmp->common.lock, flags); diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig new file mode 100644 index 000000000000..2b6207cc4eda --- /dev/null +++ b/drivers/clk/sunxi/Kconfig @@ -0,0 +1,43 @@ +menuconfig CLK_SUNXI + bool "Legacy clock support for Allwinner SoCs" + depends on ARCH_SUNXI || COMPILE_TEST + default y + +if CLK_SUNXI + +config CLK_SUNXI_CLOCKS + bool "Legacy clock drivers" + default y + help + Legacy clock drivers being used on older (A10, A13, A20, + A23, A31, A80) SoCs. These drivers are kept around for + Device Tree backward compatibility issues, in case one would + still use a Device Tree with one clock provider by + node. Newer Device Trees and newer SoCs use the drivers + controlled by CONFIG_SUNXI_CCU. + +config CLK_SUNXI_PRCM_SUN6I + bool "Legacy A31 PRCM driver" + select MFD_SUN6I_PRCM + default y + help + Legacy clock driver for the A31 PRCM clocks. Those are + usually needed for the PMIC communication, mostly. + +config CLK_SUNXI_PRCM_SUN8I + bool "Legacy sun8i PRCM driver" + select MFD_SUN6I_PRCM + default y + help + Legacy clock driver for the sun8i family PRCM clocks. + Those are usually needed for the PMIC communication, + mostly. + +config CLK_SUNXI_PRCM_SUN9I + bool "Legacy A80 PRCM driver" + default y + help + Legacy clock driver for the A80 PRCM clocks. Those are + usually needed for the PMIC communication, mostly. 
+ +endif diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile index be88368b48a1..e10824c76ae9 100644 --- a/drivers/clk/sunxi/Makefile +++ b/drivers/clk/sunxi/Makefile @@ -3,27 +3,32 @@ # Makefile for sunxi specific clk # -obj-y += clk-sunxi.o clk-factors.o -obj-y += clk-a10-codec.o -obj-y += clk-a10-hosc.o -obj-y += clk-a10-mod1.o -obj-y += clk-a10-pll2.o -obj-y += clk-a10-ve.o -obj-y += clk-a20-gmac.o -obj-y += clk-mod0.o -obj-y += clk-simple-gates.o -obj-y += clk-sun4i-display.o -obj-y += clk-sun4i-pll3.o -obj-y += clk-sun4i-tcon-ch1.o -obj-y += clk-sun8i-bus-gates.o -obj-y += clk-sun8i-mbus.o -obj-y += clk-sun9i-core.o -obj-y += clk-sun9i-mmc.o -obj-y += clk-usb.o +obj-$(CONFIG_CLK_SUNXI) += clk-factors.o -obj-$(CONFIG_MACH_SUN9I) += clk-sun8i-apb0.o -obj-$(CONFIG_MACH_SUN9I) += clk-sun9i-cpus.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sunxi.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-codec.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-hosc.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-mod1.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-pll2.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-ve.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a20-gmac.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-mod0.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-simple-gates.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun4i-display.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun4i-pll3.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun4i-tcon-ch1.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun8i-bus-gates.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun8i-mbus.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun9i-core.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun9i-mmc.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-usb.o -obj-$(CONFIG_MFD_SUN6I_PRCM) += \ - clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \ - clk-sun8i-apb0.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun8i-apb0.o +obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun9i-cpus.o + +obj-$(CONFIG_CLK_SUNXI_PRCM_SUN6I) += clk-sun6i-apb0.o +obj-$(CONFIG_CLK_SUNXI_PRCM_SUN6I) += clk-sun6i-apb0-gates.o +obj-$(CONFIG_CLK_SUNXI_PRCM_SUN6I) += clk-sun6i-ar100.o + +obj-$(CONFIG_CLK_SUNXI_PRCM_SUN8I) += clk-sun8i-apb0.o +obj-$(CONFIG_CLK_SUNXI_PRCM_SUN8I) += clk-sun6i-apb0-gates.o diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c index 84267cfc4433..b5ff76c663f8 100644 --- a/drivers/clk/tegra/clk-super.c +++ b/drivers/clk/tegra/clk-super.c @@ -121,7 +121,7 @@ out: return err; } -const struct clk_ops tegra_clk_super_mux_ops = { +static const struct clk_ops tegra_clk_super_mux_ops = { .get_parent = clk_super_get_parent, .set_parent = clk_super_set_parent, }; diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index df0018f7bf7e..abc0c4bea740 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -1466,9 +1466,9 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np) tegra_pmc_clk_init(pmc_base, tegra124_clks); /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */ - plld_base = clk_readl(clk_base + PLLD_BASE); + plld_base = readl(clk_base + PLLD_BASE); plld_base &= ~BIT(25); - clk_writel(plld_base, clk_base + PLLD_BASE); + writel(plld_base, clk_base + PLLD_BASE); } /** diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 7545af763d7a..ed3c7df75d1e 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -3557,7 +3557,7 @@ static void __init tegra210_clock_init(struct device_node *np) if (!clks) return; - value = 
clk_readl(clk_base + SPARE_REG0) >> CLK_M_DIVISOR_SHIFT; + value = readl(clk_base + SPARE_REG0) >> CLK_M_DIVISOR_SHIFT; clk_m_div = (value & CLK_M_DIVISOR_MASK) + 1; if (tegra_osc_clk_init(clk_base, tegra210_clks, tegra210_input_freq, @@ -3574,9 +3574,9 @@ static void __init tegra210_clock_init(struct device_node *np) tegra_pmc_clk_init(pmc_base, tegra210_clks); /* For Tegra210, PLLD is the only source for DSIA & DSIB */ - value = clk_readl(clk_base + PLLD_BASE); + value = readl(clk_base + PLLD_BASE); value &= ~BIT(25); - clk_writel(value, clk_base + PLLD_BASE); + writel(value, clk_base + PLLD_BASE); tegra_clk_apply_init_table = tegra210_clock_apply_init_table; diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c index 7c0403b733ae..698306f4801f 100644 --- a/drivers/clk/ux500/clk-sysctrl.c +++ b/drivers/clk/ux500/clk-sysctrl.c @@ -42,7 +42,8 @@ static int clk_sysctrl_prepare(struct clk_hw *hw) clk->reg_bits[0]); if (!ret && clk->enable_delay_us) - usleep_range(clk->enable_delay_us, clk->enable_delay_us); + usleep_range(clk->enable_delay_us, clk->enable_delay_us + + (clk->enable_delay_us >> 2)); return ret; } diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index d977193842df..19174835693b 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c @@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = { }; static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, - void __iomem *base, + const struct pmc_clk_data *pmc_data, const char **parent_names, int num_parents) { @@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, init.num_parents = num_parents; pclk->hw.init = &init; - pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; + pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; spin_lock_init(&pclk->lock); + /* + * On some systems, the pmc_plt_clocks already enabled by the + * firmware are being marked as critical to avoid them being + * gated by the clock framework. 
+ */ + if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw)) + init.flags |= CLK_IS_CRITICAL; + ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); if (ret) { pclk = ERR_PTR(ret); @@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev) return PTR_ERR(parent_names); for (i = 0; i < PMC_CLK_NUM; i++) { - data->clks[i] = plt_clk_register(pdev, i, pmc_data->base, + data->clks[i] = plt_clk_register(pdev, i, pmc_data, parent_names, data->nparents); if (IS_ERR(data->clks[i])) { err = PTR_ERR(data->clks[i]); diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index d7b53ac8ad11..4b9d5c14c400 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -158,7 +158,7 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk, clks[fclk] = clk_register_gate(NULL, clk_name, div1_name, CLK_SET_RATE_PARENT, fclk_gate_reg, 0, CLK_GATE_SET_TO_DISABLE, fclk_gate_lock); - enable_reg = clk_readl(fclk_gate_reg) & 1; + enable_reg = readl(fclk_gate_reg) & 1; if (enable && !enable_reg) { if (clk_prepare_enable(clks[fclk])) pr_warn("%s: FCLK%u enable failed\n", __func__, @@ -287,7 +287,7 @@ static void __init zynq_clk_setup(struct device_node *np) SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock); /* CPU clocks */ - tmp = clk_readl(SLCR_621_TRUE) & 1; + tmp = readl(SLCR_621_TRUE) & 1; clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4, CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0, &armclk_lock); @@ -510,7 +510,7 @@ static void __init zynq_clk_setup(struct device_node *np) &dbgclk_lock); /* leave debug clocks in the state the bootloader set them up to */ - tmp = clk_readl(SLCR_DBG_CLK_CTRL); + tmp = readl(SLCR_DBG_CLK_CTRL); if (tmp & DBG_CLK_CTRL_CLKACT_TRC) if (clk_prepare_enable(clks[dbg_trc])) pr_warn("%s: trace clk enable failed\n", __func__); diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c index 00d72fb5c036..800b70ee19b3 100644 --- a/drivers/clk/zynq/pll.c +++ b/drivers/clk/zynq/pll.c @@ -90,7 +90,7 @@ static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw, * makes probably sense to redundantly save fbdiv in the struct * zynq_pll to save the IO access. 
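The Zynq PLL hunks above only swap clk_readl()/clk_writel() for plain readl()/writel(); the rate math itself stays parent_rate multiplied by the FBDIV field of the PLL control register. The sketch below reproduces that recalc step with placeholder mask/shift values, since the real register layout is not part of the diff shown.

#include <stdint.h>
#include <stdio.h>

/* placeholder field layout, not the actual Zynq PLL_CTRL definition */
#define DEMO_FBDIV_SHIFT        12
#define DEMO_FBDIV_MASK         (0x7fu << DEMO_FBDIV_SHIFT)

static unsigned long demo_pll_recalc(uint32_t pll_ctrl,
                                     unsigned long parent_rate)
{
        uint32_t fbdiv = (pll_ctrl & DEMO_FBDIV_MASK) >> DEMO_FBDIV_SHIFT;

        return parent_rate * fbdiv;     /* PLL output = reference * FBDIV */
}

int main(void)
{
        uint32_t ctrl = 40u << DEMO_FBDIV_SHIFT;    /* FBDIV = 40, illustrative */

        printf("%lu\n", demo_pll_recalc(ctrl, 33333333UL));
        return 0;
}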
*/ - fbdiv = (clk_readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >> + fbdiv = (readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >> PLLCTRL_FBDIV_SHIFT; return parent_rate * fbdiv; @@ -112,7 +112,7 @@ static int zynq_pll_is_enabled(struct clk_hw *hw) spin_lock_irqsave(clk->lock, flags); - reg = clk_readl(clk->pll_ctrl); + reg = readl(clk->pll_ctrl); spin_unlock_irqrestore(clk->lock, flags); @@ -138,10 +138,10 @@ static int zynq_pll_enable(struct clk_hw *hw) /* Power up PLL and wait for lock */ spin_lock_irqsave(clk->lock, flags); - reg = clk_readl(clk->pll_ctrl); + reg = readl(clk->pll_ctrl); reg &= ~(PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK); - clk_writel(reg, clk->pll_ctrl); - while (!(clk_readl(clk->pll_status) & (1 << clk->lockbit))) + writel(reg, clk->pll_ctrl); + while (!(readl(clk->pll_status) & (1 << clk->lockbit))) ; spin_unlock_irqrestore(clk->lock, flags); @@ -168,9 +168,9 @@ static void zynq_pll_disable(struct clk_hw *hw) /* shut down PLL */ spin_lock_irqsave(clk->lock, flags); - reg = clk_readl(clk->pll_ctrl); + reg = readl(clk->pll_ctrl); reg |= PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK; - clk_writel(reg, clk->pll_ctrl); + writel(reg, clk->pll_ctrl); spin_unlock_irqrestore(clk->lock, flags); } @@ -223,9 +223,9 @@ struct clk *clk_register_zynq_pll(const char *name, const char *parent, spin_lock_irqsave(pll->lock, flags); - reg = clk_readl(pll->pll_ctrl); + reg = readl(pll->pll_ctrl); reg &= ~PLLCTRL_BPQUAL_MASK; - clk_writel(reg, pll->pll_ctrl); + writel(reg, pll->pll_ctrl); spin_unlock_irqrestore(pll->lock, flags); diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c index 4143f560c28d..0af8f74c5fa5 100644 --- a/drivers/clk/zynqmp/clk-mux-zynqmp.c +++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c @@ -138,4 +138,3 @@ struct clk_hw *zynqmp_clk_register_mux(const char *name, u32 clk_id, return hw; } -EXPORT_SYMBOL_GPL(zynqmp_clk_register_mux); diff --git a/drivers/clk/zynqmp/clk-zynqmp.h b/drivers/clk/zynqmp/clk-zynqmp.h index 7ab163b67249..fec9a15c8786 100644 --- a/drivers/clk/zynqmp/clk-zynqmp.h +++ b/drivers/clk/zynqmp/clk-zynqmp.h @@ -10,12 +10,6 @@ #include <linux/firmware/xlnx-zynqmp.h> -/* Clock APIs payload parameters */ -#define CLK_GET_NAME_RESP_LEN 16 -#define CLK_GET_TOPOLOGY_RESP_WORDS 3 -#define CLK_GET_PARENTS_RESP_WORDS 3 -#define CLK_GET_ATTR_RESP_WORDS 1 - enum topology_type { TYPE_INVALID, TYPE_MUX, diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index b0908ec62f73..8febd2431545 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c @@ -21,24 +21,6 @@ #define MAX_NODES 6 #define MAX_NAME_LEN 50 -#define CLK_TYPE_SHIFT 2 - -#define PM_API_PAYLOAD_LEN 3 - -#define NA_PARENT 0xFFFFFFFF -#define DUMMY_PARENT 0xFFFFFFFE - -#define CLK_TYPE_FIELD_LEN 4 -#define CLK_TOPOLOGY_NODE_OFFSET 16 -#define NODES_PER_RESP 3 - -#define CLK_TYPE_FIELD_MASK 0xF -#define CLK_FLAG_FIELD_MASK GENMASK(21, 8) -#define CLK_TYPE_FLAG_FIELD_MASK GENMASK(31, 24) - -#define CLK_PARENTS_ID_LEN 16 -#define CLK_PARENTS_ID_MASK 0xFFFF - /* Flags for parents */ #define PARENT_CLK_SELF 0 #define PARENT_CLK_NODE1 1 @@ -52,7 +34,10 @@ #define END_OF_PARENTS 1 #define RESERVED_CLK_NAME "" -#define CLK_VALID_MASK 0x1 +#define CLK_GET_NAME_RESP_LEN 16 +#define CLK_GET_TOPOLOGY_RESP_WORDS 3 +#define CLK_GET_PARENTS_RESP_WORDS 3 +#define CLK_GET_ATTR_RESP_WORDS 1 enum clk_type { CLK_TYPE_OUTPUT, @@ -80,6 +65,7 @@ struct clock_parent { * @num_nodes: Number of nodes present in topology * @parent: Parent of clock * @num_parents: Number of parents of clock 
+ * @clk_id: Clock id */ struct zynqmp_clock { char clk_name[MAX_NAME_LEN]; @@ -89,6 +75,36 @@ struct zynqmp_clock { u32 num_nodes; struct clock_parent parent[MAX_PARENT]; u32 num_parents; + u32 clk_id; +}; + +struct name_resp { + char name[CLK_GET_NAME_RESP_LEN]; +}; + +struct topology_resp { +#define CLK_TOPOLOGY_TYPE GENMASK(3, 0) +#define CLK_TOPOLOGY_FLAGS GENMASK(23, 8) +#define CLK_TOPOLOGY_TYPE_FLAGS GENMASK(31, 24) + u32 topology[CLK_GET_TOPOLOGY_RESP_WORDS]; +}; + +struct parents_resp { +#define NA_PARENT 0xFFFFFFFF +#define DUMMY_PARENT 0xFFFFFFFE +#define CLK_PARENTS_ID GENMASK(15, 0) +#define CLK_PARENTS_FLAGS GENMASK(31, 16) + u32 parents[CLK_GET_PARENTS_RESP_WORDS]; +}; + +struct attr_resp { +#define CLK_ATTR_VALID BIT(0) +#define CLK_ATTR_TYPE BIT(2) +#define CLK_ATTR_NODE_INDEX GENMASK(13, 0) +#define CLK_ATTR_NODE_TYPE GENMASK(19, 14) +#define CLK_ATTR_NODE_SUBCLASS GENMASK(25, 20) +#define CLK_ATTR_NODE_CLASS GENMASK(31, 26) + u32 attr[CLK_GET_ATTR_RESP_WORDS]; }; static const char clk_type_postfix[][10] = { @@ -199,14 +215,15 @@ static int zynqmp_pm_clock_get_num_clocks(u32 *nclocks) /** * zynqmp_pm_clock_get_name() - Get the name of clock for given id * @clock_id: ID of the clock to be queried - * @name: Name of given clock + * @response: Name of the clock with the given id * * This function is used to get name of clock specified by given * clock ID. * - * Return: Returns 0, in case of error name would be 0 + * Return: Returns 0 */ -static int zynqmp_pm_clock_get_name(u32 clock_id, char *name) +static int zynqmp_pm_clock_get_name(u32 clock_id, + struct name_resp *response) { struct zynqmp_pm_query_data qdata = {0}; u32 ret_payload[PAYLOAD_ARG_CNT]; @@ -215,7 +232,7 @@ static int zynqmp_pm_clock_get_name(u32 clock_id, char *name) qdata.arg1 = clock_id; eemi_ops->query_data(qdata, ret_payload); - memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN); + memcpy(response, ret_payload, sizeof(*response)); return 0; } @@ -224,7 +241,7 @@ static int zynqmp_pm_clock_get_name(u32 clock_id, char *name) * zynqmp_pm_clock_get_topology() - Get the topology of clock for given id * @clock_id: ID of the clock to be queried * @index: Node index of clock topology - * @topology: Buffer to store nodes in topology and flags + * @response: Buffer used for the topology response * * This function is used to get topology information for the clock * specified by given clock ID. @@ -237,7 +254,8 @@ static int zynqmp_pm_clock_get_name(u32 clock_id, char *name) * * Return: 0 on success else error+reason */ -static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology) +static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index, + struct topology_resp *response) { struct zynqmp_pm_query_data qdata = {0}; u32 ret_payload[PAYLOAD_ARG_CNT]; @@ -248,7 +266,7 @@ static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology) qdata.arg2 = index; ret = eemi_ops->query_data(qdata, ret_payload); - memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4); + memcpy(response, &ret_payload[1], sizeof(*response)); return ret; } @@ -297,7 +315,7 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id, * zynqmp_pm_clock_get_parents() - Get the first 3 parents of clock for given id * @clock_id: Clock ID * @index: Parent index - * @parents: 3 parents of the given clock + * @response: Parents of the given clock * * This function is used to get 3 parents for the clock specified by * given clock ID. 
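The zynqmp clkc.c rework above replaces raw payload arrays with typed response structs (name_resp, topology_resp, parents_resp, attr_resp) whose fields are decoded with FIELD_GET() against the GENMASK() definitions embedded in each struct. The standalone sketch below decodes one topology word the same way, with the shifts written out by hand; the sample word is made up.

#include <stdint.h>
#include <stdio.h>

struct demo_topology_node {
        unsigned int type;
        unsigned int flag;
        unsigned int type_flag;
};

static struct demo_topology_node demo_decode(uint32_t word)
{
        struct demo_topology_node node = {
                .type      = word & 0xf,                /* CLK_TOPOLOGY_TYPE, bits 3:0        */
                .flag      = (word >> 8) & 0xffff,      /* CLK_TOPOLOGY_FLAGS, bits 23:8      */
                .type_flag = (word >> 24) & 0xff,       /* CLK_TOPOLOGY_TYPE_FLAGS, bits 31:24 */
        };

        return node;
}

int main(void)
{
        struct demo_topology_node n = demo_decode(0x12003402);

        printf("type=%u flag=0x%x type_flag=0x%x\n",
               n.type, n.flag, n.type_flag);
        return 0;
}

Moving the masks next to the struct definition keeps the firmware ABI layout in one place instead of loose #defines shared across files.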
@@ -310,7 +328,8 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id, * * Return: 0 on success else error+reason */ -static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents) +static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index, + struct parents_resp *response) { struct zynqmp_pm_query_data qdata = {0}; u32 ret_payload[PAYLOAD_ARG_CNT]; @@ -321,7 +340,7 @@ static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents) qdata.arg2 = index; ret = eemi_ops->query_data(qdata, ret_payload); - memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4); + memcpy(response, &ret_payload[1], sizeof(*response)); return ret; } @@ -329,13 +348,14 @@ static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents) /** * zynqmp_pm_clock_get_attributes() - Get the attributes of clock for given id * @clock_id: Clock ID - * @attr: Clock attributes + * @response: Clock attributes response * * This function is used to get clock's attributes(e.g. valid, clock type, etc). * * Return: 0 on success else error+reason */ -static int zynqmp_pm_clock_get_attributes(u32 clock_id, u32 *attr) +static int zynqmp_pm_clock_get_attributes(u32 clock_id, + struct attr_resp *response) { struct zynqmp_pm_query_data qdata = {0}; u32 ret_payload[PAYLOAD_ARG_CNT]; @@ -345,7 +365,7 @@ static int zynqmp_pm_clock_get_attributes(u32 clock_id, u32 *attr) qdata.arg1 = clock_id; ret = eemi_ops->query_data(qdata, ret_payload); - memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4); + memcpy(response, &ret_payload[1], sizeof(*response)); return ret; } @@ -354,24 +374,28 @@ static int zynqmp_pm_clock_get_attributes(u32 clock_id, u32 *attr) * __zynqmp_clock_get_topology() - Get topology data of clock from firmware * response data * @topology: Clock topology - * @data: Clock topology data received from firmware + * @response: Clock topology data received from firmware * @nnodes: Number of nodes * * Return: 0 on success else error+reason */ static int __zynqmp_clock_get_topology(struct clock_topology *topology, - u32 *data, u32 *nnodes) + struct topology_resp *response, + u32 *nnodes) { int i; + u32 type; - for (i = 0; i < PM_API_PAYLOAD_LEN; i++) { - if (!(data[i] & CLK_TYPE_FIELD_MASK)) + for (i = 0; i < ARRAY_SIZE(response->topology); i++) { + type = FIELD_GET(CLK_TOPOLOGY_TYPE, response->topology[i]); + if (type == TYPE_INVALID) return END_OF_TOPOLOGY_NODE; - topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK; - topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK, - data[i]); + topology[*nnodes].type = type; + topology[*nnodes].flag = FIELD_GET(CLK_TOPOLOGY_FLAGS, + response->topology[i]); topology[*nnodes].type_flag = - FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]); + FIELD_GET(CLK_TOPOLOGY_TYPE_FLAGS, + response->topology[i]); (*nnodes)++; } @@ -392,14 +416,16 @@ static int zynqmp_clock_get_topology(u32 clk_id, u32 *num_nodes) { int j, ret; - u32 pm_resp[PM_API_PAYLOAD_LEN] = {0}; + struct topology_resp response = { }; *num_nodes = 0; - for (j = 0; j <= MAX_NODES; j += 3) { - ret = zynqmp_pm_clock_get_topology(clk_id, j, pm_resp); + for (j = 0; j <= MAX_NODES; j += ARRAY_SIZE(response.topology)) { + ret = zynqmp_pm_clock_get_topology(clock[clk_id].clk_id, j, + &response); if (ret) return ret; - ret = __zynqmp_clock_get_topology(topology, pm_resp, num_nodes); + ret = __zynqmp_clock_get_topology(topology, &response, + num_nodes); if (ret == END_OF_TOPOLOGY_NODE) return 0; } @@ -408,31 +434,33 @@ static int zynqmp_clock_get_topology(u32 
clk_id, } /** - * __zynqmp_clock_get_topology() - Get parents info of clock from firmware + * __zynqmp_clock_get_parents() - Get parents info of clock from firmware * response data * @parents: Clock parents - * @data: Clock parents data received from firmware + * @response: Clock parents data received from firmware * @nparent: Number of parent * * Return: 0 on success else error+reason */ -static int __zynqmp_clock_get_parents(struct clock_parent *parents, u32 *data, +static int __zynqmp_clock_get_parents(struct clock_parent *parents, + struct parents_resp *response, u32 *nparent) { int i; struct clock_parent *parent; - for (i = 0; i < PM_API_PAYLOAD_LEN; i++) { - if (data[i] == NA_PARENT) + for (i = 0; i < ARRAY_SIZE(response->parents); i++) { + if (response->parents[i] == NA_PARENT) return END_OF_PARENTS; parent = &parents[i]; - parent->id = data[i] & CLK_PARENTS_ID_MASK; - if (data[i] == DUMMY_PARENT) { + parent->id = FIELD_GET(CLK_PARENTS_ID, response->parents[i]); + if (response->parents[i] == DUMMY_PARENT) { strcpy(parent->name, "dummy_name"); parent->flag = 0; } else { - parent->flag = data[i] >> CLK_PARENTS_ID_LEN; + parent->flag = FIELD_GET(CLK_PARENTS_FLAGS, + response->parents[i]); if (zynqmp_get_clock_name(parent->id, parent->name)) continue; } @@ -454,20 +482,21 @@ static int zynqmp_clock_get_parents(u32 clk_id, struct clock_parent *parents, u32 *num_parents) { int j = 0, ret; - u32 pm_resp[PM_API_PAYLOAD_LEN] = {0}; + struct parents_resp response = { }; *num_parents = 0; do { /* Get parents from firmware */ - ret = zynqmp_pm_clock_get_parents(clk_id, j, pm_resp); + ret = zynqmp_pm_clock_get_parents(clock[clk_id].clk_id, j, + &response); if (ret) return ret; - ret = __zynqmp_clock_get_parents(&parents[j], pm_resp, + ret = __zynqmp_clock_get_parents(&parents[j], &response, num_parents); if (ret == END_OF_PARENTS) return 0; - j += PM_API_PAYLOAD_LEN; + j += ARRAY_SIZE(response.parents); } while (*num_parents <= MAX_PARENT); return 0; @@ -528,13 +557,14 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, const char **parent_names) { int j; - u32 num_nodes; + u32 num_nodes, clk_dev_id; char *clk_out = NULL; struct clock_topology *nodes; struct clk_hw *hw = NULL; nodes = clock[clk_id].node; num_nodes = clock[clk_id].num_nodes; + clk_dev_id = clock[clk_id].clk_id; for (j = 0; j < num_nodes; j++) { /* @@ -551,13 +581,14 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, if (!clk_topology[nodes[j].type]) continue; - hw = (*clk_topology[nodes[j].type])(clk_out, clk_id, + hw = (*clk_topology[nodes[j].type])(clk_out, clk_dev_id, parent_names, num_parents, &nodes[j]); if (IS_ERR(hw)) - pr_warn_once("%s() %s register fail with %ld\n", - __func__, clk_name, PTR_ERR(hw)); + pr_warn_once("%s() 0x%x: %s register fail with %ld\n", + __func__, clk_dev_id, clk_name, + PTR_ERR(hw)); parent_names[0] = clk_out; } @@ -621,20 +652,33 @@ static int zynqmp_register_clocks(struct device_node *np) static void zynqmp_get_clock_info(void) { int i, ret; - u32 attr, type = 0; + u32 type = 0; + u32 nodetype, subclass, class; + struct attr_resp attr; + struct name_resp name; for (i = 0; i < clock_max_idx; i++) { - zynqmp_pm_clock_get_name(i, clock[i].clk_name); - if (!strcmp(clock[i].clk_name, RESERVED_CLK_NAME)) - continue; - ret = zynqmp_pm_clock_get_attributes(i, &attr); if (ret) continue; - clock[i].valid = attr & CLK_VALID_MASK; - clock[i].type = attr >> CLK_TYPE_SHIFT ? 
CLK_TYPE_EXTERNAL : - CLK_TYPE_OUTPUT; + clock[i].valid = FIELD_GET(CLK_ATTR_VALID, attr.attr[0]); + clock[i].type = FIELD_GET(CLK_ATTR_TYPE, attr.attr[0]) ? + CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT; + + nodetype = FIELD_GET(CLK_ATTR_NODE_TYPE, attr.attr[0]); + subclass = FIELD_GET(CLK_ATTR_NODE_SUBCLASS, attr.attr[0]); + class = FIELD_GET(CLK_ATTR_NODE_CLASS, attr.attr[0]); + + clock[i].clk_id = FIELD_PREP(CLK_ATTR_NODE_CLASS, class) | + FIELD_PREP(CLK_ATTR_NODE_SUBCLASS, subclass) | + FIELD_PREP(CLK_ATTR_NODE_TYPE, nodetype) | + FIELD_PREP(CLK_ATTR_NODE_INDEX, i); + + zynqmp_pm_clock_get_name(clock[i].clk_id, &name); + if (!strcmp(name.name, RESERVED_CLK_NAME)) + continue; + strncpy(clock[i].clk_name, name.name, MAX_NAME_LEN); } /* Get topology of all clock */ diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c index a371c66e72ef..d8f5b70d2709 100644 --- a/drivers/clk/zynqmp/divider.c +++ b/drivers/clk/zynqmp/divider.c @@ -31,12 +31,14 @@ * struct zynqmp_clk_divider - adjustable divider clock * @hw: handle between common and hardware-specific interfaces * @flags: Hardware specific flags + * @is_frac: The divider is a fractional divider * @clk_id: Id of clock * @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2) */ struct zynqmp_clk_divider { struct clk_hw hw; u8 flags; + bool is_frac; u32 clk_id; u32 div_type; }; @@ -76,6 +78,13 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw, else value = div >> 16; + if (!value) { + WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO), + "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n", + clk_name); + return parent_rate; + } + return DIV_ROUND_UP_ULL(parent_rate, value); } @@ -116,8 +125,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw, bestdiv = zynqmp_divider_get_val(*prate, rate); - if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && - (divider->flags & CLK_FRAC)) + if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac) bestdiv = rate % *prate ? 1 : bestdiv; *prate = rate * bestdiv; @@ -195,11 +203,13 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name, init.name = name; init.ops = &zynqmp_clk_divider_ops; - init.flags = nodes->flag; + /* CLK_FRAC is not defined in the common clk framework */ + init.flags = nodes->flag & ~CLK_FRAC; init.parent_names = parents; init.num_parents = 1; /* struct clk_divider assignments */ + div->is_frac = !!(nodes->flag & CLK_FRAC); div->flags = nodes->type_flag; div->hw.init = &init; div->clk_id = clk_id; @@ -214,4 +224,3 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name, return hw; } -EXPORT_SYMBOL_GPL(zynqmp_clk_register_divider); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 171502a356aa..4b3d143f0f8a 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -145,6 +145,7 @@ config VT8500_TIMER config NPCM7XX_TIMER bool "NPCM7xx timer driver" if COMPILE_TEST depends on HAS_IOMEM + select TIMER_OF select CLKSRC_MMIO help Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index aa4ec53281ce..ea373cfbcecb 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -9,7 +9,7 @@ * published by the Free Software Foundation. 
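The zynqmp firmware hunk above decodes the attribute word with FIELD_GET() and rebuilds a full clock device ID with FIELD_PREP() plus the table index. A standalone userspace sketch of that mask-based unpack/repack follows; the helper macros are simplified stand-ins for linux/bitfield.h and the field positions are illustrative, not the driver's exact CLK_ATTR_* layout.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the linux/bitfield.h helpers: the shift is the
 * position of the mask's lowest set bit. */
#define FIELD_SHIFT(mask)	__builtin_ctz(mask)
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> FIELD_SHIFT(mask))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask))

/* Illustrative layout: index 13:0, node type 19:14, subclass 25:20,
 * class 31:26, valid flag in bit 0 of the attribute word. */
#define ATTR_VALID	0x00000001u
#define NODE_INDEX	0x00003fffu
#define NODE_TYPE	0x000fc000u
#define NODE_SUBCLASS	0x03f00000u
#define NODE_CLASS	0xfc000000u

int main(void)
{
	uint32_t attr = 0x08304001u;	/* pretend firmware attribute word */
	uint32_t index = 7;		/* position in the local clock table */

	uint32_t clk_id = FIELD_PREP(NODE_CLASS, FIELD_GET(NODE_CLASS, attr)) |
			  FIELD_PREP(NODE_SUBCLASS, FIELD_GET(NODE_SUBCLASS, attr)) |
			  FIELD_PREP(NODE_TYPE, FIELD_GET(NODE_TYPE, attr)) |
			  FIELD_PREP(NODE_INDEX, index);

	printf("valid=%u clk_id=0x%08x\n",
	       (unsigned int)FIELD_GET(ATTR_VALID, attr), clk_id);
	return 0;
}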
*/ -#define pr_fmt(fmt) "arm_arch_timer: " fmt +#define pr_fmt(fmt) "arch_timer: " fmt #include <linux/init.h> #include <linux/kernel.h> @@ -33,9 +33,6 @@ #include <clocksource/arm_arch_timer.h> -#undef pr_fmt -#define pr_fmt(fmt) "arch_timer: " fmt - #define CNTTIDR 0x08 #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c index a8dd80576c95..857f8c086274 100644 --- a/drivers/clocksource/clps711x-timer.c +++ b/drivers/clocksource/clps711x-timer.c @@ -31,16 +31,9 @@ static u64 notrace clps711x_sched_clock_read(void) return ~readw(tcd); } -static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base) +static void __init clps711x_clksrc_init(struct clk *clock, void __iomem *base) { - unsigned long rate; - - if (!base) - return -ENOMEM; - if (IS_ERR(clock)) - return PTR_ERR(clock); - - rate = clk_get_rate(clock); + unsigned long rate = clk_get_rate(clock); tcd = base; @@ -48,8 +41,6 @@ static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base) clocksource_mmio_readw_down); sched_clock_register(clps711x_sched_clock_read, 16, rate); - - return 0; } static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id) @@ -67,13 +58,6 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base, struct clock_event_device *clkevt; unsigned long rate; - if (!irq) - return -EINVAL; - if (!base) - return -ENOMEM; - if (IS_ERR(clock)) - return PTR_ERR(clock); - clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL); if (!clkevt) return -ENOMEM; @@ -93,31 +77,29 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base, "clps711x-timer", clkevt); } -void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base, - unsigned int irq) -{ - struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL); - struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL); - - BUG_ON(_clps711x_clksrc_init(tc1, tc1_base)); - BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq)); -} - -#ifdef CONFIG_TIMER_OF static int __init clps711x_timer_init(struct device_node *np) { unsigned int irq = irq_of_parse_and_map(np, 0); struct clk *clock = of_clk_get(np, 0); void __iomem *base = of_iomap(np, 0); + if (!base) + return -ENOMEM; + if (!irq) + return -EINVAL; + if (IS_ERR(clock)) + return PTR_ERR(clock); + switch (of_alias_get_id(np, "timer")) { case CLPS711X_CLKSRC_CLOCKSOURCE: - return _clps711x_clksrc_init(clock, base); + clps711x_clksrc_init(clock, base); + break; case CLPS711X_CLKSRC_CLOCKEVENT: return _clps711x_clkevt_init(clock, base, irq); default: return -EINVAL; } + + return 0; } TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init); -#endif diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 54f8a331b53a..37671a5d4ed9 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -67,7 +67,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -struct irqaction gic_compare_irqaction = { +static struct irqaction gic_compare_irqaction = { .handler = gic_compare_interrupt, .percpu_dev_id = &gic_clockevent_device, .flags = IRQF_PERCPU | IRQF_TIMER, diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 43f4d5c4d6fa..f987027ca566 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -71,7 +71,7 @@ static u64 tc_get_cycles32(struct clocksource *cs) return readl_relaxed(tcaddr 
+ ATMEL_TC_REG(0, CV)); } -void tc_clksrc_suspend(struct clocksource *cs) +static void tc_clksrc_suspend(struct clocksource *cs) { int i; @@ -86,7 +86,7 @@ void tc_clksrc_suspend(struct clocksource *cs) bmr_cache = readl(tcaddr + ATMEL_TC_BMR); } -void tc_clksrc_resume(struct clocksource *cs) +static void tc_clksrc_resume(struct clocksource *cs) { int i; diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c index eed6feff8b5f..30c6f4ce672b 100644 --- a/drivers/clocksource/timer-oxnas-rps.c +++ b/drivers/clocksource/timer-oxnas-rps.c @@ -296,4 +296,4 @@ err_alloc: TIMER_OF_DECLARE(ox810se_rps, "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init); TIMER_OF_DECLARE(ox820_rps, - "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init); + "oxsemi,ox820-rps-timer", oxnas_rps_timer_init); diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index e8163693e936..5e6038fbf115 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -58,7 +58,7 @@ static u64 riscv_sched_clock(void) static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = { .name = "riscv_clocksource", .rating = 300, - .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), + .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, .read = riscv_clocksource_rdtime, }; @@ -120,8 +120,7 @@ static int __init riscv_timer_init_dt(struct device_node *n) return error; } - sched_clock_register(riscv_sched_clock, - BITS_PER_LONG, riscv_timebase); + sched_clock_register(riscv_sched_clock, 64, riscv_timebase); error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING, "clockevents/riscv/timer:starting", diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index c364027638e1..ee8ec5a8cb16 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -585,34 +585,6 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, return 0; } -/* Optimized set_load which removes costly spin wait in timer_start */ -int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, - unsigned int load) -{ - u32 l; - - if (unlikely(!timer)) - return -EINVAL; - - omap_dm_timer_enable(timer); - - l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); - if (autoreload) { - l |= OMAP_TIMER_CTRL_AR; - omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load); - } else { - l &= ~OMAP_TIMER_CTRL_AR; - } - l |= OMAP_TIMER_CTRL_ST; - - __omap_dm_timer_load_start(timer, l, load, timer->posted); - - /* Save the context */ - timer->context.tclr = l; - timer->context.tldr = load; - timer->context.tcrr = load; - return 0; -} static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, unsigned int match) { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e22f0dbaebb1..2986119dd31f 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu) if (ret) return ret; - return cppc_perf.guaranteed_perf; + if (cppc_perf.guaranteed_perf) + return cppc_perf.guaranteed_perf; + + return cppc_perf.nominal_perf; } #else /* CONFIG_ACPI_CPPC_LIB */ @@ -2593,6 +2596,9 @@ static int __init intel_pstate_init(void) const struct x86_cpu_id *id; int rc; + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return -ENODEV; + if (no_load) return -ENODEV; @@ -2608,7 +2614,7 @@ static int __init intel_pstate_init(void) } else { id = x86_match_cpu(intel_pstate_cpu_ids); if (!id) { 
- pr_info("CPU ID not supported\n"); + pr_info("CPU model not supported\n"); return -ENODEV; } diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 3f49427766b8..2b51e0718c9f 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) clk_put(priv->clk); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); - kfree(priv); dev_pm_opp_remove_all_dynamic(priv->cpu_dev); + kfree(priv); return 0; } diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index b1eadc6652b5..7205d9f4029e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req) if (ret) goto unmap_ctx; - if (mapped_nents) { + if (mapped_nents) sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg + sec4_sg_src_index, 0); - if (*next_buflen) - scatterwalk_map_and_copy(next_buf, req->src, - to_hash - *buflen, - *next_buflen, 0); - } else { + else sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); - } + if (*next_buflen) + scatterwalk_map_and_copy(next_buf, req->src, + to_hash - *buflen, + *next_buflen, 0); desc = edesc->hw_desc; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index ec8a291d62ba..54093ffd0aef 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -671,7 +671,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg( d = bcm2835_dma_create_cb_chain(chan, direction, false, info, extra, frames, src, dst, 0, 0, - GFP_KERNEL); + GFP_NOWAIT); if (!d) return NULL; diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index 131f3974740d..814853842e29 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc, #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT); #else - mtk_dma_set(pc, MTK_CQDMA_SRC2, 0); + mtk_dma_set(pc, MTK_CQDMA_DST2, 0); #endif /* setup the length */ diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 2b4f25698169..e2a5398f89b5 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, enum dma_status status; unsigned int residue = 0; unsigned int dptr = 0; + unsigned int chcrb; + unsigned int tcrb; + unsigned int i; if (!desc) return 0; @@ -1330,14 +1333,31 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, } /* + * We need to read two registers. + * Make sure the control register does not skip to next chunk + * while reading the counter. + * Trying it 3 times should be enough: Initial read, retry, retry + * for the paranoid. + */ + for (i = 0; i < 3; i++) { + chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & + RCAR_DMACHCRB_DPTR_MASK; + tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB); + /* Still the same? */ + if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & + RCAR_DMACHCRB_DPTR_MASK)) + break; + } + WARN_ONCE(i >= 3, "residue might be not continuous!"); + + /* * In descriptor mode the descriptor running pointer is not maintained * by the interrupt handler, find the running descriptor from the * descriptor pointer field in the CHCRB register. 
In non-descriptor * mode just use the running descriptor pointer. */ if (desc->hwdescs.use) { - dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & - RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; + dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT; if (dptr == 0) dptr = desc->nchunks; dptr--; @@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, } /* Add the residue for the current chunk. */ - residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift; + residue += tcrb << desc->xfer_shift; return residue; } @@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, enum dma_status status; unsigned long flags; unsigned int residue; + bool cyclic; status = dma_cookie_status(chan, cookie, txstate); if (status == DMA_COMPLETE || !txstate) @@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, spin_lock_irqsave(&rchan->lock, flags); residue = rcar_dmac_chan_get_residue(rchan, cookie); + cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false; spin_unlock_irqrestore(&rchan->lock, flags); /* if there's no residue, the cookie is complete */ - if (!residue) + if (!residue && !cyclic) return DMA_COMPLETE; dma_set_residue(txstate, residue); diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 4e0eede599a8..ac0301b69593 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev) dmadev->nr_channels = nr_channels; dmadev->nr_requests = nr_requests; - ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", + device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", dmadev->ahb_addr_masks, count); - if (ret) - return ret; dmadev->nr_ahb_addr_masks = count; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 8e17149655f0..540e8cd16ee6 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -116,7 +116,7 @@ config EXTCON_PALMAS config EXTCON_PTN5150 tristate "NXP PTN5150 CC LOGIC USB EXTCON support" - depends on I2C && GPIOLIB || COMPILE_TEST + depends on I2C && (GPIOLIB || COMPILE_TEST) select REGMAP_I2C help Say Y here to enable support for USB peripheral and USB host diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c index 91b90c0cea73..12acdac85820 100644 --- a/drivers/gpio/gpio-adnp.c +++ b/drivers/gpio/gpio-adnp.c @@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset) if (err < 0) goto out; - if (err & BIT(pos)) - err = -EACCES; + if (value & BIT(pos)) { + err = -EPERM; + goto out; + } err = 0; diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index 854bce4fb9e7..217507002dbc 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev) gpio->offset_timer = devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL); + if (!gpio->offset_timer) + return -ENOMEM; return aspeed_gpio_setup_irqs(gpio, pdev); } diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index f0223cee9774..77092268ee95 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type) irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_BOTH: + sprd_eic_update(chip, offset, 
SPRD_EIC_SYNC_INTMODE, 0); sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1); irq_set_handler_locked(data, handle_edge_irq); break; diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index 0ecd2369c2ca..a09d2f9ebacc 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev) mutex_init(&exar_gpio->lock); index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); + if (index < 0) + goto err_destroy; sprintf(exar_gpio->name, "exar_gpio%d", index); exar_gpio->gpio_chip.label = exar_gpio->name; diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 154d959e8993..b6a4efce7c92 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file, struct gpio_mockup_chip *chip; struct seq_file *sfile; struct gpio_chip *gc; + int val, cnt; char buf[3]; - int val, rv; if (*ppos != 0) return 0; @@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file, gc = &chip->gc; val = gpio_mockup_get(gc, priv->offset); - snprintf(buf, sizeof(buf), "%d\n", val); + cnt = snprintf(buf, sizeof(buf), "%d\n", val); - rv = copy_to_user(usr_buf, buf, sizeof(buf)); - if (rv) - return rv; - - return sizeof(buf) - 1; + return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt); } static ssize_t gpio_mockup_debugfs_write(struct file *file, diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 8b9c3ab70f6e..6a3ec575a404 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np, * to determine if the flags should have inverted semantics. */ if (IS_ENABLED(CONFIG_SPI_MASTER) && - of_property_read_bool(np, "cs-gpios")) { + of_property_read_bool(np, "cs-gpios") && + !strcmp(propname, "cs-gpios")) { struct device_node *child; u32 cs; int ret; @@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np, * conflict and the "spi-cs-high" flag will * take precedence. 
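The gpio-mockup debugfs read above switches to simple_read_from_buffer() instead of an unconditional fixed-size copy. A userspace model of what that helper does with *ppos, the request size, and the available length; the user-space copy and -EFAULT handling of the real helper are deliberately left out.

#include <stdio.h>
#include <string.h>

/* Copy at most count bytes starting at *ppos from an available-byte buffer,
 * advance *ppos, and return the number of bytes copied (0 means EOF). */
static long read_from_buffer(void *to, size_t count, long long *ppos,
			     const void *from, size_t available)
{
	long long pos = *ppos;

	if (pos < 0)
		return -1;
	if ((size_t)pos >= available || count == 0)
		return 0;			/* end of file */
	if (count > available - (size_t)pos)
		count = available - (size_t)pos;
	memcpy(to, (const char *)from + pos, count);
	*ppos = pos + (long long)count;
	return (long)count;
}

int main(void)
{
	char out[4];
	long long pos = 0;
	const char val[] = "1\n";		/* what the GPIO read produced */
	long n = read_from_buffer(out, sizeof(out), &pos, val, 2);

	printf("copied %ld bytes, ppos now %lld\n", n, pos);
	return 0;
}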
*/ - if (of_property_read_bool(np, "spi-cs-high")) { + if (of_property_read_bool(child, "spi-cs-high")) { if (*flags & OF_GPIO_ACTIVE_LOW) { pr_warn("%s GPIO handle specifies active low - ignored\n", - of_node_full_name(np)); + of_node_full_name(child)); *flags &= ~OF_GPIO_ACTIVE_LOW; } } else { if (!(*flags & OF_GPIO_ACTIVE_LOW)) pr_info("%s enforce active low on chipselect handle\n", - of_node_full_name(np)); + of_node_full_name(child)); *flags |= OF_GPIO_ACTIVE_LOW; } break; @@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip) of_node_get(chip->of_node); - return of_gpiochip_scan_gpios(chip); + status = of_gpiochip_scan_gpios(chip); + if (status) { + of_node_put(chip->of_node); + gpiochip_remove_pin_ranges(chip); + } + + return status; } void of_gpiochip_remove(struct gpio_chip *chip) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 144af0733581..bca3e7740ef6 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1379,7 +1379,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, status = gpiochip_add_irqchip(chip, lock_key, request_key); if (status) - goto err_remove_chip; + goto err_free_gpiochip_mask; status = of_gpiochip_add(chip); if (status) @@ -1387,7 +1387,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, status = gpiochip_init_valid_mask(chip); if (status) - goto err_remove_chip; + goto err_remove_of_chip; for (i = 0; i < chip->ngpio; i++) { struct gpio_desc *desc = &gdev->descs[i]; @@ -1415,14 +1415,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, if (gpiolib_initialized) { status = gpiochip_setup_dev(gdev); if (status) - goto err_remove_chip; + goto err_remove_acpi_chip; } return 0; -err_remove_chip: +err_remove_acpi_chip: acpi_gpiochip_remove(chip); +err_remove_of_chip: gpiochip_free_hogs(chip); of_gpiochip_remove(chip); +err_remove_chip: + gpiochip_irqchip_remove(chip); +err_free_gpiochip_mask: gpiochip_free_valid_mask(chip); err_remove_irqchip_mask: gpiochip_irqchip_free_valid_mask(chip); @@ -2776,7 +2780,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) } config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce); - return gpio_set_config(chip, gpio_chip_hwgpio(desc), config); + return chip->set_config(chip, gpio_chip_hwgpio(desc), config); } EXPORT_SYMBOL_GPL(gpiod_set_debounce); @@ -2813,7 +2817,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE, !transitory); gpio = gpio_chip_hwgpio(desc); - rc = gpio_set_config(chip, gpio, packed); + rc = chip->set_config(chip, gpio, packed); if (rc == -ENOTSUPP) { dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n", gpio); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4f8fb4ecde34..79fb302fb954 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) /* No need to recover an evicted BO */ if (shadow->tbo.mem.mem_type != TTM_PL_TT || + shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET || shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) continue; @@ -3173,11 +3174,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) break; if (fence) { - r = dma_fence_wait_timeout(fence, false, tmo); + tmo = dma_fence_wait_timeout(fence, false, tmo); dma_fence_put(fence); fence = next; - if (r <= 0) + if (tmo 
== 0) { + r = -ETIMEDOUT; break; + } else if (tmo < 0) { + r = tmo; + break; + } } else { fence = next; } @@ -3188,8 +3194,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) tmo = dma_fence_wait_timeout(fence, false, tmo); dma_fence_put(fence); - if (r <= 0 || tmo <= 0) { - DRM_ERROR("recover vram bo from shadow failed\n"); + if (r < 0 || tmo <= 0) { + DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); return -EIO; } @@ -3625,6 +3631,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, struct pci_dev *pdev = adev->pdev; enum pci_bus_speed cur_speed; enum pcie_link_width cur_width; + u32 ret = 1; *speed = PCI_SPEED_UNKNOWN; *width = PCIE_LNK_WIDTH_UNKNOWN; @@ -3632,6 +3639,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, while (pdev) { cur_speed = pcie_get_speed_cap(pdev); cur_width = pcie_get_width_cap(pdev); + ret = pcie_bandwidth_available(adev->pdev, NULL, + NULL, &cur_width); + if (!ret) + cur_width = PCIE_LNK_WIDTH_RESRV; if (cur_speed != PCI_SPEED_UNKNOWN) { if (*speed == PCI_SPEED_UNKNOWN) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 0b8ef2d27d6b..fe393a46f881 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -35,6 +35,7 @@ #include "amdgpu_trace.h" #define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000) +#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT msecs_to_jiffies(2000) /* * IB @@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) * cost waiting for it coming back under RUNTIME only */ tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT; + } else if (adev->gmc.xgmi.hive_id) { + tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT; } for (i = 0; i < adev->num_rings; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index bfa9062ce6b9..16fcb56c232b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_vm_bo_base *bo_base, *tmp; int r = 0; + vm->bulk_moveable &= list_empty(&vm->evicted); + list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { struct amdgpu_bo *bo = bo_base->bo; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d0309e8c9d12..a11db2b1a63f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) /* disable CG */ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); - adev->gfx.rlc.funcs->reset(adev); - gfx_v9_0_init_pg(adev); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 600259b4e291..2fe8397241ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) } ring->vm_inv_eng = inv_eng - 1; - change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub])); + vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng); dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", ring->name, ring->vm_inv_eng, ring->funcs->vmhub); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index d0d966d6080a..1696644ec022 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); } + WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 8be9677c0c07..cf9a49f49d3a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = { { 0x9876, &carrizo_device_info }, /* Carrizo */ { 0x9877, &carrizo_device_info }, /* Carrizo */ { 0x15DD, &raven_device_info }, /* Raven */ + { 0x15D8, &raven_device_info }, /* Raven */ #endif { 0x67A0, &hawaii_device_info }, /* Hawaii */ { 0x67A1, &hawaii_device_info }, /* Hawaii */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index fb27783d7a54..3082b55b1e77 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4533,6 +4533,7 @@ static void handle_cursor_update(struct drm_plane *plane, amdgpu_crtc->cursor_width = plane->state->crtc_w; amdgpu_crtc->cursor_height = plane->state->crtc_h; + memset(&attributes, 0, sizeof(attributes)); attributes.address.high_part = upper_32_bits(address); attributes.address.low_part = lower_32_bits(address); attributes.width = plane->state->crtc_w; @@ -5429,9 +5430,11 @@ static void get_freesync_config_for_crtc( struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); struct drm_display_mode *mode = &new_crtc_state->base.mode; + int vrefresh = drm_mode_vrefresh(mode); new_crtc_state->vrr_supported = new_con_state->freesync_capable && - aconnector->min_vfreq <= drm_mode_vrefresh(mode); + vrefresh >= aconnector->min_vfreq && + vrefresh <= aconnector->max_vfreq; if (new_crtc_state->vrr_supported) { new_crtc_state->stream->ignore_msa_timing_param = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c68fbd55db3c..a6cda201c964 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc, return UPDATE_TYPE_FULL; } + if (u->surface->force_full_update) { + update_flags->bits.full_update = 1; + return UPDATE_TYPE_FULL; + } + type = get_plane_info_update_type(u); elevate_update_type(&overall_type, type); @@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc, } dc_resource_state_copy_construct(state, context); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) + new_pipe->plane_state->force_full_update = true; + } } @@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc, dc->current_state = context; dc_release_state(old); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->plane_state && pipe_ctx->stream == stream) + pipe_ctx->plane_state->force_full_update = false; + } } /*let's use current_state to 
update watermark etc*/ if (update_type >= UPDATE_TYPE_FULL) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 4eba3c4800b6..ea18e9c2d8ce 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2660,12 +2660,18 @@ void core_link_enable_stream( void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) { struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; core_dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) + dal_ddc_service_write_scdc_data( + stream->link->ddc, 0, + stream->timing.flags.LTE_340MCSC_SCRAMBLE); + core_dc->hwss.disable_stream(pipe_ctx, option); disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1a7fd6aa77eb..0515095574e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -503,6 +503,9 @@ struct dc_plane_state { struct dc_plane_status status; struct dc_context *ctx; + /* HACK: Workaround for forcing full reprogramming under some conditions */ + bool force_full_update; + /* private to dc_surface.c */ enum dc_irq_source irq_source; struct kref refcount; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 4febf4ef7240..4fe3664fb495 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -190,6 +190,12 @@ static void submit_channel_request( 1, 0); } + + REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); + + REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, + 10, aux110->timeout_period/10); + /* set the delay and the number of bytes to write */ /* The length include @@ -242,9 +248,6 @@ static void submit_channel_request( } } - REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); - REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, - 10, aux110->timeout_period/10); REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index d27f22c05e4b..e28ed6a00ff4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -71,11 +71,11 @@ enum { /* This is the timeout as defined in DP 1.2a, * at most within ~240usec. That means, * increasing this timeout will not affect normal operation, * and we'll timeout after - * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec. + * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec. * This timeout is especially important for - * resume from S3 and CTS. + * converters, resume from S3, and CTS. 
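The dce_aux change above acknowledges the previous AUX_SW_DONE and then waits for the done bit to clear before programming the next request; the REG_WAIT there is a bounded poll with a per-iteration delay. A small userspace sketch of that poll-until-clear shape; read_status() is a hypothetical stand-in for the status register read.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t read_status(void)
{
	return 0;		/* pretend the DONE bit is already clear */
}

/* Poll until (status & mask) == 0 or the timeout expires, sleeping between
 * reads; the same shape as a REG_WAIT on a status register. */
static int wait_for_clear(uint32_t (*read_fn)(void), uint32_t mask,
			  unsigned int timeout_us, unsigned int poll_us)
{
	unsigned int waited = 0;

	while (read_fn() & mask) {
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		struct timespec ts = { 0, (long)poll_us * 1000 };
		nanosleep(&ts, NULL);
		waited += poll_us;
	}
	return 0;
}

int main(void)
{
	/* e.g. a 2400 us budget polled every 10 us */
	printf("wait_for_clear() = %d\n",
	       wait_for_clear(read_status, 1u << 4, 2400, 10));
	return 0;
}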
*/ - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4 + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6 }; struct dce_aux { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 683829466a44..0ba68d41b9c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position( REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en); - //account for cases where we see negative offset relative to overlay plane - if (src_x_offset < 0 && src_y_offset < 0) { - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, 0, - CURSOR_Y_POSITION, 0); - x_hotspot -= src_x_offset; - y_hotspot -= src_y_offset; - } else if (src_x_offset < 0) { - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, 0, - CURSOR_Y_POSITION, pos->y); - x_hotspot -= src_x_offset; - } else if (src_y_offset < 0) { - REG_SET_2(CURSOR_POSITION, 0, + REG_SET_2(CURSOR_POSITION, 0, CURSOR_X_POSITION, pos->x, - CURSOR_Y_POSITION, 0); - y_hotspot -= src_y_offset; - } else { - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, pos->x, - CURSOR_Y_POSITION, pos->y); - } + CURSOR_Y_POSITION, pos->y); REG_SET_2(CURSOR_HOT_SPOT, 0, CURSOR_HOT_SPOT_X, x_hotspot, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 9aa7bec1b5fe..23b5b94a4939 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) * MP0CLK DS */ data->registry_data.disallowed_features = 0xE0041C00; + /* ECC feature should be disabled on old SMUs */ + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); + hwmgr->smu_version = smum_get_argument(hwmgr); + if (hwmgr->smu_version < 0x282100) + data->registry_data.disallowed_features |= FEATURE_ECC_MASK; + data->registry_data.od_state_in_dc_support = 0; data->registry_data.thermal_support = 1; data->registry_data.skip_baco_hardware = 0; @@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; + data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT; for (i = 0; i < GNLD_FEATURES_MAX; i++) { data->smu_features[i].smu_feature_bitmap = @@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) "FCLK_DS", "MP1CLK_DS", "MP0CLK_DS", - "XGMI"}; + "XGMI", + "ECC"}; static const char *output_title[] = { "FEATURES", "BITMASK", @@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) struct vega20_single_dpm_table *dpm_table; bool vblank_too_short = false; bool disable_mclk_switching; + bool disable_fclk_switching; uint32_t i, latency; disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && @@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) if (hwmgr->display_config->nb_pstate_switch_disable) dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + if ((disable_mclk_switching && + (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) || + hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value) + disable_fclk_switching 
= true; + else + disable_fclk_switching = false; + /* fclk */ dpm_table = &(data->dpm_table.fclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; - if (hwmgr->display_config->nb_pstate_switch_disable) + if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching) dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; /* vclk */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index a5bc758ae097..ac2a3118a0ae 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -80,6 +80,7 @@ enum { GNLD_DS_MP1CLK, GNLD_DS_MP0CLK, GNLD_XGMI, + GNLD_ECC, GNLD_FEATURES_MAX }; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 63d5cf691549..195c4ae67058 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h @@ -99,7 +99,7 @@ #define FEATURE_DS_MP1CLK_BIT 30 #define FEATURE_DS_MP0CLK_BIT 31 #define FEATURE_XGMI_BIT 32 -#define FEATURE_SPARE_33_BIT 33 +#define FEATURE_ECC_BIT 33 #define FEATURE_SPARE_34_BIT 34 #define FEATURE_SPARE_35_BIT 35 #define FEATURE_SPARE_36_BIT 36 @@ -165,7 +165,8 @@ #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) #define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) #define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) -#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT ) +#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT ) +#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT ) #define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index a63e5f0dae56..ab7968c8f6a2 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1037,6 +1037,35 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, } EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write); +/* Filter out invalid setups to avoid configuring SCDC and scrambling */ +static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi) +{ + struct drm_display_info *display = &hdmi->connector.display_info; + + /* Completely disable SCDC support for older controllers */ + if (hdmi->version < 0x200a) + return false; + + /* Disable if no DDC bus */ + if (!hdmi->ddc) + return false; + + /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */ + if (!display->hdmi.scdc.supported || + !display->hdmi.scdc.scrambling.supported) + return false; + + /* + * Disable if display only support low TMDS rates and scrambling + * for low rates is not supported either + */ + if (!display->hdmi.scdc.scrambling.low_rates && + display->max_tmds_clock <= 340000) + return false; + + return true; +} + /* * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates: * - The Source shall suspend transmission of the TMDS clock and data @@ -1055,7 +1084,7 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi) unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock; /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */ - if (hdmi->connector.display_info.hdmi.scdc.supported) 
{ + if (dw_hdmi_support_scdc(hdmi)) { if (mtmdsclock > HDMI14_MAX_TMDSCLK) drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1); else @@ -1579,8 +1608,9 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, /* Set up HDMI_FC_INVIDCONF */ inv_val = (hdmi->hdmi_data.hdcp_enable || - vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || - hdmi_info->scdc.scrambling.low_rates ? + (dw_hdmi_support_scdc(hdmi) && + (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || + hdmi_info->scdc.scrambling.low_rates)) ? HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE : HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE); @@ -1646,7 +1676,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, } /* Scrambling Control */ - if (hdmi_info->scdc.supported) { + if (dw_hdmi_support_scdc(hdmi)) { if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || hdmi_info->scdc.scrambling.low_rates) { /* @@ -1658,13 +1688,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, * Source Devices compliant shall set the * Source Version = 1. */ - drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION, + drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION, &bytes); - drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION, + drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION, min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION)); /* Enabled Scrambling in the Sink */ - drm_scdc_set_scrambling(&hdmi->i2c->adap, 1); + drm_scdc_set_scrambling(hdmi->ddc, 1); /* * To activate the scrambler feature, you must ensure @@ -1680,7 +1710,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL); hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ); - drm_scdc_set_scrambling(&hdmi->i2c->adap, 0); + drm_scdc_set_scrambling(hdmi->ddc, 0); } } @@ -1774,6 +1804,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi) * iteration for others. * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing * the workaround with a single iteration. + * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have + * been identified as needing the workaround with a single iteration. 
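The dw-hdmi hunks above gate scrambling and the 1/40 TMDS clock ratio on the SCDC capability checks plus the TMDS clock: above the HDMI 1.4 limit scrambling is mandatory, below it only when the sink advertises scrambling at low rates. A small sketch of that decision; the 340 MHz figure mirrors HDMI14_MAX_TMDSCLK but the kHz units and names here are illustrative, not the driver's.

#include <stdbool.h>
#include <stdio.h>

#define TMDS_340MHZ_KHZ	340000	/* HDMI 1.4 TMDS limit, in kHz */

/* Scrambling is required above 340 MHz, or below it when the sink asks for
 * low-rate scrambling; the 1/40 clock ratio applies only above 340 MHz. */
static bool needs_scrambling(unsigned long tmds_khz, bool sink_low_rate_scramble)
{
	return tmds_khz > TMDS_340MHZ_KHZ || sink_low_rate_scramble;
}

static bool needs_high_tmds_ratio(unsigned long tmds_khz)
{
	return tmds_khz > TMDS_340MHZ_KHZ;
}

int main(void)
{
	printf("594000 kHz: scramble=%d ratio40=%d\n",
	       needs_scrambling(594000, false), needs_high_tmds_ratio(594000));
	printf("148500 kHz: scramble=%d ratio40=%d\n",
	       needs_scrambling(148500, false), needs_high_tmds_ratio(148500));
	return 0;
}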
*/ switch (hdmi->version) { @@ -1782,7 +1814,9 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi) break; case 0x131a: case 0x132a: + case 0x200a: case 0x201a: + case 0x211a: case 0x212a: count = 1; break; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 40ac19848034..fbb76332cc9f 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1034,7 +1034,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs->atomic_disable(crtc, old_crtc_state); else if (funcs->disable) funcs->disable(crtc); - else + else if (funcs->dpms) funcs->dpms(crtc, DRM_MODE_DPMS_OFF); if (!(dev->irq_enabled && dev->num_crtcs)) @@ -1277,10 +1277,9 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, if (new_crtc_state->enable) { DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", crtc->base.id, crtc->name); - if (funcs->atomic_enable) funcs->atomic_enable(crtc, old_crtc_state); - else + else if (funcs->commit) funcs->commit(crtc); } } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 381581b01d48..05bbc2b622fc 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev) synchronize_srcu(&drm_unplug_srcu); drm_dev_unregister(dev); - - mutex_lock(&drm_global_mutex); - if (dev->open_count == 0) - drm_dev_put(dev); - mutex_unlock(&drm_global_mutex); + drm_dev_put(dev); } EXPORT_SYMBOL(drm_dev_unplug); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0e9349ff2d16..af2ab640cadb 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, best_depth = fmt->depth; } } - if (sizes.surface_depth != best_depth) { + if (sizes.surface_depth != best_depth && best_depth) { DRM_INFO("requested bpp %d, scaled depth down to %d", sizes.surface_bpp, best_depth); sizes.surface_depth = best_depth; diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 83a5bbca6e7e..7caa3c7ed978 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp) drm_close_helper(filp); - if (!--dev->open_count) { + if (!--dev->open_count) drm_lastclose(dev); - if (drm_dev_is_unplugged(dev)) - drm_put_dev(dev); - } + mutex_unlock(&drm_global_mutex); drm_minor_release(minor); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 0573eab0e190..f35e4ab55b27 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -20,6 +20,7 @@ #include "regs-vp.h" #include <linux/kernel.h> +#include <linux/ktime.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/i2c.h> @@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha) mixer_reg_write(ctx, MXR_VIDEO_CFG, val); } -static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) +static bool mixer_is_synced(struct mixer_context *ctx) { - /* block update on vsync */ - mixer_reg_writemask(ctx, MXR_STATUS, enable ? 
- MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); + u32 base, shadow; + if (ctx->mxr_ver == MXR_VER_16_0_33_0 || + ctx->mxr_ver == MXR_VER_128_0_0_184) + return !(mixer_reg_read(ctx, MXR_CFG) & + MXR_CFG_LAYER_UPDATE_COUNT_MASK); + + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && + vp_reg_read(ctx, VP_SHADOW_UPDATE)) + return false; + + base = mixer_reg_read(ctx, MXR_CFG); + shadow = mixer_reg_read(ctx, MXR_CFG_S); + if (base != shadow) + return false; + + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); + if (base != shadow) + return false; + + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); + if (base != shadow) + return false; + + return true; +} + +static int mixer_wait_for_sync(struct mixer_context *ctx) +{ + ktime_t timeout = ktime_add_us(ktime_get(), 100000); + + while (!mixer_is_synced(ctx)) { + usleep_range(1000, 2000); + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + return 0; +} + +static void mixer_disable_sync(struct mixer_context *ctx) +{ + mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE); +} + +static void mixer_enable_sync(struct mixer_context *ctx) +{ + if (ctx->mxr_ver == MXR_VER_16_0_33_0 || + ctx->mxr_ver == MXR_VER_128_0_0_184) + mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); + mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE); if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) - vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ? - VP_SHADOW_UPDATE_ENABLE : 0); + vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE); } static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height) @@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx, spin_lock_irqsave(&ctx->reg_slock, flags); - vp_reg_write(ctx, VP_SHADOW_UPDATE, 1); /* interlace or progressive scan mode */ val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? 
~0 : 0); vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); @@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx, vp_regs_dump(ctx); } -static void mixer_layer_update(struct mixer_context *ctx) -{ - mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); -} - static void mixer_graph_buffer(struct mixer_context *ctx, struct exynos_drm_plane *plane) { @@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx, mixer_cfg_layer(ctx, win, priority, true); mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha); - /* layer update mandatory for mixer 16.0.33.0 */ - if (ctx->mxr_ver == MXR_VER_16_0_33_0 || - ctx->mxr_ver == MXR_VER_128_0_0_184) - mixer_layer_update(ctx); - spin_unlock_irqrestore(&ctx->reg_slock, flags); mixer_regs_dump(ctx); @@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx) static irqreturn_t mixer_irq_handler(int irq, void *arg) { struct mixer_context *ctx = arg; - u32 val, base, shadow; + u32 val; spin_lock(&ctx->reg_slock); @@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) val &= ~MXR_INT_STATUS_VSYNC; /* interlace scan need to check shadow register */ - if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { - if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && - vp_reg_read(ctx, VP_SHADOW_UPDATE)) - goto out; - - base = mixer_reg_read(ctx, MXR_CFG); - shadow = mixer_reg_read(ctx, MXR_CFG_S); - if (base != shadow) - goto out; - - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); - if (base != shadow) - goto out; - - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); - if (base != shadow) - goto out; - } + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags) + && !mixer_is_synced(ctx)) + goto out; drm_crtc_handle_vblank(&ctx->crtc->base); } @@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) static void mixer_atomic_begin(struct exynos_drm_crtc *crtc) { - struct mixer_context *mixer_ctx = crtc->ctx; + struct mixer_context *ctx = crtc->ctx; - if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) + if (!test_bit(MXR_BIT_POWERED, &ctx->flags)) return; - mixer_vsync_set_update(mixer_ctx, false); + if (mixer_wait_for_sync(ctx)) + dev_err(ctx->dev, "timeout waiting for VSYNC\n"); + mixer_disable_sync(ctx); } static void mixer_update_plane(struct exynos_drm_crtc *crtc, @@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; - mixer_vsync_set_update(mixer_ctx, true); + mixer_enable_sync(mixer_ctx); exynos_crtc_handle_event(crtc); } @@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) exynos_drm_pipe_clk_enable(crtc, true); - mixer_vsync_set_update(ctx, false); + mixer_disable_sync(ctx); mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); @@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) mixer_commit(ctx); - mixer_vsync_set_update(ctx, true); + mixer_enable_sync(ctx); set_bit(MXR_BIT_POWERED, &ctx->flags); } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 35b4ec3f7618..3592d04c33b2 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s, } if (index_mode) { - if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) { + if (guest_gma >= I915_GTT_PAGE_SIZE) 
{ ret = -EFAULT; goto err; } diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 035479e273be..e3f9caa7839f 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) /** * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU * @vgpu: a vGPU - * @conncted: link state + * @connected: link state * * This function is used to trigger hotplug interrupt for vGPU * diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 3e7e2b80c857..69a9a1b2ea4a 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -209,7 +209,7 @@ static int vgpu_get_plane_info(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_vgpu_primary_plane_format p; struct intel_vgpu_cursor_plane_format c; - int ret; + int ret, tile_height = 1; if (plane_id == DRM_PLANE_TYPE_PRIMARY) { ret = intel_vgpu_decode_primary_plane(vgpu, &p); @@ -228,19 +228,19 @@ static int vgpu_get_plane_info(struct drm_device *dev, break; case PLANE_CTL_TILED_X: info->drm_format_mod = I915_FORMAT_MOD_X_TILED; + tile_height = 8; break; case PLANE_CTL_TILED_Y: info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; + tile_height = 32; break; case PLANE_CTL_TILED_YF: info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; + tile_height = 32; break; default: gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); } - - info->size = (((p.stride * p.height * p.bpp) / 8) + - (PAGE_SIZE - 1)) >> PAGE_SHIFT; } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { ret = intel_vgpu_decode_cursor_plane(vgpu, &c); if (ret) @@ -262,14 +262,13 @@ static int vgpu_get_plane_info(struct drm_device *dev, info->x_hot = UINT_MAX; info->y_hot = UINT_MAX; } - - info->size = (((info->stride * c.height * c.bpp) / 8) - + (PAGE_SIZE - 1)) >> PAGE_SHIFT; } else { gvt_vgpu_err("invalid plane id:%d\n", plane_id); return -EINVAL; } + info->size = (info->stride * roundup(info->height, tile_height) + + PAGE_SIZE - 1) >> PAGE_SHIFT; if (info->size == 0) { gvt_vgpu_err("fb size is zero\n"); return -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index c7103dd2d8d5..9814773882ec 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) static void ppgtt_free_all_spt(struct intel_vgpu *vgpu) { - struct intel_vgpu_ppgtt_spt *spt; + struct intel_vgpu_ppgtt_spt *spt, *spn; struct radix_tree_iter iter; - void **slot; + LIST_HEAD(all_spt); + void __rcu **slot; + rcu_read_lock(); radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) { spt = radix_tree_deref_slot(slot); - ppgtt_free_spt(spt); + list_move(&spt->post_shadow_list, &all_spt); } + rcu_read_unlock(); + + list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list) + ppgtt_free_spt(spt); } static int ppgtt_handle_guest_write_page_table_bytes( @@ -1882,7 +1888,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, } list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head); + + mutex_lock(&gvt->gtt.ppgtt_mm_lock); list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head); + mutex_unlock(&gvt->gtt.ppgtt_mm_lock); + return mm; } @@ -1942,7 +1952,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) */ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) { - atomic_dec(&mm->pincount); + atomic_dec_if_positive(&mm->pincount); } /** 
@@ -1967,9 +1977,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm) if (ret) return ret; + mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); list_move_tail(&mm->ppgtt_mm.lru_list, &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); - + mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); } return 0; @@ -1980,6 +1991,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt) struct intel_vgpu_mm *mm; struct list_head *pos, *n; + mutex_lock(&gvt->gtt.ppgtt_mm_lock); + list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); @@ -1987,9 +2000,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt) continue; list_del_init(&mm->ppgtt_mm.lru_list); + mutex_unlock(&gvt->gtt.ppgtt_mm_lock); invalidate_ppgtt_mm(mm); return 1; } + mutex_unlock(&gvt->gtt.ppgtt_mm_lock); return 0; } @@ -2659,6 +2674,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) } } INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head); + mutex_init(&gvt->gtt.ppgtt_mm_lock); return 0; } @@ -2699,7 +2715,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); if (mm->type == INTEL_GVT_MM_PPGTT) { + mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock); list_del_init(&mm->ppgtt_mm.lru_list); + mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock); if (mm->ppgtt_mm.shadowed) invalidate_ppgtt_mm(mm); } diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index d8cb04cc946d..edb610dc5d86 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -88,6 +88,7 @@ struct intel_gvt_gtt { void (*mm_free_page_table)(struct intel_vgpu_mm *mm); struct list_head oos_page_use_list_head; struct list_head oos_page_free_list_head; + struct mutex ppgtt_mm_lock; struct list_head ppgtt_mm_lru_list_head; struct page *scratch_page; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index d5fcc447d22f..a68addf95c23 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off) static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, void *buf, unsigned long count, bool is_write) { - void *aperture_va; + void __iomem *aperture_va; if (!intel_vgpu_in_aperture(vgpu, off) || !intel_vgpu_in_aperture(vgpu, off + count)) { @@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, return -EIO; if (is_write) - memcpy(aperture_va + offset_in_page(off), buf, count); + memcpy_toio(aperture_va + offset_in_page(off), buf, count); else - memcpy(buf, aperture_va + offset_in_page(off), count); + memcpy_fromio(buf, aperture_va + offset_in_page(off), count); io_mapping_unmap(aperture_va); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 7d84cfb9051a..7902fb162d09 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ + {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */ {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 
1bb8f936fdaa..05b953793316 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, int i = 0; if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) - return -1; + return -EINVAL; if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0]; @@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) if (workload->shadow) return 0; - ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); - if (ret < 0) { - gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); - return ret; - } - /* pin shadow context by gvt even the shadow context will be pinned * when i915 alloc request. That is because gvt will update the guest * context from shadow context when workload is completed, and at that @@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_vgpu_submission *s = &vgpu->submission; + struct i915_gem_context *shadow_ctx = s->shadow_ctx; + struct i915_request *rq; int ring_id = workload->ring_id; int ret; @@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&vgpu->vgpu_lock); mutex_lock(&dev_priv->drm.struct_mutex); + ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); + if (ret < 0) { + gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); + goto err_req; + } + ret = intel_gvt_workload_req_alloc(workload); if (ret) goto err_req; @@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ret = prepare_workload(workload); out: + if (ret) { + /* We might still need to add request with + * clean ctx to retire it properly.. 
+ */ + rq = fetch_and_zero(&workload->req); + i915_request_put(rq); + } + if (!IS_ERR_OR_NULL(workload->req)) { gvt_dbg_sched("ring id %d submit workload to i915 %p\n", ring_id, workload->req); @@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload( goto out; } - if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) + if (!scheduler->current_vgpu->active || + list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) goto out; /* @@ -1474,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, intel_runtime_pm_put_unchecked(dev_priv); } - if (ret && (vgpu_is_vm_unhealthy(ret))) { - enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); + if (ret) { + if (vgpu_is_vm_unhealthy(ret)) + enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); intel_vgpu_destroy_workload(workload); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0bd890c04fe4..f6f6e5b78e97 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data) ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); if (ret) { - ret = -EINTR; + if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) { + try_again = true; + continue; + } break; } crtc = connector->state->crtc; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9adc7bb9e69c..a67a63b5aa84 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void) INTEL_DEVID(dev_priv) == 0x5915 || \ INTEL_DEVID(dev_priv) == 0x591E) #define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \ - INTEL_DEVID(dev_priv) == 0x87C0) + INTEL_DEVID(dev_priv) == 0x87C0 || \ + INTEL_DEVID(dev_priv) == 0x87CA) #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ INTEL_INFO(dev_priv)->gt == 2) #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 30d516e975c6..8558e81fdc2a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1734,8 +1734,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, * pages from. 
*/ if (!obj->base.filp) { - i915_gem_object_put(obj); - return -ENXIO; + addr = -ENXIO; + goto err; + } + + if (range_overflows(args->offset, args->size, (u64)obj->base.size)) { + addr = -EINVAL; + goto err; } addr = vm_mmap(obj->base.filp, 0, args->size, @@ -1749,8 +1754,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct vm_area_struct *vma; if (down_write_killable(&mm->mmap_sem)) { - i915_gem_object_put(obj); - return -EINTR; + addr = -EINTR; + goto err; } vma = find_vma(mm, addr); if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) @@ -1768,12 +1773,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, i915_gem_object_put(obj); args->addr_ptr = (u64)addr; - return 0; err: i915_gem_object_put(obj); - return addr; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 9a65341fec09..aa6791255252 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1721,7 +1721,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg) i915_error_generate_code(error, engines)); if (engines) { /* Just show the first executing process, more is confusing */ - i = ffs(engines); + i = __ffs(engines); len += scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, ", in %s [%d]", diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 638a586469f9..047855dd8c6b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2863,7 +2863,7 @@ enum i915_power_well_id { #define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140) #define GEN11_GT_VDBOX_DISABLE_MASK 0xff #define GEN11_GT_VEBOX_DISABLE_SHIFT 16 -#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT) +#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT) #define GEN11_EU_DISABLE _MMIO(0x9134) #define GEN11_EU_DIS_MASK 0xFF @@ -9243,7 +9243,7 @@ enum skl_power_gate { #define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \ _TRANS_DDI_FUNC_CTL2_A) #define PORT_SYNC_MODE_ENABLE (1 << 4) -#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0) +#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0) #define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0) #define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0 diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 73a7bee24a66..641e0778fa9c 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -323,6 +323,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) } } +static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, + struct intel_dsi *intel_dsi) +{ + enum port port; + + for_each_dsi_port(port, intel_dsi->ports) { + WARN_ON(intel_dsi->io_wakeref[port]); + intel_dsi->io_wakeref[port] = + intel_display_power_get(dev_priv, + port == PORT_A ? + POWER_DOMAIN_PORT_DDI_A_IO : + POWER_DOMAIN_PORT_DDI_B_IO); + } +} + static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -336,13 +351,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); } - for_each_dsi_port(port, intel_dsi->ports) { - intel_dsi->io_wakeref[port] = - intel_display_power_get(dev_priv, - port == PORT_A ? 
- POWER_DOMAIN_PORT_DDI_A_IO : - POWER_DOMAIN_PORT_DDI_B_IO); - } + get_dsi_io_power_domains(dev_priv, intel_dsi); } static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) @@ -589,6 +598,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); } I915_WRITE(DPCLKA_CFGCR0_ICL, val); + + for_each_dsi_port(port, intel_dsi->ports) { + val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); + } + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + POSTING_READ(DPCLKA_CFGCR0_ICL); mutex_unlock(&dev_priv->dpll_lock); @@ -1117,7 +1132,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) DRM_ERROR("DDI port:%c buffer not idle\n", port_name(port)); } - gen11_dsi_ungate_clocks(encoder); + gen11_dsi_gate_clocks(encoder); } static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) @@ -1218,20 +1233,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, return 0; } -static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state) +static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - u64 domains = 0; - enum port port; - - for_each_dsi_port(port, intel_dsi->ports) - if (port == PORT_A) - domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO); - else - domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO); - - return domains; + get_dsi_io_power_domains(to_i915(encoder->base.dev), + enc_to_intel_dsi(&encoder->base)); } static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b508d8a735e0..4364f42cac6b 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1673,6 +1673,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv) info->supports_dvi = (port != PORT_A && port != PORT_E); info->supports_hdmi = info->supports_dvi; info->supports_dp = (port != PORT_E); + info->supports_edp = (port == PORT_A); } } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 14d580cdefd3..98cea1f4b3bf 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2075,12 +2075,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port) intel_aux_power_domain(dig_port); } -static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state) +static void intel_ddi_get_power_domains(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port; - u64 domains; /* * TODO: Add support for MST encoders. Atm, the following should never @@ -2088,10 +2087,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, * hook. 
*/ if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) - return 0; + return; dig_port = enc_to_dig_port(&encoder->base); - domains = BIT_ULL(dig_port->ddi_io_power_domain); + intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); /* * AUX power is only needed for (e)DP mode, and for HDMI mode on TC @@ -2099,15 +2098,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, */ if (intel_crtc_has_dp_encoder(crtc_state) || intel_port_is_tc(dev_priv, encoder->port)) - domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port)); + intel_display_power_get(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); /* * VDSC power is needed when DSC is enabled */ if (crtc_state->dsc_params.compression_enable) - domains |= BIT_ULL(intel_dsc_power_domain(crtc_state)); - - return domains; + intel_display_power_get(dev_priv, + intel_dsc_power_domain(crtc_state)); } void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) @@ -2825,10 +2824,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) return; } /* - * DSI ports should have their DDI clock ungated when disabled - * and gated when enabled. + * For DSI we keep the ddi clocks gated + * except during enable/disable sequence. */ - ddi_clk_needed = !encoder->base.crtc; + ddi_clk_needed = false; } val = I915_READ(DPCLKA_CFGCR0_ICL); @@ -3863,14 +3862,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state); else ret = intel_dp_compute_config(encoder, pipe_config, conn_state); + if (ret) + return ret; - if (IS_GEN9_LP(dev_priv) && ret) + if (IS_GEN9_LP(dev_priv)) pipe_config->lane_lat_optim_mask = bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); - return ret; + return 0; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ccb616351bba..421aac80a838 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15986,8 +15986,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) struct intel_encoder *encoder; for_each_intel_encoder(&dev_priv->drm, encoder) { - u64 get_domains; - enum intel_display_power_domain domain; struct intel_crtc_state *crtc_state; if (!encoder->get_power_domains) @@ -16001,9 +15999,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) continue; crtc_state = to_intel_crtc_state(encoder->base.crtc->state); - get_domains = encoder->get_power_domains(encoder, crtc_state); - for_each_power_domain(domain, get_domains) - intel_display_power_get(dev_priv, domain); + encoder->get_power_domains(encoder, crtc_state); } } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cf709835fb9a..48da4a969a0a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1859,42 +1859,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, return -EINVAL; } -/* Optimize link config in order: max bpp, min lanes, min clock */ -static int -intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - const struct link_config_limits *limits) -{ - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - int bpp, clock, lane_count; - int mode_rate, link_clock, link_avail; - - for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { - mode_rate = 
intel_dp_link_required(adjusted_mode->crtc_clock, - bpp); - - for (lane_count = limits->min_lane_count; - lane_count <= limits->max_lane_count; - lane_count <<= 1) { - for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { - link_clock = intel_dp->common_rates[clock]; - link_avail = intel_dp_max_data_rate(link_clock, - lane_count); - - if (mode_rate <= link_avail) { - pipe_config->lane_count = lane_count; - pipe_config->pipe_bpp = bpp; - pipe_config->port_clock = link_clock; - - return 0; - } - } - } - } - - return -EINVAL; -} - static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) { int i, num_bpc; @@ -1922,6 +1886,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, int pipe_bpp; int ret; + pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && + intel_dp_supports_fec(intel_dp, pipe_config); + if (!intel_dp_supports_dsc(intel_dp, pipe_config)) return -EINVAL; @@ -2031,15 +1998,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, limits.min_bpp = 6 * 3; limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); - if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) { + if (intel_dp_is_edp(intel_dp)) { /* * Use the maximum clock and number of lanes the eDP panel - * advertizes being capable of. The eDP 1.3 and earlier panels - * are generally designed to support only a single clock and - * lane configuration, and typically these values correspond to - * the native resolution of the panel. With eDP 1.4 rate select - * and DSC, this is decreasingly the case, and we need to be - * able to select less than maximum link config. + * advertizes being capable of. The panels are generally + * designed to support only a single clock and lane + * configuration, and typically these values correspond to the + * native resolution of the panel. */ limits.min_lane_count = limits.max_lane_count; limits.min_clock = limits.max_clock; @@ -2053,22 +2018,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, intel_dp->common_rates[limits.max_clock], limits.max_bpp, adjusted_mode->crtc_clock); - if (intel_dp_is_edp(intel_dp)) - /* - * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4 - * section A.1: "It is recommended that the minimum number of - * lanes be used, using the minimum link rate allowed for that - * lane configuration." - * - * Note that we use the max clock and lane count for eDP 1.3 and - * earlier, and fast vs. wide is irrelevant. - */ - ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, - &limits); - else - /* Optimize for slow and wide. */ - ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, - &limits); + /* + * Optimize for slow and wide. This is the place to add alternative + * optimization policy. 
+ */ + ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); /* enable compression if the mode doesn't fit available BW */ DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); @@ -2165,9 +2119,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) return -EINVAL; - pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && - intel_dp_supports_fec(intel_dp, pipe_config); - ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 15db41394b9e..d5660ac1b0d6 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -270,10 +270,12 @@ struct intel_encoder { * be set correctly before calling this function. */ void (*get_config)(struct intel_encoder *, struct intel_crtc_state *pipe_config); - /* Returns a mask of power domains that need to be referenced as part - * of the hardware state readout code. */ - u64 (*get_power_domains)(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state); + /* + * Acquires the power domains needed for an active encoder during + * hardware state readout. + */ + void (*get_power_domains)(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); /* * Called during system suspend after all pending requests for the * encoder are flushed (for example for DP AUX transactions) and diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index e8f694b57b8a..376ffe842e26 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -338,8 +338,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, bool *enabled, int width, int height) { struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); + unsigned long conn_configured, conn_seq, mask; unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); - unsigned long conn_configured, conn_seq; int i, j; bool *save_enabled; bool fallback = true, ret = true; @@ -357,9 +357,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, drm_modeset_backoff(&ctx); memcpy(save_enabled, enabled, count); - conn_seq = GENMASK(count - 1, 0); + mask = GENMASK(count - 1, 0); conn_configured = 0; retry: + conn_seq = conn_configured; for (i = 0; i < count; i++) { struct drm_fb_helper_connector *fb_conn; struct drm_connector *connector; @@ -372,8 +373,7 @@ retry: if (conn_configured & BIT(i)) continue; - /* First pass, only consider tiled connectors */ - if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile) + if (conn_seq == 0 && !connector->has_tile) continue; if (connector->status == connector_status_connected) @@ -477,10 +477,8 @@ retry: conn_configured |= BIT(i); } - if (conn_configured != conn_seq) { /* repeat until no more are found */ - conn_seq = conn_configured; + if ((conn_configured & mask) != mask && conn_configured != conn_seq) goto retry; - } /* * If the BIOS didn't enable everything it could, fall back to have the diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 32dce7176f63..b9b0ea4e2404 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg) struct i915_gem_context *ctx; ctx = live_context(i915, file); - if (!ctx) + if (IS_ERR(ctx)) break; /* We will need some GGTT space for 
the rq's context */ diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 6403728fe778..31c93c3ccd00 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -256,6 +256,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->sb_lock); } +static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 tmp; + + tmp = I915_READ(PIPEMISC(crtc->pipe)); + + switch (tmp & PIPEMISC_DITHER_BPC_MASK) { + case PIPEMISC_DITHER_6_BPC: + return 18; + case PIPEMISC_DITHER_8_BPC: + return 24; + case PIPEMISC_DITHER_10_BPC: + return 30; + case PIPEMISC_DITHER_12_BPC: + return 36; + default: + MISSING_CASE(tmp); + return 0; + } +} + static int intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -1071,6 +1093,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, bpp = mipi_dsi_pixel_format_to_bpp( pixel_format_from_register_bits(fmt)); + pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); + /* Enable Frame time stamo based scanline reporting */ adjusted_mode->private_flags |= I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index ec3602ebbc1c..54011df8c2e8 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -71,7 +71,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, if (disable_partial) ipu_plane_disable(ipu_crtc->plane[1], true); if (disable_full) - ipu_plane_disable(ipu_crtc->plane[0], false); + ipu_plane_disable(ipu_crtc->plane[0], true); } static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 22e68a100e7b..5d333138f913 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -662,13 +662,11 @@ static unsigned int mt8173_calculate_factor(int clock) static unsigned int mt2701_calculate_factor(int clock) { if (clock <= 64000) - return 16; - else if (clock <= 128000) - return 8; - else if (clock <= 256000) return 4; - else + else if (clock <= 128000) return 2; + else + return 1; } static const struct mtk_dpi_conf mt8173_conf = { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index cf59ea9bccfd..57ce4708ef1b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -15,6 +15,7 @@ #include <drm/drmP.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_of.h> @@ -341,6 +342,8 @@ static struct drm_driver mtk_drm_driver = { .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .gem_prime_mmap = mtk_drm_gem_mmap_buf, + .gem_prime_vmap = mtk_drm_gem_prime_vmap, + .gem_prime_vunmap = mtk_drm_gem_prime_vunmap, .fops = &mtk_drm_fops, .name = DRIVER_NAME, @@ -376,6 +379,10 @@ static int mtk_drm_bind(struct device *dev) if (ret < 0) goto err_deinit; + ret = drm_fbdev_generic_setup(drm, 32); + if (ret) + DRM_ERROR("Failed to initialize fbdev: %d\n", ret); + return 0; err_deinit: diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 259b7b0de1d2..38483e9ee071 100644 --- 
a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -241,3 +241,49 @@ err_gem_free: kfree(mtk_gem); return ERR_PTR(ret); } + +void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) +{ + struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + struct sg_table *sgt; + struct sg_page_iter iter; + unsigned int npages; + unsigned int i = 0; + + if (mtk_gem->kvaddr) + return mtk_gem->kvaddr; + + sgt = mtk_gem_prime_get_sg_table(obj); + if (IS_ERR(sgt)) + return NULL; + + npages = obj->size >> PAGE_SHIFT; + mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); + if (!mtk_gem->pages) + goto out; + + for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) { + mtk_gem->pages[i++] = sg_page_iter_page(&iter); + if (i > npages) + break; + } + mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + +out: + kfree((void *)sgt); + + return mtk_gem->kvaddr; +} + +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + + if (!mtk_gem->pages) + return; + + vunmap(vaddr); + mtk_gem->kvaddr = 0; + kfree((void *)mtk_gem->pages); +} diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h index 534639b43a1c..c047a7ef294f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h @@ -37,6 +37,7 @@ struct mtk_drm_gem_obj { dma_addr_t dma_addr; unsigned long dma_attrs; struct sg_table *sg; + struct page **pages; }; #define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base) @@ -52,5 +53,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); +void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj); +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); #endif diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 915cc84621ae..e04e6c293d39 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1480,7 +1480,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, if (IS_ERR(regmap)) ret = PTR_ERR(regmap); if (ret) { - ret = PTR_ERR(regmap); dev_err(dev, "Failed to get system configuration registers: %d\n", ret); @@ -1516,6 +1515,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, of_node_put(remote); hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np); + of_node_put(i2c_np); if (!hdmi->ddc_adpt) { dev_err(dev, "Failed to get ddc i2c adapter by node\n"); return -EINVAL; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c index 4ef9c57ffd44..5223498502c4 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c @@ -15,28 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = { .owner = THIS_MODULE, }; -long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - - hdmi_phy->pll_rate = rate; - if (rate <= 74250000) - *parent_rate = rate; - else - *parent_rate = rate / 2; - - return rate; -} - -unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - - return hdmi_phy->pll_rate; -} 
- void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, u32 bits) { @@ -110,13 +88,11 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy) return NULL; } -static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy, - const struct clk_ops **ops) +static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy, + struct clk_init_data *clk_init) { - if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops) - *ops = hdmi_phy->conf->hdmi_phy_clk_ops; - else - dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n"); + clk_init->flags = hdmi_phy->conf->flags; + clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops; } static int mtk_hdmi_phy_probe(struct platform_device *pdev) @@ -129,7 +105,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) struct clk_init_data clk_init = { .num_parents = 1, .parent_names = (const char * const *)&ref_clk_name, - .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, }; struct phy *phy; @@ -167,7 +142,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) hdmi_phy->dev = dev; hdmi_phy->conf = (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev); - mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops); + mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init); hdmi_phy->pll_hw.init = &clk_init; hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw); if (IS_ERR(hdmi_phy->pll)) { diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h index f39b1fc66612..2d8b3182470d 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h @@ -21,6 +21,7 @@ struct mtk_hdmi_phy; struct mtk_hdmi_phy_conf { bool tz_disabled; + unsigned long flags; const struct clk_ops *hdmi_phy_clk_ops; void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy); void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy); @@ -48,10 +49,6 @@ void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset, u32 val, u32 mask); struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw); -long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate); -unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate); extern struct platform_driver mtk_hdmi_phy_driver; extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf; diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c index fcc42dc6ea7f..d3cc4022e988 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c @@ -79,7 +79,6 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw) mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); usleep_range(80, 100); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); @@ -94,7 +93,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); mtk_hdmi_phy_clear_bits(hdmi_phy, 
HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); usleep_range(80, 100); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); @@ -108,6 +106,12 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) usleep_range(80, 100); } +static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + return rate; +} + static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { @@ -116,13 +120,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, if (rate <= 64000000) pos_div = 3; - else if (rate <= 12800000) - pos_div = 1; + else if (rate <= 128000000) + pos_div = 2; else pos_div = 1; mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK); + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC), RG_HTPLL_IC_MASK); mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR), @@ -154,6 +159,39 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } +static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + unsigned long out_rate, val; + + val = (readl(hdmi_phy->regs + HDMI_CON6) + & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV; + switch (val) { + case 0x00: + out_rate = parent_rate; + break; + case 0x01: + out_rate = parent_rate / 2; + break; + default: + out_rate = parent_rate / 4; + break; + } + + val = (readl(hdmi_phy->regs + HDMI_CON6) + & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV; + out_rate *= (val + 1) * 2; + val = (readl(hdmi_phy->regs + HDMI_CON2) + & RG_HDMITX_TX_POSDIV_MASK); + out_rate >>= (val >> RG_HDMITX_TX_POSDIV); + + if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV) + out_rate /= 5; + + return out_rate; +} + static const struct clk_ops mtk_hdmi_phy_pll_ops = { .prepare = mtk_hdmi_pll_prepare, .unprepare = mtk_hdmi_pll_unprepare, @@ -174,7 +212,6 @@ static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy) mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); usleep_range(80, 100); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); @@ -186,7 +223,6 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); usleep_range(80, 100); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); @@ -202,6 +238,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = { .tz_disabled = true, + .flags = CLK_SET_RATE_GATE, .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c 
b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c index ed5916b27658..47f8a2951682 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c @@ -199,6 +199,20 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) usleep_range(100, 150); } +static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + + hdmi_phy->pll_rate = rate; + if (rate <= 74250000) + *parent_rate = rate; + else + *parent_rate = rate / 2; + + return rate; +} + static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { @@ -285,6 +299,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } +static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + + return hdmi_phy->pll_rate; +} + static const struct clk_ops mtk_hdmi_phy_pll_ops = { .prepare = mtk_hdmi_pll_prepare, .unprepare = mtk_hdmi_pll_unprepare, @@ -309,6 +331,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) } struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = { + .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 2281ed3eb774..8a4ebcb6405c 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) ret = drm_dev_register(drm, 0); if (ret) - goto free_drm; + goto uninstall_irq; drm_fbdev_generic_setup(drm, 32); return 0; +uninstall_irq: + drm_irq_uninstall(drm); free_drm: drm_dev_put(drm); @@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev) static void meson_drv_unbind(struct device *dev) { - struct drm_device *drm = dev_get_drvdata(dev); - struct meson_drm *priv = drm->dev_private; + struct meson_drm *priv = dev_get_drvdata(dev); + struct drm_device *drm = priv->drm; if (priv->canvas) { meson_canvas_free(priv->canvas, priv->canvas_id_osd1); @@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev) } drm_dev_unregister(drm); + drm_irq_uninstall(drm); drm_kms_helper_poll_fini(drm); drm_mode_config_cleanup(drm); drm_dev_put(drm); diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index e28814f4ea6c..563953ec6ad0 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector, DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); /* If sink max TMDS clock, we reject the mode */ - if (mode->clock > connector->display_info.max_tmds_clock) + if (connector->display_info.max_tmds_clock && + mode->clock > connector->display_info.max_tmds_clock) return MODE_BAD; /* Check against non-VIC supported modes */ diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 88a52f6b39fe..7dfbbbc1beea 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, } ret = pm_runtime_get_sync(drm->dev); - if (IS_ERR_VALUE(ret) && ret != 
-EACCES) + if (ret < 0 && ret != -EACCES) return ret; ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); pm_runtime_put_autosuspend(drm->dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index aa9fec80492d..40c47d6a7d78 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -100,12 +100,10 @@ static void nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page) { struct nouveau_dmem_chunk *chunk; - struct nouveau_drm *drm; unsigned long idx; chunk = (void *)hmm_devmem_page_get_drvdata(page); idx = page_to_pfn(page) - chunk->pfn_first; - drm = chunk->drm; /* * FIXME: @@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm) /* FIXME handle pin failure */ WARN_ON(ret); } - list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) { - ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false); - /* FIXME handle pin failure */ - WARN_ON(ret); - } mutex_unlock(&drm->dmem->mutex); } @@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm) list_for_each_entry (chunk, &drm->dmem->chunk_full, list) { nouveau_bo_unpin(chunk->bo); } - list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) { - nouveau_bo_unpin(chunk->bo); - } mutex_unlock(&drm->dmem->mutex); } @@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm) */ drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops, device, size); - if (drm->dmem->devmem == NULL) { + if (IS_ERR(drm->dmem->devmem)) { kfree(drm->dmem); drm->dmem = NULL; return; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c index 340383150fb9..ebf9c96d43ee 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c @@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3); hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE); hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE); + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); hdmi4_core_disable(core); return 0; } @@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) if (err) return err; + /* + * Initialize CEC clock divider: CEC needs 2MHz clock hence + * set the divider to 24 to get 48/24=2MHz clock + */ + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0); + /* Clear TX FIFO */ if (!hdmi_cec_clear_tx_fifo(adap)) { pr_err("cec-%s: could not clear TX FIFO\n", adap->name); - return -EIO; + err = -EIO; + goto err_disable_clk; } /* Clear RX FIFO */ if (!hdmi_cec_clear_rx_fifo(adap)) { pr_err("cec-%s: could not clear RX FIFO\n", adap->name); - return -EIO; + err = -EIO; + goto err_disable_clk; } /* Clear CEC interrupts */ @@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp); } return 0; + +err_disable_clk: + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); + hdmi4_core_disable(core); + + return err; } static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) @@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core, return ret; core->wp = wp; - /* - * Initialize CEC clock divider: CEC needs 2MHz clock hence - * set the devider to 24 to get 48/24=2MHz clock - */ - REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0); + /* Disable clock initially, hdmi_cec_adap_enable() manages it */ + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 
0, 5, 0); ret = cec_register_adapter(core->adap, &pdev->dev); if (ret < 0) { diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 813ba42f2753..e384b95ad857 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -708,7 +708,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, else acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT; /* - * The I2S input word length is twice the lenght given in the IEC-60958 + * The I2S input word length is twice the length given in the IEC-60958 * status word. If the word size is greater than * 20 bits, increment by one. */ diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 578d867a81d5..f33e349c4ec5 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -255,10 +255,14 @@ static struct drm_driver qxl_driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = qxl_debugfs_init, #endif + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = qxl_gem_prime_pin, .gem_prime_unpin = qxl_gem_prime_unpin, + .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, + .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, .gem_prime_vmap = qxl_gem_prime_vmap, .gem_prime_vunmap = qxl_gem_prime_vunmap, .gem_prime_mmap = qxl_gem_prime_mmap, diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index 8b448eca1cd9..114653b471c6 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -42,6 +42,18 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj) qxl_bo_unpin(bo); } +struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + return ERR_PTR(-ENOSYS); +} + +struct drm_gem_object *qxl_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table) +{ + return ERR_PTR(-ENOSYS); +} + void *qxl_gem_prime_vmap(struct drm_gem_object *obj) { struct qxl_bo *bo = gem_to_qxl_bo(obj); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index c7d4c6073ea5..0d4ade9d4722 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop) clk_disable(vop->hclk); } +static void vop_win_disable(struct vop *vop, const struct vop_win_data *win) +{ + if (win->phy->scl && win->phy->scl->ext) { + VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE); + VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE); + VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE); + VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE); + } + + VOP_WIN_SET(vop, win, enable, 0); +} + static int vop_enable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); @@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc) struct vop_win *vop_win = &vop->win[i]; const struct vop_win_data *win = vop_win->data; - VOP_WIN_SET(vop, win, enable, 0); + vop_win_disable(vop, win); } spin_unlock(&vop->reg_lock); @@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane, spin_lock(&vop->reg_lock); - VOP_WIN_SET(vop, win, enable, 0); + vop_win_disable(vop, win); spin_unlock(&vop->reg_lock); } @@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop) int channel = i * 2 + 1; 
VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel); - VOP_WIN_SET(vop, win, enable, 0); + vop_win_disable(vop, win); VOP_WIN_SET(vop, win, gate, 1); } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 19fc601c9eeb..a1bec2779e76 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -366,10 +366,9 @@ void drm_sched_increase_karma(struct drm_sched_job *bad) EXPORT_SYMBOL(drm_sched_increase_karma); /** - * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job + * drm_sched_stop - stop the scheduler * * @sched: scheduler instance - * @bad: bad scheduler job * */ void drm_sched_stop(struct drm_gpu_scheduler *sched) diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 3ebd9f5e2719..29258b404e54 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -16,6 +16,7 @@ #include <linux/of_reserved_mem.h> #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> @@ -85,6 +86,8 @@ static int sun4i_drv_bind(struct device *dev) ret = -ENOMEM; goto free_drm; } + + dev_set_drvdata(dev, drm); drm->dev_private = drv; INIT_LIST_HEAD(&drv->frontend_list); INIT_LIST_HEAD(&drv->engine_list); @@ -144,8 +147,12 @@ static void sun4i_drv_unbind(struct device *dev) drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); drm_mode_config_cleanup(drm); + + component_unbind_all(dev, NULL); of_reserved_mem_device_release(dev); + drm_dev_put(drm); } @@ -395,6 +402,8 @@ static int sun4i_drv_probe(struct platform_device *pdev) static int sun4i_drv_remove(struct platform_device *pdev) { + component_master_del(&pdev->dev, &sun4i_drv_master_ops); + return 0; } diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index dc47720c99ba..39d8509d96a0 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -48,8 +48,13 @@ static enum drm_mode_status sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector, const struct drm_display_mode *mode) { - /* This is max for HDMI 2.0b (4K@60Hz) */ - if (mode->clock > 594000) + /* + * Controller support maximum of 594 MHz, which correlates to + * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than + * 340 MHz scrambling has to be enabled. Because scrambling is + * not yet implemented, just limit to 340 MHz for now. 
+ */ + if (mode->clock > 340000) return MODE_CLOCK_HIGH; return MODE_OK; diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index fc36e0c10a37..b1e7c76e9c17 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c @@ -227,7 +227,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, err_unregister_gates: for (i = 0; i < CLK_NUM; i++) - if (clk_data->hws[i]) + if (!IS_ERR_OR_NULL(clk_data->hws[i])) clk_hw_unregister_gate(clk_data->hws[i]); clk_disable_unprepare(tcon_top->bus); err_assert_reset: @@ -245,7 +245,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master, of_clk_del_provider(dev->of_node); for (i = 0; i < CLK_NUM; i++) - clk_hw_unregister_gate(clk_data->hws[i]); + if (clk_data->hws[i]) + clk_hw_unregister_gate(clk_data->hws[i]); clk_disable_unprepare(tcon_top->bus); reset_control_assert(tcon_top->rst); diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 47c55974756d..d23c4bfde790 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -1260,9 +1260,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) hdmi->dvi = !tegra_output_is_hdmi(output); if (!hdmi->dvi) { - err = tegra_hdmi_setup_audio(hdmi); - if (err < 0) - hdmi->dvi = true; + /* + * Make sure that the audio format has been configured before + * enabling audio, otherwise we may try to divide by zero. + */ + if (hdmi->format.sample_rate > 0) { + err = tegra_hdmi_setup_audio(hdmi); + if (err < 0) + hdmi->dvi = true; + } } if (hdmi->config->has_hda) diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index ba9b3cfb8c3d..b3436c2aed68 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane, static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct tegra_dc *dc = to_tegra_dc(old_state->crtc); struct tegra_plane *p = to_tegra_plane(plane); + struct tegra_dc *dc; u32 value; /* rien ne va plus */ if (!old_state || !old_state->crtc) return; + dc = to_tegra_dc(old_state->crtc); + /* * XXX Legacy helpers seem to sometimes call ->atomic_disable() even * on planes that are already disabled. 
Make sure we fallback to the diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index 39bfed9623de..982ce37ecde1 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic) if (vic->booted) return 0; +#ifdef CONFIG_IOMMU_API if (vic->config->supports_sid) { struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); u32 value; @@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic) vic_writel(vic, value, VIC_THI_STREAMID1); } } +#endif /* setup clockgating registers */ vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) | diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3f56647cdb35..1a01669b159a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj); * ttm_global_mutex - protecting the global BO state */ DEFINE_MUTEX(ttm_global_mutex); -struct ttm_bo_global ttm_bo_glob = { - .use_count = 0 -}; +unsigned ttm_bo_glob_use_count; +struct ttm_bo_global ttm_bo_glob; static struct attribute ttm_bo_count = { .name = "bo_count", @@ -876,8 +875,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, reservation_object_add_shared_fence(bo->resv, fence); ret = reservation_object_reserve_shared(bo->resv, 1); - if (unlikely(ret)) + if (unlikely(ret)) { + dma_fence_put(fence); return ret; + } dma_fence_put(bo->moving); bo->moving = fence; @@ -1529,12 +1530,13 @@ static void ttm_bo_global_release(void) struct ttm_bo_global *glob = &ttm_bo_glob; mutex_lock(&ttm_global_mutex); - if (--glob->use_count > 0) + if (--ttm_bo_glob_use_count > 0) goto out; kobject_del(&glob->kobj); kobject_put(&glob->kobj); ttm_mem_global_release(&ttm_mem_glob); + memset(glob, 0, sizeof(*glob)); out: mutex_unlock(&ttm_global_mutex); } @@ -1546,7 +1548,7 @@ static int ttm_bo_global_init(void) unsigned i; mutex_lock(&ttm_global_mutex); - if (++glob->use_count > 1) + if (++ttm_bo_glob_use_count > 1) goto out; ret = ttm_mem_global_init(&ttm_mem_glob); diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index f1567c353b54..9a0909decb36 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -461,8 +461,8 @@ out_no_zone: void ttm_mem_global_release(struct ttm_mem_global *glob) { - unsigned int i; struct ttm_mem_zone *zone; + unsigned int i; /* let the page allocator first stop the shrink work. 
*/ ttm_page_alloc_fini(); @@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) zone = glob->zones[i]; kobject_del(&zone->kobj); kobject_put(&zone->kobj); - } + } kobject_del(&glob->kobj); kobject_put(&glob->kobj); + memset(glob, 0, sizeof(*glob)); } static void ttm_check_swapping(struct ttm_mem_global *glob) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index f841accc2c00..627f8dc91d0e 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, } #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (!(flags & TTM_PAGE_FLAG_DMA32)) { - for (j = 0; j < HPAGE_PMD_NR; ++j) - if (p++ != pages[i + j]) + if (!(flags & TTM_PAGE_FLAG_DMA32) && + (npages - i) >= HPAGE_PMD_NR) { + for (j = 1; j < HPAGE_PMD_NR; ++j) + if (++p != pages[i + j]) break; if (j == HPAGE_PMD_NR) @@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, unsigned max_size, n2free; spin_lock_irqsave(&huge->lock, irq_flags); - while (i < npages) { + while ((npages - i) >= HPAGE_PMD_NR) { struct page *p = pages[i]; unsigned j; if (!p) break; - for (j = 0; j < HPAGE_PMD_NR; ++j) - if (p++ != pages[i + j]) + for (j = 1; j < HPAGE_PMD_NR; ++j) + if (++p != pages[i + j]) break; if (j != HPAGE_PMD_NR) diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index 66885c24590f..c1bd5e3d9e4a 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -18,18 +18,19 @@ #include "udl_connector.h" #include "udl_drv.h" -static bool udl_get_edid_block(struct udl_device *udl, int block_idx, - u8 *buff) +static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, + size_t len) { int ret, i; u8 *read_buff; + struct udl_device *udl = data; read_buff = kmalloc(2, GFP_KERNEL); if (!read_buff) - return false; + return -1; - for (i = 0; i < EDID_LENGTH; i++) { - int bval = (i + block_idx * EDID_LENGTH) << 8; + for (i = 0; i < len; i++) { + int bval = (i + block * EDID_LENGTH) << 8; ret = usb_control_msg(udl->udev, usb_rcvctrlpipe(udl->udev, 0), (0x02), (0x80 | (0x02 << 5)), bval, @@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx, if (ret < 1) { DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); kfree(read_buff); - return false; + return -1; } - buff[i] = read_buff[1]; + buf[i] = read_buff[1]; } kfree(read_buff); - return true; -} - -static bool udl_get_edid(struct udl_device *udl, u8 **result_buff, - int *result_buff_size) -{ - int i, extensions; - u8 *block_buff = NULL, *buff_ptr; - - block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL); - if (block_buff == NULL) - return false; - - if (udl_get_edid_block(udl, 0, block_buff) && - memchr_inv(block_buff, 0, EDID_LENGTH)) { - extensions = ((struct edid *)block_buff)->extensions; - if (extensions > 0) { - /* we have to read all extensions one by one */ - *result_buff_size = EDID_LENGTH * (extensions + 1); - *result_buff = kmalloc(*result_buff_size, GFP_KERNEL); - buff_ptr = *result_buff; - if (buff_ptr == NULL) { - kfree(block_buff); - return false; - } - memcpy(buff_ptr, block_buff, EDID_LENGTH); - kfree(block_buff); - buff_ptr += EDID_LENGTH; - for (i = 1; i < extensions; ++i) { - if (udl_get_edid_block(udl, i, buff_ptr)) { - buff_ptr += EDID_LENGTH; - } else { - kfree(*result_buff); - *result_buff = NULL; - return false; - } - } - return true; - } - /* we have only base 
edid block */ - *result_buff = block_buff; - *result_buff_size = EDID_LENGTH; - return true; - } - - kfree(block_buff); - - return false; + return 0; } static int udl_get_modes(struct drm_connector *connector) @@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector, static enum drm_connector_status udl_detect(struct drm_connector *connector, bool force) { - u8 *edid_buff = NULL; - int edid_buff_size = 0; struct udl_device *udl = connector->dev->dev_private; struct udl_drm_connector *udl_connector = container_of(connector, @@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force) udl_connector->edid = NULL; } - - if (!udl_get_edid(udl, &edid_buff, &edid_buff_size)) + udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl); + if (!udl_connector->edid) return connector_status_disconnected; - udl_connector->edid = (struct edid *)edid_buff; - return connector_status_connected; } diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 22cd2d13e272..ff47f890e6ad 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -52,6 +52,7 @@ static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, .load = udl_driver_load, .unload = udl_driver_unload, + .release = udl_driver_release, /* gem hooks */ .gem_free_object_unlocked = udl_gem_free_object, diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index e9e9b1ff678e..4ae67d882eae 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb); int udl_driver_load(struct drm_device *dev, unsigned long flags); void udl_driver_unload(struct drm_device *dev); +void udl_driver_release(struct drm_device *dev); int udl_fbdev_init(struct drm_device *dev); void udl_fbdev_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index d5a23295dd80..bb7b58407039 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); out: - drm_gem_object_put(&gobj->base); + drm_gem_object_put_unlocked(&gobj->base); unlock: mutex_unlock(&udl->gem_lock); return ret; diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 9086d0d1b880..1f8ef34ade24 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -379,6 +379,12 @@ void udl_driver_unload(struct drm_device *dev) udl_free_urb_list(dev); udl_fbdev_cleanup(dev); - udl_modeset_cleanup(dev); kfree(udl); } + +void udl_driver_release(struct drm_device *dev) +{ + udl_modeset_cleanup(dev); + drm_dev_fini(dev); + kfree(dev); +} diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 730008d3da76..1baa10e94484 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -1042,7 +1042,7 @@ static void vc4_crtc_reset(struct drm_crtc *crtc) { if (crtc->state) - __drm_atomic_helper_crtc_destroy_state(crtc->state); + vc4_crtc_destroy_state(crtc, crtc->state); crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); if (crtc->state) diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 5930facd6d2d..11a8f99ba18c 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -191,13 +191,9 @@ static struct 
drm_gem_object *vgem_gem_create(struct drm_device *dev, ret = drm_gem_handle_create(file, &obj->base, handle); drm_gem_object_put_unlocked(&obj->base); if (ret) - goto err; + return ERR_PTR(ret); return &obj->base; - -err: - __vgem_gem_destroy(obj); - return ERR_PTR(ret); } static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index b996ac1d4fcc..af92964b6889 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -205,10 +205,14 @@ static struct drm_driver driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, #endif + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = virtgpu_gem_prime_pin, .gem_prime_unpin = virtgpu_gem_prime_unpin, + .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, + .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, .gem_prime_vmap = virtgpu_gem_prime_vmap, .gem_prime_vunmap = virtgpu_gem_prime_vunmap, .gem_prime_mmap = virtgpu_gem_prime_mmap, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 3238fdf58eb4..d577cb76f5ad 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -354,6 +354,10 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); /* virtgpu_prime.c */ int virtgpu_gem_prime_pin(struct drm_gem_object *obj); void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); +struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *virtgpu_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *sgt); void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index c59ec34c80a5..eb51a78e1199 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -39,6 +39,18 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) WARN_ONCE(1, "not implemented"); } +struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + return ERR_PTR(-ENODEV); +} + +struct drm_gem_object *virtgpu_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table) +{ + return ERR_PTR(-ENODEV); +} + void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) { struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 138b0bb325cf..69048e73377d 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev, ret = drm_gem_handle_create(file, &obj->gem, handle); drm_gem_object_put_unlocked(&obj->gem); - if (ret) { - drm_gem_object_release(&obj->gem); - kfree(obj); + if (ret) return ERR_PTR(ret); - } return &obj->gem; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6165fe2c4504..1bfa353d995c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -546,29 +546,13 @@ 
static void vmw_get_initial_size(struct vmw_private *dev_priv) } /** - * vmw_assume_iommu - Figure out whether coherent dma-remapping might be - * taking place. - * @dev: Pointer to the struct drm_device. - * - * Return: true if iommu present, false otherwise. - */ -static bool vmw_assume_iommu(struct drm_device *dev) -{ - const struct dma_map_ops *ops = get_dma_ops(dev->dev); - - return !dma_is_direct(ops) && ops && - ops->map_page != dma_direct_map_page; -} - -/** * vmw_dma_select_mode - Determine how DMA mappings should be set up for this * system. * * @dev_priv: Pointer to a struct vmw_private * - * This functions tries to determine the IOMMU setup and what actions - * need to be taken by the driver to make system pages visible to the - * device. + * This functions tries to determine what actions need to be taken by the + * driver to make system pages visible to the device. * If this function decides that DMA is not possible, it returns -EINVAL. * The driver may then try to disable features of the device that require * DMA. @@ -578,23 +562,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) static const char *names[vmw_dma_map_max] = { [vmw_dma_phys] = "Using physical TTM page addresses.", [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", - [vmw_dma_map_populate] = "Keeping DMA mappings.", + [vmw_dma_map_populate] = "Caching DMA mappings.", [vmw_dma_map_bind] = "Giving up DMA mappings early."}; if (vmw_force_coherent) dev_priv->map_mode = vmw_dma_alloc_coherent; - else if (vmw_assume_iommu(dev_priv->dev)) - dev_priv->map_mode = vmw_dma_map_populate; - else if (!vmw_force_iommu) - dev_priv->map_mode = vmw_dma_phys; - else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl()) - dev_priv->map_mode = vmw_dma_alloc_coherent; + else if (vmw_restrict_iommu) + dev_priv->map_mode = vmw_dma_map_bind; else dev_priv->map_mode = vmw_dma_map_populate; - if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu) - dev_priv->map_mode = vmw_dma_map_bind; - /* No TTM coherent page pool? FIXME: Ask TTM instead! 
*/ if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && (dev_priv->map_mode == vmw_dma_alloc_coherent)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index b913a56f3426..2a9112515f46 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info) 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }; - struct drm_display_mode *old_mode; struct drm_display_mode *mode; int ret; - old_mode = par->set_mode; mode = drm_mode_duplicate(vmw_priv->dev, &new_mode); if (!mode) { DRM_ERROR("Could not create new fb mode.\n"); @@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info) mode->vdisplay = var->yres; vmw_guess_mode_timing(mode); - if (old_mode && drm_mode_equal(old_mode, mode)) { - drm_mode_destroy(vmw_priv->dev, mode); - mode = old_mode; - old_mode = NULL; - } else if (!vmw_kms_validate_mode_vram(vmw_priv, + if (!vmw_kms_validate_mode_vram(vmw_priv, mode->hdisplay * DIV_ROUND_UP(var->bits_per_pixel, 8), mode->vdisplay)) { @@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info) schedule_delayed_work(&par->local_work, 0); out_unlock: - if (old_mode) - drm_mode_destroy(vmw_priv->dev, old_mode); + if (par->set_mode) + drm_mode_destroy(vmw_priv->dev, par->set_mode); par->set_mode = mode; mutex_unlock(&par->bo_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index b93c558dd86e..7da752ca1c34 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); if (id < 0) - return id; + return (id != -ENOMEM ? 0 : id); spin_lock(&gman->lock); diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c index 27101c04a827..0c0eb43abf65 100644 --- a/drivers/gpu/host1x/hw/channel_hw.c +++ b/drivers/gpu/host1x/hw/channel_hw.c @@ -115,8 +115,12 @@ static inline void synchronize_syncpt_base(struct host1x_job *job) static void host1x_channel_set_streamid(struct host1x_channel *channel) { #if HOST1X_HW >= 6 + u32 sid = 0x7f; +#ifdef CONFIG_IOMMU_API struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent); - u32 sid = spec ? 
spec->ids[0] & 0xffff : 0x7f; + if (spec) + sid = spec->ids[0] & 0xffff; +#endif host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID); #endif diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c index 9b2b3fa479c4..5e44ff1f2085 100644 --- a/drivers/gpu/ipu-v3/ipu-dp.c +++ b/drivers/gpu/ipu-v3/ipu-dp.c @@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp, ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs, DP_COM_CONF_CSC_DEF_BOTH); } else { - if (flow->foreground.in_cs == flow->out_cs) + if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN || + flow->foreground.in_cs == flow->out_cs) /* * foreground identical to output, apply color * conversion on background @@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync) struct ipu_dp_priv *priv = flow->priv; u32 reg, csc; + dp->in_cs = IPUV3_COLORSPACE_UNKNOWN; + if (!dp->foreground) return; @@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync) reg = readl(flow->base + DP_COM_CONF); csc = reg & DP_COM_CONF_CSC_DEF_MASK; - if (csc == DP_COM_CONF_CSC_DEF_FG) - reg &= ~DP_COM_CONF_CSC_DEF_MASK; + reg &= ~DP_COM_CONF_CSC_DEF_MASK; + if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG) + reg |= DP_COM_CONF_CSC_DEF_BG; reg &= ~DP_COM_CONF_FG_EN; writel(reg, flow->base + DP_COM_CONF); @@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base) mutex_init(&priv->mutex); for (i = 0; i < IPUV3_NUM_FLOWS; i++) { + priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN; + priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN; priv->flow[i].foreground.foreground = true; priv->flow[i].base = priv->base + ipu_dp_flow_base[i]; priv->flow[i].priv = priv; diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 6ca8d322b487..4ca0cdfa6b33 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -150,6 +150,7 @@ config HID_ASUS tristate "Asus" depends on LEDS_CLASS depends on ASUS_WMI || ASUS_WMI=n + select POWER_SUPPLY ---help--- Support for Asus notebook built-in keyboard and touchpad via i2c, and the Asus Republic of Gamers laptop keyboard special keys. diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9993b692598f..860e21ec6a49 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n) u32 hid_field_extract(const struct hid_device *hid, u8 *report, unsigned offset, unsigned n) { - if (n > 32) { - hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n", + if (n > 256) { + hid_warn(hid, "hid_field_extract() called with n (%d) > 256! 
(%s)\n", n, current->comm); - n = 32; + n = 256; } return __extract(report, offset, n); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index ac9fda1b5a72..1384e57182af 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p) seq_printf(f, "\n\n"); /* dump parsed data and input mappings */ + if (down_interruptible(&hdev->driver_input_lock)) + return 0; + hid_dump_device(hdev, f); seq_printf(f, "\n"); hid_dump_input_mapping(hdev, f); + up(&hdev->driver_input_lock); + return 0; } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b6d93f4ad037..adce58f24f76 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -1083,6 +1083,7 @@ #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 +#define I2C_DEVICE_ID_SYNAPTICS_7E7E 0x7e7e #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index b10b1922c5bd..b607286a0bc8 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -680,6 +680,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel break; } + if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */ + switch (usage->hid & 0xf) { + case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break; + default: goto ignore; + } + break; + } + /* * Some lazy vendors declare 255 usages for System Control, * leading to the creation of ABS_X|Y axis and too many others. @@ -902,7 +910,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x06a: map_key_clear(KEY_GREEN); break; case 0x06b: map_key_clear(KEY_BLUE); break; case 0x06c: map_key_clear(KEY_YELLOW); break; - case 0x06d: map_key_clear(KEY_ZOOM); break; + case 0x06d: map_key_clear(KEY_ASPECT_RATIO); break; case 0x06f: map_key_clear(KEY_BRIGHTNESSUP); break; case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN); break; @@ -911,6 +919,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break; case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break; + case 0x079: map_key_clear(KEY_KBDILLUMUP); break; + case 0x07a: map_key_clear(KEY_KBDILLUMDOWN); break; + case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE); break; + case 0x082: map_key_clear(KEY_VIDEO_NEXT); break; case 0x083: map_key_clear(KEY_LAST); break; case 0x084: map_key_clear(KEY_ENTER); break; @@ -998,6 +1010,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x1b8: map_key_clear(KEY_VIDEO); break; case 0x1bc: map_key_clear(KEY_MESSENGER); break; case 0x1bd: map_key_clear(KEY_INFO); break; + case 0x1cb: map_key_clear(KEY_ASSISTANT); break; case 0x201: map_key_clear(KEY_NEW); break; case 0x202: map_key_clear(KEY_OPEN); break; case 0x203: map_key_clear(KEY_CLOSE); break; @@ -1021,6 +1034,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x22d: map_key_clear(KEY_ZOOMIN); break; case 0x22e: map_key_clear(KEY_ZOOMOUT); break; case 0x22f: map_key_clear(KEY_ZOOMRESET); break; + case 0x232: map_key_clear(KEY_FULL_SCREEN); break; case 0x233: map_key_clear(KEY_SCROLLUP); break; case 0x234: map_key_clear(KEY_SCROLLDOWN); break; case 0x238: /* AC Pan */ @@ -1044,6 +1058,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 
0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break; case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break; + case 0x29f: map_key_clear(KEY_SCALE); break; + default: map_key_clear(KEY_UNKNOWN); } break; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 15ed6177a7a3..199cc256e9d9 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) kfree(data); return -ENOMEM; } + data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); + if (!data->wq) { + kfree(data->effect_ids); + kfree(data); + return -ENOMEM; + } + data->hidpp = hidpp; data->feature_index = feature_index; data->version = version; @@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) /* ignore boost value at response.fap.params[2] */ /* init the hardware command queue */ - data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); atomic_set(&data->workqueue_size, 0); /* initialize with zero autocenter to get wheel in usable state */ @@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size) input_report_rel(mydata->input, REL_Y, v); v = hid_snto32(data[6], 8); - hidpp_scroll_counter_handle_scroll( - &hidpp->vertical_wheel_counter, v); + if (v != 0) + hidpp_scroll_counter_handle_scroll( + &hidpp->vertical_wheel_counter, v); input_sync(mydata->input); } diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 953908f2267c..77ffba48cc73 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, @@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = { { } }; -/** +/* * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer * * There are composite devices for which we want to ignore only a certain @@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev) if (hdev->product == 0x0401 && strncmp(hdev->name, "ELAN0800", 8) != 0) return true; + /* Same with product id 0x0400 */ + if (hdev->product == 0x0400 && + strncmp(hdev->name, "QTEC0001", 8) != 0) + return true; break; } @@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev) } if (bl_entry != NULL) - dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n", + dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n", bl_entry->driver_data, bl_entry->vendor, bl_entry->product); @@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev) quirks |= bl_entry->driver_data; if (quirks) - dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n", + dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n", quirks, hdev->vendor, hdev->product); return quirks; } diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 8141cadfca0e..8dae0f9b819e 100644 --- a/drivers/hid/hid-steam.c +++ 
b/drivers/hid/hid-steam.c @@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam) static int steam_register(struct steam_device *steam) { int ret; + bool client_opened; /* * This function can be called several times in a row with the @@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam) * Unlikely, but getting the serial could fail, and it is not so * important, so make up a serial number and go on. */ + mutex_lock(&steam->mutex); if (steam_get_serial(steam) < 0) strlcpy(steam->serial_no, "XXXXXXXXXX", sizeof(steam->serial_no)); + mutex_unlock(&steam->mutex); hid_info(steam->hdev, "Steam Controller '%s' connected", steam->serial_no); @@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam) } mutex_lock(&steam->mutex); - if (!steam->client_opened) { + client_opened = steam->client_opened; + if (!client_opened) steam_set_lizard_mode(steam, lizard_mode); + mutex_unlock(&steam->mutex); + + if (!client_opened) ret = steam_input_register(steam); - } else { + else ret = 0; - } - mutex_unlock(&steam->mutex); return ret; } @@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev) { struct steam_device *steam = hdev->driver_data; + unsigned long flags; + bool connected; + + spin_lock_irqsave(&steam->lock, flags); + connected = steam->connected; + spin_unlock_irqrestore(&steam->lock, flags); + mutex_lock(&steam->mutex); steam->client_opened = false; + if (connected) + steam_set_lizard_mode(steam, lizard_mode); mutex_unlock(&steam->mutex); - if (steam->connected) { - steam_set_lizard_mode(steam, lizard_mode); + if (connected) steam_input_register(steam); - } } static int steam_client_ll_raw_request(struct hid_device *hdev, diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 7710d9f957da..0187c9f8fc22 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params, goto cleanup; } rc = usb_string(udev, 201, ver_ptr, ver_len); - if (ver_ptr == NULL) { - rc = -ENOMEM; - goto cleanup; - } if (rc == -EPIPE) { *ver_ptr = '\0'; } else if (rc < 0) { diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 90164fed08d3..4d1f24ee249c 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -184,6 +184,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_NO_RUNTIME_PM }, { USB_VENDOR_ID_ELAN, HID_ANY_ID, I2C_HID_QUIRK_BOGUS_IRQ }, + { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E, + I2C_HID_QUIRK_NO_RUNTIME_PM }, { 0, 0 } }; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 6f929bfa9fcd..d0f1dfe2bcbb 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1759,6 +1759,7 @@ config SENSORS_VT8231 config SENSORS_W83773G tristate "Nuvoton W83773G" depends on I2C + select REGMAP_I2C help If you say yes here you get support for the Nuvoton W83773G hardware monitoring chip. 
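The hid-steam hunks above rearrange the locking so that steam_input_register() is never called with steam->mutex held: steam_register() and steam_client_ll_close() now sample the client_opened and connected flags under their locks, drop the locks, and act on the snapshot. Below is a minimal userspace sketch of that pattern, using pthreads and invented names (fake_dev, fake_register) rather than the driver's real API, purely to illustrate the snapshot-then-act idea.

/*
 * Sketch of the "snapshot under the lock, heavy work outside" pattern.
 * Names are illustrative; this is not kernel code. Build with: cc -pthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	pthread_mutex_t lock;	/* stands in for the device mutex */
	bool client_opened;	/* stands in for the client_opened flag */
};

static void set_lizard_mode(struct fake_dev *d, bool on)
{
	(void)d;
	/* cheap operation, safe to do while the lock is held */
	printf("lizard mode %s\n", on ? "on" : "off");
}

static int input_register(struct fake_dev *d)
{
	(void)d;
	/* in the real driver this may sleep and take other locks */
	printf("input device registered\n");
	return 0;
}

static int fake_register(struct fake_dev *d, bool lizard_mode)
{
	bool client_opened;

	pthread_mutex_lock(&d->lock);
	client_opened = d->client_opened;	/* snapshot under the lock */
	if (!client_opened)
		set_lizard_mode(d, lizard_mode);
	pthread_mutex_unlock(&d->lock);

	/* heavyweight call runs on the snapshot, outside the lock */
	return client_opened ? 0 : input_register(d);
}

int main(void)
{
	struct fake_dev d = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.client_opened = false,
	};

	return fake_register(&d, true);
}

The point of the split is simply that a call which may sleep or acquire further locks never runs while the device mutex is held, which is the same ordering the hid-steam hunks enforce.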
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index e4f9f7ce92fa..f9abeeeead9e 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = { }; static const u32 ntc_temp_config[] = { - HWMON_T_INPUT, HWMON_T_TYPE, + HWMON_T_INPUT | HWMON_T_TYPE, 0 }; diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index b91a80abf724..4679acb4918e 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ) s++; } } + + s = (sensors->power.num_sensors * 4) + 1; } else { for (i = 0; i < sensors->power.num_sensors; ++i) { s = i + 1; @@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ) show_power, NULL, 3, i); attr++; } - } - if (sensors->caps.num_sensors >= 1) { s = sensors->power.num_sensors + 1; + } + if (sensors->caps.num_sensors >= 1) { snprintf(attr->name, sizeof(attr->name), "power%d_label", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 0, 0); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index f2c681971201..f8979abb9a19 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -131,6 +131,7 @@ config I2C_I801 Cannon Lake (PCH) Cedar Fork (PCH) Ice Lake (PCH) + Comet Lake (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index bb8e3f149979..d464799e40a3 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -426,8 +426,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) pm_runtime_get_sync(dev->dev); - if (dev->suspended) { - dev_err(dev->dev, "Error %s call while suspended\n", __func__); + if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) { ret = -ESHUTDOWN; goto done_nolock; } diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index c91e145ef5a5..679c6c41f64b 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -71,6 +71,7 @@ * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes * Cedar Fork (PCH) 0x18df 32 hard yes yes yes * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes + * Comet Lake (PCH) 0x02a3 32 hard yes yes yes * * Features supported by this driver: * Software PEC no @@ -240,6 +241,7 @@ #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 +#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3 struct i801_mux_config { char *gpio_chip; @@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) }, { 0, } }; @@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) case PCI_DEVICE_ID_INTEL_DNV_SMBUS: case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS: + case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS: priv->features |= FEATURE_I2C_BLOCK_READ; priv->features |= FEATURE_IRQ; priv->features |= FEATURE_SMBUS_PEC; diff --git 
a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 42fed40198a0..fd70b110e8f4 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -515,9 +515,9 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { struct clk_notifier_data *ndata = data; - struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk, + struct imx_i2c_struct *i2c_imx = container_of(nb, struct imx_i2c_struct, - clk); + clk_change_nb); if (action & POST_RATE_CHANGE) i2c_imx_set_clk(i2c_imx, ndata->new_rate); @@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev) /* Init DMA config if supported */ ret = i2c_imx_dma_request(i2c_imx, phy_addr); if (ret < 0) - goto clk_notifier_unregister; + goto del_adapter; dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); return 0; /* Return OK */ +del_adapter: + i2c_del_adapter(&i2c_imx->adapter); clk_notifier_unregister: clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); rpm_disable: diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index d18b0941b71a..f14d4b3fab44 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -597,6 +597,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev) i2c->adapter = synquacer_i2c_ops; i2c_set_adapdata(&i2c->adapter, i2c); i2c->adapter.dev.parent = &pdev->dev; + i2c->adapter.dev.of_node = pdev->dev.of_node; + ACPI_COMPANION_SET(&i2c->adapter.dev, ACPI_COMPANION(&pdev->dev)); i2c->adapter.nr = pdev->id; init_completion(&i2c->completion); diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 38af18645133..688aa3b5f3ac 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap) int i2c_generic_scl_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; - int i = 0, scl = 1, ret; + int i = 0, scl = 1, ret = 0; if (bri->prepare_recovery) bri->prepare_recovery(adap); @@ -327,6 +327,8 @@ static int i2c_device_probe(struct device *dev) if (client->flags & I2C_CLIENT_HOST_NOTIFY) { dev_dbg(dev, "Using Host Notify IRQ\n"); + /* Keep adapter active when Host Notify is required */ + pm_runtime_get_sync(&client->adapter->dev); irq = i2c_smbus_host_notify_to_irq(client); } else if (dev->of_node) { irq = of_irq_get_byname(dev->of_node, "irq"); @@ -431,6 +433,8 @@ static int i2c_device_remove(struct device *dev) device_init_wakeup(&client->dev, false); client->irq = client->init_irq; + if (client->flags & I2C_CLIENT_HOST_NOTIFY) + pm_runtime_put(&client->adapter->dev); return status; } diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 2dc628d4f1ae..1412abcff010 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -1980,7 +1980,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master, { struct i3c_dev_boardinfo *boardinfo; struct device *dev = &master->dev; - struct i3c_device_info info = { }; enum i3c_addr_slot_status addrstatus; u32 init_dyn_addr = 0; @@ -2012,8 +2011,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master, boardinfo->pid = ((u64)reg[1] << 32) | reg[2]; - if ((info.pid & GENMASK_ULL(63, 48)) || - I3C_PID_RND_LOWER_32BITS(info.pid)) + if ((boardinfo->pid & GENMASK_ULL(63, 48)) || + I3C_PID_RND_LOWER_32BITS(boardinfo->pid)) return -EINVAL; boardinfo->init_dyn_addr = init_dyn_addr; diff --git 
a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index 59279224e07f..10c26ffaa8ef 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -300,7 +300,7 @@ to_dw_i3c_master(struct i3c_master_controller *master) static void dw_i3c_master_disable(struct dw_i3c_master *master) { - writel(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE, + writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE, master->regs + DEVICE_CTRL); } diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 7096e577b23f..50f3ff386bea 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev) mutex_lock(&data->mutex); ret = kxcjk1013_set_mode(data, OPERATION); + if (ret == 0) + ret = kxcjk1013_set_range(data, data->range); mutex_unlock(&data->mutex); return ret; diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index ff5f2da2e1b1..54d9978b2740 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta, if (sigma_delta->info->has_registers) { data[0] = reg << sigma_delta->info->addr_shift; data[0] |= sigma_delta->info->read_mask; + data[0] |= sigma_delta->comm; spi_message_add_tail(&t[0], &m); } spi_message_add_tail(&t[1], &m); diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 75d2f73582a3..596841a3c4db 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev, ret = wait_event_interruptible_timeout(st->wq_data_avail, st->done, msecs_to_jiffies(1000)); - if (ret == 0) - ret = -ETIMEDOUT; - if (ret < 0) { - mutex_unlock(&st->lock); - return ret; - } - - *val = st->last_value; + /* Disable interrupts, regardless if adc conversion was + * successful or not + */ at91_adc_writel(st, AT91_ADC_CHDR, AT91_ADC_CH(chan->channel)); at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel)); - st->last_value = 0; - st->done = false; + if (ret > 0) { + /* a valid conversion took place */ + *val = st->last_value; + st->last_value = 0; + st->done = false; + ret = IIO_VAL_INT; + } else if (ret == 0) { + /* conversion timeout */ + dev_err(&idev->dev, "ADC Channel %d timeout.\n", + chan->channel); + ret = -ETIMEDOUT; + } + mutex_unlock(&st->lock); - return IIO_VAL_INT; + return ret; case IIO_CHAN_INFO_SCALE: *val = st->vref_mv; diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index b13c61539d46..6401ca7a9a20 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -1292,6 +1292,7 @@ static int xadc_probe(struct platform_device *pdev) err_free_irq: free_irq(xadc->irq, indio_dev); + cancel_delayed_work_sync(&xadc->zynq_unmask_work); err_clk_disable_unprepare: clk_disable_unprepare(xadc->clk); err_free_samplerate_trigger: @@ -1321,8 +1322,8 @@ static int xadc_remove(struct platform_device *pdev) iio_triggered_buffer_cleanup(indio_dev); } free_irq(xadc->irq, indio_dev); + cancel_delayed_work_sync(&xadc->zynq_unmask_work); clk_disable_unprepare(xadc->clk); - cancel_delayed_work(&xadc->zynq_unmask_work); kfree(xadc->data); kfree(indio_dev->channels); diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index d5d146e9e372..92c684d2b67e 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -64,6 +64,7 @@ 
config IAQCORE config PMS7003 tristate "Plantower PMS7003 particulate matter sensor" depends on SERIAL_DEV_BUS + select IIO_TRIGGERED_BUFFER help Say Y here to build support for the Plantower PMS7003 particulate matter sensor. @@ -71,6 +72,19 @@ config PMS7003 To compile this driver as a module, choose M here: the module will be called pms7003. +config SENSIRION_SGP30 + tristate "Sensirion SGPxx gas sensors" + depends on I2C + select CRC8 + help + Say Y here to build I2C interface support for the following + Sensirion SGP gas sensors: + * SGP30 gas sensor + * SGPC3 low power gas sensor + + To compile this driver as module, choose M here: the + module will be called sgp30. + config SPS30 tristate "SPS30 particulate matter sensor" depends on I2C diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h index 0ae89b87e2d6..4edc5d21cb9f 100644 --- a/drivers/iio/chemical/bme680.h +++ b/drivers/iio/chemical/bme680.h @@ -2,11 +2,9 @@ #ifndef BME680_H_ #define BME680_H_ -#define BME680_REG_CHIP_I2C_ID 0xD0 -#define BME680_REG_CHIP_SPI_ID 0x50 +#define BME680_REG_CHIP_ID 0xD0 #define BME680_CHIP_ID_VAL 0x61 -#define BME680_REG_SOFT_RESET_I2C 0xE0 -#define BME680_REG_SOFT_RESET_SPI 0x60 +#define BME680_REG_SOFT_RESET 0xE0 #define BME680_CMD_SOFTRESET 0xB6 #define BME680_REG_STATUS 0x73 #define BME680_SPI_MEM_PAGE_BIT BIT(4) diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c index 70c1fe4366f4..ccde4c65ff93 100644 --- a/drivers/iio/chemical/bme680_core.c +++ b/drivers/iio/chemical/bme680_core.c @@ -63,9 +63,23 @@ struct bme680_data { s32 t_fine; }; +static const struct regmap_range bme680_volatile_ranges[] = { + regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB), + regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS), + regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG), +}; + +static const struct regmap_access_table bme680_volatile_table = { + .yes_ranges = bme680_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges), +}; + const struct regmap_config bme680_regmap_config = { .reg_bits = 8, .val_bits = 8, + .max_register = 0xef, + .volatile_table = &bme680_volatile_table, + .cache_type = REGCACHE_RBTREE, }; EXPORT_SYMBOL(bme680_regmap_config); @@ -316,6 +330,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data, s64 var1, var2, var3; s16 calc_temp; + /* If the calibration is invalid, attempt to reload it */ + if (!calib->par_t2) + bme680_read_calib(data, calib); + var1 = (adc_temp >> 3) - (calib->par_t1 << 1); var2 = (var1 * calib->par_t2) >> 11; var3 = ((var1 >> 1) * (var1 >> 1)) >> 12; @@ -583,8 +601,7 @@ static int bme680_gas_config(struct bme680_data *data) return ret; } -static int bme680_read_temp(struct bme680_data *data, - int *val, int *val2) +static int bme680_read_temp(struct bme680_data *data, int *val) { struct device *dev = regmap_get_device(data->regmap); int ret; @@ -617,10 +634,9 @@ static int bme680_read_temp(struct bme680_data *data, * compensate_press/compensate_humid to get compensated * pressure/humidity readings. 
*/ - if (val && val2) { - *val = comp_temp; - *val2 = 100; - return IIO_VAL_FRACTIONAL; + if (val) { + *val = comp_temp * 10; /* Centidegrees to millidegrees */ + return IIO_VAL_INT; } return ret; @@ -635,7 +651,7 @@ static int bme680_read_press(struct bme680_data *data, s32 adc_press; /* Read and compensate temperature to get a reading of t_fine */ - ret = bme680_read_temp(data, NULL, NULL); + ret = bme680_read_temp(data, NULL); if (ret < 0) return ret; @@ -668,7 +684,7 @@ static int bme680_read_humid(struct bme680_data *data, u32 comp_humidity; /* Read and compensate temperature to get a reading of t_fine */ - ret = bme680_read_temp(data, NULL, NULL); + ret = bme680_read_temp(data, NULL); if (ret < 0) return ret; @@ -761,7 +777,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { case IIO_TEMP: - return bme680_read_temp(data, val, val2); + return bme680_read_temp(data, val); case IIO_PRESSURE: return bme680_read_press(data, val, val2); case IIO_HUMIDITYRELATIVE: @@ -867,8 +883,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap, { struct iio_dev *indio_dev; struct bme680_data *data; + unsigned int val; int ret; + ret = regmap_write(regmap, BME680_REG_SOFT_RESET, + BME680_CMD_SOFTRESET); + if (ret < 0) { + dev_err(dev, "Failed to reset chip\n"); + return ret; + } + + ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val); + if (ret < 0) { + dev_err(dev, "Error reading chip ID\n"); + return ret; + } + + if (val != BME680_CHIP_ID_VAL) { + dev_err(dev, "Wrong chip ID, got %x expected %x\n", + val, BME680_CHIP_ID_VAL); + return -ENODEV; + } + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c index b2f805b6b36a..de9c9e3d23ea 100644 --- a/drivers/iio/chemical/bme680_i2c.c +++ b/drivers/iio/chemical/bme680_i2c.c @@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client, { struct regmap *regmap; const char *name = NULL; - unsigned int val; - int ret; regmap = devm_regmap_init_i2c(client, &bme680_regmap_config); if (IS_ERR(regmap)) { @@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client, return PTR_ERR(regmap); } - ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C, - BME680_CMD_SOFTRESET); - if (ret < 0) { - dev_err(&client->dev, "Failed to reset chip\n"); - return ret; - } - - ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val); - if (ret < 0) { - dev_err(&client->dev, "Error reading I2C chip ID\n"); - return ret; - } - - if (val != BME680_CHIP_ID_VAL) { - dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n", - val, BME680_CHIP_ID_VAL); - return -ENODEV; - } - if (id) name = id->name; diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c index d0b7bdd3f066..3b838068a7e4 100644 --- a/drivers/iio/chemical/bme680_spi.c +++ b/drivers/iio/chemical/bme680_spi.c @@ -12,28 +12,93 @@ #include "bme680.h" +struct bme680_spi_bus_context { + struct spi_device *spi; + u8 current_page; +}; + +/* + * In SPI mode there are only 7 address bits, a "page" register determines + * which part of the 8-bit range is active. This function looks at the address + * and writes the page selection bit if needed + */ +static int bme680_regmap_spi_select_page( + struct bme680_spi_bus_context *ctx, u8 reg) +{ + struct spi_device *spi = ctx->spi; + int ret; + u8 buf[2]; + u8 page = (reg & 0x80) ? 
0 : 1; /* Page "1" is low range */ + + if (page == ctx->current_page) + return 0; + + /* + * Data sheet claims we're only allowed to change bit 4, so we must do + * a read-modify-write on each and every page select + */ + buf[0] = BME680_REG_STATUS; + ret = spi_write_then_read(spi, buf, 1, buf + 1, 1); + if (ret < 0) { + dev_err(&spi->dev, "failed to set page %u\n", page); + return ret; + } + + buf[0] = BME680_REG_STATUS; + if (page) + buf[1] |= BME680_SPI_MEM_PAGE_BIT; + else + buf[1] &= ~BME680_SPI_MEM_PAGE_BIT; + + ret = spi_write(spi, buf, 2); + if (ret < 0) { + dev_err(&spi->dev, "failed to set page %u\n", page); + return ret; + } + + ctx->current_page = page; + + return 0; +} + static int bme680_regmap_spi_write(void *context, const void *data, size_t count) { - struct spi_device *spi = context; + struct bme680_spi_bus_context *ctx = context; + struct spi_device *spi = ctx->spi; + int ret; u8 buf[2]; memcpy(buf, data, 2); + + ret = bme680_regmap_spi_select_page(ctx, buf[0]); + if (ret) + return ret; + /* * The SPI register address (= full register address without bit 7) * and the write command (bit7 = RW = '0') */ buf[0] &= ~0x80; - return spi_write_then_read(spi, buf, 2, NULL, 0); + return spi_write(spi, buf, 2); } static int bme680_regmap_spi_read(void *context, const void *reg, size_t reg_size, void *val, size_t val_size) { - struct spi_device *spi = context; + struct bme680_spi_bus_context *ctx = context; + struct spi_device *spi = ctx->spi; + int ret; + u8 addr = *(const u8 *)reg; + + ret = bme680_regmap_spi_select_page(ctx, addr); + if (ret) + return ret; - return spi_write_then_read(spi, reg, reg_size, val, val_size); + addr |= 0x80; /* bit7 = RW = '1' */ + + return spi_write_then_read(spi, &addr, 1, val, val_size); } static struct regmap_bus bme680_regmap_bus = { @@ -46,8 +111,8 @@ static struct regmap_bus bme680_regmap_bus = { static int bme680_spi_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); + struct bme680_spi_bus_context *bus_context; struct regmap *regmap; - unsigned int val; int ret; spi->bits_per_word = 8; @@ -57,45 +122,21 @@ static int bme680_spi_probe(struct spi_device *spi) return ret; } + bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL); + if (!bus_context) + return -ENOMEM; + + bus_context->spi = spi; + bus_context->current_page = 0xff; /* Undefined on warm boot */ + regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus, - &spi->dev, &bme680_regmap_config); + bus_context, &bme680_regmap_config); if (IS_ERR(regmap)) { dev_err(&spi->dev, "Failed to register spi regmap %d\n", (int)PTR_ERR(regmap)); return PTR_ERR(regmap); } - ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI, - BME680_CMD_SOFTRESET); - if (ret < 0) { - dev_err(&spi->dev, "Failed to reset chip\n"); - return ret; - } - - /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */ - ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val); - if (ret < 0) { - dev_err(&spi->dev, "Error reading SPI chip ID\n"); - return ret; - } - - if (val != BME680_CHIP_ID_VAL) { - dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n", - val, BME680_CHIP_ID_VAL); - return -ENODEV; - } - /* - * select Page 1 of spi_mem_page to enable access to - * to registers from address 0x00 to 0x7F. 
- */ - ret = regmap_write_bits(regmap, BME680_REG_STATUS, - BME680_SPI_MEM_PAGE_BIT, - BME680_SPI_MEM_PAGE_1_VAL); - if (ret < 0) { - dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n"); - return ret; - } - return bme680_core_probe(&spi->dev, regmap, id->name); } diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index 89cb0066a6e0..8d76afb87d87 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev, * Do not use IIO_DEGREE_TO_RAD to avoid precision * loss. Round to the nearest integer. */ - *val = div_s64(val64 * 314159 + 9000000ULL, 1000); - *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1); - ret = IIO_VAL_FRACTIONAL; + *val = 0; + *val2 = div_s64(val64 * 3141592653ULL, + 180 << (CROS_EC_SENSOR_BITS - 1)); + ret = IIO_VAL_INT_PLUS_NANO; break; case MOTIONSENSE_TYPE_MAG: /* diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 6d71fd905e29..c701a45469f6 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -92,6 +92,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev, inoutbuf[0] = 0x60; /* write EEPROM */ inoutbuf[0] |= data->ref_mode << 3; + inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0; inoutbuf[1] = data->dac_value >> 4; inoutbuf[2] = (data->dac_value & 0xf) << 4; diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 63ca31628a93..92c07ab826eb 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: return bmg160_get_filter(data, val); case IIO_CHAN_INFO_SCALE: - *val = 0; switch (chan->type) { case IIO_TEMP: - *val2 = 500000; - return IIO_VAL_INT_PLUS_MICRO; + *val = 500; + return IIO_VAL_INT; case IIO_ANGL_VEL: { int i; @@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev, for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) { if (bmg160_scale_table[i].dps_range == data->dps_range) { + *val = 0; *val2 = bmg160_scale_table[i].scale; return IIO_VAL_INT_PLUS_MICRO; } diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 77fac81a3adc..5ddebede31a6 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -29,7 +29,8 @@ #include "mpu3050.h" -#define MPU3050_CHIP_ID 0x69 +#define MPU3050_CHIP_ID 0x68 +#define MPU3050_CHIP_ID_MASK 0x7E /* * Register map: anything suffixed *_H is a big-endian high byte and always @@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev, goto err_power_down; } - if (val != MPU3050_CHIP_ID) { - dev_err(dev, "unsupported chip id %02x\n", (u8)val); + if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) { + dev_err(dev, "unsupported chip id %02x\n", + (u8)(val & MPU3050_CHIP_ID_MASK)); ret = -ENODEV; goto err_power_down; } diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index cd5bfe39591b..dadd921a4a30 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev, const unsigned long *mask; unsigned long *trialmask; - trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength), - sizeof(*trialmask), - GFP_KERNEL); + trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength), + 
sizeof(*trialmask), GFP_KERNEL); if (trialmask == NULL) return -ENOMEM; if (!indio_dev->masklength) { diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 4700fd5d8c90..9c4d92115504 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1743,10 +1743,10 @@ EXPORT_SYMBOL(__iio_device_register); **/ void iio_device_unregister(struct iio_dev *indio_dev) { - mutex_lock(&indio_dev->info_exist_lock); - cdev_device_del(&indio_dev->chrdev, &indio_dev->dev); + mutex_lock(&indio_dev->info_exist_lock); + iio_device_unregister_debugfs(indio_dev); iio_disable_all_buffers(indio_dev); diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index ea0bc6885517..32cc8fe7902f 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -160,6 +160,7 @@ struct ib_uverbs_file { struct mutex umap_lock; struct list_head umaps; + struct page *disassociate_page; struct idr idr; /* spinlock protects write access to idr */ diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 70b7d80431a9..c489f545baae 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref) kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file); put_device(&file->device->dev); + + if (file->disassociate_page) + __free_pages(file->disassociate_page, 0); kfree(file); } @@ -877,9 +880,50 @@ static void rdma_umap_close(struct vm_area_struct *vma) kfree(priv); } +/* + * Once the zap_vma_ptes has been called touches to the VMA will come here and + * we return a dummy writable zero page for all the pfns. + */ +static vm_fault_t rdma_umap_fault(struct vm_fault *vmf) +{ + struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data; + struct rdma_umap_priv *priv = vmf->vma->vm_private_data; + vm_fault_t ret = 0; + + if (!priv) + return VM_FAULT_SIGBUS; + + /* Read only pages can just use the system zero page. */ + if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) { + vmf->page = ZERO_PAGE(vmf->address); + get_page(vmf->page); + return 0; + } + + mutex_lock(&ufile->umap_lock); + if (!ufile->disassociate_page) + ufile->disassociate_page = + alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0); + + if (ufile->disassociate_page) { + /* + * This VMA is forced to always be shared so this doesn't have + * to worry about COW. + */ + vmf->page = ufile->disassociate_page; + get_page(vmf->page); + } else { + ret = VM_FAULT_SIGBUS; + } + mutex_unlock(&ufile->umap_lock); + + return ret; +} + static const struct vm_operations_struct rdma_umap_ops = { .open = rdma_umap_open, .close = rdma_umap_close, + .fault = rdma_umap_fault, }; static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext, @@ -889,6 +933,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext, struct ib_uverbs_file *ufile = ucontext->ufile; struct rdma_umap_priv *priv; + if (!(vma->vm_flags & VM_SHARED)) + return ERR_PTR(-EINVAL); + if (vma->vm_end - vma->vm_start != size) return ERR_PTR(-EINVAL); @@ -992,7 +1039,9 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) * at a time to get the lock ordering right. Typically there * will only be one mm, so no big deal. 
*/ - down_write(&mm->mmap_sem); + down_read(&mm->mmap_sem); + if (!mmget_still_valid(mm)) + goto skip_mm; mutex_lock(&ufile->umap_lock); list_for_each_entry_safe (priv, next_priv, &ufile->umaps, list) { @@ -1004,10 +1053,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); - vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); } mutex_unlock(&ufile->umap_lock); - up_write(&mm->mmap_sem); + skip_mm: + up_read(&mm->mmap_sem); mmput(mm); } } diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 612f04190ed8..9784c6c0d2ec 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd) int total_contexts; int ret; unsigned ngroups; - int qos_rmt_count; + int rmt_count; int user_rmt_reduced; u32 n_usr_ctxts; u32 send_contexts = chip_send_contexts(dd); @@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd) n_usr_ctxts = rcv_contexts - total_contexts; } - /* each user context requires an entry in the RMT */ - qos_rmt_count = qos_rmt_entries(dd, NULL, NULL); - if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { - user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count; + /* + * The RMT entries are currently allocated as shown below: + * 1. QOS (0 to 128 entries); + * 2. FECN for PSM (num_user_contexts + num_vnic_contexts); + * 3. VNIC (num_vnic_contexts). + * It should be noted that PSM FECN oversubscribe num_vnic_contexts + * entries of RMT because both VNIC and PSM could allocate any receive + * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts, + * and PSM FECN must reserve an RMT entry for each possible PSM receive + * context. 
+ */ + rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2); + if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { + user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; dd_dev_err(dd, "RMT size is reducing the number of user receive contexts from %u to %d\n", n_usr_ctxts, @@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd, u64 reg; int i, idx, regoff, regidx; u8 offset; + u32 total_cnt; /* there needs to be enough room in the map table */ - if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) { + total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt; + if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n"); return; } @@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd, /* add rule 1 */ add_rsm_rule(dd, RSM_INS_FECN, &rrd); - rmt->used += dd->num_user_contexts; + rmt->used += total_cnt; } /* Initialize RSM for VNIC */ diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 9b643c2409cf..eba300330a02 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp) if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) { - qp->s_flags &= ~RVT_S_ANY_WAIT_IO; + qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; + iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB); + iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID); list_del_init(&priv->s_iowait.list); priv->s_iowait.lock = NULL; rvt_put_qp(qp); diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index e6726c1ab866..5991211d72bd 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -3088,7 +3088,7 @@ send_last: update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; - if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { + if (e->rdma_sge.mr) { rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } @@ -3166,7 +3166,7 @@ send_last: update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; - if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { + if (e->rdma_sge.mr) { rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index fdda33aca77f..43cbce7a19ea 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps) make_tid_rdma_ack(qp, ohdr, ps)) return 1; - if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { - if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) - goto bail; - /* We are in the error state, flush the work request. */ - if (qp->s_last == READ_ONCE(qp->s_head)) - goto bail; - /* If DMAs are in progress, we can't flush immediately. */ - if (iowait_sdma_pending(&priv->s_iowait)) { - qp->s_flags |= RVT_S_WAIT_DMA; - goto bail; - } - clear_ahg(qp); - wqe = rvt_get_swqe_ptr(qp, qp->s_last); - hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ? - IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); - /* will get called again */ - goto done_free_tx; - } + /* + * Bail out if we can't send data. + * Be reminded that this check must been done after the call to + * make_tid_rdma_ack() because the responding QP could be in + * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA. 
+ */ + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) + goto bail; if (priv->s_flags & RVT_S_WAIT_ACK) goto bail; @@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps) hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2, middle, ps); return 1; -done_free_tx: - hfi1_put_txreq(ps->s_txreq); - ps->s_txreq = NULL; - return 1; - bail: hfi1_put_txreq(ps->s_txreq); bail_no_tx: diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index f1fec56f3ff4..8e29dbb5b5fb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk; dma_offset = offset = idx_offset * table->obj_size; } else { + u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */ + hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); /* mtt mhop */ i = mhop.l0_idx; @@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, hem_idx = i; hem = table->hem[hem_idx]; - dma_offset = offset = (obj & (table->num_obj - 1)) * - table->obj_size % mhop.bt_chunk_size; + dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size % + mhop.bt_chunk_size; if (mhop.hop_num == 2) dma_offset = offset = 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index b09f1cde2ff5..08be0e4eabcd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table; dma_addr_t dma_handle; __le64 *mtts; - u32 s = start_index * sizeof(u64); u32 bt_page_size; u32 i; @@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, return -EINVAL; mtts = hns_roce_table_find(hr_dev, table, - mtt->first_seg + s / hr_dev->caps.mtt_entry_sz, + mtt->first_seg + + start_index / HNS_ROCE_MTT_ENTRY_PER_SEG, &dma_handle); if (!mtts) return -ENOMEM; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 57c76eafef2f..60cf9f03e941 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -274,9 +274,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) wait_for_completion(&hr_qp->free); if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) { - if (hr_dev->caps.sccc_entry_sz) - hns_roce_table_put(hr_dev, &qp_table->sccc_table, - hr_qp->qpn); if (hr_dev->caps.trrl_entry_sz) hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); @@ -536,7 +533,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) { - if (attr->qp_type == IB_QPT_XRC_TGT) + if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr) return 0; return 1; diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index c5a881172524..337410f40860 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, rcu_read_lock(); in = __in_dev_get_rcu(upper_dev); - local_ipaddr = ntohl(in->ifa_list->ifa_address); + + if (!in->ifa_list) + local_ipaddr = 0; + else + local_ipaddr = ntohl(in->ifa_list->ifa_address); + rcu_read_unlock(); } else { local_ipaddr = 
ntohl(ifa->ifa_address); @@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, case NETDEV_UP: /* Fall through */ case NETDEV_CHANGEADDR: + + /* Just skip if no need to handle ARP cache */ + if (!local_ipaddr) + break; + i40iw_manage_arp_cache(iwdev, netdev->dev_addr, &local_ipaddr, diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 782499abcd98..2a0b59a4b6eb 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) unsigned long flags; for (i = 0 ; i < dev->num_ports; i++) { - cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work); det = &sriov->alias_guid.ports_guid[i]; + cancel_delayed_work_sync(&det->alias_guid_work); spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags); while (!list_empty(&det->cb_list)) { cb_ctx = list_entry(det->cb_list.next, diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index eaa055007f28..9e08df7914aa 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -20,6 +20,7 @@ enum devx_obj_flags { DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0, + DEVX_OBJ_FLAGS_DCT = 1 << 1, }; struct devx_async_data { @@ -39,7 +40,10 @@ struct devx_obj { u32 dinlen; /* destroy inbox length */ u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW]; u32 flags; - struct mlx5_ib_devx_mr devx_mr; + union { + struct mlx5_ib_devx_mr devx_mr; + struct mlx5_core_dct core_dct; + }; }; struct devx_umem { @@ -347,7 +351,6 @@ static u64 devx_get_obj_id(const void *in) obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ, MLX5_GET(arm_rq_in, in, srq_number)); break; - case MLX5_CMD_OP_DRAIN_DCT: case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT, MLX5_GET(drain_dct_in, in, dctn)); @@ -618,7 +621,6 @@ static bool devx_is_obj_modify_cmd(const void *in) case MLX5_CMD_OP_2RST_QP: case MLX5_CMD_OP_ARM_XRC_SRQ: case MLX5_CMD_OP_ARM_RQ: - case MLX5_CMD_OP_DRAIN_DCT: case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: case MLX5_CMD_OP_ARM_XRQ: case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: @@ -1124,7 +1126,11 @@ static int devx_obj_cleanup(struct ib_uobject *uobject, if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) devx_cleanup_mkey(obj); - ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); + if (obj->flags & DEVX_OBJ_FLAGS_DCT) + ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct); + else + ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, + sizeof(out)); if (ib_is_destroy_retryable(ret, why, uobject)) return ret; @@ -1185,9 +1191,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( devx_set_umem_valid(cmd_in); } - err = mlx5_cmd_exec(dev->mdev, cmd_in, - cmd_in_len, - cmd_out, cmd_out_len); + if (opcode == MLX5_CMD_OP_CREATE_DCT) { + obj->flags |= DEVX_OBJ_FLAGS_DCT; + err = mlx5_core_create_dct(dev->mdev, &obj->core_dct, + cmd_in, cmd_in_len, + cmd_out, cmd_out_len); + } else { + err = mlx5_cmd_exec(dev->mdev, cmd_in, + cmd_in_len, + cmd_out, cmd_out_len); + } + if (err) goto obj_free; @@ -1214,7 +1228,11 @@ err_copy: if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) devx_cleanup_mkey(obj); obj_destroy: - mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); + if (obj->flags & DEVX_OBJ_FLAGS_DCT) + mlx5_core_destroy_dct(obj->mdev, &obj->core_dct); + else + mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, + sizeof(out)); obj_free: kfree(obj); 
return err; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 994c19d01211..d3dd290ae1b1 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -415,10 +415,17 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed, *active_speed = IB_SPEED_EDR; break; case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2): + *active_width = IB_WIDTH_2X; + *active_speed = IB_SPEED_EDR; + break; case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR): *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_HDR; break; + case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4): + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_EDR; + break; case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2): *active_width = IB_WIDTH_2X; *active_speed = IB_SPEED_HDR; @@ -1112,6 +1119,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (MLX5_CAP_GEN(mdev, qp_packet_based)) resp.flags |= MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE; + + resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT; } if (field_avail(typeof(resp), sw_parsing_caps, @@ -2059,6 +2068,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev, if (vma->vm_flags & VM_WRITE) return -EPERM; + vma->vm_flags &= ~VM_MAYWRITE; if (!dev->mdev->clock_info_page) return -EOPNOTSUPP; @@ -2224,19 +2234,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm if (vma->vm_flags & VM_WRITE) return -EPERM; + vma->vm_flags &= ~VM_MAYWRITE; /* Don't expose to user-space information it shouldn't have */ if (PAGE_SIZE > 4096) return -EOPNOTSUPP; - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); pfn = (dev->mdev->iseg_base + offsetof(struct mlx5_init_seg, internal_timer_h)) >> PAGE_SHIFT; - if (io_remap_pfn_range(vma, vma->vm_start, pfn, - PAGE_SIZE, vma->vm_page_prot)) - return -EAGAIN; - break; + return rdma_user_mmap_io(&context->ibucontext, vma, pfn, + PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot)); case MLX5_IB_MMAP_CLOCK_INFO: return mlx5_ib_mmap_clock_info_page(dev, vma, context); diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index c20bfc41ecf1..0aa10ebda5d9 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH; - u64 access_mask = ODP_READ_ALLOWED_BIT; + u64 access_mask; u64 start_idx, page_mask; struct ib_umem_odp *odp; size_t size; @@ -607,6 +607,7 @@ next_mr: page_shift = mr->umem->page_shift; page_mask = ~(BIT(page_shift) - 1); start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift; + access_mask = ODP_READ_ALLOWED_BIT; if (prefetch && !downgrade && !mr->umem->writable) { /* prefetch with write-access must diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 6b1f0e76900b..8870c350fda0 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1818,13 +1818,16 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr, rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq); - if (rcqe_sz == 128) { - MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE); + if (init_attr->qp_type == MLX5_IB_QPT_DCT) { + if (rcqe_sz == 128) + MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE); + return; } - if 
(init_attr->qp_type != MLX5_IB_QPT_DCT) - MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE); + MLX5_SET(qpc, qpc, cs_res, + rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE : + MLX5_RES_SCAT_DATA32_CQE); } static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev, @@ -3729,6 +3732,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { struct mlx5_ib_modify_qp_resp resp = {}; + u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0}; u32 min_resp_len = offsetof(typeof(resp), dctn) + sizeof(resp.dctn); @@ -3747,7 +3751,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit); err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in, - MLX5_ST_SZ_BYTES(create_dct_in)); + MLX5_ST_SZ_BYTES(create_dct_in), out, + sizeof(out)); if (err) return err; resp.dctn = qp->dct.mdct.mqp.qpn; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 6d8b3e0de57a..ec41400fec0c 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -1131,6 +1131,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev) pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); pvrdma_page_dir_cleanup(dev, &dev->async_pdir); pvrdma_free_slots(dev); + dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr, + dev->dsrbase); iounmap(dev->regs); kfree(dev->sgid_tbl); diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 728795043496..0bb6e39dd03a 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -608,11 +608,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr) if (unlikely(mapped_segs == mr->mr.max_segs)) return -ENOMEM; - if (mr->mr.length == 0) { - mr->mr.user_base = addr; - mr->mr.iova = addr; - } - m = mapped_segs / RVT_SEGSZ; n = mapped_segs % RVT_SEGSZ; mr->mr.map[m]->segs[n].vaddr = (void *)addr; @@ -630,17 +625,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr) * @sg_nents: number of entries in sg * @sg_offset: offset in bytes into sg * + * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages. 
+ * * Return: number of sg elements mapped to the memory region */ int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct rvt_mr *mr = to_imr(ibmr); + int ret; mr->mr.length = 0; mr->mr.page_shift = PAGE_SHIFT; - return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, - rvt_set_page); + ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page); + mr->mr.user_base = ibmr->iova; + mr->mr.iova = ibmr->iova; + mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; + mr->mr.length = (size_t)ibmr->length; + return ret; } /** @@ -671,6 +673,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key, ibmr->rkey = key; mr->mr.lkey = key; mr->mr.access_flags = access; + mr->mr.iova = ibmr->iova; atomic_set(&mr->mr.lkey_invalid, 0); return 0; diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index a878351f1643..52d7f55fca32 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -420,7 +420,7 @@ config KEYBOARD_MPR121 config KEYBOARD_SNVS_PWRKEY tristate "IMX SNVS Power Key Driver" - depends on SOC_IMX6SX || SOC_IMX7D + depends on ARCH_MXC || COMPILE_TEST depends on OF help This is the snvs powerkey driver for the Freescale i.MX application diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c index effb63205d3d..4c67cf30a5d9 100644 --- a/drivers/input/keyboard/snvs_pwrkey.c +++ b/drivers/input/keyboard/snvs_pwrkey.c @@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) return error; } + pdata->input = input; + platform_set_drvdata(pdev, pdata); + error = devm_request_irq(&pdev->dev, pdata->irq, imx_snvs_pwrkey_interrupt, 0, pdev->name, pdev); @@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) return error; } - pdata->input = input; - platform_set_drvdata(pdev, pdata); - device_init_wakeup(&pdev->dev, pdata->wakeup); return 0; diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 628ef617bb2f..f9525d6f0bfe 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0600", 0 }, { "ELAN0601", 0 }, { "ELAN0602", 0 }, + { "ELAN0603", 0 }, + { "ELAN0604", 0 }, { "ELAN0605", 0 }, + { "ELAN0606", 0 }, + { "ELAN0607", 0 }, { "ELAN0608", 0 }, { "ELAN0609", 0 }, { "ELAN060B", 0 }, { "ELAN060C", 0 }, + { "ELAN060F", 0 }, + { "ELAN0610", 0 }, { "ELAN0611", 0 }, { "ELAN0612", 0 }, + { "ELAN0615", 0 }, + { "ELAN0616", 0 }, { "ELAN0617", 0 }, { "ELAN0618", 0 }, + { "ELAN0619", 0 }, + { "ELAN061A", 0 }, + { "ELAN061B", 0 }, { "ELAN061C", 0 }, { "ELAN061D", 0 }, { "ELAN061E", 0 }, + { "ELAN061F", 0 }, { "ELAN0620", 0 }, { "ELAN0621", 0 }, { "ELAN0622", 0 }, + { "ELAN0623", 0 }, + { "ELAN0624", 0 }, + { "ELAN0625", 0 }, + { "ELAN0626", 0 }, + { "ELAN0627", 0 }, + { "ELAN0628", 0 }, + { "ELAN0629", 0 }, + { "ELAN062A", 0 }, + { "ELAN062B", 0 }, + { "ELAN062C", 0 }, + { "ELAN062D", 0 }, + { "ELAN0631", 0 }, + { "ELAN0632", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index fc3ab93b7aea..7fb358f96195 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -860,7 +860,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev, error = rmi_register_function(fn); if (error) - goto err_put_fn; + return error; if (pdt->function_number == 0x01) 
data->f01_container = fn; @@ -870,10 +870,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev, list_add_tail(&fn->node, &data->function_list); return RMI_SCAN_CONTINUE; - -err_put_fn: - put_device(&fn->dev); - return error; } void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake) diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c index df64d6aed4f7..93901ebd122a 100644 --- a/drivers/input/rmi4/rmi_f11.c +++ b/drivers/input/rmi4/rmi_f11.c @@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn) } rc = f11_write_control_regs(fn, &f11->sens_query, - &f11->dev_controls, fn->fd.query_base_addr); + &f11->dev_controls, fn->fd.control_base_addr); if (rc) dev_warn(&fn->dev, "Failed to write control registers\n"); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index b319e51c379b..f7cdd2ab7f11 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2608,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, /* Everything is mapped - write the right values into s->dma_address */ for_each_sg(sglist, s, nelems, i) { - s->dma_address += address + s->offset; + /* + * Add in the remaining piece of the scatter-gather offset that + * was masked out when we were determining the physical address + * via (sg_phys(s) & PAGE_MASK) earlier. + */ + s->dma_address += address + (s->offset & ~PAGE_MASK); s->dma_length = s->length; } @@ -3164,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev, return; list_for_each_entry(entry, &amd_iommu_unity_map, list) { + int type, prot = 0; size_t length; - int prot = 0; if (devid < entry->devid_start || devid > entry->devid_end) continue; + type = IOMMU_RESV_DIRECT; length = entry->address_end - entry->address_start; if (entry->prot & IOMMU_PROT_IR) prot |= IOMMU_READ; if (entry->prot & IOMMU_PROT_IW) prot |= IOMMU_WRITE; + if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE) + /* Exclusion range */ + type = IOMMU_RESV_RESERVED; region = iommu_alloc_resv_region(entry->address_start, - length, prot, - IOMMU_RESV_DIRECT); + length, prot, type); if (!region) { dev_err(dev, "Out of memory allocating dm-regions\n"); return; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index f773792d77fd..ff40ba758cf3 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -359,7 +359,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) static void iommu_set_exclusion_range(struct amd_iommu *iommu) { u64 start = iommu->exclusion_start & PAGE_MASK; - u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; + u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; u64 entry; if (!iommu->exclusion_start) @@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m) if (e == NULL) return -ENOMEM; + if (m->flags & IVMD_FLAG_EXCL_RANGE) + init_exclusion_range(m); + switch (m->type) { default: kfree(e); @@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table) while (p < end) { m = (struct ivmd_header *)p; - if (m->flags & IVMD_FLAG_EXCL_RANGE) - init_exclusion_range(m); - else if (m->flags & IVMD_FLAG_UNITY_MAP) + if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) init_unity_map_range(m); p += m->length; diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index eae0741f72dc..87965e4d9647 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ 
-374,6 +374,8 @@ #define IOMMU_PROT_IR 0x01 #define IOMMU_PROT_IW 0x02 +#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2) + /* IOMMU capabilities */ #define IOMMU_CAP_IOTLB 24 #define IOMMU_CAP_NPCACHE 26 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 87274b54febd..28cb713d728c 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) u32 pmen; unsigned long flags; + if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) + return; + raw_spin_lock_irqsave(&iommu->register_lock, flags); pmen = readl(iommu->reg + DMAR_PMEN_REG); pmen &= ~DMA_PMEN_EPM; @@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd ctx_lo = context[0].lo; - sdev->did = domain->iommu_did[iommu->seq_id]; + sdev->did = FLPT_DEFAULT_DID; sdev->sid = PCI_DEVID(info->bus, info->devfn); if (!(ctx_lo & CONTEXT_PASIDE)) { diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index f101afc315ab..9a8a8870e267 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -160,6 +160,14 @@ #define ARM_V7S_TCR_PD1 BIT(5) +#ifdef CONFIG_ZONE_DMA32 +#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32 +#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32 +#else +#define ARM_V7S_TABLE_GFP_DMA GFP_DMA +#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA +#endif + typedef u32 arm_v7s_iopte; static bool selftest_running; @@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, void *table = NULL; if (lvl == 1) - table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size)); + table = (void *)__get_free_pages( + __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size)); else if (lvl == 2) - table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA); + table = kmem_cache_zalloc(data->l2_tables, gfp); phys = virt_to_phys(table); - if (phys != (arm_v7s_iopte)phys) + if (phys != (arm_v7s_iopte)phys) { /* Doesn't fit in PTE */ + dev_err(dev, "Page table does not fit in PTE: %pa", &phys); goto out_free; + } if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) @@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2", ARM_V7S_TABLE_SIZE(2), ARM_V7S_TABLE_SIZE(2), - SLAB_CACHE_DMA, NULL); + ARM_V7S_TABLE_SLAB_FLAGS, NULL); if (!data->l2_tables) goto out_free_data; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 33a982e33716..109de67d5d72 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { - dev_warn(dev, - "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA", - iommu_def_domain_type); dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); + if (dom) { + dev_warn(dev, + "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA", + iommu_def_domain_type); + } } group->default_domain = dom; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index f8d3ba247523..2de8122e218f 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, curr_iova = 
rb_entry(curr, struct iova, node); } while (curr && new_pfn <= curr_iova->pfn_hi); - if (limit_pfn < size || new_pfn < iovad->start_pfn) + if (limit_pfn < size || new_pfn < iovad->start_pfn) { + iovad->max32_alloc_size = size; goto iova32_full; + } /* pfn_lo will point to size aligned address if size_aligned is set */ new->pfn_lo = new_pfn; @@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, return 0; iova32_full: - iovad->max32_alloc_size = size; spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return -ENOMEM; } diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c index aa7290784636..0390603170b4 100644 --- a/drivers/irqchip/irq-ath79-misc.c +++ b/drivers/irqchip/irq-ath79-misc.c @@ -22,6 +22,15 @@ #define AR71XX_RESET_REG_MISC_INT_ENABLE 4 #define ATH79_MISC_IRQ_COUNT 32 +#define ATH79_MISC_PERF_IRQ 5 + +static int ath79_perfcount_irq; + +int get_c0_perfcount_int(void) +{ + return ath79_perfcount_irq; +} +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); static void ath79_misc_irq_handler(struct irq_desc *desc) { @@ -113,6 +122,8 @@ static void __init ath79_misc_intc_domain_init( { void __iomem *base = domain->host_data; + ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ); + /* Disable and clear all interrupts */ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE); __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS); diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 83364fedbf0a..5e4ca139e4ea 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -275,14 +275,14 @@ out_free: return ret; } -int __init brcmstb_l2_edge_intc_of_init(struct device_node *np, +static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np, struct device_node *parent) { return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init); } IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init); -int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np, +static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np, struct device_node *parent) { return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 2dd1ff0cf558..7577755bdcf4 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1482,7 +1482,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b) ra = container_of(a, struct lpi_range, entry); rb = container_of(b, struct lpi_range, entry); - return rb->base_id - ra->base_id; + return ra->base_id - rb->base_id; } static void merge_lpi_ranges(void) diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index ba2a37a27a54..fd3110c171ba 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1089,11 +1089,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev, #endif } -static int gic_init_bases(struct gic_chip_data *gic, int irq_start, +static int gic_init_bases(struct gic_chip_data *gic, struct fwnode_handle *handle) { - irq_hw_number_t hwirq_base; - int gic_irqs, irq_base, ret; + int gic_irqs, ret; if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { /* Frankein-GIC without banked registers... */ @@ -1145,28 +1144,21 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start, } else { /* Legacy support */ /* * For primary GICs, skip over SGIs. - * For secondary GICs, skip over PPIs, too. 
+ * No secondary GIC support whatsoever. */ - if (gic == &gic_data[0] && (irq_start & 31) > 0) { - hwirq_base = 16; - if (irq_start != -1) - irq_start = (irq_start & ~31) + 16; - } else { - hwirq_base = 32; - } + int irq_base; - gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ + gic_irqs -= 16; /* calculate # of irqs to allocate */ - irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, + irq_base = irq_alloc_descs(16, 16, gic_irqs, numa_node_id()); if (irq_base < 0) { - WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", - irq_start); - irq_base = irq_start; + WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n"); + irq_base = 16; } gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base, - hwirq_base, &gic_irq_domain_ops, gic); + 16, &gic_irq_domain_ops, gic); } if (WARN_ON(!gic->domain)) { @@ -1195,7 +1187,6 @@ error: } static int __init __gic_init_bases(struct gic_chip_data *gic, - int irq_start, struct fwnode_handle *handle) { char *name; @@ -1231,32 +1222,28 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, gic_init_chip(gic, NULL, name, false); } - ret = gic_init_bases(gic, irq_start, handle); + ret = gic_init_bases(gic, handle); if (ret) kfree(name); return ret; } -void __init gic_init(unsigned int gic_nr, int irq_start, - void __iomem *dist_base, void __iomem *cpu_base) +void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base) { struct gic_chip_data *gic; - if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR)) - return; - /* * Non-DT/ACPI systems won't run a hypervisor, so let's not * bother with these... */ static_branch_disable(&supports_deactivate_key); - gic = &gic_data[gic_nr]; + gic = &gic_data[0]; gic->raw_dist_base = dist_base; gic->raw_cpu_base = cpu_base; - __gic_init_bases(gic, irq_start, NULL); + __gic_init_bases(gic, NULL); } static void gic_teardown(struct gic_chip_data *gic) @@ -1399,7 +1386,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq) if (ret) return ret; - ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode); + ret = gic_init_bases(*gic, &dev->of_node->fwnode); if (ret) { gic_teardown(*gic); return ret; @@ -1459,7 +1446,7 @@ gic_of_init(struct device_node *node, struct device_node *parent) if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base)) static_branch_disable(&supports_deactivate_key); - ret = __gic_init_bases(gic, -1, &node->fwnode); + ret = __gic_init_bases(gic, &node->fwnode); if (ret) { gic_teardown(gic); return ret; @@ -1650,7 +1637,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, return -ENOMEM; } - ret = __gic_init_bases(gic, -1, domain_handle); + ret = __gic_init_bases(gic, domain_handle); if (ret) { pr_err("Failed to initialise GIC\n"); irq_domain_free_fwnode(domain_handle); diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c index d1098f4da6a4..88df3d00052c 100644 --- a/drivers/irqchip/irq-imx-irqsteer.c +++ b/drivers/irqchip/irq-imx-irqsteer.c @@ -169,8 +169,12 @@ static int imx_irqsteer_probe(struct platform_device *pdev) raw_spin_lock_init(&data->lock); - of_property_read_u32(np, "fsl,num-irqs", &irqs_num); - of_property_read_u32(np, "fsl,channel", &data->channel); + ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num); + if (ret) + return ret; + ret = of_property_read_u32(np, "fsl,channel", &data->channel); + if (ret) + return ret; /* * There is one output irq for each group of 64 inputs. 
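For context on the imx-irqsteer probe change directly above: "fsl,num-irqs" and "fsl,channel" are now treated as required properties, following the usual checked of_property_read_u32() pattern. A minimal sketch of that pattern, using a hypothetical driver and hypothetical property names (not taken from the patch), might look like:

#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical probe helper: fail cleanly when a required DT property is absent. */
static int example_read_required_props(struct platform_device *pdev,
				       u32 *num_irqs, u32 *channel)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	/* of_property_read_u32() returns 0 on success or a negative errno. */
	ret = of_property_read_u32(np, "example,num-irqs", num_irqs);
	if (ret)
		return ret;

	ret = of_property_read_u32(np, "example,channel", channel);
	if (ret)
		return ret;

	return 0;
}

Propagating the errno from probe lets the driver core report the missing property instead of continuing with uninitialized values, which is the behavior the irqsteer hunk above appears to be after.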
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c index 86b72fbd3b45..353111a10413 100644 --- a/drivers/irqchip/irq-ls1x.c +++ b/drivers/irqchip/irq-ls1x.c @@ -130,6 +130,7 @@ static int __init ls1x_intc_of_init(struct device_node *node, NULL); if (!priv->domain) { pr_err("ls1x-irq: cannot add IRQ domain\n"); + err = -ENOMEM; goto out_iounmap; } diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 567b29c47608..98b6e1d4b1a6 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg) void __iomem *base = d->chip_data; u32 val; + if (!msg->address_lo && !msg->address_hi) + return; + base += get_mbigen_vec_reg(d->hwirq); val = readl_relaxed(base); diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 3496b61a312a..8eed478f3b7e 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -179,7 +179,7 @@ static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node, return 0; } -const struct irq_domain_ops mmp_irq_domain_ops = { +static const struct irq_domain_ops mmp_irq_domain_ops = { .map = mmp_irq_domain_map, .xlate = mmp_irq_domain_xlate, }; diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c index add4c9c934c8..18832ccc8ff8 100644 --- a/drivers/irqchip/irq-mvebu-sei.c +++ b/drivers/irqchip/irq-mvebu-sei.c @@ -478,7 +478,7 @@ dispose_irq: return ret; } -struct mvebu_sei_caps mvebu_sei_ap806_caps = { +static struct mvebu_sei_caps mvebu_sei_ap806_caps = { .ap_range = { .first = 0, .size = 21, diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index a93296b9b45d..7bd1d4cb2e19 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, const struct stm32_exti_bank *stm32_bank; struct stm32_exti_chip_data *chip_data; void __iomem *base = h_data->base; - u32 irqs_mask; stm32_bank = h_data->drv_data->exti_banks[bank_idx]; chip_data = &h_data->chips_data[bank_idx]; @@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, raw_spin_lock_init(&chip_data->rlock); - /* Determine number of irqs supported */ - writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst); - irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst); - /* * This IP has no reset, so after hot reboot we should * clear registers to avoid residue */ writel_relaxed(0, base + stm32_bank->imr_ofst); writel_relaxed(0, base + stm32_bank->emr_ofst); - writel_relaxed(0, base + stm32_bank->rtsr_ofst); - writel_relaxed(0, base + stm32_bank->ftsr_ofst); - writel_relaxed(~0UL, base + stm32_bank->rpr_ofst); - if (stm32_bank->fpr_ofst != UNDEF_REG) - writel_relaxed(~0UL, base + stm32_bank->fpr_ofst); pr_info("%pOF: bank%d\n", h_data->node, bank_idx); diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 4d85645c87f7..0928fd1f0e0c 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev, if (m->clock2) test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip); - if (ent->device == 0xB410) { + if (ent->vendor == PCI_VENDOR_ID_DIGIUM && + ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) { test_and_set_bit(HFC_CHIP_B410P, &hc->chip); test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip); 
test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip); diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 4ab8b1b6608f..a14e35d40538 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) struct sock *sk = sock->sk; int err = 0; - if (!maddr || maddr->family != AF_ISDN) + if (addr_len < sizeof(struct sockaddr_mISDN)) return -EINVAL; - if (addr_len < sizeof(struct sockaddr_mISDN)) + if (!maddr || maddr->family != AF_ISDN) return -EINVAL; lock_sock(sk); diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 7fea18b0c15d..7cb4d685a1f1 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c @@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client, const struct i2c_device_id *id) { int devid; + const struct of_device_id *of_id; struct pca9532_data *data = i2c_get_clientdata(client); struct pca9532_platform_data *pca9532_pdata = dev_get_platdata(&client->dev); @@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client, dev_err(&client->dev, "no platform data\n"); return -EINVAL; } - devid = (int)(uintptr_t)of_match_device( - of_pca9532_leds_match, &client->dev)->data; + of_id = of_match_device(of_pca9532_leds_match, + &client->dev); + if (unlikely(!of_id)) + return -EINVAL; + devid = (int)(uintptr_t) of_id->data; } else { devid = id->driver_data; } diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c index 3dd3ed46d473..136f86a1627d 100644 --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c @@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev, trigger_data->net_dev = NULL; } - strncpy(trigger_data->device_name, buf, size); + memcpy(trigger_data->device_name, buf, size); + trigger_data->device_name[size] = 0; if (size > 0 && trigger_data->device_name[size - 1] == '\n') trigger_data->device_name[size - 1] = 0; @@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb, container_of(nb, struct led_netdev_data, notifier); if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE - && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER - && evt != NETDEV_CHANGENAME) + && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER) return NOTIFY_DONE; - if (strcmp(dev->name, trigger_data->device_name)) + if (!(dev == trigger_data->net_dev || + (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name)))) return NOTIFY_DONE; cancel_delayed_work_sync(&trigger_data->work); @@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb, dev_hold(dev); trigger_data->net_dev = dev; break; - case NETDEV_CHANGENAME: case NETDEV_UNREGISTER: - if (trigger_data->net_dev) { - dev_put(trigger_data->net_dev); - trigger_data->net_dev = NULL; - } + dev_put(trigger_data->net_dev); + trigger_data->net_dev = NULL; break; case NETDEV_UP: case NETDEV_CHANGE: diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 3789185144da..0b7d5fb4548d 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -231,14 +231,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd) struct pblk_sec_meta *meta; struct bio *new_bio = rqd->bio; struct bio *bio = pr_ctx->orig_bio; - struct bio_vec src_bv, dst_bv; void *meta_list = rqd->meta_list; - int bio_init_idx = pr_ctx->bio_init_idx; unsigned long *read_bitmap = pr_ctx->bitmap; + struct bvec_iter 
orig_iter = BVEC_ITER_ALL_INIT; + struct bvec_iter new_iter = BVEC_ITER_ALL_INIT; int nr_secs = pr_ctx->orig_nr_secs; int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs); void *src_p, *dst_p; - int hole, i; + int bit, i; if (unlikely(nr_holes == 1)) { struct ppa_addr ppa; @@ -257,33 +257,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd) /* Fill the holes in the original bio */ i = 0; - hole = find_first_zero_bit(read_bitmap, nr_secs); - do { - struct pblk_line *line; + for (bit = 0; bit < nr_secs; bit++) { + if (!test_bit(bit, read_bitmap)) { + struct bio_vec dst_bv, src_bv; + struct pblk_line *line; - line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]); - kref_put(&line->ref, pblk_line_put); + line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]); + kref_put(&line->ref, pblk_line_put); - meta = pblk_get_meta(pblk, meta_list, hole); - meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]); + meta = pblk_get_meta(pblk, meta_list, bit); + meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]); - src_bv = new_bio->bi_io_vec[i++]; - dst_bv = bio->bi_io_vec[bio_init_idx + hole]; + dst_bv = bio_iter_iovec(bio, orig_iter); + src_bv = bio_iter_iovec(new_bio, new_iter); - src_p = kmap_atomic(src_bv.bv_page); - dst_p = kmap_atomic(dst_bv.bv_page); + src_p = kmap_atomic(src_bv.bv_page); + dst_p = kmap_atomic(dst_bv.bv_page); - memcpy(dst_p + dst_bv.bv_offset, - src_p + src_bv.bv_offset, - PBLK_EXPOSED_PAGE_SIZE); + memcpy(dst_p + dst_bv.bv_offset, + src_p + src_bv.bv_offset, + PBLK_EXPOSED_PAGE_SIZE); - kunmap_atomic(src_p); - kunmap_atomic(dst_p); + kunmap_atomic(src_p); + kunmap_atomic(dst_p); - mempool_free(src_bv.bv_page, &pblk->page_bio_pool); + flush_dcache_page(dst_bv.bv_page); + mempool_free(src_bv.bv_page, &pblk->page_bio_pool); - hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1); - } while (hole < nr_secs); + bio_advance_iter(new_bio, &new_iter, + PBLK_EXPOSED_PAGE_SIZE); + i++; + } + bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE); + } bio_put(new_bio); kfree(pr_ctx); diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 95c6d86ab5e8..c4ef1fceead6 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -115,6 +115,7 @@ struct mapped_device { struct srcu_struct io_barrier; }; +void disable_discard(struct mapped_device *md); void disable_write_same(struct mapped_device *md); void disable_write_zeroes(struct mapped_device *md); diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c index b53f30f16b4d..4b76f84424c3 100644 --- a/drivers/md/dm-init.c +++ b/drivers/md/dm-init.c @@ -36,7 +36,7 @@ struct dm_device { struct list_head list; }; -const char *dm_allowed_targets[] __initconst = { +const char * const dm_allowed_targets[] __initconst = { "crypt", "delay", "linear", diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index d57d997a52c8..7c678f50aaa3 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) { return range1->logical_sector < range2->logical_sector + range2->n_sectors && - range2->logical_sector + range2->n_sectors > range2->logical_sector; + range1->logical_sector + range1->n_sectors > range2->logical_sector; } static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) @@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct 
dm_integrity struct dm_integrity_range *last_range = list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); struct task_struct *last_range_task; - if (!ranges_overlap(range, last_range)) - break; last_range_task = last_range->task; list_del(&last_range->wait_entry); if (!add_new_range(ic, last_range, false)) { @@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) journal_watermark = val; else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) sync_msec = val; - else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) { + else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) { if (ic->meta_dev) { dm_put_device(ti, ic->meta_dev); ic->meta_dev = NULL; @@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } ic->sectors_per_block = val >> SECTOR_SHIFT; - } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { + } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, "Invalid internal_hash argument"); if (r) goto bad; - } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { + } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, "Invalid journal_crypt argument"); if (r) goto bad; - } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { + } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, "Invalid journal_mac argument"); if (r) @@ -3616,7 +3614,7 @@ static struct target_type integrity_target = { .io_hints = dm_integrity_io_hints, }; -int __init dm_integrity_init(void) +static int __init dm_integrity_init(void) { int r; @@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void) return r; } -void dm_integrity_exit(void) +static void __exit dm_integrity_exit(void) { dm_unregister_target(&integrity_target); kmem_cache_destroy(journal_io_cache); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 09773636602d..b66745bd08bb 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) } if (unlikely(error == BLK_STS_TARGET)) { - if (req_op(clone) == REQ_OP_WRITE_SAME && - !clone->q->limits.max_write_same_sectors) + if (req_op(clone) == REQ_OP_DISCARD && + !clone->q->limits.max_discard_sectors) + disable_discard(tio->md); + else if (req_op(clone) == REQ_OP_WRITE_SAME && + !clone->q->limits.max_write_same_sectors) disable_write_same(tio->md); - if (req_op(clone) == REQ_OP_WRITE_ZEROES && - !clone->q->limits.max_write_zeroes_sectors) + else if (req_op(clone) == REQ_OP_WRITE_ZEROES && + !clone->q->limits.max_write_zeroes_sectors) disable_write_zeroes(tio->md); } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ba9481f1bf3c..cde3b49b2a91 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t) return true; } +static int device_requires_stable_pages(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + + return q && bdi_cap_stable_pages_required(q->backing_dev_info); +} + +/* + * If any underlying device requires stable 
pages, a table must require + * them as well. Only targets that support iterate_devices are considered: + * don't want error, zero, etc to require stable pages. + */ +static bool dm_table_requires_stable_pages(struct dm_table *t) +{ + struct dm_target *ti; + unsigned i; + + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, device_requires_stable_pages, NULL)) + return true; + } + + return false; +} + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1897,6 +1927,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, dm_table_verify_integrity(t); /* + * Some devices don't use blk_integrity but still want stable pages + * because they do their own checksumming. + */ + if (dm_table_requires_stable_pages(t)) + q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; + else + q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; + + /* * Determine whether or not this queue's I/O timings contribute * to the entropy pool, Only request-based targets use this. * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 68d24056d0b1..043f0761e4a0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error) } } +void disable_discard(struct mapped_device *md) +{ + struct queue_limits *limits = dm_get_queue_limits(md); + + /* device doesn't really support DISCARD, disable it */ + limits->max_discard_sectors = 0; + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); +} + void disable_write_same(struct mapped_device *md) { struct queue_limits *limits = dm_get_queue_limits(md); @@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio) dm_endio_fn endio = tio->ti->type->end_io; if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { - if (bio_op(bio) == REQ_OP_WRITE_SAME && - !bio->bi_disk->queue->limits.max_write_same_sectors) + if (bio_op(bio) == REQ_OP_DISCARD && + !bio->bi_disk->queue->limits.max_discard_sectors) + disable_discard(md); + else if (bio_op(bio) == REQ_OP_WRITE_SAME && + !bio->bi_disk->queue->limits.max_write_same_sectors) disable_write_same(md); - if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !bio->bi_disk->queue->limits.max_write_zeroes_sectors) + else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && + !bio->bi_disk->queue->limits.max_write_zeroes_sectors) disable_write_zeroes(md); } @@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) return -EINVAL; } - /* - * BIO based queue uses its own splitting. When multipage bvecs - * is switched on, size of the incoming bio may be too big to - * be handled in some targets, such as crypt. - * - * When these targets are ready for the big bio, we can remove - * the limit. 
- */ - ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE); + ti->max_io_len = (uint32_t) len; return 0; } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 0ce2d8dfc5f1..26ad6468d13a 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1246,7 +1246,7 @@ config MFD_STA2X11 config MFD_SUN6I_PRCM bool "Allwinner A31 PRCM controller" - depends on ARCH_SUNXI + depends on ARCH_SUNXI || COMPILE_TEST select MFD_CORE help Support for the PRCM (Power/Reset/Clock Management) unit available diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c index 69df27769c21..43ac71691fe4 100644 --- a/drivers/mfd/sprd-sc27xx-spi.c +++ b/drivers/mfd/sprd-sc27xx-spi.c @@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = { static const struct mfd_cell sprd_pmic_devs[] = { { .name = "sc27xx-wdt", - .of_compatible = "sprd,sc27xx-wdt", + .of_compatible = "sprd,sc2731-wdt", }, { .name = "sc27xx-rtc", - .of_compatible = "sprd,sc27xx-rtc", + .of_compatible = "sprd,sc2731-rtc", }, { .name = "sc27xx-charger", - .of_compatible = "sprd,sc27xx-charger", + .of_compatible = "sprd,sc2731-charger", }, { .name = "sc27xx-chg-timer", - .of_compatible = "sprd,sc27xx-chg-timer", + .of_compatible = "sprd,sc2731-chg-timer", }, { .name = "sc27xx-fast-chg", - .of_compatible = "sprd,sc27xx-fast-chg", + .of_compatible = "sprd,sc2731-fast-chg", }, { .name = "sc27xx-chg-wdt", - .of_compatible = "sprd,sc27xx-chg-wdt", + .of_compatible = "sprd,sc2731-chg-wdt", }, { .name = "sc27xx-typec", - .of_compatible = "sprd,sc27xx-typec", + .of_compatible = "sprd,sc2731-typec", }, { .name = "sc27xx-flash", - .of_compatible = "sprd,sc27xx-flash", + .of_compatible = "sprd,sc2731-flash", }, { .name = "sc27xx-eic", - .of_compatible = "sprd,sc27xx-eic", + .of_compatible = "sprd,sc2731-eic", }, { .name = "sc27xx-efuse", - .of_compatible = "sprd,sc27xx-efuse", + .of_compatible = "sprd,sc2731-efuse", }, { .name = "sc27xx-thermal", - .of_compatible = "sprd,sc27xx-thermal", + .of_compatible = "sprd,sc2731-thermal", }, { .name = "sc27xx-adc", - .of_compatible = "sprd,sc27xx-adc", + .of_compatible = "sprd,sc2731-adc", }, { .name = "sc27xx-audio-codec", - .of_compatible = "sprd,sc27xx-audio-codec", + .of_compatible = "sprd,sc2731-audio-codec", }, { .name = "sc27xx-regulator", - .of_compatible = "sprd,sc27xx-regulator", + .of_compatible = "sprd,sc2731-regulator", }, { .name = "sc27xx-vibrator", - .of_compatible = "sprd,sc27xx-vibrator", + .of_compatible = "sprd,sc2731-vibrator", }, { .name = "sc27xx-keypad-led", - .of_compatible = "sprd,sc27xx-keypad-led", + .of_compatible = "sprd,sc2731-keypad-led", }, { .name = "sc27xx-bltc", - .of_compatible = "sprd,sc27xx-bltc", + .of_compatible = "sprd,sc2731-bltc", }, { .name = "sc27xx-fgu", - .of_compatible = "sprd,sc27xx-fgu", + .of_compatible = "sprd,sc2731-fgu", }, { .name = "sc27xx-7sreset", - .of_compatible = "sprd,sc27xx-7sreset", + .of_compatible = "sprd,sc2731-7sreset", }, { .name = "sc27xx-poweroff", - .of_compatible = "sprd,sc27xx-poweroff", + .of_compatible = "sprd,sc2731-poweroff", }, { .name = "sc27xx-syscon", - .of_compatible = "sprd,sc27xx-syscon", + .of_compatible = "sprd,sc2731-syscon", }, }; diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 299016bc46d9..104477b512a2 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -1245,6 +1245,28 @@ free: return status; } +static int __maybe_unused twl_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (client->irq) + 
disable_irq(client->irq); + + return 0; +} + +static int __maybe_unused twl_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (client->irq) + enable_irq(client->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume); + static const struct i2c_device_id twl_ids[] = { { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ { "twl5030", 0 }, /* T2 updated */ @@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = { /* One Client Driver , 4 Clients */ static struct i2c_driver twl_driver = { .driver.name = DRIVER_NAME, + .driver.pm = &twl_dev_pm_ops, .id_table = twl_ids, .probe = twl_probe, .remove = twl_remove, diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 39f832d27288..36d0d5c9cfba 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1184,6 +1184,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev) struct fastrpc_session_ctx *sess; struct device *dev = &pdev->dev; int i, sessions = 0; + int rc; cctx = dev_get_drvdata(dev->parent); if (!cctx) @@ -1213,7 +1214,11 @@ static int fastrpc_cb_probe(struct platform_device *pdev) } cctx->sesscount++; spin_unlock(&cctx->lock); - dma_set_mask(dev, DMA_BIT_MASK(32)); + rc = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(dev, "32-bit DMA enable failed\n"); + return rc; + } return 0; } diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index 3525236ed8d9..19c84214a7ea 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref) /* We also need to update CI for internal queues */ if (cs->submitted) { + int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt); + + WARN_ONCE((cs_cnt < 0), + "hl%d: error in CS active cnt %d\n", + hdev->id, cs_cnt); + hl_int_hw_queue_update_ci(cs); spin_lock(&hdev->hw_queues_mirror_lock); diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c index a53c12aff6ad..974a87789bd8 100644 --- a/drivers/misc/habanalabs/debugfs.c +++ b/drivers/misc/habanalabs/debugfs.c @@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data) struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; enum vm_type_t *vm_type; bool once = true; + u64 j; int i; if (!dev_entry->hdev->mmu_enable) @@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data) } else { phys_pg_pack = hnode->ptr; seq_printf(s, - " 0x%-14llx %-10u %-4u\n", + " 0x%-14llx %-10llu %-4u\n", hnode->vaddr, phys_pg_pack->total_size, phys_pg_pack->handle); } @@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data) phys_pg_pack->page_size); seq_puts(s, " physical address\n"); seq_puts(s, "---------------------\n"); - for (i = 0 ; i < phys_pg_pack->npages ; i++) { + for (j = 0 ; j < phys_pg_pack->npages ; j++) { seq_printf(s, " 0x%-14llx\n", - phys_pg_pack->pages[i]); + phys_pg_pack->pages[j]); } } spin_unlock(&vm->idr_lock); diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index de46aa6ed154..77d51be66c7e 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -11,6 +11,8 @@ #include <linux/sched/signal.h> #include <linux/hwmon.h> +#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10) + bool hl_device_disabled_or_in_reset(struct hl_device *hdev) { if ((hdev->disabled) || (atomic_read(&hdev->in_reset))) @@ -216,6 +218,7 @@ static int device_early_init(struct 
hl_device *hdev) spin_lock_init(&hdev->hw_queues_mirror_lock); atomic_set(&hdev->in_reset, 0); atomic_set(&hdev->fd_open_cnt, 0); + atomic_set(&hdev->cs_active_cnt, 0); return 0; @@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev) pci_save_state(hdev->pdev); + /* Block future CS/VM/JOB completion operations */ + rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); + if (rc) { + dev_err(hdev->dev, "Can't suspend while in reset\n"); + return -EIO; + } + + /* This blocks all other stuff that is not blocked by in_reset */ + hdev->disabled = true; + + /* + * Flush anyone that is inside the critical section of enqueue + * jobs to the H/W + */ + hdev->asic_funcs->hw_queues_lock(hdev); + hdev->asic_funcs->hw_queues_unlock(hdev); + + /* Flush processes that are sending message to CPU */ + mutex_lock(&hdev->send_cpu_message_lock); + mutex_unlock(&hdev->send_cpu_message_lock); + rc = hdev->asic_funcs->suspend(hdev); if (rc) dev_err(hdev->dev, @@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev) pci_set_power_state(hdev->pdev, PCI_D0); pci_restore_state(hdev->pdev); - rc = pci_enable_device(hdev->pdev); + rc = pci_enable_device_mem(hdev->pdev); if (rc) { dev_err(hdev->dev, "Failed to enable PCI device in resume\n"); return rc; } + pci_set_master(hdev->pdev); + rc = hdev->asic_funcs->resume(hdev); if (rc) { - dev_err(hdev->dev, - "Failed to enable PCI access from device CPU\n"); - return rc; + dev_err(hdev->dev, "Failed to resume device after suspend\n"); + goto disable_device; + } + + + hdev->disabled = false; + atomic_set(&hdev->in_reset, 0); + + rc = hl_device_reset(hdev, true, false); + if (rc) { + dev_err(hdev->dev, "Failed to reset device during resume\n"); + goto disable_device; } return 0; + +disable_device: + pci_clear_master(hdev->pdev); + pci_disable_device(hdev->pdev); + + return rc; } static void hl_device_hard_reset_pending(struct work_struct *work) @@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work) struct hl_device_reset_work *device_reset_work = container_of(work, struct hl_device_reset_work, reset_work); struct hl_device *hdev = device_reset_work->hdev; - u16 pending_cnt = HL_PENDING_RESET_PER_SEC; + u16 pending_total, pending_cnt; struct task_struct *task = NULL; + if (hdev->pldm) + pending_total = HL_PLDM_PENDING_RESET_PER_SEC; + else + pending_total = HL_PENDING_RESET_PER_SEC; + + pending_cnt = pending_total; + /* Flush all processes that are inside hl_open */ mutex_lock(&hdev->fd_open_cnt_lock); @@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work) } } + pending_cnt = pending_total; + + while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) { + + pending_cnt--; + + ssleep(1); + } + + if (atomic_read(&hdev->fd_open_cnt)) + dev_crit(hdev->dev, + "Going to hard reset with open user contexts\n"); + mutex_unlock(&hdev->fd_open_cnt_lock); hl_device_reset(hdev, true, true); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 238dd57c541b..3c509e19d69d 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev) return retval; } -static void goya_resume_external_queues(struct hl_device *hdev) -{ - WREG32(mmDMA_QM_0_GLBL_CFG1, 0); - WREG32(mmDMA_QM_1_GLBL_CFG1, 0); - WREG32(mmDMA_QM_2_GLBL_CFG1, 0); - WREG32(mmDMA_QM_3_GLBL_CFG1, 0); - WREG32(mmDMA_QM_4_GLBL_CFG1, 0); -} - /* * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU * @@ -1697,12 
+1688,11 @@ static void goya_init_golden_registers(struct hl_device *hdev) /* * Workaround for H2 #HW-23 bug - * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it - * to 16 on KMD DMA - * We need to limit only these DMAs because the user can only read + * Set DMA max outstanding read requests to 240 on DMA CH 1. + * This limitation is still large enough to not affect Gen4 bandwidth. + * We need to only limit that DMA channel because the user can only read * from Host using DMA CH 1 */ - WREG32(mmDMA_CH_0_CFG0, 0x0fff0010); WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0); goya->hw_cap_initialized |= HW_CAP_GOLDEN; @@ -2178,36 +2168,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev) return retval; } -static void goya_resume_internal_queues(struct hl_device *hdev) -{ - WREG32(mmMME_QM_GLBL_CFG1, 0); - WREG32(mmMME_CMDQ_GLBL_CFG1, 0); - - WREG32(mmTPC0_QM_GLBL_CFG1, 0); - WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0); - - WREG32(mmTPC1_QM_GLBL_CFG1, 0); - WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0); - - WREG32(mmTPC2_QM_GLBL_CFG1, 0); - WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0); - - WREG32(mmTPC3_QM_GLBL_CFG1, 0); - WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0); - - WREG32(mmTPC4_QM_GLBL_CFG1, 0); - WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0); - - WREG32(mmTPC5_QM_GLBL_CFG1, 0); - WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0); - - WREG32(mmTPC6_QM_GLBL_CFG1, 0); - WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0); - - WREG32(mmTPC7_QM_GLBL_CFG1, 0); - WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0); -} - static void goya_dma_stall(struct hl_device *hdev) { WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT); @@ -2905,20 +2865,6 @@ int goya_suspend(struct hl_device *hdev) { int rc; - rc = goya_stop_internal_queues(hdev); - - if (rc) { - dev_err(hdev->dev, "failed to stop internal queues\n"); - return rc; - } - - rc = goya_stop_external_queues(hdev); - - if (rc) { - dev_err(hdev->dev, "failed to stop external queues\n"); - return rc; - } - rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); if (rc) dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); @@ -2928,15 +2874,7 @@ int goya_suspend(struct hl_device *hdev) int goya_resume(struct hl_device *hdev) { - int rc; - - goya_resume_external_queues(hdev); - goya_resume_internal_queues(hdev); - - rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS); - if (rc) - dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); - return rc; + return goya_init_iatu(hdev); } static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, @@ -3070,7 +3008,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, *dma_handle = hdev->asic_prop.sram_base_address; - base = hdev->pcie_bar[SRAM_CFG_BAR_ID]; + base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID]; switch (queue_id) { case GOYA_QUEUE_ID_MME: @@ -3754,7 +3692,7 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev, * WA for HW-23. * We can't allow user to read from Host using QMANs other than 1. */ - if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 && + if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 && hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr), le32_to_cpu(user_dma_pkt->tsize), hdev->asic_prop.va_space_host_start_address, diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index a7c95e9f9b9a..a8ee52c880cd 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -793,11 +793,11 @@ struct hl_vm_hash_node { * struct hl_vm_phys_pg_pack - physical page pack. 
* @vm_type: describes the type of the virtual area descriptor. * @pages: the physical page array. + * @npages: num physical pages in the pack. + * @total_size: total size of all the pages in this list. * @mapping_cnt: number of shared mappings. * @asid: the context related to this list. - * @npages: num physical pages in the pack. * @page_size: size of each page in the pack. - * @total_size: total size of all the pages in this list. * @flags: HL_MEM_* flags related to this list. * @handle: the provided handle related to this list. * @offset: offset from the first page. @@ -807,11 +807,11 @@ struct hl_vm_hash_node { struct hl_vm_phys_pg_pack { enum vm_type_t vm_type; /* must be first */ u64 *pages; + u64 npages; + u64 total_size; atomic_t mapping_cnt; u32 asid; - u32 npages; u32 page_size; - u32 total_size; u32 flags; u32 handle; u32 offset; @@ -1056,13 +1056,15 @@ struct hl_device_reset_work { * @cb_pool_lock: protects the CB pool. * @user_ctx: current user context executing. * @dram_used_mem: current DRAM memory consumption. - * @in_reset: is device in reset flow. - * @curr_pll_profile: current PLL profile. - * @fd_open_cnt: number of open user processes. * @timeout_jiffies: device CS timeout value. * @max_power: the max power of the device, as configured by the sysadmin. This * value is saved so in case of hard-reset, KMD will restore this * value and update the F/W after the re-initialization + * @in_reset: is device in reset flow. + * @curr_pll_profile: current PLL profile. + * @fd_open_cnt: number of open user processes. + * @cs_active_cnt: number of active command submissions on this device (active + * means already in H/W queues) * @major: habanalabs KMD major. * @high_pll: high PLL profile frequency. * @soft_reset_cnt: number of soft reset since KMD loading. 
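The @npages and @total_size fields documented above are widened from u32 to u64 in the structure hunk that follows. A short worked example of why the 32-bit byte count was too small (the values are illustrative only):

/* With 2 MB pages, a u32 byte count wraps as soon as the pack reaches 4 GB */
u32 page_size = 2 * 1024 * 1024;          /* 2 MB per page             */
u32 num_pgs   = 2048;                     /* 2048 * 2 MB = 4 GB total  */
u32 total32   = num_pgs * page_size;      /* unsigned wrap, yields 0   */
u64 total64   = (u64)num_pgs * page_size; /* 0x100000000, as intended  */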
@@ -1128,11 +1130,12 @@ struct hl_device { struct hl_ctx *user_ctx; atomic64_t dram_used_mem; + u64 timeout_jiffies; + u64 max_power; atomic_t in_reset; atomic_t curr_pll_profile; atomic_t fd_open_cnt; - u64 timeout_jiffies; - u64 max_power; + atomic_t cs_active_cnt; u32 major; u32 high_pll; u32 soft_reset_cnt; diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c index 67bece26417c..ef3bb6951360 100644 --- a/drivers/misc/habanalabs/hw_queue.c +++ b/drivers/misc/habanalabs/hw_queue.c @@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) spin_unlock(&hdev->hw_queues_mirror_lock); } - list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) { + atomic_inc(&hdev->cs_active_cnt); + + list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) if (job->ext_queue) ext_hw_queue_schedule_job(job); else int_hw_queue_schedule_job(job); - } cs->submitted = true; diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c index 3a12fd1a5274..ce1fda40a8b8 100644 --- a/drivers/misc/habanalabs/memory.c +++ b/drivers/misc/habanalabs/memory.c @@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, struct hl_device *hdev = ctx->hdev; struct hl_vm *vm = &hdev->vm; struct hl_vm_phys_pg_pack *phys_pg_pack; - u64 paddr = 0; - u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift; - int handle, rc, i; + u64 paddr = 0, total_size, num_pgs, i; + u32 num_curr_pgs, page_size, page_shift; + int handle, rc; bool contiguous; num_curr_pgs = 0; @@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size); if (!paddr) { dev_err(hdev->dev, - "failed to allocate %u huge contiguous pages\n", + "failed to allocate %llu huge contiguous pages\n", num_pgs); return -ENOMEM; } @@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, phys_pg_pack->flags = args->flags; phys_pg_pack->contiguous = contiguous; - phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL); + phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL); if (!phys_pg_pack->pages) { rc = -ENOMEM; goto pages_arr_err; @@ -148,7 +148,7 @@ page_err: gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], page_size); - kfree(phys_pg_pack->pages); + kvfree(phys_pg_pack->pages); pages_arr_err: kfree(phys_pg_pack); pages_pack_err: @@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev, struct hl_vm_phys_pg_pack *phys_pg_pack) { struct hl_vm *vm = &hdev->vm; - int i; + u64 i; if (!phys_pg_pack->created_from_userptr) { if (phys_pg_pack->contiguous) { @@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev, } } - kfree(phys_pg_pack->pages); + kvfree(phys_pg_pack->pages); kfree(phys_pg_pack); } @@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev, * - Return the start address of the virtual block */ static u64 get_va_block(struct hl_device *hdev, - struct hl_va_range *va_range, u32 size, u64 hint_addr, + struct hl_va_range *va_range, u64 size, u64 hint_addr, bool is_userptr) { struct hl_vm_va_block *va_block, *new_va_block = NULL; @@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev, } if (!new_va_block) { - dev_err(hdev->dev, "no available va block for size %u\n", size); + dev_err(hdev->dev, "no available va block for size %llu\n", + size); goto out; } @@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, struct 
hl_vm_phys_pg_pack *phys_pg_pack; struct scatterlist *sg; dma_addr_t dma_addr; - u64 page_mask; - u32 npages, total_npages, page_size = PAGE_SIZE; + u64 page_mask, total_npages; + u32 npages, page_size = PAGE_SIZE; bool first = true, is_huge_page_opt = true; int rc, i, j; @@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, page_mask = ~(((u64) page_size) - 1); - phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL); + phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64), + GFP_KERNEL); if (!phys_pg_pack->pages) { rc = -ENOMEM; goto page_pack_arr_mem_err; @@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr, struct hl_vm_phys_pg_pack *phys_pg_pack) { struct hl_device *hdev = ctx->hdev; - u64 next_vaddr = vaddr, paddr; + u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i; u32 page_size = phys_pg_pack->page_size; - int i, rc = 0, mapped_pg_cnt = 0; + int rc = 0; for (i = 0 ; i < phys_pg_pack->npages ; i++) { paddr = phys_pg_pack->pages[i]; @@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr, rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size); if (rc) { dev_err(hdev->dev, - "map failed for handle %u, npages: %d, mapped: %d", + "map failed for handle %u, npages: %llu, mapped: %llu", phys_pg_pack->handle, phys_pg_pack->npages, mapped_pg_cnt); goto err; @@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) struct hl_vm_hash_node *hnode = NULL; struct hl_userptr *userptr = NULL; enum vm_type_t *vm_type; - u64 next_vaddr; + u64 next_vaddr, i; u32 page_size; bool is_userptr; - int i, rc; + int rc; /* protect from double entrance */ mutex_lock(&ctx->mem_hash_lock); diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c index 2f2e99cb2743..3a5a2cec8305 100644 --- a/drivers/misc/habanalabs/mmu.c +++ b/drivers/misc/habanalabs/mmu.c @@ -832,7 +832,7 @@ err: int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size) { struct hl_device *hdev = ctx->hdev; - u64 real_virt_addr; + u64 real_virt_addr, real_phys_addr; u32 real_page_size, npages; int i, rc, mapped_cnt = 0; @@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size) npages = page_size / real_page_size; real_virt_addr = virt_addr; + real_phys_addr = phys_addr; for (i = 0 ; i < npages ; i++) { - rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr, + rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr, real_page_size); if (rc) goto err; real_virt_addr += real_page_size; + real_phys_addr += real_page_size; mapped_cnt++; } diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c index c712b7deb3a9..7c8f203f9a24 100644 --- a/drivers/mmc/host/alcor.c +++ b/drivers/mmc/host/alcor.c @@ -48,7 +48,6 @@ struct alcor_sdmmc_host { struct mmc_command *cmd; struct mmc_data *data; unsigned int dma_on:1; - unsigned int early_data:1; struct mutex cmd_mutex; @@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host) host->sg_count--; } -static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host, - bool early) +static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host) { struct alcor_pci_priv *priv = host->alcor_pci; struct mmc_data *data = host->data; @@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host, ctrl |= AU6601_DATA_WRITE; if (data->host_cookie == COOKIE_MAPPED) { - if (host->early_data) { - host->early_data = false; - return; - } - - host->early_data = 
early; - alcor_data_set_dma(host); ctrl |= AU6601_DATA_DMA_MODE; host->dma_on = 1; @@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host) static void alcor_prepare_data(struct alcor_sdmmc_host *host, struct mmc_command *cmd) { + struct alcor_pci_priv *priv = host->alcor_pci; struct mmc_data *data = cmd->data; if (!data) @@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host, if (data->host_cookie != COOKIE_MAPPED) alcor_prepare_sg_miter(host); - alcor_trigger_data_transfer(host, true); + alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL); } static void alcor_send_cmd(struct alcor_sdmmc_host *host, @@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask) if (!host->data) return false; - alcor_trigger_data_transfer(host, false); + alcor_trigger_data_transfer(host); host->cmd = NULL; return true; } @@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask) if (!host->data) alcor_request_complete(host, 1); else - alcor_trigger_data_transfer(host, false); + alcor_trigger_data_transfer(host); host->cmd = NULL; } @@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask) break; case AU6601_INT_READ_BUF_RDY: alcor_trf_block_pio(host, true); - if (!host->blocks) - break; - alcor_trigger_data_transfer(host, false); return 1; case AU6601_INT_WRITE_BUF_RDY: alcor_trf_block_pio(host, false); - if (!host->blocks) - break; - alcor_trigger_data_transfer(host, false); return 1; case AU6601_INT_DMA_END: if (!host->sg_count) @@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask) break; } - if (intmask & AU6601_INT_DATA_END) - return 0; + if (intmask & AU6601_INT_DATA_END) { + if (!host->dma_on && host->blocks) { + alcor_trigger_data_transfer(host); + return 1; + } else { + return 0; + } + } return 1; } @@ -1044,14 +1036,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host) mmc->caps2 = MMC_CAP2_NO_SDIO; mmc->ops = &alcor_sdc_ops; - /* Hardware cannot do scatter lists */ + /* The hardware does DMA data transfer of 4096 bytes to/from a single + * buffer address. Scatterlists are not supported, but upon DMA + * completion (signalled via IRQ), the original vendor driver does + * then immediately set up another DMA transfer of the next 4096 + * bytes. + * + * This means that we need to handle the I/O in 4096 byte chunks. + * Lacking a way to limit the sglist entries to 4096 bytes, we instead + * impose that only one segment is provided, with maximum size 4096, + * which also happens to be the minimum size. This means that the + * single-entry sglist handled by this driver can be handed directly + * to the hardware, nice and simple. + * + * Unfortunately though, that means we only do 4096 bytes I/O per + * MMC command. A future improvement would be to make the driver + * accept sg lists and entries of any size, and simply iterate + * through them 4096 bytes at a time. 
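Illustration of the "future improvement" mentioned in the comment above, i.e. walking arbitrary scatterlist entries in 4096-byte DMA chunks. This sketch is not part of the driver; the function name and the descriptor-programming step are assumptions:

static void alcor_xfer_sg_in_chunks(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		unsigned int done = 0;

		while (done < sg->length) {
			unsigned int chunk = min_t(unsigned int,
						   sg->length - done, 4096);

			/* program one DMA transfer for
			 * sg_dma_address(sg) + done, length 'chunk',
			 * then wait for the DMA-done interrupt
			 */
			done += chunk;
		}
	}
}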
+ */ mmc->max_segs = AU6601_MAX_DMA_SEGMENTS; mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE; - - mmc->max_blk_size = mmc->max_seg_size; - mmc->max_blk_count = mmc->max_segs; - - mmc->max_req_size = mmc->max_seg_size * mmc->max_segs; + mmc->max_req_size = mmc->max_seg_size; } static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 49e0daf2ef5e..f37003df1e01 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) { } #endif -static void __init init_mmcsd_host(struct mmc_davinci_host *host) +static void init_mmcsd_host(struct mmc_davinci_host *host) { mmc_davinci_reset_ctrl(host, 1); diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index d54612257b06..45f7b9b53d48 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -290,11 +290,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data) struct scatterlist *sg; int i; - for_each_sg(data->sg, sg, data->sg_len, i) { - void *buf = kmap_atomic(sg_page(sg) + sg->offset); - buffer_swap32(buf, sg->length); - kunmap_atomic(buf); - } + for_each_sg(data->sg, sg, data->sg_len, i) + buffer_swap32(sg_virt(sg), sg->length); } #else static inline void mxcmci_swap_buffers(struct mmc_data *data) {} @@ -611,7 +608,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host) { struct mmc_data *data = host->req->data; struct scatterlist *sg; - void *buf; int stat, i; host->data = data; @@ -619,18 +615,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host) if (data->flags & MMC_DATA_READ) { for_each_sg(data->sg, sg, data->sg_len, i) { - buf = kmap_atomic(sg_page(sg) + sg->offset); - stat = mxcmci_pull(host, buf, sg->length); - kunmap(buf); + stat = mxcmci_pull(host, sg_virt(sg), sg->length); if (stat) return stat; host->datasize += sg->length; } } else { for_each_sg(data->sg, sg, data->sg_len, i) { - buf = kmap_atomic(sg_page(sg) + sg->offset); - stat = mxcmci_push(host, buf, sg->length); - kunmap(buf); + stat = mxcmci_push(host, sg_virt(sg), sg->length); if (stat) return stat; host->datasize += sg->length; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index c907bf502a12..c1d3f0e38921 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param); static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) { struct dma_async_tx_descriptor *tx; - enum dma_data_direction direction; + enum dma_transfer_direction direction; struct dma_slave_config config; struct dma_chan *chan; unsigned int nob = data->blocks; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 71e13844df6c..8742e27e4e8b 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -641,6 +641,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, struct renesas_sdhi *priv; struct resource *res; int irq, ret, i; + u16 ver; of_data = of_device_get_match_data(&pdev->dev); @@ -773,12 +774,17 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (ret) goto efree; + ver = sd_ctrl_read16(host, CTL_VERSION); + /* GEN2_SDR104 is first known SDHI to use 32bit block count */ + if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX) + mmc_data->max_blk_count = U16_MAX; + ret = tmio_mmc_host_probe(host); if (ret < 0) goto edisclk; /* One Gen2 
SDHI incarnation does NOT have a CBSY bit */ - if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50) + if (ver == SDHI_VER_GEN2_SDR50) mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY; /* Enable tuning iff we have an SCC and a supported mode */ diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index b1a66ca3821a..9f20fff9781b 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask) sdhci_reset(host, mask); } +#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\ + SDHCI_INT_TIMEOUT) +#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE) + +static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + if (omap_host->is_tuning && host->cmd && !host->data_early && + (intmask & CMD_ERR_MASK)) { + + /* + * Since we are not resetting data lines during tuning + * operation, data error or data complete interrupts + * might still arrive. Mark this request as a failure + * but still wait for the data interrupt + */ + if (intmask & SDHCI_INT_TIMEOUT) + host->cmd->error = -ETIMEDOUT; + else + host->cmd->error = -EILSEQ; + + host->cmd = NULL; + + /* + * Sometimes command error interrupts and command complete + * interrupt will arrive together. Clear all command related + * interrupts here. + */ + sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS); + intmask &= ~CMD_MASK; + } + + return intmask; +} + static struct sdhci_ops sdhci_omap_ops = { .set_clock = sdhci_omap_set_clock, .set_power = sdhci_omap_set_power, @@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = { .platform_send_init_74_clocks = sdhci_omap_init_74_clocks, .reset = sdhci_omap_reset, .set_uhs_signaling = sdhci_omap_set_uhs_signaling, + .irq = sdhci_omap_irq, }; static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) @@ -1056,6 +1094,9 @@ static int sdhci_omap_probe(struct platform_device *pdev) mmc->f_max = 48000000; } + if (!mmc_can_gpio_ro(mmc)) + mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; + pltfm_host->clk = devm_clk_get(dev, "fck"); if (IS_ERR(pltfm_host->clk)) { ret = PTR_ERR(pltfm_host->clk); diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 72428b6bfc47..7b7286b4d81e 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, continue; } - if (time_after(jiffies, timeo) && !chip_ready(map, adr)) + /* + * We check "time_after" and "!chip_good" before checking "chip_good" to avoid + * the failure due to scheduling. 
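The comment above is about a subtle ordering issue: if the poll loop tests the timeout first and the success condition afterwards, a scheduling delay between the two tests can turn a completed write into a reported failure. A condensed illustration of the pattern, not the driver's exact loop; chip_good() and the timeout variable are used as in the surrounding code:

static int poll_write_completion(struct map_info *map, unsigned long adr,
				 map_word datum, unsigned long timeo)
{
	for (;;) {
		if (chip_good(map, adr, datum))
			return 0;		/* write completed */

		if (time_after(jiffies, timeo) &&
		    !chip_good(map, adr, datum))
			return -EIO;		/* genuine timeout */

		/* neither done nor timed out yet: keep polling */
	}
}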
+ */ + if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) break; if (chip_good(map, adr, datum)) { diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index f38e5c1b87e4..d984538980e2 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -722,12 +722,6 @@ static void marvell_nfc_select_target(struct nand_chip *chip, struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); u32 ndcr_generic; - if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die) - return; - - writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0); - writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1); - /* * Reset the NDCR register to a clean state for this particular chip, * also clear ND_RUN bit. @@ -739,6 +733,12 @@ static void marvell_nfc_select_target(struct nand_chip *chip, /* Also reset the interrupt status register */ marvell_nfc_clear_int(nfc, NDCR_ALL_INT); + if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die) + return; + + writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0); + writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1); + nfc->selected_chip = chip; marvell_nand->selected_die = die_nr; } diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 5e4ca082cfcd..7a96d168efc4 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -216,8 +216,8 @@ config GENEVE config GTP tristate "GPRS Tunneling Protocol datapath (GTP-U)" - depends on INET && NET_UDP_TUNNEL - select NET_IP_TUNNEL + depends on INET + select NET_UDP_TUNNEL ---help--- This allows one to create gtp virtual interfaces that provide the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b59708c35faf..ee610721098e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this, return NOTIFY_DONE; if (event_dev->flags & IFF_MASTER) { + int ret; + netdev_dbg(event_dev, "IFF_MASTER\n"); - return bond_master_netdev_event(event, event_dev); + ret = bond_master_netdev_event(event, event_dev); + if (ret != NOTIFY_DONE) + return ret; } if (event_dev->flags & IFF_SLAVE) { diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 2f120b2ffef0..4985268e2273 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count); static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) { - return sprintf(buf, "%pM\n", slave->perm_hwaddr); + return sprintf(buf, "%*phC\n", + slave->dev->addr_len, + slave->perm_hwaddr); } static SLAVE_ATTR_RO(perm_hwaddr); diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index e6234d209787..4212bc4a5f31 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -886,6 +886,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, fs->m_ext.data[1])) return -EINVAL; + if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES) + return -EINVAL; + if (fs->location != RX_CLS_LOC_ANY && test_bit(fs->location, priv->cfp.used)) return -EBUSY; @@ -974,6 +977,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc) struct cfp_rule *rule; int ret; + if (loc >= CFP_NUM_RULES) + return -EINVAL; + /* Refuse deleting unused rules, and those that are not unique since * that could 
leave IPv6 rules with one of the chained rule in the * table. diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index dce84a2a65c7..c44b2822e4dd 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return 0; lane = mv88e6390x_serdes_get_lane(chip, port); - if (lane < 0) + if (lane < 0 && lane != -ENODEV) return lane; - if (chip->ports[port].serdes_irq) { - err = mv88e6390_serdes_irq_disable(chip, port, lane); + if (lane >= 0) { + if (chip->ports[port].serdes_irq) { + err = mv88e6390_serdes_irq_disable(chip, port, lane); + if (err) + return err; + } + + err = mv88e6390x_serdes_power(chip, port, false); if (err) return err; } - err = mv88e6390x_serdes_power(chip, port, false); - if (err) - return err; + chip->ports[port].cmode = 0; if (cmode) { err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); @@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (err) return err; + chip->ports[port].cmode = cmode; + + lane = mv88e6390x_serdes_get_lane(chip, port); + if (lane < 0) + return lane; + err = mv88e6390x_serdes_power(chip, port, true); if (err) return err; @@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, } } - chip->ports[port].cmode = cmode; - return 0; } diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 576b37d12a63..c4fa400efdcc 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); } +static u32 +qca8k_port_to_phy(int port) +{ + /* From Andrew Lunn: + * Port 0 has no internal phy. + * Port 1 has an internal PHY at MDIO address 0. + * Port 2 has an internal PHY at MDIO address 1. + * ... + * Port 5 has an internal PHY at MDIO address 4. + * Port 6 has no internal PHY. + */ + + return port - 1; +} + +static int +qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data) +{ + u32 phy, val; + + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) + return -EINVAL; + + /* callee is responsible for not passing bad ports, + * but we still would like to make spills impossible. + */ + phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR; + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | + QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | + QCA8K_MDIO_MASTER_REG_ADDR(regnum) | + QCA8K_MDIO_MASTER_DATA(data); + + qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); + + return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY); +} + +static int +qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum) +{ + u32 phy, val; + + if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) + return -EINVAL; + + /* callee is responsible for not passing bad ports, + * but we still would like to make spills impossible. 
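For reference, the control word written to QCA8K_MDIO_MASTER_CTRL by the two accessors above is composed from the bit definitions added to qca8k.h further below (busy, enable and read flags plus the shifted PHY address, register number and data fields). A worked example of a read request for internal PHY 3, which corresponds to port 4 in the mapping quoted above; MII_BMSR comes from linux/mii.h and is only an example register:

u32 val = QCA8K_MDIO_MASTER_BUSY |              /* BIT(31): start/busy flag        */
	  QCA8K_MDIO_MASTER_EN |                /* BIT(30): master enable          */
	  QCA8K_MDIO_MASTER_READ |              /* BIT(27): read transaction       */
	  QCA8K_MDIO_MASTER_PHY_ADDR(3) |       /* PHY address, shifted to bit 21  */
	  QCA8K_MDIO_MASTER_REG_ADDR(MII_BMSR); /* register number, shifted to bit 16 */
/* the 16-bit result is then extracted with QCA8K_MDIO_MASTER_DATA_MASK */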
+ */ + phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR; + val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | + QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | + QCA8K_MDIO_MASTER_REG_ADDR(regnum); + + qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val); + + if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY)) + return -ETIMEDOUT; + + val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) & + QCA8K_MDIO_MASTER_DATA_MASK); + + return val; +} + +static int +qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) +{ + struct qca8k_priv *priv = ds->priv; + + return qca8k_mdio_write(priv, port, regnum, data); +} + +static int +qca8k_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + struct qca8k_priv *priv = ds->priv; + int ret; + + ret = qca8k_mdio_read(priv, port, regnum); + + if (ret < 0) + return 0xffff; + + return ret; +} + +static int +qca8k_setup_mdio_bus(struct qca8k_priv *priv) +{ + u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; + struct device_node *ports, *port; + int err; + + ports = of_get_child_by_name(priv->dev->of_node, "ports"); + if (!ports) + return -EINVAL; + + for_each_available_child_of_node(ports, port) { + err = of_property_read_u32(port, "reg", ®); + if (err) + return err; + + if (!dsa_is_user_port(priv->ds, reg)) + continue; + + if (of_property_read_bool(port, "phy-handle")) + external_mdio_mask |= BIT(reg); + else + internal_mdio_mask |= BIT(reg); + } + + if (!external_mdio_mask && !internal_mdio_mask) { + dev_err(priv->dev, "no PHYs are defined.\n"); + return -EINVAL; + } + + /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through + * the MDIO_MASTER register also _disconnects_ the external MDC + * passthrough to the internal PHYs. It's not possible to use both + * configurations at the same time! + * + * Because this came up during the review process: + * If the external mdio-bus driver is capable magically disabling + * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's + * accessors for the time being, it would be possible to pull this + * off. + */ + if (!!external_mdio_mask && !!internal_mdio_mask) { + dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n"); + return -EINVAL; + } + + if (external_mdio_mask) { + /* Make sure to disable the internal mdio bus in cases + * a dt-overlay and driver reload changed the configuration + */ + + qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_EN); + return 0; + } + + priv->ops.phy_read = qca8k_phy_read; + priv->ops.phy_write = qca8k_phy_write; + return 0; +} + static int qca8k_setup(struct dsa_switch *ds) { @@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds) if (IS_ERR(priv->regmap)) pr_warn("regmap initialization failed"); + ret = qca8k_setup_mdio_bus(priv); + if (ret) + return ret; + /* Initialize CPU port pad mode (xMII type, delays...) 
*/ phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn); if (phy_mode < 0) { @@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) qca8k_port_set_status(priv, port, 1); } -static int -qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - return mdiobus_read(priv->bus, phy, regnum); -} - -static int -qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - return mdiobus_write(priv->bus, phy, regnum, val); -} - static void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { @@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .setup = qca8k_setup, .adjust_link = qca8k_adjust_link, .get_strings = qca8k_get_strings, - .phy_read = qca8k_phy_read, - .phy_write = qca8k_phy_write, .get_ethtool_stats = qca8k_get_ethtool_stats, .get_sset_count = qca8k_get_sset_count, .get_mac_eee = qca8k_get_mac_eee, @@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev) return -ENOMEM; priv->ds->priv = priv; - priv->ds->ops = &qca8k_switch_ops; + priv->ops = qca8k_switch_ops; + priv->ds->ops = &priv->ops; mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index d146e54c8a6c..249fd62268e5 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -49,6 +49,18 @@ #define QCA8K_MIB_FLUSH BIT(24) #define QCA8K_MIB_CPU_KEEP BIT(20) #define QCA8K_MIB_BUSY BIT(17) +#define QCA8K_MDIO_MASTER_CTRL 0x3c +#define QCA8K_MDIO_MASTER_BUSY BIT(31) +#define QCA8K_MDIO_MASTER_EN BIT(30) +#define QCA8K_MDIO_MASTER_READ BIT(27) +#define QCA8K_MDIO_MASTER_WRITE 0 +#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26) +#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21) +#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16) +#define QCA8K_MDIO_MASTER_DATA(x) (x) +#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0) +#define QCA8K_MDIO_MASTER_MAX_PORTS 5 +#define QCA8K_MDIO_MASTER_MAX_REG 32 #define QCA8K_GOL_MAC_ADDR0 0x60 #define QCA8K_GOL_MAC_ADDR1 0x64 #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) @@ -169,6 +181,7 @@ struct qca8k_priv { struct dsa_switch *ds; struct mutex reg_mutex; struct device *dev; + struct dsa_switch_ops ops; }; struct qca8k_mib_desc { diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 808abb6b3671..b15752267c8d 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev) static void set_rx_mode(struct net_device *dev) { int ioaddr = dev->base_addr; - short new_mode; + unsigned short new_mode; if (dev->flags & IFF_PROMISC) { if (corkscrew_debug > 3) diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c index 342ae08ec3c2..d60a86aa8aa8 100644 --- a/drivers/net/ethernet/8390/mac8390.c +++ b/drivers/net/ethernet/8390/mac8390.c @@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count, static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); -#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) - /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); @@ -233,19 +231,26 @@ static enum mac8390_type 
mac8390_ident(struct nubus_rsrc *fres) static enum mac8390_access mac8390_testio(unsigned long membase) { - unsigned long outdata = 0xA5A0B5B0; - unsigned long indata = 0x00000000; + u32 outdata = 0xA5A0B5B0; + u32 indata = 0; + /* Try writing 32 bits */ - memcpy_toio((void __iomem *)membase, &outdata, 4); - /* Now compare them */ - if (memcmp_withio(&outdata, membase, 4) == 0) + nubus_writel(outdata, membase); + /* Now read it back */ + indata = nubus_readl(membase); + if (outdata == indata) return ACCESS_32; + + outdata = 0xC5C0D5D0; + indata = 0; + /* Write 16 bit output */ word_memcpy_tocard(membase, &outdata, 4); /* Now read it back */ word_memcpy_fromcard(&indata, membase, 4); if (outdata == indata) return ACCESS_16; + return ACCESS_UNKNOWN; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 74550ccc7a20..e2ffb159cbe2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self, } if (buff->is_ip_cso) { __skb_incr_checksum_unnecessary(skb); - if (buff->is_udp_cso || buff->is_tcp_cso) - __skb_incr_checksum_unnecessary(skb); } else { skb->ip_summed = CHECKSUM_NONE; } + + if (buff->is_udp_cso || buff->is_tcp_cso) + __skb_incr_checksum_unnecessary(skb); } #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 9e07b469066a..156fbc5601ca 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -1721,7 +1721,7 @@ static void atl1_inc_smb(struct atl1_adapter *adapter) adapter->soft_stats.scc += smb->tx_1_col; adapter->soft_stats.mcc += smb->tx_2_col; adapter->soft_stats.latecol += smb->tx_late_col; - adapter->soft_stats.tx_underun += smb->tx_underrun; + adapter->soft_stats.tx_underrun += smb->tx_underrun; adapter->soft_stats.tx_trunc += smb->tx_trunc; adapter->soft_stats.tx_pause += smb->tx_pause; @@ -3179,7 +3179,7 @@ static struct atl1_stats atl1_gstrings_stats[] = { {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, - {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, + {"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)}, {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h index 34a58cd846a0..eacff19ea05b 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.h +++ b/drivers/net/ethernet/atheros/atlx/atl1.h @@ -681,7 +681,7 @@ struct atl1_sft_stats { u64 scc; /* packets TX after a single collision */ u64 mcc; /* packets TX after multiple collisions */ u64 latecol; /* TX packets w/ late collisions */ - u64 tx_underun; /* TX packets aborted due to TX FIFO underrun + u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun * or TRD FIFO underrun */ u64 tx_trunc; /* TX packets truncated due to size > MTU */ u64 rx_pause; /* num Pause packets received. 
*/ diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index d99317b3d891..98da0fa27192 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -553,7 +553,7 @@ static void atl2_intr_tx(struct atl2_adapter *adapter) netdev->stats.tx_aborted_errors++; if (txs->late_col) netdev->stats.tx_window_errors++; - if (txs->underun) + if (txs->underrun) netdev->stats.tx_fifo_errors++; } while (1); diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h index c64a6bdfa7ae..25ec84cb4853 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.h +++ b/drivers/net/ethernet/atheros/atlx/atl2.h @@ -260,7 +260,7 @@ struct tx_pkt_status { unsigned multi_col:1; unsigned late_col:1; unsigned abort_col:1; - unsigned underun:1; /* current packet is aborted + unsigned underrun:1; /* current packet is aborted * due to txram underrun */ unsigned:3; /* reserved */ unsigned update:1; /* always 1'b1 in tx_status_buf */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index a9bdc21873d3..10ff37d6dc78 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) bnx2x_sample_bulletin(bp); if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { - BNX2X_ERR("Hypervisor will dicline the request, avoiding\n"); + BNX2X_ERR("Hypervisor will decline the request, avoiding\n"); rc = -EINVAL; goto out; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0bb9d7b3a2b6..52ade133b57c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1133,6 +1133,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, tpa_info = &rxr->rx_tpa[agg_id]; if (unlikely(cons != rxr->rx_next_cons)) { + netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", + cons, rxr->rx_next_cons); bnxt_sched_reset(bp, rxr); return; } @@ -1585,15 +1587,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } cons = rxcmp->rx_cmp_opaque; - rx_buf = &rxr->rx_buf_ring[cons]; - data = rx_buf->data; - data_ptr = rx_buf->data_ptr; if (unlikely(cons != rxr->rx_next_cons)) { int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); + netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", + cons, rxr->rx_next_cons); bnxt_sched_reset(bp, rxr); return rc1; } + rx_buf = &rxr->rx_buf_ring[cons]; + data = rx_buf->data; + data_ptr = rx_buf->data_ptr; prefetch(data_ptr); misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); @@ -1610,12 +1614,18 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rx_buf->data = NULL; if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { + u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); + bnxt_reuse_rx_data(rxr, cons, data); if (agg_bufs) bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); rc = -EIO; - goto next_rx; + if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { + netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); + bnxt_sched_reset(bp, rxr); + } + goto next_rx_no_len; } len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; @@ -1696,12 +1706,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = 1; next_rx: - rxr->rx_prod = NEXT_RX(prod); - rxr->rx_next_cons = NEXT_RX(cons); - 
cpr->rx_packets += 1; cpr->rx_bytes += len; +next_rx_no_len: + rxr->rx_prod = NEXT_RX(prod); + rxr->rx_next_cons = NEXT_RX(cons); + next_rx_no_prod_no_len: *raw_cons = tmp_raw_cons; @@ -5125,10 +5136,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) for (i = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u32 cmpl_ring_id; - cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); if (ring->fw_ring_id != INVALID_HW_RING_ID) { + u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); + hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX, close_path ? cmpl_ring_id : @@ -5141,10 +5152,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; u32 grp_idx = rxr->bnapi->index; - u32 cmpl_ring_id; - cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); if (ring->fw_ring_id != INVALID_HW_RING_ID) { + u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_RX, close_path ? cmpl_ring_id : @@ -5163,10 +5174,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; u32 grp_idx = rxr->bnapi->index; - u32 cmpl_ring_id; - cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); if (ring->fw_ring_id != INVALID_HW_RING_ID) { + u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + hwrm_ring_free_send_msg(bp, ring, type, close_path ? cmpl_ring_id : INVALID_HW_RING_ID); @@ -5305,17 +5316,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, req->num_tx_rings = cpu_to_le16(tx_rings); if (BNXT_NEW_RM(bp)) { enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; if (bp->flags & BNXT_FLAG_CHIP_P5) { enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; enables |= tx_rings + ring_grps ? - FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; } else { enables |= cp_rings ? - FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; enables |= ring_grps ? FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; @@ -5355,14 +5365,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; if (bp->flags & BNXT_FLAG_CHIP_P5) { enables |= tx_rings + ring_grps ? - FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; } else { enables |= cp_rings ? - FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; enables |= ring_grps ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; } @@ -6743,6 +6752,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; struct hwrm_port_qstats_ext_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; + u32 tx_stat_size; int rc; if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) @@ -6752,13 +6762,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) req.port_id = cpu_to_le16(pf->port_id); req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); - req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext)); + tx_stat_size = bp->hw_tx_port_stats_ext ? + sizeof(*bp->hw_tx_port_stats_ext) : 0; + req.tx_stat_size = cpu_to_le16(tx_stat_size); req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; - bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8; + bp->fw_tx_stats_ext_size = tx_stat_size ? + le16_to_cpu(resp->tx_stat_size) / 8 : 0; } else { bp->fw_rx_stats_ext_size = 0; bp->fw_tx_stats_ext_size = 0; @@ -8951,8 +8964,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) skip_uc: rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + if (rc && vnic->mc_list_count) { + netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", + rc); + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + } if (rc) - netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", + netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", rc); return rc; @@ -10675,6 +10695,7 @@ init_err_cleanup_tc: bnxt_clear_int_mode(bp); init_err_pci_clean: + bnxt_free_hwrm_short_cmd_req(bp); bnxt_free_hwrm_resources(bp); bnxt_free_ctx_mem(bp); kfree(bp->ctx); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 328373e0578f..060a6f386104 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -4283,7 +4283,7 @@ static void tg3_power_down(struct tg3 *tp) pci_set_power_state(tp->pdev, PCI_D3hot); } -static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) +static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) { switch (val & MII_TG3_AUX_STAT_SPDMASK) { case MII_TG3_AUX_STAT_10HALF: @@ -4787,7 +4787,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) bool current_link_up; u32 bmsr, val; u32 lcl_adv, rmt_adv; - u16 current_speed; + u32 current_speed; u8 current_duplex; int i, err; @@ -5719,7 +5719,7 @@ out: static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) { u32 orig_pause_cfg; - u16 orig_active_speed; + u32 orig_active_speed; u8 orig_active_duplex; u32 mac_status; bool current_link_up; @@ -5823,7 +5823,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) { int err = 0; u32 bmsr, bmcr; - u16 current_speed = SPEED_UNKNOWN; + u32 current_speed = SPEED_UNKNOWN; u8 current_duplex = DUPLEX_UNKNOWN; bool current_link_up = false; u32 local_adv, remote_adv, sgsr; diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index a772a33b685c..6953d0546acb 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info { struct 
tg3_link_config { /* Describes what we're trying to get. */ u32 advertising; - u16 speed; + u32 speed; u8 duplex; u8 autoneg; u8 flowctrl; @@ -2882,7 +2882,7 @@ struct tg3_link_config { u8 active_flowctrl; u8 active_duplex; - u16 active_speed; + u32 active_speed; u32 rmt_adv; }; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index ad099fd01b45..3da2795e2486 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue) /* First, update TX stats if needed */ if (skb) { - if (gem_ptp_do_txstamp(queue, skb, desc) == 0) { + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP) && + gem_ptp_do_txstamp(queue, skb, desc) == 0) { /* skb now belongs to timestamp buffer * and will be removed later */ @@ -3370,14 +3372,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, *hclk = devm_clk_get(&pdev->dev, "hclk"); } - if (IS_ERR(*pclk)) { + if (IS_ERR_OR_NULL(*pclk)) { err = PTR_ERR(*pclk); + if (!err) + err = -ENODEV; + dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); return err; } - if (IS_ERR(*hclk)) { + if (IS_ERR_OR_NULL(*hclk)) { err = PTR_ERR(*hclk); + if (!err) + err = -ENODEV; + dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); return err; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index aa2be4807191..c032bef1b776 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -32,6 +32,13 @@ #define DRV_NAME "nicvf" #define DRV_VERSION "1.0" +/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs + * the buffer to be contiguous. 
Allow XDP to be set up only if we don't exceed + * this value, keeping headroom for the 14 byte Ethernet header and two + * VLAN tags (for QinQ) + */ +#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2) + /* Supported devices */ static const struct pci_device_id nicvf_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, @@ -1328,10 +1335,11 @@ int nicvf_stop(struct net_device *netdev) struct nicvf_cq_poll *cq_poll = NULL; union nic_mbx mbx = {}; - cancel_delayed_work_sync(&nic->link_change_work); - /* wait till all queued set_rx_mode tasks completes */ - drain_workqueue(nic->nicvf_rx_mode_wq); + if (nic->nicvf_rx_mode_wq) { + cancel_delayed_work_sync(&nic->link_change_work); + drain_workqueue(nic->nicvf_rx_mode_wq); + } mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; nicvf_send_msg_to_pf(nic, &mbx); @@ -1452,7 +1460,8 @@ int nicvf_open(struct net_device *netdev) struct nicvf_cq_poll *cq_poll = NULL; /* wait till all queued set_rx_mode tasks completes if any */ - drain_workqueue(nic->nicvf_rx_mode_wq); + if (nic->nicvf_rx_mode_wq) + drain_workqueue(nic->nicvf_rx_mode_wq); netif_carrier_off(netdev); @@ -1550,10 +1559,12 @@ int nicvf_open(struct net_device *netdev) /* Send VF config done msg to PF */ nicvf_send_cfg_done(nic); - INIT_DELAYED_WORK(&nic->link_change_work, - nicvf_link_status_check_task); - queue_delayed_work(nic->nicvf_rx_mode_wq, - &nic->link_change_work, 0); + if (nic->nicvf_rx_mode_wq) { + INIT_DELAYED_WORK(&nic->link_change_work, + nicvf_link_status_check_task); + queue_delayed_work(nic->nicvf_rx_mode_wq, + &nic->link_change_work, 0); + } return 0; cleanup: @@ -1578,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) struct nicvf *nic = netdev_priv(netdev); int orig_mtu = netdev->mtu; + /* For now just support only the usual MTU sized frames, + * plus some headroom for VLAN, QinQ. + */ + if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) { + netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", + netdev->mtu); + return -EINVAL; + } + netdev->mtu = new_mtu; if (!netif_running(netdev)) @@ -1826,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) bool bpf_attached = false; int ret = 0; - /* For now just support only the usual MTU sized frames */ - if (prog && (dev->mtu > 1500)) { + /* For now just support only the usual MTU sized frames, + * plus some headroom for VLAN, QinQ. + */ + if (prog && dev->mtu > MAX_XDP_MTU) { netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", dev->mtu); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 5b4d3badcb73..e246f9733bb8 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic, /* Check if page can be recycled */ if (page) { ref_count = page_ref_count(page); - /* Check if this page has been used once i.e 'put_page' - * called after packet transmission i.e internal ref_count - * and page's ref_count are equal i.e page can be recycled. + /* This page can be recycled if internal ref_count and page's + * ref_count are equal, indicating that the page has been used + * once for packet transmission. For non-XDP mode, internal + * ref_count is always '1'. 
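Editor's sketch of the recycling rule the comment above describes; the helper is invented for illustration and omits the ref_count bookkeeping the real code performs when it decides to recycle an XDP page:

static bool nicvf_page_recyclable(struct page *page, int pgcache_ref,
				  bool is_xdp)
{
	int ref_count = page_ref_count(page);

	if (is_xdp)
		return ref_count == pgcache_ref;  /* every extra ref is ours   */

	return ref_count == 1;                    /* non-XDP: single reference */
}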
*/ - if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) - pgcache->ref_count--; - else - page = NULL; - - /* In non-XDP mode, page's ref_count needs to be '1' for it - * to be recycled. - */ - if (!rbdr->is_xdp && (ref_count != 1)) + if (rbdr->is_xdp) { + if (ref_count == pgcache->ref_count) + pgcache->ref_count--; + else + page = NULL; + } else if (ref_count != 1) { page = NULL; + } } if (!page) { @@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) while (head < rbdr->pgcnt) { pgcache = &rbdr->pgcache[head]; if (pgcache->page && page_ref_count(pgcache->page) != 0) { - if (!rbdr->is_xdp) { - put_page(pgcache->page); - continue; + if (rbdr->is_xdp) { + page_ref_sub(pgcache->page, + pgcache->ref_count - 1); } - page_ref_sub(pgcache->page, pgcache->ref_count - 1); put_page(pgcache->page); } head++; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 3130b43bba52..02959035ed3f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset) } /* should never happen! */ - BUG_ON(1); + BUG(); return NULL; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 88773ca58e6b..b3da81e90132 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter, break; default: - BUG_ON(1); + BUG(); } return buf_size; diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c index 74849be5f004..e2919005ead3 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c @@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total, ppmax = max; /* pool size must be multiple of unsigned long */ - bmap = BITS_TO_LONGS(ppmax); + bmap = ppmax / BITS_PER_TYPE(unsigned long); + if (!bmap) + return NULL; + ppmax = (bmap * sizeof(unsigned long)) << 3; alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; @@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, if (reserve_factor) { ppmax_pool = ppmax / reserve_factor; pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); + if (!pool) { + ppmax_pool = 0; + reserve_factor = 0; + } pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", ndev->name, ppmax, ppmax_pool, pool_index_max); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 2ba49e959c3f..dc339dc1adb2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) */ queue_mapping = skb_get_queue_mapping(skb); fq = &priv->fq[queue_mapping]; + + fd_len = dpaa2_fd_get_len(&fd); + nq = netdev_get_tx_queue(net_dev, queue_mapping); + netdev_tx_sent_queue(nq, fd_len); + + /* Everything that happens after this enqueues might race with + * the Tx confirmation callback for this frame + */ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { err = priv->enqueue(priv, fq, &fd, 0); if (err != -EBUSY) @@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) 
percpu_stats->tx_errors++; /* Clean up everything, including freeing the skb */ free_tx_fd(priv, fq, &fd, false); + netdev_tx_completed_queue(nq, 1, fd_len); } else { - fd_len = dpaa2_fd_get_len(&fd); percpu_stats->tx_packets++; percpu_stats->tx_bytes += fd_len; - - nq = netdev_get_tx_queue(net_dev, queue_mapping); - netdev_tx_sent_queue(nq, fd_len); } return NETDEV_TX_OK; @@ -1817,7 +1822,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev, dpaa2_fd_set_format(&fd, dpaa2_fd_single); dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA); - fq = &priv->fq[smp_processor_id()]; + fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)]; for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { err = priv->enqueue(priv, fq, &fd, 0); if (err != -EBUSY) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 697c2427f2b7..a96ad20ee484 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) int ret; if (enable) { - ret = clk_prepare_enable(fep->clk_ahb); - if (ret) - return ret; - ret = clk_prepare_enable(fep->clk_enet_out); if (ret) - goto failed_clk_enet_out; + return ret; if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) phy_reset_after_clk_enable(ndev->phydev); } else { - clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1885,8 +1880,6 @@ failed_clk_ref: failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); -failed_clk_enet_out: - clk_disable_unprepare(fep->clk_ahb); return ret; } @@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev) ret = clk_prepare_enable(fep->clk_ipg); if (ret) goto failed_clk_ipg; + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + goto failed_clk_ahb; fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { @@ -3563,6 +3559,9 @@ failed_reset: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); failed_regulator: + clk_disable_unprepare(fep->clk_ahb); +failed_clk_ahb: + clk_disable_unprepare(fep->clk_ipg); failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: @@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); return 0; @@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + int ret; - return clk_prepare_enable(fep->clk_ipg); + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + return ret; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + + return 0; + +failed_clk_ipg: + clk_disable_unprepare(fep->clk_ahb); + return ret; } static const struct dev_pm_ops fec_pm_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 79d03f8ee7b1..c7fa97a7e1f4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -150,7 +150,6 @@ out_buffer_fail: /* free desc along with its attached buffer */ static void hnae_free_desc(struct hnae_ring *ring) { - hnae_free_buffers(ring); 
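The dpaa2 hunk above moves the byte-queue-limit accounting ahead of the hardware enqueue because the Tx confirmation callback may run as soon as the frame is queued. A simplified view of the resulting ordering, reusing the function's own variables and collapsing the retry loop into a single call:

netdev_tx_sent_queue(nq, fd_len);       /* Tx confirmation may race from here on */

err = priv->enqueue(priv, fq, &fd, 0);  /* retried on -EBUSY in the real code */
if (unlikely(err < 0)) {
	percpu_stats->tx_errors++;
	free_tx_fd(priv, fq, &fd, false);
	netdev_tx_completed_queue(nq, 1, fd_len);  /* give the bytes back */
}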
dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, ring->desc_num * sizeof(ring->desc[0]), ring_to_dma_dir(ring)); @@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring) /* fini ring, also free the buffer for the ring */ static void hnae_fini_ring(struct hnae_ring *ring) { + if (is_rx_ring(ring)) + hnae_free_buffers(ring); + hnae_free_desc(ring); kfree(ring->desc_cb); ring->desc_cb = NULL; diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 08a750fb60c4..d6fb83437230 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -357,7 +357,7 @@ struct hnae_buf_ops { }; struct hnae_queue { - void __iomem *io_base; + u8 __iomem *io_base; phys_addr_t phy_base; struct hnae_ae_dev *dev; /* the device who use this queue */ struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index a97228c93831..6c0507921623 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn) static void hns_mac_param_get(struct mac_params *param, struct hns_mac_cb *mac_cb) { - param->vaddr = (void *)mac_cb->vaddr; + param->vaddr = mac_cb->vaddr; param->mac_mode = hns_get_enet_interface(mac_cb); ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); param->mac_id = mac_cb->mac_id; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index fbc75341bef7..22589799f1a5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -187,7 +187,7 @@ struct mac_statistics { /*mac para struct ,mac get param from nic or dsaf when initialize*/ struct mac_params { char addr[ETH_ALEN]; - void *vaddr; /*virtual address*/ + u8 __iomem *vaddr; /*virtual address*/ struct device *dev; u8 mac_id; /**< Ethernet operation mode (MAC-PHY interface and speed) */ @@ -402,7 +402,7 @@ struct mac_driver { enum mac_mode mac_mode; u8 mac_id; struct hns_mac_cb *mac_cb; - void __iomem *io_base; + u8 __iomem *io_base; unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ unsigned int virt_dev_num; struct device *dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ac55db065f16..61eea6ac846f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key( DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, DSAF_TBL_TCAM_KEY_PORT_S, port); - - mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan); } /** @@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry( /* default config dvc to 0 */ mac_data.tbl_ucast_dvc = 0; mac_data.tbl_ucast_out_port = mac_entry->port_num; - tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); - tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); + tcam_data.tbl_tcam_data_high = mac_key.high.val; + tcam_data.tbl_tcam_data_low = mac_key.low.val; hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); @@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, 0xff, mc_mask); - mask_key.high.val = 
le32_to_cpu(mask_key.high.val); - mask_key.low.val = le32_to_cpu(mask_key.low.val); - pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); } @@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val, entry_index); - tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); - tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); + tcam_data.tbl_tcam_data_high = mac_key.high.val; + tcam_data.tbl_tcam_data_low = mac_key.low.val; /* config mc entry with mask */ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, @@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, /* config key mask */ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); - mask_key.high.val = le32_to_cpu(mask_key.high.val); - mask_key.low.val = le32_to_cpu(mask_key.low.val); - pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); } @@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, soft_mac_entry += entry_index; soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; } else { /* not zero, just del port, update */ - tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); - tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); + tcam_data.tbl_tcam_data_high = mac_key.high.val; + tcam_data.tbl_tcam_data_low = mac_key.low.val; hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, @@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void) return DSAF_DUMP_REGS_NUM; } +static int hns_dsaf_get_port_id(u8 port) +{ + if (port < DSAF_SERVICE_NW_NUM) + return port; + + if (port >= DSAF_BASE_INNER_PORT_NUM) + return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; + + return -EINVAL; +} + static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) { struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; @@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) memset(&temp_key, 0x0, sizeof(temp_key)); mask_entry.addr[0] = 0x01; hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, - port, mask_entry.addr); + 0xf, mask_entry.addr); tbl_tcam_mcast.tbl_mcast_item_vld = 1; tbl_tcam_mcast.tbl_mcast_old_en = 0; - if (port < DSAF_SERVICE_NW_NUM) { - mskid = port; - } else if (port >= DSAF_BASE_INNER_PORT_NUM) { - mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; - } else { + /* set MAC port to handle multicast */ + mskid = hns_dsaf_get_port_id(port); + if (mskid == -EINVAL) { dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", dsaf_dev->ae_dev.name, port, mask_key.high.val, mask_key.low.val); return; } + dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], + mskid % 32, 1); + /* set pool bit map to handle multicast */ + mskid = hns_dsaf_get_port_id(port_num); + if (mskid == -EINVAL) { + dev_err(dsaf_dev->dev, + "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n", + dsaf_dev->ae_dev.name, port_num, + mask_key.high.val, mask_key.low.val); + return; + } dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], mskid % 32, 1); + memcpy(&temp_key, &mask_key, sizeof(mask_key)); hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, (struct dsaf_tbl_tcam_data *)(&mask_key), diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 0e1cd99831a6..76cc8887e1a8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -467,4 +467,6 @@ int 
hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id, u8 port_num); int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); +int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); + #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 16294cd3c954..19b94879691f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) dsaf_set_field(origin, 1ull << 10, 10, en); dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); } else { - u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + + u8 __iomem *base_addr = mac_cb->serdes_vaddr + (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 3d07c8a7639d..17c019106e6e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, } } -static void __iomem * +static u8 __iomem * hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) { return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; @@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) dsaf_dev->ppe_common[comm_index] = NULL; } -static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, - int ppe_idx) +static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, + int ppe_idx) { return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index f670e63a5a01..110c6e8222c7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -80,7 +80,7 @@ struct hns_ppe_cb { struct hns_ppe_hw_stats hw_stats; u8 index; /* index in a ppe common device */ - void __iomem *io_base; + u8 __iomem *io_base; int virq; u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ @@ -89,7 +89,7 @@ struct hns_ppe_cb { struct ppe_common_cb { struct device *dev; struct dsaf_device *dsaf_dev; - void __iomem *io_base; + u8 __iomem *io_base; enum ppe_common_mode ppe_mode; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 6bf346c11b25..ac3518ca4d7b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; } else { ring = &q->tx_ring; - ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + + ring->io_base = ring_pair_cb->q.io_base + HNS_RCB_TX_REG_OFFSET; irq_idx = HNS_RCB_IRQ_IDX_TX; mdnum_ppkt = is_ver1 ? 
HNS_RCB_RING_MAX_TXBD_PER_PKT : @@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) } } -static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) +static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) { struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index b9733b0b8482..b9e7f11f0896 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -1018,7 +1018,7 @@ #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 -static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) +static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value) { writel(value, base + reg); } @@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val) #define dsaf_set_bit(origin, shift, val) \ dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) -static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, +static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift, u32 val) { u32 origin = dsaf_read_reg(base, reg); @@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, #define dsaf_get_bit(origin, shift) \ dsaf_get_field((origin), (1ull << (shift)), (shift)) -static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, +static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift) { u32 origin; @@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) #define dsaf_write_b(addr, data)\ - writeb((data), (__iomem unsigned char *)(addr)) + writeb((data), (__iomem u8 *)(addr)) #define dsaf_read_b(addr)\ - readb((__iomem unsigned char *)(addr)) + readb((__iomem u8 *)(addr)) #define hns_mac_reg_read64(drv, offset) \ - readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset)))) + readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset)))) #endif /* _DSAF_REG_H */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index ba4316910dea..a60f207768fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); - dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); + dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); } /** diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 60e7d7ae3787..4cd86ba1f050 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -29,9 +29,6 @@ #define SERVICE_TIMER_HZ (1 * HZ) -#define NIC_TX_CLEAN_MAX_NUM 256 -#define NIC_RX_CLEAN_MAX_NUM 64 - #define RCB_IRQ_NOT_INITED 0 #define RCB_IRQ_INITED 1 #define HNS_BUFFER_SIZE_2048 2048 @@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, wmb(); /* commit all data before submit */ assert(skb->queue_mapping < priv->ae_handle->q_num); 
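Editor's illustration: the hns/dsaf hunks above retype MMIO bases from void __iomem * (or plain void *) to u8 __iomem *, so register offsets can be added with ordinary byte arithmetic, as dsaf_write_reg()/dsaf_read_reg() expect. A hedged user-space sketch of the idea, with a plain buffer standing in for the mapped window:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* With a byte-typed base, "base + reg" is a byte offset; a void * base would
 * need casts (and void * arithmetic is only a GNU extension anyway). */
static void fake_writel(uint32_t val, uint8_t *addr)
{
    memcpy(addr, &val, sizeof(val));        /* stands in for writel() */
}

static uint32_t fake_readl(const uint8_t *addr)
{
    uint32_t val;

    memcpy(&val, addr, sizeof(val));        /* stands in for readl() */
    return val;
}

int main(void)
{
    uint8_t regs[0x100] = { 0 };            /* pretend MMIO window */
    uint8_t *base = regs;                   /* byte-typed base pointer */

    fake_writel(0xdeadbeef, base + 0x40);   /* like writel(v, base + reg) */
    printf("0x%x\n", (unsigned)fake_readl(base + 0x40));
    return 0;
}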
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); - ring->stats.tx_pkts++; - ring->stats.tx_bytes += skb->len; return NETDEV_TX_OK; @@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, /* issue prefetch for next Tx descriptor */ prefetch(&ring->desc_cb[ring->next_to_clean]); } + /* update tx ring statistics. */ + ring->stats.tx_pkts += pkts; + ring->stats.tx_bytes += bytes; NETIF_TX_UNLOCK(ring); @@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) hns_nic_tx_fini_pro_v2; netif_napi_add(priv->netdev, &rd->napi, - hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); + hns_nic_common_poll, NAPI_POLL_WEIGHT); rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; } for (i = h->q_num; i < h->q_num * 2; i++) { @@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) hns_nic_rx_fini_pro_v2; netif_napi_add(priv->netdev, &rd->napi, - hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); + hns_nic_common_poll, NAPI_POLL_WEIGHT); rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 1c1f17ec6be2..162cb9afa0e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -22,6 +22,7 @@ #include "hns3_enet.h" #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) +#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) static void hns3_clear_all_ring(struct hnae3_handle *h); static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); @@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, desc_cb->length = size; - frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; + frag_buf_num = hns3_tx_bd_count(size); sizeoflast = size & HNS3_TX_LAST_SIZE_M; sizeoflast = sizeoflast ? 
sizeoflast : HNS3_MAX_BD_SIZE; @@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, int i; size = skb_headlen(skb); - buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; + buf_num = hns3_tx_bd_count(size); frag_num = skb_shinfo(skb)->nr_frags; for (i = 0; i < frag_num; i++) { frag = &skb_shinfo(skb)->frags[i]; size = skb_frag_size(frag); - bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >> - HNS3_MAX_BD_SIZE_OFFSET; + bdnum_for_frag = hns3_tx_bd_count(size); if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) return -ENOMEM; @@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, } if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { - buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >> - HNS3_MAX_BD_SIZE_OFFSET; + buf_num = hns3_tx_bd_count(skb->len); if (ring_space(ring) < buf_num) return -EBUSY; /* manual split the send packet */ @@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, buf_num = skb_shinfo(skb)->nr_frags + 1; if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { - buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + buf_num = hns3_tx_bd_count(skb->len); if (ring_space(ring) < buf_num) return -EBUSY; /* manual split the send packet */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 1db0bd41d209..75669cd0c311 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -193,7 +193,6 @@ enum hns3_nic_state { #define HNS3_VECTOR_INITED 1 #define HNS3_MAX_BD_SIZE 65535 -#define HNS3_MAX_BD_SIZE_OFFSET 16 #define HNS3_MAX_BD_PER_FRAG 8 #define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index fffe8c1c45d3..0fb61d440d3b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -3,7 +3,7 @@ # Makefile for the HISILICON network device drivers. # -ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 +ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile index fb93bbd35845..6193f8fa7cf3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile @@ -3,7 +3,7 @@ # Makefile for the HISILICON network device drivers. # -ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 +ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
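Editor's illustration: the hns3 hunks above replace the open-coded "(size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET" with a DIV_ROUND_UP()-based hns3_tx_bd_count(). Because HNS3_MAX_BD_SIZE is 65535 rather than 65536, a 16-bit shift is not a division by the BD size; this self-contained sketch shows where the two disagree:

#include <stdio.h>

#define HNS3_MAX_BD_SIZE    65535
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))   /* like the kernel's DIV_ROUND_UP() */

/* New helper: number of buffer descriptors needed for 'size' bytes. */
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

/* Old computation: shift by 16, i.e. divide by 65536 instead of 65535. */
#define old_bd_count(S)     (((S) + HNS3_MAX_BD_SIZE - 1) >> 16)

int main(void)
{
    unsigned int size = 65536U;   /* one byte more than a single BD can hold */

    /* old: 1 (one BD too few), new: 2 (correct) */
    printf("old=%u new=%u\n", old_bd_count(size), hns3_tx_bd_count(size));
    return 0;
}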
\ No newline at end of file diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index baf5cc251f32..8b8a7d00e8e0 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -39,7 +39,7 @@ struct hns_mdio_sc_reg { }; struct hns_mdio_device { - void *vbase; /* mdio reg base address */ + u8 __iomem *vbase; /* mdio reg base address */ struct regmap *subctrl_vbase; struct hns_mdio_sc_reg sc_reg; }; @@ -96,21 +96,17 @@ enum mdio_c45_op_seq { #define MDIO_SC_CLK_ST 0x531C #define MDIO_SC_RESET_ST 0x5A1C -static void mdio_write_reg(void *base, u32 reg, u32 value) +static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value) { - u8 __iomem *reg_addr = (u8 __iomem *)base; - - writel_relaxed(value, reg_addr + reg); + writel_relaxed(value, base + reg); } #define MDIO_WRITE_REG(a, reg, value) \ mdio_write_reg((a)->vbase, (reg), (value)) -static u32 mdio_read_reg(void *base, u32 reg) +static u32 mdio_read_reg(u8 __iomem *base, u32 reg) { - u8 __iomem *reg_addr = (u8 __iomem *)base; - - return readl_relaxed(reg_addr + reg); + return readl_relaxed(base + reg); } #define mdio_set_field(origin, mask, shift, val) \ @@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg) #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) -static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, +static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift, u32 val) { u32 origin = mdio_read_reg(base, reg); @@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) -static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) +static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift) { u32 origin; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 3baabdc89726..90b62c1412c8 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev, if (ehea_add_adapter_mr(adapter)) { pr_err("creating MR failed\n"); + of_node_put(eth_dn); return -EIO; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5ecbb1adcf3b..3dfb2d131eb7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, */ adapter->state = VNIC_PROBED; + reinit_completion(&adapter->init_done); rc = init_crq_queue(adapter); if (rc) { netdev_err(adapter->netdev, @@ -3761,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; + netdev_features_t old_hw_features = 0; union ibmvnic_crq crq; int i; @@ -3836,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) adapter->ip_offload_ctrl.large_rx_ipv4 = 0; adapter->ip_offload_ctrl.large_rx_ipv6 = 0; - adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; + if (adapter->state != VNIC_PROBING) { + old_hw_features = adapter->netdev->hw_features; + adapter->netdev->hw_features = 0; + } + + adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; if (buf->tcp_ipv4_chksum || 
buf->udp_ipv4_chksum) - adapter->netdev->features |= NETIF_F_IP_CSUM; + adapter->netdev->hw_features |= NETIF_F_IP_CSUM; if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) - adapter->netdev->features |= NETIF_F_IPV6_CSUM; + adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; if ((adapter->netdev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) - adapter->netdev->features |= NETIF_F_RXCSUM; + adapter->netdev->hw_features |= NETIF_F_RXCSUM; if (buf->large_tx_ipv4) - adapter->netdev->features |= NETIF_F_TSO; + adapter->netdev->hw_features |= NETIF_F_TSO; if (buf->large_tx_ipv6) - adapter->netdev->features |= NETIF_F_TSO6; + adapter->netdev->hw_features |= NETIF_F_TSO6; + + if (adapter->state == VNIC_PROBING) { + adapter->netdev->features |= adapter->netdev->hw_features; + } else if (old_hw_features != adapter->netdev->hw_features) { + netdev_features_t tmp = 0; - adapter->netdev->hw_features |= adapter->netdev->features; + /* disable features no longer supported */ + adapter->netdev->features &= adapter->netdev->hw_features; + /* turn on features now supported if previously enabled */ + tmp = (old_hw_features ^ adapter->netdev->hw_features) & + adapter->netdev->hw_features; + adapter->netdev->features |= + tmp & adapter->netdev->wanted_features; + } memset(&crq, 0, sizeof(crq)); crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; @@ -4625,7 +4644,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) old_num_rx_queues = adapter->req_rx_queues; old_num_tx_queues = adapter->req_tx_queues; - init_completion(&adapter->init_done); + reinit_completion(&adapter->init_done); adapter->init_done_rc = 0; ibmvnic_send_crq_init(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { @@ -4680,7 +4699,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) adapter->from_passive_init = false; - init_completion(&adapter->init_done); adapter->init_done_rc = 0; ibmvnic_send_crq_init(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { @@ -4759,6 +4777,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); INIT_LIST_HEAD(&adapter->rwi_list); spin_lock_init(&adapter->rwi_lock); + init_completion(&adapter->init_done); adapter->resetting = false; adapter->mac_change_pending = false; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5a0419421511..ecef949f3baa 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -41,6 +41,8 @@ static int __init fm10k_init_module(void) /* create driver workqueue */ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, fm10k_driver_name); + if (!fm10k_workqueue) + return -ENOMEM; fm10k_dbg_init(); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d684998ba2b0..d3cc3427caad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -790,6 +790,8 @@ struct i40e_vsi { /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); + + unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) return !!vsi->xdp_prog; } -static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) -{ - bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); - int qid = ring->queue_index; 
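Editor's illustration: the ibmvnic hunk above reworks handle_query_ip_offload_rsp() so re-queried offload capabilities land in hw_features first and are then reconciled with the currently enabled and user-wanted features. A small user-space sketch of that bitmask logic (the flag values are illustrative, not the netdev feature bits):

#include <stdio.h>
#include <stdint.h>

#define F_SG     (1u << 0)
#define F_TSO    (1u << 1)
#define F_RXCSUM (1u << 2)

int main(void)
{
    uint32_t old_hw   = F_SG | F_TSO;            /* capabilities before re-query */
    uint32_t new_hw   = F_SG | F_RXCSUM;         /* capabilities after re-query */
    uint32_t features = F_SG | F_TSO;            /* currently enabled */
    uint32_t wanted   = F_SG | F_TSO | F_RXCSUM; /* what the user asked for */

    /* disable features no longer supported */
    features &= new_hw;
    /* turn on newly supported features if previously requested */
    features |= ((old_hw ^ new_hw) & new_hw) & wanted;

    printf("features=0x%x\n", (unsigned)features);   /* F_SG | F_RXCSUM */
    return 0;
}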
- - if (ring_is_xdp(ring)) - qid -= ring->vsi->alloc_queue_pairs; - - if (!xdp_on) - return NULL; - - return xdp_get_umem_from_qid(ring->vsi->netdev, qid); -} - int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4c885801fa26..7874d0ec7fb0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; /* only magic packet is supported */ - if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) - | (wol->wolopts != WAKE_FILTER)) + if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; /* is this a new value? */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index da62218eb70a..b1c265012c8a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) } /** + * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled + * @ring: The Tx or Rx ring + * + * Returns the UMEM or NULL. + **/ +static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) +{ + bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); + int qid = ring->queue_index; + + if (ring_is_xdp(ring)) + qid -= ring->vsi->alloc_queue_pairs; + + if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) + return NULL; + + return xdp_get_umem_from_qid(ring->vsi->netdev, qid); +} + +/** * i40e_configure_tx_ring - Configure a transmit ring context and rest * @ring: The Tx ring to configure * @@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) hash_init(vsi->mac_filter_hash); vsi->irqs_ready = false; + if (type == I40E_VSI_MAIN) { + vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); + if (!vsi->af_xdp_zc_qps) + goto err_rings; + } + ret = i40e_set_num_rings_in_vsi(vsi); if (ret) goto err_rings; @@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) goto unlock_pf; err_rings: + bitmap_free(vsi->af_xdp_zc_qps); pf->next_vsi = i - 1; kfree(vsi); unlock_pf: @@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); + bitmap_free(vsi->af_xdp_zc_qps); i40e_vsi_free_arrays(vsi, true); i40e_clear_rss_config_user(vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 5fb4353c742b..31575c0bb884 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now; + struct timespec64 now, then; + then = ns_to_timespec64(delta); mutex_lock(&pf->tmreg_lock); i40e_ptp_read(pf, &now, NULL); - timespec64_add_ns(&now, delta); + now = timespec64_add(now, then); i40e_ptp_write(pf, (const struct timespec64 *)&now); mutex_unlock(&pf->tmreg_lock); diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index b5c182e688e3..1b17486543ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, if (err) return err; + set_bit(qid, vsi->af_xdp_zc_qps); + if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); if (if_running) { @@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) return err; } + clear_bit(qid, vsi->af_xdp_zc_qps); i40e_xsk_umem_dma_unmap(vsi, umem); if (if_running) { diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 01fcfc6f3415..d2e2c50ce257 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -194,6 +194,8 @@ /* enable link status from external LINK_0 and LINK_1 pins */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ #define E1000_CTRL_RST 0x04000000 /* Global reset */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 69b230c53fed..3269d8e94744 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl, status; u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif + bool wake; rtnl_lock(); netif_device_detach(netdev); @@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, igb_clear_interrupt_scheme(adapter); rtnl_unlock(); -#ifdef CONFIG_PM - if (!runtime) { - retval = pci_save_state(pdev); - if (retval) - return retval; - } -#endif - status = rd32(E1000_STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, } ctrl = rd32(E1000_CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 ctrl |= E1000_CTRL_ADVD3WUC; wr32(E1000_CTRL, ctrl); @@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, wr32(E1000_WUFC, 0); } - *enable_wake = wufc || adapter->en_mng_pt; - if (!*enable_wake) + wake = wufc || adapter->en_mng_pt; + if (!wake) igb_power_down_link(adapter); else igb_power_up_link(adapter); + if (enable_wake) + *enable_wake = wake; + /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
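Editor's illustration: the i40e hunks above add a per-VSI af_xdp_zc_qps bitmap so i40e_xsk_umem() only returns a UMEM for queues where zero-copy was explicitly enabled (set_bit on enable, clear_bit on disable, test_bit on lookup, bitmap_zalloc/bitmap_free around the VSI lifetime). A user-space sketch of the same discipline, with the kernel bitmap helpers modeled on plain unsigned longs:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static void set_bit(unsigned int nr, unsigned long *map)
{ map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); }
static void clear_bit(unsigned int nr, unsigned long *map)
{ map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG)); }
static int test_bit(unsigned int nr, const unsigned long *map)
{ return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1; }

int main(void)
{
    unsigned int num_qps = 64;
    /* like bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL) */
    unsigned long *zc_qps = calloc((num_qps + BITS_PER_LONG - 1) / BITS_PER_LONG,
                                   sizeof(unsigned long));
    if (!zc_qps)
        return 1;

    set_bit(3, zc_qps);                      /* UMEM enabled on qid 3 */
    printf("qid 3: %d, qid 5: %d\n",         /* prints 1, 0 */
           test_bit(3, zc_qps), test_bit(5, zc_qps));
    clear_bit(3, zc_qps);                    /* UMEM disabled again */
    free(zc_qps);                            /* like bitmap_free() */
    return 0;
}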
*/ @@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev) static int __maybe_unused igb_suspend(struct device *dev) { - int retval; - bool wake; - struct pci_dev *pdev = to_pci_dev(dev); - - retval = __igb_shutdown(pdev, &wake, 0); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; + return __igb_shutdown(to_pci_dev(dev), NULL, 0); } static int __maybe_unused igb_resume(struct device *dev) @@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev) static int __maybe_unused igb_runtime_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - int retval; - bool wake; - - retval = __igb_shutdown(pdev, &wake, 1); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; + return __igb_shutdown(to_pci_dev(dev), NULL, 1); } static int __maybe_unused igb_runtime_resume(struct device *dev) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index cc4907f9ff02..2fb97967961c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) struct pci_dev *pdev = adapter->pdev; struct device *dev = &adapter->netdev->dev; struct mii_bus *bus; + int err = -ENODEV; - adapter->mii_bus = devm_mdiobus_alloc(dev); - if (!adapter->mii_bus) + bus = devm_mdiobus_alloc(dev); + if (!bus) return -ENOMEM; - bus = adapter->mii_bus; - switch (hw->device_id) { /* C3000 SoCs */ case IXGBE_DEV_ID_X550EM_A_KR: @@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) */ hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; - return mdiobus_register(bus); + err = mdiobus_register(bus); + if (!err) { + adapter->mii_bus = bus; + return 0; + } ixgbe_no_mii_bus: devm_mdiobus_free(dev, bus); - adapter->mii_bus = NULL; - return -ENODEV; + return err; } /** diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 71c65cc17904..d3eaf2ceaa39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs); * switching channels */ typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); +int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, struct mlx5e_channels *new_chs, mlx5e_fp_hw_modify hw_modify); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 122927f3a600..d5e5afbdca6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, if (!eproto) return -EINVAL; - if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet)) - return -EOPNOTSUPP; - err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index eac245a93f91..4ab0d030b544 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -122,7 +122,9 @@ out: return err; } -/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ +/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) + * minimum speed value is 40Gbps + */ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) { u32 speed; @@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) int err; err = mlx5e_port_linkspeed(priv->mdev, &speed); - if (err) { - mlx5_core_warn(priv->mdev, "cannot get port speed\n"); - return 0; - } + if (err) + speed = SPEED_40000; + speed = max_t(u32, speed, SPEED_40000); xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; @@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) } static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, - u32 xoff, unsigned int mtu) + u32 xoff, unsigned int max_mtu) { int i; @@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, } if (port_buffer->buffer[i].size < - (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) + (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) return -ENOMEM; port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; - port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; + port_buffer->buffer[i].xon = + port_buffer->buffer[i].xoff - max_mtu; } return 0; @@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, /** * update_buffer_lossy() - * mtu: device's MTU + * max_mtu: netdev's max_mtu * pfc_en: <input> current pfc configuration * buffer: <input> current prio to buffer mapping * xoff: <input> xoff value @@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, * Return 0 if no error. * Set change to true if buffer configuration is modified. 
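Editor's illustration: the port_buffer.c hunk above changes calculate_xoff() to fall back to, and never go below, 40Gb/s when the link speed cannot be read or is lower. Plugging numbers into the integer form of xoff = (301 + 2.16*len)*speed[Gb/s] + 2.72*MTU (the cable length and MTU below are just example inputs):

#include <stdio.h>

#define SPEED_40000 40000   /* Mb/s, as in the ethtool speed constants */

static unsigned int calculate_xoff_sketch(unsigned int speed_mbps,
                                          unsigned int cable_len_m,
                                          unsigned int mtu)
{
    unsigned int speed = speed_mbps;

    if (speed < SPEED_40000)   /* also used when the speed query fails */
        speed = SPEED_40000;

    /* (301 + 2.16*len) * speed[Gb/s] + 2.72*MTU, in integer arithmetic */
    return (301 + 216 * cable_len_m / 100) * speed / 1000 + 272 * mtu / 100;
}

int main(void)
{
    /* 7m cable, 40Gb/s, 1500-byte MTU -> (301 + 15)*40 + 4080 = 16720 bytes */
    printf("xoff = %u\n", calculate_xoff_sketch(40000, 7, 1500));
    return 0;
}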
*/ -static int update_buffer_lossy(unsigned int mtu, +static int update_buffer_lossy(unsigned int max_mtu, u8 pfc_en, u8 *buffer, u32 xoff, struct mlx5e_port_buffer *port_buffer, bool *change) @@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu, } if (changed) { - err = update_xoff_threshold(port_buffer, xoff, mtu); + err = update_xoff_threshold(port_buffer, xoff, max_mtu); if (err) return err; @@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu, return 0; } +#define MINIMUM_MAX_MTU 9216 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 change, unsigned int mtu, struct ieee_pfc *pfc, @@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, bool update_prio2buffer = false; u8 buffer[MLX5E_MAX_PRIORITY]; bool update_buffer = false; + unsigned int max_mtu; u32 total_used = 0; u8 curr_pfc_en; int err; int i; mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); + max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU); err = mlx5e_port_query_buffer(priv, &port_buffer); if (err) @@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu); if (err) return err; } @@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, + err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, &port_buffer, &update_buffer); if (err) return err; @@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, - &port_buffer, &update_buffer); + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, + xoff, &port_buffer, &update_buffer); if (err) return err; } @@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, return -EINVAL; update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu); if (err) return err; } @@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, /* Need to update buffer configuration if xoff value is changed */ if (!update_buffer && xoff != priv->dcbx.xoff) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 9d38e62cdf24..476dd97f7f2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx) static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) { - int err; + int err = 0; rtnl_lock(); mutex_lock(&priv->state_lock); - mlx5e_close_locked(priv->netdev); - err = mlx5e_open_locked(priv->netdev); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto out; + + err = mlx5e_safe_reopen_channels(priv); + +out: mutex_unlock(&priv->state_lock); rtnl_unlock(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index fa2a3c444cdc..eec07b34b4ad 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + if (!(mlx5e_eswitch_rep(*out_dev) && + mlx5e_is_uplink_rep(netdev_priv(*out_dev)))) + return -EOPNOTSUPP; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 03b2a9f9c589..cad34d6f5f45 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -33,6 +33,26 @@ #include <linux/bpf_trace.h> #include "en/xdp.h" +int mlx5e_xdp_max_mtu(struct mlx5e_params *params) +{ + int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM; + + /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)). + * The condition checked in mlx5e_rx_is_linear_skb is: + * SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1) + * (Note that hw_mtu == sw_mtu + hard_mtu.) + * What is returned from this function is: + * max_mtu = PAGE_SIZE - S - hr - hard_mtu (2) + * After assigning sw_mtu := max_mtu, the left side of (1) turns to + * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE, + * because both PAGE_SIZE and S are already aligned. Any number greater + * than max_mtu would make the left side of (1) greater than PAGE_SIZE, + * so max_mtu is the maximum MTU allowed. + */ + + return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr)); +} + static inline bool mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, struct xdp_buff *xdp) @@ -304,9 +324,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) mlx5e_xdpi_fifo_pop(xdpi_fifo); if (is_redirect) { - xdp_return_frame(xdpi.xdpf); dma_unmap_single(sq->pdev, xdpi.dma_addr, xdpi.xdpf->len, DMA_TO_DEVICE); + xdp_return_frame(xdpi.xdpf); } else { /* Recycle RX page */ mlx5e_page_release(rq, &xdpi.di, true); @@ -345,9 +365,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq) mlx5e_xdpi_fifo_pop(xdpi_fifo); if (is_redirect) { - xdp_return_frame(xdpi.xdpf); dma_unmap_single(sq->pdev, xdpi.dma_addr, xdpi.xdpf->len, DMA_TO_DEVICE); + xdp_return_frame(xdpi.xdpf); } else { /* Recycle RX page */ mlx5e_page_release(rq, &xdpi.di, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index ee27a7c8cd87..553956cadc8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -34,13 +34,12 @@ #include "en.h" -#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ - MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) #define MLX5E_XDP_TX_EMPTY_DS_COUNT \ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */) +int mlx5e_xdp_max_mtu(struct mlx5e_params *params); bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, void *va, u16 *rx_headroom, u32 *len); bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 3078491cc0d0..1539cf3de5dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, if (err) return err; + mutex_lock(&mdev->mlx5e_res.td.list_lock); list_add(&tir->list, 
&mdev->mlx5e_res.td.tirs_list); + mutex_unlock(&mdev->mlx5e_res.td.list_lock); return 0; } @@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir) { + mutex_lock(&mdev->mlx5e_res.td.list_lock); mlx5_core_destroy_tir(mdev, tir->tirn); list_del(&tir->list); + mutex_unlock(&mdev->mlx5e_res.td.list_lock); } static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, @@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) } INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); + mutex_init(&mdev->mlx5e_res.td.list_lock); return 0; @@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_tir *tir; - int err = -ENOMEM; + int err = 0; u32 tirn = 0; int inlen; void *in; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + if (!in) { + err = -ENOMEM; goto out; + } if (enable_uc_lb) MLX5_SET(modify_tir_in, in, ctx.self_lb_block, @@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); + mutex_lock(&mdev->mlx5e_res.td.list_lock); list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { tirn = tir->tirn; err = mlx5_core_modify_tir(mdev, tirn, in, inlen); @@ -168,6 +176,7 @@ out: kvfree(in); if (err) netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); + mutex_unlock(&mdev->mlx5e_res.td.list_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a0987cc5fe4a..78dc8fe2a83c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev, - unsigned long *advertising_modes, - u32 eth_proto_cap) +static void ptys2ethtool_adver_link(unsigned long *advertising_modes, + u32 eth_proto_cap, bool ext) { unsigned long proto_cap = eth_proto_cap; struct ptys2ethtool_config *table; u32 max_size; int proto; - mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); + table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; + max_size = ext ? 
ARRAY_SIZE(ptys2ext_ethtool_table) : + ARRAY_SIZE(ptys2legacy_ethtool_table); + for_each_set_bit(proto, &proto_cap, max_size) bitmap_or(advertising_modes, advertising_modes, table[proto].advertised, @@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap, ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); } -static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap, - u8 tx_pause, u8 rx_pause, - struct ethtool_link_ksettings *link_ksettings) +static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause, + struct ethtool_link_ksettings *link_ksettings, + bool ext) { unsigned long *advertising = link_ksettings->link_modes.advertising; - ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap); + ptys2ethtool_adver_link(advertising, eth_proto_cap, ext); if (rx_pause) ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); @@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, struct ethtool_link_ksettings *link_ksettings) { unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; + bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); - ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp); + ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); } int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, @@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, u8 an_disable_admin; u8 an_status; u8 connector_type; + bool admin_ext; bool ext; int err; @@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, eth_proto_capability); eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_admin); + /* Fields: eth_proto_admin and ext_eth_proto_admin are + * mutually exclusive. Hence try reading legacy advertising + * when extended advertising is zero. + * admin_ext indicates how eth_proto_admin should be + * interpreted + */ + admin_ext = ext; + if (ext && !eth_proto_admin) { + eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false, + eth_proto_admin); + admin_ext = false; + } + eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); @@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); get_supported(mdev, eth_proto_cap, link_ksettings); - get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings); + get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings, + admin_ext); get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; @@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) - ext_requested = (link_ksettings->link_modes.advertising[0] > - MLX5E_PTYS_EXT); + ext_requested = !!(link_ksettings->link_modes.advertising[0] > + MLX5E_PTYS_EXT || + link_ksettings->link_modes.advertising[1]); ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); - - /*when ptys_extended_ethernet is set legacy link modes are deprecated */ - if (ext_requested != ext_supported) - return -EPROTONOSUPPORT; + ext_requested &= ext_supported; speed = link_ksettings->base.speed; ethtool2ptys_adver_func = ext_requested ? 
mlx5e_ethtool2ptys_ext_adver_link : mlx5e_ethtool2ptys_adver_link; - err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto); + err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto); if (err) { netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", __func__, err); @@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, if (!an_changes && link_modes == eproto.admin) goto out; - mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported); + mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested); mlx5_toggle_port_link(mdev); out: @@ -1570,7 +1586,7 @@ static int mlx5e_get_module_info(struct net_device *netdev, break; case MLX5_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH; break; default: netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", @@ -1752,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) struct mlx5e_channel *c; int i; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || + priv->channels.params.xdp_prog) return 0; for (i = 0; i < channels->num; i++) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b5fdbd3190d9..46157e2a1e5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (params->rx_dim_enabled) __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); - if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) + /* We disable csum_complete when XDP is enabled since + * XDP programs might manipulate packets which will render + * skb->checksum incorrect. 
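Editor's illustration: the en_ethtool.c hunks above let legacy and extended link modes coexist; the extended PTYS register is used only when the request actually contains extended modes (a bit above the legacy range, or anything in the second advertising word) and the device supports it, otherwise the legacy path is taken. A hedged sketch of that selection, where the legacy/extended boundary constant is illustrative rather than the driver's MLX5E_PTYS_EXT value:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Legacy modes occupy the low bits of word 0; anything above that range, or in
 * word 1, is an "extended" mode.  The boundary below is illustrative only. */
#define LEGACY_MASK ((1ULL << 36) - 1)

static bool use_extended_ptys(const uint64_t adv[2], bool ext_supported)
{
    bool ext_requested = (adv[0] > LEGACY_MASK) || adv[1];

    /* if the device has no extended PTYS, silently stay on legacy */
    return ext_requested && ext_supported;
}

int main(void)
{
    uint64_t legacy_only[2] = { 1ULL << 3, 0 };
    uint64_t ext_mode[2]    = { 0, 1ULL << 2 };

    printf("%d %d\n",
           use_extended_ptys(legacy_only, true),   /* 0: legacy register */
           use_extended_ptys(ext_mode, true));     /* 1: extended register */
    return 0;
}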
+ */ + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp) __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); return 0; @@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, return 0; } +int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) +{ + struct mlx5e_channels new_channels = {}; + + new_channels.params = priv->channels.params; + return mlx5e_safe_switch_channels(priv, &new_channels, NULL); +} + void mlx5e_timestamp_init(struct mlx5e_priv *priv) { priv->tstamp.tx_type = HWTSTAMP_TX_OFF; @@ -3765,7 +3777,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, if (params->xdp_prog && !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", - new_mtu, MLX5E_XDP_MAX_MTU); + new_mtu, mlx5e_xdp_max_mtu(params)); err = -EINVAL; goto out; } @@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) if (!report_failed) goto unlock; - mlx5e_close_locked(priv->netdev); - err = mlx5e_open_locked(priv->netdev); + err = mlx5e_safe_reopen_channels(priv); if (err) netdev_err(priv->netdev, - "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", + "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n", err); unlock: @@ -4201,7 +4212,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", - new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU); + new_channels.params.sw_mtu, + mlx5e_xdp_max_mtu(&new_channels.params)); return -EINVAL; } @@ -4553,7 +4565,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, { enum mlx5e_traffic_types tt; - rss_params->hfunc = ETH_RSS_HASH_XOR; + rss_params->hfunc = ETH_RSS_HASH_TOP; netdev_rss_key_fill(rss_params->toeplitz_hash_key, sizeof(rss_params->toeplitz_hash_key)); mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3dde5c7e0739..c3b3002ff62f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, { *proto = ((struct ethhdr *)skb->data)->h_proto; *proto = __vlan_get_protocol(skb, *proto, network_depth); - return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6)); + + if (*proto == htons(ETH_P_IP)) + return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr)); + + if (*proto == htons(ETH_P_IPV6)) + return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr)); + + return false; } static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) @@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) rq->stats->ecn_mark += !!rc; } -static u32 mlx5e_get_fcs(const struct sk_buff *skb) -{ - const void *fcs_bytes; - u32 _fcs_bytes; - - fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN, - ETH_FCS_LEN, &_fcs_bytes); - - return __get_unaligned_cpu32(fcs_bytes); -} - static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) { void *ip_p = skb->data + network_depth; @@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) +#define MAX_PADDING 
8 + +static void +tail_padding_csum_slow(struct sk_buff *skb, int offset, int len, + struct mlx5e_rq_stats *stats) +{ + stats->csum_complete_tail_slow++; + skb->csum = csum_block_add(skb->csum, + skb_checksum(skb, offset, len, 0), + offset); +} + +static void +tail_padding_csum(struct sk_buff *skb, int offset, + struct mlx5e_rq_stats *stats) +{ + u8 tail_padding[MAX_PADDING]; + int len = skb->len - offset; + void *tail; + + if (unlikely(len > MAX_PADDING)) { + tail_padding_csum_slow(skb, offset, len, stats); + return; + } + + tail = skb_header_pointer(skb, offset, len, tail_padding); + if (unlikely(!tail)) { + tail_padding_csum_slow(skb, offset, len, stats); + return; + } + + stats->csum_complete_tail++; + skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset); +} + +static void +mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, + struct mlx5e_rq_stats *stats) +{ + struct ipv6hdr *ip6; + struct iphdr *ip4; + int pkt_len; + + switch (proto) { + case htons(ETH_P_IP): + ip4 = (struct iphdr *)(skb->data + network_depth); + pkt_len = network_depth + ntohs(ip4->tot_len); + break; + case htons(ETH_P_IPV6): + ip6 = (struct ipv6hdr *)(skb->data + network_depth); + pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len); + break; + default: + return; + } + + if (likely(pkt_len >= skb->len)) + return; + + tail_padding_csum(skb, pkt_len, stats); +} + static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, @@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } - if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) + /* True when explicitly set via priv flag, or XDP prog is loaded */ + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) goto csum_unnecessary; /* CQE csum doesn't cover padding octets in short ethernet @@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, skb->csum = csum_partial(skb->data + ETH_HLEN, network_depth - ETH_HLEN, skb->csum); - if (unlikely(netdev->features & NETIF_F_RXFCS)) - skb->csum = csum_block_add(skb->csum, - (__force __wsum)mlx5e_get_fcs(skb), - skb->len - ETH_FCS_LEN); + + mlx5e_skb_padding_csum(skb, network_depth, proto, stats); stats->csum_complete++; return; } csum_unnecessary: if (likely((cqe->hds_ip_ext & CQE_L3_OK) && - ((cqe->hds_ip_ext & CQE_L4_OK) || - (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) { + (cqe->hds_ip_ext & CQE_L4_OK))) { skb->ip_summed = CHECKSUM_UNNECESSARY; if (cqe_is_tunneled(cqe)) { skb->csum_level = 1; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1a78e05cbba8..b75aa8b8bf04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, @@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct 
mlx5e_priv *priv) s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_complete += rq_stats->csum_complete; + s->rx_csum_complete_tail += rq_stats->csum_complete_tail; + s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; s->rx_csum_unnecessary += rq_stats->csum_unnecessary; s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_xdp_drop += rq_stats->xdp_drop; @@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 4640d4f986f8..16c3b785f282 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -71,6 +71,8 @@ struct mlx5e_sw_stats { u64 rx_csum_unnecessary; u64 rx_csum_none; u64 rx_csum_complete; + u64 rx_csum_complete_tail; + u64 rx_csum_complete_tail_slow; u64 rx_csum_unnecessary_inner; u64 rx_xdp_drop; u64 rx_xdp_redirect; @@ -181,6 +183,8 @@ struct mlx5e_rq_stats { u64 packets; u64 bytes; u64 csum_complete; + u64 csum_complete_tail; + u64 csum_complete_tail_slow; u64 csum_unnecessary; u64 csum_unnecessary_inner; u64 csum_none; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index b4967a0ff8c7..d75dc44eb2ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, return true; } +struct ip_ttl_word { + __u8 ttl; + __u8 protocol; + __sum16 check; +}; + +struct ipv6_hoplimit_word { + __be16 payload_len; + __u8 nexthdr; + __u8 hop_limit; +}; + +static bool is_action_keys_supported(const struct flow_action_entry *act) +{ + u32 mask, offset; + u8 htype; + + htype = act->mangle.htype; + offset = act->mangle.offset; + mask = ~act->mangle.mask; + /* For IPv4 & IPv6 header check 4 byte word, + * to determine that modified fields + * are NOT ttl & hop_limit only. 
+ */ + if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) { + struct ip_ttl_word *ttl_word = + (struct ip_ttl_word *)&mask; + + if (offset != offsetof(struct iphdr, ttl) || + ttl_word->protocol || + ttl_word->check) { + return true; + } + } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { + struct ipv6_hoplimit_word *hoplimit_word = + (struct ipv6_hoplimit_word *)&mask; + + if (offset != offsetof(struct ipv6hdr, payload_len) || + hoplimit_word->payload_len || + hoplimit_word->nexthdr) { + return true; + } + } + return false; +} + static bool modify_header_match_supported(struct mlx5_flow_spec *spec, struct flow_action *flow_action, u32 actions, @@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, { const struct flow_action_entry *act; bool modify_ip_header; - u8 htype, ip_proto; void *headers_v; u16 ethertype; + u8 ip_proto; int i; if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) @@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, act->id != FLOW_ACTION_ADD) continue; - htype = act->mangle.htype; - if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 || - htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { + if (is_action_keys_supported(act)) { modify_ip_header = true; break; } @@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, return 0; } -static inline int cmp_encap_info(struct ip_tunnel_key *a, - struct ip_tunnel_key *b) +struct encap_key { + struct ip_tunnel_key *ip_tun_key; + int tunnel_type; +}; + +static inline int cmp_encap_info(struct encap_key *a, + struct encap_key *b) { - return memcmp(a, b, sizeof(*a)); + return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) || + a->tunnel_type != b->tunnel_type; } -static inline int hash_encap_info(struct ip_tunnel_key *key) +static inline int hash_encap_info(struct encap_key *key) { - return jhash(key, sizeof(*key), 0); + return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), + key->tunnel_type); } @@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5e_tc_flow_parse_attr *parse_attr; struct ip_tunnel_info *tun_info; - struct ip_tunnel_key *key; + struct encap_key key, e_key; struct mlx5e_encap_entry *e; unsigned short family; uintptr_t hash_key; @@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, parse_attr = attr->parse_attr; tun_info = &parse_attr->tun_info[out_index]; family = ip_tunnel_info_af(tun_info); - key = &tun_info->key; + key.ip_tun_key = &tun_info->key; + key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev); - hash_key = hash_encap_info(key); + hash_key = hash_encap_info(&key); hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, encap_hlist, hash_key) { - if (!cmp_encap_info(&e->tun_info.key, key)) { + e_key.ip_tun_key = &e->tun_info.key; + e_key.tunnel_type = e->tunnel_type; + if (!cmp_encap_info(&e_key, &key)) { found = true; break; } @@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { - err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, + err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB, parse_attr, hdrs, extack); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ecd2c747f726..8a67fd197b79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); - if (vport) - MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context); @@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(modify_esw_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); - if (vport) - MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } @@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw) { int err; + memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); + err = esw_create_legacy_vepa_table(esw); if (err) return err; @@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, /* Star rule to forward all traffic to uplink vport */ memset(spec, 0, sizeof(*spec)); + memset(&dest, 0, sizeof(dest)); dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport.num = MLX5_VPORT_UPLINK; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f2260391be5b..9b2d78ee22b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) { int err; + memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); err = esw_create_offloads_fdb_tables(esw, nvports); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index 5cf5f2a9d51f..22a2ef111514 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, return ret; } -static void mlx5_fpga_tls_release_swid(struct idr *idr, - spinlock_t *idr_spinlock, u32 swid) +static void *mlx5_fpga_tls_release_swid(struct idr *idr, + spinlock_t *idr_spinlock, u32 swid) { unsigned long flags; + void *ptr; spin_lock_irqsave(idr_spinlock, flags); - idr_remove(idr, swid); + ptr = idr_remove(idr, swid); spin_unlock_irqrestore(idr_spinlock, flags); + return ptr; } static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, @@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, kfree(buf); } -struct mlx5_teardown_stream_context { - struct mlx5_fpga_tls_command_context cmd; - u32 swid; -}; - static void mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev, struct mlx5_fpga_tls_command_context *cmd, struct mlx5_fpga_dma_buf *resp) { - struct mlx5_teardown_stream_context *ctx = - container_of(cmd, struct mlx5_teardown_stream_context, cmd); - if (resp) { u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); @@ 
-186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, mlx5_fpga_err(fdev, "Teardown stream failed with syndrome = %d", syndrome); - else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx)) - mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, - &fdev->tls->tx_idr_spinlock, - ctx->swid); - else - mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr, - &fdev->tls->rx_idr_spinlock, - ctx->swid); } mlx5_fpga_tls_put_command_ctx(cmd); } @@ -225,8 +211,14 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, rcu_read_lock(); flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); - rcu_read_unlock(); + if (unlikely(!flow)) { + rcu_read_unlock(); + WARN_ONCE(1, "Received NULL pointer for handle\n"); + kfree(buf); + return -EINVAL; + } mlx5_fpga_tls_flow_to_cmd(flow, cmd); + rcu_read_unlock(); MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); @@ -238,6 +230,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, buf->complete = mlx_tls_kfree_complete; ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); + if (ret < 0) + kfree(buf); return ret; } @@ -245,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow, u32 swid, gfp_t flags) { - struct mlx5_teardown_stream_context *ctx; + struct mlx5_fpga_tls_command_context *ctx; struct mlx5_fpga_dma_buf *buf; void *cmd; @@ -253,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, if (!ctx) return; - buf = &ctx->cmd.buf; + buf = &ctx->buf; cmd = (ctx + 1); MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); MLX5_SET(tls_cmd, cmd, swid, swid); @@ -264,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, buf->sg[0].data = cmd; buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; - ctx->swid = swid; - mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, + mlx5_fpga_tls_cmd_send(mdev->fpga, ctx, mlx5_fpga_tls_teardown_completion); } @@ -275,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, struct mlx5_fpga_tls *tls = mdev->fpga->tls; void *flow; - rcu_read_lock(); if (direction_sx) - flow = idr_find(&tls->tx_idr, swid); + flow = mlx5_fpga_tls_release_swid(&tls->tx_idr, + &tls->tx_idr_spinlock, + swid); else - flow = idr_find(&tls->rx_idr, swid); - - rcu_read_unlock(); + flow = mlx5_fpga_tls_release_swid(&tls->rx_idr, + &tls->rx_idr_spinlock, + swid); if (!flow) { mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", @@ -289,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, return; } + synchronize_rcu(); /* before kfree(flow) */ mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 70cc906a102b..76716419370d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = { .size = 8, .limit = 4 }, - .mr_cache[16] = { - .size = 8, - .limit = 4 - }, - .mr_cache[17] = { - .size = 8, - .limit = 4 - }, - .mr_cache[18] = { - .size = 8, - .limit = 4 - }, - .mr_cache[19] = { - .size = 4, - .limit = 2 - }, - .mr_cache[20] = { - .size = 4, - .limit = 2 - }, }, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 
21b7f05b16a5..361468e0435d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -317,10 +317,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; i2c_addr = MLX5_I2C_ADDR_LOW; - if (offset >= MLX5_EEPROM_PAGE_LENGTH) { - i2c_addr = MLX5_I2C_ADDR_HIGH; - offset -= MLX5_EEPROM_PAGE_LENGTH; - } MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, module, module_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 370ca94b6775..b8ba74de9555 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -40,6 +40,9 @@ #include "mlx5_core.h" #include "lib/eq.h" +static int mlx5_core_drain_dct(struct mlx5_core_dev *dev, + struct mlx5_core_dct *dct); + static struct mlx5_core_rsc_common * mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) { @@ -227,20 +230,49 @@ static void destroy_resource_common(struct mlx5_core_dev *dev, wait_for_completion(&qp->common.free); } +static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev, + struct mlx5_core_dct *dct, bool need_cleanup) +{ + u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0}; + struct mlx5_core_qp *qp = &dct->mqp; + int err; + + err = mlx5_core_drain_dct(dev, dct); + if (err) { + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + goto destroy; + } else { + mlx5_core_warn( + dev, "failed drain DCT 0x%x with error 0x%x\n", + qp->qpn, err); + return err; + } + } + wait_for_completion(&dct->drained); +destroy: + if (need_cleanup) + destroy_resource_common(dev, &dct->mqp); + MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT); + MLX5_SET(destroy_dct_in, in, dctn, qp->qpn); + MLX5_SET(destroy_dct_in, in, uid, qp->uid); + err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in), + (void *)&out, sizeof(out)); + return err; +} + int mlx5_core_create_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, - u32 *in, int inlen) + u32 *in, int inlen, + u32 *out, int outlen) { - u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0}; - u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0}; - u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0}; struct mlx5_core_qp *qp = &dct->mqp; int err; init_completion(&dct->drained); MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, outlen); if (err) { mlx5_core_warn(dev, "create DCT failed, ret %d\n", err); return err; @@ -254,11 +286,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev, return 0; err_cmd: - MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT); - MLX5_SET(destroy_dct_in, din, dctn, qp->qpn); - MLX5_SET(destroy_dct_in, din, uid, qp->uid); - mlx5_cmd_exec(dev, (void *)&in, sizeof(din), - (void *)&out, sizeof(dout)); + _mlx5_core_destroy_dct(dev, dct, false); return err; } EXPORT_SYMBOL_GPL(mlx5_core_create_dct); @@ -323,29 +351,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev, int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct) { - u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0}; - struct mlx5_core_qp *qp = &dct->mqp; - int err; - - err = mlx5_core_drain_dct(dev, dct); - if (err) { - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - goto destroy; - } else { - mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err); - 
return err; - } - } - wait_for_completion(&dct->drained); -destroy: - destroy_resource_common(dev, &dct->mqp); - MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT); - MLX5_SET(destroy_dct_in, in, dctn, qp->qpn); - MLX5_SET(destroy_dct_in, in, uid, qp->uid); - err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in), - (void *)&out, sizeof(out)); - return err; + return _mlx5_core_destroy_dct(dev, dct, true); } EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index d23d53c0e284..f26a4ca29363 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) return 0; - emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0); + emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0); if (!emad_wq) return -ENOMEM; mlxsw_core->emad_wq = emad_wq; @@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void) { int err; - mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0); + mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0); if (!mlxsw_wq) return -ENOMEM; - mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM, + mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0, mlxsw_core_driver_name); if (!mlxsw_owq) { err = -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 7a15e932ed2f..c1c1965d7acc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, return 0; default: /* Do not consider thresholds for zero temperature. 
*/ - if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) { + if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) { *temp = 0; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index ffee38e36ce8..8648ca171254 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -27,7 +27,7 @@ #define MLXSW_PCI_SW_RESET 0xF0010 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) -#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000 +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000 #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 #define MLXSW_PCI_FW_READY 0xA1844 #define MLXSW_PCI_FW_READY_MASK 0xFFFF diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 9eb63300c1d3..6b8aa3761899 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3126,11 +3126,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, if (err) return err; + mlxsw_sp_port->link.autoneg = autoneg; + if (!netif_running(dev)) return 0; - mlxsw_sp_port->link.autoneg = autoneg; - mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); @@ -3316,7 +3316,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) err = mlxsw_sp_port_ets_set(mlxsw_sp_port, MLXSW_REG_QEEC_HIERARCY_TC, i + 8, i, - false, 0); + true, 100); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 9a79b5e11597..d633bef5f105 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = { {MLXSW_REG_SBXX_DIR_EGRESS, 1}, {MLXSW_REG_SBXX_DIR_EGRESS, 2}, {MLXSW_REG_SBXX_DIR_EGRESS, 3}, + {MLXSW_REG_SBXX_DIR_EGRESS, 15}, }; #define MLXSW_SP_SB_ING_TC_COUNT 8 @@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = { MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI), }; static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, @@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = { MLXSW_SP_SB_CM(0, 7, 4), MLXSW_SP_SB_CM(0, 7, 4), MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), MLXSW_SP_SB_CM(1, 0xff, 4), }; @@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = { MLXSW_SP_SB_PM(0, 0), MLXSW_SP_SB_PM(0, 0), MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(10000, 90000), }; static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 52fed8c7bf1e..902e766a8ed3 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, /* A RIF is not created for macvlan netdevs. Their MAC is used to * populate the FDB */ - if (netif_is_macvlan(dev)) + if (netif_is_macvlan(dev) || netif_is_l3_master(dev)) return 0; for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index f6ce386c3036..50111f228d77 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid_index; int err = 0; - if (switchdev_trans_ph_prepare(trans)) + if (switchdev_trans_ph_commit(trans)) return 0; bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index bd6e9014bc74..7849119d407a 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -142,6 +142,12 @@ struct ks8851_net { static int msg_enable; +/* SPI frame opcodes */ +#define KS_SPIOP_RD (0x00) +#define KS_SPIOP_WR (0x40) +#define KS_SPIOP_RXFIFO (0x80) +#define KS_SPIOP_TXFIFO (0xC0) + /* shift for byte-enable data */ #define BYTE_EN(_x) ((_x) << 2) @@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) /* set dma read address */ ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); - /* start the packet dma process, and set auto-dequeue rx */ - ks8851_wrreg16(ks, KS_RXQCR, - ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); + /* start DMA access */ + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); if (rxlen > 4) { unsigned int rxalign; @@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) } } - ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + /* end DMA access and dequeue packet */ + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF); } } @@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work) static int ks8851_net_open(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); + int ret; + + ret = request_threaded_irq(dev->irq, NULL, ks8851_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + dev->name, ks); + if (ret < 0) { + netdev_err(dev, "failed to get irq\n"); + return ret; + } /* lock the card, even if we may not actually be doing anything * else at the moment */ @@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev) netif_dbg(ks, ifup, ks->netdev, "network device up\n"); mutex_unlock(&ks->lock); + mii_check_link(&ks->mii); return 0; } @@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev) dev_kfree_skb(txb); } + free_irq(dev->irq, ks); + return 0; } @@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi) spi_set_drvdata(spi, ks); + netif_carrier_off(ks->netdev); ndev->if_port = IF_PORT_100BASET; ndev->netdev_ops = &ks8851_netdev_ops; ndev->irq = spi->irq; @@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi) ks8851_read_selftest(ks); ks8851_init_mac(ks); - ret = request_threaded_irq(spi->irq, NULL, ks8851_irq, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - ndev->name, ks); - if (ret < 0) { - dev_err(&spi->dev, "failed to get irq\n"); - goto err_irq; - } - ret = register_netdev(ndev); if (ret) { dev_err(&spi->dev, "failed to 
register network device\n"); @@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi) return 0; - err_netdev: - free_irq(ndev->irq, ks); - -err_irq: +err_id: if (gpio_is_valid(gpio)) gpio_set_value(gpio, 0); -err_id: regulator_disable(ks->vdd_reg); err_reg: regulator_disable(ks->vdd_io); @@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi) dev_info(&spi->dev, "remove\n"); unregister_netdev(priv->netdev); - free_irq(spi->irq, priv); if (gpio_is_valid(priv->gpio)) gpio_set_value(priv->gpio, 0); regulator_disable(priv->vdd_reg); diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h index 852256ef1f22..23da1e3ee429 100644 --- a/drivers/net/ethernet/micrel/ks8851.h +++ b/drivers/net/ethernet/micrel/ks8851.h @@ -11,9 +11,15 @@ */ #define KS_CCR 0x08 +#define CCR_LE (1 << 10) /* KSZ8851-16MLL */ #define CCR_EEPROM (1 << 9) -#define CCR_SPI (1 << 8) -#define CCR_32PIN (1 << 0) +#define CCR_SPI (1 << 8) /* KSZ8851SNL */ +#define CCR_8BIT (1 << 7) /* KSZ8851-16MLL */ +#define CCR_16BIT (1 << 6) /* KSZ8851-16MLL */ +#define CCR_32BIT (1 << 5) /* KSZ8851-16MLL */ +#define CCR_SHARED (1 << 4) /* KSZ8851-16MLL */ +#define CCR_48PIN (1 << 1) /* KSZ8851-16MLL */ +#define CCR_32PIN (1 << 0) /* KSZ8851SNL */ /* MAC address registers */ #define KS_MAR(_m) (0x15 - (_m)) @@ -112,13 +118,13 @@ #define RXCR1_RXE (1 << 0) #define KS_RXCR2 0x76 -#define RXCR2_SRDBL_MASK (0x7 << 5) -#define RXCR2_SRDBL_SHIFT (5) -#define RXCR2_SRDBL_4B (0x0 << 5) -#define RXCR2_SRDBL_8B (0x1 << 5) -#define RXCR2_SRDBL_16B (0x2 << 5) -#define RXCR2_SRDBL_32B (0x3 << 5) -#define RXCR2_SRDBL_FRAME (0x4 << 5) +#define RXCR2_SRDBL_MASK (0x7 << 5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_SHIFT (5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_4B (0x0 << 5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_8B (0x1 << 5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_16B (0x2 << 5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_32B (0x3 << 5) /* KSZ8851SNL */ +#define RXCR2_SRDBL_FRAME (0x4 << 5) /* KSZ8851SNL */ #define RXCR2_IUFFP (1 << 4) #define RXCR2_RXIUFCEZ (1 << 3) #define RXCR2_UDPLFE (1 << 2) @@ -143,8 +149,10 @@ #define RXFSHR_RXCE (1 << 0) #define KS_RXFHBCR 0x7E +#define RXFHBCR_CNT_MASK (0xfff << 0) + #define KS_TXQCR 0x80 -#define TXQCR_AETFE (1 << 2) +#define TXQCR_AETFE (1 << 2) /* KSZ8851SNL */ #define TXQCR_TXQMAM (1 << 1) #define TXQCR_METFE (1 << 0) @@ -167,6 +175,10 @@ #define KS_RXFDPR 0x86 #define RXFDPR_RXFPAI (1 << 14) +#define RXFDPR_WST (1 << 12) /* KSZ8851-16MLL */ +#define RXFDPR_EMS (1 << 11) /* KSZ8851-16MLL */ +#define RXFDPR_RXFP_MASK (0x7ff << 0) +#define RXFDPR_RXFP_SHIFT (0) #define KS_RXDTTR 0x8C #define KS_RXDBCTR 0x8E @@ -184,7 +196,7 @@ #define IRQ_RXMPDI (1 << 4) #define IRQ_LDI (1 << 3) #define IRQ_EDI (1 << 2) -#define IRQ_SPIBEI (1 << 1) +#define IRQ_SPIBEI (1 << 1) /* KSZ8851SNL */ #define IRQ_DEDI (1 << 0) #define KS_RXFCTR 0x9C @@ -257,42 +269,37 @@ #define KS_P1ANLPR 0xEE #define KS_P1SCLMD 0xF4 -#define P1SCLMD_LEDOFF (1 << 15) -#define P1SCLMD_TXIDS (1 << 14) -#define P1SCLMD_RESTARTAN (1 << 13) -#define P1SCLMD_DISAUTOMDIX (1 << 10) -#define P1SCLMD_FORCEMDIX (1 << 9) -#define P1SCLMD_AUTONEGEN (1 << 7) -#define P1SCLMD_FORCE100 (1 << 6) -#define P1SCLMD_FORCEFDX (1 << 5) -#define P1SCLMD_ADV_FLOW (1 << 4) -#define P1SCLMD_ADV_100BT_FDX (1 << 3) -#define P1SCLMD_ADV_100BT_HDX (1 << 2) -#define P1SCLMD_ADV_10BT_FDX (1 << 1) -#define P1SCLMD_ADV_10BT_HDX (1 << 0) #define KS_P1CR 0xF6 -#define P1CR_HP_MDIX (1 << 15) -#define P1CR_REV_POL (1 << 13) -#define 
P1CR_OP_100M (1 << 10) -#define P1CR_OP_FDX (1 << 9) -#define P1CR_OP_MDI (1 << 7) -#define P1CR_AN_DONE (1 << 6) -#define P1CR_LINK_GOOD (1 << 5) -#define P1CR_PNTR_FLOW (1 << 4) -#define P1CR_PNTR_100BT_FDX (1 << 3) -#define P1CR_PNTR_100BT_HDX (1 << 2) -#define P1CR_PNTR_10BT_FDX (1 << 1) -#define P1CR_PNTR_10BT_HDX (1 << 0) +#define P1CR_LEDOFF (1 << 15) +#define P1CR_TXIDS (1 << 14) +#define P1CR_RESTARTAN (1 << 13) +#define P1CR_DISAUTOMDIX (1 << 10) +#define P1CR_FORCEMDIX (1 << 9) +#define P1CR_AUTONEGEN (1 << 7) +#define P1CR_FORCE100 (1 << 6) +#define P1CR_FORCEFDX (1 << 5) +#define P1CR_ADV_FLOW (1 << 4) +#define P1CR_ADV_100BT_FDX (1 << 3) +#define P1CR_ADV_100BT_HDX (1 << 2) +#define P1CR_ADV_10BT_FDX (1 << 1) +#define P1CR_ADV_10BT_HDX (1 << 0) + +#define KS_P1SR 0xF8 +#define P1SR_HP_MDIX (1 << 15) +#define P1SR_REV_POL (1 << 13) +#define P1SR_OP_100M (1 << 10) +#define P1SR_OP_FDX (1 << 9) +#define P1SR_OP_MDI (1 << 7) +#define P1SR_AN_DONE (1 << 6) +#define P1SR_LINK_GOOD (1 << 5) +#define P1SR_PNTR_FLOW (1 << 4) +#define P1SR_PNTR_100BT_FDX (1 << 3) +#define P1SR_PNTR_100BT_HDX (1 << 2) +#define P1SR_PNTR_10BT_FDX (1 << 1) +#define P1SR_PNTR_10BT_HDX (1 << 0) /* TX Frame control */ - #define TXFR_TXIC (1 << 15) #define TXFR_TXFID_MASK (0x3f << 0) #define TXFR_TXFID_SHIFT (0) - -/* SPI frame opcodes */ -#define KS_SPIOP_RD (0x00) -#define KS_SPIOP_WR (0x40) -#define KS_SPIOP_RXFIFO (0x80) -#define KS_SPIOP_TXFIFO (0xC0) diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 35f8c9ef204d..c946841c0a06 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -40,6 +40,8 @@ #include <linux/of_device.h> #include <linux/of_net.h> +#include "ks8851.h" + #define DRV_NAME "ks8851_mll" static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; @@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; #define TX_BUF_SIZE 2000 #define RX_BUF_SIZE 2000 -#define KS_CCR 0x08 -#define CCR_EEPROM (1 << 9) -#define CCR_SPI (1 << 8) -#define CCR_8BIT (1 << 7) -#define CCR_16BIT (1 << 6) -#define CCR_32BIT (1 << 5) -#define CCR_SHARED (1 << 4) -#define CCR_32PIN (1 << 0) - -/* MAC address registers */ -#define KS_MARL 0x10 -#define KS_MARM 0x12 -#define KS_MARH 0x14 - -#define KS_OBCR 0x20 -#define OBCR_ODS_16MA (1 << 6) - -#define KS_EEPCR 0x22 -#define EEPCR_EESA (1 << 4) -#define EEPCR_EESB (1 << 3) -#define EEPCR_EEDO (1 << 2) -#define EEPCR_EESCK (1 << 1) -#define EEPCR_EECS (1 << 0) - -#define KS_MBIR 0x24 -#define MBIR_TXMBF (1 << 12) -#define MBIR_TXMBFA (1 << 11) -#define MBIR_RXMBF (1 << 4) -#define MBIR_RXMBFA (1 << 3) - -#define KS_GRR 0x26 -#define GRR_QMU (1 << 1) -#define GRR_GSR (1 << 0) - -#define KS_WFCR 0x2A -#define WFCR_MPRXE (1 << 7) -#define WFCR_WF3E (1 << 3) -#define WFCR_WF2E (1 << 2) -#define WFCR_WF1E (1 << 1) -#define WFCR_WF0E (1 << 0) - -#define KS_WF0CRC0 0x30 -#define KS_WF0CRC1 0x32 -#define KS_WF0BM0 0x34 -#define KS_WF0BM1 0x36 -#define KS_WF0BM2 0x38 -#define KS_WF0BM3 0x3A - -#define KS_WF1CRC0 0x40 -#define KS_WF1CRC1 0x42 -#define KS_WF1BM0 0x44 -#define KS_WF1BM1 0x46 -#define KS_WF1BM2 0x48 -#define KS_WF1BM3 0x4A - -#define KS_WF2CRC0 0x50 -#define KS_WF2CRC1 0x52 -#define KS_WF2BM0 0x54 -#define KS_WF2BM1 0x56 -#define KS_WF2BM2 0x58 -#define KS_WF2BM3 0x5A - -#define KS_WF3CRC0 0x60 -#define KS_WF3CRC1 0x62 -#define KS_WF3BM0 0x64 -#define KS_WF3BM1 0x66 -#define KS_WF3BM2 0x68 -#define KS_WF3BM3 
0x6A - -#define KS_TXCR 0x70 -#define TXCR_TCGICMP (1 << 8) -#define TXCR_TCGUDP (1 << 7) -#define TXCR_TCGTCP (1 << 6) -#define TXCR_TCGIP (1 << 5) -#define TXCR_FTXQ (1 << 4) -#define TXCR_TXFCE (1 << 3) -#define TXCR_TXPE (1 << 2) -#define TXCR_TXCRC (1 << 1) -#define TXCR_TXE (1 << 0) - -#define KS_TXSR 0x72 -#define TXSR_TXLC (1 << 13) -#define TXSR_TXMC (1 << 12) -#define TXSR_TXFID_MASK (0x3f << 0) -#define TXSR_TXFID_SHIFT (0) -#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f) - - -#define KS_RXCR1 0x74 -#define RXCR1_FRXQ (1 << 15) -#define RXCR1_RXUDPFCC (1 << 14) -#define RXCR1_RXTCPFCC (1 << 13) -#define RXCR1_RXIPFCC (1 << 12) -#define RXCR1_RXPAFMA (1 << 11) -#define RXCR1_RXFCE (1 << 10) -#define RXCR1_RXEFE (1 << 9) -#define RXCR1_RXMAFMA (1 << 8) -#define RXCR1_RXBE (1 << 7) -#define RXCR1_RXME (1 << 6) -#define RXCR1_RXUE (1 << 5) -#define RXCR1_RXAE (1 << 4) -#define RXCR1_RXINVF (1 << 1) -#define RXCR1_RXE (1 << 0) #define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \ RXCR1_RXMAFMA | RXCR1_RXPAFMA) - -#define KS_RXCR2 0x76 -#define RXCR2_SRDBL_MASK (0x7 << 5) -#define RXCR2_SRDBL_SHIFT (5) -#define RXCR2_SRDBL_4B (0x0 << 5) -#define RXCR2_SRDBL_8B (0x1 << 5) -#define RXCR2_SRDBL_16B (0x2 << 5) -#define RXCR2_SRDBL_32B (0x3 << 5) -/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */ -#define RXCR2_IUFFP (1 << 4) -#define RXCR2_RXIUFCEZ (1 << 3) -#define RXCR2_UDPLFE (1 << 2) -#define RXCR2_RXICMPFCC (1 << 1) -#define RXCR2_RXSAF (1 << 0) - -#define KS_TXMIR 0x78 - -#define KS_RXFHSR 0x7C -#define RXFSHR_RXFV (1 << 15) -#define RXFSHR_RXICMPFCS (1 << 13) -#define RXFSHR_RXIPFCS (1 << 12) -#define RXFSHR_RXTCPFCS (1 << 11) -#define RXFSHR_RXUDPFCS (1 << 10) -#define RXFSHR_RXBF (1 << 7) -#define RXFSHR_RXMF (1 << 6) -#define RXFSHR_RXUF (1 << 5) -#define RXFSHR_RXMR (1 << 4) -#define RXFSHR_RXFT (1 << 3) -#define RXFSHR_RXFTL (1 << 2) -#define RXFSHR_RXRF (1 << 1) -#define RXFSHR_RXCE (1 << 0) -#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\ - RXFSHR_RXFTL | RXFSHR_RXMR |\ - RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\ - RXFSHR_RXTCPFCS) -#define KS_RXFHBCR 0x7E -#define RXFHBCR_CNT_MASK 0x0FFF - -#define KS_TXQCR 0x80 -#define TXQCR_AETFE (1 << 2) -#define TXQCR_TXQMAM (1 << 1) -#define TXQCR_METFE (1 << 0) - -#define KS_RXQCR 0x82 -#define RXQCR_RXDTTS (1 << 12) -#define RXQCR_RXDBCTS (1 << 11) -#define RXQCR_RXFCTS (1 << 10) -#define RXQCR_RXIPHTOE (1 << 9) -#define RXQCR_RXDTTE (1 << 7) -#define RXQCR_RXDBCTE (1 << 6) -#define RXQCR_RXFCTE (1 << 5) -#define RXQCR_ADRFE (1 << 4) -#define RXQCR_SDA (1 << 3) -#define RXQCR_RRXEF (1 << 0) #define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE) -#define KS_TXFDPR 0x84 -#define TXFDPR_TXFPAI (1 << 14) -#define TXFDPR_TXFP_MASK (0x7ff << 0) -#define TXFDPR_TXFP_SHIFT (0) - -#define KS_RXFDPR 0x86 -#define RXFDPR_RXFPAI (1 << 14) - -#define KS_RXDTTR 0x8C -#define KS_RXDBCTR 0x8E - -#define KS_IER 0x90 -#define KS_ISR 0x92 -#define IRQ_LCI (1 << 15) -#define IRQ_TXI (1 << 14) -#define IRQ_RXI (1 << 13) -#define IRQ_RXOI (1 << 11) -#define IRQ_TXPSI (1 << 9) -#define IRQ_RXPSI (1 << 8) -#define IRQ_TXSAI (1 << 6) -#define IRQ_RXWFDI (1 << 5) -#define IRQ_RXMPDI (1 << 4) -#define IRQ_LDI (1 << 3) -#define IRQ_EDI (1 << 2) -#define IRQ_SPIBEI (1 << 1) -#define IRQ_DEDI (1 << 0) - -#define KS_RXFCTR 0x9C -#define RXFCTR_THRESHOLD_MASK 0x00FF - -#define KS_RXFC 0x9D -#define RXFCTR_RXFC_MASK (0xff << 8) -#define RXFCTR_RXFC_SHIFT (8) -#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff) -#define RXFCTR_RXFCT_MASK (0xff << 0) -#define RXFCTR_RXFCT_SHIFT (0) 
- -#define KS_TXNTFSR 0x9E - -#define KS_MAHTR0 0xA0 -#define KS_MAHTR1 0xA2 -#define KS_MAHTR2 0xA4 -#define KS_MAHTR3 0xA6 - -#define KS_FCLWR 0xB0 -#define KS_FCHWR 0xB2 -#define KS_FCOWR 0xB4 - -#define KS_CIDER 0xC0 -#define CIDER_ID 0x8870 -#define CIDER_REV_MASK (0x7 << 1) -#define CIDER_REV_SHIFT (1) -#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7) - -#define KS_CGCR 0xC6 -#define KS_IACR 0xC8 -#define IACR_RDEN (1 << 12) -#define IACR_TSEL_MASK (0x3 << 10) -#define IACR_TSEL_SHIFT (10) -#define IACR_TSEL_MIB (0x3 << 10) -#define IACR_ADDR_MASK (0x1f << 0) -#define IACR_ADDR_SHIFT (0) - -#define KS_IADLR 0xD0 -#define KS_IAHDR 0xD2 - -#define KS_PMECR 0xD4 -#define PMECR_PME_DELAY (1 << 14) -#define PMECR_PME_POL (1 << 12) -#define PMECR_WOL_WAKEUP (1 << 11) -#define PMECR_WOL_MAGICPKT (1 << 10) -#define PMECR_WOL_LINKUP (1 << 9) -#define PMECR_WOL_ENERGY (1 << 8) -#define PMECR_AUTO_WAKE_EN (1 << 7) -#define PMECR_WAKEUP_NORMAL (1 << 6) -#define PMECR_WKEVT_MASK (0xf << 2) -#define PMECR_WKEVT_SHIFT (2) -#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf) -#define PMECR_WKEVT_ENERGY (0x1 << 2) -#define PMECR_WKEVT_LINK (0x2 << 2) -#define PMECR_WKEVT_MAGICPKT (0x4 << 2) -#define PMECR_WKEVT_FRAME (0x8 << 2) -#define PMECR_PM_MASK (0x3 << 0) -#define PMECR_PM_SHIFT (0) -#define PMECR_PM_NORMAL (0x0 << 0) -#define PMECR_PM_ENERGY (0x1 << 0) -#define PMECR_PM_SOFTDOWN (0x2 << 0) -#define PMECR_PM_POWERSAVE (0x3 << 0) - -/* Standard MII PHY data */ -#define KS_P1MBCR 0xE4 -#define P1MBCR_FORCE_FDX (1 << 8) - -#define KS_P1MBSR 0xE6 -#define P1MBSR_AN_COMPLETE (1 << 5) -#define P1MBSR_AN_CAPABLE (1 << 3) -#define P1MBSR_LINK_UP (1 << 2) - -#define KS_PHY1ILR 0xE8 -#define KS_PHY1IHR 0xEA -#define KS_P1ANAR 0xEC -#define KS_P1ANLPR 0xEE - -#define KS_P1SCLMD 0xF4 -#define P1SCLMD_LEDOFF (1 << 15) -#define P1SCLMD_TXIDS (1 << 14) -#define P1SCLMD_RESTARTAN (1 << 13) -#define P1SCLMD_DISAUTOMDIX (1 << 10) -#define P1SCLMD_FORCEMDIX (1 << 9) -#define P1SCLMD_AUTONEGEN (1 << 7) -#define P1SCLMD_FORCE100 (1 << 6) -#define P1SCLMD_FORCEFDX (1 << 5) -#define P1SCLMD_ADV_FLOW (1 << 4) -#define P1SCLMD_ADV_100BT_FDX (1 << 3) -#define P1SCLMD_ADV_100BT_HDX (1 << 2) -#define P1SCLMD_ADV_10BT_FDX (1 << 1) -#define P1SCLMD_ADV_10BT_HDX (1 << 0) - -#define KS_P1CR 0xF6 -#define P1CR_HP_MDIX (1 << 15) -#define P1CR_REV_POL (1 << 13) -#define P1CR_OP_100M (1 << 10) -#define P1CR_OP_FDX (1 << 9) -#define P1CR_OP_MDI (1 << 7) -#define P1CR_AN_DONE (1 << 6) -#define P1CR_LINK_GOOD (1 << 5) -#define P1CR_PNTR_FLOW (1 << 4) -#define P1CR_PNTR_100BT_FDX (1 << 3) -#define P1CR_PNTR_100BT_HDX (1 << 2) -#define P1CR_PNTR_10BT_FDX (1 << 1) -#define P1CR_PNTR_10BT_HDX (1 << 0) - -/* TX Frame control */ - -#define TXFR_TXIC (1 << 15) -#define TXFR_TXFID_MASK (0x3f << 0) -#define TXFR_TXFID_SHIFT (0) - -#define KS_P1SR 0xF8 -#define P1SR_HP_MDIX (1 << 15) -#define P1SR_REV_POL (1 << 13) -#define P1SR_OP_100M (1 << 10) -#define P1SR_OP_FDX (1 << 9) -#define P1SR_OP_MDI (1 << 7) -#define P1SR_AN_DONE (1 << 6) -#define P1SR_LINK_GOOD (1 << 5) -#define P1SR_PNTR_FLOW (1 << 4) -#define P1SR_PNTR_100BT_FDX (1 << 3) -#define P1SR_PNTR_100BT_HDX (1 << 2) -#define P1SR_PNTR_10BT_FDX (1 << 1) -#define P1SR_PNTR_10BT_HDX (1 << 0) - #define ENUM_BUS_NONE 0 #define ENUM_BUS_8BIT 1 #define ENUM_BUS_16BIT 2 @@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks) ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */ - ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK); + ks_wrreg16(ks, 
KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK); /* Setup RxQ Command Control (RXQCR) */ ks->rc_rxqcr = RXQCR_CMD_CNTL; @@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks) */ w = ks_rdreg16(ks, KS_P1MBCR); - w &= ~P1MBCR_FORCE_FDX; + w &= ~BMCR_FULLDPLX; ks_wrreg16(ks, KS_P1MBCR, w); w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; @@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev) ks_setup_int(ks); data = ks_rdreg16(ks, KS_OBCR); - ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); + ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA); /* overwriting the default MAC address */ if (pdev->dev.of_node) { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index a1d0d6e42533..d715ef4fc92f 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port, struct netdev_hw_addr *hw_addr) { struct ocelot *ocelot = port->ocelot; - struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL); + struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC); if (!ha) return -ENOMEM; @@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) ETH_GSTRING_LEN); } -static void ocelot_check_stats(struct work_struct *work) +static void ocelot_update_stats(struct ocelot *ocelot) { - struct delayed_work *del_work = to_delayed_work(work); - struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work); int i, j; mutex_lock(&ocelot->stats_lock); @@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work) } } - cancel_delayed_work(&ocelot->stats_work); + mutex_unlock(&ocelot->stats_lock); +} + +static void ocelot_check_stats_work(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct ocelot *ocelot = container_of(del_work, struct ocelot, + stats_work); + + ocelot_update_stats(ocelot); + queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); - - mutex_unlock(&ocelot->stats_lock); } static void ocelot_get_ethtool_stats(struct net_device *dev, @@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev, int i; /* check and update now */ - ocelot_check_stats(&ocelot->stats_work.work); + ocelot_update_stats(ocelot); /* Copy all counters */ for (i = 0; i < ocelot->num_stats; i++) @@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot) ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), ANA_CPUQ_8021_CFG, i); - INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats); + INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work); queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); return 0; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 7cde387e5ec6..51cd57ab3d95 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, dma_object->addr))) { vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); + memblock = NULL; goto exit; } diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c index 9852080cf454..ff3913085665 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/cls.c +++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c @@ -39,7 +39,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct 
tc_cls_u32_knode *knode, } if (knode->sel->off || knode->sel->offshift || knode->sel->offmask || knode->sel->offoff || knode->fshift) { - NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported"); + NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported"); return false; } if (knode->sel->hoff || knode->sel->hmask) { @@ -78,7 +78,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode, k = &knode->sel->keys[0]; if (k->offmask) { - NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported"); + NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported"); return false; } if (k->off) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index eeda4ed98333..e336f6ee94f5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, tmp_push_vlan_tci = FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | - FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) | - NFP_FL_PUSH_VLAN_CFI; + FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid); push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 4fcaf11ed56e..0ed51e79db00 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -26,7 +26,7 @@ #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) -#define NFP_FLOWER_MASK_VLAN_CFI BIT(12) +#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12) #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) @@ -82,7 +82,6 @@ #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) -#define NFP_FL_PUSH_VLAN_CFI BIT(12) #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) #define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index e03c8ef2c28c..9b8b843d0340 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, flow_rule_match_vlan(rule, &match); /* Populate the tci field. 
*/ - if (match.key->vlan_id || match.key->vlan_priority) { - tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, - match.key->vlan_priority) | - FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, - match.key->vlan_id) | - NFP_FLOWER_MASK_VLAN_CFI; - ext->tci = cpu_to_be16(tmp_tci); - tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, - match.mask->vlan_priority) | - FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, - match.mask->vlan_id) | - NFP_FLOWER_MASK_VLAN_CFI; - msk->tci = cpu_to_be16(tmp_tci); - } + tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + match.key->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + match.key->vlan_id); + ext->tci = cpu_to_be16(tmp_tci); + + tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + match.mask->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + match.mask->vlan_id); + msk->tci = cpu_to_be16(tmp_tci); } } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index d2c803bb4e56..94d228c04496 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) ret = dev_queue_xmit(skb); nfp_repr_inc_tx_stats(netdev, len, ret); - return ret; + return NETDEV_TX_OK; } static int nfp_repr_stop(struct net_device *netdev) @@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; - netdev->priv_flags |= IFF_NO_QUEUE; + netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; netdev->features |= NETIF_F_LLTX; if (nfp_app_has_tc(app)) { diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 43a57ec296fd..127c89b22ef0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -431,12 +431,16 @@ struct qed_qm_info { u8 num_pf_rls; }; +#define QED_OVERFLOW_BIT 1 + struct qed_db_recovery_info { struct list_head list; /* Lock to protect the doorbell recovery mechanism list */ spinlock_t lock; + bool dorq_attn; u32 db_recovery_counter; + unsigned long overflow; }; struct storm_stats { @@ -920,8 +924,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); /* doorbell recovery mechanism */ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); -void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, - enum qed_db_rec_exec db_exec); +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn); bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); /* Other Linux specific common definitions */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 9df8c4b3b54e..866cdc86a3f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn, /* Doorbell address sanity (address within doorbell bar range) */ static bool qed_db_rec_sanity(struct qed_dev *cdev, - void __iomem *db_addr, void *db_data) + void __iomem *db_addr, + enum qed_db_rec_width db_width, + void *db_data) { + u32 width = (db_width == DB_REC_WIDTH_32B) ? 
32 : 64; + /* Make sure doorbell address is within the doorbell bar */ if (db_addr < cdev->doorbells || - (u8 __iomem *)db_addr > + (u8 __iomem *)db_addr + width > (u8 __iomem *)cdev->doorbells + cdev->db_size) { WARN(true, "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", @@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev, } /* Sanitize doorbell address */ - if (!qed_db_rec_sanity(cdev, db_addr, db_data)) + if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data)) return -EINVAL; /* Obtain hwfn from doorbell address */ @@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev, return 0; } - /* Sanitize doorbell address */ - if (!qed_db_rec_sanity(cdev, db_addr, db_data)) - return -EINVAL; - /* Obtain hwfn from doorbell address */ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); @@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn) /* Ring the doorbell of a single doorbell recovery entry */ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, - struct qed_db_recovery_entry *db_entry, - enum qed_db_rec_exec db_exec) -{ - if (db_exec != DB_REC_ONCE) { - /* Print according to width */ - if (db_entry->db_width == DB_REC_WIDTH_32B) { - DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "%s doorbell address %p data %x\n", - db_exec == DB_REC_DRY_RUN ? - "would have rung" : "ringing", - db_entry->db_addr, - *(u32 *)db_entry->db_data); - } else { - DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "%s doorbell address %p data %llx\n", - db_exec == DB_REC_DRY_RUN ? - "would have rung" : "ringing", - db_entry->db_addr, - *(u64 *)(db_entry->db_data)); - } + struct qed_db_recovery_entry *db_entry) +{ + /* Print according to width */ + if (db_entry->db_width == DB_REC_WIDTH_32B) { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "ringing doorbell address %p data %x\n", + db_entry->db_addr, + *(u32 *)db_entry->db_data); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "ringing doorbell address %p data %llx\n", + db_entry->db_addr, + *(u64 *)(db_entry->db_data)); } /* Sanity */ if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, - db_entry->db_data)) + db_entry->db_width, db_entry->db_data)) return; /* Flush the write combined buffer. Since there are multiple doorbelling @@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, wmb(); /* Ring the doorbell */ - if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) { - if (db_entry->db_width == DB_REC_WIDTH_32B) - DIRECT_REG_WR(db_entry->db_addr, - *(u32 *)(db_entry->db_data)); - else - DIRECT_REG_WR64(db_entry->db_addr, - *(u64 *)(db_entry->db_data)); - } + if (db_entry->db_width == DB_REC_WIDTH_32B) + DIRECT_REG_WR(db_entry->db_addr, + *(u32 *)(db_entry->db_data)); + else + DIRECT_REG_WR64(db_entry->db_addr, + *(u64 *)(db_entry->db_data)); /* Flush the write combined buffer. Next doorbell may come from a * different entity to the same address... @@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, } /* Traverse the doorbell recovery entry list and ring all the doorbells */ -void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, - enum qed_db_rec_exec db_exec) +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn) { struct qed_db_recovery_entry *db_entry = NULL; - if (db_exec != DB_REC_ONCE) { - DP_NOTICE(p_hwfn, - "Executing doorbell recovery. Counter was %d\n", - p_hwfn->db_recovery_info.db_recovery_counter); + DP_NOTICE(p_hwfn, "Executing doorbell recovery. 
Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); - /* Track amount of times recovery was executed */ - p_hwfn->db_recovery_info.db_recovery_counter++; - } + /* Track amount of times recovery was executed */ + p_hwfn->db_recovery_info.db_recovery_counter++; /* Protect the list */ spin_lock_bh(&p_hwfn->db_recovery_info.lock); list_for_each_entry(db_entry, - &p_hwfn->db_recovery_info.list, list_entry) { - qed_db_recovery_ring(p_hwfn, db_entry, db_exec); - if (db_exec == DB_REC_ONCE) - break; - } - + &p_hwfn->db_recovery_info.list, list_entry) + qed_db_recovery_ring(p_hwfn, db_entry); spin_unlock_bh(&p_hwfn->db_recovery_info.lock); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index e23980e301b6..8848d5bed6e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, u32 count = QED_DB_REC_COUNT; u32 usage = 1; + /* Flush any pending (e)dpms as they may never arrive */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); + /* wait for usage to zero or count to run out. This is necessary since * EDPM doorbell transactions can take multiple 64b cycles, and as such * can "split" over the pci. Possibly, the doorbell drop can happen with @@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 overflow; + u32 attn_ovfl, cur_ovfl; int rc; - overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); - DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow); - if (!overflow) { - qed_db_recovery_execute(p_hwfn, DB_REC_ONCE); + attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT, + &p_hwfn->db_recovery_info.overflow); + cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + if (!cur_ovfl && !attn_ovfl) return 0; - } - if (qed_edpm_enabled(p_hwfn)) { + DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n", + attn_ovfl, cur_ovfl); + + if (cur_ovfl && !p_hwfn->db_bar_no_edpm) { rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); if (rc) return rc; } - /* Flush any pending (e)dpm as they may never arrive */ - qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); - /* Release overflow sticky indication (stop silently dropping everything) */ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); /* Repeat all last doorbells (doorbell drop recovery) */ - qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); + qed_db_recovery_execute(p_hwfn); return 0; } -static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) +static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn) { - u32 int_sts, first_drop_reason, details, address, all_drops_reason; struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; + u32 overflow; int rc; - int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); - DP_NOTICE(p_hwfn->cdev, "DORQ attention. 
int_sts was %x\n", int_sts); + overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + if (!overflow) + goto out; + + /* Run PF doorbell recovery in next periodic handler */ + set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); + + if (!p_hwfn->db_bar_no_edpm) { + rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); + if (rc) + goto out; + } + + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); +out: + /* Schedule the handler even if overflow was not detected */ + qed_periodic_db_rec_start(p_hwfn); +} + +static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) +{ + u32 int_sts, first_drop_reason, details, address, all_drops_reason; + struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; /* int_sts may be zero since all PFs were interrupted for doorbell * overflow but another one already handled it. Can abort here. If * This PF also requires overflow recovery we will be interrupted again. * The masked almost full indication may also be set. Ignoring. */ + int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) return 0; + DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); + /* check if db_drop or overflow happened */ if (int_sts & (DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { @@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, first_drop_reason, all_drops_reason); - rc = qed_db_rec_handler(p_hwfn, p_ptt); - qed_periodic_db_rec_start(p_hwfn); - if (rc) - return rc; - /* Clear the doorbell drop details and prepare for next drop */ qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); @@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) return -EINVAL; } +static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) +{ + p_hwfn->db_recovery_info.dorq_attn = true; + qed_dorq_attn_overflow(p_hwfn); + + return qed_dorq_attn_int_sts(p_hwfn); +} + +static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->db_recovery_info.dorq_attn) + goto out; + + /* Call DORQ callback if the attention was missed */ + qed_dorq_attn_cb(p_hwfn); +out: + p_hwfn->db_recovery_info.dorq_attn = false; +} + /* Instead of major changes to the data-structure, we have a some 'special' * identifiers for sources that changed meaning between adapters. */ @@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, } } + /* Handle missed DORQ attention */ + qed_dorq_attn_handler(p_hwfn); + /* Clear IGU indication for the deasserted bits */ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_IGU_CMD + diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 1f356ed4f761..d473b522afc5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); /** * @brief - Doorbell Recovery handler. - * Run DB_REAL_DEAL doorbell recovery in case of PF overflow - * (and flush DORQ if needed), otherwise run DB_REC_ONCE. + * Run doorbell recovery in case of PF overflow (and flush DORQ if + * needed). 
* * @param p_hwfn * @param p_ptt diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index f164d4acebcb..6de23b56b294 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, } } -#define QED_PERIODIC_DB_REC_COUNT 100 +#define QED_PERIODIC_DB_REC_COUNT 10 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 #define QED_PERIODIC_DB_REC_INTERVAL \ msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 9faaa6df78ed..2f318aaf2b05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; } else { DP_INFO(p_hwfn, - "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", + "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n", vf->abs_vf_id, req->vfdev_info.eth_fp_hsi_major, req->vfdev_info.eth_fp_hsi_minor, diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 5f3f42a25361..bddb2b5982dc 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); if (IS_ERR(ptp->clock)) { - rc = -EINVAL; DP_ERR(edev, "PTP clock registration failed\n"); + qede_ptp_disable(edev); + rc = -EINVAL; goto err2; } return 0; -err2: - qede_ptp_disable(edev); - ptp->clock = NULL; err1: kfree(ptp); +err2: edev->ptp = NULL; return rc; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 0c443ea98479..374a4d4371f9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -497,7 +497,7 @@ struct qlcnic_hardware_context { u16 board_type; u16 supported_type; - u16 link_speed; + u32 link_speed; u16 link_duplex; u16 link_autoneg; u16 module_type; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 3b0adda7cc9c..a4cd6f2cfb86 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); + if (!skb) + break; qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); skb_put(skb, QLCNIC_ILB_PKT_SIZE); adapter->ahw->diag_cnt = 0; diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index cfb67b746595..58e0ca9093d3 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -482,7 +482,7 @@ static void hardware_init(struct net_device *dev) write_reg_high(ioaddr, IMR, ISRh_RxErr); lp->tx_unit_busy = 0; - lp->pac_cnt_in_tx_buf = 0; + lp->pac_cnt_in_tx_buf = 0; lp->saved_tx_size = 0; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index c29dde064078..ed651dde6ef9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ 
b/drivers/net/ethernet/realtek/r8169.c @@ -28,6 +28,7 @@ #include <linux/pm_runtime.h> #include <linux/firmware.h> #include <linux/prefetch.h> +#include <linux/pci-aspm.h> #include <linux/ipv6.h> #include <net/ip6_checksum.h> @@ -678,6 +679,7 @@ struct rtl8169_private { struct work_struct work; } wk; + unsigned irq_enabled:1; unsigned supports_gmii:1; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; @@ -1293,6 +1295,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) static void rtl_irq_disable(struct rtl8169_private *tp) { RTL_W16(tp, IntrMask, 0); + tp->irq_enabled = 0; } #define RTL_EVENT_NAPI_RX (RxOK | RxErr) @@ -1301,6 +1304,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp) static void rtl_irq_enable(struct rtl8169_private *tp) { + tp->irq_enabled = 1; RTL_W16(tp, IntrMask, tp->irq_mask); } @@ -5457,7 +5461,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) tp->cp_cmd |= PktCntrDisable | INTT_1; RTL_W16(tp, CPlusCmd, tp->cp_cmd); - RTL_W16(tp, IntrMitigate, 0x5151); + RTL_W16(tp, IntrMitigate, 0x5100); /* Work around for RxFIFO overflow. */ if (tp->mac_version == RTL_GIGA_MAC_VER_11) { @@ -6520,9 +6524,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { struct rtl8169_private *tp = dev_instance; u16 status = RTL_R16(tp, IntrStatus); - u16 irq_mask = RTL_R16(tp, IntrMask); - if (status == 0xffff || !(status & irq_mask)) + if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask)) return IRQ_NONE; if (unlikely(status & SYSErr)) { @@ -6540,7 +6543,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); } - if (status & RTL_EVENT_NAPI) { + if (status & (RTL_EVENT_NAPI | LinkChg)) { rtl_irq_disable(tp); napi_schedule_irqoff(&tp->napi); } @@ -7350,6 +7353,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; + /* Disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. + */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); if (rc < 0) { diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 6073387511f8..67f9bb6e941b 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -730,10 +730,10 @@ static u16 sis900_default_phy(struct net_device * net_dev) status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); /* Link ON & Not select default PHY & not ghost PHY */ - if ((status & MII_STAT_LINK) && !default_phy && - (phy->phy_types != UNKNOWN)) - default_phy = phy; - else { + if ((status & MII_STAT_LINK) && !default_phy && + (phy->phy_types != UNKNOWN)) { + default_phy = phy; + } else { status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL); mdio_write(net_dev, phy->phy_addr, MII_CONTROL, status | MII_CNTL_AUTO | MII_CNTL_ISOLATE); @@ -741,7 +741,7 @@ static u16 sis900_default_phy(struct net_device * net_dev) phy_home = phy; else if(phy->phy_types == LAN) phy_lan = phy; - } + } } if (!default_phy && phy_home) diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index a18149720aa2..cba5881b2746 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -673,7 +673,8 @@ static void netsec_process_tx(struct netsec_priv *priv) } static void *netsec_alloc_rx_data(struct netsec_priv *priv, - dma_addr_t *dma_handle, u16 *desc_len) + dma_addr_t *dma_handle, u16 *desc_len, + bool napi) { size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); size_t payload_len = NETSEC_RX_BUF_SZ; @@ -682,7 +683,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv, total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD); - buf = napi_alloc_frag(total_len); + buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len); if (!buf) return NULL; @@ -765,7 +766,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) /* allocate a fresh buffer and map it to the hardware. 
* This will eventually replace the old buffer in the hardware */ - buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len); + buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len, + true); if (unlikely(!buf_addr)) break; @@ -1069,7 +1071,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv) void *buf; u16 len; - buf = netsec_alloc_rx_data(priv, &dma_handle, &len); + buf = netsec_alloc_rx_data(priv, &dma_handle, &len, + false); if (!buf) { netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); goto err_out; diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 40d6356a7e73..3dfb07a78952 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -29,11 +29,13 @@ /* Specific functions used for Ring mode */ /* Enhanced descriptors */ -static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) +static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end, + int bfsize) { - p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - << ERDES1_BUFFER2_SIZE_SHIFT) - & ERDES1_BUFFER2_SIZE_MASK); + if (bfsize == BUF_SIZE_16KiB) + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB + << ERDES1_BUFFER2_SIZE_SHIFT) + & ERDES1_BUFFER2_SIZE_MASK); if (end) p->des1 |= cpu_to_le32(ERDES1_END_RING); @@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) } /* Normal descriptors */ -static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) +static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize) { - p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) - << RDES1_BUFFER2_SIZE_SHIFT) - & RDES1_BUFFER2_SIZE_MASK); + if (bfsize >= BUF_SIZE_2KiB) { + int bfsize2; + + bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT) + & RDES1_BUFFER2_SIZE_MASK); + } if (end) p->des1 |= cpu_to_le32(RDES1_END_RING); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 062a600fa5a7..21428537e231 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -333,6 +333,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, */ dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev, "stm32_pwr_wakeup"); + if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) { err = device_init_wakeup(&pdev->dev, true); if (err) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 7fbb6a4dbf51..e061e9f5fad7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -296,7 +296,7 @@ exit: } static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { dwmac4_set_rx_owner(p, disable_rx_ic); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 1d858fdec997..98fa471da7c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc, } static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { 
dwxgmac2_set_rx_owner(p, disable_rx_ic); } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 5ef91a790f9d..5202d6ad7919 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, if (unlikely(rdes0 & RDES0_OWN)) return dma_own; + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { + stats->rx_length_errors++; + return discard_frame; + } + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { x->rx_desc++; @@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, * It doesn't match with the information reported into the databook. * At any rate, we need to understand if the CSUM hw computation is ok * and report this info to the upper layers. */ - ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), - !!(rdes0 & RDES0_FRAME_TYPE), - !!(rdes0 & ERDES0_RX_MAC_ADDR)); + if (likely(ret == good_frame)) + ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), + !!(rdes0 & RDES0_FRAME_TYPE), + !!(rdes0 & ERDES0_RX_MAC_ADDR)); if (unlikely(rdes0 & RDES0_DRIBBLING)) x->dribbling_bit++; @@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, } static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { + int bfsize1; + p->des0 |= cpu_to_le32(RDES0_OWN); - p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); + + bfsize1 = min(bfsize, BUF_SIZE_8KiB); + p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK); if (mode == STMMAC_CHAIN_MODE) ehn_desc_rx_set_on_chain(p); else - ehn_desc_rx_set_on_ring(p, end); + ehn_desc_rx_set_on_ring(p, end, bfsize); if (disable_rx_ic) p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 92b8944f26e3..5bb00234d961 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -33,7 +33,7 @@ struct dma_extended_desc; struct stmmac_desc_ops { /* DMA RX descriptor ring initialization */ void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, - int end); + int end, int bfsize); /* DMA TX descriptor ring initialization */ void (*init_tx_desc)(struct dma_desc *p, int mode, int end); /* Invoked by the xmit function to prepare the tx descriptor */ diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index de65bb29feba..6d690678c20e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, return dma_own; if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { - pr_warn("%s: Oversized frame spanned multiple buffers\n", - __func__); stats->rx_length_errors++; return discard_frame; } @@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, } static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, - int end) + int end, int bfsize) { + int bfsize1; + p->des0 |= cpu_to_le32(RDES0_OWN); - p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); + + bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32(bfsize1 & 
RDES1_BUFFER1_SIZE_MASK); if (mode == STMMAC_CHAIN_MODE) ndesc_rx_set_on_chain(p, end); else - ndesc_rx_set_on_ring(p, end); + ndesc_rx_set_on_ring(p, end, bfsize); if (disable_rx_ic) p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index d8c5bc412219..4d9bcb4d0378 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, - STMMAC_RING_MODE, 1, false, skb->len); + STMMAC_RING_MODE, 0, false, skb->len); tx_q->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); @@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); stmmac_prepare_tx_desc(priv, desc, 0, len, csum, - STMMAC_RING_MODE, 1, true, skb->len); + STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb), + skb->len); } else { des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); @@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, - STMMAC_RING_MODE, 1, true, skb->len); + STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb), + skb->len); } tx_q->cur_tx = entry; @@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc) static void refill_desc3(void *priv_ptr, struct dma_desc *p) { - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + struct stmmac_rx_queue *rx_q = priv_ptr; + struct stmmac_priv *priv = rx_q->priv_data; /* Fill DES3 in case of RING mode */ - if (priv->dma_buf_sz >= BUF_SIZE_8KiB) + if (priv->dma_buf_sz == BUF_SIZE_16KiB) p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 97c5e1aad88f..48712437d0da 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) if (priv->extend_desc) stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + (i == DMA_RX_SIZE - 1), + priv->dma_buf_sz); else stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + (i == DMA_RX_SIZE - 1), + priv->dma_buf_sz); } /** @@ -2614,8 +2616,6 @@ static int stmmac_open(struct net_device *dev) u32 chan; int ret; - stmmac_check_ether_addr(priv); - if (priv->hw->pcs != STMMAC_PCS_RGMII && priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) { @@ -3216,14 +3216,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, csum_insertion, priv->mode, 1, last_segment, skb->len); - - /* The own bit must be the latest setting done when prepare the - * descriptor and then barrier is needed to make sure that - * all is coherent before granting the DMA engine. 
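The barrier comment kept in the stmmac hunk above captures the general pattern: every descriptor field is written first, and only then is ownership handed to the DMA engine behind a write barrier. A small userspace sketch of the same publish/acquire ordering follows, using C11 release/acquire atomics in place of the kernel's wmb(); the toy_desc type is invented for illustration and is not the driver's descriptor layout.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for a DMA descriptor: two payload fields plus an OWN flag. */
struct toy_desc {
	uint32_t addr;
	uint32_t len;
	atomic_uint own;	/* 1 = owned by the "device" */
};

/* Driver side: fill the descriptor completely, then set OWN last.
 * The release store orders the field writes before the ownership
 * transfer, the role wmb() plays before kicking the DMA engine above.
 */
static void publish_desc(struct toy_desc *d, uint32_t addr, uint32_t len)
{
	d->addr = addr;
	d->len = len;
	atomic_store_explicit(&d->own, 1, memory_order_release);
}

/* "Device" side: only read the fields once OWN is observed set. */
static int consume_desc(const struct toy_desc *d)
{
	if (!atomic_load_explicit(&d->own, memory_order_acquire))
		return -1;	/* not ours yet */

	printf("device sees addr=0x%x len=%u\n", d->addr, d->len);
	return 0;
}

int main(void)
{
	struct toy_desc d = { .own = 0 };

	publish_desc(&d, 0x1000, 1514);
	return consume_desc(&d);
}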
- */ - wmb(); + } else { + stmmac_set_tx_owner(priv, first); } + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. + */ + wmb(); + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); stmmac_enable_dma_transmission(priv, priv->ioaddr); @@ -3350,9 +3352,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; - unsigned int entry = rx_q->cur_rx; + unsigned int next_entry = rx_q->cur_rx; int coe = priv->hw->rx_csum; - unsigned int next_entry; unsigned int count = 0; bool xmac; @@ -3370,10 +3371,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); } while (count < limit) { - int status; + int entry, status; struct dma_desc *p; struct dma_desc *np; + entry = next_entry; + if (priv->extend_desc) p = (struct dma_desc *)(rx_q->dma_erx + entry); else @@ -3429,11 +3432,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) * ignored */ if (frame_len > priv->dma_buf_sz) { - netdev_err(priv->dev, - "len %d larger than size (%d)\n", - frame_len, priv->dma_buf_sz); + if (net_ratelimit()) + netdev_err(priv->dev, + "len %d larger than size (%d)\n", + frame_len, priv->dma_buf_sz); priv->dev->stats.rx_length_errors++; - break; + continue; } /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 @@ -3468,7 +3472,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) dev_warn(priv->device, "packet dropped\n"); priv->dev->stats.rx_dropped++; - break; + continue; } dma_sync_single_for_cpu(priv->device, @@ -3488,11 +3492,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) } else { skb = rx_q->rx_skbuff[entry]; if (unlikely(!skb)) { - netdev_err(priv->dev, - "%s: Inconsistent Rx chain\n", - priv->dev->name); + if (net_ratelimit()) + netdev_err(priv->dev, + "%s: Inconsistent Rx chain\n", + priv->dev->name); priv->dev->stats.rx_dropped++; - break; + continue; } prefetch(skb->data - NET_IP_ALIGN); rx_q->rx_skbuff[entry] = NULL; @@ -3527,7 +3532,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; } - entry = next_entry; } stmmac_rx_refill(priv, queue); @@ -4297,6 +4301,8 @@ int stmmac_dvr_probe(struct device *device, if (ret) goto error_hw_init; + stmmac_check_ether_addr(priv); + /* Configure real RX and TX queues */ netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index d819e8eaba12..26db6aa002d1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = { }, .driver_data = (void *)&galileo_stmmac_dmi_data, }, + /* + * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040. + * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which + * has only one pci network device while other asset tags are + * for IOT2040 which has two. 
+ */ { .matches = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), @@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = { { .matches = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), - DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG, - "6ES7647-0AA00-1YA2"), }, .driver_data = (void *)&iot2040_stmmac_dmi_data, }, diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 5174d318901e..0a920c5936b2 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); - if (ret) + if (ret) { + of_node_put(interfaces); return ret; + } ret = netcp_txpipe_open(&gbe_dev->tx_pipe); - if (ret) + if (ret) { + of_node_put(interfaces); return ret; + } /* Create network interfaces */ INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index ec7e7ec24ff9..4041c75997ba 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev) ret = of_address_to_resource(np, 0, &dmares); if (ret) { dev_err(&pdev->dev, "unable to get DMA resource\n"); + of_node_put(np); goto free_netdev; } lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); if (IS_ERR(lp->dma_regs)) { dev_err(&pdev->dev, "could not map DMA regs\n"); ret = PTR_ERR(lp->dma_regs); + of_node_put(np); goto free_netdev; } lp->rx_irq = irq_of_parse_and_map(np, 1); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index e859ae2e42d5..49f41b64077b 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -987,6 +987,7 @@ struct netvsc_device { wait_queue_head_t wait_drain; bool destroy; + bool tx_disable; /* if true, do not wake up queue again */ /* Receive buffer allocated by us but manages by NetVSP */ void *recv_buf; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 813d195bbd57..e0dce373cdd9 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void) init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; + net_device->tx_disable = false; net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; @@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, } else { struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); - if (netif_tx_queue_stopped(txq) && + if (netif_tx_queue_stopped(txq) && !net_device->tx_disable && (hv_get_avail_to_write_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { netif_tx_wake_queue(txq); @@ -874,7 +875,8 @@ static inline int netvsc_send_pkt( } else if (ret == -EAGAIN) { netif_tx_stop_queue(txq); ndev_ctx->eth_stats.stop_queue++; - if (atomic_read(&nvchan->queue_sends) < 1) { + if (atomic_read(&nvchan->queue_sends) < 1 && + !net_device->tx_disable) { netif_tx_wake_queue(txq); ndev_ctx->eth_stats.wake_queue++; ret = -ENOSPC; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index cf4897043e83..b20fb0fb595b 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -109,6 +109,15 
@@ static void netvsc_set_rx_mode(struct net_device *net) rcu_read_unlock(); } +static void netvsc_tx_enable(struct netvsc_device *nvscdev, + struct net_device *ndev) +{ + nvscdev->tx_disable = false; + virt_wmb(); /* ensure queue wake up mechanism is on */ + + netif_tx_wake_all_queues(ndev); +} + static int netvsc_open(struct net_device *net) { struct net_device_context *ndev_ctx = netdev_priv(net); @@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net) rdev = nvdev->extension; if (!rdev->link_state) { netif_carrier_on(net); - netif_tx_wake_all_queues(net); + netvsc_tx_enable(nvdev, net); } if (vf_netdev) { @@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev) } } +static void netvsc_tx_disable(struct netvsc_device *nvscdev, + struct net_device *ndev) +{ + if (nvscdev) { + nvscdev->tx_disable = true; + virt_wmb(); /* ensure txq will not wake up after stop */ + } + + netif_tx_disable(ndev); +} + static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); @@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net) struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); int ret; - netif_tx_disable(net); + netvsc_tx_disable(nvdev, net); /* No need to close rndis filter if it is removed already */ if (!nvdev) @@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev, /* If device was up (receiving) then shutdown */ if (netif_running(ndev)) { - netif_tx_disable(ndev); + netvsc_tx_disable(nvdev, ndev); ret = rndis_filter_close(nvdev); if (ret) { @@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w) if (rdev->link_state) { rdev->link_state = false; netif_carrier_on(net); - netif_tx_wake_all_queues(net); + netvsc_tx_enable(net_device, net); } else { notify = true; } @@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w) if (!rdev->link_state) { rdev->link_state = true; netif_carrier_off(net); - netif_tx_stop_all_queues(net); + netvsc_tx_disable(net_device, net); } kfree(event); break; @@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w) if (!rdev->link_state) { rdev->link_state = true; netif_carrier_off(net); - netif_tx_stop_all_queues(net); + netvsc_tx_disable(net_device, net); event->event = RNDIS_STATUS_MEDIA_CONNECT; spin_lock_irqsave(&ndev_ctx->lock, flags); list_add(&event->list, &ndev_ctx->reconfig_events); diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index cd1d8faccca5..cd6b95e673a5 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi) INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev), WQ_MEM_RECLAIM); + if (unlikely(!lp->wqueue)) { + ret = -ENOMEM; + goto err_hw_init; + } ret = adf7242_hw_init(lp); if (ret) diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index b6743f03dce0..3b88846de31b 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) goto out_err; } - genlmsg_reply(skb, info); + res = genlmsg_reply(skb, info); break; } diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index c589f5ae75bb..8bb53ec8d9cf 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c 
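The netvsc hunks above add a tx_disable flag so the completion path never wakes a queue that netvsc_close() or netvsc_detach() has just stopped: the flag is set before netif_tx_disable() behind a write barrier, and every wake-up site rechecks it. The compact userspace sketch below shows that gate, with C11 atomics standing in for the flag plus virt_wmb(); the types and names are illustrative, not the driver's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-device state that gates queue wake-ups. */
struct toy_netdev {
	atomic_bool tx_disable;		/* set before the queues are stopped */
	bool queue_stopped;
};

static void tx_disable(struct toy_netdev *nd)
{
	/* Publish the flag before stopping the queue, as the driver does
	 * with tx_disable = true followed by virt_wmb().
	 */
	atomic_store_explicit(&nd->tx_disable, true, memory_order_release);
	nd->queue_stopped = true;
}

static void tx_enable(struct toy_netdev *nd)
{
	atomic_store_explicit(&nd->tx_disable, false, memory_order_release);
	nd->queue_stopped = false;
}

/* Completion path: wake the queue only if nobody is tearing it down. */
static void on_send_complete(struct toy_netdev *nd, int ring_space_pct)
{
	if (!nd->queue_stopped)
		return;
	if (atomic_load_explicit(&nd->tx_disable, memory_order_acquire))
		return;		/* close/detach in progress: stay stopped */
	if (ring_space_pct > 20) {
		nd->queue_stopped = false;
		printf("queue woken (space %d%%)\n", ring_space_pct);
	}
}

int main(void)
{
	struct toy_netdev nd = { .queue_stopped = false };

	tx_disable(&nd);
	on_send_complete(&nd, 80);	/* ignored: tx_disable is set */
	tx_enable(&nd);
	printf("tx_disable=%d stopped=%d\n",
	       atomic_load(&nd.tx_disable), nd.queue_stopped);
	return 0;
}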
@@ -533,6 +533,8 @@ mcr20a_start(struct ieee802154_hw *hw) dev_dbg(printdev(lp), "no slotted operation\n"); ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_SLOTTED, 0x0); + if (ret < 0) + return ret; /* enable irq */ enable_irq(lp->spi->irq); @@ -540,11 +542,15 @@ mcr20a_start(struct ieee802154_hw *hw) /* Unmask SEQ interrupt */ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2, DAR_PHY_CTRL2_SEQMSK, 0x0); + if (ret < 0) + return ret; /* Start the RX sequence */ dev_dbg(printdev(lp), "start the RX sequence\n"); ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX); + if (ret < 0) + return ret; return 0; } diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 071869db44cf..520657945b82 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE help MDIO devices and driver infrastructure code. +if MDIO_DEVICE + config MDIO_BUS tristate default m if PHYLIB=m @@ -179,6 +181,7 @@ config MDIO_XGENE APM X-Gene SoC's. endif +endif config PHYLINK tristate diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 9605d4fe540b..cb86a3e90c7d 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev) bcm54xx_phydsp_config(phydev); + /* Encode link speed into LED1 and LED3 pair (green/amber). + * Also flash these two LEDs on activity. This means configuring + * them for MULTICOLOR and encoding link/activity into them. + */ + val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) | + BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1); + bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val); + + val = BCM_LED_MULTICOLOR_IN_PHASE | + BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) | + BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT); + bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val); + return 0; } diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index bbd8c22067f3..97d45bd5b38e 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -15,6 +15,8 @@ #include <linux/netdevice.h> #define DP83822_PHY_ID 0x2000a240 +#define DP83825I_PHY_ID 0x2000a150 + #define DP83822_DEVADDR 0x1f #define MII_DP83822_PHYSCR 0x11 @@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev) return 0; } +#define DP83822_PHY_DRIVER(_id, _name) \ + { \ + PHY_ID_MATCH_MODEL(_id), \ + .name = (_name), \ + .features = PHY_BASIC_FEATURES, \ + .soft_reset = dp83822_phy_reset, \ + .config_init = dp83822_config_init, \ + .get_wol = dp83822_get_wol, \ + .set_wol = dp83822_set_wol, \ + .ack_interrupt = dp83822_ack_interrupt, \ + .config_intr = dp83822_config_intr, \ + .suspend = dp83822_suspend, \ + .resume = dp83822_resume, \ + } + static struct phy_driver dp83822_driver[] = { - { - .phy_id = DP83822_PHY_ID, - .phy_id_mask = 0xfffffff0, - .name = "TI DP83822", - .features = PHY_BASIC_FEATURES, - .config_init = dp83822_config_init, - .soft_reset = dp83822_phy_reset, - .get_wol = dp83822_get_wol, - .set_wol = dp83822_set_wol, - .ack_interrupt = dp83822_ack_interrupt, - .config_intr = dp83822_config_intr, - .suspend = dp83822_suspend, - .resume = dp83822_resume, - }, + DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"), + DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"), }; module_phy_driver(dp83822_driver); static struct mdio_device_id __maybe_unused dp83822_tbl[] = { { DP83822_PHY_ID, 0xfffffff0 }, + { DP83825I_PHY_ID, 0xfffffff0 }, { }, }; 
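The dp83822 change above replaces two nearly identical struct initializers with a DP83822_PHY_DRIVER() macro, so supporting the DP83825I costs only one extra table line. Here is a self-contained sketch of the same macro-generated-entry pattern, using generic names rather than the PHY subsystem's real types.

#include <stdio.h>

/* Generic stand-in for a driver descriptor keyed by a device ID. */
struct toy_driver {
	unsigned int id;
	const char *name;
	int (*probe)(unsigned int id);
};

static int common_probe(unsigned int id)
{
	printf("probing device 0x%08x\n", id);
	return 0;
}

/* One macro expands to a full designated initializer, so every entry
 * shares the same callbacks and only the ID and name differ, much like
 * DP83822_PHY_DRIVER() in the hunk above.
 */
#define TOY_DRIVER(_id, _name)		\
	{				\
		.id = (_id),		\
		.name = (_name),	\
		.probe = common_probe,	\
	}

static const struct toy_driver toy_drivers[] = {
	TOY_DRIVER(0x2000a240, "TI DP83822"),
	TOY_DRIVER(0x2000a150, "TI DP83825I"),
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(toy_drivers) / sizeof(toy_drivers[0]); i++) {
		printf("%s -> ", toy_drivers[i].name);
		toy_drivers[i].probe(toy_drivers[i].id);
	}
	return 0;
}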
MODULE_DEVICE_TABLE(mdio, dp83822_tbl); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 3ccba37bd6dd..f76c4048b978 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1489,9 +1489,10 @@ static int marvell_get_sset_count(struct phy_device *phydev) static void marvell_get_strings(struct phy_device *phydev, u8 *data) { + int count = marvell_get_sset_count(phydev); int i; - for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { + for (i = 0; i < count; i++) { strlcpy(data + i * ETH_GSTRING_LEN, marvell_hw_stats[i].string, ETH_GSTRING_LEN); } @@ -1519,9 +1520,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) static void marvell_get_stats(struct phy_device *phydev, struct ethtool_stats *stats, u64 *data) { + int count = marvell_get_sset_count(phydev); int i; - for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) + for (i = 0; i < count; i++) data[i] = marvell_get_stat(phydev, i); } diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index a238388eb1a5..0eec2913c289 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -201,6 +201,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev) static int meson_gxl_config_intr(struct phy_device *phydev) { u16 val; + int ret; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { val = INTSRC_ANEG_PR @@ -213,6 +214,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev) val = 0; } + /* Ack any pending IRQ */ + ret = meson_gxl_ack_interrupt(phydev); + if (ret) + return ret; + return phy_write(phydev, INTSRC_MASK, val); } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 49fdd1ee798e..77068c545de0 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev) { int ret; - ret = phy_write(phydev, MII_BMCR, BMCR_RESET); + ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET); if (ret < 0) return ret; diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 92b64e254b44..7475cef17cf7 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -159,6 +159,14 @@ static const struct spi_device_id ks8995_id[] = { }; MODULE_DEVICE_TABLE(spi, ks8995_id); +static const struct of_device_id ks8895_spi_of_match[] = { + { .compatible = "micrel,ks8995" }, + { .compatible = "micrel,ksz8864" }, + { .compatible = "micrel,ksz8795" }, + { }, + }; +MODULE_DEVICE_TABLE(of, ks8895_spi_of_match); + static inline u8 get_chip_id(u8 val) { return (val >> ID1_CHIPID_S) & ID1_CHIPID_M; @@ -526,6 +534,7 @@ static int ks8995_remove(struct spi_device *spi) static struct spi_driver ks8995_driver = { .driver = { .name = "spi-ks8995", + .of_match_table = of_match_ptr(ks8895_spi_of_match), }, .probe = ks8995_probe, .remove = ks8995_remove, diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c index f4e93f5fc204..ea90db3c7705 100644 --- a/drivers/net/slip/slhc.c +++ b/drivers/net/slip/slhc.c @@ -153,7 +153,7 @@ out_fail: void slhc_free(struct slcompress *comp) { - if ( comp == NULLSLCOMPR ) + if ( IS_ERR_OR_NULL(comp) ) return; if ( comp->tstate != NULLSLSTATE ) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6ed96fdfd96d..16963f7a88f7 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1156,6 +1156,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev, return -EINVAL; } + if (netdev_has_upper_dev(dev, port_dev)) { + NL_SET_ERR_MSG(extack, "Device is 
already an upper device of the team interface"); + netdev_err(dev, "Device %s is already an upper device of the team interface\n", + portname); + return -EBUSY; + } + if (port_dev->features & NETIF_F_VLAN_CHALLENGED && vlan_uses_dev(dev)) { NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up"); @@ -1246,6 +1253,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev, goto err_option_port_add; } + /* set promiscuity level to new slave */ + if (dev->flags & IFF_PROMISC) { + err = dev_set_promiscuity(port_dev, 1); + if (err) + goto err_set_slave_promisc; + } + + /* set allmulti level to new slave */ + if (dev->flags & IFF_ALLMULTI) { + err = dev_set_allmulti(port_dev, 1); + if (err) { + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(port_dev, -1); + goto err_set_slave_promisc; + } + } + netif_addr_lock_bh(dev); dev_uc_sync_multiple(port_dev, dev); dev_mc_sync_multiple(port_dev, dev); @@ -1262,6 +1286,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev, return 0; +err_set_slave_promisc: + __team_option_inst_del_port(team, port); + err_option_port_add: team_upper_dev_unlink(team, port); @@ -1307,6 +1334,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev) team_port_disable(team, port); list_del_rcu(&port->list); + + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(port_dev, -1); + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(port_dev, -1); + team_upper_dev_unlink(team, port); netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 1d68921723dc..e9ca1c088d0b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, int skb_xdp = 1; bool frags = tun_napi_frags_enabled(tfile); - if (!(tun->dev->flags & IFF_UP)) - return -EIO; - if (!(tun->flags & IFF_NO_PI)) { if (len < sizeof(pi)) return -EINVAL; @@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, err = skb_copy_datagram_from_iter(skb, 0, from, len); if (err) { + err = -EFAULT; +drop: this_cpu_inc(tun->pcpu_stats->rx_dropped); kfree_skb(skb); if (frags) { @@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, mutex_unlock(&tfile->napi_mutex); } - return -EFAULT; + return err; } } @@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); + rcu_read_lock(); + if (unlikely(!(tun->dev->flags & IFF_UP))) { + err = -EIO; + rcu_read_unlock(); + goto drop; + } + if (frags) { /* Exercise flow dissector code path. 
*/ u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); @@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (unlikely(headlen > skb_headlen(skb))) { this_cpu_inc(tun->pcpu_stats->rx_dropped); napi_free_frags(&tfile->napi); + rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); WARN_ON(1); return -ENOMEM; @@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } else { netif_rx_ni(skb); } + rcu_read_unlock(); stats = get_cpu_ptr(tun->pcpu_stats); u64_stats_update_begin(&stats->syncp); diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 820a2fe7d027..aff995be2a31 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = { .tx_fixup = aqc111_tx_fixup, }; +static const struct driver_info qnap_info = { + .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter", + .bind = aqc111_bind, + .unbind = aqc111_unbind, + .status = aqc111_status, + .link_reset = aqc111_link_reset, + .reset = aqc111_reset, + .stop = aqc111_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | + FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, + .rx_fixup = aqc111_rx_fixup, + .tx_fixup = aqc111_tx_fixup, +}; + static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); @@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = { {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, + {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)}, { },/* END */ }; MODULE_DEVICE_TABLE(usb, products); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 5512a1038721..3e9b2c319e45 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -851,6 +851,14 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* WHITELIST!!! * * CDC Ether uses two interfaces, not necessarily consecutive. 
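The aqc111 and cdc_ether hunks above register the QNAP QNA-UC5G1T (USB ID 1c04:0015) in the vendor driver's ID table and give cdc_ether a matching entry with driver_info = 0 so the generic CDC binding leaves it alone. The sketch below illustrates that kind of ID-table lookup with simplified types; the real matching is done by the USB core, and only the IDs shown in the hunks above are used.

#include <stdio.h>

/* Simplified stand-in for a USB device-ID table entry. */
struct toy_usb_id {
	unsigned short vendor;
	unsigned short product;
	const char *info;	/* NULL plays the role of driver_info = 0 */
};

static const struct toy_usb_id aqc111_ids[] = {
	{ 0x0b95, 0x2790, "asix111_info" },
	{ 0x0b95, 0x2791, "asix112_info" },
	{ 0x20f4, 0xe05a, "trendnet_info" },
	{ 0x1c04, 0x0015, "qnap_info (QNAP QNA-UC5G1T USB to 5GbE Adapter)" },
	{ 0, 0, NULL },		/* table terminator, like the { } entry above */
};

/* Walk the table the way a bus core matches a newly plugged device. */
static const struct toy_usb_id *match_id(const struct toy_usb_id *tbl,
					 unsigned short vendor,
					 unsigned short product)
{
	for (; tbl->vendor || tbl->product; tbl++)
		if (tbl->vendor == vendor && tbl->product == product)
			return tbl;
	return NULL;
}

int main(void)
{
	const struct toy_usb_id *id = match_id(aqc111_ids, 0x1c04, 0x0015);

	if (id)
		printf("matched %04x:%04x -> %s\n", id->vendor, id->product, id->info);
	else
		printf("no driver claims this device\n");
	return 0;
}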
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 74bebbdb4b15..679e404a5224 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1122,9 +1122,16 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ + {QMI_FIXED_INTF(0x1435, 0x0918, 3)}, /* Wistron NeWeb D16Q1 */ + {QMI_FIXED_INTF(0x1435, 0x0918, 4)}, /* Wistron NeWeb D16Q1 */ + {QMI_FIXED_INTF(0x1435, 0x0918, 5)}, /* Wistron NeWeb D16Q1 */ + {QMI_FIXED_INTF(0x1435, 0x3185, 4)}, /* Wistron NeWeb M18Q5 */ + {QMI_FIXED_INTF(0x1435, 0xd111, 4)}, /* M9615A DM11-1 D51QC */ {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd182, 4)}, /* Wistron NeWeb D18 */ + {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ @@ -1180,6 +1187,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */ {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */ {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ + {QMI_FIXED_INTF(0x19d2, 0x0396, 3)}, /* ZTE ZM8620 */ {QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */ {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ @@ -1200,9 +1208,12 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ + {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ + {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7c1430ed0244..9ee4d7402ca2 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -875,6 +875,7 @@ static const struct net_device_ops vrf_netdev_ops = { .ndo_init = vrf_dev_init, .ndo_uninit = vrf_dev_uninit, .ndo_start_xmit = vrf_xmit, + .ndo_set_mac_address = eth_mac_addr, .ndo_get_stats64 = vrf_get_stats64, .ndo_add_slave = vrf_add_slave, .ndo_del_slave = vrf_del_slave, @@ -1273,9 +1274,15 @@ static void vrf_setup(struct net_device *dev) /* default to no qdisc; user can add if desired */ dev->priv_flags |= IFF_NO_QUEUE; + dev->priv_flags |= IFF_NO_RX_HANDLER; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - dev->min_mtu = 0; - dev->max_mtu = 0; + /* VRF devices do not care about MTU, but if the MTU is set + * too low then the ipv4 and ipv6 protocols are disabled + * which breaks networking. 
+ */ + dev->min_mtu = IPV6_MIN_MTU; + dev->max_mtu = ETH_MAX_MTU; } static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 077f1b9f2761..d76dfed8d9bb 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -4335,10 +4335,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) /* If vxlan->dev is in the same netns, it has already been added * to the list by the previous loop. */ - if (!net_eq(dev_net(vxlan->dev), net)) { - gro_cells_destroy(&vxlan->gro_cells); + if (!net_eq(dev_net(vxlan->dev), net)) unregister_netdevice_queue(vxlan->dev, head); - } } for (h = 0; h < PORT_HASH_SIZE; ++h) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 24b983edb357..eca87f7c5b6c 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1855,7 +1855,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar, struct ath10k_ce_crash_data ce_data; u32 addr, id; - lockdep_assert_held(&ar->data_lock); + lockdep_assert_held(&ar->dump_mutex); ath10k_err(ar, "Copy Engine register dump:\n"); diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 835b8de92d55..aff585658fc0 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -3119,6 +3119,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, goto err_free_wq; mutex_init(&ar->conf_mutex); + mutex_init(&ar->dump_mutex); spin_lock_init(&ar->data_lock); INIT_LIST_HEAD(&ar->peers); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index e08a17b01e03..e35aae5146f1 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -1063,6 +1063,9 @@ struct ath10k { /* prevents concurrent FW reconfiguration */ struct mutex conf_mutex; + /* protects coredump data */ + struct mutex dump_mutex; + /* protects shared structure data */ spinlock_t data_lock; diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index 33838d9c1cb6..45a355fb62b9 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -1102,7 +1102,7 @@ struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; - lockdep_assert_held(&ar->data_lock); + lockdep_assert_held(&ar->dump_mutex); if (ath10k_coredump_mask == 0) /* coredump disabled */ @@ -1146,7 +1146,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) if (!buf) return NULL; - spin_lock_bh(&ar->data_lock); + mutex_lock(&ar->dump_mutex); dump_data = (struct ath10k_dump_file_data *)(buf); strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP", @@ -1213,7 +1213,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; } - spin_unlock_bh(&ar->data_lock); + mutex_unlock(&ar->dump_mutex); return dump_data; } diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index a20ea270d519..1acc622d2183 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) num_msdus++; num_bytes += ret; } - ieee80211_return_txq(hw, txq); + 
ieee80211_return_txq(hw, txq, false); ieee80211_txq_schedule_end(hw, txq->ac); record->num_msdus = cpu_to_le16(num_msdus); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b73c23d4ce86..9c703d287333 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac) if (ret < 0) break; } - ieee80211_return_txq(hw, txq); + ieee80211_return_txq(hw, txq, false); ath10k_htt_tx_txq_update(hw, txq); if (ret == -EBUSY) break; @@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, if (ret < 0) break; } - ieee80211_return_txq(hw, txq); + ieee80211_return_txq(hw, txq, false); ath10k_htt_tx_txq_update(hw, txq); out: ieee80211_txq_schedule_end(hw, ac); @@ -5774,7 +5774,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_MCAST_RATE && - !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) { + !ath10k_mac_vif_chan(arvif->vif, &def)) { band = def.chan->band; rateidx = vif->bss_conf.mcast_rate[band] - 1; @@ -5812,7 +5812,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_BASIC_RATES) { - if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) { + if (ath10k_mac_vif_chan(vif, &def)) { mutex_unlock(&ar->conf_mutex); return; } diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 271f92c24d44..2c27f407a851 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1441,7 +1441,7 @@ static void ath10k_pci_dump_registers(struct ath10k *ar, __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; int i, ret; - lockdep_assert_held(&ar->data_lock); + lockdep_assert_held(&ar->dump_mutex); ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0], hi_failure_state, @@ -1656,7 +1656,7 @@ static void ath10k_pci_dump_memory(struct ath10k *ar, int ret, i; u8 *buf; - lockdep_assert_held(&ar->data_lock); + lockdep_assert_held(&ar->dump_mutex); if (!crash_data) return; @@ -1734,14 +1734,19 @@ static void ath10k_pci_dump_memory(struct ath10k *ar, } } -static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) +static void ath10k_pci_fw_dump_work(struct work_struct *work) { + struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci, + dump_work); struct ath10k_fw_crash_data *crash_data; + struct ath10k *ar = ar_pci->ar; char guid[UUID_STRING_LEN + 1]; - spin_lock_bh(&ar->data_lock); + mutex_lock(&ar->dump_mutex); + spin_lock_bh(&ar->data_lock); ar->stats.fw_crash_counter++; + spin_unlock_bh(&ar->data_lock); crash_data = ath10k_coredump_new(ar); @@ -1756,11 +1761,18 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) ath10k_ce_dump_registers(ar, crash_data); ath10k_pci_dump_memory(ar, crash_data); - spin_unlock_bh(&ar->data_lock); + mutex_unlock(&ar->dump_mutex); queue_work(ar->workqueue, &ar->restart_work); } +static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + queue_work(ar->workqueue, &ar_pci->dump_work); +} + void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, int force) { @@ -3442,6 +3454,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar) spin_lock_init(&ar_pci->ps_lock); mutex_init(&ar_pci->ce_diag_mutex); + INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work); + timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) diff --git 
a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 3773c79f322f..4455ed6c5275 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -121,6 +121,8 @@ struct ath10k_pci { /* For protecting ce_diag */ struct mutex ce_diag_mutex; + struct work_struct dump_work; + struct ath10k_ce ce; struct timer_list rx_post_retry; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 773d428ff1b0..b17e1ca40995 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) goto out; while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) { + bool force; + tid = (struct ath_atx_tid *)queue->drv_priv; ret = ath_tx_sched_aggr(sc, txq, tid); ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret); - ieee80211_return_txq(hw, queue); + force = !skb_queue_empty(&tid->retry_q); + ieee80211_return_txq(hw, queue, force); } out: diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index fdc56f821b5a..0a87d87fbb4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -82,6 +82,7 @@ #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" +#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-" #define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" #define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" #define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" @@ -105,8 +106,8 @@ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" -#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ - IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" +#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ @@ -200,7 +201,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = { #define IWL_DEVICE_AX210 \ IWL_DEVICE_AX200_COMMON, \ .device_family = IWL_DEVICE_FAMILY_AX210, \ - .base_params = &iwl_22000_base_params, \ + .base_params = &iwl_22560_base_params, \ .csr = &iwl_csr_v1, \ .min_txq_size = 128 @@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; -const struct iwl_cfg iwl22260_2ax_cfg = { - .name = "Intel(R) Wireless-AX 22260", +const struct iwl_cfg iwl_ax101_cfg_quz_hr = { + .name = "Intel(R) Wi-Fi 6 AX101", + .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. 
+ */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg iwl_ax200_cfg_cc = { + .name = "Intel(R) Wi-Fi 6 AX200 160MHz", .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* @@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = { }; const struct iwl_cfg killer1650x_2ax_cfg = { - .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)", + .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)", .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* @@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = { }; const struct iwl_cfg killer1650w_2ax_cfg = { - .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)", + .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)", .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* @@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = { }; const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { - .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)", + .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { }; const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { - .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)", + .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c index 575a7022d045..3846064d51a5 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c @@ -1,7 +1,7 @@ /****************************************************************************** * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -136,6 +136,7 @@ const struct iwl_cfg iwl5350_agn_cfg = { .ht_params = &iwl5000_ht_params, .led_mode = IWL_LED_BLINK, .internal_wimax_coex = true, + .csr = &iwl_csr_v1, }; #define IWL_DEVICE_5150 \ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index f119c49cd39c..d7380016f1c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, if (!range) { IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n", le32_to_cpu(reg->region_id), type); + memset(*data, 0, le32_to_cpu((*data)->len)); return; } @@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, if (range_size < 0) { IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n", le32_to_cpu(reg->region_id), type); + memset(*data, 0, le32_to_cpu((*data)->len)); return; } range = range + range_size; @@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, trigger = fwrt->dump.active_trigs[id].trig; - size = sizeof(*dump_file); - size += iwl_fw_ini_get_trigger_len(fwrt, trigger); - + size = iwl_fw_ini_get_trigger_len(fwrt, trigger); if (!size) return NULL; + size += sizeof(*dump_file); + dump_file = vzalloc(size); if (!dump_file) return NULL; @@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, iwl_dump_error_desc->len = 0; ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); - if (ret) { + if (ret) kfree(iwl_dump_error_desc); - } else { - set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); - - /* trigger nmi to halt the fw */ - iwl_force_nmi(fwrt->trans); - } + else + iwl_trans_sync_nmi(fwrt->trans); return ret; } @@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point); void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt) { - /* if the wait event timeout elapses instead of wake up then - * the driver did not receive NMI interrupt and can not assume the FW - * is halted - */ - int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq, - !test_bit(STATUS_FW_WAIT_DUMP, - &fwrt->trans->status), - msecs_to_jiffies(2000)); - if (!ret) { - /* failed to receive NMI interrupt, assuming the FW is stuck */ - set_bit(STATUS_FW_ERROR, &fwrt->trans->status); - - clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); - } - - /* Assuming the op mode mutex is held at this point */ iwl_fw_dbg_collect_sync(fwrt); iwl_trans_stop_device(fwrt->trans); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 641c95d03b15..e06407dc088b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -93,7 +93,7 @@ struct iwl_ucode_header { } u; }; -#define IWL_UCODE_INI_TLV_GROUP BIT(24) +#define IWL_UCODE_INI_TLV_GROUP 0x1000000 /* * new TLV uCode file layout @@ -148,11 +148,14 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54, IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55, IWL_UCODE_TLV_FW_RECOVERY_INFO = 57, - IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1, - IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2, - IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3, - IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4, - 
IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5, + + IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP + 0x1, + IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION, + IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP + 0x2, + IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP + 0x3, + IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP + 0x4, + IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP + 0x5, + IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_DEBUG_FLOW, /* TLVs 0x1000-0x2000 are for internal driver usage */ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 7adf4e4e841a..12310e3d2fc5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, fwrt->ops_ctx = ops_ctx; INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); - init_waitqueue_head(&fwrt->trans->fw_halt_waitq); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f5f87773667b..93070848280a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; extern const struct iwl_cfg iwl_ax101_cfg_qu_hr; +extern const struct iwl_cfg iwl_ax101_cfg_quz_hr; extern const struct iwl_cfg iwl22000_2ax_cfg_hr; -extern const struct iwl_cfg iwl22260_2ax_cfg; +extern const struct iwl_cfg iwl_ax200_cfg_cc; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650x_2ax_cfg; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index aea6d03e545a..e539bc94eff7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -327,6 +327,7 @@ enum { #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) #define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) +#define CSR_HW_REV_TYPE_QUZ (0x0000354) #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) #define CSR_HW_REV_TYPE_SO (0x0000370) #define CSR_HW_REV_TYPE_TY (0x0000420) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 5798f434f68f..c7070760a10a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -126,7 +126,8 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data, len -= ALIGN(tlv_len, 4); data += sizeof(*tlv) + ALIGN(tlv_len, 4); - if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP)) + if (tlv_type < IWL_UCODE_TLV_DEBUG_BASE || + tlv_type > IWL_UCODE_TLV_DEBUG_MAX) continue; hdr = (void *)&tlv->data[0]; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index bbebbf3efd57..d8690acee40c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -338,7 +338,6 @@ enum iwl_d3_status { * are sent * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent * 
@STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation - * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump */ enum iwl_trans_status { STATUS_SYNC_HCMD_ACTIVE, @@ -351,7 +350,6 @@ enum iwl_trans_status { STATUS_TRANS_GOING_IDLE, STATUS_TRANS_IDLE, STATUS_TRANS_DEAD, - STATUS_FW_WAIT_DUMP, }; static inline int @@ -618,6 +616,7 @@ struct iwl_trans_ops { struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, u32 dump_mask); void (*debugfs_cleanup)(struct iwl_trans *trans); + void (*sync_nmi)(struct iwl_trans *trans); }; /** @@ -831,7 +830,6 @@ struct iwl_trans { u32 lmac_error_event_table[2]; u32 umac_error_event_table; unsigned int error_event_table_tlv_status; - wait_queue_head_t fw_halt_waitq; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ @@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans) /* prevent double restarts due to the same erroneous FW */ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) iwl_op_mode_nic_error(trans->op_mode); +} - if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status)) - wake_up(&trans->fw_halt_waitq); - +static inline void iwl_trans_sync_nmi(struct iwl_trans *trans) +{ + if (trans->ops->sync_nmi) + trans->ops->sync_nmi(trans); } /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 2453ceabf00d..6925527d8457 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -774,8 +774,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return; mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); - - if (!mvmvif->dbgfs_dir) { + if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) { IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n", dbgfs_dir); return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index e9822a3ec373..94132cfd1f56 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, struct cfg80211_pmsr_result *res) { - s64 rtt_avg = res->ftm.rtt_avg * 100; - - do_div(rtt_avg, 6666); + s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666); IWL_DEBUG_INFO(mvm, "entry %d\n", index); IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 00a47f6f1d81..ab68b5d53ec9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1121,7 +1121,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); - iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); + if (ret != -ERFKILL) + iwl_fw_dbg_error_collect(&mvm->fwrt, + FW_DBG_TRIGGER_DRIVER); goto error; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 3a92c09d4692..6a3b11dd2edf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2714,9 +2714,6 @@ static void 
iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_remove(mvm, vif); - kfree(mvmvif->ap_wep_key); - mvmvif->ap_wep_key = NULL; - mutex_unlock(&mvm->mutex); } @@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { - /* if wep is used, need to set the key for the station now */ - if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { - mvm_sta->wep_key = - kmemdup(mvmvif->ap_wep_key, - sizeof(*mvmvif->ap_wep_key) + - mvmvif->ap_wep_key->keylen, - GFP_KERNEL); - if (!mvm_sta->wep_key) { - ret = -ENOMEM; - goto out_unlock; - } - - ret = iwl_mvm_set_sta_key(mvm, vif, sta, - mvm_sta->wep_key, - STA_KEY_IDX_INVALID); - } else { - ret = 0; - } + ret = 0; /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) @@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, NL80211_TDLS_DISABLE_LINK); } - /* Remove STA key if this is an AP using WEP */ - if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { - int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta, - mvm_sta->wep_key); - - if (!ret) - ret = rm_ret; - kfree(mvm_sta->wep_key); - mvm_sta->wep_key = NULL; - } - if (unlikely(ret && test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status))) @@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (changed & (IEEE80211_RC_BW_CHANGED | + IEEE80211_RC_SUPP_RATES_CHANGED | + IEEE80211_RC_NSS_CHANGED)) + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + true); if (vif->type == NL80211_IFTYPE_STATION && changed & IEEE80211_RC_NSS_CHANGED) @@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: - if (vif->type == NL80211_IFTYPE_AP) { - struct iwl_mvm_vif *mvmvif = - iwl_mvm_vif_from_mac80211(vif); - - mvmvif->ap_wep_key = kmemdup(key, - sizeof(*key) + key->keylen, - GFP_KERNEL); - if (!mvmvif->ap_wep_key) - return -ENOMEM; - } - - if (vif->type != NL80211_IFTYPE_STATION) - return 0; - break; + if (vif->type == NL80211_IFTYPE_STATION) + break; + if (iwl_mvm_has_new_tx_api(mvm)) + return -EOPNOTSUPP; + /* support HW crypto on TX */ + return 0; default: /* currently FW supports only one optional cipher scheme */ if (hw->n_cipher_schemes && @@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); if (ret) { IWL_WARN(mvm, "set key failed\n"); + key->hw_key_idx = STA_KEY_IDX_INVALID; /* * can't add key for RX, but we don't need it - * in the device for TX so still return 0 + * in the device for TX so still return 0, + * unless we have new TX API where we cannot + * put key material into the TX_CMD */ - key->hw_key_idx = STA_KEY_IDX_INVALID; - ret = 0; + if (iwl_mvm_has_new_tx_api(mvm)) + ret = -EOPNOTSUPP; + else + ret = 0; } break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index bca6f6b536d9..a50dc53df086 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -498,7 +498,6 @@ struct iwl_mvm_vif { netdev_features_t features; struct iwl_probe_resp_data __rcu *probe_resp_data; - struct ieee80211_key_conf 
*ap_wep_key; }; static inline struct iwl_mvm_vif * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index ba27dce4c2bb..13681b03c10e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -834,7 +834,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mutex_lock(&mvm->mutex); iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); err = iwl_run_init_mvm_ucode(mvm, true); - if (err) + if (err && err != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); if (!iwlmvm_mod_params.init_dbg || !err) iwl_mvm_stop_device(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 1e03acf30762..b516fd1867ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -169,9 +169,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, } /* iwl_mvm_create_skb Adds the rxb to a new skb */ -static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, - u16 len, u8 crypt_len, - struct iwl_rx_cmd_buffer *rxb) +static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_hdr *hdr, u16 len, u8 crypt_len, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; @@ -204,6 +204,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, * present before copying packet data. */ hdrlen += crypt_len; + + if (WARN_ONCE(headlen < hdrlen, + "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", + hdrlen, len, crypt_len)) { + /* + * We warn and trace because we want to be able to see + * it in trace-cmd as well. + */ + IWL_DEBUG_RX(mvm, + "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", + hdrlen, len, crypt_len); + return -EINVAL; + } + skb_put_data(skb, hdr, hdrlen); skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen); @@ -216,6 +230,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); } + + return 0; } static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm, @@ -1671,7 +1687,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->boottime_ns = ktime_get_boot_ns(); } - iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); + if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) { + kfree_skb(skb); + goto out; + } + if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta, csi); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 498c315291cf..98d123dd7177 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. 
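The rxmq.c hunk above makes iwl_mvm_create_skb() reject descriptors whose claimed header length exceeds the available data, and lets the caller drop the skb. A compressed sketch of that validate-before-copy pattern follows; example_rx_to_skb() and its length parameters are hypothetical, not driver API.

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/skbuff.h>

/* Hypothetical helper: copy a frame into an skb only after checking that
 * the claimed header length fits inside the received data. */
static int example_rx_to_skb(struct sk_buff *skb, const u8 *frame,
                             u16 frame_len, u16 hdr_len)
{
        if (WARN_ONCE(frame_len < hdr_len,
                      "invalid lengths (hdr_len=%u frame_len=%u)\n",
                      hdr_len, frame_len))
                return -EINVAL;

        skb_put_data(skb, frame, hdr_len);
        skb_put_data(skb, frame + hdr_len, frame_len - hdr_len);
        return 0;
}

/* The caller mirrors the rxmq.c change: drop the skb on error and move on.
 *
 *      if (example_rx_to_skb(skb, frame, frame_len, hdr_len)) {
 *              kfree_skb(skb);
 *              return;
 *      }
 */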
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); list_del_init(&mvmtxq->list); + local_bh_disable(); iwl_mvm_mac_itxq_xmit(mvm->hw, txq); + local_bh_enable(); } mutex_unlock(&mvm->mutex); @@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout); - if (mvmvif->ap_wep_key) { - u8 key_offset = iwl_mvm_set_fw_key_idx(mvm); - - __set_bit(key_offset, mvm->fw_key_table); - - if (key_offset == STA_KEY_IDX_INVALID) - return -ENOSPC; - - ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id, - mvmvif->ap_wep_key, true, 0, NULL, 0, - key_offset, 0); - if (ret) - return ret; - } - return 0; } @@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); - if (mvmvif->ap_wep_key) { - int i; - - if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx, - mvm->fw_key_table)) { - IWL_ERR(mvm, "offset %d not used in fw key table.\n", - mvmvif->ap_wep_key->hw_key_idx); - return -ENOENT; - } - - /* track which key was deleted last */ - for (i = 0; i < STA_KEY_MAX_NUM; i++) { - if (mvm->fw_key_deleted[i] < U8_MAX) - mvm->fw_key_deleted[i]++; - } - mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0; - ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id, - mvmvif->ap_wep_key, true); - if (ret) - return ret; - } - ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 79700c7310a1..b4d4071b865d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data { * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data - * @wep_key: used in AP mode. Is a duplicate of the WEP key. * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. 
for the 4 way handshake @@ -426,8 +425,6 @@ struct iwl_mvm_sta { struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; - struct ieee80211_key_conf *wep_key; - u8 reserved_queue; /* Temporary, until the new TLC will control the Tx protection */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 2b94e4cef56c..9f1af8da9dc1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)}, - {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)}, + {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)}, + {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)}, + {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)}, {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)}, + {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)}, + {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)}, {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)}, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index bf8b61a476c5..59213164f35e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); -void iwl_trans_sync_nmi(struct iwl_trans *trans); +void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans); #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index fe8269d023de..c4375b868901 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3318,7 +3318,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans) .unref = iwl_trans_pcie_unref, \ .dump_data = iwl_trans_pcie_dump_data, \ .d3_suspend = iwl_trans_pcie_d3_suspend, \ - .d3_resume = iwl_trans_pcie_d3_resume + .d3_resume = iwl_trans_pcie_d3_resume, \ + .sync_nmi = iwl_trans_pcie_sync_nmi #ifdef CONFIG_PM_SLEEP #define IWL_TRANS_PM_OPS \ @@ -3542,6 +3543,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } else if (cfg == &iwl_ax101_cfg_qu_hr) { if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && + trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { + trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { trans->cfg = &iwl_ax101_cfg_qu_hr; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == @@ -3560,7 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } 
else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - (trans->cfg != &iwl22260_2ax_cfg || + (trans->cfg != &iwl_ax200_cfg_cc || trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { u32 hw_status; @@ -3637,22 +3642,29 @@ out_no_pci: return ERR_PTR(ret); } -void iwl_trans_sync_nmi(struct iwl_trans *trans) +void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT; + u32 inta_addr, sw_err_bit; + + if (trans_pcie->msix_enabled) { + inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; + sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; + } else { + inta_addr = CSR_INT; + sw_err_bit = CSR_INT_BIT_SW_ERR; + } iwl_disable_interrupts(trans); iwl_force_nmi(trans); while (time_after(timeout, jiffies)) { - u32 inta_hw = iwl_read32(trans, - CSR_MSIX_HW_INT_CAUSES_AD); + u32 inta_hw = iwl_read32(trans, inta_addr); /* Error detected by uCode */ - if (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) { + if (inta_hw & sw_err_bit) { /* Clear causes register */ - iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, - inta_hw & - MSIX_HW_INT_CAUSES_REG_SW_ERR); + iwl_write32(trans, inta_addr, inta_hw & sw_err_bit); break; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 88530d9f4a54..38d110338987 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, cmd_str); ret = -ETIMEDOUT; - iwl_trans_sync_nmi(trans); + iwl_trans_pcie_sync_nmi(trans); goto cancel; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 9fbd37d23e85..7be73e2c4681 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, iwl_get_cmd_string(trans, cmd->id)); ret = -ETIMEDOUT; - iwl_trans_sync_nmi(trans); + iwl_trans_pcie_sync_nmi(trans); goto cancel; } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0838af04d681..524eb5805995 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, enum nl80211_band band; const struct ieee80211_ops *ops = &mac80211_hwsim_ops; struct net *net; - int idx; + int idx, i; int n_limits = 0; if (WARN_ON(param->channels > 1 && !param->use_chanctx)) @@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, goto failed_hw; } + data->if_combination.max_interfaces = 0; + for (i = 0; i < n_limits; i++) + data->if_combination.max_interfaces += + data->if_limits[i].max; + data->if_combination.n_limits = n_limits; - data->if_combination.max_interfaces = 2048; data->if_combination.limits = data->if_limits; - hw->wiphy->iface_combinations = &data->if_combination; - hw->wiphy->n_iface_combinations = 1; + /* + * If we actually were asked to support combinations, + * advertise them - if there's only a single thing like + * only IBSS then don't advertise it as combinations. 
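iwl_trans_pcie_sync_nmi() above picks the interrupt-cause register and SW-error bit according to whether MSI-X is in use, then polls until the firmware signals the error or a jiffies deadline expires. A generic sketch of that poll-with-deadline idiom follows; example_wait_for_sw_error() and its timeout are stand-ins (the driver uses IWL_TRANS_NMI_TIMEOUT), and the usleep_range() back-off is an addition for the sketch that assumes process context.

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/types.h>

/* Arbitrary deadline for the sketch. */
#define EXAMPLE_SW_ERR_TIMEOUT (HZ / 4)

static bool example_wait_for_sw_error(void __iomem *base, u32 cause_reg,
                                      u32 sw_err_bit)
{
        unsigned long deadline = jiffies + EXAMPLE_SW_ERR_TIMEOUT;

        while (time_after(deadline, jiffies)) {
                u32 cause = readl(base + cause_reg);

                if (cause & sw_err_bit) {
                        /* acknowledge only the bit we were waiting for */
                        writel(cause & sw_err_bit, base + cause_reg);
                        return true;
                }
                /* brief back-off between reads (not shown in the driver) */
                usleep_range(1000, 2000);
        }

        return false; /* deadline hit without the firmware raising the bit */
}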
+ */ + if (data->if_combination.max_interfaces > 1) { + hw->wiphy->iface_combinations = &data->if_combination; + hw->wiphy->n_iface_combinations = 1; + } if (param->ciphers) { memcpy(data->ciphers, param->ciphers, diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index a85648342d15..d5a70340a945 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev) adapter = card->adapter; - if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { + if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { mwifiex_dbg(adapter, WARN, "device already resumed\n"); return 0; diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 6eedc0ec7661..76629b98c78d 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, static void mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) { + iowrite32(q->desc_dma, &q->regs->desc_base); + iowrite32(q->ndesc, &q->regs->ring_size); q->head = ioread32(&q->regs->dma_idx); q->tail = q->head; iowrite32(q->head, &q->regs->cpu_idx); @@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) else mt76_dma_sync_idx(dev, q); - wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; + wake = wake && q->stopped && + qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; + if (wake) + q->stopped = false; if (!q->queued) wake_up(&dev->tx_wait); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index a033745adb2f..316167404729 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -679,19 +679,15 @@ out: return ret; } -static void -mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; - int idx = wcid->idx; - int i; + int i, idx = wcid->idx; rcu_assign_pointer(dev->wcid[idx], NULL); synchronize_rcu(); - mutex_lock(&dev->mutex); - if (dev->drv->sta_remove) dev->drv->sta_remove(dev, vif, sta); @@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, for (i = 0; i < ARRAY_SIZE(sta->txq); i++) mt76_txq_remove(dev, sta->txq[i]); mt76_wcid_free(dev->wcid_mask, idx); +} +EXPORT_SYMBOL_GPL(__mt76_sta_remove); +static void +mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + mutex_lock(&dev->mutex); + __mt76_sta_remove(dev, vif, sta); mutex_unlock(&dev->mutex); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 5dfb0601f101..bcbfd3c4a44b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -126,6 +126,7 @@ struct mt76_queue { int ndesc; int queued; int buf_size; + bool stopped; u8 buf_offset; u8 hw_idx; @@ -143,6 +144,7 @@ struct mt76_mcu_ops { const struct mt76_reg_pair *rp, int len); int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *rp, int len); + int (*mcu_restart)(struct mt76_dev *dev); }; struct mt76_queue_ops { @@ -693,6 +695,8 @@ int 
mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state); +void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c index afcd86f735b4..4dcb465095d1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c @@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg) out: mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false); - if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > - __sw_hweight8(dev->beacon_mask)) + if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask)) dev->beacon_check++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index d69e82c66ab2..b3ae0aaea62a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -27,12 +27,16 @@ static void mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) { __le32 *txd = (__le32 *)skb->data; + struct ieee80211_hdr *hdr; + struct ieee80211_sta *sta; struct mt7603_sta *msta; struct mt76_wcid *wcid; + void *priv; int idx; u32 val; + u8 tid; - if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr)) + if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr)) goto free; val = le32_to_cpu(txd[1]); @@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) if (!wcid) goto free; - msta = container_of(wcid, struct mt7603_sta, wcid); + priv = msta = container_of(wcid, struct mt7603_sta, wcid); val = le32_to_cpu(txd[0]); skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val)); + val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX); + val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT); + txd[0] = cpu_to_le32(val); + + sta = container_of(priv, struct ieee80211_sta, drv_priv); + hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE]; + tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + ieee80211_sta_set_buffered(sta, tid, true); + spin_lock_bh(&dev->ps_lock); __skb_queue_tail(&msta->psq, skb); if (skb_queue_len(&msta->psq) >= 64) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index 15cc8f33b34d..3af45949e868 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -112,7 +112,7 @@ static void mt7603_phy_init(struct mt7603_dev *dev) { int rx_chains = dev->mt76.antenna_mask; - int tx_chains = __sw_hweight8(rx_chains) - 1; + int tx_chains = hweight8(rx_chains) - 1; mt76_rmw(dev, MT_WF_RMAC_RMCR, (MT_WF_RMAC_RMCR_SMPS_MODE | @@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev) bus_ops->rmw = mt7603_rmw; dev->mt76.bus = bus_ops; + spin_lock_init(&dev->ps_lock); + INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work); tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet, (unsigned long)dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 0a0115861b51..5abc02b57818 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, 
u8 tid) MT_BA_CONTROL_1_RESET)); } -void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, +void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ba_size) { u32 addr = mt7603_wtbl2_addr(wcid); @@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, mt76_clear(dev, addr + (15 * 4), tid_mask); return; } - mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); - - mt7603_mac_stop(dev); - switch (tid) { - case 0: - mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn); - break; - case 1: - mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn); - break; - case 2: - mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO, - ssn); - mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI, - ssn >> 8); - break; - case 3: - mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn); - break; - case 4: - mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn); - break; - case 5: - mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO, - ssn); - mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI, - ssn >> 4); - break; - case 6: - mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn); - break; - case 7: - mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn); - break; - } - mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2); - mt7603_mac_start(dev); for (i = 7; i > 0; i--) { if (ba_size >= MT_AGG_SIZE_LIMIT(i)) @@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *rate = &info->control.rates[0]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; struct ieee80211_vif *vif = info->control.vif; struct mt7603_vif *mvif; int wlan_idx; @@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, int tx_count = 8; u8 frame_type, frame_subtype; u16 fc = le16_to_cpu(hdr->frame_control); + u16 seqno = 0; u8 vif_idx = 0; u32 val; u8 bw; @@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, tx_count = 0x1f; val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) | - FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl)); + MT_TXD3_SN_VALID; + + if (ieee80211_is_data_qos(hdr->frame_control)) + seqno = le16_to_cpu(hdr->seq_ctrl); + else if (ieee80211_is_back_req(hdr->frame_control)) + seqno = le16_to_cpu(bar->start_seq_num); + else + val &= ~MT_TXD3_SN_VALID; + + val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4); + txwi[3] = cpu_to_le32(val); if (key) { @@ -1072,7 +1047,7 @@ out: case MT_PHY_TYPE_HT: final_rate_flags |= IEEE80211_TX_RC_MCS; final_rate &= GENMASK(5, 0); - if (i > 15) + if (final_rate > 15) return false; break; default: diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index b10775ed92e6..a3c4ef198bfe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -5,6 +5,7 @@ #include <linux/pci.h> #include <linux/module.h> #include "mt7603.h" +#include "mac.h" #include "eeprom.h" static int @@ -371,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; struct sk_buff_head list; - mt76_stop_tx_queues(&dev->mt76, sta, false); + mt76_stop_tx_queues(&dev->mt76, sta, true); mt7603_wtbl_set_ps(dev, msta, ps); if (ps) return; @@ 
-386,6 +387,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) } static void +mt7603_ps_set_more_data(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE]; + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); +} + +static void mt7603_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int nframes, @@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw, __skb_queue_head_init(&list); + mt7603_wtbl_set_ps(dev, msta, false); + spin_lock_bh(&dev->ps_lock); skb_queue_walk_safe(&msta->psq, skb, tmp) { if (!nframes) @@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw, skb_set_queue_mapping(skb, MT_TXQ_PSD); __skb_unlink(skb, &msta->psq); + mt7603_ps_set_more_data(skb); __skb_queue_tail(&list, skb); nframes--; } spin_unlock_bh(&dev->ps_lock); + if (!skb_queue_empty(&list)) + ieee80211_sta_eosp(sta); + mt7603_ps_tx_list(dev, &list); if (nframes) @@ -568,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_OPERATIONAL: mtxq->aggr = true; mtxq->send_bar = false; - mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size); + mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); - mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); + mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1); break; case IEEE80211_AMPDU_TX_START: mtxq->agg_ssn = *ssn << 4; @@ -582,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); + mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c index 4b0713f1fd5e..d06905ea8cc6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c @@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev) { struct cfg80211_chan_def *chandef = &dev->mt76.chandef; struct ieee80211_hw *hw = mt76_hw(dev); - int n_chains = __sw_hweight8(dev->mt76.antenna_mask); + int n_chains = hweight8(dev->mt76.antenna_mask); struct { u8 control_chan; u8 center_chan; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index 79f332429432..6049f3b7c8fe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h @@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval); int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb); void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data); void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid); -void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, +void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ba_size); void mt7603_pse_client_reset(struct mt7603_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c index e13fea80d970..b920be1f5718 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c @@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev) } mem_base = devm_ioremap_resource(&pdev->dev, res); - if (!mem_base) { + if (IS_ERR(mem_base)) { dev_err(&pdev->dev, "Failed to get memory resource\n"); - return -EINVAL; + return PTR_ERR(mem_base); } mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h index 0290ba5869a5..736f81752b5b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h @@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = { { MT_MM20_PROT_CFG, 0x01742004 }, { MT_MM40_PROT_CFG, 0x03f42084 }, { MT_TXOP_CTRL_CFG, 0x0000583f }, - { MT_TX_RTS_CFG, 0x00092b20 }, + { MT_TX_RTS_CFG, 0x00ffff20 }, { MT_EXP_ACK_TIME, 0x002400ca }, { MT_TXOP_HLDR_ET, 0x00000002 }, { MT_XIFS_TIME_CFG, 0x33a41010 }, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 91718647da02..e5a06f74a6f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, struct usb_device *usb_dev = interface_to_usbdev(usb_intf); struct mt76x02_dev *dev; struct mt76_dev *mdev; - u32 asic_rev, mac_rev; + u32 mac_rev; int ret; mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops, @@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, goto err; } - asic_rev = mt76_rr(dev, MT_ASIC_VERSION); + mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); mac_rev = mt76_rr(dev, MT_MAC_CSR0); dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n", - asic_rev, mac_rev); + mdev->rev, mac_rev); + if (!is_mt76x0(dev)) { + ret = -ENODEV; + goto err; + } /* Note: vendor driver skips this check for MT76X0U */ if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 6915cce5def9..07061eb4d1e1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -51,6 +51,7 @@ struct mt76x02_calibration { u16 false_cca; s8 avg_rssi_all; s8 agc_gain_adjust; + s8 agc_lowest_gain; s8 low_gain; s8 temp_vco; @@ -114,8 +115,11 @@ struct mt76x02_dev { struct mt76x02_dfs_pattern_detector dfs_pd; /* edcca monitor */ + unsigned long ed_trigger_timeout; bool ed_tx_blocked; bool ed_monitor; + u8 ed_monitor_enabled; + u8 ed_monitor_learning; u8 ed_trigger; u8 ed_silent; ktime_t ed_time; @@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev); void mt76x02_init_debugfs(struct mt76x02_dev *dev); +static inline bool is_mt76x0(struct mt76x02_dev *dev) +{ + return mt76_chip(&dev->mt76) == 0x7610 || + mt76_chip(&dev->mt76) == 0x7630 || + mt76_chip(&dev->mt76) == 0x7650; +} + static inline bool is_mt76x2(struct mt76x02_dev *dev) { return mt76_chip(&dev->mt76) == 0x7612 || diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c index 7580c5c986ff..b1d6fd4861e3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c @@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data) return 0; } 
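The mt76x02_debugfs.c hunk that continues below exposes the new ed_monitor_enabled switch through an "edcca" file built with DEFINE_DEBUGFS_ATTRIBUTE(). A self-contained sketch of that get/set attribute pattern follows; struct example_dev and its flag are invented for illustration, not driver state.

#include <linux/debugfs.h>

/* Invented device state; stands in for struct mt76x02_dev in the hunk below. */
struct example_dev {
        bool flag;
        struct dentry *debugfs_dir;
};

static int example_flag_set(void *data, u64 val)
{
        struct example_dev *dev = data;

        dev->flag = !!val; /* apply whatever side effects the knob needs */
        return 0;
}

static int example_flag_get(void *data, u64 *val)
{
        struct example_dev *dev = data;

        *val = dev->flag;
        return 0;
}

/* Generates example_flag_fops with the simple_attr read/write plumbing. */
DEFINE_DEBUGFS_ATTRIBUTE(example_flag_fops, example_flag_get,
                         example_flag_set, "%lld\n");

static void example_register_debugfs(struct example_dev *dev)
{
        dev->debugfs_dir = debugfs_create_dir("example", NULL);
        debugfs_create_file("flag", 0600, dev->debugfs_dir, dev,
                            &example_flag_fops);
}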
+static int +mt76_edcca_set(void *data, u64 val) +{ + struct mt76x02_dev *dev = data; + enum nl80211_dfs_regions region = dev->dfs_pd.region; + + dev->ed_monitor_enabled = !!val; + dev->ed_monitor = dev->ed_monitor_enabled && + region == NL80211_DFS_ETSI; + mt76x02_edcca_init(dev, true); + + return 0; +} + +static int +mt76_edcca_get(void *data, u64 *val) +{ + struct mt76x02_dev *dev = data; + + *val = dev->ed_monitor_enabled; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set, + "%lld\n"); + void mt76x02_init_debugfs(struct mt76x02_dev *dev) { struct dentry *dir; @@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev) debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp); debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc); + debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca); debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c index e4649103efd4..17d12d212d1b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c @@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev, if (dfs_pd->region != region) { tasklet_disable(&dfs_pd->dfs_tasklet); - dev->ed_monitor = region == NL80211_DFS_ETSI; + dev->ed_monitor = dev->ed_monitor_enabled && + region == NL80211_DFS_ETSI; mt76x02_edcca_init(dev, true); dfs_pd->region = region; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 91ff6598eccf..4fe5a83ca5a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, } EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup); +void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx, + struct ieee80211_key_conf *key) +{ + enum mt76x02_cipher_type cipher; + u8 key_data[32]; + u32 iv, eiv; + u64 pn; + + cipher = mt76x02_mac_get_key_info(key, key_data); + iv = mt76_rr(dev, MT_WCID_IV(idx)); + eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4); + + pn = (u64)eiv << 16; + if (cipher == MT_CIPHER_TKIP) { + pn |= (iv >> 16) & 0xff; + pn |= (iv & 0xff) << 8; + } else if (cipher >= MT_CIPHER_AES_CCMP) { + pn |= iv & 0xffff; + } else { + return; + } + + atomic64_set(&key->tx_pn, pn); +} + + int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, struct ieee80211_key_conf *key) { enum mt76x02_cipher_type cipher; u8 key_data[32]; u8 iv_data[8]; + u64 pn; cipher = mt76x02_mac_get_key_info(key, key_data); if (cipher == MT_CIPHER_NONE && key) @@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, if (key) { mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); + + pn = atomic64_read(&key->tx_pn); + iv_data[3] = key->keyidx << 6; - if (cipher >= MT_CIPHER_TKIP) + if (cipher >= MT_CIPHER_TKIP) { iv_data[3] |= 0x20; + put_unaligned_le32(pn >> 16, &iv_data[4]); + } + + if (cipher == MT_CIPHER_TKIP) { + iv_data[0] = (pn >> 8) & 0xff; + iv_data[1] = (iv_data[0] | 0x20) & 0x7f; + iv_data[2] = pn & 0xff; + } else if (cipher >= MT_CIPHER_AES_CCMP) { + put_unaligned_le16((pn & 0xffff), &iv_data[0]); + } } mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, 
sizeof(iv_data)); @@ -426,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, return; rcu_read_lock(); - mt76_tx_status_lock(mdev, &list); if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); @@ -439,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, drv_priv); } + mt76_tx_status_lock(mdev, &list); + if (wcid) { if (stat->pktid >= MT_PACKET_ID_FIRST) status.skb = mt76_tx_status_skb_get(mdev, wcid, @@ -458,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, if (*update == 0 && stat_val == stat_cache && stat->wcid == msta->status.wcid && msta->n_frames < 32) { msta->n_frames++; - goto out; + mt76_tx_status_unlock(mdev, &list); + rcu_read_unlock(); + return; } mt76x02_mac_fill_tx_status(dev, status.info, &msta->status, @@ -474,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, if (status.skb) mt76_tx_status_skb_done(mdev, status.skb, &list); - else - ieee80211_tx_status_ext(mt76_hw(dev), &status); - -out: mt76_tx_status_unlock(mdev, &list); + + if (!status.skb) + ieee80211_tx_status_ext(mt76_hw(dev), &status); rcu_read_unlock(); } @@ -920,6 +962,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable) } } mt76x02_edcca_tx_enable(dev, true); + dev->ed_monitor_learning = true; /* clear previous CCA timer value */ mt76_rr(dev, MT_ED_CCA_TIMER); @@ -929,6 +972,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init); #define MT_EDCCA_TH 92 #define MT_EDCCA_BLOCK_TH 2 +#define MT_EDCCA_LEARN_TH 50 +#define MT_EDCCA_LEARN_CCA 180 +#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ) + static void mt76x02_edcca_check(struct mt76x02_dev *dev) { ktime_t cur_time; @@ -951,11 +998,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev) dev->ed_trigger = 0; } - if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && - !dev->ed_tx_blocked) + if (dev->cal.agc_lowest_gain && + dev->cal.false_cca > MT_EDCCA_LEARN_CCA && + dev->ed_trigger > MT_EDCCA_LEARN_TH) { + dev->ed_monitor_learning = false; + dev->ed_trigger_timeout = jiffies + 20 * HZ; + } else if (!dev->ed_monitor_learning && + time_is_after_jiffies(dev->ed_trigger_timeout)) { + dev->ed_monitor_learning = true; + mt76x02_edcca_tx_enable(dev, true); + } + + if (dev->ed_monitor_learning) + return; + + if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked) mt76x02_edcca_tx_enable(dev, false); - else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && - dev->ed_tx_blocked) + else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked) mt76x02_edcca_tx_enable(dev, true); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h index 6b1f25d2f64c..caeeef96c42f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h @@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, u8 key_idx, struct ieee80211_key_conf *key); int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, struct ieee80211_key_conf *key); +void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx, + struct ieee80211_key_conf *key); void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx, u8 *mac); void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 1229f19f2b02..daaed1220147 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ 
-19,6 +19,7 @@ #include <linux/irq.h> #include "mt76x02.h" +#include "mt76x02_mcu.h" #include "mt76x02_trace.h" struct beacon_bc_data { @@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev) return i < 4; } +static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, void *data) +{ + struct mt76x02_dev *dev = hw->priv; + struct mt76_wcid *wcid; + + if (!sta) + return; + + wcid = (struct mt76_wcid *) sta->drv_priv; + + if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv) + return; + + mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key); +} + +static void mt76x02_reset_state(struct mt76x02_dev *dev) +{ + int i; + + lockdep_assert_held(&dev->mt76.mutex); + + clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); + + rcu_read_lock(); + ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL); + rcu_read_unlock(); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) { + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + struct mt76x02_sta *msta; + struct mt76_wcid *wcid; + void *priv; + + wcid = rcu_dereference_protected(dev->mt76.wcid[i], + lockdep_is_held(&dev->mt76.mutex)); + if (!wcid) + continue; + + priv = msta = container_of(wcid, struct mt76x02_sta, wcid); + sta = container_of(priv, struct ieee80211_sta, drv_priv); + + priv = msta->vif; + vif = container_of(priv, struct ieee80211_vif, drv_priv); + + __mt76_sta_remove(&dev->mt76, vif, sta); + memset(msta, 0, sizeof(*msta)); + } + + dev->vif_mask = 0; + dev->beacon_mask = 0; +} + static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) { u32 mask = dev->mt76.mmio.irqmask; + bool restart = dev->mt76.mcu_ops->mcu_restart; int i; ieee80211_stop_queues(dev->mt76.hw); @@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) mutex_lock(&dev->mt76.mutex); + if (restart) + mt76x02_reset_state(dev); + if (dev->beacon_mask) mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_BEACON_TX | @@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) /* let fw reset DMA */ mt76_set(dev, 0x734, 0x3); + if (restart) + dev->mt76.mcu_ops->mcu_restart(&dev->mt76); + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) mt76_queue_tx_cleanup(dev, i, true); for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) mt76_queue_rx_reset(dev, i); - mt76_wr(dev, MT_MAC_SYS_CTRL, - MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); - mt76_set(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN); + mt76x02_mac_start(dev); + if (dev->ed_monitor) mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); - if (dev->beacon_mask) + if (dev->beacon_mask && !restart) mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_BEACON_TX | MT_BEACON_TIME_CFG_TBTT_EN); @@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) napi_schedule(&dev->mt76.napi[i]); } - ieee80211_wake_queues(dev->mt76.hw); - - mt76_txq_schedule_all(&dev->mt76); + if (restart) { + mt76x02_mcu_function_select(dev, Q_SELECT, 1); + ieee80211_restart_hw(dev->mt76.hw); + } else { + ieee80211_wake_queues(dev->mt76.hw); + mt76_txq_schedule_all(&dev->mt76); + } } static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c index a020c757ba5c..a54b63a96eae 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c @@ -194,6 +194,8 @@ bool 
mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev) ret = true; } + dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit; + return ret; } EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 43f07461c8d3..6fb52b596d42 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, mt76x02_insert_hdr_pad(skb); - txwi = skb_push(skb, sizeof(struct mt76x02_txwi)); + txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi)); mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len); + skb_push(skb, sizeof(struct mt76x02_txwi)); pid = mt76_tx_status_skb_add(mdev, wcid, skb); txwi->pktid = pid; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index a48c261b0c63..cd072ac614f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; int idx = 0; + memset(msta, 0, sizeof(*msta)); + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid)); if (idx < 0) return -ENOSPC; @@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; struct mt76_txq *mtxq; + memset(mvif, 0, sizeof(*mvif)); + mvif->idx = idx; mvif->group_wcid.idx = MT_VIF_WCID(idx); mvif->group_wcid.hw_key_idx = -1; @@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) struct mt76x02_dev *dev = hw->priv; unsigned int idx = 0; + /* Allow to change address in HW if we create first interface. */ + if (!dev->vif_mask && + (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) || + memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1))) + mt76x02_mac_setaddr(dev, vif->addr); + if (vif->addr[0] & BIT(1)) idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); @@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) if (dev->vif_mask & BIT(idx)) return -EBUSY; - /* Allow to change address in HW if we create first interface. 
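The mt76x02_usb_core.c hunk above now composes the TX descriptor at skb->data - sizeof(txwi) and only afterwards calls skb_push(), so skb->data and skb->len still describe the 802.11 frame while the descriptor is filled. A small sketch of that prepend idiom follows; struct example_txd is a made-up layout, not the real struct mt76x02_txwi.

#include <linux/skbuff.h>
#include <asm/byteorder.h>

/* Made-up descriptor layout for illustration only. */
struct example_txd {
        __le16 frame_len;
        __le16 flags;
};

/* Fill the descriptor in the headroom first, while skb->data and skb->len
 * still describe the frame, then claim the space with skb_push().
 * The caller is assumed to have reserved at least sizeof(*txd) headroom. */
static void example_prepend_txd(struct sk_buff *skb, u16 flags)
{
        struct example_txd *txd;

        txd = (struct example_txd *)(skb->data - sizeof(*txd));
        txd->frame_len = cpu_to_le16(skb->len);
        txd->flags = cpu_to_le16(flags);

        skb_push(skb, sizeof(*txd));
}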
*/ - if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr)) - mt76x02_mac_setaddr(dev, vif->addr); - dev->vif_mask |= BIT(idx); mt76x02_vif_init(dev, vif, idx); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c index f8534362e2c8..a30ef2c5a9db 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c @@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev) { MT_TX_SW_CFG1, 0x00010000 }, { MT_TX_SW_CFG2, 0x00000000 }, { MT_TXOP_CTRL_CFG, 0x0400583f }, - { MT_TX_RTS_CFG, 0x00100020 }, + { MT_TX_RTS_CFG, 0x00ffff20 }, { MT_TX_TIMEOUT_CFG, 0x000a2290 }, { MT_TX_RETRY_CFG, 0x47f01f0f }, { MT_EXP_ACK_TIME, 0x002c00dc }, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index 6c619f1c65c9..d7abe3d73bad 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, void mt76x2_cleanup(struct mt76x02_dev *dev); +int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard); void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable); void mt76x2_init_txpower(struct mt76x02_dev *dev, struct ieee80211_supported_band *sband); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index 984d9c4c2e1a..d3927a13e92e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev) } } -static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) +int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) { const u8 *macaddr = dev->mt76.macaddr; u32 val; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c index 03e24ae7f66c..605dc66ae83b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c @@ -165,9 +165,30 @@ error: return -ENOENT; } +static int +mt76pci_mcu_restart(struct mt76_dev *mdev) +{ + struct mt76x02_dev *dev; + int ret; + + dev = container_of(mdev, struct mt76x02_dev, mt76); + + mt76x02_mcu_cleanup(dev); + mt76x2_mac_reset(dev, true); + + ret = mt76pci_load_firmware(dev); + if (ret) + return ret; + + mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); + + return 0; +} + int mt76x2_mcu_init(struct mt76x02_dev *dev) { static const struct mt76_mcu_ops mt76x2_mcu_ops = { + .mcu_restart = mt76pci_mcu_restart, .mcu_send_msg = mt76x02_mcu_msg_send, }; int ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c index 1848e8ab2e21..769a9b972044 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c @@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev) gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust; - if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) + val = 0x1836 << 16; + if (!mt76x2_has_ext_lna(dev) && + dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) val = 0x1e42 << 16; - else - val = 0x1836 << 16; + + if (mt76x2_has_ext_lna(dev) && + dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ && + dev->mt76.chandef.width < 
NL80211_CHAN_WIDTH_40) + val = 0x0f36 << 16; val |= 0xf8; @@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) { u8 *gain = dev->cal.agc_gain_init; u8 low_gain_delta, gain_delta; + u32 agc_35, agc_37; bool gain_change; int low_gain; u32 val; @@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) else low_gain_delta = 14; + agc_37 = 0x2121262c; + if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) + agc_35 = 0x11111516; + else if (low_gain == 2) + agc_35 = agc_37 = 0x08080808; + else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) + agc_35 = 0x10101014; + else + agc_35 = 0x11111116; + if (low_gain == 2) { mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); @@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) dev->cal.agc_gain_adjust = 0; } else { mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); - if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) - mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014); - else - mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116); - mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C); gain_delta = 0; dev->cal.agc_gain_adjust = low_gain_delta; } + mt76_wr(dev, MT_BBP(AGC, 35), agc_35); + mt76_wr(dev, MT_BBP(AGC, 37), agc_37); + dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; mt76x2_phy_set_gain_val(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index ddb6b2c48e01..ac0f13d46299 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -21,11 +21,10 @@ #include "mt76x2u.h" static const struct usb_device_id mt76x2u_device_table[] = { - { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */ { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */ { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */ { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */ - { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */ + { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */ { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ @@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf, mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); + if (!is_mt76x2(dev)) { + err = -ENODEV; + goto err; + } err = mt76x2u_register_device(dev); if (err < 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c index 5e84b4535cb1..3b82345756ea 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c @@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev) mt76_wr(dev, MT_TX_LINK_CFG, 0x1020); mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13); mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00); - mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20); mt76_wr(dev, MT_WMM_AIFSN, 0x2273); mt76_wr(dev, MT_WMM_CWMIN, 0x2344); diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 5a349fe3e576..2585df512335 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta); dev->queue_ops->kick(dev, q); - if (q->queued > q->ndesc - 8) + if (q->queued > 
q->ndesc - 8 && !q->stopped) { ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); + q->stopped = true; + } + spin_unlock_bh(&q->lock); } EXPORT_SYMBOL_GPL(mt76_tx); @@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, if (last_skb) { mt76_queue_ps_skb(dev, sta, last_skb, true); dev->queue_ops->kick(dev, hwq); + } else { + ieee80211_sta_eosp(sta); } + spin_unlock_bh(&hwq->lock); } EXPORT_SYMBOL_GPL(mt76_release_buffered_frames); @@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; struct mt76_queue *hwq = mtxq->hwq; + if (!test_bit(MT76_STATE_RUNNING, &dev->state)) + return; + spin_lock_bh(&hwq->lock); if (list_empty(&mtxq->list)) list_add_tail(&mtxq->list, &hwq->swq); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index ae6ada370597..4c1abd492405 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data) spin_lock_bh(&q->lock); } mt76_txq_schedule(dev, q); - wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; + + wake = q->stopped && q->queued < q->ndesc - 8; + if (wake) + q->stopped = false; + if (!q->queued) wake_up(&dev->tx_wait); diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c index d8b7863f7926..6ae7f14dc9bf 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf, mac_rev = mt7601u_rr(dev, MT_MAC_CSR0); dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n", asic_rev, mac_rev); + if ((asic_rev >> 16) != 0x7601) { + ret = -ENODEV; + goto err; + } /* Note: vendor driver skips this check for MT7601U */ if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 4b1744e9fb78..50b92ca92bd7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -673,7 +673,6 @@ enum rt2x00_state_flags { CONFIG_CHANNEL_HT40, CONFIG_POWERSAVING, CONFIG_HT_DISABLED, - CONFIG_QOS_DISABLED, CONFIG_MONITORING, /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 2825560e2424..e8462f25d252 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -642,19 +642,9 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, rt2x00dev->intf_associated--; rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); - - clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); } /* - * Check for access point which do not support 802.11e . We have to - * generate data frames sequence number in S/W for such AP, because - * of H/W bug. - */ - if (changes & BSS_CHANGED_QOS && !bss_conf->qos) - set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); - - /* * When the erp information has changed, we should perform * additional configuration steps. For all other changes we are done. 
*/ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 92ddc19e7bf7..4834b4eb0206 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) { /* * rt2800 has a H/W (or F/W) bug, device incorrectly increase - * seqno on retransmited data (non-QOS) frames. To workaround - * the problem let's generate seqno in software if QOS is - * disabled. + * seqno on retransmitted data (non-QOS) and management frames. + * To workaround the problem let's generate seqno in software. + * Except for beacons which are transmitted periodically by H/W + * hence hardware has to assign seqno for them. */ - if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags)) - __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); - else + if (ieee80211_is_beacon(hdr->frame_control)) { + __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); /* H/W will generate sequence number */ return; + } + + __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); } /* diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index 2b26f762fbc3..01acb6e53365 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -1074,6 +1074,12 @@ static const struct spi_device_id st95hf_id[] = { }; MODULE_DEVICE_TABLE(spi, st95hf_id); +static const struct of_device_id st95hf_spi_of_match[] = { + { .compatible = "st,st95hf" }, + { }, +}; +MODULE_DEVICE_TABLE(of, st95hf_spi_of_match); + static int st95hf_probe(struct spi_device *nfc_spi_dev) { int ret; @@ -1260,6 +1266,7 @@ static struct spi_driver st95hf_driver = { .driver = { .name = "st95hf", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(st95hf_spi_of_match), }, .id_table = st95hf_id, .probe = st95hf_probe, diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index b72a303176c7..9486acc08402 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -198,14 +198,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, return NULL; nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL); - if (nd_btt->id < 0) { - kfree(nd_btt); - return NULL; - } + if (nd_btt->id < 0) + goto out_nd_btt; nd_btt->lbasize = lbasize; - if (uuid) + if (uuid) { uuid = kmemdup(uuid, 16, GFP_KERNEL); + if (!uuid) + goto out_put_id; + } nd_btt->uuid = uuid; dev = &nd_btt->dev; dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id); @@ -220,6 +221,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, return NULL; } return dev; + +out_put_id: + ida_simple_remove(&nd_region->btt_ida, nd_btt->id); + +out_nd_btt: + kfree(nd_btt); + return NULL; } struct device *nd_btt_create(struct nd_region *nd_region) diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 7849bf1812c4..f293556cbbf6 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -2249,9 +2249,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, if (!nsblk->uuid) goto blk_err; memcpy(name, nd_label->name, NSLABEL_NAME_LEN); - if (name[0]) + if (name[0]) { nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL); + if (!nsblk->alt_name) + goto blk_err; + } res = nsblk_add_resource(nd_region, ndd, nsblk, __le64_to_cpu(nd_label->dpa)); if (!res) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 
bc2f700feef8..0279eb1da3ef 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page, while (len) { mem = kmap_atomic(page); - chunk = min_t(unsigned int, len, PAGE_SIZE); + chunk = min_t(unsigned int, len, PAGE_SIZE - off); memcpy_flushcache(pmem_addr, mem + off, chunk); kunmap_atomic(mem); len -= chunk; off = 0; page++; - pmem_addr += PAGE_SIZE; + pmem_addr += chunk; } } @@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off, while (len) { mem = kmap_atomic(page); - chunk = min_t(unsigned int, len, PAGE_SIZE); + chunk = min_t(unsigned int, len, PAGE_SIZE - off); rem = memcpy_mcsafe(mem + off, pmem_addr, chunk); kunmap_atomic(mem); if (rem) @@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off, len -= chunk; off = 0; page++; - pmem_addr += PAGE_SIZE; + pmem_addr += chunk; } return BLK_STS_OK; } diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index f8bb746a549f..a570f2263a42 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -22,6 +22,8 @@ static bool key_revalidate = true; module_param(key_revalidate, bool, 0444); MODULE_PARM_DESC(key_revalidate, "Require key validation at init."); +static const char zero_key[NVDIMM_PASSPHRASE_LEN]; + static void *key_data(struct key *key) { struct encrypted_key_payload *epayload = dereference_key_locked(key); @@ -75,6 +77,16 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm) return key; } +static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm, + struct key **key) +{ + *key = nvdimm_request_key(nvdimm); + if (!*key) + return zero_key; + + return key_data(*key); +} + static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm, key_serial_t id, int subclass) { @@ -105,36 +117,57 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm, return key; } -static struct key *nvdimm_key_revalidate(struct nvdimm *nvdimm) +static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm, + key_serial_t id, int subclass, struct key **key) +{ + *key = NULL; + if (id == 0) { + if (subclass == NVDIMM_BASE_KEY) + return zero_key; + else + return NULL; + } + + *key = nvdimm_lookup_user_key(nvdimm, id, subclass); + if (!*key) + return NULL; + + return key_data(*key); +} + + +static int nvdimm_key_revalidate(struct nvdimm *nvdimm) { struct key *key; int rc; + const void *data; if (!nvdimm->sec.ops->change_key) - return NULL; + return -EOPNOTSUPP; - key = nvdimm_request_key(nvdimm); - if (!key) - return NULL; + data = nvdimm_get_key_payload(nvdimm, &key); /* * Send the same key to the hardware as new and old key to * verify that the key is good. 
*/ - rc = nvdimm->sec.ops->change_key(nvdimm, key_data(key), - key_data(key), NVDIMM_USER); + rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER); if (rc < 0) { nvdimm_put_key(key); - key = NULL; + return rc; } - return key; + + nvdimm_put_key(key); + nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER); + return 0; } static int __nvdimm_security_unlock(struct nvdimm *nvdimm) { struct device *dev = &nvdimm->dev; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); - struct key *key = NULL; + struct key *key; + const void *data; int rc; /* The bus lock should be held at the top level of the call stack */ @@ -160,16 +193,11 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm) if (!key_revalidate) return 0; - key = nvdimm_key_revalidate(nvdimm); - if (!key) - return nvdimm_security_freeze(nvdimm); + return nvdimm_key_revalidate(nvdimm); } else - key = nvdimm_request_key(nvdimm); + data = nvdimm_get_key_payload(nvdimm, &key); - if (!key) - return -ENOKEY; - - rc = nvdimm->sec.ops->unlock(nvdimm, key_data(key)); + rc = nvdimm->sec.ops->unlock(nvdimm, data); dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key), rc == 0 ? "success" : "fail"); @@ -195,6 +223,7 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid) struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); struct key *key; int rc; + const void *data; /* The bus lock should be held at the top level of the call stack */ lockdep_assert_held(&nvdimm_bus->reconfig_mutex); @@ -214,11 +243,12 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid) return -EBUSY; } - key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); - if (!key) + data = nvdimm_get_user_key_payload(nvdimm, keyid, + NVDIMM_BASE_KEY, &key); + if (!data) return -ENOKEY; - rc = nvdimm->sec.ops->disable(nvdimm, key_data(key)); + rc = nvdimm->sec.ops->disable(nvdimm, data); dev_dbg(dev, "key: %d disable: %s\n", key_serial(key), rc == 0 ? "success" : "fail"); @@ -235,6 +265,7 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid, struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); struct key *key, *newkey; int rc; + const void *data, *newdata; /* The bus lock should be held at the top level of the call stack */ lockdep_assert_held(&nvdimm_bus->reconfig_mutex); @@ -249,22 +280,19 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid, return -EIO; } - if (keyid == 0) - key = NULL; - else { - key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); - if (!key) - return -ENOKEY; - } + data = nvdimm_get_user_key_payload(nvdimm, keyid, + NVDIMM_BASE_KEY, &key); + if (!data) + return -ENOKEY; - newkey = nvdimm_lookup_user_key(nvdimm, new_keyid, NVDIMM_NEW_KEY); - if (!newkey) { + newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid, + NVDIMM_NEW_KEY, &newkey); + if (!newdata) { nvdimm_put_key(key); return -ENOKEY; } - rc = nvdimm->sec.ops->change_key(nvdimm, key ? key_data(key) : NULL, - key_data(newkey), pass_type); + rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type); dev_dbg(dev, "key: %d %d update%s: %s\n", key_serial(key), key_serial(newkey), pass_type == NVDIMM_MASTER ? 
"(master)" : "(user)", @@ -286,8 +314,9 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid, { struct device *dev = &nvdimm->dev; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); - struct key *key; + struct key *key = NULL; int rc; + const void *data; /* The bus lock should be held at the top level of the call stack */ lockdep_assert_held(&nvdimm_bus->reconfig_mutex); @@ -319,11 +348,12 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid, return -EOPNOTSUPP; } - key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); - if (!key) + data = nvdimm_get_user_key_payload(nvdimm, keyid, + NVDIMM_BASE_KEY, &key); + if (!data) return -ENOKEY; - rc = nvdimm->sec.ops->erase(nvdimm, key_data(key), pass_type); + rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type); dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key), pass_type == NVDIMM_MASTER ? "(master)" : "(user)", rc == 0 ? "success" : "fail"); @@ -337,8 +367,9 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) { struct device *dev = &nvdimm->dev; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); - struct key *key; + struct key *key = NULL; int rc; + const void *data; /* The bus lock should be held at the top level of the call stack */ lockdep_assert_held(&nvdimm_bus->reconfig_mutex); @@ -368,15 +399,12 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) return -EBUSY; } - if (keyid == 0) - key = NULL; - else { - key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); - if (!key) - return -ENOKEY; - } + data = nvdimm_get_user_key_payload(nvdimm, keyid, + NVDIMM_BASE_KEY, &key); + if (!data) + return -ENOKEY; - rc = nvdimm->sec.ops->overwrite(nvdimm, key ? key_data(key) : NULL); + rc = nvdimm->sec.ops->overwrite(nvdimm, data); dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key), rc == 0 ? 
"success" : "fail"); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 470601980794..2c43e12b70af 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -288,7 +288,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) "Cancelling I/O %d", req->tag); nvme_req(req)->status = NVME_SC_ABORT_REQ; - blk_mq_complete_request(req); + blk_mq_complete_request_sync(req); return true; } EXPORT_SYMBOL_GPL(nvme_cancel_request); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index f3b9d91ba0df..6d8451356eac 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) memset(queue, 0, sizeof(*queue)); queue->ctrl = ctrl; queue->qnum = idx; - atomic_set(&queue->csn, 1); + atomic_set(&queue->csn, 0); queue->dev = ctrl->dev; if (idx > 0) @@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) */ queue->connection_id = 0; - atomic_set(&queue->csn, 1); + atomic_set(&queue->csn, 0); } static void @@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, { struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; struct nvme_command *sqe = &cmdiu->sqe; - u32 csn; int ret, opstate; /* @@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, /* format the FC-NVME CMD IU and fcp_req */ cmdiu->connection_id = cpu_to_be64(queue->connection_id); - csn = atomic_inc_return(&queue->csn); - cmdiu->csn = cpu_to_be32(csn); cmdiu->data_len = cpu_to_be32(data_len); switch (io_dir) { case NVMEFC_FCP_WRITE: @@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, if (!(op->flags & FCOP_FLAGS_AEN)) blk_mq_start_request(op->rq); + cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, &ctrl->rport->remoteport, queue->lldd_handle, &op->fcp_req); if (ret) { + /* + * If the lld fails to send the command is there an issue with + * the csn value? If the command that fails is the Connect, + * no - as the connection won't be live. If it is a command + * post-connect, it's possible a gap in csn may be created. + * Does this matter? As Linux initiators don't send fused + * commands, no. The gap would exist, but as there's nothing + * that depends on csn order to be delivered on the target + * side, it shouldn't hurt. It would be difficult for a + * target to even detect the csn gap as it has no idea when the + * cmd with the csn was supposed to arrive. 
+ */ opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 2839bb70badf..f0716f6ce41f 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state) static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, struct nvme_ns *ns) { - enum nvme_ana_state old; - mutex_lock(&ns->head->lock); - old = ns->ana_state; ns->ana_grpid = le32_to_cpu(desc->grpid); ns->ana_state = desc->state; clear_bit(NVME_NS_ANA_PENDING, &ns->flags); - if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old)) + if (nvme_state_is_live(ns->ana_state)) nvme_mpath_set_live(ns); mutex_unlock(&ns->head->lock); } diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index e7e08889865e..68c49dd67210 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, return ret; } -static inline void nvme_tcp_end_request(struct request *rq, __le16 status) +static inline void nvme_tcp_end_request(struct request *rq, u16 status) { union nvme_result res = {}; diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 76250181fee0..9f72d515fc4b 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd) return len; } +u64 nvmet_get_log_page_offset(struct nvme_command *cmd) +{ + return le64_to_cpu(cmd->get_log_page.lpo); +} + static void nvmet_execute_get_log_page_noop(struct nvmet_req *req) { nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len)); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 2d73b66e3686..b3e765a95af8 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns) ret = nvmet_p2pmem_ns_enable(ns); if (ret) - goto out_unlock; + goto out_dev_disable; list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) nvmet_p2pmem_ns_add_p2p(ctrl, ns); @@ -550,7 +550,7 @@ out_unlock: out_dev_put: list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); - +out_dev_disable: nvmet_ns_dev_disable(ns); goto out_unlock; } diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index c872b47a88f3..33ed95e72d6b 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); } +static size_t discovery_log_entries(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_subsys_link *p; + struct nvmet_port *r; + size_t entries = 0; + + list_for_each_entry(p, &req->port->subsystems, entry) { + if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn)) + continue; + entries++; + } + list_for_each_entry(r, &req->port->referrals, entry) + entries++; + return entries; +} + static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) { const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry); struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmf_disc_rsp_page_hdr *hdr; + u64 offset = nvmet_get_log_page_offset(req->cmd); size_t data_len = nvmet_get_log_page_len(req->cmd); - 
size_t alloc_len = max(data_len, sizeof(*hdr)); - int residual_len = data_len - sizeof(*hdr); + size_t alloc_len; struct nvmet_subsys_link *p; struct nvmet_port *r; u32 numrec = 0; u16 status = 0; + void *buffer; + + /* Spec requires dword aligned offsets */ + if (offset & 0x3) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + goto out; + } /* * Make sure we're passing at least a buffer of response header size. * If host provided data len is less than the header size, only the * number of bytes requested by host will be sent to host. */ - hdr = kzalloc(alloc_len, GFP_KERNEL); - if (!hdr) { + down_read(&nvmet_config_sem); + alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req); + buffer = kzalloc(alloc_len, GFP_KERNEL); + if (!buffer) { + up_read(&nvmet_config_sem); status = NVME_SC_INTERNAL; goto out; } - down_read(&nvmet_config_sem); + hdr = buffer; list_for_each_entry(p, &req->port->subsystems, entry) { + char traddr[NVMF_TRADDR_SIZE]; + if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn)) continue; - if (residual_len >= entry_size) { - char traddr[NVMF_TRADDR_SIZE]; - - nvmet_set_disc_traddr(req, req->port, traddr); - nvmet_format_discovery_entry(hdr, req->port, - p->subsys->subsysnqn, traddr, - NVME_NQN_NVME, numrec); - residual_len -= entry_size; - } + + nvmet_set_disc_traddr(req, req->port, traddr); + nvmet_format_discovery_entry(hdr, req->port, + p->subsys->subsysnqn, traddr, + NVME_NQN_NVME, numrec); numrec++; } list_for_each_entry(r, &req->port->referrals, entry) { - if (residual_len >= entry_size) { - nvmet_format_discovery_entry(hdr, r, - NVME_DISC_SUBSYS_NAME, - r->disc_addr.traddr, - NVME_NQN_DISC, numrec); - residual_len -= entry_size; - } + nvmet_format_discovery_entry(hdr, r, + NVME_DISC_SUBSYS_NAME, + r->disc_addr.traddr, + NVME_NQN_DISC, numrec); numrec++; } @@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) up_read(&nvmet_config_sem); - status = nvmet_copy_to_sgl(req, 0, hdr, data_len); - kfree(hdr); + status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len); + kfree(buffer); out: nvmet_req_complete(req, status); } diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 3e43212d3c1c..bc6ebb51b0bf 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -75,11 +75,11 @@ err: return ret; } -static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter) +static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg) { - bv->bv_page = sg_page_iter_page(iter); - bv->bv_offset = iter->sg->offset; - bv->bv_len = PAGE_SIZE - iter->sg->offset; + bv->bv_page = sg_page(sg); + bv->bv_offset = sg->offset; + bv->bv_len = sg->length; } static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, @@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2) static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) { - ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); - struct sg_page_iter sg_pg_iter; + ssize_t nr_bvec = req->sg_cnt; unsigned long bv_cnt = 0; bool is_sync = false; size_t len = 0, total_len = 0; ssize_t ret = 0; loff_t pos; - + int i; + struct scatterlist *sg; if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC) is_sync = true; @@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) } memset(&req->f.iocb, 0, sizeof(struct kiocb)); - for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) { - 
nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter); + for_each_sg(req->sg, sg, req->sg_cnt, i) { + nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg); len += req->f.bvec[bv_cnt].bv_len; total_len += req->f.bvec[bv_cnt].bv_len; bv_cnt++; @@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req) static void nvmet_file_execute_rw(struct nvmet_req *req) { - ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); + ssize_t nr_bvec = req->sg_cnt; if (!req->sg_cnt || !nr_bvec) { nvmet_req_complete(req, 0); diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 51e49efd7849..1653d19b187f 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len); u32 nvmet_get_log_page_len(struct nvme_command *cmd); +u64 nvmet_get_log_page_offset(struct nvme_command *cmd); extern struct list_head *nvmet_ports; void nvmet_port_disc_changed(struct nvmet_port *port, diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c index 810ab0fbcccb..d820f3edd431 100644 --- a/drivers/of/of_net.c +++ b/drivers/of/of_net.c @@ -7,7 +7,6 @@ */ #include <linux/etherdevice.h> #include <linux/kernel.h> -#include <linux/nvmem-consumer.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/export.h> diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 1be571c20062..6bad04cbb1d3 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -157,8 +157,12 @@ #define DBG_IRT(x...) #endif +#ifdef CONFIG_64BIT +#define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa)) +#else #define COMPARE_IRTE_ADDR(irte, hpa) \ - ((irte)->dest_iosapic_addr == F_EXTEND(hpa)) + ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL)) +#endif #define IOSAPIC_REG_SELECT 0x00 #define IOSAPIC_REG_WINDOW 0x10 diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 56dd83a45e55..5484a46dafda 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -213,12 +213,10 @@ void parport_daisy_fini(struct parport *port) struct pardevice *parport_open(int devnum, const char *name) { struct daisydev *p = topology; - struct pardev_cb par_cb; struct parport *port; struct pardevice *dev; int daisy; - memset(&par_cb, 0, sizeof(par_cb)); spin_lock(&topology_lock); while (p && p->devnum != devnum) p = p->next; @@ -232,7 +230,7 @@ struct pardevice *parport_open(int devnum, const char *name) port = parport_get_port(p->port); spin_unlock(&topology_lock); - dev = parport_register_dev_model(port, name, &par_cb, devnum); + dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL); parport_put_port(port); if (!dev) return NULL; @@ -482,31 +480,3 @@ static int assign_addrs(struct parport *port) kfree(deviceid); return detected; } - -static int daisy_drv_probe(struct pardevice *par_dev) -{ - struct device_driver *drv = par_dev->dev.driver; - - if (strcmp(drv->name, "daisy_drv")) - return -ENODEV; - if (strcmp(par_dev->name, daisy_dev_name)) - return -ENODEV; - - return 0; -} - -static struct parport_driver daisy_driver = { - .name = "daisy_drv", - .probe = daisy_drv_probe, - .devmodel = true, -}; - -int daisy_drv_init(void) -{ - return parport_register_driver(&daisy_driver); -} - -void daisy_drv_exit(void) -{ - parport_unregister_driver(&daisy_driver); -} diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index e5e6a463a941..e035174ba205 100644 --- 
a/drivers/parport/probe.c +++ b/drivers/parport/probe.c @@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer, ssize_t parport_device_id (int devnum, char *buffer, size_t count) { ssize_t retval = -ENXIO; - struct pardevice *dev = parport_open(devnum, daisy_dev_name); + struct pardevice *dev = parport_open (devnum, "Device ID probe"); if (!dev) return -ENXIO; diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 0171b8dbcdcd..5dc53d420ca8 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -137,19 +137,11 @@ static struct bus_type parport_bus_type = { int parport_bus_init(void) { - int retval; - - retval = bus_register(&parport_bus_type); - if (retval) - return retval; - daisy_drv_init(); - - return 0; + return bus_register(&parport_bus_type); } void parport_bus_exit(void) { - daisy_drv_exit(); bus_unregister(&parport_bus_type); } diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 3f3df4c29f6e..905282a8ddaa 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -115,6 +115,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal) * removed from the slot/adapter. */ msleep(1000); + + /* Ignore link or presence changes caused by power off */ + atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC), + &ctrl->pending_events); } /* turn off Green LED */ diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7c1b362f599a..766f5779db92 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -6262,8 +6262,7 @@ static int __init pci_setup(char *str) } else if (!strncmp(str, "pcie_scan_all", 13)) { pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); } else if (!strncmp(str, "disable_acs_redir=", 18)) { - disable_acs_redir_param = - kstrdup(str + 18, GFP_KERNEL); + disable_acs_redir_param = str + 18; } else { printk(KERN_ERR "PCI: Unknown option `%s'\n", str); @@ -6274,3 +6273,19 @@ static int __init pci_setup(char *str) return 0; } early_param("pci", pci_setup); + +/* + * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point + * to data in the __initdata section which will be freed after the init + * sequence is complete. We can't allocate memory in pci_setup() because some + * architectures do not have any memory allocation service available during + * an early_param() call. So we allocate memory and copy the variable here + * before the init section is freed. + */ +static int __init pci_realloc_setup_params(void) +{ + disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL); + + return 0; +} +pure_initcall(pci_realloc_setup_params); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 224d88634115..d994839a3e24 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev); u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, enum pcie_link_width *width); void __pcie_print_link_status(struct pci_dev *dev, bool verbose); +void pcie_report_downtraining(struct pci_dev *dev); /* Single Root I/O Virtualization */ struct pci_sriov { diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 5cbdbca904ac..362eb8cfa53b 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -142,3 +142,11 @@ config PCIE_PTM This is only useful if you have devices that support PTM, but it is safe to enable even if you don't. 
+ +config PCIE_BW + bool "PCI Express Bandwidth Change Notification" + depends on PCIEPORTBUS + help + This enables PCI Express Bandwidth Change Notification. If + you know link width or rate changes occur only to correct + unreliable links, you may answer Y. diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index f1d7bc1e5efa..efb9d2e71e9e 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile @@ -3,7 +3,6 @@ # Makefile for PCI Express features and port driver pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o -pcieportdrv-y += bw_notification.o obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o @@ -13,3 +12,4 @@ obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o obj-$(CONFIG_PCIE_PME) += pme.o obj-$(CONFIG_PCIE_DPC) += dpc.o obj-$(CONFIG_PCIE_PTM) += ptm.o +obj-$(CONFIG_PCIE_BW) += bw_notification.o diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c index d2eae3b7cc0f..4fa9e3523ee1 100644 --- a/drivers/pci/pcie/bw_notification.c +++ b/drivers/pci/pcie/bw_notification.c @@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev) { u16 lnk_ctl; + pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS); + pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl); lnk_ctl |= PCI_EXP_LNKCTL_LBMIE; pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); @@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev) pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); } -static irqreturn_t pcie_bw_notification_handler(int irq, void *context) +static irqreturn_t pcie_bw_notification_irq(int irq, void *context) { struct pcie_device *srv = context; struct pci_dev *port = srv->port; - struct pci_dev *dev; u16 link_status, events; int ret; @@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context) if (ret != PCIBIOS_SUCCESSFUL || !events) return IRQ_NONE; + pcie_capability_write_word(port, PCI_EXP_LNKSTA, events); + pcie_update_link_speed(port->subordinate, link_status); + return IRQ_WAKE_THREAD; +} + +static irqreturn_t pcie_bw_notification_handler(int irq, void *context) +{ + struct pcie_device *srv = context; + struct pci_dev *port = srv->port; + struct pci_dev *dev; + /* * Print status from downstream devices, not this root port or * downstream switch port. 
*/ down_read(&pci_bus_sem); list_for_each_entry(dev, &port->subordinate->devices, bus_list) - __pcie_print_link_status(dev, false); + pcie_report_downtraining(dev); up_read(&pci_bus_sem); - pcie_update_link_speed(port->subordinate, link_status); - pcie_capability_write_word(port, PCI_EXP_LNKSTA, events); return IRQ_HANDLED; } @@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv) if (!pcie_link_bandwidth_notification_supported(srv->port)) return -ENODEV; - ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler, + ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq, + pcie_bw_notification_handler, IRQF_SHARED, "PCIe BW notif", srv); if (ret) return ret; diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 1d50dc58ac40..944827a8c7d3 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -49,7 +49,11 @@ int pcie_dpc_init(void); static inline int pcie_dpc_init(void) { return 0; } #endif +#ifdef CONFIG_PCIE_BW int pcie_bandwidth_notification_init(void); +#else +static inline int pcie_bandwidth_notification_init(void) { return 0; } +#endif /* Port Type */ #define PCIE_ANY_PORT (~0) diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 7d04f9d087a6..1b330129089f 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -55,7 +55,8 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask, * 7.8.2, 7.10.10, 7.31.2. */ - if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) { + if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | + PCIE_PORT_SERVICE_BWNOTIF)) { pcie_capability_read_word(dev, PCI_EXP_FLAGS, ®16); *pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; nvec = *pme + 1; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 2ec0df04e0dc..7e12d0163863 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) return dev; } -static void pcie_report_downtraining(struct pci_dev *dev) +void pcie_report_downtraining(struct pci_dev *dev) { if (!pci_is_pcie(dev)) return; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a59ad09ce911..a077f67fe1da 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170, + quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172, quirk_dma_func1_alias); diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 5163097b43df..4bbd9ede38c8 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy, struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy); int new_mode; - if (phy->index != 0) + if (phy->index != 0) { + if (mode == PHY_MODE_USB_HOST) + return 0; return -EINVAL; + } switch (mode) { case PHY_MODE_USB_HOST: diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c index 900c7073c46f..71308766e891 100644 --- a/drivers/platform/chrome/cros_ec_debugfs.c +++ b/drivers/platform/chrome/cros_ec_debugfs.c @@ -440,7 +440,7 @@ 
static int cros_ec_debugfs_probe(struct platform_device *pd) ret = cros_ec_create_pdinfo(debug_info); if (ret) - goto remove_debugfs; + goto remove_log; ec->debug_info = debug_info; @@ -448,6 +448,8 @@ static int cros_ec_debugfs_probe(struct platform_device *pd) return 0; +remove_log: + cros_ec_cleanup_console_log(debug_info); remove_debugfs: debugfs_remove_recursive(debug_info->dir); return ret; @@ -467,7 +469,8 @@ static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev) { struct cros_ec_dev *ec = dev_get_drvdata(dev); - cancel_delayed_work_sync(&ec->debug_info->log_poll_work); + if (ec->debug_info->log_buffer.buf) + cancel_delayed_work_sync(&ec->debug_info->log_poll_work); return 0; } @@ -476,7 +479,8 @@ static int __maybe_unused cros_ec_debugfs_resume(struct device *dev) { struct cros_ec_dev *ec = dev_get_drvdata(dev); - schedule_delayed_work(&ec->debug_info->log_poll_work, 0); + if (ec->debug_info->log_buffer.buf) + schedule_delayed_work(&ec->debug_info->log_poll_work, 0); return 0; } diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c index f6ff29a11f1a..14355668ddfa 100644 --- a/drivers/platform/chrome/wilco_ec/mailbox.c +++ b/drivers/platform/chrome/wilco_ec/mailbox.c @@ -223,11 +223,11 @@ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg) msg->command, msg->type, msg->flags, msg->response_size, msg->request_size); + mutex_lock(&ec->mailbox_lock); /* Prepare request packet */ rq = ec->data_buffer; wilco_ec_prepare(msg, rq); - mutex_lock(&ec->mailbox_lock); ret = wilco_ec_transfer(ec, msg, rq); mutex_unlock(&ec->mailbox_lock); diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index 8f018b3f3cd4..c7039f52ad51 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -17,6 +17,7 @@ #include <linux/debugfs.h> #include <linux/device.h> +#include <linux/dmi.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_data/x86/clk-pmc-atom.h> @@ -391,11 +392,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc) } #endif /* CONFIG_DEBUG_FS */ +/* + * Some systems need one or more of their pmc_plt_clks to be + * marked as critical. 
+ */ +static const struct dmi_system_id critclk_systems[] = { + { + .ident = "MPL CEC1x", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"), + DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"), + }, + }, + { /*sentinel*/ } +}; + static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap, const struct pmc_data *pmc_data) { struct platform_device *clkdev; struct pmc_clk_data *clk_data; + const struct dmi_system_id *d = dmi_first_match(critclk_systems); clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); if (!clk_data) @@ -403,6 +420,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap, clk_data->base = pmc_regmap; /* offset is added by client */ clk_data->clks = pmc_data->clks; + if (d) { + clk_data->critical = true; + pr_info("%s critclks quirk enabled\n", d->ident); + } clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom", PLATFORM_DEVID_NONE, diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c index 08d5037fd052..6887870ba32c 100644 --- a/drivers/power/supply/cpcap-battery.c +++ b/drivers/power/supply/cpcap-battery.c @@ -221,6 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata, int avg_current; u32 cc_lsb; + if (!divider) + return 0; + sample &= 0xffffff; /* 24-bits, unsigned */ offset &= 0x7ff; /* 10-bits, signed */ diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c index ad969d9fc981..c2644a9fe80f 100644 --- a/drivers/power/supply/goldfish_battery.c +++ b/drivers/power/supply/goldfish_battery.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL +// SPDX-License-Identifier: GPL-2.0 /* * Power supply driver for the goldfish emulator * diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index dce24f596160..5358a80d854f 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -383,15 +383,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) char *prop_buf; char *attrname; - dev_dbg(dev, "uevent\n"); - if (!psy || !psy->desc) { dev_dbg(dev, "No power supply yet\n"); return ret; } - dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name); - ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name); if (ret) return ret; @@ -427,8 +423,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) goto out; } - dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf); - ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf); kfree(attrname); if (ret) diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c index 91751617b37a..c53a2185a039 100644 --- a/drivers/reset/reset-meson-audio-arb.c +++ b/drivers/reset/reset-meson-audio-arb.c @@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev) arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits); arb->rstc.ops = &meson_audio_arb_rstc_ops; arb->rstc.of_node = dev->of_node; + arb->rstc.owner = THIS_MODULE; /* * Enable general : diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index a71734c41693..f933c06bff4f 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -667,9 +667,9 @@ config RTC_DRV_S5M will be called rtc-s5m. config RTC_DRV_SD3078 - tristate "ZXW Crystal SD3078" + tristate "ZXW Shenzhen whwave SD3078" help - If you say yes here you get support for the ZXW Crystal + If you say yes here you get support for the ZXW Shenzhen whwave SD3078 RTC chips. 
This driver can also be built as a module. If so, the module diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c index e5444296075e..4d6bf9304ceb 100644 --- a/drivers/rtc/rtc-cros-ec.c +++ b/drivers/rtc/rtc-cros-ec.c @@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev) struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); if (device_may_wakeup(dev)) - enable_irq_wake(cros_ec_rtc->cros_ec->irq); + return enable_irq_wake(cros_ec_rtc->cros_ec->irq); return 0; } @@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev) struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); if (device_may_wakeup(dev)) - disable_irq_wake(cros_ec_rtc->cros_ec->irq); + return disable_irq_wake(cros_ec_rtc->cros_ec->irq); return 0; } diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c index b4e054c64bad..69b54e5556c0 100644 --- a/drivers/rtc/rtc-da9063.c +++ b/drivers/rtc/rtc-da9063.c @@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev) da9063_data_to_tm(data, &rtc->alarm_time, rtc); rtc->rtc_sync = false; + /* + * TODO: some models have alarms on a minute boundary but still support + * real hardware interrupts. Add this once the core supports it. + */ + if (config->rtc_data_start != RTC_SEC) + rtc->rtc_dev->uie_unsupported = 1; + irq_alarm = platform_get_irq_byname(pdev, "ALARM"); ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, da9063_alarm_event, diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index d417b203cbc5..1d3de2a3d1a4 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm) static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off) { unsigned int byte; - int value = 0xff; /* return 0xff for ignored values */ + int value = -1; /* return -1 for ignored values */ byte = readb(rtc->regbase + reg_off); if (byte & AR_ENB) { diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 6e294b4d3635..f89f9d02e788 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); raw: - block->blocks = (private->real_cyl * + block->blocks = ((unsigned long) private->real_cyl * private->rdc_data.trk_per_cyl * blk_per_trk); dev_info(&device->cdev->dev, - "DASD with %d KB/block, %d KB total size, %d KB/track, " + "DASD with %u KB/block, %lu KB total size, %u KB/track, " "%s\n", (block->bp_block >> 10), - ((private->real_cyl * + (((unsigned long) private->real_cyl * private->rdc_data.trk_per_cyl * blk_per_trk * (block->bp_block >> 9)) >> 1), ((blk_per_trk * block->bp_block) >> 10), diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index fd2146bcc0ad..e17364e13d2f 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -629,7 +629,7 @@ con3270_init(void) (void (*)(unsigned long)) con3270_read_tasklet, (unsigned long) condev->read); - raw3270_add_view(&condev->view, &con3270_fn, 1); + raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ); INIT_LIST_HEAD(&condev->freemem); for (i = 0; i < CON3270_STRING_PAGES; i++) { diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 8f3a2eeb28dc..8b48ba9c598e 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct 
file *filp) init_waitqueue_head(&fp->wait); fp->fs_pid = get_pid(task_pid(current)); - rc = raw3270_add_view(&fp->view, &fs3270_fn, minor); + rc = raw3270_add_view(&fp->view, &fs3270_fn, minor, + RAW3270_VIEW_LOCK_BH); if (rc) { fs3270_free_view(&fp->view); goto out; diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index f8cd2935fbfd..63a41b168761 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view) * Add view to device with minor "minor". */ int -raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) +raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass) { unsigned long flags; struct raw3270 *rp; @@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) view->cols = rp->cols; view->ascebc = rp->ascebc; spin_lock_init(&view->lock); + lockdep_set_subclass(&view->lock, subclass); list_add(&view->list, &rp->view_list); rc = 0; spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index 114ca7cbf889..3afaa35f7351 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -150,6 +150,8 @@ struct raw3270_fn { struct raw3270_view { struct list_head list; spinlock_t lock; +#define RAW3270_VIEW_LOCK_IRQ 0 +#define RAW3270_VIEW_LOCK_BH 1 atomic_t ref_count; struct raw3270 *dev; struct raw3270_fn *fn; @@ -158,7 +160,7 @@ struct raw3270_view { unsigned char *ascebc; /* ascii -> ebcdic table */ }; -int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int); +int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int); int raw3270_activate_view(struct raw3270_view *); void raw3270_del_view(struct raw3270_view *); void raw3270_deactivate_view(struct raw3270_view *); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 2b0c36c2c568..98d7fc152e32 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) return PTR_ERR(tp); rc = raw3270_add_view(&tp->view, &tty3270_fn, - tty->index + RAW3270_FIRSTMINOR); + tty->index + RAW3270_FIRSTMINOR, + RAW3270_VIEW_LOCK_BH); if (rc) { tty3270_free_view(tp); return rc; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 4159c63a5fd2..a835b31aad99 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -24,6 +24,7 @@ #include <asm/crw.h> #include <asm/isc.h> #include <asm/ebcdic.h> +#include <asm/ap.h> #include "css.h" #include "cio.h" @@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) " failed (rc=%d).\n", ret); } +static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area) +{ + CIO_CRW_EVENT(3, "chsc: ap config changed\n"); + if (sei_area->rs != 5) + return; + + ap_bus_cfg_chg(); +} + static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) { switch (sei_area->cc) { @@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) case 2: /* i/o resource accessibility */ chsc_process_sei_res_acc(sei_area); break; + case 3: /* ap config changed */ + chsc_process_sei_ap_cfg_chg(sei_area); + break; case 7: /* channel-path-availability information */ chsc_process_sei_chp_avail(sei_area); break; diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 
a10cec0e86eb..0b3b9de45c60 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) { struct vfio_ccw_private *private; struct irb *irb; + bool is_final; private = container_of(work, struct vfio_ccw_private, io_work); irb = &private->irb; + is_final = !(scsw_actl(&irb->scsw) & + (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); if (scsw_is_solicited(&irb->scsw)) { cp_update_scsw(&private->cp, &irb->scsw); - cp_free(&private->cp); + if (is_final) + cp_free(&private->cp); } memcpy(private->io_region->irb_area, irb, sizeof(*irb)); if (private->io_trigger) eventfd_signal(private->io_trigger, 1); - if (private->mdev) + if (private->mdev && is_final) private->state = VFIO_CCW_STATE_IDLE; } diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index e15816ff1265..1546389d71db 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev) struct ap_device *ap_dev = to_ap_dev(dev); struct ap_driver *ap_drv = ap_dev->drv; + /* prepare ap queue device removal */ if (is_queue_dev(dev)) - ap_queue_remove(to_ap_queue(dev)); + ap_queue_prepare_remove(to_ap_queue(dev)); + + /* driver's chance to clean up gracefully */ if (ap_drv->remove) ap_drv->remove(ap_dev); + /* now do the ap queue device remove */ + if (is_queue_dev(dev)) + ap_queue_remove(to_ap_queue(dev)); + /* Remove queue/card from list of active queues/cards */ spin_lock_bh(&ap_list_lock); if (is_card_dev(dev)) @@ -861,6 +868,16 @@ void ap_bus_force_rescan(void) EXPORT_SYMBOL(ap_bus_force_rescan); /* +* A config change has happened, force an ap bus rescan. +*/ +void ap_bus_cfg_chg(void) +{ + AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__); + + ap_bus_force_rescan(); +} + +/* * hex2bitmap() - parse hex mask string and set bitmap. * Valid strings are "0x012345678" with at least one valid hex number. * Rest of the bitmap to the right is padded with 0. 
No spaces allowed diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index d0059eae5d94..15a98a673c5c 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -91,6 +91,7 @@ enum ap_state { AP_STATE_WORKING, AP_STATE_QUEUE_FULL, AP_STATE_SUSPEND_WAIT, + AP_STATE_REMOVE, /* about to be removed from driver */ AP_STATE_UNBOUND, /* momentary not bound to a driver */ AP_STATE_BORKED, /* broken */ NR_AP_STATES @@ -252,6 +253,7 @@ void ap_bus_force_rescan(void); void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); +void ap_queue_prepare_remove(struct ap_queue *aq); void ap_queue_remove(struct ap_queue *aq); void ap_queue_suspend(struct ap_device *ap_dev); void ap_queue_resume(struct ap_device *ap_dev); diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index ba261210c6da..5ea83dc4f1d7 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { [AP_EVENT_POLL] = ap_sm_suspend_read, [AP_EVENT_TIMEOUT] = ap_sm_nop, }, + [AP_STATE_REMOVE] = { + [AP_EVENT_POLL] = ap_sm_nop, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, [AP_STATE_UNBOUND] = { [AP_EVENT_POLL] = ap_sm_nop, [AP_EVENT_TIMEOUT] = ap_sm_nop, @@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq) } EXPORT_SYMBOL(ap_flush_queue); -void ap_queue_remove(struct ap_queue *aq) +void ap_queue_prepare_remove(struct ap_queue *aq) { - ap_flush_queue(aq); + spin_lock_bh(&aq->lock); + /* flush queue */ + __ap_flush_queue(aq); + /* set REMOVE state to prevent new messages are queued in */ + aq->state = AP_STATE_REMOVE; + spin_unlock_bh(&aq->lock); del_timer_sync(&aq->timeout); +} - /* reset with zero, also clears irq registration */ +void ap_queue_remove(struct ap_queue *aq) +{ + /* + * all messages have been flushed and the state is + * AP_STATE_REMOVE. Now reset with zero which also + * clears the irq registration and move the state + * to AP_STATE_UNBOUND to signal that this queue + * is not used by any driver currently. 
+ */ spin_lock_bh(&aq->lock); ap_zapq(aq->qid); aq->state = AP_STATE_UNBOUND; spin_unlock_bh(&aq->lock); } -EXPORT_SYMBOL(ap_queue_remove); void ap_queue_reinit_state(struct ap_queue *aq) { @@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq) ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); spin_unlock_bh(&aq->lock); } -EXPORT_SYMBOL(ap_queue_reinit_state); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 3e85d665c572..45eb0c14b880 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -51,7 +51,8 @@ static debug_info_t *debug_info; static void __init pkey_debug_init(void) { - debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long)); + /* 5 arguments per dbf entry (including the format string ptr) */ + debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); debug_register_view(debug_info, &debug_sprintf_view); debug_set_level(debug_info, 3); } diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index eb93c2d27d0a..689c2af7026a 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue) static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, struct zcrypt_queue *zq, + struct module **pmod, unsigned int weight) { if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) @@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, atomic_add(weight, &zc->load); atomic_add(weight, &zq->load); zq->request_count++; + *pmod = zq->queue->ap_dev.drv->driver.owner; return zq; } static inline void zcrypt_drop_queue(struct zcrypt_card *zc, struct zcrypt_queue *zq, + struct module *mod, unsigned int weight) { - struct module *mod = zq->queue->ap_dev.drv->driver.owner; - zq->request_count--; atomic_sub(weight, &zc->load); atomic_sub(weight, &zq->load); @@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, unsigned int weight, pref_weight; unsigned int func_code; int qid = 0, rc = -ENODEV; + struct module *mod; trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); @@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, pref_weight = weight; } } - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); spin_unlock(&zcrypt_list_lock); if (!pref_zq) { @@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); spin_lock(&zcrypt_list_lock); - zcrypt_drop_queue(pref_zc, pref_zq, weight); + zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); spin_unlock(&zcrypt_list_lock); out: @@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, unsigned int weight, pref_weight; unsigned int func_code; int qid = 0, rc = -ENODEV; + struct module *mod; trace_s390_zcrypt_req(crt, TP_ICARSACRT); @@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, pref_weight = weight; } } - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); spin_unlock(&zcrypt_list_lock); if (!pref_zq) { @@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); spin_lock(&zcrypt_list_lock); - zcrypt_drop_queue(pref_zc, pref_zq, weight); + zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); spin_unlock(&zcrypt_list_lock); out: @@ -819,6 +822,7 @@ static long 
_zcrypt_send_cprb(struct ap_perms *perms, unsigned int func_code; unsigned short *domain; int qid = 0, rc = -ENODEV; + struct module *mod; trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); @@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, pref_weight = weight; } } - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); spin_unlock(&zcrypt_list_lock); if (!pref_zq) { @@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms, rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); spin_lock(&zcrypt_list_lock); - zcrypt_drop_queue(pref_zc, pref_zq, weight); + zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); spin_unlock(&zcrypt_list_lock); out: @@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms, unsigned int func_code; struct ap_message ap_msg; int qid = 0, rc = -ENODEV; + struct module *mod; trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); @@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms, pref_weight = weight; } } - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); spin_unlock(&zcrypt_list_lock); if (!pref_zq) { @@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms, rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); spin_lock(&zcrypt_list_lock); - zcrypt_drop_queue(pref_zc, pref_zq, weight); + zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); spin_unlock(&zcrypt_list_lock); out_free: @@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer) struct ap_message ap_msg; unsigned int domain; int qid = 0, rc = -ENODEV; + struct module *mod; trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); @@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer) pref_weight = weight; } } - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); spin_unlock(&zcrypt_list_lock); if (!pref_zq) { @@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer) rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); spin_lock(&zcrypt_list_lock); - zcrypt_drop_queue(pref_zc, pref_zq, weight); + zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); spin_unlock(&zcrypt_list_lock); out: diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 7617d21cb296..f63c5c871d3d 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1595,6 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) if (priv->channel[direction] == NULL) { if (direction == CTCM_WRITE) channel_free(priv->channel[CTCM_READ]); + result = -ENODEV; goto out_dev; } priv->channel[direction]->netdev = dev; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 197b0f5b63e7..44bd6f04c145 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1150,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) { + struct sk_buff *skb; + /* release may never happen from within CQ tasklet scope */ WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR); - __skb_queue_purge(&buf->skb_list); + while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) + consume_skb(skb); } static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, diff --git 
a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 8efb2e8ff8f4..c3067fd3bd9e 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -629,8 +629,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, } /* else fall through */ QETH_TXQ_STAT_INC(queue, tx_dropped); - QETH_TXQ_STAT_INC(queue, tx_errors); - dev_kfree_skb_any(skb); + kfree_skb(skb); netif_wake_queue(dev); return NETDEV_TX_OK; } @@ -645,6 +644,8 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc; + qeth_l2_vnicc_set_defaults(card); + if (gdev->dev.type == &qeth_generic_devtype) { rc = qeth_l2_create_device_attributes(&gdev->dev); if (rc) @@ -652,8 +653,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) } hash_init(card->mac_htable); - card->info.hwtrap = 0; - qeth_l2_vnicc_set_defaults(card); return 0; } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 7e68d9d16859..53712cf26406 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2096,8 +2096,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, tx_drop: QETH_TXQ_STAT_INC(queue, tx_dropped); - QETH_TXQ_STAT_INC(queue, tx_errors); - dev_kfree_skb_any(skb); + kfree_skb(skb); netif_wake_queue(dev); return NETDEV_TX_OK; } @@ -2253,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc; + hash_init(card->ip_htable); + if (gdev->dev.type == &qeth_generic_devtype) { rc = qeth_l3_create_device_attributes(&gdev->dev); if (rc) return rc; } - hash_init(card->ip_htable); + hash_init(card->ip_mc_htable); - card->info.hwtrap = 0; return 0; } diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 744a64680d5b..e8fc28dba8df 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) add_timer(&erp_action->timer); } +void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter, + int clear, char *dbftag) +{ + unsigned long flags; + struct zfcp_port *port; + + write_lock_irqsave(&adapter->erp_lock, flags); + read_lock(&adapter->port_list_lock); + list_for_each_entry(port, &adapter->port_list, list) + _zfcp_erp_port_forced_reopen(port, clear, dbftag); + read_unlock(&adapter->port_list_lock); + write_unlock_irqrestore(&adapter->erp_lock, flags); +} + static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear, char *dbftag) { @@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port) struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); int lun_status; + if (sdev->sdev_state == SDEV_DEL || + sdev->sdev_state == SDEV_CANCEL) + continue; if (zsdev->port != port) continue; /* LUN under port of interest */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 3fce47b0b21b..c6acca521ffe 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *dbftag); extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); +extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter, + int clear, char *dbftag); extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); extern void 
zfcp_erp_clear_lun_status(struct scsi_device *, u32); extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index db00b5e3abbe..33eddb02ee30 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, list_for_each_entry(port, &adapter->port_list, list) { if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range)) zfcp_fc_test_link(port); - if (!port->d_id) - zfcp_erp_port_reopen(port, - ZFCP_STATUS_COMMON_ERP_FAILED, - "fcrscn1"); } read_unlock_irqrestore(&adapter->port_list_lock, flags); } @@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) { struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; + struct zfcp_adapter *adapter = fsf_req->adapter; struct fc_els_rscn *head; struct fc_els_rscn_page *page; u16 i; @@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) no_entries = be16_to_cpu(head->rscn_plen) / sizeof(struct fc_els_rscn_page); + if (no_entries > 1) { + /* handle failed ports */ + unsigned long flags; + struct zfcp_port *port; + + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) { + if (port->d_id) + continue; + zfcp_erp_port_reopen(port, + ZFCP_STATUS_COMMON_ERP_FAILED, + "fcrscn1"); + } + read_unlock_irqrestore(&adapter->port_list_lock, flags); + } + for (i = 1; i < no_entries; i++) { /* skip head and start with 1st element */ page++; diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index f4f6a07c5222..221d0dfb8493 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; int ret = SUCCESS, fc_ret; + if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) { + zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p"); + zfcp_erp_wait(adapter); + } zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); zfcp_erp_wait(adapter); fc_ret = fc_block_scsi_eh(scpnt); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 1df5171594b8..11fb68d7e60d 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) return capacity; } +static inline int aac_pci_offline(struct aac_dev *dev) +{ + return pci_channel_offline(dev->pdev) || dev->handle_pci_error; +} + static inline int aac_adapter_check_health(struct aac_dev *dev) { - if (unlikely(pci_channel_offline(dev->pdev))) + if (unlikely(aac_pci_offline(dev))) return -1; return (dev)->a_ops.adapter_check_health(dev); diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index e67e032936ef..78430a7b294c 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, return -ETIMEDOUT; } - if (unlikely(pci_channel_offline(dev->pdev))) + if (unlikely(aac_pci_offline(dev))) return -EFAULT; if ((blink = aac_adapter_check_health(dev)) > 0) { @@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, spin_unlock_irqrestore(&fibptr->event_lock, flags); - if 
(unlikely(pci_channel_offline(dev->pdev))) + if (unlikely(aac_pci_offline(dev))) return -EFAULT; fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c index 3d401d02c019..bdd177e3d762 100644 --- a/drivers/scsi/aic7xxx/aic7770_osm.c +++ b/drivers/scsi/aic7xxx/aic7770_osm.c @@ -91,6 +91,7 @@ aic7770_probe(struct device *dev) ahc = ahc_alloc(&aic7xxx_driver_template, name); if (ahc == NULL) return (ENOMEM); + ahc->dev = dev; error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data, eisaBase); if (error != 0) { diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index 5614921b4041..88b90f9806c9 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h @@ -943,6 +943,7 @@ struct ahc_softc { * Platform specific device information. */ ahc_dev_softc_t dev_softc; + struct device *dev; /* * Bus specific device information. diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index 3c9c17450bb3..d5c4a0d23706 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -860,8 +860,8 @@ int ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { - *vaddr = pci_alloc_consistent(ahc->dev_softc, - dmat->maxsize, mapp); + /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */ + *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC); if (*vaddr == NULL) return ENOMEM; return 0; @@ -871,8 +871,7 @@ void ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map) { - pci_free_consistent(ahc->dev_softc, dmat->maxsize, - vaddr, map); + dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map); } int @@ -1123,8 +1122,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa host->transportt = ahc_linux_transport_template; - retval = scsi_add_host(host, - (ahc->dev_softc ? 
&ahc->dev_softc->dev : NULL)); + retval = scsi_add_host(host, ahc->dev); if (retval) { printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n"); scsi_host_put(host); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index 0fc14dac7070..717d8d1082ce 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c @@ -250,6 +250,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } ahc->dev_softc = pci; + ahc->dev = &pci->dev; error = ahc_pci_config(ahc, entry); if (error != 0) { ahc_free(ahc); diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 462560b2855e..469d0bc9f5fe 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) } out: - if (req->nsge > 0) + if (req->nsge > 0) { scsi_dma_unmap(cmnd); + if (req->dcopy && (host_status == DID_OK)) + host_status = csio_scsi_copy_to_sgl(hw, req); + } cmnd->result = (((host_status) << 16) | scsi_status); cmnd->scsi_done(cmnd); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 3c3cf89f713f..14bac4966c87 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1801,6 +1801,12 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) } hisi_sas_dereg_device(hisi_hba, device); + if (dev_is_sata(device)) { + rc = hisi_sas_softreset_ata_disk(device); + if (rc) + return TMF_RESP_FUNC_FAILED; + } + rc = hisi_sas_debug_I_T_nexus_reset(device); if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index dbaa4f131433..3ad997ac3510 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -139,6 +139,7 @@ static const struct { { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" }, { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" }, + { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." 
}, }; static void ibmvfc_npiv_login(struct ibmvfc_host *); @@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt) if (rsp->flags & FCP_RSP_LEN_VALID) rsp_code = rsp->data.info.rsp_code; - scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) " + scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) " "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n", - cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error, + cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error), rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); } @@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc, ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), - rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, + be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code, fc_rsp->scsi_status); rsp_rc = -EIO; } else @@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " "flags: %x fcp_rsp: %x, scsi_status: %x\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), - rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, + be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code, fc_rsp->scsi_status); rsp_rc = -EIO; } else @@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); if (crq->format == IBMVFC_PARTITION_MIGRATED) { /* We need to re-setup the interpartition connection */ - dev_info(vhost->dev, "Re-enabling adapter\n"); + dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n"); vhost->client_migrated = 1; ibmvfc_purge_requests(vhost, DID_REQUEUE); ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); - } else { - dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format); + } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { + dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); ibmvfc_purge_requests(vhost, DID_ERROR); ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); + } else { + dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format); } return; case IBMVFC_CRQ_CMD_RSP: @@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), - rsp->status, rsp->error, status); + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status); break; } @@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error, - ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type, - ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status); + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + 
be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), + ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type), + ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status); break; } @@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8; tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)), - mad->iu.status, mad->iu.error, + be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error), ibmvfc_get_fc_type(fc_reason), fc_reason, ibmvfc_get_ls_explain(fc_explain), fc_explain, status); break; @@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), - rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), - rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), - rsp->fc_explain, status); + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), + ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type), + ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), + status); break; } @@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) level += ibmvfc_retry_host_init(vhost); ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), - rsp->status, rsp->error); + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)); break; case IBMVFC_MAD_DRIVER_FAILED: break; @@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), - rsp->status, rsp->error); + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)); ibmvfc_free_event(evt); return; case IBMVFC_MAD_CRQ_ERROR: diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index b81a53c4a9a8..459cc288ba1d 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -78,9 +78,14 @@ enum ibmvfc_crq_valid { IBMVFC_CRQ_XPORT_EVENT = 0xFF, }; -enum ibmvfc_crq_format { +enum ibmvfc_crq_init_msg { IBMVFC_CRQ_INIT = 0x01, IBMVFC_CRQ_INIT_COMPLETE = 0x02, +}; + +enum ibmvfc_crq_xport_evts { + IBMVFC_PARTNER_FAILED = 0x01, + IBMVFC_PARTNER_DEREGISTER = 0x02, IBMVFC_PARTITION_MIGRATED = 0x06, }; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 1135e74646e2..8cec5230fe31 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -96,6 +96,7 @@ static int client_reserve = 1; static char partition_name[96] = "UNKNOWN"; static unsigned int partition_number = -1; static LIST_HEAD(ibmvscsi_head); +static DEFINE_SPINLOCK(ibmvscsi_driver_lock); static struct scsi_transport_template *ibmvscsi_transport_template; @@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) } dev_set_drvdata(&vdev->dev, hostdata); + spin_lock(&ibmvscsi_driver_lock); list_add_tail(&hostdata->host_list, &ibmvscsi_head); + spin_unlock(&ibmvscsi_driver_lock); return 0; add_srp_port_failed: @@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, 
const struct vio_device_id *id) static int ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); - list_del(&hostdata->host_list); - unmap_persist_bufs(hostdata); + unsigned long flags; + + srp_remove_host(hostdata->host); + scsi_remove_host(hostdata->host); + + purge_requests(hostdata, DID_ERROR); + + spin_lock_irqsave(hostdata->host->host_lock, flags); release_event_pool(&hostdata->pool, hostdata); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events); kthread_stop(hostdata->work_thread); - srp_remove_host(hostdata->host); - scsi_remove_host(hostdata->host); + unmap_persist_bufs(hostdata); + + spin_lock(&ibmvscsi_driver_lock); + list_del(&hostdata->host_list); + spin_unlock(&ibmvscsi_driver_lock); + scsi_host_put(hostdata->host); return 0; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index dfba4921b265..5bf61431434b 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -2162,7 +2162,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", fc_rport_state(rdata)); - rdata->flags &= ~FC_RP_STARTED; fc_rport_enter_delete(rdata, RPORT_EV_STOP); mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, fc_rport_destroy); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c98f264f1d83..a497b2c0cb79 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, * wake up the thread. */ spin_lock(&lpfc_cmd->buf_lock); - if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) { - lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; - if (lpfc_cmd->waitq) - wake_up(lpfc_cmd->waitq); + lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; + if (lpfc_cmd->waitq) { + wake_up(lpfc_cmd->waitq); lpfc_cmd->waitq = NULL; } spin_unlock(&lpfc_cmd->buf_lock); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index e57774472e75..1d8c584ec1e9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) if (smid < ioc->hi_priority_smid) { struct scsiio_tracker *st; + void *request; st = _get_st_from_smid(ioc, smid); if (!st) { _base_recovery_check(ioc); return; } + + /* Clear MPI request frame */ + request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + mpt3sas_base_clear_st(ioc, st); _base_recovery_check(ioc); return; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8bb5b8f9f4d2..1ccfbc7eebe0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) { struct scsi_cmnd *scmd = NULL; struct scsiio_tracker *st; + Mpi25SCSIIORequest_t *mpi_request; if (smid > 0 && smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { u32 unique_tag = smid - 1; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* + * If SCSI IO request is outstanding at driver level then + * DevHandle filed must be non-zero. If DevHandle is zero + * then it means that this smid is free at driver level, + * so return NULL. 
+ */ + if (!mpi_request->DevHandle) + return scmd; + scmd = scsi_host_find_tag(ioc->shost, unique_tag); if (scmd) { st = scsi_cmd_priv(scmd); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index e74a62448ba4..e5db9a9954dc 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) { - struct qedi_nvm_iscsi_image nvm_image; - qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, - sizeof(nvm_image), + sizeof(struct qedi_nvm_iscsi_image), &qedi->nvm_buf_dma, GFP_KERNEL); if (!qedi->iscsi_image) { QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); @@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data) static int qedi_get_boot_info(struct qedi_ctx *qedi) { int ret = 1; - struct qedi_nvm_iscsi_image nvm_image; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Get NVM iSCSI CFG image\n"); ret = qedi_ops->common->nvm_get_image(qedi->cdev, QED_NVM_IMAGE_ISCSI_CFG, (char *)qedi->iscsi_image, - sizeof(nvm_image)); + sizeof(struct qedi_nvm_iscsi_image)); if (ret) QEDI_ERR(&qedi->dbg_ctx, "Could not get NVM image. ret = %d\n", ret); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 420045155ba0..0c700b140ce7 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4991,6 +4991,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if ((domain & 0xf0) == 0xf0) continue; + /* Bypass if not same domain and area of adapter. */ + if (area && domain && ((area != vha->d_id.b.area) || + (domain != vha->d_id.b.domain)) && + (ha->current_topology == ISP_CFG_NL)) + continue; + + /* Bypass invalid local loop ID. 
*/ if (loop_id > LAST_LOCAL_LOOP_ID) continue; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 677f82fdf56f..91f576d743fe 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1517,7 +1517,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, goto eh_reset_failed; } err = 2; - if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1) + if (do_reset(fcport, cmd->device->lun, 1) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800c, "do_reset failed for cmd=%p.\n", cmd); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 16a18d5d856f..6e4f4931ae17 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) return -EINVAL; ep = iscsi_lookup_endpoint(transport_fd); + if (!ep) + return -EINVAL; conn = cls_conn->dd_data; qla_conn = conn->dd_data; qla_conn->qla_ep = ep->dd_data; diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index c4cbfd07b916..a08ff3bd6310 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -238,6 +238,7 @@ static struct { {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 5a58cbf3a75d..c14006ac98f9 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"NETAPP", "INF-01-00", "rdac", }, {"LSI", "INF-01-00", "rdac", }, {"ENGENIO", "INF-01-00", "rdac", }, + {"LENOVO", "DE_Series", "rdac", }, {NULL, NULL, NULL }, }; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 20189675677a..07dfc17d4824 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -585,10 +585,17 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (!blk_rq_is_scsi(req)) { WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); cmd->flags &= ~SCMD_INITIALIZED; - destroy_rcu_head(&cmd->rcu); } /* + * Calling rcu_barrier() is not necessary here because the + * SCSI error handler guarantees that the function called by + * call_rcu() has been called before scsi_end_request() is + * called. + */ + destroy_rcu_head(&cmd->rcu); + + /* * In the MQ case the command gets freed by __blk_mq_end_request, * so we have to do all cleanup that depends on it earlier. * @@ -1699,8 +1706,12 @@ out_put_budget: ret = BLK_STS_DEV_RESOURCE; break; default: + if (unlikely(!scsi_device_online(sdev))) + scsi_req(req)->result = DID_NO_CONNECT << 16; + else + scsi_req(req)->result = DID_ERROR << 16; /* - * Make sure to release all allocated ressources when + * Make sure to release all allocated resources when * we hit an error, as we will never see this command * again. 
*/ @@ -2541,8 +2552,10 @@ void scsi_device_resume(struct scsi_device *sdev) * device deleted during suspend) */ mutex_lock(&sdev->state_mutex); - sdev->quiesced_by = NULL; - blk_clear_pm_only(sdev->request_queue); + if (sdev->quiesced_by) { + sdev->quiesced_by = NULL; + blk_clear_pm_only(sdev->request_queue); + } if (sdev->sdev_state == SDEV_QUIESCE) scsi_device_set_state(sdev, SDEV_RUNNING); mutex_unlock(&sdev->state_mutex); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 6a9040faed00..3b119ca0cc0c 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr, mutex_lock(&sdev->state_mutex); ret = scsi_device_set_state(sdev, state); + /* + * If the device state changes to SDEV_RUNNING, we need to run + * the queue to avoid I/O hang. + */ + if (ret == 0 && state == SDEV_RUNNING) + blk_mq_run_hw_queues(sdev->request_queue, true); mutex_unlock(&sdev->state_mutex); return ret == 0 ? count : -EINVAL; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 0508831d6fb9..0a82e93566dc 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session) scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); /* flush running scans then delete devices */ flush_work(&session->scan_work); + /* flush running unbind operations */ + flush_work(&session->unbind_work); __iscsi_unbind_session(&session->unbind_work); /* hw iscsi may not have removed all connections from session */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 251db30d0882..2b2bc4b49d78 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode) scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); } - /* - * XXX and what if there are packets in flight and this close() - * XXX is followed by a "rmmod sd_mod"? - */ - scsi_disk_put(sdkp); } @@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, unsigned int opt_xfer_bytes = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); + if (sdkp->opt_xfer_blocks == 0) + return false; + if (sdkp->opt_xfer_blocks > dev_max) { sd_first_printk(KERN_WARNING, sdkp, "Optimal transfer size %u logical blocks " \ @@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct gendisk *disk = sdkp->disk; - + struct request_queue *q = disk->queue; + ida_free(&sd_index_ida, sdkp->index); + /* + * Wait until all requests that are in progress have completed. + * This is necessary to avoid that e.g. scsi_end_request() crashes + * due to clearing the disk->private_data pointer. Wait from inside + * scsi_disk_release() instead of from sd_release() to avoid that + * freezing and unfreezing the request queue affects user space I/O + * in case multiple processes open a /dev/sd... node concurrently. + */ + blk_mq_freeze_queue(q); + blk_mq_unfreeze_queue(q); + disk->private_data = NULL; put_disk(disk); put_device(&sdkp->device->sdev_gendev); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 84380bae20f1..8472de1007ff 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -385,7 +385,7 @@ enum storvsc_request_type { * This is the end of Protocol specific defines. 
*/ -static int storvsc_ringbuffer_size = (256 * PAGE_SIZE); +static int storvsc_ringbuffer_size = (128 * 1024); static u32 max_outstanding_req_per_channel; static int storvsc_vcpus_per_sub_channel = 4; @@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) { struct device *dev = &device->device; struct storvsc_device *stor_device; - int num_cpus = num_online_cpus(); int num_sc; struct storvsc_cmd_request *request; struct vstor_packet *vstor_packet; int ret, t; - num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns); + /* + * If the number of CPUs is artificially restricted, such as + * with maxcpus=1 on the kernel boot line, Hyper-V could offer + * sub-channels >= the number of CPUs. These sub-channels + * should not be created. The primary channel is already created + * and assigned to one CPU, so check against # CPUs - 1. + */ + num_sc = min((int)(num_online_cpus() - 1), max_chns); + if (!num_sc) + return; + stor_device = get_out_stor_device(device); if (!stor_device) return; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 8af01777d09c..f8cb7c23305b 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -793,6 +793,7 @@ static int virtscsi_probe(struct virtio_device *vdev) /* We need to know how many queues before we allocate. */ num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; + num_queues = min_t(unsigned int, nr_cpu_ids, num_queues); num_targets = virtscsi_config_get(vdev, max_target) + 1; diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c index 9351349cf0a9..1e0041ec8132 100644 --- a/drivers/soc/bcm/bcm2835-power.c +++ b/drivers/soc/bcm/bcm2835-power.c @@ -150,7 +150,12 @@ struct bcm2835_power { static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) { - u64 start = ktime_get_ns(); + u64 start; + + if (!reg) + return 0; + + start = ktime_get_ns(); /* Enable the module's async AXI bridges. */ ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP); @@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) { - u64 start = ktime_get_ns(); + u64 start; + + if (!reg) + return 0; + + start = ktime_get_ns(); /* Enable the module's async AXI bridges. */ ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP); @@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain) } } -static void +static int bcm2835_init_power_domain(struct bcm2835_power *power, int pd_xlate_index, const char *name) { @@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power, struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index]; dom->clk = devm_clk_get(dev->parent, name); + if (IS_ERR(dom->clk)) { + int ret = PTR_ERR(dom->clk); + + if (ret == -EPROBE_DEFER) + return ret; + + /* Some domains don't have a clk, so make sure that we + * don't deref an error pointer later. 
+ */ + dom->clk = NULL; + } dom->base.name = name; dom->base.power_on = bcm2835_power_pd_power_on; @@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power, pm_genpd_init(&dom->base, NULL, true); power->pd_xlate.domains[pd_xlate_index] = &dom->base; + + return 0; } /** bcm2835_reset_reset - Resets a block that has a reset line in the @@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev) { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 }, { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 }, }; - int ret, i; + int ret = 0, i; u32 id; power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL); @@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev) power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names); - for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) - bcm2835_init_power_domain(power, i, power_domain_names[i]); + for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) { + ret = bcm2835_init_power_domain(power, i, power_domain_names[i]); + if (ret) + goto fail; + } for (i = 0; i < ARRAY_SIZE(domain_deps); i++) { pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base, @@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev) ret = devm_reset_controller_register(dev, &power->reset); if (ret) - return ret; + goto fail; of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate); dev_info(dev, "Broadcom BCM2835 power domains driver"); return 0; + +fail: + for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) { + struct generic_pm_domain *dom = &power->domains[i].base; + + if (dom->name) + pm_genpd_remove(dom); + } + return ret; } static int bcm2835_power_remove(struct platform_device *pdev) diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index c0901b96cfe4..62951e836cbc 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -114,8 +114,6 @@ source "drivers/staging/ralink-gdma/Kconfig" source "drivers/staging/mt7621-mmc/Kconfig" -source "drivers/staging/mt7621-eth/Kconfig" - source "drivers/staging/mt7621-dts/Kconfig" source "drivers/staging/gasket/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 57c6bce13ff4..d1b17ddcd354 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -47,7 +47,6 @@ obj-$(CONFIG_SPI_MT7621) += mt7621-spi/ obj-$(CONFIG_SOC_MT7621) += mt7621-dma/ obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ obj-$(CONFIG_MTK_MMC) += mt7621-mmc/ -obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/ obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig index 687537203d9c..d9725888af6f 100644 --- a/drivers/staging/axis-fifo/Kconfig +++ b/drivers/staging/axis-fifo/Kconfig @@ -3,6 +3,7 @@ # config XIL_AXIS_FIFO tristate "Xilinx AXI-Stream FIFO IP core driver" + depends on OF default n help This adds support for the Xilinx AXI-Stream diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h index a7d569cfca5d..0dff1ac057cd 100644 --- a/drivers/staging/comedi/comedidev.h +++ b/drivers/staging/comedi/comedidev.h @@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev, unsigned int mask); unsigned int comedi_dio_update_state(struct comedi_subdevice *s, unsigned int *data); +unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, + struct comedi_cmd *cmd); unsigned int 
comedi_bytes_per_scan(struct comedi_subdevice *s); unsigned int comedi_nscans_left(struct comedi_subdevice *s, unsigned int nscans); diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index eefa62f42c0f..5a32b8fc000e 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s, EXPORT_SYMBOL_GPL(comedi_dio_update_state); /** - * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes + * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in + * bytes * @s: COMEDI subdevice. + * @cmd: COMEDI command. * * Determines the overall scan length according to the subdevice type and the - * number of channels in the scan. + * number of channels in the scan for the specified command. * * For digital input, output or input/output subdevices, samples for * multiple channels are assumed to be packed into one or more unsigned @@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state); * * Returns the overall scan length in bytes. */ -unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) +unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, + struct comedi_cmd *cmd) { - struct comedi_cmd *cmd = &s->async->cmd; unsigned int num_samples; unsigned int bits_per_sample; @@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) } return comedi_samples_to_bytes(s, num_samples); } +EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd); + +/** + * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes + * @s: COMEDI subdevice. + * + * Determines the overall scan length according to the subdevice type and the + * number of channels in the scan for the current command. + * + * For digital input, output or input/output subdevices, samples for + * multiple channels are assumed to be packed into one or more unsigned + * short or unsigned int values according to the subdevice's %SDF_LSAMPL + * flag. For other types of subdevice, samples are assumed to occupy a + * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag. + * + * Returns the overall scan length in bytes. 
+ */ +unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) +{ + struct comedi_cmd *cmd = &s->async->cmd; + + return comedi_bytes_per_scan_cmd(s, cmd); +} EXPORT_SYMBOL_GPL(comedi_bytes_per_scan); static unsigned int __comedi_nscans_left(struct comedi_subdevice *s, diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 5edf59ac6706..b04dad8c7092 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { struct ni_private *devpriv = dev->private; + unsigned int bytes_per_scan; int err = 0; /* Step 1 : check if triggers are trivially valid */ @@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev, err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); - err |= comedi_check_trigger_arg_max(&cmd->stop_arg, - s->async->prealloc_bufsz / - comedi_bytes_per_scan(s)); + bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd); + if (bytes_per_scan) { + err |= comedi_check_trigger_arg_max(&cmd->stop_arg, + s->async->prealloc_bufsz / + bytes_per_scan); + } if (err) return 3; diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c index 808ed92ed66f..1bb1cb651349 100644 --- a/drivers/staging/comedi/drivers/ni_usb6501.c +++ b/drivers/staging/comedi/drivers/ni_usb6501.c @@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev) size = usb_endpoint_maxp(devpriv->ep_tx); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); - if (!devpriv->usb_tx_buf) { - kfree(devpriv->usb_rx_buf); + if (!devpriv->usb_tx_buf) return -ENOMEM; - } return 0; } @@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev, if (!devpriv) return -ENOMEM; + mutex_init(&devpriv->mut); + usb_set_intfdata(intf, devpriv); + ret = ni6501_find_endpoints(dev); if (ret) return ret; @@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev, if (ret) return ret; - mutex_init(&devpriv->mut); - usb_set_intfdata(intf, devpriv); - ret = comedi_alloc_subdevices(dev, 2); if (ret) return ret; diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 6234b649d887..65dc6c51037e 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c @@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev) size = usb_endpoint_maxp(devpriv->ep_tx); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); - if (!devpriv->usb_tx_buf) { - kfree(devpriv->usb_rx_buf); + if (!devpriv->usb_tx_buf) return -ENOMEM; - } return 0; } @@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev, devpriv->model = board->model; + sema_init(&devpriv->limit_sem, 8); + ret = vmk80xx_find_usb_endpoints(dev); if (ret) return ret; @@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev, if (ret) return ret; - sema_init(&devpriv->limit_sem, 8); - usb_set_intfdata(intf, devpriv); if (devpriv->model == VMK8055_MODEL) diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c index 526e0dbea5b5..81af768e7248 100644 --- a/drivers/staging/erofs/data.c +++ b/drivers/staging/erofs/data.c @@ -298,7 +298,7 @@ submit_bio_retry: *last_block = current_block; /* shift in advance in case of it followed by 
too many gaps */ - if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) { + if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) { /* err should reassign to 0 after submitting */ err = 0; goto submit_bio_out; diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c index 829f7b12e0dc..9bbc68729c11 100644 --- a/drivers/staging/erofs/dir.c +++ b/drivers/staging/erofs/dir.c @@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = { [EROFS_FT_SYMLINK] = DT_LNK, }; +static void debug_one_dentry(unsigned char d_type, const char *de_name, + unsigned int de_namelen) +{ +#ifdef CONFIG_EROFS_FS_DEBUG + /* since the on-disk name could not have the trailing '\0' */ + unsigned char dbg_namebuf[EROFS_NAME_LEN + 1]; + + memcpy(dbg_namebuf, de_name, de_namelen); + dbg_namebuf[de_namelen] = '\0'; + + debugln("found dirent %s de_len %u d_type %d", dbg_namebuf, + de_namelen, d_type); +#endif +} + static int erofs_fill_dentries(struct dir_context *ctx, void *dentry_blk, unsigned int *ofs, unsigned int nameoff, unsigned int maxsize) @@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx, de = dentry_blk + *ofs; while (de < end) { const char *de_name; - int de_namelen; + unsigned int de_namelen; unsigned char d_type; -#ifdef CONFIG_EROFS_FS_DEBUG - unsigned int dbg_namelen; - unsigned char dbg_namebuf[EROFS_NAME_LEN]; -#endif - if (unlikely(de->file_type < EROFS_FT_MAX)) + if (de->file_type < EROFS_FT_MAX) d_type = erofs_filetype_table[de->file_type]; else d_type = DT_UNKNOWN; @@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx, nameoff = le16_to_cpu(de->nameoff); de_name = (char *)dentry_blk + nameoff; - de_namelen = unlikely(de + 1 >= end) ? - /* last directory entry */ - strnlen(de_name, maxsize - nameoff) : - le16_to_cpu(de[1].nameoff) - nameoff; + /* the last dirent in the block? 
*/ + if (de + 1 >= end) + de_namelen = strnlen(de_name, maxsize - nameoff); + else + de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; /* a corrupted entry is found */ - if (unlikely(de_namelen < 0)) { + if (unlikely(nameoff + de_namelen > maxsize || + de_namelen > EROFS_NAME_LEN)) { DBG_BUGON(1); return -EIO; } -#ifdef CONFIG_EROFS_FS_DEBUG - dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen); - memcpy(dbg_namebuf, de_name, dbg_namelen); - dbg_namebuf[dbg_namelen] = '\0'; - - debugln("%s, found de_name %s de_len %d d_type %d", __func__, - dbg_namebuf, de_namelen, d_type); -#endif - + debug_one_dentry(d_type, de_name, de_namelen); if (!dir_emit(ctx, de_name, de_namelen, le64_to_cpu(de->nid), d_type)) /* stopped by some reason */ diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c index 8715bc50e09c..31eef8395774 100644 --- a/drivers/staging/erofs/unzip_vle.c +++ b/drivers/staging/erofs/unzip_vle.c @@ -972,6 +972,7 @@ repeat: overlapped = false; compressed_pages = grp->compressed_pages; + err = 0; for (i = 0; i < clusterpages; ++i) { unsigned int pagenr; @@ -981,26 +982,39 @@ repeat: DBG_BUGON(!page); DBG_BUGON(!page->mapping); - if (z_erofs_is_stagingpage(page)) - continue; + if (!z_erofs_is_stagingpage(page)) { #ifdef EROFS_FS_HAS_MANAGED_CACHE - if (page->mapping == MNGD_MAPPING(sbi)) { - DBG_BUGON(!PageUptodate(page)); - continue; - } + if (page->mapping == MNGD_MAPPING(sbi)) { + if (unlikely(!PageUptodate(page))) + err = -EIO; + continue; + } #endif - /* only non-head page could be reused as a compressed page */ - pagenr = z_erofs_onlinepage_index(page); + /* + * only if non-head page can be selected + * for inplace decompression + */ + pagenr = z_erofs_onlinepage_index(page); - DBG_BUGON(pagenr >= nr_pages); - DBG_BUGON(pages[pagenr]); - ++sparsemem_pages; - pages[pagenr] = page; + DBG_BUGON(pagenr >= nr_pages); + DBG_BUGON(pages[pagenr]); + ++sparsemem_pages; + pages[pagenr] = page; - overlapped = true; + overlapped = true; + } + + /* PG_error needs checking for inplaced and staging pages */ + if (unlikely(PageError(page))) { + DBG_BUGON(PageUptodate(page)); + err = -EIO; + } } + if (unlikely(err)) + goto out; + llen = (nr_pages << PAGE_SHIFT) - work->pageofs; if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) { @@ -1029,6 +1043,10 @@ repeat: skip_allocpage: vout = erofs_vmap(pages, nr_pages); + if (!vout) { + err = -ENOMEM; + goto out; + } err = z_erofs_vle_unzip_vmap(compressed_pages, clusterpages, vout, llen, work->pageofs, overlapped); @@ -1194,6 +1212,7 @@ repeat: if (page->mapping == mc) { WRITE_ONCE(grp->compressed_pages[nr], page); + ClearPageError(page); if (!PagePrivate(page)) { /* * impossible to be !PagePrivate(page) for diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c index 48b263a2731a..0daac9b984a8 100644 --- a/drivers/staging/erofs/unzip_vle_lz4.c +++ b/drivers/staging/erofs/unzip_vle_lz4.c @@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE); - if (clusterpages == 1) + if (clusterpages == 1) { vin = kmap_atomic(compressed_pages[0]); - else + } else { vin = erofs_vmap(compressed_pages, clusterpages); + if (!vin) + return -ENOMEM; + } preempt_disable(); vout = erofs_pcpubuf[smp_processor_id()].data; diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index acdbc07fd259..2fc8bc22b57b 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c 
@@ -109,10 +109,10 @@ #define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */ #define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */ -#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */ -#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */ -#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */ -#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */ +#define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */ +#define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */ +#define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */ +#define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */ #define AD7193_CH_TEMP 0x100 /* Temp senseor */ #define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */ #define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */ diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c index 029c3bf42d4d..07774c000c5a 100644 --- a/drivers/staging/iio/meter/ade7854.c +++ b/drivers/staging/iio/meter/ade7854.c @@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644, static IIO_DEV_ATTR_IPEAK(0644, ade7854_read_32bit, ade7854_write_32bit, - ADE7854_VPEAK); + ADE7854_IPEAK); static IIO_DEV_ATTR_APHCAL(0644, ade7854_read_16bit, ade7854_write_16bit, diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c index 18936cdb1083..956daf8c3bd2 100644 --- a/drivers/staging/most/core.c +++ b/drivers/staging/most/core.c @@ -1431,7 +1431,7 @@ int most_register_interface(struct most_interface *iface) INIT_LIST_HEAD(&iface->p->channel_list); iface->p->dev_id = id; - snprintf(iface->p->name, STRING_SIZE, "mdev%d", id); + strcpy(iface->p->name, iface->description); iface->dev.init_name = iface->p->name; iface->dev.bus = &mc.bus; iface->dev.parent = &mc.dev; diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts index b73385540216..250c15ace2a7 100644 --- a/drivers/staging/mt7621-dts/gbpc1.dts +++ b/drivers/staging/mt7621-dts/gbpc1.dts @@ -117,22 +117,6 @@ status = "okay"; }; -&ethernet { - //mtd-mac-address = <&factory 0xe000>; - gmac1: mac@0 { - compatible = "mediatek,eth-mac"; - reg = <0>; - phy-handle = <&phy1>; - }; - - mdio-bus { - phy1: ethernet-phy@1 { - reg = <1>; - phy-mode = "rgmii"; - }; - }; -}; - &pinctrl { state_default: pinctrl0 { gpio { @@ -141,3 +125,16 @@ }; }; }; + +&switch0 { + ports { + port@0 { + label = "ethblack"; + status = "ok"; + }; + port@4 { + label = "ethblue"; + status = "ok"; + }; + }; +}; diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi index 6aff3680ce4b..17020e24abd2 100644 --- a/drivers/staging/mt7621-dts/mt7621.dtsi +++ b/drivers/staging/mt7621-dts/mt7621.dtsi @@ -372,16 +372,83 @@ mediatek,ethsys = <&ethsys>; - mediatek,switch = <&gsw>; + gmac0: mac@0 { + compatible = "mediatek,eth-mac"; + reg = <0>; + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + gmac1: mac@1 { + compatible = "mediatek,eth-mac"; + reg = <1>; + status = "off"; + phy-mode = "rgmii"; + phy-handle = <&phy5>; + }; mdio-bus { #address-cells = <1>; #size-cells = <0>; - phy1f: ethernet-phy@1f { - reg = <0x1f>; + phy5: ethernet-phy@5 { + reg = <5>; phy-mode = "rgmii"; }; + + switch0: switch0@0 { + compatible = "mediatek,mt7621"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + mediatek,mcm; + resets = <&rstctrl 2>; + reset-names = "mcm"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + port@0 { + status = "off"; + reg = <0>; + label = "lan0"; + }; + port@1 { + status = "off"; + reg = <1>; + label = 
"lan1"; + }; + port@2 { + status = "off"; + reg = <2>; + label = "lan2"; + }; + port@3 { + status = "off"; + reg = <3>; + label = "lan3"; + }; + port@4 { + status = "off"; + reg = <4>; + label = "lan4"; + }; + port@6 { + reg = <6>; + label = "cpu"; + ethernet = <&gmac0>; + phy-mode = "trgmii"; + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; + }; }; }; diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt deleted file mode 100644 index 596b38552697..000000000000 --- a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt +++ /dev/null @@ -1,48 +0,0 @@ -Mediatek Gigabit Switch -======================= - -The mediatek gigabit switch can be found on Mediatek SoCs. - -Required properties: -- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw", - "mediatek,mt7623-gsw" -- reg: Address and length of the register set for the device -- interrupts: Should contain the gigabit switches interrupt - - -Additional required properties for ARM based SoCs: -- mediatek,reset-pin: phandle describing the reset GPIO -- clocks: the clocks used by the switch -- clock-names: the names of the clocks listed in the clocks property - these should be "trgpll", "esw", "gp2", "gp1" -- mt7530-supply: the phandle of the regulator used to power the switch -- mediatek,pctl-regmap: phandle to the port control regmap. this is used to - setup the drive current - - -Optional properties: -- interrupt-parent: Should be the phandle for the interrupt controller - that services interrupts for this device - -Example: - -gsw: switch@1b100000 { - compatible = "mediatek,mt7623-gsw"; - reg = <0 0x1b110000 0 0x300000>; - - interrupt-parent = <&pio>; - interrupts = <168 IRQ_TYPE_EDGE_RISING>; - - clocks = <&apmixedsys CLK_APMIXED_TRGPLL>, - <&ethsys CLK_ETHSYS_ESW>, - <&ethsys CLK_ETHSYS_GP2>, - <&ethsys CLK_ETHSYS_GP1>; - clock-names = "trgpll", "esw", "gp2", "gp1"; - - mt7530-supply = <&mt6323_vpa_reg>; - - mediatek,pctl-regmap = <&syscfg_pctl_a>; - mediatek,reset-pin = <&pio 15 0>; - - status = "okay"; -}; diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig deleted file mode 100644 index 44ea86c7a96c..000000000000 --- a/drivers/staging/mt7621-eth/Kconfig +++ /dev/null @@ -1,39 +0,0 @@ -config NET_VENDOR_MEDIATEK_STAGING - bool "MediaTek ethernet driver - staging version" - depends on RALINK - ---help--- - If you have an MT7621 Mediatek SoC with ethernet, say Y. - -if NET_VENDOR_MEDIATEK_STAGING -choice - prompt "MAC type" - -config NET_MEDIATEK_MT7621 - bool "MT7621" - depends on MIPS && SOC_MT7621 - -endchoice - -config NET_MEDIATEK_SOC_STAGING - tristate "MediaTek SoC Gigabit Ethernet support" - depends on NET_VENDOR_MEDIATEK_STAGING - select PHYLIB - ---help--- - This driver supports the gigabit ethernet MACs in the - MediaTek SoC family. 
- -config NET_MEDIATEK_MDIO - def_bool NET_MEDIATEK_SOC_STAGING - depends on NET_MEDIATEK_MT7621 - select PHYLIB - -config NET_MEDIATEK_MDIO_MT7620 - def_bool NET_MEDIATEK_SOC_STAGING - depends on NET_MEDIATEK_MT7621 - select NET_MEDIATEK_MDIO - -config NET_MEDIATEK_GSW_MT7621 - def_tristate NET_MEDIATEK_SOC_STAGING - depends on NET_MEDIATEK_MT7621 - -endif #NET_VENDOR_MEDIATEK_STAGING diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile deleted file mode 100644 index 018bcc3596b3..000000000000 --- a/drivers/staging/mt7621-eth/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# -# Makefile for the Ralink SoCs built-in ethernet macs -# - -mtk-eth-soc-y += mtk_eth_soc.o ethtool.o - -mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO) += mdio.o -mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o - -mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621) += soc_mt7621.o - -obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621) += gsw_mt7621.o - -obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mtk-eth-soc.o diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO deleted file mode 100644 index f9e47d4b4cd4..000000000000 --- a/drivers/staging/mt7621-eth/TODO +++ /dev/null @@ -1,13 +0,0 @@ - -- verify devicetree documentation is consistent with code -- fix ethtool - currently doesn't return valid data. -- general code review and clean up -- add support for second MAC on mt7621 -- convert gsw code to use switchdev interfaces -- md7620_mmi_write etc should probably be wrapped - in a regmap abstraction. -- Get soc_mt7621 to work with QDMA TX if possible. -- Ensure phys are correctly configured when a cable - is plugged in. - -Cc: NeilBrown <neil@brown.name> diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c deleted file mode 100644 index 8c4228e2c987..000000000000 --- a/drivers/staging/mt7621-eth/ethtool.c +++ /dev/null @@ -1,250 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> - * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> - */ - -#include "mtk_eth_soc.h" -#include "ethtool.h" - -struct mtk_stat { - char name[ETH_GSTRING_LEN]; - unsigned int idx; -}; - -#define MTK_HW_STAT(stat) { \ - .name = #stat, \ - .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \ -} - -static const struct mtk_stat mtk_ethtool_hw_stats[] = { - MTK_HW_STAT(tx_bytes), - MTK_HW_STAT(tx_packets), - MTK_HW_STAT(tx_skip), - MTK_HW_STAT(tx_collisions), - MTK_HW_STAT(rx_bytes), - MTK_HW_STAT(rx_packets), - MTK_HW_STAT(rx_overflow), - MTK_HW_STAT(rx_fcs_errors), - MTK_HW_STAT(rx_short_errors), - MTK_HW_STAT(rx_long_errors), - MTK_HW_STAT(rx_checksum_errors), - MTK_HW_STAT(rx_flow_control_packets), -}; - -#define MTK_HW_STATS_LEN ARRAY_SIZE(mtk_ethtool_hw_stats) - -static int mtk_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct mtk_mac *mac = netdev_priv(dev); - int err; - - if (!mac->phy_dev) - return -ENODEV; - - if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) { - err = phy_read_status(mac->phy_dev); - if (err) - return -ENODEV; - } - - phy_ethtool_ksettings_get(mac->phy_dev, cmd); - return 0; -} - -static int mtk_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct mtk_mac *mac = netdev_priv(dev); - - if (!mac->phy_dev) - return -ENODEV; - - if (cmd->base.phy_address != mac->phy_dev->mdio.addr) { - if (mac->hw->phy->phy_node[cmd->base.phy_address]) { - mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address]; - mac->phy_flags = MTK_PHY_FLAG_PORT; - } else if (mac->hw->mii_bus) { - mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus, - cmd->base.phy_address); - if (!mac->phy_dev) - return -ENODEV; - mac->phy_flags = MTK_PHY_FLAG_ATTACH; - } else { - return -ENODEV; - } - } - - return phy_ethtool_ksettings_set(mac->phy_dev, cmd); -} - -static void mtk_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_soc_data *soc = mac->hw->soc; - - strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); - strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); - - if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) - info->n_stats = MTK_HW_STATS_LEN; -} - -static u32 mtk_get_msglevel(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - - return mac->hw->msg_enable; -} - -static void mtk_set_msglevel(struct net_device *dev, u32 value) -{ - struct mtk_mac *mac = netdev_priv(dev); - - mac->hw->msg_enable = value; -} - -static int mtk_nway_reset(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - - if (!mac->phy_dev) - return -EOPNOTSUPP; - - return genphy_restart_aneg(mac->phy_dev); -} - -static u32 mtk_get_link(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - int err; - - if (!mac->phy_dev) - goto out_get_link; - - if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) { - err = genphy_update_link(mac->phy_dev); - if (err) - goto out_get_link; - } - - return mac->phy_dev->link; - -out_get_link: - return ethtool_op_get_link(dev); -} - -static int mtk_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct mtk_mac *mac = netdev_priv(dev); - - if ((ring->tx_pending < 2) || - (ring->rx_pending < 2) || - (ring->rx_pending > mac->hw->soc->dma_ring_size) || - (ring->tx_pending > mac->hw->soc->dma_ring_size)) - return 
-EINVAL; - - dev->netdev_ops->ndo_stop(dev); - - mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1); - mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1); - - return dev->netdev_ops->ndo_open(dev); -} - -static void mtk_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct mtk_mac *mac = netdev_priv(dev); - - ring->rx_max_pending = mac->hw->soc->dma_ring_size; - ring->tx_max_pending = mac->hw->soc->dma_ring_size; - ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size; - ring->tx_pending = mac->hw->tx_ring.tx_ring_size; -} - -static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) -{ - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < MTK_HW_STATS_LEN; i++) { - memcpy(data, mtk_ethtool_hw_stats[i].name, - ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } - break; - } -} - -static int mtk_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return MTK_HW_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static void mtk_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_hw_stats *hwstats = mac->hw_stats; - unsigned int start; - int i; - - if (netif_running(dev) && netif_device_present(dev)) { - if (spin_trylock(&hwstats->stats_lock)) { - mtk_stats_update_mac(mac); - spin_unlock(&hwstats->stats_lock); - } - } - - do { - start = u64_stats_fetch_begin_irq(&hwstats->syncp); - for (i = 0; i < MTK_HW_STATS_LEN; i++) - data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx]; - - } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); -} - -static struct ethtool_ops mtk_ethtool_ops = { - .get_link_ksettings = mtk_get_link_ksettings, - .set_link_ksettings = mtk_set_link_ksettings, - .get_drvinfo = mtk_get_drvinfo, - .get_msglevel = mtk_get_msglevel, - .set_msglevel = mtk_set_msglevel, - .nway_reset = mtk_nway_reset, - .get_link = mtk_get_link, - .set_ringparam = mtk_set_ringparam, - .get_ringparam = mtk_get_ringparam, -}; - -void mtk_set_ethtool_ops(struct net_device *netdev) -{ - struct mtk_mac *mac = netdev_priv(netdev); - struct mtk_soc_data *soc = mac->hw->soc; - - if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) { - mtk_ethtool_ops.get_strings = mtk_get_strings; - mtk_ethtool_ops.get_sset_count = mtk_get_sset_count; - mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats; - } - - netdev->ethtool_ops = &mtk_ethtool_ops; -} diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h deleted file mode 100644 index 0071469aea6c..000000000000 --- a/drivers/staging/mt7621-eth/ethtool.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> - * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> - */ - -#ifndef MTK_ETHTOOL_H -#define MTK_ETHTOOL_H - -#include <linux/ethtool.h> - -void mtk_set_ethtool_ops(struct net_device *netdev); - -#endif /* MTK_ETHTOOL_H */ diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h deleted file mode 100644 index 70f7e5481952..000000000000 --- a/drivers/staging/mt7621-eth/gsw_mt7620.h +++ /dev/null @@ -1,277 +0,0 @@ -/* This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License - * - * 
This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> - * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> - */ - -#ifndef _RALINK_GSW_MT7620_H__ -#define _RALINK_GSW_MT7620_H__ - -#define GSW_REG_PHY_TIMEOUT (5 * HZ) - -#define MT7620_GSW_REG_PIAC 0x0004 - -#define GSW_NUM_VLANS 16 -#define GSW_NUM_VIDS 4096 -#define GSW_NUM_PORTS 7 -#define GSW_PORT6 6 - -#define GSW_MDIO_ACCESS BIT(31) -#define GSW_MDIO_READ BIT(19) -#define GSW_MDIO_WRITE BIT(18) -#define GSW_MDIO_START BIT(16) -#define GSW_MDIO_ADDR_SHIFT 20 -#define GSW_MDIO_REG_SHIFT 25 - -#define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100)) -#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100)) -#define GSW_REG_SMACCR0 0x3fE4 -#define GSW_REG_SMACCR1 0x3fE8 -#define GSW_REG_CKGCR 0x3ff0 - -#define GSW_REG_IMR 0x7008 -#define GSW_REG_ISR 0x700c -#define GSW_REG_GPC1 0x7014 - -#define SYSC_REG_CHIP_REV_ID 0x0c -#define SYSC_REG_CFG 0x10 -#define SYSC_REG_CFG1 0x14 -#define RST_CTRL_MCM BIT(2) -#define SYSC_PAD_RGMII2_MDIO 0x58 -#define SYSC_GPIO_MODE 0x60 - -#define PORT_IRQ_ST_CHG 0x7f - -#define MT7621_ESW_PHY_POLLING 0x0000 -#define MT7620_ESW_PHY_POLLING 0x7000 - -#define PMCR_IPG BIT(18) -#define PMCR_MAC_MODE BIT(16) -#define PMCR_FORCE BIT(15) -#define PMCR_TX_EN BIT(14) -#define PMCR_RX_EN BIT(13) -#define PMCR_BACKOFF BIT(9) -#define PMCR_BACKPRES BIT(8) -#define PMCR_RX_FC BIT(5) -#define PMCR_TX_FC BIT(4) -#define PMCR_SPEED(_x) (_x << 2) -#define PMCR_DUPLEX BIT(1) -#define PMCR_LINK BIT(0) - -#define PHY_AN_EN BIT(31) -#define PHY_PRE_EN BIT(30) -#define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24) - -/* ethernet subsystem config register */ -#define ETHSYS_SYSCFG0 0x14 -/* ethernet subsystem clock register */ -#define ETHSYS_CLKCFG0 0x2c -#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11) - -/* p5 RGMII wrapper TX clock control register */ -#define MT7530_P5RGMIITXCR 0x7b04 -/* p5 RGMII wrapper RX clock control register */ -#define MT7530_P5RGMIIRXCR 0x7b00 -/* TRGMII TDX ODT registers */ -#define MT7530_TRGMII_TD0_ODT 0x7a54 -#define MT7530_TRGMII_TD1_ODT 0x7a5c -#define MT7530_TRGMII_TD2_ODT 0x7a64 -#define MT7530_TRGMII_TD3_ODT 0x7a6c -#define MT7530_TRGMII_TD4_ODT 0x7a74 -#define MT7530_TRGMII_TD5_ODT 0x7a7c -/* TRGMII TCK ctrl register */ -#define MT7530_TRGMII_TCK_CTRL 0x7a78 -/* TRGMII Tx ctrl register */ -#define MT7530_TRGMII_TXCTRL 0x7a40 -/* port 6 extended control register */ -#define MT7530_P6ECR 0x7830 -/* IO driver control register */ -#define MT7530_IO_DRV_CR 0x7810 -/* top signal control register */ -#define MT7530_TOP_SIG_CTRL 0x7808 -/* modified hwtrap register */ -#define MT7530_MHWTRAP 0x7804 -/* hwtrap status register */ -#define MT7530_HWTRAP 0x7800 -/* status interrupt register */ -#define MT7530_SYS_INT_STS 0x700c -/* system nterrupt register */ -#define MT7530_SYS_INT_EN 0x7008 -/* system control register */ -#define MT7530_SYS_CTRL 0x7000 -/* port MAC status register */ -#define MT7530_PMSR_P(x) (0x3008 + (x * 0x100)) -/* port MAC control register */ -#define MT7530_PMCR_P(x) (0x3000 + (x * 0x100)) - -#define MT7621_XTAL_SHIFT 6 -#define MT7621_XTAL_MASK 0x7 -#define MT7621_XTAL_25 6 -#define MT7621_XTAL_40 3 -#define MT7621_MDIO_DRV_MASK (3 << 4) -#define MT7621_GE1_MODE_MASK (3 << 12) - 
-#define TRGMII_TXCTRL_TXC_INV BIT(30) -#define P6ECR_INTF_MODE_RGMII BIT(1) -#define P5RGMIIRXCR_C_ALIGN BIT(8) -#define P5RGMIIRXCR_DELAY_2 BIT(1) -#define P5RGMIITXCR_DELAY_2 (BIT(8) | BIT(2)) - -/* TOP_SIG_CTRL bits */ -#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16)) - -/* MHWTRAP bits */ -#define MHWTRAP_MANUAL BIT(16) -#define MHWTRAP_P5_MAC_SEL BIT(13) -#define MHWTRAP_P6_DIS BIT(8) -#define MHWTRAP_P5_RGMII_MODE BIT(7) -#define MHWTRAP_P5_DIS BIT(6) -#define MHWTRAP_PHY_ACCESS BIT(5) - -/* HWTRAP bits */ -#define HWTRAP_XTAL_SHIFT 9 -#define HWTRAP_XTAL_MASK 0x3 - -/* SYS_CTRL bits */ -#define SYS_CTRL_SW_RST BIT(1) -#define SYS_CTRL_REG_RST BIT(0) - -/* PMCR bits */ -#define PMCR_IFG_XMIT_96 BIT(18) -#define PMCR_MAC_MODE BIT(16) -#define PMCR_FORCE_MODE BIT(15) -#define PMCR_TX_EN BIT(14) -#define PMCR_RX_EN BIT(13) -#define PMCR_BACK_PRES_EN BIT(9) -#define PMCR_BACKOFF_EN BIT(8) -#define PMCR_TX_FC_EN BIT(5) -#define PMCR_RX_FC_EN BIT(4) -#define PMCR_FORCE_SPEED_1000 BIT(3) -#define PMCR_FORCE_FDX BIT(1) -#define PMCR_FORCE_LNK BIT(0) -#define PMCR_FIXED_LINK (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \ - PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \ - PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \ - PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \ - PMCR_FORCE_LNK) - -#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \ - PMCR_TX_FC_EN | PMCR_RX_FC_EN) - -/* TRGMII control registers */ -#define GSW_INTF_MODE 0x390 -#define GSW_TRGMII_TD0_ODT 0x354 -#define GSW_TRGMII_TD1_ODT 0x35c -#define GSW_TRGMII_TD2_ODT 0x364 -#define GSW_TRGMII_TD3_ODT 0x36c -#define GSW_TRGMII_TXCTL_ODT 0x374 -#define GSW_TRGMII_TCK_ODT 0x37c -#define GSW_TRGMII_RCK_CTRL 0x300 - -#define INTF_MODE_TRGMII BIT(1) -#define TRGMII_RCK_CTRL_RX_RST BIT(31) - -/* Mac control registers */ -#define MTK_MAC_P2_MCR 0x200 -#define MTK_MAC_P1_MCR 0x100 - -#define MAC_MCR_MAX_RX_2K BIT(29) -#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16)) -#define MAC_MCR_FORCE_MODE BIT(15) -#define MAC_MCR_TX_EN BIT(14) -#define MAC_MCR_RX_EN BIT(13) -#define MAC_MCR_BACKOFF_EN BIT(9) -#define MAC_MCR_BACKPR_EN BIT(8) -#define MAC_MCR_FORCE_RX_FC BIT(5) -#define MAC_MCR_FORCE_TX_FC BIT(4) -#define MAC_MCR_SPEED_1000 BIT(3) -#define MAC_MCR_FORCE_DPX BIT(1) -#define MAC_MCR_FORCE_LINK BIT(0) -#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \ - MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \ - MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \ - MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \ - MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \ - MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK) -#define MAC_MCR_FIXED_LINK_FC (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \ - MAC_MCR_FIXED_LINK) - -/* possible XTAL speed */ -#define MT7623_XTAL_40 0 -#define MT7623_XTAL_20 1 -#define MT7623_XTAL_25 3 - -/* GPIO port control registers */ -#define GPIO_OD33_CTRL8 0x4c0 -#define GPIO_BIAS_CTRL 0xed0 -#define GPIO_DRV_SEL10 0xf00 - -/* on MT7620 the functio of port 4 can be software configured */ -enum { - PORT4_EPHY = 0, - PORT4_EXT, -}; - -/* struct mt7620_gsw - the structure that holds the SoC specific data - * @dev: The Device struct - * @base: The base address - * @piac_offset: The PIAC base may change depending on SoC - * @irq: The IRQ we are using - * @port4: The port4 mode on MT7620 - * @autopoll: Is MDIO autopolling enabled - * @ethsys: The ethsys register map - * @pctl: The pin control register map - * @clk_gsw: The switch clock - * @clk_gp1: The gmac1 clock - * @clk_gp2: The gmac2 clock - * @clk_trgpll: The trgmii pll clock - */ -struct mt7620_gsw { - struct device *dev; - void __iomem 
*base; - u32 piac_offset; - int irq; - int port4; - unsigned long int autopoll; - - struct regmap *ethsys; - struct regmap *pctl; - - struct clk *clk_gsw; - struct clk *clk_gp1; - struct clk *clk_gp2; - struct clk *clk_trgpll; -}; - -/* switch register I/O wrappers */ -void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg); -u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg); - -/* the callback used by the driver core to bringup the switch */ -int mtk_gsw_init(struct mtk_eth *eth); - -/* MDIO access wrappers */ -int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val); -int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg); -void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port); -int mt7620_has_carrier(struct mtk_eth *eth); -void mt7620_print_link_state(struct mtk_eth *eth, int port, int link, - int speed, int duplex); -void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val); -u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg); -void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg); - -u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr, - u32 phy_register, u32 write_data); -u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg); -void mt7620_handle_carrier(struct mtk_eth *eth); - -#endif diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c deleted file mode 100644 index 53767b17bad9..000000000000 --- a/drivers/staging/mt7621-eth/gsw_mt7621.c +++ /dev/null @@ -1,297 +0,0 @@ -/* This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> - * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/platform_device.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> - -#include <ralink_regs.h> - -#include "mtk_eth_soc.h" -#include "gsw_mt7620.h" - -void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg) -{ - iowrite32(val, gsw->base + reg); -} -EXPORT_SYMBOL_GPL(mtk_switch_w32); - -u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg) -{ - return ioread32(gsw->base + reg); -} -EXPORT_SYMBOL_GPL(mtk_switch_r32); - -static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth) -{ - struct mtk_eth *eth = (struct mtk_eth *)_eth; - struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv; - u32 reg, i; - - reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS); - - for (i = 0; i < 5; i++) { - unsigned int link; - - if ((reg & BIT(i)) == 0) - continue; - - link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1; - - if (link == eth->link[i]) - continue; - - eth->link[i] = link; - if (link) - netdev_info(*eth->netdev, - "port %d link up\n", i); - else - netdev_info(*eth->netdev, - "port %d link down\n", i); - } - - mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f); - - return IRQ_HANDLED; -} - -static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw, - struct device_node *np) -{ - u32 i; - u32 val; - - /* hardware reset the switch */ - mtk_reset(eth, RST_CTRL_MCM); - mdelay(10); - - /* reduce RGMII2 PAD driving strength */ - rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO); - - /* gpio mux - RGMII1=Normal mode */ - rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE); - - /* set GMAC1 RGMII mode */ - rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1); - - /* enable MDIO to control MT7530 */ - rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE); - - /* turn off all PHYs */ - for (i = 0; i <= 4; i++) { - val = _mt7620_mii_read(gsw, i, 0x0); - val |= BIT(11); - _mt7620_mii_write(gsw, i, 0x0, val); - } - - /* reset the switch */ - mt7530_mdio_w32(gsw, MT7530_SYS_CTRL, - SYS_CTRL_SW_RST | SYS_CTRL_REG_RST); - usleep_range(10, 20); - - if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) { - /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */ - mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR); - mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK); - } else { - /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */ - mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR); - mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC); - } - - /* GE2, Link down */ - mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR); - - /* Enable Port 6, P5 as GMAC5, P5 disable */ - val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP); - /* Enable Port 6 */ - val &= ~MHWTRAP_P6_DIS; - /* Disable Port 5 */ - val |= MHWTRAP_P5_DIS; - /* manual override of HW-Trap */ - val |= MHWTRAP_MANUAL; - mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val); - - val = rt_sysc_r32(SYSC_REG_CFG); - val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK; - if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) { - /* 40Mhz */ - - /* disable MT7530 core clock */ - _mt7620_mii_write(gsw, 0, 13, 0x1f); - _mt7620_mii_write(gsw, 0, 14, 0x410); - _mt7620_mii_write(gsw, 0, 13, 0x401f); - _mt7620_mii_write(gsw, 0, 14, 0x0); - - /* disable MT7530 PLL */ - _mt7620_mii_write(gsw, 0, 13, 0x1f); - _mt7620_mii_write(gsw, 0, 14, 0x40d); - 
_mt7620_mii_write(gsw, 0, 13, 0x401f); - _mt7620_mii_write(gsw, 0, 14, 0x2020); - - /* for MT7530 core clock = 500Mhz */ - _mt7620_mii_write(gsw, 0, 13, 0x1f); - _mt7620_mii_write(gsw, 0, 14, 0x40e); - _mt7620_mii_write(gsw, 0, 13, 0x401f); - _mt7620_mii_write(gsw, 0, 14, 0x119); - - /* enable MT7530 PLL */ - _mt7620_mii_write(gsw, 0, 13, 0x1f); - _mt7620_mii_write(gsw, 0, 14, 0x40d); - _mt7620_mii_write(gsw, 0, 13, 0x401f); - _mt7620_mii_write(gsw, 0, 14, 0x2820); - - usleep_range(20, 40); - - /* enable MT7530 core clock */ - _mt7620_mii_write(gsw, 0, 13, 0x1f); - _mt7620_mii_write(gsw, 0, 14, 0x410); - _mt7620_mii_write(gsw, 0, 13, 0x401f); - } - - /* RGMII */ - _mt7620_mii_write(gsw, 0, 14, 0x1); - - /* set MT7530 central align */ - mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR); - mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0, - MT7530_TRGMII_TXCTRL); - mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855); - - /* delay setting for 10/1000M */ - mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR, - P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2); - mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14); - - /* lower Tx Driving*/ - mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44); - mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44); - mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44); - mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44); - mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44); - mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44); - - /* turn on all PHYs */ - for (i = 0; i <= 4; i++) { - val = _mt7620_mii_read(gsw, i, 0); - val &= ~BIT(11); - _mt7620_mii_write(gsw, i, 0, val); - } - -#define MT7530_NUM_PORTS 8 -#define REG_ESW_PORT_PCR(x) (0x2004 | ((x) << 8)) -#define REG_ESW_PORT_PVC(x) (0x2010 | ((x) << 8)) -#define REG_ESW_PORT_PPBV1(x) (0x2014 | ((x) << 8)) -#define MT7530_CPU_PORT 6 - - /* This is copied from mt7530_apply_config in libreCMC driver */ - { - int i; - - for (i = 0; i < MT7530_NUM_PORTS; i++) - mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000); - - mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT), - 0x00ff0000); - - for (i = 0; i < MT7530_NUM_PORTS; i++) - mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0); - } - - /* enable irq */ - mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL); - mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f); -} - -static const struct of_device_id mediatek_gsw_match[] = { - { .compatible = "mediatek,mt7621-gsw" }, - {}, -}; -MODULE_DEVICE_TABLE(of, mediatek_gsw_match); - -int mtk_gsw_init(struct mtk_eth *eth) -{ - struct device_node *np = eth->switch_np; - struct platform_device *pdev = of_find_device_by_node(np); - struct mt7620_gsw *gsw; - - if (!pdev) - return -ENODEV; - - if (!of_device_is_compatible(np, mediatek_gsw_match->compatible)) - return -EINVAL; - - gsw = platform_get_drvdata(pdev); - eth->sw_priv = gsw; - - if (!gsw->irq) - return -EINVAL; - - request_irq(gsw->irq, gsw_interrupt_mt7621, 0, - "gsw", eth); - disable_irq(gsw->irq); - - mt7621_hw_init(eth, gsw, np); - - enable_irq(gsw->irq); - - return 0; -} -EXPORT_SYMBOL_GPL(mtk_gsw_init); - -static int mt7621_gsw_probe(struct platform_device *pdev) -{ - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - struct mt7620_gsw *gsw; - - gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL); - if (!gsw) - return -ENOMEM; - - gsw->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(gsw->base)) - return PTR_ERR(gsw->base); - - gsw->dev = &pdev->dev; - gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); - - 
platform_set_drvdata(pdev, gsw); - - return 0; -} - -static int mt7621_gsw_remove(struct platform_device *pdev) -{ - platform_set_drvdata(pdev, NULL); - - return 0; -} - -static struct platform_driver gsw_driver = { - .probe = mt7621_gsw_probe, - .remove = mt7621_gsw_remove, - .driver = { - .name = "mt7621-gsw", - .of_match_table = mediatek_gsw_match, - }, -}; - -module_platform_driver(gsw_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); -MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC"); diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c deleted file mode 100644 index 5fea6a447eed..000000000000 --- a/drivers/staging/mt7621-eth/mdio.c +++ /dev/null @@ -1,275 +0,0 @@ -/* This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License - * - * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> - * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/phy.h> -#include <linux/of_net.h> -#include <linux/of_mdio.h> - -#include "mtk_eth_soc.h" -#include "mdio.h" - -static int mtk_mdio_reset(struct mii_bus *bus) -{ - /* TODO */ - return 0; -} - -static void mtk_phy_link_adjust(struct net_device *dev) -{ - struct mtk_eth *eth = netdev_priv(dev); - unsigned long flags; - int i; - - spin_lock_irqsave(ð->phy->lock, flags); - for (i = 0; i < 8; i++) { - if (eth->phy->phy_node[i]) { - struct phy_device *phydev = eth->phy->phy[i]; - int status_change = 0; - - if (phydev->link) - if (eth->phy->duplex[i] != phydev->duplex || - eth->phy->speed[i] != phydev->speed) - status_change = 1; - - if (phydev->link != eth->link[i]) - status_change = 1; - - switch (phydev->speed) { - case SPEED_1000: - case SPEED_100: - case SPEED_10: - eth->link[i] = phydev->link; - eth->phy->duplex[i] = phydev->duplex; - eth->phy->speed[i] = phydev->speed; - - if (status_change && - eth->soc->mdio_adjust_link) - eth->soc->mdio_adjust_link(eth, i); - break; - } - } - } - spin_unlock_irqrestore(ð->phy->lock, flags); -} - -int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac, - struct device_node *phy_node) -{ - const __be32 *_port = NULL; - struct phy_device *phydev; - int phy_mode, port; - - _port = of_get_property(phy_node, "reg", NULL); - - if (!_port || (be32_to_cpu(*_port) >= 0x20)) { - pr_err("%pOFn: invalid port id\n", phy_node); - return -EINVAL; - } - port = be32_to_cpu(*_port); - phy_mode = of_get_phy_mode(phy_node); - if (phy_mode < 0) { - dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode); - eth->phy->phy_node[port] = NULL; - return -EINVAL; - } - - phydev = of_phy_connect(eth->netdev[mac->id], phy_node, - mtk_phy_link_adjust, 0, phy_mode); - if (!phydev) { - dev_err(eth->dev, "could not connect to PHY\n"); - eth->phy->phy_node[port] = NULL; - return -ENODEV; - } - - phydev->supported &= PHY_1000BT_FEATURES; - phydev->advertising = phydev->supported; - - dev_info(eth->dev, - "connected port %d to PHY at %s [uid=%08x, driver=%s]\n", - port, phydev_name(phydev), phydev->phy_id, - phydev->drv->name); - - eth->phy->phy[port] = phydev; - eth->link[port] = 0; - - return 0; -} - -static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac, - struct phy_device *phy) -{ - phy_attach(eth->netdev[mac->id], phydev_name(phy), - PHY_INTERFACE_MODE_MII); - - 
phy->autoneg = AUTONEG_ENABLE; - phy->speed = 0; - phy->duplex = 0; - phy_set_max_speed(phy, SPEED_100); - phy->advertising = phy->supported | ADVERTISED_Autoneg; - - phy_start_aneg(phy); -} - -static int mtk_phy_connect(struct mtk_mac *mac) -{ - struct mtk_eth *eth = mac->hw; - int i; - - for (i = 0; i < 8; i++) { - if (eth->phy->phy_node[i]) { - if (!mac->phy_dev) { - mac->phy_dev = eth->phy->phy[i]; - mac->phy_flags = MTK_PHY_FLAG_PORT; - } - } else if (eth->mii_bus) { - struct phy_device *phy; - - phy = mdiobus_get_phy(eth->mii_bus, i); - if (phy) { - phy_init(eth, mac, phy); - if (!mac->phy_dev) { - mac->phy_dev = phy; - mac->phy_flags = MTK_PHY_FLAG_ATTACH; - } - } - } - } - - return 0; -} - -static void mtk_phy_disconnect(struct mtk_mac *mac) -{ - struct mtk_eth *eth = mac->hw; - unsigned long flags; - int i; - - for (i = 0; i < 8; i++) - if (eth->phy->phy_fixed[i]) { - spin_lock_irqsave(ð->phy->lock, flags); - eth->link[i] = 0; - if (eth->soc->mdio_adjust_link) - eth->soc->mdio_adjust_link(eth, i); - spin_unlock_irqrestore(ð->phy->lock, flags); - } else if (eth->phy->phy[i]) { - phy_disconnect(eth->phy->phy[i]); - } else if (eth->mii_bus) { - struct phy_device *phy = - mdiobus_get_phy(eth->mii_bus, i); - - if (phy) - phy_detach(phy); - } -} - -static void mtk_phy_start(struct mtk_mac *mac) -{ - struct mtk_eth *eth = mac->hw; - unsigned long flags; - int i; - - for (i = 0; i < 8; i++) { - if (eth->phy->phy_fixed[i]) { - spin_lock_irqsave(ð->phy->lock, flags); - eth->link[i] = 1; - if (eth->soc->mdio_adjust_link) - eth->soc->mdio_adjust_link(eth, i); - spin_unlock_irqrestore(ð->phy->lock, flags); - } else if (eth->phy->phy[i]) { - phy_start(eth->phy->phy[i]); - } - } -} - -static void mtk_phy_stop(struct mtk_mac *mac) -{ - struct mtk_eth *eth = mac->hw; - unsigned long flags; - int i; - - for (i = 0; i < 8; i++) - if (eth->phy->phy_fixed[i]) { - spin_lock_irqsave(ð->phy->lock, flags); - eth->link[i] = 0; - if (eth->soc->mdio_adjust_link) - eth->soc->mdio_adjust_link(eth, i); - spin_unlock_irqrestore(ð->phy->lock, flags); - } else if (eth->phy->phy[i]) { - phy_stop(eth->phy->phy[i]); - } -} - -static struct mtk_phy phy_ralink = { - .connect = mtk_phy_connect, - .disconnect = mtk_phy_disconnect, - .start = mtk_phy_start, - .stop = mtk_phy_stop, -}; - -int mtk_mdio_init(struct mtk_eth *eth) -{ - struct device_node *mii_np; - int err; - - if (!eth->soc->mdio_read || !eth->soc->mdio_write) - return 0; - - spin_lock_init(&phy_ralink.lock); - eth->phy = &phy_ralink; - - mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); - if (!mii_np) { - dev_err(eth->dev, "no %s child node found", "mdio-bus"); - return -ENODEV; - } - - if (!of_device_is_available(mii_np)) { - err = 0; - goto err_put_node; - } - - eth->mii_bus = mdiobus_alloc(); - if (!eth->mii_bus) { - err = -ENOMEM; - goto err_put_node; - } - - eth->mii_bus->name = "mdio"; - eth->mii_bus->read = eth->soc->mdio_read; - eth->mii_bus->write = eth->soc->mdio_write; - eth->mii_bus->reset = mtk_mdio_reset; - eth->mii_bus->priv = eth; - eth->mii_bus->parent = eth->dev; - - snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); - err = of_mdiobus_register(eth->mii_bus, mii_np); - if (err) - goto err_free_bus; - - return 0; - -err_free_bus: - kfree(eth->mii_bus); -err_put_node: - of_node_put(mii_np); - eth->mii_bus = NULL; - return err; -} - -void mtk_mdio_cleanup(struct mtk_eth *eth) -{ - if (!eth->mii_bus) - return; - - mdiobus_unregister(eth->mii_bus); - of_node_put(eth->mii_bus->dev.of_node); - kfree(eth->mii_bus); -} diff 
--git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h deleted file mode 100644 index b14e23842a01..000000000000 --- a/drivers/staging/mt7621-eth/mdio.h +++ /dev/null @@ -1,27 +0,0 @@ -/* This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> - * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> - */ - -#ifndef _RALINK_MDIO_H__ -#define _RALINK_MDIO_H__ - -#ifdef CONFIG_NET_MEDIATEK_MDIO -int mtk_mdio_init(struct mtk_eth *eth); -void mtk_mdio_cleanup(struct mtk_eth *eth); -int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac, - struct device_node *phy_node); -#else -static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; } -static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {} -#endif -#endif diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c deleted file mode 100644 index ced605c2914e..000000000000 --- a/drivers/staging/mt7621-eth/mdio_mt7620.c +++ /dev/null @@ -1,173 +0,0 @@ -/* This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> - * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> - -#include "mtk_eth_soc.h" -#include "gsw_mt7620.h" -#include "mdio.h" - -static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw) -{ - unsigned long t_start = jiffies; - - while (1) { - if (!(mtk_switch_r32(gsw, - gsw->piac_offset + MT7620_GSW_REG_PIAC) & - GSW_MDIO_ACCESS)) - return 0; - if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT)) - break; - } - - dev_err(gsw->dev, "mdio: MDIO timeout\n"); - return -1; -} - -u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr, - u32 phy_register, u32 write_data) -{ - if (mt7620_mii_busy_wait(gsw)) - return -1; - - write_data &= 0xffff; - - mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE | - (phy_register << GSW_MDIO_REG_SHIFT) | - (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data, - MT7620_GSW_REG_PIAC); - - if (mt7620_mii_busy_wait(gsw)) - return -1; - - return 0; -} -EXPORT_SYMBOL_GPL(_mt7620_mii_write); - -u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg) -{ - u32 d; - - if (mt7620_mii_busy_wait(gsw)) - return 0xffff; - - mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ | - (phy_reg << GSW_MDIO_REG_SHIFT) | - (phy_addr << GSW_MDIO_ADDR_SHIFT), - MT7620_GSW_REG_PIAC); - - if (mt7620_mii_busy_wait(gsw)) - return 0xffff; - - d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff; - - return d; -} -EXPORT_SYMBOL_GPL(_mt7620_mii_read); - -int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val) -{ - struct mtk_eth *eth = bus->priv; - struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv; - - return _mt7620_mii_write(gsw, phy_addr, phy_reg, val); -} - -int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) -{ - struct mtk_eth *eth = bus->priv; - struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv; - - return _mt7620_mii_read(gsw, phy_addr, phy_reg); -} - -void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val) -{ - _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff); - _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf, val & 0xffff); - _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16); -} -EXPORT_SYMBOL_GPL(mt7530_mdio_w32); - -u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg) -{ - u16 high, low; - - _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff); - low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf); - high = _mt7620_mii_read(gsw, 0x1f, 0x10); - - return (high << 16) | (low & 0xffff); -} -EXPORT_SYMBOL_GPL(mt7530_mdio_r32); - -void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg) -{ - u32 val = mt7530_mdio_r32(gsw, reg); - - val &= ~mask; - val |= set; - mt7530_mdio_w32(gsw, reg, val); -} -EXPORT_SYMBOL_GPL(mt7530_mdio_m32); - -static unsigned char *mtk_speed_str(int speed) -{ - switch (speed) { - case 2: - case SPEED_1000: - return "1000"; - case 1: - case SPEED_100: - return "100"; - case 0: - case SPEED_10: - return "10"; - } - - return "? 
"; -} - -int mt7620_has_carrier(struct mtk_eth *eth) -{ - struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv; - int i; - - for (i = 0; i < GSW_PORT6; i++) - if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1) - return 1; - return 0; -} - -void mt7620_print_link_state(struct mtk_eth *eth, int port, int link, - int speed, int duplex) -{ - struct mt7620_gsw *gsw = eth->sw_priv; - - if (link) - dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n", - port, mtk_speed_str(speed), - (duplex) ? "Full" : "Half"); - else - dev_info(gsw->dev, "port %d link down\n", port); -} - -void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port) -{ - mt7620_print_link_state(eth, port, eth->link[port], - eth->phy->speed[port], - (eth->phy->duplex[port] == DUPLEX_FULL)); -} diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c deleted file mode 100644 index 6027b19f7bc2..000000000000 --- a/drivers/staging/mt7621-eth/mtk_eth_soc.c +++ /dev/null @@ -1,2176 +0,0 @@ -/* This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> - * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/dma-mapping.h> -#include <linux/init.h> -#include <linux/skbuff.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/platform_device.h> -#include <linux/of_device.h> -#include <linux/mfd/syscon.h> -#include <linux/clk.h> -#include <linux/of_net.h> -#include <linux/of_mdio.h> -#include <linux/if_vlan.h> -#include <linux/reset.h> -#include <linux/tcp.h> -#include <linux/io.h> -#include <linux/bug.h> -#include <linux/regmap.h> - -#include "mtk_eth_soc.h" -#include "mdio.h" -#include "ethtool.h" - -#define MAX_RX_LENGTH 1536 -#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) -#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) -#define DMA_DUMMY_DESC 0xffffffff -#define MTK_DEFAULT_MSG_ENABLE \ - (NETIF_MSG_DRV | \ - NETIF_MSG_PROBE | \ - NETIF_MSG_LINK | \ - NETIF_MSG_TIMER | \ - NETIF_MSG_IFDOWN | \ - NETIF_MSG_IFUP | \ - NETIF_MSG_RX_ERR | \ - NETIF_MSG_TX_ERR) - -#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE) -#define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1)) -#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1)) - -#define SYSC_REG_RSTCTRL 0x34 - -static int mtk_msg_level = -1; -module_param_named(msg_level, mtk_msg_level, int, 0); -MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); - -static const u16 mtk_reg_table_default[MTK_REG_COUNT] = { - [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG, - [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG, - [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG, - [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0, - [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0, - [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0, - [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0, - [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0, - [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0, - 
[MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0, - [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0, - [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE, - [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS, - [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0, - [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT, - [MTK_REG_MTK_RST_GL] = MTK_RST_GL, -}; - -static const u16 *mtk_reg_table = mtk_reg_table_default; - -void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg) -{ - __raw_writel(val, eth->base + reg); -} - -u32 mtk_r32(struct mtk_eth *eth, unsigned int reg) -{ - return __raw_readl(eth->base + reg); -} - -static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg) -{ - mtk_w32(eth, val, mtk_reg_table[reg]); -} - -static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg) -{ - return mtk_r32(eth, mtk_reg_table[reg]); -} - -/* these bits are also exposed via the reset-controller API. however the switch - * and FE need to be brought out of reset in the exakt same moemtn and the - * reset-controller api does not provide this feature yet. Do the reset manually - * until we fixed the reset-controller api to be able to do this - */ -void mtk_reset(struct mtk_eth *eth, u32 reset_bits) -{ - u32 val; - - regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val); - val |= reset_bits; - regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val); - usleep_range(10, 20); - val &= ~reset_bits; - regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val); - usleep_range(10, 20); -} -EXPORT_SYMBOL(mtk_reset); - -static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask) -{ - if (eth->soc->dma_type & MTK_PDMA) - mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS); - if (eth->soc->dma_type & MTK_QDMA) - mtk_w32(eth, mask, MTK_QMTK_INT_STATUS); -} - -static inline u32 mtk_irq_pending(struct mtk_eth *eth) -{ - u32 status = 0; - - if (eth->soc->dma_type & MTK_PDMA) - status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS); - if (eth->soc->dma_type & MTK_QDMA) - status |= mtk_r32(eth, MTK_QMTK_INT_STATUS); - - return status; -} - -static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask) -{ - u32 status_reg = MTK_REG_MTK_INT_STATUS; - - if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2]) - status_reg = MTK_REG_MTK_INT_STATUS2; - - mtk_reg_w32(eth, mask, status_reg); -} - -static u32 mtk_irq_pending_status(struct mtk_eth *eth) -{ - u32 status_reg = MTK_REG_MTK_INT_STATUS; - - if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2]) - status_reg = MTK_REG_MTK_INT_STATUS2; - - return mtk_reg_r32(eth, status_reg); -} - -static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) -{ - u32 val; - - if (eth->soc->dma_type & MTK_PDMA) { - val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE); - mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE); - /* flush write */ - mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE); - } - if (eth->soc->dma_type & MTK_QDMA) { - val = mtk_r32(eth, MTK_QMTK_INT_ENABLE); - mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE); - /* flush write */ - mtk_r32(eth, MTK_QMTK_INT_ENABLE); - } -} - -static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask) -{ - u32 val; - - if (eth->soc->dma_type & MTK_PDMA) { - val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE); - mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE); - /* flush write */ - mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE); - } - if (eth->soc->dma_type & MTK_QDMA) { - val = mtk_r32(eth, MTK_QMTK_INT_ENABLE); - mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE); - /* flush write */ - mtk_r32(eth, MTK_QMTK_INT_ENABLE); - } -} - -static inline u32 mtk_irq_enabled(struct mtk_eth *eth) -{ - u32 enabled = 0; - - if 
(eth->soc->dma_type & MTK_PDMA) - enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE); - if (eth->soc->dma_type & MTK_QDMA) - enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE); - - return enabled; -} - -static inline void mtk_hw_set_macaddr(struct mtk_mac *mac, - unsigned char *macaddr) -{ - unsigned long flags; - - spin_lock_irqsave(&mac->hw->page_lock, flags); - mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH); - mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | - (macaddr[4] << 8) | macaddr[5], - MTK_GDMA1_MAC_ADRL); - spin_unlock_irqrestore(&mac->hw->page_lock, flags); -} - -static int mtk_set_mac_address(struct net_device *dev, void *p) -{ - int ret = eth_mac_addr(dev, p); - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - - if (ret) - return ret; - - if (eth->soc->set_mac) - eth->soc->set_mac(mac, dev->dev_addr); - else - mtk_hw_set_macaddr(mac, p); - - return 0; -} - -static inline int mtk_max_frag_size(int mtu) -{ - /* make sure buf_size will be at least MAX_RX_LENGTH */ - if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH) - mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN; - - return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); -} - -static inline int mtk_max_buf_size(int frag_size) -{ - int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - - WARN_ON(buf_size < MAX_RX_LENGTH); - - return buf_size; -} - -static inline void mtk_get_rxd(struct mtk_rx_dma *rxd, - struct mtk_rx_dma *dma_rxd) -{ - rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); - rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); - rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); - rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); -} - -static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd, - struct mtk_tx_dma *dma_txd) -{ - WRITE_ONCE(dma_txd->txd1, txd->txd1); - WRITE_ONCE(dma_txd->txd3, txd->txd3); - WRITE_ONCE(dma_txd->txd4, txd->txd4); - /* clean dma done flag last */ - WRITE_ONCE(dma_txd->txd2, txd->txd2); -} - -static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring) -{ - int i; - - if (ring->rx_data && ring->rx_dma) { - for (i = 0; i < ring->rx_ring_size; i++) { - if (!ring->rx_data[i]) - continue; - if (!ring->rx_dma[i].rxd1) - continue; - dma_unmap_single(eth->dev, - ring->rx_dma[i].rxd1, - ring->rx_buf_size, - DMA_FROM_DEVICE); - skb_free_frag(ring->rx_data[i]); - } - kfree(ring->rx_data); - ring->rx_data = NULL; - } - - if (ring->rx_dma) { - dma_free_coherent(eth->dev, - ring->rx_ring_size * sizeof(*ring->rx_dma), - ring->rx_dma, - ring->rx_phys); - ring->rx_dma = NULL; - } -} - -static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring) -{ - int i, pad = 0; - - ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN); - ring->rx_buf_size = mtk_max_buf_size(ring->frag_size); - ring->rx_ring_size = eth->soc->dma_ring_size; - ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data), - GFP_KERNEL); - if (!ring->rx_data) - goto no_rx_mem; - - for (i = 0; i < ring->rx_ring_size; i++) { - ring->rx_data[i] = netdev_alloc_frag(ring->frag_size); - if (!ring->rx_data[i]) - goto no_rx_mem; - } - - ring->rx_dma = - dma_alloc_coherent(eth->dev, - ring->rx_ring_size * sizeof(*ring->rx_dma), - &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO); - if (!ring->rx_dma) - goto no_rx_mem; - - if (!eth->soc->rx_2b_offset) - pad = NET_IP_ALIGN; - - for (i = 0; i < ring->rx_ring_size; i++) { - dma_addr_t dma_addr = dma_map_single(eth->dev, - ring->rx_data[i] + NET_SKB_PAD + pad, - ring->rx_buf_size, - DMA_FROM_DEVICE); - if 
(unlikely(dma_mapping_error(eth->dev, dma_addr))) - goto no_rx_mem; - ring->rx_dma[i].rxd1 = (unsigned int)dma_addr; - - if (eth->soc->rx_sg_dma) - ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size); - else - ring->rx_dma[i].rxd2 = RX_DMA_LSO; - } - ring->rx_calc_idx = ring->rx_ring_size - 1; - /* make sure that all changes to the dma ring are flushed before we - * continue - */ - wmb(); - - return 0; - -no_rx_mem: - return -ENOMEM; -} - -static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) -{ - if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { - dma_unmap_single(dev, - dma_unmap_addr(tx_buf, dma_addr0), - dma_unmap_len(tx_buf, dma_len0), - DMA_TO_DEVICE); - } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { - dma_unmap_page(dev, - dma_unmap_addr(tx_buf, dma_addr0), - dma_unmap_len(tx_buf, dma_len0), - DMA_TO_DEVICE); - } - if (tx_buf->flags & MTK_TX_FLAGS_PAGE1) - dma_unmap_page(dev, - dma_unmap_addr(tx_buf, dma_addr1), - dma_unmap_len(tx_buf, dma_len1), - DMA_TO_DEVICE); - - tx_buf->flags = 0; - if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC)) - dev_kfree_skb_any(tx_buf->skb); - tx_buf->skb = NULL; -} - -static void mtk_pdma_tx_clean(struct mtk_eth *eth) -{ - struct mtk_tx_ring *ring = ð->tx_ring; - int i; - - if (ring->tx_buf) { - for (i = 0; i < ring->tx_ring_size; i++) - mtk_txd_unmap(eth->dev, &ring->tx_buf[i]); - kfree(ring->tx_buf); - ring->tx_buf = NULL; - } - - if (ring->tx_dma) { - dma_free_coherent(eth->dev, - ring->tx_ring_size * sizeof(*ring->tx_dma), - ring->tx_dma, - ring->tx_phys); - ring->tx_dma = NULL; - } -} - -static void mtk_qdma_tx_clean(struct mtk_eth *eth) -{ - struct mtk_tx_ring *ring = ð->tx_ring; - int i; - - if (ring->tx_buf) { - for (i = 0; i < ring->tx_ring_size; i++) - mtk_txd_unmap(eth->dev, &ring->tx_buf[i]); - kfree(ring->tx_buf); - ring->tx_buf = NULL; - } - - if (ring->tx_dma) { - dma_free_coherent(eth->dev, - ring->tx_ring_size * sizeof(*ring->tx_dma), - ring->tx_dma, - ring->tx_phys); - ring->tx_dma = NULL; - } -} - -void mtk_stats_update_mac(struct mtk_mac *mac) -{ - struct mtk_hw_stats *hw_stats = mac->hw_stats; - unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]; - u64 stats; - - base += hw_stats->reg_offset; - - u64_stats_update_begin(&hw_stats->syncp); - - if (mac->hw->soc->new_stats) { - hw_stats->rx_bytes += mtk_r32(mac->hw, base); - stats = mtk_r32(mac->hw, base + 0x04); - if (stats) - hw_stats->rx_bytes += (stats << 32); - hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08); - hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10); - hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14); - hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18); - hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c); - hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20); - hw_stats->rx_flow_control_packets += - mtk_r32(mac->hw, base + 0x24); - hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28); - hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c); - hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30); - stats = mtk_r32(mac->hw, base + 0x34); - if (stats) - hw_stats->tx_bytes += (stats << 32); - hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38); - } else { - hw_stats->tx_bytes += mtk_r32(mac->hw, base); - hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04); - hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08); - hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c); - hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20); - hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24); 
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28); - hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c); - hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30); - hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34); - hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38); - hw_stats->rx_flow_control_packets += - mtk_r32(mac->hw, base + 0x3c); - } - - u64_stats_update_end(&hw_stats->syncp); -} - -static void mtk_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *storage) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_hw_stats *hw_stats = mac->hw_stats; - unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]; - unsigned int start; - - if (!base) { - netdev_stats_to_stats64(storage, &dev->stats); - return; - } - - if (netif_running(dev) && netif_device_present(dev)) { - if (spin_trylock(&hw_stats->stats_lock)) { - mtk_stats_update_mac(mac); - spin_unlock(&hw_stats->stats_lock); - } - } - - do { - start = u64_stats_fetch_begin_irq(&hw_stats->syncp); - storage->rx_packets = hw_stats->rx_packets; - storage->tx_packets = hw_stats->tx_packets; - storage->rx_bytes = hw_stats->rx_bytes; - storage->tx_bytes = hw_stats->tx_bytes; - storage->collisions = hw_stats->tx_collisions; - storage->rx_length_errors = hw_stats->rx_short_errors + - hw_stats->rx_long_errors; - storage->rx_over_errors = hw_stats->rx_overflow; - storage->rx_crc_errors = hw_stats->rx_fcs_errors; - storage->rx_errors = hw_stats->rx_checksum_errors; - storage->tx_aborted_errors = hw_stats->tx_skip; - } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); - - storage->tx_errors = dev->stats.tx_errors; - storage->rx_dropped = dev->stats.rx_dropped; - storage->tx_dropped = dev->stats.tx_dropped; -} - -static int mtk_vlan_rx_add_vid(struct net_device *dev, - __be16 proto, u16 vid) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - u32 idx = (vid & 0xf); - u32 vlan_cfg; - - if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) && - (dev->features & NETIF_F_HW_VLAN_CTAG_TX))) - return 0; - - if (test_bit(idx, ð->vlan_map)) { - netdev_warn(dev, "disable tx vlan offload\n"); - dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX; - netdev_update_features(dev); - } else { - vlan_cfg = mtk_r32(eth, - mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] + - ((idx >> 1) << 2)); - if (idx & 0x1) { - vlan_cfg &= 0xffff; - vlan_cfg |= (vid << 16); - } else { - vlan_cfg &= 0xffff0000; - vlan_cfg |= vid; - } - mtk_w32(eth, - vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] + - ((idx >> 1) << 2)); - set_bit(idx, ð->vlan_map); - } - - return 0; -} - -static int mtk_vlan_rx_kill_vid(struct net_device *dev, - __be16 proto, u16 vid) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - u32 idx = (vid & 0xf); - - if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) && - (dev->features & NETIF_F_HW_VLAN_CTAG_TX))) - return 0; - - clear_bit(idx, ð->vlan_map); - - return 0; -} - -static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring) -{ - barrier(); - return (u32)(ring->tx_ring_size - - ((ring->tx_next_idx - ring->tx_free_idx) & - (ring->tx_ring_size - 1))); -} - -static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth) -{ - unsigned int len; - int ret; - - if (unlikely(skb->len >= VLAN_ETH_ZLEN)) - return 0; - - if (eth->soc->padding_64b && !eth->soc->padding_bug) - return 0; - - if (skb_vlan_tag_present(skb)) - len = ETH_ZLEN; - else if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) - len = VLAN_ETH_ZLEN; - else if (!eth->soc->padding_64b) - len = 
ETH_ZLEN; - else - return 0; - - if (skb->len >= len) - return 0; - - ret = skb_pad(skb, len - skb->len); - if (ret < 0) - return ret; - skb->len = len; - skb_set_tail_pointer(skb, len); - - return ret; -} - -static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev, - int tx_num, struct mtk_tx_ring *ring, bool gso) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - struct skb_frag_struct *frag; - struct mtk_tx_dma txd, *ptxd; - struct mtk_tx_buf *tx_buf; - int i, j, k, frag_size, frag_map_size, offset; - dma_addr_t mapped_addr; - unsigned int nr_frags; - u32 def_txd4; - - if (mtk_skb_padto(skb, eth)) { - netif_warn(eth, tx_err, dev, "tx padding failed!\n"); - return -1; - } - - tx_buf = &ring->tx_buf[ring->tx_next_idx]; - memset(tx_buf, 0, sizeof(*tx_buf)); - memset(&txd, 0, sizeof(txd)); - nr_frags = skb_shinfo(skb)->nr_frags; - - /* init tx descriptor */ - def_txd4 = eth->soc->txd4; - txd.txd4 = def_txd4; - - if (eth->soc->mac_count > 1) - txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT; - - if (gso) - txd.txd4 |= TX_DMA_TSO; - - /* TX Checksum offload */ - if (skb->ip_summed == CHECKSUM_PARTIAL) - txd.txd4 |= TX_DMA_CHKSUM; - - /* VLAN header offload */ - if (skb_vlan_tag_present(skb)) { - u16 tag = skb_vlan_tag_get(skb); - - txd.txd4 |= TX_DMA_INS_VLAN | - ((tag >> VLAN_PRIO_SHIFT) << 4) | - (tag & 0xF); - } - - mapped_addr = dma_map_single(&dev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) - return -1; - - txd.txd1 = mapped_addr; - txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb)); - - tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; - dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); - dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); - - /* TX SG offload */ - j = ring->tx_next_idx; - k = 0; - for (i = 0; i < nr_frags; i++) { - offset = 0; - frag = &skb_shinfo(skb)->frags[i]; - frag_size = skb_frag_size(frag); - - while (frag_size > 0) { - frag_map_size = min(frag_size, TX_DMA_BUF_LEN); - mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, - frag_map_size, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) - goto err_dma; - - if (k & 0x1) { - j = NEXT_TX_DESP_IDX(j); - txd.txd1 = mapped_addr; - txd.txd2 = TX_DMA_PLEN0(frag_map_size); - txd.txd4 = def_txd4; - - tx_buf = &ring->tx_buf[j]; - memset(tx_buf, 0, sizeof(*tx_buf)); - - tx_buf->flags |= MTK_TX_FLAGS_PAGE0; - dma_unmap_addr_set(tx_buf, dma_addr0, - mapped_addr); - dma_unmap_len_set(tx_buf, dma_len0, - frag_map_size); - } else { - txd.txd3 = mapped_addr; - txd.txd2 |= TX_DMA_PLEN1(frag_map_size); - - tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC; - tx_buf->flags |= MTK_TX_FLAGS_PAGE1; - dma_unmap_addr_set(tx_buf, dma_addr1, - mapped_addr); - dma_unmap_len_set(tx_buf, dma_len1, - frag_map_size); - - if (!((i == (nr_frags - 1)) && - (frag_map_size == frag_size))) { - mtk_set_txd_pdma(&txd, - &ring->tx_dma[j]); - memset(&txd, 0, sizeof(txd)); - } - } - frag_size -= frag_map_size; - offset += frag_map_size; - k++; - } - } - - /* set last segment */ - if (k & 0x1) - txd.txd2 |= TX_DMA_LS1; - else - txd.txd2 |= TX_DMA_LS0; - mtk_set_txd_pdma(&txd, &ring->tx_dma[j]); - - /* store skb to cleanup */ - tx_buf->skb = skb; - - netdev_sent_queue(dev, skb->len); - skb_tx_timestamp(skb); - - ring->tx_next_idx = NEXT_TX_DESP_IDX(j); - /* make sure that all changes to the dma ring are flushed before we - * continue - */ - wmb(); - atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring)); - - if 
(netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) - mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0); - - return 0; - -err_dma: - j = ring->tx_next_idx; - for (i = 0; i < tx_num; i++) { - ptxd = &ring->tx_dma[j]; - tx_buf = &ring->tx_buf[j]; - - /* unmap dma */ - mtk_txd_unmap(&dev->dev, tx_buf); - - ptxd->txd2 = TX_DMA_DESP2_DEF; - j = NEXT_TX_DESP_IDX(j); - } - /* make sure that all changes to the dma ring are flushed before we - * continue - */ - wmb(); - return -1; -} - -/* the qdma core needs scratch memory to be setup */ -static int mtk_init_fq_dma(struct mtk_eth *eth) -{ - dma_addr_t dma_addr, phy_ring_head, phy_ring_tail; - int cnt = eth->soc->dma_ring_size; - int i; - - eth->scratch_ring = dma_alloc_coherent(eth->dev, - cnt * sizeof(struct mtk_tx_dma), - &phy_ring_head, - GFP_ATOMIC | __GFP_ZERO); - if (unlikely(!eth->scratch_ring)) - return -ENOMEM; - - eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE, - GFP_KERNEL); - dma_addr = dma_map_single(eth->dev, - eth->scratch_head, cnt * QDMA_PAGE_SIZE, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, dma_addr))) - return -ENOMEM; - - memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt); - phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1)); - - for (i = 0; i < cnt; i++) { - eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE)); - if (i < cnt - 1) - eth->scratch_ring[i].txd2 = (phy_ring_head + - ((i + 1) * sizeof(struct mtk_tx_dma))); - eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE); - } - - mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD); - mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); - mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); - mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); - - return 0; -} - -static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) -{ - void *ret = ring->tx_dma; - - return ret + (desc - ring->tx_phys); -} - -static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring, - struct mtk_tx_dma *txd) -{ - return mtk_qdma_phys_to_virt(ring, txd->txd2); -} - -static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, - struct mtk_tx_dma *txd) -{ - int idx = txd - ring->tx_dma; - - return &ring->tx_buf[idx]; -} - -static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev, - int tx_num, struct mtk_tx_ring *ring, bool gso) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - struct mtk_tx_dma *itxd, *txd; - struct mtk_tx_buf *tx_buf; - dma_addr_t mapped_addr; - unsigned int nr_frags; - int i, n_desc = 1; - u32 txd4 = eth->soc->txd4; - - itxd = ring->tx_next_free; - if (itxd == ring->tx_last_free) - return -ENOMEM; - - if (eth->soc->mac_count > 1) - txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT; - - tx_buf = mtk_desc_to_tx_buf(ring, itxd); - memset(tx_buf, 0, sizeof(*tx_buf)); - - if (gso) - txd4 |= TX_DMA_TSO; - - /* TX Checksum offload */ - if (skb->ip_summed == CHECKSUM_PARTIAL) - txd4 |= TX_DMA_CHKSUM; - - /* VLAN header offload */ - if (skb_vlan_tag_present(skb)) - txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb); - - mapped_addr = dma_map_single(&dev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) - return -ENOMEM; - - WRITE_ONCE(itxd->txd1, mapped_addr); - tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; - dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); - dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); - - /* TX SG offload */ - txd = itxd; - nr_frags = skb_shinfo(skb)->nr_frags; - 
for (i = 0; i < nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; - unsigned int offset = 0; - int frag_size = skb_frag_size(frag); - - while (frag_size) { - bool last_frag = false; - unsigned int frag_map_size; - - txd = mtk_tx_next_qdma(ring, txd); - if (txd == ring->tx_last_free) - goto err_dma; - - n_desc++; - frag_map_size = min(frag_size, TX_DMA_BUF_LEN); - mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, - frag_map_size, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) - goto err_dma; - - if (i == nr_frags - 1 && - (frag_size - frag_map_size) == 0) - last_frag = true; - - WRITE_ONCE(txd->txd1, mapped_addr); - WRITE_ONCE(txd->txd3, (QDMA_TX_SWC | - TX_DMA_PLEN0(frag_map_size) | - last_frag * TX_DMA_LS0) | - mac->id); - WRITE_ONCE(txd->txd4, 0); - - tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC; - tx_buf = mtk_desc_to_tx_buf(ring, txd); - memset(tx_buf, 0, sizeof(*tx_buf)); - - tx_buf->flags |= MTK_TX_FLAGS_PAGE0; - dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); - dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); - frag_size -= frag_map_size; - offset += frag_map_size; - } - } - - /* store skb to cleanup */ - tx_buf->skb = skb; - - WRITE_ONCE(itxd->txd4, txd4); - WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | - (!nr_frags * TX_DMA_LS0))); - - netdev_sent_queue(dev, skb->len); - skb_tx_timestamp(skb); - - ring->tx_next_free = mtk_tx_next_qdma(ring, txd); - atomic_sub(n_desc, &ring->tx_free_count); - - /* make sure that all changes to the dma ring are flushed before we - * continue - */ - wmb(); - - if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) - mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); - - return 0; - -err_dma: - do { - tx_buf = mtk_desc_to_tx_buf(ring, txd); - - /* unmap dma */ - mtk_txd_unmap(&dev->dev, tx_buf); - - itxd->txd3 = TX_DMA_DESP2_DEF; - itxd = mtk_tx_next_qdma(ring, itxd); - } while (itxd != txd); - - return -ENOMEM; -} - -static inline int mtk_cal_txd_req(struct sk_buff *skb) -{ - int i, nfrags; - struct skb_frag_struct *frag; - - nfrags = 1; - if (skb_is_gso(skb)) { - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN); - } - } else { - nfrags += skb_shinfo(skb)->nr_frags; - } - - return DIV_ROUND_UP(nfrags, 2); -} - -static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - struct mtk_tx_ring *ring = ð->tx_ring; - struct net_device_stats *stats = &dev->stats; - int tx_num; - int len = skb->len; - bool gso = false; - - tx_num = mtk_cal_txd_req(skb); - if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) { - netif_stop_queue(dev); - netif_err(eth, tx_queued, dev, - "Tx Ring full when queue awake!\n"); - return NETDEV_TX_BUSY; - } - - /* TSO: fill MSS info in tcp checksum field */ - if (skb_is_gso(skb)) { - if (skb_cow_head(skb, 0)) { - netif_warn(eth, tx_err, dev, - "GSO expand head fail.\n"); - goto drop; - } - - if (skb_shinfo(skb)->gso_type & - (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { - gso = true; - tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); - } - } - - if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0) - goto drop; - - stats->tx_packets++; - stats->tx_bytes += len; - - if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) { - netif_stop_queue(dev); - smp_mb(); - if (unlikely(atomic_read(&ring->tx_free_count) > - ring->tx_thresh)) - 
netif_wake_queue(dev); - } - - return NETDEV_TX_OK; - -drop: - stats->tx_dropped++; - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -static int mtk_poll_rx(struct napi_struct *napi, int budget, - struct mtk_eth *eth, u32 rx_intr) -{ - struct mtk_soc_data *soc = eth->soc; - struct mtk_rx_ring *ring = ð->rx_ring[0]; - int idx = ring->rx_calc_idx; - u32 checksum_bit; - struct sk_buff *skb; - u8 *data, *new_data; - struct mtk_rx_dma *rxd, trxd; - int done = 0, pad; - - if (eth->soc->hw_features & NETIF_F_RXCSUM) - checksum_bit = soc->checksum_bit; - else - checksum_bit = 0; - - if (eth->soc->rx_2b_offset) - pad = 0; - else - pad = NET_IP_ALIGN; - - while (done < budget) { - struct net_device *netdev; - unsigned int pktlen; - dma_addr_t dma_addr; - int mac = 0; - - idx = NEXT_RX_DESP_IDX(idx); - rxd = &ring->rx_dma[idx]; - data = ring->rx_data[idx]; - - mtk_get_rxd(&trxd, rxd); - if (!(trxd.rxd2 & RX_DMA_DONE)) - break; - - /* find out which mac the packet come from. values start at 1 */ - if (eth->soc->mac_count > 1) { - mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & - RX_DMA_FPORT_MASK; - mac--; - if (mac < 0 || mac >= eth->soc->mac_count) - goto release_desc; - } - - netdev = eth->netdev[mac]; - - /* alloc new buffer */ - new_data = napi_alloc_frag(ring->frag_size); - if (unlikely(!new_data || !netdev)) { - netdev->stats.rx_dropped++; - goto release_desc; - } - dma_addr = dma_map_single(&netdev->dev, - new_data + NET_SKB_PAD + pad, - ring->rx_buf_size, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { - skb_free_frag(new_data); - goto release_desc; - } - - /* receive data */ - skb = build_skb(data, ring->frag_size); - if (unlikely(!skb)) { - put_page(virt_to_head_page(new_data)); - goto release_desc; - } - skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); - - dma_unmap_single(&netdev->dev, trxd.rxd1, - ring->rx_buf_size, DMA_FROM_DEVICE); - pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); - skb->dev = netdev; - skb_put(skb, pktlen); - if (trxd.rxd4 & checksum_bit) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb_checksum_none_assert(skb); - skb->protocol = eth_type_trans(skb, netdev); - - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += pktlen; - - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && - RX_DMA_VID(trxd.rxd3)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - RX_DMA_VID(trxd.rxd3)); - napi_gro_receive(napi, skb); - - ring->rx_data[idx] = new_data; - rxd->rxd1 = (unsigned int)dma_addr; - -release_desc: - if (eth->soc->rx_sg_dma) - rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size); - else - rxd->rxd2 = RX_DMA_LSO; - - ring->rx_calc_idx = idx; - /* make sure that all changes to the dma ring are flushed before - * we continue - */ - wmb(); - if (eth->soc->dma_type == MTK_QDMA) - mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0); - else - mtk_reg_w32(eth, ring->rx_calc_idx, - MTK_REG_RX_CALC_IDX0); - done++; - } - - if (done < budget) - mtk_irq_ack(eth, rx_intr); - - return done; -} - -static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again) -{ - struct sk_buff *skb; - struct mtk_tx_buf *tx_buf; - int done = 0; - u32 idx, hwidx; - struct mtk_tx_ring *ring = ð->tx_ring; - unsigned int bytes = 0; - - idx = ring->tx_free_idx; - hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0); - - while ((idx != hwidx) && budget) { - tx_buf = &ring->tx_buf[idx]; - skb = tx_buf->skb; - - if (!skb) - break; - - if (skb != (struct sk_buff *)DMA_DUMMY_DESC) { - bytes += skb->len; - done++; - budget--; - } - mtk_txd_unmap(eth->dev, tx_buf); - idx = NEXT_TX_DESP_IDX(idx); 
- } - ring->tx_free_idx = idx; - atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring)); - - /* read hw index again make sure no new tx packet */ - if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0)) - *tx_again = 1; - - if (done) - netdev_completed_queue(*eth->netdev, done, bytes); - - return done; -} - -static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again) -{ - struct mtk_tx_ring *ring = ð->tx_ring; - struct mtk_tx_dma *desc; - struct sk_buff *skb; - struct mtk_tx_buf *tx_buf; - int total = 0, done[MTK_MAX_DEVS]; - unsigned int bytes[MTK_MAX_DEVS]; - u32 cpu, dma; - int i; - - memset(done, 0, sizeof(done)); - memset(bytes, 0, sizeof(bytes)); - - cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); - dma = mtk_r32(eth, MTK_QTX_DRX_PTR); - - desc = mtk_qdma_phys_to_virt(ring, cpu); - - while ((cpu != dma) && budget) { - u32 next_cpu = desc->txd2; - int mac; - - desc = mtk_tx_next_qdma(ring, desc); - if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0) - break; - - mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) & - TX_DMA_FPORT_MASK; - mac--; - - tx_buf = mtk_desc_to_tx_buf(ring, desc); - skb = tx_buf->skb; - if (!skb) - break; - - if (skb != (struct sk_buff *)DMA_DUMMY_DESC) { - bytes[mac] += skb->len; - done[mac]++; - budget--; - } - mtk_txd_unmap(eth->dev, tx_buf); - - ring->tx_last_free->txd2 = next_cpu; - ring->tx_last_free = desc; - atomic_inc(&ring->tx_free_count); - - cpu = next_cpu; - } - - mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); - - /* read hw index again make sure no new tx packet */ - if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR)) - *tx_again = true; - - for (i = 0; i < eth->soc->mac_count; i++) { - if (!done[i]) - continue; - netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); - total += done[i]; - } - - return total; -} - -static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr, - bool *tx_again) -{ - struct mtk_tx_ring *ring = ð->tx_ring; - struct net_device *netdev = eth->netdev[0]; - int done; - - done = eth->tx_ring.tx_poll(eth, budget, tx_again); - if (!*tx_again) - mtk_irq_ack(eth, tx_intr); - - if (!done) - return 0; - - smp_mb(); - if (unlikely(!netif_queue_stopped(netdev))) - return done; - - if (atomic_read(&ring->tx_free_count) > ring->tx_thresh) - netif_wake_queue(netdev); - - return done; -} - -static void mtk_stats_update(struct mtk_eth *eth) -{ - int i; - - for (i = 0; i < eth->soc->mac_count; i++) { - if (!eth->mac[i] || !eth->mac[i]->hw_stats) - continue; - if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) { - mtk_stats_update_mac(eth->mac[i]); - spin_unlock(ð->mac[i]->hw_stats->stats_lock); - } - } -} - -static int mtk_poll(struct napi_struct *napi, int budget) -{ - struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); - u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr; - int tx_done, rx_done; - bool tx_again = false; - - status = mtk_irq_pending(eth); - mtk_status = mtk_irq_pending_status(eth); - tx_intr = eth->soc->tx_int; - rx_intr = eth->soc->rx_int; - status_intr = eth->soc->status_int; - tx_done = 0; - rx_done = 0; - tx_again = 0; - - if (status & tx_intr) - tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again); - - if (status & rx_intr) - rx_done = mtk_poll_rx(napi, budget, eth, rx_intr); - - if (unlikely(mtk_status & status_intr)) { - mtk_stats_update(eth); - mtk_irq_ack_status(eth, status_intr); - } - - if (unlikely(netif_msg_intr(eth))) { - mask = mtk_irq_enabled(eth); - netdev_info(eth->netdev[0], - "done tx %d, rx %d, intr 0x%08x/0x%x\n", - tx_done, rx_done, status, mask); - } - - if 
(tx_again || rx_done == budget) - return budget; - - status = mtk_irq_pending(eth); - if (status & (tx_intr | rx_intr)) - return budget; - - napi_complete(napi); - mtk_irq_enable(eth, tx_intr | rx_intr); - - return rx_done; -} - -static int mtk_pdma_tx_alloc(struct mtk_eth *eth) -{ - int i; - struct mtk_tx_ring *ring = ð->tx_ring; - - ring->tx_ring_size = eth->soc->dma_ring_size; - ring->tx_free_idx = 0; - ring->tx_next_idx = 0; - ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2, - MAX_SKB_FRAGS); - - ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf), - GFP_KERNEL); - if (!ring->tx_buf) - goto no_tx_mem; - - ring->tx_dma = - dma_alloc_coherent(eth->dev, - ring->tx_ring_size * sizeof(*ring->tx_dma), - &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO); - if (!ring->tx_dma) - goto no_tx_mem; - - for (i = 0; i < ring->tx_ring_size; i++) { - ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF; - ring->tx_dma[i].txd4 = eth->soc->txd4; - } - - atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring)); - ring->tx_map = mtk_pdma_tx_map; - ring->tx_poll = mtk_pdma_tx_poll; - ring->tx_clean = mtk_pdma_tx_clean; - - /* make sure that all changes to the dma ring are flushed before we - * continue - */ - wmb(); - - mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0); - mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0); - mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0); - mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG); - - return 0; - -no_tx_mem: - return -ENOMEM; -} - -static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth) -{ - struct mtk_tx_ring *ring = ð->tx_ring; - int i, sz = sizeof(*ring->tx_dma); - - ring->tx_ring_size = eth->soc->dma_ring_size; - ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf), - GFP_KERNEL); - if (!ring->tx_buf) - goto no_tx_mem; - - ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz, - &ring->tx_phys, - GFP_ATOMIC | __GFP_ZERO); - if (!ring->tx_dma) - goto no_tx_mem; - - for (i = 0; i < ring->tx_ring_size; i++) { - int next = (i + 1) % ring->tx_ring_size; - u32 next_ptr = ring->tx_phys + next * sz; - - ring->tx_dma[i].txd2 = next_ptr; - ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF; - } - - atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2); - ring->tx_next_free = &ring->tx_dma[0]; - ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2]; - ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2, - MAX_SKB_FRAGS); - - ring->tx_map = mtk_qdma_tx_map; - ring->tx_poll = mtk_qdma_tx_poll; - ring->tx_clean = mtk_qdma_tx_clean; - - /* make sure that all changes to the dma ring are flushed before we - * continue - */ - wmb(); - - mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR); - mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR); - mtk_w32(eth, - ring->tx_phys + ((ring->tx_ring_size - 1) * sz), - MTK_QTX_CRX_PTR); - mtk_w32(eth, - ring->tx_phys + ((ring->tx_ring_size - 1) * sz), - MTK_QTX_DRX_PTR); - - return 0; - -no_tx_mem: - return -ENOMEM; -} - -static int mtk_qdma_init(struct mtk_eth *eth, int ring) -{ - int err; - - err = mtk_init_fq_dma(eth); - if (err) - return err; - - err = mtk_qdma_tx_alloc_tx(eth); - if (err) - return err; - - err = mtk_dma_rx_alloc(eth, ð->rx_ring[ring]); - if (err) - return err; - - mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0); - mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0); - mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0); - mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX); - mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); - - /* 
Enable random early drop and set drop threshold automatically */ - mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES); - mtk_w32(eth, 0x0, MTK_QDMA_HRED2); - - return 0; -} - -static int mtk_pdma_qdma_init(struct mtk_eth *eth) -{ - int err = mtk_qdma_init(eth, 1); - - if (err) - return err; - - err = mtk_dma_rx_alloc(eth, ð->rx_ring[0]); - if (err) - return err; - - mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0); - mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0); - mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0); - mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG); - - return 0; -} - -static int mtk_pdma_init(struct mtk_eth *eth) -{ - struct mtk_rx_ring *ring = ð->rx_ring[0]; - int err; - - err = mtk_pdma_tx_alloc(eth); - if (err) - return err; - - err = mtk_dma_rx_alloc(eth, ring); - if (err) - return err; - - mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0); - mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0); - mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0); - mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG); - - return 0; -} - -static void mtk_dma_free(struct mtk_eth *eth) -{ - int i; - - for (i = 0; i < eth->soc->mac_count; i++) - if (eth->netdev[i]) - netdev_reset_queue(eth->netdev[i]); - eth->tx_ring.tx_clean(eth); - mtk_clean_rx(eth, ð->rx_ring[0]); - mtk_clean_rx(eth, ð->rx_ring[1]); - kfree(eth->scratch_head); -} - -static void mtk_tx_timeout(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - struct mtk_tx_ring *ring = ð->tx_ring; - - eth->netdev[mac->id]->stats.tx_errors++; - netif_err(eth, tx_err, dev, - "transmit timed out\n"); - if (eth->soc->dma_type & MTK_PDMA) { - netif_info(eth, drv, dev, "pdma_cfg:%08x\n", - mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG)); - netif_info(eth, drv, dev, - "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n", - 0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0), - mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0), - mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0), - mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0), - ring->tx_free_idx, - ring->tx_next_idx); - } - if (eth->soc->dma_type & MTK_QDMA) { - netif_info(eth, drv, dev, "qdma_cfg:%08x\n", - mtk_r32(eth, MTK_QDMA_GLO_CFG)); - netif_info(eth, drv, dev, - "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n", - 0, mtk_r32(eth, MTK_QTX_CTX_PTR), - mtk_r32(eth, MTK_QTX_DTX_PTR), - mtk_r32(eth, MTK_QTX_CRX_PTR), - mtk_r32(eth, MTK_QTX_DRX_PTR), - atomic_read(&ring->tx_free_count)); - } - netif_info(eth, drv, dev, - "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n", - 0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0), - mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0), - mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0), - mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0)); - - schedule_work(&mac->pending_work); -} - -static irqreturn_t mtk_handle_irq(int irq, void *_eth) -{ - struct mtk_eth *eth = _eth; - u32 status, int_mask; - - status = mtk_irq_pending(eth); - if (unlikely(!status)) - return IRQ_NONE; - - int_mask = (eth->soc->rx_int | eth->soc->tx_int); - if (likely(status & int_mask)) { - if (likely(napi_schedule_prep(ð->rx_napi))) - __napi_schedule(ð->rx_napi); - } else { - mtk_irq_ack(eth, status); - } - mtk_irq_disable(eth, int_mask); - - return IRQ_HANDLED; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void mtk_poll_controller(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - u32 int_mask = eth->soc->tx_int | eth->soc->rx_int; - - mtk_irq_disable(eth, 
int_mask); - mtk_handle_irq(dev->irq, dev); - mtk_irq_enable(eth, int_mask); -} -#endif - -int mtk_set_clock_cycle(struct mtk_eth *eth) -{ - unsigned long sysclk = eth->sysclk; - - sysclk /= MTK_US_CYC_CNT_DIVISOR; - sysclk <<= MTK_US_CYC_CNT_SHIFT; - - mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) & - ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) | - sysclk, - MTK_GLO_CFG); - return 0; -} - -void mtk_fwd_config(struct mtk_eth *eth) -{ - u32 fwd_cfg; - - fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG); - - /* disable jumbo frame */ - if (eth->soc->jumbo_frame) - fwd_cfg &= ~MTK_GDM1_JMB_EN; - - /* set unicast/multicast/broadcast frame to cpu */ - fwd_cfg &= ~0xffff; - - mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG); -} - -void mtk_csum_config(struct mtk_eth *eth) -{ - if (eth->soc->hw_features & NETIF_F_RXCSUM) - mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) | - (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN), - MTK_GDMA1_FWD_CFG); - else - mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) & - ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN), - MTK_GDMA1_FWD_CFG); - if (eth->soc->hw_features & NETIF_F_IP_CSUM) - mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) | - (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN), - MTK_CDMA_CSG_CFG); - else - mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) & - ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN), - MTK_CDMA_CSG_CFG); -} - -static int mtk_start_dma(struct mtk_eth *eth) -{ - unsigned long flags; - u32 val; - int err; - - if (eth->soc->dma_type == MTK_PDMA) - err = mtk_pdma_init(eth); - else if (eth->soc->dma_type == MTK_QDMA) - err = mtk_qdma_init(eth, 0); - else - err = mtk_pdma_qdma_init(eth); - if (err) { - mtk_dma_free(eth); - return err; - } - - spin_lock_irqsave(ð->page_lock, flags); - - val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN; - if (eth->soc->rx_2b_offset) - val |= MTK_RX_2B_OFFSET; - val |= eth->soc->pdma_glo_cfg; - - if (eth->soc->dma_type & MTK_PDMA) - mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG); - - if (eth->soc->dma_type & MTK_QDMA) - mtk_w32(eth, val, MTK_QDMA_GLO_CFG); - - spin_unlock_irqrestore(ð->page_lock, flags); - - return 0; -} - -static int mtk_open(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - - dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)); - - if (!atomic_read(ð->dma_refcnt)) { - int err = mtk_start_dma(eth); - - if (err) - return err; - - napi_enable(ð->rx_napi); - mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int); - } - atomic_inc(ð->dma_refcnt); - - if (eth->phy) - eth->phy->start(mac); - - if (eth->soc->has_carrier && eth->soc->has_carrier(eth)) - netif_carrier_on(dev); - - netif_start_queue(dev); - eth->soc->fwd_config(eth); - - return 0; -} - -static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) -{ - unsigned long flags; - u32 val; - int i; - - /* stop the dma enfine */ - spin_lock_irqsave(ð->page_lock, flags); - val = mtk_r32(eth, glo_cfg); - mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), - glo_cfg); - spin_unlock_irqrestore(ð->page_lock, flags); - - /* wait for dma stop */ - for (i = 0; i < 10; i++) { - val = mtk_r32(eth, glo_cfg); - if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) { - msleep(20); - continue; - } - break; - } -} - -static int mtk_stop(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - - netif_tx_disable(dev); - if (eth->phy) - eth->phy->stop(mac); - - if (!atomic_dec_and_test(ð->dma_refcnt)) - return 0; - - mtk_irq_disable(eth, 
eth->soc->tx_int | eth->soc->rx_int); - napi_disable(ð->rx_napi); - - if (eth->soc->dma_type & MTK_PDMA) - mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]); - - if (eth->soc->dma_type & MTK_QDMA) - mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); - - mtk_dma_free(eth); - - return 0; -} - -static int __init mtk_init_hw(struct mtk_eth *eth) -{ - int i, err; - - eth->soc->reset_fe(eth); - - if (eth->soc->switch_init) - if (eth->soc->switch_init(eth)) { - dev_err(eth->dev, "failed to initialize switch core\n"); - return -ENODEV; - } - - err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0, - dev_name(eth->dev), eth); - if (err) - return err; - - err = mtk_mdio_init(eth); - if (err) - return err; - - /* disable delay and normal interrupt */ - mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG); - if (eth->soc->dma_type & MTK_QDMA) - mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); - mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int); - - /* frame engine will push VLAN tag regarding to VIDX field in Tx desc */ - if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) - for (i = 0; i < 16; i += 2) - mtk_w32(eth, ((i + 1) << 16) + i, - mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] + - (i * 2)); - - if (eth->soc->fwd_config(eth)) - dev_err(eth->dev, "unable to get clock\n"); - - if (mtk_reg_table[MTK_REG_MTK_RST_GL]) { - mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL); - mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL); - } - - return 0; -} - -static int __init mtk_init(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - struct device_node *port; - const char *mac_addr; - int err; - - mac_addr = of_get_mac_address(mac->of_node); - if (mac_addr) - ether_addr_copy(dev->dev_addr, mac_addr); - - /* If the mac address is invalid, use random mac address */ - if (!is_valid_ether_addr(dev->dev_addr)) { - eth_hw_addr_random(dev); - dev_err(eth->dev, "generated random MAC address %pM\n", - dev->dev_addr); - } - mac->hw->soc->set_mac(mac, dev->dev_addr); - - if (eth->soc->port_init) - for_each_child_of_node(mac->of_node, port) - if (of_device_is_compatible(port, - "mediatek,eth-port") && - of_device_is_available(port)) - eth->soc->port_init(eth, mac, port); - - if (eth->phy) { - err = eth->phy->connect(mac); - if (err) - return err; - } - - return 0; -} - -static void mtk_uninit(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - - if (eth->phy) - eth->phy->disconnect(mac); - mtk_mdio_cleanup(eth); - - mtk_irq_disable(eth, ~0); - free_irq(dev->irq, dev); -} - -static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct mtk_mac *mac = netdev_priv(dev); - - if (!mac->phy_dev) - return -ENODEV; - - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return phy_mii_ioctl(mac->phy_dev, ifr, cmd); - default: - break; - } - - return -EOPNOTSUPP; -} - -static int mtk_change_mtu(struct net_device *dev, int new_mtu) -{ - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - int frag_size, old_mtu; - u32 fwd_cfg; - - if (!eth->soc->jumbo_frame) - return eth_change_mtu(dev, new_mtu); - - frag_size = mtk_max_frag_size(new_mtu); - if (new_mtu < 68 || frag_size > PAGE_SIZE) - return -EINVAL; - - old_mtu = dev->mtu; - dev->mtu = new_mtu; - - /* return early if the buffer sizes will not change */ - if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) - return 0; - if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN) - return 0; - - if (new_mtu <= ETH_DATA_LEN) - eth->rx_ring[0].frag_size = 
mtk_max_frag_size(ETH_DATA_LEN); - else - eth->rx_ring[0].frag_size = PAGE_SIZE; - eth->rx_ring[0].rx_buf_size = - mtk_max_buf_size(eth->rx_ring[0].frag_size); - - if (!netif_running(dev)) - return 0; - - mtk_stop(dev); - fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG); - if (new_mtu <= ETH_DATA_LEN) { - fwd_cfg &= ~MTK_GDM1_JMB_EN; - } else { - fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT); - fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) << - MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN; - } - mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG); - - return mtk_open(dev); -} - -static void mtk_pending_work(struct work_struct *work) -{ - struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work); - struct mtk_eth *eth = mac->hw; - struct net_device *dev = eth->netdev[mac->id]; - int err; - - rtnl_lock(); - mtk_stop(dev); - - err = mtk_open(dev); - if (err) { - netif_alert(eth, ifup, dev, - "Driver up/down cycle failed, closing device.\n"); - dev_close(dev); - } - rtnl_unlock(); -} - -static int mtk_cleanup(struct mtk_eth *eth) -{ - int i; - - for (i = 0; i < eth->soc->mac_count; i++) { - struct mtk_mac *mac = netdev_priv(eth->netdev[i]); - - if (!eth->netdev[i]) - continue; - - unregister_netdev(eth->netdev[i]); - free_netdev(eth->netdev[i]); - cancel_work_sync(&mac->pending_work); - } - - return 0; -} - -static const struct net_device_ops mtk_netdev_ops = { - .ndo_init = mtk_init, - .ndo_uninit = mtk_uninit, - .ndo_open = mtk_open, - .ndo_stop = mtk_stop, - .ndo_start_xmit = mtk_start_xmit, - .ndo_set_mac_address = mtk_set_mac_address, - .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = mtk_do_ioctl, - .ndo_change_mtu = mtk_change_mtu, - .ndo_tx_timeout = mtk_tx_timeout, - .ndo_get_stats64 = mtk_get_stats64, - .ndo_vlan_rx_add_vid = mtk_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = mtk_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mtk_poll_controller, -#endif -}; - -static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) -{ - struct mtk_mac *mac; - const __be32 *_id = of_get_property(np, "reg", NULL); - int id, err; - - if (!_id) { - dev_err(eth->dev, "missing mac id\n"); - return -EINVAL; - } - id = be32_to_cpup(_id); - if (id >= eth->soc->mac_count || eth->netdev[id]) { - dev_err(eth->dev, "%d is not a valid mac id\n", id); - return -EINVAL; - } - - eth->netdev[id] = alloc_etherdev(sizeof(*mac)); - if (!eth->netdev[id]) { - dev_err(eth->dev, "alloc_etherdev failed\n"); - return -ENOMEM; - } - mac = netdev_priv(eth->netdev[id]); - eth->mac[id] = mac; - mac->id = id; - mac->hw = eth; - mac->of_node = np; - INIT_WORK(&mac->pending_work, mtk_pending_work); - - if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) { - mac->hw_stats = devm_kzalloc(eth->dev, - sizeof(*mac->hw_stats), - GFP_KERNEL); - if (!mac->hw_stats) { - err = -ENOMEM; - goto free_netdev; - } - spin_lock_init(&mac->hw_stats->stats_lock); - mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; - } - - SET_NETDEV_DEV(eth->netdev[id], eth->dev); - eth->netdev[id]->netdev_ops = &mtk_netdev_ops; - eth->netdev[id]->base_addr = (unsigned long)eth->base; - - if (eth->soc->init_data) - eth->soc->init_data(eth->soc, eth->netdev[id]); - - eth->netdev[id]->vlan_features = eth->soc->hw_features & - ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); - eth->netdev[id]->features |= eth->soc->hw_features; - - if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) - eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - - mtk_set_ethtool_ops(eth->netdev[id]); - - err = register_netdev(eth->netdev[id]); - 
if (err) { - dev_err(eth->dev, "error bringing up device\n"); - err = -ENOMEM; - goto free_netdev; - } - eth->netdev[id]->irq = eth->irq; - netif_info(eth, probe, eth->netdev[id], - "mediatek frame engine at 0x%08lx, irq %d\n", - eth->netdev[id]->base_addr, eth->netdev[id]->irq); - - return 0; - -free_netdev: - free_netdev(eth->netdev[id]); - return err; -} - -static int mtk_probe(struct platform_device *pdev) -{ - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - const struct of_device_id *match; - struct device_node *mac_np; - struct mtk_soc_data *soc; - struct mtk_eth *eth; - struct clk *sysclk; - int err; - - device_reset(&pdev->dev); - - match = of_match_device(of_mtk_match, &pdev->dev); - soc = (struct mtk_soc_data *)match->data; - - if (soc->reg_table) - mtk_reg_table = soc->reg_table; - - eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); - if (!eth) - return -ENOMEM; - - eth->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(eth->base)) - return PTR_ERR(eth->base); - - spin_lock_init(ð->page_lock); - - eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "mediatek,ethsys"); - if (IS_ERR(eth->ethsys)) - return PTR_ERR(eth->ethsys); - - eth->irq = platform_get_irq(pdev, 0); - if (eth->irq < 0) { - dev_err(&pdev->dev, "no IRQ resource found\n"); - return -ENXIO; - } - - sysclk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(sysclk)) { - dev_err(&pdev->dev, - "the clock is not defined in the devicetree\n"); - return -ENXIO; - } - eth->sysclk = clk_get_rate(sysclk); - - eth->switch_np = of_parse_phandle(pdev->dev.of_node, - "mediatek,switch", 0); - if (soc->has_switch && !eth->switch_np) { - dev_err(&pdev->dev, "failed to read switch phandle\n"); - return -ENODEV; - } - - eth->dev = &pdev->dev; - eth->soc = soc; - eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); - - err = mtk_init_hw(eth); - if (err) - return err; - - if (eth->soc->mac_count > 1) { - for_each_child_of_node(pdev->dev.of_node, mac_np) { - if (!of_device_is_compatible(mac_np, - "mediatek,eth-mac")) - continue; - - if (!of_device_is_available(mac_np)) - continue; - - err = mtk_add_mac(eth, mac_np); - if (err) - goto err_free_dev; - } - - init_dummy_netdev(ð->dummy_dev); - netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_poll, - soc->napi_weight); - } else { - err = mtk_add_mac(eth, pdev->dev.of_node); - if (err) - goto err_free_dev; - netif_napi_add(eth->netdev[0], ð->rx_napi, mtk_poll, - soc->napi_weight); - } - - platform_set_drvdata(pdev, eth); - - return 0; - -err_free_dev: - mtk_cleanup(eth); - return err; -} - -static int mtk_remove(struct platform_device *pdev) -{ - struct mtk_eth *eth = platform_get_drvdata(pdev); - - netif_napi_del(ð->rx_napi); - mtk_cleanup(eth); - platform_set_drvdata(pdev, NULL); - - return 0; -} - -static struct platform_driver mtk_driver = { - .probe = mtk_probe, - .remove = mtk_remove, - .driver = { - .name = "mtk_soc_eth", - .of_match_table = of_mtk_match, - }, -}; - -module_platform_driver(mtk_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); -MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC"); diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h deleted file mode 100644 index e6ed80433f49..000000000000 --- a/drivers/staging/mt7621-eth/mtk_eth_soc.h +++ /dev/null @@ -1,716 +0,0 @@ -/* This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the 
Free Software Foundation; version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> - * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> - */ - -#ifndef MTK_ETH_H -#define MTK_ETH_H - -#include <linux/mii.h> -#include <linux/interrupt.h> -#include <linux/netdevice.h> -#include <linux/dma-mapping.h> -#include <linux/phy.h> -#include <linux/ethtool.h> -#include <linux/version.h> -#include <linux/atomic.h> - -/* these registers have different offsets depending on the SoC. we use a lookup - * table for these - */ -enum mtk_reg { - MTK_REG_PDMA_GLO_CFG = 0, - MTK_REG_PDMA_RST_CFG, - MTK_REG_DLY_INT_CFG, - MTK_REG_TX_BASE_PTR0, - MTK_REG_TX_MAX_CNT0, - MTK_REG_TX_CTX_IDX0, - MTK_REG_TX_DTX_IDX0, - MTK_REG_RX_BASE_PTR0, - MTK_REG_RX_MAX_CNT0, - MTK_REG_RX_CALC_IDX0, - MTK_REG_RX_DRX_IDX0, - MTK_REG_MTK_INT_ENABLE, - MTK_REG_MTK_INT_STATUS, - MTK_REG_MTK_DMA_VID_BASE, - MTK_REG_MTK_COUNTER_BASE, - MTK_REG_MTK_RST_GL, - MTK_REG_MTK_INT_STATUS2, - MTK_REG_COUNT -}; - -/* delayed interrupt bits */ -#define MTK_DELAY_EN_INT 0x80 -#define MTK_DELAY_MAX_INT 0x04 -#define MTK_DELAY_MAX_TOUT 0x04 -#define MTK_DELAY_TIME 20 -#define MTK_DELAY_CHAN (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \ - | MTK_DELAY_MAX_TOUT) -#define MTK_DELAY_INIT ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN) -#define MTK_PSE_FQFC_CFG_INIT 0x80504000 -#define MTK_PSE_FQFC_CFG_256Q 0xff908000 - -/* interrupt bits */ -#define MTK_CNT_PPE_AF BIT(31) -#define MTK_CNT_GDM_AF BIT(29) -#define MTK_PSE_P2_FC BIT(26) -#define MTK_PSE_BUF_DROP BIT(24) -#define MTK_GDM_OTHER_DROP BIT(23) -#define MTK_PSE_P1_FC BIT(22) -#define MTK_PSE_P0_FC BIT(21) -#define MTK_PSE_FQ_EMPTY BIT(20) -#define MTK_GE1_STA_CHG BIT(18) -#define MTK_TX_COHERENT BIT(17) -#define MTK_RX_COHERENT BIT(16) -#define MTK_TX_DONE_INT3 BIT(11) -#define MTK_TX_DONE_INT2 BIT(10) -#define MTK_TX_DONE_INT1 BIT(9) -#define MTK_TX_DONE_INT0 BIT(8) -#define MTK_RX_DONE_INT0 BIT(2) -#define MTK_TX_DLY_INT BIT(1) -#define MTK_RX_DLY_INT BIT(0) - -#define MTK_RX_DONE_INT MTK_RX_DONE_INT0 -#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ - MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) - -#define RT5350_RX_DLY_INT BIT(30) -#define RT5350_TX_DLY_INT BIT(28) -#define RT5350_RX_DONE_INT1 BIT(17) -#define RT5350_RX_DONE_INT0 BIT(16) -#define RT5350_TX_DONE_INT3 BIT(3) -#define RT5350_TX_DONE_INT2 BIT(2) -#define RT5350_TX_DONE_INT1 BIT(1) -#define RT5350_TX_DONE_INT0 BIT(0) - -#define RT5350_RX_DONE_INT (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1) -#define RT5350_TX_DONE_INT (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \ - RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3) - -/* registers */ -#define MTK_GDMA_OFFSET 0x0020 -#define MTK_PSE_OFFSET 0x0040 -#define MTK_GDMA2_OFFSET 0x0060 -#define MTK_CDMA_OFFSET 0x0080 -#define MTK_DMA_VID0 0x00a8 -#define MTK_PDMA_OFFSET 0x0100 -#define MTK_PPE_OFFSET 0x0200 -#define MTK_CMTABLE_OFFSET 0x0400 -#define MTK_POLICYTABLE_OFFSET 0x1000 - -#define MT7621_GDMA_OFFSET 0x0500 -#define MT7620_GDMA_OFFSET 0x0600 - -#define RT5350_PDMA_OFFSET 0x0800 -#define RT5350_SDM_OFFSET 0x0c00 - -#define MTK_MDIO_ACCESS 0x00 -#define MTK_MDIO_CFG 0x04 -#define MTK_GLO_CFG 0x08 -#define MTK_RST_GL 0x0C -#define 
MTK_INT_STATUS 0x10 -#define MTK_INT_ENABLE 0x14 -#define MTK_MDIO_CFG2 0x18 -#define MTK_FOC_TS_T 0x1C - -#define MTK_GDMA1_FWD_CFG (MTK_GDMA_OFFSET + 0x00) -#define MTK_GDMA1_SCH_CFG (MTK_GDMA_OFFSET + 0x04) -#define MTK_GDMA1_SHPR_CFG (MTK_GDMA_OFFSET + 0x08) -#define MTK_GDMA1_MAC_ADRL (MTK_GDMA_OFFSET + 0x0C) -#define MTK_GDMA1_MAC_ADRH (MTK_GDMA_OFFSET + 0x10) - -#define MTK_GDMA2_FWD_CFG (MTK_GDMA2_OFFSET + 0x00) -#define MTK_GDMA2_SCH_CFG (MTK_GDMA2_OFFSET + 0x04) -#define MTK_GDMA2_SHPR_CFG (MTK_GDMA2_OFFSET + 0x08) -#define MTK_GDMA2_MAC_ADRL (MTK_GDMA2_OFFSET + 0x0C) -#define MTK_GDMA2_MAC_ADRH (MTK_GDMA2_OFFSET + 0x10) - -#define MTK_PSE_FQ_CFG (MTK_PSE_OFFSET + 0x00) -#define MTK_CDMA_FC_CFG (MTK_PSE_OFFSET + 0x04) -#define MTK_GDMA1_FC_CFG (MTK_PSE_OFFSET + 0x08) -#define MTK_GDMA2_FC_CFG (MTK_PSE_OFFSET + 0x0C) - -#define MTK_CDMA_CSG_CFG (MTK_CDMA_OFFSET + 0x00) -#define MTK_CDMA_SCH_CFG (MTK_CDMA_OFFSET + 0x04) - -#define MT7621_GDMA_FWD_CFG(x) (MT7621_GDMA_OFFSET + (x * 0x1000)) - -/* FIXME this might be different for different SOCs */ -#define MT7620_GDMA1_FWD_CFG (MT7621_GDMA_OFFSET + 0x00) - -#define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00) -#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04) -#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08) -#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C) -#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10) -#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14) -#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18) -#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C) -#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20) -#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24) -#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28) -#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C) -#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30) -#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34) -#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38) -#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C) -#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100) -#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104) -#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108) -#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C) -#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110) -#define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114) -#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118) -#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C) -#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204) -#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208) -#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c) -#define RT5350_MTK_INT_STATUS (RT5350_PDMA_OFFSET + 0x220) -#define RT5350_MTK_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228) -#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280) - -#define MTK_PDMA_GLO_CFG (MTK_PDMA_OFFSET + 0x00) -#define MTK_PDMA_RST_CFG (MTK_PDMA_OFFSET + 0x04) -#define MTK_PDMA_SCH_CFG (MTK_PDMA_OFFSET + 0x08) -#define MTK_DLY_INT_CFG (MTK_PDMA_OFFSET + 0x0C) -#define MTK_TX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x10) -#define MTK_TX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x14) -#define MTK_TX_CTX_IDX0 (MTK_PDMA_OFFSET + 0x18) -#define MTK_TX_DTX_IDX0 (MTK_PDMA_OFFSET + 0x1C) -#define MTK_TX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x20) -#define MTK_TX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x24) -#define MTK_TX_CTX_IDX1 (MTK_PDMA_OFFSET + 0x28) -#define MTK_TX_DTX_IDX1 (MTK_PDMA_OFFSET + 0x2C) -#define MTK_RX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x30) -#define MTK_RX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x34) 
-#define MTK_RX_CALC_IDX0 (MTK_PDMA_OFFSET + 0x38) -#define MTK_RX_DRX_IDX0 (MTK_PDMA_OFFSET + 0x3C) -#define MTK_TX_BASE_PTR2 (MTK_PDMA_OFFSET + 0x40) -#define MTK_TX_MAX_CNT2 (MTK_PDMA_OFFSET + 0x44) -#define MTK_TX_CTX_IDX2 (MTK_PDMA_OFFSET + 0x48) -#define MTK_TX_DTX_IDX2 (MTK_PDMA_OFFSET + 0x4C) -#define MTK_TX_BASE_PTR3 (MTK_PDMA_OFFSET + 0x50) -#define MTK_TX_MAX_CNT3 (MTK_PDMA_OFFSET + 0x54) -#define MTK_TX_CTX_IDX3 (MTK_PDMA_OFFSET + 0x58) -#define MTK_TX_DTX_IDX3 (MTK_PDMA_OFFSET + 0x5C) -#define MTK_RX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x60) -#define MTK_RX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x64) -#define MTK_RX_CALC_IDX1 (MTK_PDMA_OFFSET + 0x68) -#define MTK_RX_DRX_IDX1 (MTK_PDMA_OFFSET + 0x6C) - -/* Switch DMA configuration */ -#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00) -#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04) -#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08) -#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C) -#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10) -#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100) -#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104) -#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108) -#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C) -#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110) - -#define RT5350_SDM_ICS_EN BIT(16) -#define RT5350_SDM_TCS_EN BIT(17) -#define RT5350_SDM_UCS_EN BIT(18) - -/* QDMA registers */ -#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10)) -#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10)) -#define MTK_QRX_BASE_PTR0 0x1900 -#define MTK_QRX_MAX_CNT0 0x1904 -#define MTK_QRX_CRX_IDX0 0x1908 -#define MTK_QRX_DRX_IDX0 0x190C -#define MTK_QDMA_GLO_CFG 0x1A04 -#define MTK_QDMA_RST_IDX 0x1A08 -#define MTK_QDMA_DELAY_INT 0x1A0C -#define MTK_QDMA_FC_THRES 0x1A10 -#define MTK_QMTK_INT_STATUS 0x1A18 -#define MTK_QMTK_INT_ENABLE 0x1A1C -#define MTK_QDMA_HRED2 0x1A44 - -#define MTK_QTX_CTX_PTR 0x1B00 -#define MTK_QTX_DTX_PTR 0x1B04 - -#define MTK_QTX_CRX_PTR 0x1B10 -#define MTK_QTX_DRX_PTR 0x1B14 - -#define MTK_QDMA_FQ_HEAD 0x1B20 -#define MTK_QDMA_FQ_TAIL 0x1B24 -#define MTK_QDMA_FQ_CNT 0x1B28 -#define MTK_QDMA_FQ_BLEN 0x1B2C - -#define QDMA_PAGE_SIZE 2048 -#define QDMA_TX_OWNER_CPU BIT(31) -#define QDMA_TX_SWC BIT(14) -#define TX_QDMA_SDL(_x) (((_x) & 0x3fff) << 16) -#define QDMA_RES_THRES 4 - -/* MDIO_CFG register bits */ -#define MTK_MDIO_CFG_AUTO_POLL_EN BIT(29) -#define MTK_MDIO_CFG_GP1_BP_EN BIT(16) -#define MTK_MDIO_CFG_GP1_FRC_EN BIT(15) -#define MTK_MDIO_CFG_GP1_SPEED_10 (0 << 13) -#define MTK_MDIO_CFG_GP1_SPEED_100 (1 << 13) -#define MTK_MDIO_CFG_GP1_SPEED_1000 (2 << 13) -#define MTK_MDIO_CFG_GP1_DUPLEX BIT(12) -#define MTK_MDIO_CFG_GP1_FC_TX BIT(11) -#define MTK_MDIO_CFG_GP1_FC_RX BIT(10) -#define MTK_MDIO_CFG_GP1_LNK_DWN BIT(9) -#define MTK_MDIO_CFG_GP1_AN_FAIL BIT(8) -#define MTK_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6) -#define MTK_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6) -#define MTK_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6) -#define MTK_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6) -#define MTK_MDIO_CFG_TURBO_MII_FREQ BIT(5) -#define MTK_MDIO_CFG_TURBO_MII_MODE BIT(4) -#define MTK_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2) -#define MTK_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2) -#define MTK_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2) -#define MTK_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2) -#define MTK_MDIO_CFG_TX_CLK_SKEW_0 0 -#define MTK_MDIO_CFG_TX_CLK_SKEW_200 1 -#define MTK_MDIO_CFG_TX_CLK_SKEW_400 2 -#define MTK_MDIO_CFG_TX_CLK_SKEW_INV 3 - -/* uni-cast port */ -#define MTK_GDM1_JMB_LEN_MASK 0xf -#define MTK_GDM1_JMB_LEN_SHIFT 28 -#define MTK_GDM1_ICS_EN BIT(22) -#define 
MTK_GDM1_TCS_EN BIT(21) -#define MTK_GDM1_UCS_EN BIT(20) -#define MTK_GDM1_JMB_EN BIT(19) -#define MTK_GDM1_STRPCRC BIT(16) -#define MTK_GDM1_UFRC_P_CPU (0 << 12) -#define MTK_GDM1_UFRC_P_GDMA1 (1 << 12) -#define MTK_GDM1_UFRC_P_PPE (6 << 12) - -/* checksums */ -#define MTK_ICS_GEN_EN BIT(2) -#define MTK_UCS_GEN_EN BIT(1) -#define MTK_TCS_GEN_EN BIT(0) - -/* dma mode */ -#define MTK_PDMA BIT(0) -#define MTK_QDMA BIT(1) -#define MTK_PDMA_RX_QDMA_TX (MTK_PDMA | MTK_QDMA) - -/* dma ring */ -#define MTK_PST_DRX_IDX0 BIT(16) -#define MTK_PST_DTX_IDX3 BIT(3) -#define MTK_PST_DTX_IDX2 BIT(2) -#define MTK_PST_DTX_IDX1 BIT(1) -#define MTK_PST_DTX_IDX0 BIT(0) - -#define MTK_RX_2B_OFFSET BIT(31) -#define MTK_TX_WB_DDONE BIT(6) -#define MTK_RX_DMA_BUSY BIT(3) -#define MTK_TX_DMA_BUSY BIT(1) -#define MTK_RX_DMA_EN BIT(2) -#define MTK_TX_DMA_EN BIT(0) - -#define MTK_PDMA_SIZE_4DWORDS (0 << 4) -#define MTK_PDMA_SIZE_8DWORDS (1 << 4) -#define MTK_PDMA_SIZE_16DWORDS (2 << 4) - -#define MTK_US_CYC_CNT_MASK 0xff -#define MTK_US_CYC_CNT_SHIFT 0x8 -#define MTK_US_CYC_CNT_DIVISOR 1000000 - -/* PDMA descriptor rxd2 */ -#define RX_DMA_DONE BIT(31) -#define RX_DMA_LSO BIT(30) -#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) -#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff) -#define RX_DMA_TAG BIT(15) - -/* PDMA descriptor rxd3 */ -#define RX_DMA_TPID(_x) (((_x) >> 16) & 0xffff) -#define RX_DMA_VID(_x) ((_x) & 0xfff) - -/* PDMA descriptor rxd4 */ -#define RX_DMA_L4VALID BIT(30) -#define RX_DMA_FPORT_SHIFT 19 -#define RX_DMA_FPORT_MASK 0x7 - -struct mtk_rx_dma { - unsigned int rxd1; - unsigned int rxd2; - unsigned int rxd3; - unsigned int rxd4; -} __packed __aligned(4); - -/* PDMA tx descriptor bits */ -#define TX_DMA_BUF_LEN 0x3fff -#define TX_DMA_PLEN0_MASK (TX_DMA_BUF_LEN << 16) -#define TX_DMA_PLEN0(_x) (((_x) & TX_DMA_BUF_LEN) << 16) -#define TX_DMA_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN) -#define TX_DMA_GET_PLEN0(_x) (((_x) >> 16) & TX_DMA_BUF_LEN) -#define TX_DMA_GET_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN) -#define TX_DMA_LS1 BIT(14) -#define TX_DMA_LS0 BIT(30) -#define TX_DMA_DONE BIT(31) -#define TX_DMA_FPORT_SHIFT 25 -#define TX_DMA_FPORT_MASK 0x7 -#define TX_DMA_INS_VLAN_MT7621 BIT(16) -#define TX_DMA_INS_VLAN BIT(7) -#define TX_DMA_INS_PPPOE BIT(12) -#define TX_DMA_TAG BIT(15) -#define TX_DMA_TAG_MASK BIT(15) -#define TX_DMA_QN(_x) ((_x) << 16) -#define TX_DMA_PN(_x) ((_x) << 24) -#define TX_DMA_QN_MASK TX_DMA_QN(0x7) -#define TX_DMA_PN_MASK TX_DMA_PN(0x7) -#define TX_DMA_UDF BIT(20) -#define TX_DMA_CHKSUM (0x7 << 29) -#define TX_DMA_TSO BIT(28) -#define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1)) - -/* frame engine counters */ -#define MTK_PPE_AC_BCNT0 (MTK_CMTABLE_OFFSET + 0x00) -#define MTK_GDMA1_TX_GBCNT (MTK_CMTABLE_OFFSET + 0x300) -#define MTK_GDMA2_TX_GBCNT (MTK_GDMA1_TX_GBCNT + 0x40) - -/* phy device flags */ -#define MTK_PHY_FLAG_PORT BIT(0) -#define MTK_PHY_FLAG_ATTACH BIT(1) - -struct mtk_tx_dma { - unsigned int txd1; - unsigned int txd2; - unsigned int txd3; - unsigned int txd4; -} __packed __aligned(4); - -struct mtk_eth; -struct mtk_mac; - -/* manage the attached phys */ -struct mtk_phy { - spinlock_t lock; - - struct phy_device *phy[8]; - struct device_node *phy_node[8]; - const __be32 *phy_fixed[8]; - int duplex[8]; - int speed[8]; - int tx_fc[8]; - int rx_fc[8]; - int (*connect)(struct mtk_mac *mac); - void (*disconnect)(struct mtk_mac *mac); - void (*start)(struct mtk_mac *mac); - void (*stop)(struct mtk_mac *mac); -}; - -/* struct mtk_soc_data - the structure that holds the SoC specific 
data - * @reg_table: Some of the legacy registers changed their location - * over time. Their offsets are stored in this table - * - * @init_data: Some features depend on the silicon revision. This - * callback allows runtime modification of the content of - * this struct - * @reset_fe: This callback is used to trigger the reset of the frame - * engine - * @set_mac: This callback is used to set the unicast mac address - * filter - * @fwd_config: This callback is used to setup the forward config - * register of the MAC - * @switch_init: This callback is used to bring up the switch core - * @port_init: Some SoCs have ports that can be router to a switch port - * or an external PHY. This callback is used to setup these - * ports. - * @has_carrier: This callback allows driver to check if there is a cable - * attached. - * @mdio_init: This callbck is used to setup the MDIO bus if one is - * present - * @mdio_cleanup: This callback is used to cleanup the MDIO state. - * @mdio_write: This callback is used to write data to the MDIO bus. - * @mdio_read: This callback is used to write data to the MDIO bus. - * @mdio_adjust_link: This callback is used to apply the PHY settings. - * @piac_offset: the PIAC register has a different different base offset - * @hw_features: feature set depends on the SoC type - * @dma_ring_size: allow GBit SoCs to set bigger rings than FE SoCs - * @napi_weight: allow GBit SoCs to set bigger napi weight than FE SoCs - * @dma_type: SoCs is PDMA, QDMA or a mix of the 2 - * @pdma_glo_cfg: the default DMA configuration - * @rx_int: the TX interrupt bits used by the SoC - * @tx_int: the TX interrupt bits used by the SoC - * @status_int: the Status interrupt bits used by the SoC - * @checksum_bit: the bits used to turn on HW checksumming - * @txd4: default value of the TXD4 descriptor - * @mac_count: the number of MACs that the SoC has - * @new_stats: there is a old and new way to read hardware stats - * registers - * @jumbo_frame: does the SoC support jumbo frames ? - * @rx_2b_offset: tell the rx dma to offset the data by 2 bytes - * @rx_sg_dma: scatter gather support - * @padding_64b enable 64 bit padding - * @padding_bug: rt2880 has a padding bug - * @has_switch: does the SoC have a built-in switch - * - * Although all of the supported SoCs share the same basic functionality, there - * are several SoC specific functions and features that we need to support. This - * struct holds the SoC specific data so that the common core can figure out - * how to setup and use these differences. 
- */ -struct mtk_soc_data { - const u16 *reg_table; - - void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev); - void (*reset_fe)(struct mtk_eth *eth); - void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr); - int (*fwd_config)(struct mtk_eth *eth); - int (*switch_init)(struct mtk_eth *eth); - void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac, - struct device_node *port); - int (*has_carrier)(struct mtk_eth *eth); - int (*mdio_init)(struct mtk_eth *eth); - void (*mdio_cleanup)(struct mtk_eth *eth); - int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg, - u16 val); - int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg); - void (*mdio_adjust_link)(struct mtk_eth *eth, int port); - u32 piac_offset; - netdev_features_t hw_features; - u32 dma_ring_size; - u32 napi_weight; - u32 dma_type; - u32 pdma_glo_cfg; - u32 rx_int; - u32 tx_int; - u32 status_int; - u32 checksum_bit; - u32 txd4; - u32 mac_count; - - u32 new_stats:1; - u32 jumbo_frame:1; - u32 rx_2b_offset:1; - u32 rx_sg_dma:1; - u32 padding_64b:1; - u32 padding_bug:1; - u32 has_switch:1; -}; - -#define MTK_STAT_OFFSET 0x40 - -/* struct mtk_hw_stats - the structure that holds the traffic statistics. - * @stats_lock: make sure that stats operations are atomic - * @reg_offset: the status register offset of the SoC - * @syncp: the refcount - * - * All of the supported SoCs have hardware counters for traffic statstics. - * Whenever the status IRQ triggers we can read the latest stats from these - * counters and store them in this struct. - */ -struct mtk_hw_stats { - spinlock_t stats_lock; - u32 reg_offset; - struct u64_stats_sync syncp; - - u64 tx_bytes; - u64 tx_packets; - u64 tx_skip; - u64 tx_collisions; - u64 rx_bytes; - u64 rx_packets; - u64 rx_overflow; - u64 rx_fcs_errors; - u64 rx_short_errors; - u64 rx_long_errors; - u64 rx_checksum_errors; - u64 rx_flow_control_packets; -}; - -/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how - * memory was allocated so that it can be freed properly - */ -enum mtk_tx_flags { - MTK_TX_FLAGS_SINGLE0 = 0x01, - MTK_TX_FLAGS_PAGE0 = 0x02, - MTK_TX_FLAGS_PAGE1 = 0x04, -}; - -/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at - * by the TX descriptor s - * @skb: The SKB pointer of the packet being sent - * @dma_addr0: The base addr of the first segment - * @dma_len0: The length of the first segment - * @dma_addr1: The base addr of the second segment - * @dma_len1: The length of the second segment - */ -struct mtk_tx_buf { - struct sk_buff *skb; - u32 flags; - DEFINE_DMA_UNMAP_ADDR(dma_addr0); - DEFINE_DMA_UNMAP_LEN(dma_len0); - DEFINE_DMA_UNMAP_ADDR(dma_addr1); - DEFINE_DMA_UNMAP_LEN(dma_len1); -}; - -/* struct mtk_tx_ring - This struct holds info describing a TX ring - * @tx_dma: The descriptor ring - * @tx_buf: The memory pointed at by the ring - * @tx_phys: The physical addr of tx_buf - * @tx_next_free: Pointer to the next free descriptor - * @tx_last_free: Pointer to the last free descriptor - * @tx_thresh: The threshold of minimum amount of free descriptors - * @tx_map: Callback to map a new packet into the ring - * @tx_poll: Callback for the housekeeping function - * @tx_clean: Callback for the cleanup function - * @tx_ring_size: How many descriptors are in the ring - * @tx_free_idx: The index of th next free descriptor - * @tx_next_idx: QDMA uses a linked list. This element points to the next - * free descriptor in the list - * @tx_free_count: QDMA uses a linked list. 
Track how many free descriptors - * are present - */ -struct mtk_tx_ring { - struct mtk_tx_dma *tx_dma; - struct mtk_tx_buf *tx_buf; - dma_addr_t tx_phys; - struct mtk_tx_dma *tx_next_free; - struct mtk_tx_dma *tx_last_free; - u16 tx_thresh; - int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num, - struct mtk_tx_ring *ring, bool gso); - int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again); - void (*tx_clean)(struct mtk_eth *eth); - - /* PDMA only */ - u16 tx_ring_size; - u16 tx_free_idx; - - /* QDMA only */ - u16 tx_next_idx; - atomic_t tx_free_count; -}; - -/* struct mtk_rx_ring - This struct holds info describing a RX ring - * @rx_dma: The descriptor ring - * @rx_data: The memory pointed at by the ring - * @trx_phys: The physical addr of rx_buf - * @rx_ring_size: How many descriptors are in the ring - * @rx_buf_size: The size of each packet buffer - * @rx_calc_idx: The current head of ring - */ -struct mtk_rx_ring { - struct mtk_rx_dma *rx_dma; - u8 **rx_data; - dma_addr_t rx_phys; - u16 rx_ring_size; - u16 frag_size; - u16 rx_buf_size; - u16 rx_calc_idx; -}; - -/* currently no SoC has more than 2 macs */ -#define MTK_MAX_DEVS 2 - -/* struct mtk_eth - This is the main datasructure for holding the state - * of the driver - * @dev: The device pointer - * @base: The mapped register i/o base - * @page_lock: Make sure that register operations are atomic - * @soc: pointer to our SoC specific data - * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a - * dummy for NAPI to work - * @netdev: The netdev instances - * @mac: Each netdev is linked to a physical MAC - * @switch_np: The phandle for the switch - * @irq: The IRQ that we are using - * @msg_enable: Ethtool msg level - * @ysclk: The sysclk rate - neeed for calibration - * @ethsys: The register map pointing at the range used to setup - * MII modes - * @dma_refcnt: track how many netdevs are using the DMA engine - * @tx_ring: Pointer to the memore holding info about the TX ring - * @rx_ring: Pointer to the memore holding info about the RX ring - * @rx_napi: The NAPI struct - * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring - * @scratch_head: The scratch memory that scratch_ring points to. 
- * @phy: Info about the attached PHYs - * @mii_bus: If there is a bus we need to create an instance for it - * @link: Track if the ports have a physical link - * @sw_priv: Pointer to the switches private data - * @vlan_map: RX VID tracking - */ - -struct mtk_eth { - struct device *dev; - void __iomem *base; - spinlock_t page_lock; - struct mtk_soc_data *soc; - struct net_device dummy_dev; - struct net_device *netdev[MTK_MAX_DEVS]; - struct mtk_mac *mac[MTK_MAX_DEVS]; - struct device_node *switch_np; - int irq; - u32 msg_enable; - unsigned long sysclk; - struct regmap *ethsys; - atomic_t dma_refcnt; - struct mtk_tx_ring tx_ring; - struct mtk_rx_ring rx_ring[2]; - struct napi_struct rx_napi; - struct mtk_tx_dma *scratch_ring; - void *scratch_head; - struct mtk_phy *phy; - struct mii_bus *mii_bus; - int link[8]; - void *sw_priv; - unsigned long vlan_map; -}; - -/* struct mtk_mac - the structure that holds the info about the MACs of the - * SoC - * @id: The number of the MAC - * @of_node: Our devicetree node - * @hw: Backpointer to our main datastruture - * @hw_stats: Packet statistics counter - * @phy_dev: The attached PHY if available - * @phy_flags: The PHYs flags - * @pending_work: The workqueue used to reset the dma ring - */ -struct mtk_mac { - int id; - struct device_node *of_node; - struct mtk_eth *hw; - struct mtk_hw_stats *hw_stats; - struct phy_device *phy_dev; - u32 phy_flags; - struct work_struct pending_work; -}; - -/* the struct describing the SoC. these are declared in the soc_xyz.c files */ -extern const struct of_device_id of_mtk_match[]; - -/* read the hardware status register */ -void mtk_stats_update_mac(struct mtk_mac *mac); - -/* default checksum setup handler */ -void mtk_reset(struct mtk_eth *eth, u32 reset_bits); - -/* register i/o wrappers */ -void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg); -u32 mtk_r32(struct mtk_eth *eth, unsigned int reg); - -/* default clock calibration handler */ -int mtk_set_clock_cycle(struct mtk_eth *eth); - -/* default checksum setup handler */ -void mtk_csum_config(struct mtk_eth *eth); - -/* default forward config handler */ -void mtk_fwd_config(struct mtk_eth *eth); - -#endif /* MTK_ETH_H */ diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c deleted file mode 100644 index 5d63b5d96f6b..000000000000 --- a/drivers/staging/mt7621-eth/soc_mt7621.c +++ /dev/null @@ -1,161 +0,0 @@ -/* This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> - * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> - */ - -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/if_vlan.h> -#include <linux/of_net.h> - -#include <asm/mach-ralink/ralink_regs.h> - -#include "mtk_eth_soc.h" -#include "gsw_mt7620.h" -#include "mdio.h" - -#define MT7620_CDMA_CSG_CFG 0x400 -#define MT7621_CDMP_IG_CTRL (MT7620_CDMA_CSG_CFG + 0x00) -#define MT7621_CDMP_EG_CTRL (MT7620_CDMA_CSG_CFG + 0x04) -#define MT7621_RESET_FE BIT(6) -#define MT7621_L4_VALID BIT(24) - -#define MT7621_TX_DMA_UDF BIT(19) - -#define CDMA_ICS_EN BIT(2) -#define CDMA_UCS_EN BIT(1) -#define CDMA_TCS_EN BIT(0) - -#define GDMA_ICS_EN BIT(22) -#define GDMA_TCS_EN BIT(21) -#define GDMA_UCS_EN BIT(20) - -/* frame engine counters */ -#define MT7621_REG_MIB_OFFSET 0x2000 -#define MT7621_PPE_AC_BCNT0 (MT7621_REG_MIB_OFFSET + 0x00) -#define MT7621_GDM1_TX_GBCNT (MT7621_REG_MIB_OFFSET + 0x400) -#define MT7621_GDM2_TX_GBCNT (MT7621_GDM1_TX_GBCNT + 0x40) - -#define GSW_REG_GDMA1_MAC_ADRL 0x508 -#define GSW_REG_GDMA1_MAC_ADRH 0x50C -#define GSW_REG_GDMA2_MAC_ADRL 0x1508 -#define GSW_REG_GDMA2_MAC_ADRH 0x150C - -#define MT7621_MTK_RST_GL 0x04 -#define MT7620_MTK_INT_STATUS2 0x08 - -/* MTK_INT_STATUS reg on mt7620 define CNT_GDM1_AF at BIT(29) - * but after test it should be BIT(13). - */ -#define MT7621_MTK_GDM1_AF BIT(28) -#define MT7621_MTK_GDM2_AF BIT(29) - -static const u16 mt7621_reg_table[MTK_REG_COUNT] = { - [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG, - [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG, - [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG, - [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0, - [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0, - [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0, - [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0, - [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0, - [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0, - [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0, - [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0, - [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE, - [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS, - [MTK_REG_MTK_DMA_VID_BASE] = 0, - [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT, - [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL, - [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2, -}; - -static void mt7621_mtk_reset(struct mtk_eth *eth) -{ - mtk_reset(eth, MT7621_RESET_FE); -} - -static int mt7621_fwd_config(struct mtk_eth *eth) -{ - /* Setup GMAC1 only, there is no support for GMAC2 yet */ - mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff, - MT7620_GDMA1_FWD_CFG); - - /* Enable RX checksum */ - mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN | - GDMA_TCS_EN | GDMA_UCS_EN), - MT7620_GDMA1_FWD_CFG); - - /* Enable RX VLan Offloading */ - mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL); - - return 0; -} - -static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr) -{ - unsigned long flags; - - spin_lock_irqsave(&mac->hw->page_lock, flags); - if (mac->id == 0) { - mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1], - GSW_REG_GDMA1_MAC_ADRH); - mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) | - (hwaddr[4] << 8) | hwaddr[5], - GSW_REG_GDMA1_MAC_ADRL); - } - if (mac->id == 1) { - mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1], - GSW_REG_GDMA2_MAC_ADRH); - mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) | - (hwaddr[4] << 8) | hwaddr[5], - GSW_REG_GDMA2_MAC_ADRL); - } - 
spin_unlock_irqrestore(&mac->hw->page_lock, flags); -} - -static struct mtk_soc_data mt7621_data = { - .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_IPV6_CSUM, - .dma_type = MTK_PDMA, - .dma_ring_size = 256, - .napi_weight = 64, - .new_stats = 1, - .padding_64b = 1, - .rx_2b_offset = 1, - .rx_sg_dma = 1, - .has_switch = 1, - .mac_count = 2, - .reset_fe = mt7621_mtk_reset, - .set_mac = mt7621_set_mac, - .fwd_config = mt7621_fwd_config, - .switch_init = mtk_gsw_init, - .reg_table = mt7621_reg_table, - .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS, - .rx_int = RT5350_RX_DONE_INT, - .tx_int = RT5350_TX_DONE_INT, - .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF, - .checksum_bit = MT7621_L4_VALID, - .has_carrier = mt7620_has_carrier, - .mdio_read = mt7620_mdio_read, - .mdio_write = mt7620_mdio_write, - .mdio_adjust_link = mt7620_mdio_link_adjust, -}; - -const struct of_device_id of_mtk_match[] = { - { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data }, - {}, -}; - -MODULE_DEVICE_TABLE(of, of_mtk_match); diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig index d33533872a16..c8fa17cfa807 100644 --- a/drivers/staging/mt7621-pci/Kconfig +++ b/drivers/staging/mt7621-pci/Kconfig @@ -1,6 +1,7 @@ config PCI_MT7621 tristate "MediaTek MT7621 PCI Controller" depends on RALINK + depends on PCI select PCI_DRIVERS_GENERIC help This selects a driver for the MediaTek MT7621 PCI Controller. diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index d6248eecf123..2aee64fdaec5 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev) goto no_phy; phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, - PHY_INTERFACE_MODE_GMII); + priv->phy_mode); of_node_put(phy_node); if (!phydev) diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index ce61c5670ef6..986db76705cc 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip, return np; } -static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port) +static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface, + int port) { + struct device_node *np = priv->of_node; u32 delay_value; + bool rx_delay; + bool tx_delay; - if (!of_property_read_u32(np, "rx-delay", &delay_value)) + /* By default, both RX/TX delay is enabled in + * __cvmx_helper_rgmii_enable(). 
+ */ + rx_delay = true; + tx_delay = true; + + if (!of_property_read_u32(np, "rx-delay", &delay_value)) { cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value); - if (!of_property_read_u32(np, "tx-delay", &delay_value)) + rx_delay = delay_value > 0; + } + if (!of_property_read_u32(np, "tx-delay", &delay_value)) { cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value); + tx_delay = delay_value > 0; + } + + if (!rx_delay && !tx_delay) + priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; + else if (!rx_delay) + priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID; + else if (!tx_delay) + priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID; + else + priv->phy_mode = PHY_INTERFACE_MODE_RGMII; } static int cvm_oct_probe(struct platform_device *pdev) @@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev) priv->port = port; priv->queue = cvmx_pko_get_base_queue(priv->port); priv->fau = fau - cvmx_pko_get_num_queues(port) * 4; + priv->phy_mode = PHY_INTERFACE_MODE_NA; for (qos = 0; qos < 16; qos++) skb_queue_head_init(&priv->tx_free_list[qos]); for (qos = 0; qos < cvmx_pko_get_num_queues(port); @@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev) break; case CVMX_HELPER_INTERFACE_MODE_SGMII: + priv->phy_mode = PHY_INTERFACE_MODE_SGMII; dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; strcpy(dev->name, "eth%d"); break; @@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev) strcpy(dev->name, "spi%d"); break; - case CVMX_HELPER_INTERFACE_MODE_RGMII: case CVMX_HELPER_INTERFACE_MODE_GMII: + priv->phy_mode = PHY_INTERFACE_MODE_GMII; + dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; + strcpy(dev->name, "eth%d"); + break; + + case CVMX_HELPER_INTERFACE_MODE_RGMII: dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; strcpy(dev->name, "eth%d"); - cvm_set_rgmii_delay(priv->of_node, interface, + cvm_set_rgmii_delay(priv, interface, port_index); break; } diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index 4a07e7f43d12..be570d33685a 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -12,7 +12,7 @@ #define OCTEON_ETHERNET_H #include <linux/of.h> - +#include <linux/phy.h> #include <asm/octeon/cvmx-helper-board.h> /** @@ -33,6 +33,8 @@ struct octeon_ethernet { * cvmx_helper_interface_mode_t */ int imode; + /* PHY mode */ + phy_interface_t phy_mode; /* List of outstanding tx buffers per queue */ struct sk_buff_head tx_free_list[16]; unsigned int last_speed; diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c index 80b8d4153414..a54286498a47 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c +++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c @@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon) { unsigned char lob; int ret, i; - struct dcon_gpio *pin = &gpios_asis[0]; + const struct dcon_gpio *pin = &gpios_asis[0]; for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) { gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name, diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c index 1723a47a96b4..952f2ab51347 100644 --- a/drivers/staging/rtl8188eu/core/rtw_xmit.c +++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c @@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; - rtw_alloc_hwxmits(padapter); + res = rtw_alloc_hwxmits(padapter); + if (res == _FAIL) + goto exit; 
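/*
 * [Editor's note, not part of any patch above] A minimal sketch, in plain C,
 * of the mapping that the octeon cvm_set_rgmii_delay() change earlier in this
 * diff establishes between the MAC-side RGMII delays and the phy-mode later
 * handed to of_phy_connect(). The helper name rgmii_mode_from_delays() is
 * invented for illustration only; rx_delay/tx_delay mean "the MAC applies
 * that delay", as in the patch, so the PHY is asked to add whichever delays
 * the MAC omits.
 */
#include <linux/phy.h>

static phy_interface_t rgmii_mode_from_delays(bool rx_delay, bool tx_delay)
{
	if (!rx_delay && !tx_delay)
		return PHY_INTERFACE_MODE_RGMII_ID;	/* PHY adds both delays */
	if (!rx_delay)
		return PHY_INTERFACE_MODE_RGMII_RXID;	/* PHY adds the RX delay */
	if (!tx_delay)
		return PHY_INTERFACE_MODE_RGMII_TXID;	/* PHY adds the TX delay */
	return PHY_INTERFACE_MODE_RGMII;		/* MAC already adds both */
}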
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); for (i = 0; i < 4; i++) @@ -1503,7 +1505,7 @@ exit: return res; } -void rtw_alloc_hwxmits(struct adapter *padapter) +s32 rtw_alloc_hwxmits(struct adapter *padapter) { struct hw_xmit *hwxmits; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; @@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter) pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry, sizeof(struct hw_xmit), GFP_KERNEL); + if (!pxmitpriv->hwxmits) + return _FAIL; hwxmits = pxmitpriv->hwxmits; @@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter) hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; hwxmits[2] .sta_queue = &pxmitpriv->be_pending; hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; + return _SUCCESS; } void rtw_free_hwxmits(struct adapter *padapter) diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h index 788f59c74ea1..ba7e15fbde72 100644 --- a/drivers/staging/rtl8188eu/include/rtw_xmit.h +++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h @@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry); s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv); -void rtw_alloc_hwxmits(struct adapter *padapter); +s32 rtw_alloc_hwxmits(struct adapter *padapter); void rtw_free_hwxmits(struct adapter *padapter); s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt); diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c index 1920d02f7c9f..8c36acedf507 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.c +++ b/drivers/staging/rtl8712/rtl8712_cmd.c @@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf) static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf) { - u32 val; - void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; - if (pcmd->rsp && pcmd->rspsz > 0) - memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz); - pcmd_callback = cmd_callback[pcmd->cmdcode].callback; - if (!pcmd_callback) - r8712_free_cmd_obj(pcmd); - else - pcmd_callback(padapter, pcmd); + r8712_free_cmd_obj(pcmd); return H2C_SUCCESS; } diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h index 92fb77666d44..1ef86b8c592f 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.h +++ b/drivers/staging/rtl8712/rtl8712_cmd.h @@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd { static struct _cmd_callback cmd_callback[] = { {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/ {GEN_CMD_CODE(_Write_MACREG), NULL}, - {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback}, + {GEN_CMD_CODE(_Read_BBREG), NULL}, {GEN_CMD_CODE(_Write_BBREG), NULL}, {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback}, {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/ diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index 094d61bcb469..b87f13a0b563 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) } } - rtw_alloc_hwxmits(padapter); + res = rtw_alloc_hwxmits(padapter); + if (res == _FAIL) + goto exit; rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); for (i = 0; i < 4; i++) { @@ -2144,7 +2146,7 @@ exit: return res; } -void 
rtw_alloc_hwxmits(struct adapter *padapter) +s32 rtw_alloc_hwxmits(struct adapter *padapter) { struct hw_xmit *hwxmits; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; @@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter) pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry); - if (pxmitpriv->hwxmits == NULL) { - DBG_871X("alloc hwxmits fail!...\n"); - return; - } + if (!pxmitpriv->hwxmits) + return _FAIL; hwxmits = pxmitpriv->hwxmits; @@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter) } - + return _SUCCESS; } void rtw_free_hwxmits(struct adapter *padapter) diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h index 1b38b9182b31..37f42b2f22f1 100644 --- a/drivers/staging/rtl8723bs/include/rtw_xmit.h +++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h @@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv); -void rtw_alloc_hwxmits(struct adapter *padapter); +s32 rtw_alloc_hwxmits(struct adapter *padapter); void rtw_free_hwxmits(struct adapter *padapter); diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c index 9930ed954abb..4cc77b2016e1 100644 --- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c +++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c @@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv, rtlpriv->phydm.internal = kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL); + if (!rtlpriv->phydm.internal) + return 0; _rtl_phydm_init_com_info(rtlpriv, ic, params); diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c index f061dd1382aa..cf6b7a80b753 100644 --- a/drivers/staging/rtlwifi/rtl8822be/fw.c +++ b/drivers/staging/rtlwifi/rtl8822be/fw.c @@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1_rsvd_page_loc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index edff6ce85655..9d85a3a1af4c 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, return -EINVAL; spin_lock_irqsave(&speakup_info.spinlock, flags); + synth_soft.alive = 1; while (1) { prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); - if (!unicode) - synth_buffer_skip_nonlatin1(); - if (!synth_buffer_empty() || speakup_info.flushing) - break; + if (synth_current() == &synth_soft) { + if (!unicode) + synth_buffer_skip_nonlatin1(); + if (!synth_buffer_empty() || speakup_info.flushing) + break; + } spin_unlock_irqrestore(&speakup_info.spinlock, flags); if (fp->f_flags & O_NONBLOCK) { finish_wait(&speakup_event, &wait); @@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ while (chars_sent <= count - bytes_per_ch) { + if (synth_current() != &synth_soft) + break; if (speakup_info.flushing) { speakup_info.flushing = 0; ch = '\x18'; @@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait) poll_wait(fp, &speakup_event, wait); spin_lock_irqsave(&speakup_info.spinlock, flags); - if 
(!synth_buffer_empty() || speakup_info.flushing) + if (synth_current() == &synth_soft && + (!synth_buffer_empty() || speakup_info.flushing)) ret = EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&speakup_info.spinlock, flags); return ret; diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h index c8e688878fc7..ac6a74883af4 100644 --- a/drivers/staging/speakup/spk_priv.h +++ b/drivers/staging/speakup/spk_priv.h @@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n); int synth_release_region(unsigned long start, unsigned long n); int synth_add(struct spk_synth *in_synth); void synth_remove(struct spk_synth *in_synth); +struct spk_synth *synth_current(void); extern struct speakup_info_t speakup_info; diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c index 25f259ee4ffc..3568bfb89912 100644 --- a/drivers/staging/speakup/synth.c +++ b/drivers/staging/speakup/synth.c @@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth) } EXPORT_SYMBOL_GPL(synth_remove); +struct spk_synth *synth_current(void) +{ + return synth; +} +EXPORT_SYMBOL_GPL(synth_current); + short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM }; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 804daf83be35..064d0db4c51e 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev) struct device_node *fw_node; const struct of_device_id *of_id; struct vchiq_drvdata *drvdata; + struct device *vchiq_dev; int err; of_id = of_match_node(vchiq_of_match, pdev->dev.of_node); @@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev) goto failed_platform_init; } - if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid, - NULL, "vchiq"))) + vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL, + "vchiq"); + if (IS_ERR(vchiq_dev)) { + err = PTR_ERR(vchiq_dev); goto failed_device_create; + } vchiq_debugfs_init(); diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index b370985b58a1..c6bb4aaf9bd0 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv) return; } - MACvIntDisable(priv->PortOffset); - spin_lock_irqsave(&priv->lock, flags); /* Read low level stats */ @@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv) } spin_unlock_irqrestore(&priv->lock, flags); - - MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE); } static void vnt_interrupt_work(struct work_struct *work) @@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work) if (priv->vif) vnt_interrupt_process(priv); + + MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE); } static irqreturn_t vnt_interrupt(int irq, void *arg) { struct vnt_private *priv = arg; - if (priv->vif) - schedule_work(&priv->interrupt_work); + schedule_work(&priv->interrupt_work); + + MACvIntDisable(priv->PortOffset); return IRQ_HANDLED; } diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index 720760cd493f..ba39647a690c 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -119,8 +119,7 @@ static const struct debugfs_reg32 
bcm2835_thermal_regs[] = { static void bcm2835_thermal_debugfs(struct platform_device *pdev) { - struct thermal_zone_device *tz = platform_get_drvdata(pdev); - struct bcm2835_thermal_data *data = tz->devdata; + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); struct debugfs_regset32 *regset; data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL); @@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) data->tz = tz; - platform_set_drvdata(pdev, tz); + platform_set_drvdata(pdev, data); /* * Thermal_zone doesn't enable hwmon as default, @@ -290,8 +289,8 @@ err_clk: static int bcm2835_thermal_remove(struct platform_device *pdev) { - struct thermal_zone_device *tz = platform_get_drvdata(pdev); - struct bcm2835_thermal_data *data = tz->devdata; + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); + struct thermal_zone_device *tz = data->tz; debugfs_remove_recursive(data->debugfsdir); thermal_zone_of_sensor_unregister(&pdev->dev, tz); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 6fff16113628..f7c1f49ec87f 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -536,12 +536,11 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev, struct thermal_zone_device *tz, u32 power, unsigned long *state) { - unsigned int cur_freq, target_freq; + unsigned int target_freq; u32 last_load, normalised_power; struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; struct cpufreq_policy *policy = cpufreq_cdev->policy; - cur_freq = cpufreq_quick_get(policy->cpu); power = power > 0 ? power : 0; last_load = cpufreq_cdev->last_load ?: 1; normalised_power = (power * 100) / last_load; diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 61ca7ce3624e..5f3ed24e26ec 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -22,6 +22,13 @@ enum int3400_thermal_uuid { INT3400_THERMAL_PASSIVE_1, INT3400_THERMAL_ACTIVE, INT3400_THERMAL_CRITICAL, + INT3400_THERMAL_ADAPTIVE_PERFORMANCE, + INT3400_THERMAL_EMERGENCY_CALL_MODE, + INT3400_THERMAL_PASSIVE_2, + INT3400_THERMAL_POWER_BOSS, + INT3400_THERMAL_VIRTUAL_SENSOR, + INT3400_THERMAL_COOLING_MODE, + INT3400_THERMAL_HARDWARE_DUTY_CYCLING, INT3400_THERMAL_MAXIMUM_UUID, }; @@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = { "42A441D6-AE6A-462b-A84B-4A8CE79027D3", "3A95C389-E4B8-4629-A526-C52C88626BAE", "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", + "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D", + "5349962F-71E6-431D-9AE8-0A635B710AEE", + "9E04115A-AE87-4D1C-9500-0F3E340BFE75", + "F5A35014-C209-46A4-993A-EB56DE7530A1", + "6ED722A7-9240-48A5-B479-31EEF723D7CF", + "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531", + "BE84BABF-C4D4-403D-B495-3128FD44dAC1", }; struct int3400_thermal_priv { @@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) { - int3400_thermal_ops.get_mode = int3400_thermal_get_mode; - int3400_thermal_ops.set_mode = int3400_thermal_set_mode; - } + int3400_thermal_ops.get_mode = int3400_thermal_get_mode; + int3400_thermal_ops.set_mode = int3400_thermal_set_mode; + priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, priv, &int3400_thermal_ops, &int3400_thermal_params, 0, 0); diff --git 
a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index 7571f7c2e7c9..ac7256b5f020 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -101,7 +101,7 @@ struct powerclamp_worker_data { bool clamping; }; -static struct powerclamp_worker_data * __percpu worker_data; +static struct powerclamp_worker_data __percpu *worker_data; static struct thermal_cooling_device *cooling_dev; static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu * clamping kthread worker @@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu) struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu); struct kthread_worker *worker; - worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu); + worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu); if (IS_ERR(worker)) return; diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 5c07a61447d3..e4ea7f6aef20 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -199,6 +199,9 @@ enum { #define MT7622_TS1 0 #define MT7622_NUM_CONTROLLER 1 +/* The maximum number of banks */ +#define MAX_NUM_ZONES 8 + /* The calibration coefficient of sensor */ #define MT7622_CALIBRATION 165 @@ -249,7 +252,7 @@ struct mtk_thermal_data { const int num_controller; const int *controller_offset; bool need_switch_bank; - struct thermal_bank_cfg bank_data[]; + struct thermal_bank_cfg bank_data[MAX_NUM_ZONES]; }; struct mtk_thermal { @@ -268,7 +271,7 @@ struct mtk_thermal { s32 vts[MAX_NUM_VTS]; const struct mtk_thermal_data *conf; - struct mtk_thermal_bank banks[]; + struct mtk_thermal_bank banks[MAX_NUM_ZONES]; }; /* MT8183 thermal sensor data */ diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 48eef552cba4..fc9399d9c082 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp) struct exynos_tmu_data *data = p; int value, ret = 0; - if (!data || !data->tmu_read || !data->enabled) + if (!data || !data->tmu_read) return -EINVAL; else if (!data->enabled) /* diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index db5df3d54818..3bdd56a1021b 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c @@ -49,11 +49,6 @@ struct ar933x_uart_port { struct clk *clk; }; -static inline bool ar933x_uart_console_enabled(void) -{ - return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE); -} - static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, int offset) { @@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = { .verify_port = ar933x_uart_verify_port, }; +#ifdef CONFIG_SERIAL_AR933X_CONSOLE static struct ar933x_uart_port * ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS]; @@ -604,14 +600,7 @@ static struct console ar933x_uart_console = { .index = -1, .data = &ar933x_uart_driver, }; - -static void ar933x_uart_add_console_port(struct ar933x_uart_port *up) -{ - if (!ar933x_uart_console_enabled()) - return; - - ar933x_console_ports[up->port.line] = up; -} +#endif /* CONFIG_SERIAL_AR933X_CONSOLE */ static struct uart_driver ar933x_uart_driver = { .owner = THIS_MODULE, @@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev) baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP); up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD); - 
ar933x_uart_add_console_port(up); +#ifdef CONFIG_SERIAL_AR933X_CONSOLE + ar933x_console_ports[up->port.line] = up; +#endif ret = uart_add_one_port(&ar933x_uart_driver, &up->port); if (ret) @@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void) { int ret; - if (ar933x_uart_console_enabled()) - ar933x_uart_driver.cons = &ar933x_uart_console; +#ifdef CONFIG_SERIAL_AR933X_CONSOLE + ar933x_uart_driver.cons = &ar933x_uart_console; +#endif ret = uart_register_driver(&ar933x_uart_driver); if (ret) diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 05147fe24343..0b4f36905321 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -166,6 +166,8 @@ struct atmel_uart_port { unsigned int pending_status; spinlock_t lock_suspended; + bool hd_start_rx; /* can start RX during half-duplex operation */ + /* ISO7816 */ unsigned int fidi_min; unsigned int fidi_max; @@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value) __raw_writeb(value, port->membase + ATMEL_US_THR); } +static inline int atmel_uart_is_half_duplex(struct uart_port *port) +{ + return ((port->rs485.flags & SER_RS485_ENABLED) && + !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || + (port->iso7816.flags & SER_ISO7816_ENABLED); +} + #ifdef CONFIG_SERIAL_ATMEL_PDC static bool atmel_use_pdc_rx(struct uart_port *port) { @@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port) /* Disable interrupts */ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); - if (((port->rs485.flags & SER_RS485_ENABLED) && - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || - port->iso7816.flags & SER_ISO7816_ENABLED) + if (atmel_uart_is_half_duplex(port)) atmel_start_rx(port); + } /* @@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port) return; if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) - if (((port->rs485.flags & SER_RS485_ENABLED) && - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || - port->iso7816.flags & SER_ISO7816_ENABLED) + if (atmel_uart_is_half_duplex(port)) atmel_stop_rx(port); if (atmel_use_pdc_tx(port)) @@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg) */ if (!uart_circ_empty(xmit)) atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); - else if (((port->rs485.flags & SER_RS485_ENABLED) && - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || - port->iso7816.flags & SER_ISO7816_ENABLED) { - /* DMA done, stop TX, start RX for RS485 */ - atmel_start_rx(port); + else if (atmel_uart_is_half_duplex(port)) { + /* + * DMA done, re-enable TXEMPTY and signal that we can stop + * TX and start RX for RS485 + */ + atmel_port->hd_start_rx = true; + atmel_uart_writel(port, ATMEL_US_IER, + atmel_port->tx_done_mask); } spin_unlock_irqrestore(&port->lock, flags); @@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port) sg_dma_len(&atmel_port->sg_rx)/2, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(port->dev, "Preparing DMA cyclic failed\n"); + goto chan_err; + } desc->callback = atmel_complete_rx_dma; desc->callback_param = port; atmel_port->desc_rx = desc; @@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (pending & atmel_port->tx_done_mask) { - /* Either PDC or interrupt transmission */ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); + + /* Start RX if flag was set and FIFO is empty */ + if (atmel_port->hd_start_rx) { + 
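/*
 * [Editor's note, not part of the patch] The check that continues below is
 * the second half of the new half-duplex turnaround in atmel_serial: instead
 * of starting RX straight from atmel_complete_tx_dma(), that completion
 * handler now only sets hd_start_rx and re-enables the TXEMPTY interrupt,
 * and this TXEMPTY path turns the line around once the transmitter has
 * actually drained, warning if the FIFO is somehow not yet empty.
 */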
if (!(atmel_uart_readl(port, ATMEL_US_CSR) + & ATMEL_US_TXEMPTY)) + dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n"); + + atmel_port->hd_start_rx = false; + atmel_start_rx(port); + return; + } + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); } } @@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port) atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); } else { - if (((port->rs485.flags & SER_RS485_ENABLED) && - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || - port->iso7816.flags & SER_ISO7816_ENABLED) { + if (atmel_uart_is_half_duplex(port)) { /* DMA done, stop TX, start RX for RS485 */ atmel_start_rx(port); } diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index 6fb312e7af71..bfe5e9e034ec 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -148,8 +148,10 @@ static int configure_kgdboc(void) char *cptr = config; struct console *cons; - if (!strlen(config) || isspace(config[0])) + if (!strlen(config) || isspace(config[0])) { + err = 0; goto noconfig; + } kgdboc_io_ops.is_console = 0; kgdb_tty_driver = NULL; diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index f5bdde405627..450ba6d7996c 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi) if (spi->dev.of_node) { const struct of_device_id *of_id = of_match_device(max310x_dt_ids, &spi->dev); + if (!of_id) + return -ENODEV; devtype = (struct max310x_devtype *)of_id->data; } else { diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 231f751d1ef4..7e7b1559fa36 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev) return -EINVAL; } + if (!match) + return -ENODEV; + /* Assume that all UART ports have a DT alias or none has */ id = of_alias_get_id(pdev->dev.of_node, "serial"); if (!pdev->dev.of_node || id < 0) diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 27235a526cce..4c188f4079b3 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev) s->port.mapbase = r->start; s->port.membase = ioremap(r->start, resource_size(r)); + if (!s->port.membase) { + ret = -ENOMEM; + goto out_disable_clks; + } s->port.ops = &mxs_auart_ops; s->port.iotype = UPIO_MEM; s->port.fifosize = MXS_AUART_FIFO_SIZE; diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 3bcec1c20219..35e5f9c5d5be 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options) { struct uart_port *uport; struct qcom_geni_serial_port *port; - int baud; + int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 635178cf3eed..a31db15cd7c0 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void) ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver); if (ret < 0) { pr_err("failed to init sc16is7xx i2c --> %d\n", ret); - return ret; + goto err_i2c; } #endif @@ -1515,10 +1515,20 @@ static int __init sc16is7xx_init(void) ret = 
spi_register_driver(&sc16is7xx_spi_uart_driver); if (ret < 0) { pr_err("failed to init sc16is7xx spi --> %d\n", ret); - return ret; + goto err_spi; } #endif return ret; + +#ifdef CONFIG_SERIAL_SC16IS7XX_SPI +err_spi: +#endif +#ifdef CONFIG_SERIAL_SC16IS7XX_I2C + i2c_del_driver(&sc16is7xx_i2c_uart_driver); +err_i2c: +#endif + uart_unregister_driver(&sc16is7xx_uart); + return ret; } module_init(sc16is7xx_init); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 060fcd42b6d5..3cd139752d3f 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) { + if (uart_circ_empty(xmit)) sci_stop_tx(port); - } else { - ctrl = serial_port_in(port, SCSCR); - - if (port->type != PORT_SCI) { - serial_port_in(port, SCxSR); /* Dummy read */ - sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port)); - } - ctrl |= SCSCR_TIE; - serial_port_out(port, SCSCR, ctrl); - } } /* On SH3, SCIF may read end-of-break as a space->mark char */ @@ -2522,14 +2512,16 @@ done: * center of the last stop bit in sampling clocks. */ int last_stop = bits * 2 - 1; - int deviation = min_err * srr * last_stop / 2 / baud; + int deviation = DIV_ROUND_CLOSEST(min_err * last_stop * + (int)(srr + 1), + 2 * (int)baud); if (abs(deviation) >= 2) { /* At least two sampling clocks off at the * last stop bit; we can increase the error * margin by shifting the sampling point. */ - int shift = min(-8, max(7, deviation / 2)); + int shift = clamp(deviation / 2, -8, 7); hssrr |= (shift << HSCIF_SRHP_SHIFT) & HSCIF_SRHP_MASK; diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 044c3cbdcfa4..a9e12b3bc31d 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty) if (tty && C_HUPCL(tty)) tty_port_lower_dtr_rts(port); - if (port->ops->shutdown) + if (port->ops && port->ops->shutdown) port->ops->shutdown(port); } out: @@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup); */ int tty_port_carrier_raised(struct tty_port *port) { - if (port->ops->carrier_raised == NULL) + if (!port->ops || !port->ops->carrier_raised) return 1; return port->ops->carrier_raised(port); } @@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised); */ void tty_port_raise_dtr_rts(struct tty_port *port) { - if (port->ops->dtr_rts) + if (port->ops && port->ops->dtr_rts) port->ops->dtr_rts(port, 1); } EXPORT_SYMBOL(tty_port_raise_dtr_rts); @@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts); */ void tty_port_lower_dtr_rts(struct tty_port *port) { - if (port->ops->dtr_rts) + if (port->ops && port->ops->dtr_rts) port->ops->dtr_rts(port, 0); } EXPORT_SYMBOL(tty_port_lower_dtr_rts); @@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty, if (!tty_port_initialized(port)) { clear_bit(TTY_IO_ERROR, &tty->flags); - if (port->ops->activate) { + if (port->ops && port->ops->activate) { int retval = port->ops->activate(port, tty); if (retval) { mutex_unlock(&port->mutex); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index d34984aa646d..650c66886c80 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1520,7 +1520,8 @@ static void csi_J(struct vc_data *vc, int vpar) return; } scr_memsetw(start, vc->vc_video_erase_char, 2 * count); - update_region(vc, (unsigned long) start, count); + if (con_should_update(vc)) + 
do_update_region(vc, (unsigned long) start, count); vc->vc_need_wrap = 0; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 739f8960811a..ec666eb4b7b4 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work) clear_bit(EVENT_RX_STALL, &acm->flags); } - if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) { + if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags)) tty_port_tty_wakeup(&acm->port); - clear_bit(EVENT_TTY_WAKEUP, &acm->flags); - } } /* diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index 48277bbc15e4..73c8e6591746 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) do { controller = of_find_node_with_property(controller, "phys"); + if (!of_device_is_available(controller)) + continue; index = 0; do { if (arg0 == -1) { diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 8987cec9549d..ebcadaad89d1 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -473,11 +473,6 @@ static int usb_unbind_interface(struct device *dev) pm_runtime_disable(dev); pm_runtime_set_suspended(dev); - /* Undo any residual pm_autopm_get_interface_* calls */ - for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r) - usb_autopm_put_interface_no_suspend(intf); - atomic_set(&intf->pm_usage_cnt, 0); - if (!error) usb_autosuspend_device(udev); @@ -1633,7 +1628,6 @@ void usb_autopm_put_interface(struct usb_interface *intf) int status; usb_mark_last_busy(udev); - atomic_dec(&intf->pm_usage_cnt); status = pm_runtime_put_sync(&intf->dev); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), @@ -1662,7 +1656,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf) int status; usb_mark_last_busy(udev); - atomic_dec(&intf->pm_usage_cnt); status = pm_runtime_put(&intf->dev); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), @@ -1684,7 +1677,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf) struct usb_device *udev = interface_to_usbdev(intf); usb_mark_last_busy(udev); - atomic_dec(&intf->pm_usage_cnt); pm_runtime_put_noidle(&intf->dev); } EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend); @@ -1715,8 +1707,6 @@ int usb_autopm_get_interface(struct usb_interface *intf) status = pm_runtime_get_sync(&intf->dev); if (status < 0) pm_runtime_put_sync(&intf->dev); - else - atomic_inc(&intf->pm_usage_cnt); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), status); @@ -1750,8 +1740,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf) status = pm_runtime_get(&intf->dev); if (status < 0 && status != -EINPROGRESS) pm_runtime_put_noidle(&intf->dev); - else - atomic_inc(&intf->pm_usage_cnt); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), status); @@ -1775,7 +1763,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf) struct usb_device *udev = interface_to_usbdev(intf); usb_mark_last_busy(udev); - atomic_inc(&intf->pm_usage_cnt); pm_runtime_get_noresume(&intf->dev); } EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 3189181bb628..975d7c1288e3 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2742,6 +2742,9 
@@ int usb_add_hcd(struct usb_hcd *hcd, retval = usb_phy_roothub_set_mode(hcd->phy_roothub, PHY_MODE_USB_HOST_SS); if (retval) + retval = usb_phy_roothub_set_mode(hcd->phy_roothub, + PHY_MODE_USB_HOST); + if (retval) goto err_usb_phy_roothub_power_on; retval = usb_phy_roothub_power_on(hcd->phy_roothub); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 82239f27c4cc..e844bb7b5676 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -820,9 +820,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size) if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; - if (size <= 0 || !buf || !index) + if (size <= 0 || !buf) return -EINVAL; buf[0] = 0; + if (index <= 0 || index >= 256) + return -EINVAL; tbuf = kmalloc(256, GFP_NOIO); if (!tbuf) return -ENOMEM; diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index fdc6e4e403e8..8cced3609e24 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -29,6 +29,7 @@ #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa #define PCI_DEVICE_ID_INTEL_APL 0x5aaa #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 +#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee #define PCI_DEVICE_ID_INTEL_GLK 0x31aa #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e @@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP), (kernel_ulong_t) &dwc3_pci_intel_properties, }, diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 75b113a5b25c..f3816a5c861e 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -391,20 +391,20 @@ try_again: req->complete = f_hidg_req_complete; req->context = hidg; + spin_unlock_irqrestore(&hidg->write_spinlock, flags); + status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); if (status < 0) { ERROR(hidg->func.config->cdev, "usb_ep_queue error on int endpoint %zd\n", status); - goto release_write_pending_unlocked; + goto release_write_pending; } else { status = count; } - spin_unlock_irqrestore(&hidg->write_spinlock, flags); return status; release_write_pending: spin_lock_irqsave(&hidg->write_spinlock, flags); -release_write_pending_unlocked: hidg->write_pending = 0; spin_unlock_irqrestore(&hidg->write_spinlock, flags); diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index baf72f95f0f1..213b52508621 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -979,8 +979,18 @@ static int dummy_udc_start(struct usb_gadget *g, struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); struct dummy *dum = dum_hcd->dum; - if (driver->max_speed == USB_SPEED_UNKNOWN) + switch (g->speed) { + /* All the speeds we support */ + case USB_SPEED_LOW: + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + break; + default: + dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n", + driver->max_speed); return -EINVAL; + } /* * SLAVE side init ... 
the layer above hardware, which @@ -1784,9 +1794,10 @@ static void dummy_timer(struct timer_list *t) /* Bus speed is 500000 bytes/ms, so use a little less */ total = 490000; break; - default: + default: /* Can't happen */ dev_err(dummy_dev(dum_hcd), "bogus device speed\n"); - return; + total = 0; + break; } /* FIXME if HZ != 1000 this will probably misbehave ... */ @@ -1828,7 +1839,7 @@ restart: /* Used up this frame's bandwidth? */ if (total <= 0) - break; + continue; /* find the gadget's ep for this request (if configured) */ address = usb_pipeendpoint (urb->pipe); diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index b77f3126580e..c2011cd7df8c 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req) break; } if (&req->req != _req) { + ep->stopped = stopped; spin_unlock_irqrestore(&ep->dev->lock, flags); return -EINVAL; } diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index f63f82450bf4..898339e5df10 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma) (void) readl(&ep->dev->pci->pcimstctl); writel(BIT(DMA_START), &dma->dmastat); - - if (!ep->is_in) - stop_out_naking(ep); } static void start_dma(struct net2280_ep *ep, struct net2280_request *req) @@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req) writel(BIT(DMA_START), &dma->dmastat); return; } + stop_out_naking(ep); } tmp = dmactl_default; @@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req) break; } if (&req->req != _req) { + ep->stopped = stopped; spin_unlock_irqrestore(&ep->dev->lock, flags); - dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n", - __func__); + ep_dbg(ep->dev, "%s: Request mismatch\n", __func__); return -EINVAL; } diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index 934584f0a20a..6343fbacd244 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c @@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void) printk(KERN_INFO "driver %s\n", hcd_name); workqueue = create_singlethread_workqueue("u132"); retval = platform_driver_register(&u132_platform_driver); + if (retval) + destroy_workqueue(workqueue); + return retval; } diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index c78be578abb0..d932cc31711e 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci) return -1; writel(0, &dbc->regs->control); - xhci_dbc_mem_cleanup(xhci); dbc->state = DS_DISABLED; return 0; @@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci) ret = xhci_do_dbc_stop(xhci); spin_unlock_irqrestore(&dbc->lock, flags); - if (!ret) + if (!ret) { + xhci_dbc_mem_cleanup(xhci); pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); + } } static void diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index e2eece693655..96a740543183 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd) port_index = max_ports; while (port_index--) { u32 t1, t2; - + int retries = 10; +retry: t1 = readl(ports[port_index]->addr); t2 = xhci_port_state_to_neutral(t1); portsc_buf[port_index] = 0; - /* Bail out if 
a USB3 port has a new device in link training */ - if ((hcd->speed >= HCD_USB3) && + /* + * Give a USB3 port in link training time to finish, but don't + * prevent suspend as port might be stuck + */ + if ((hcd->speed >= HCD_USB3) && retries-- && (t1 & PORT_PLS_MASK) == XDEV_POLLING) { - bus_state->bus_suspended = 0; spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "Bus suspend bailout, port in polling\n"); - return -EBUSY; + msleep(XHCI_PORT_POLLING_LFPS_TIME); + spin_lock_irqsave(&xhci->lock, flags); + xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n", + port_index); + goto retry; } - /* suspend ports in U0, or bail out for new connect changes */ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { if ((t1 & PORT_CSC) && wake_enabled) { diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index a6e463715779..671bce18782c 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd) if (!xhci_rcar_wait_for_pll_active(hcd)) return -ETIMEDOUT; + xhci->quirks |= XHCI_TRUST_TX_LENGTH; return xhci_rcar_download_firmware(hcd); } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 40fa25c4d041..9215a28dad40 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci, } } - if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 && - DEV_SUPERSPEED_ANY(portsc)) { + if ((portsc & PORT_PLC) && + DEV_SUPERSPEED_ANY(portsc) && + ((portsc & PORT_PLS_MASK) == XDEV_U0 || + (portsc & PORT_PLS_MASK) == XDEV_U1 || + (portsc & PORT_PLS_MASK) == XDEV_U2)) { xhci_dbg(xhci, "resume SS port %d finished\n", port_id); - /* We've just brought the device into U0 through either the + /* We've just brought the device into U0/1/2 through either the * Resume state after a device remote wakeup, or through the * U3Exit state after a host-initiated resume. If it's a device * initiated remote wake, don't pass up the link state change, diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 652dc36e3012..9334cdee382a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -452,6 +452,14 @@ struct xhci_op_regs { */ #define XHCI_DEFAULT_BESL 4 +/* + * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports + * to complete link training. Usually link training completes much faster, + * so check status 10 times with 36ms sleep in places we need to wait for + * polling to complete. + */ +#define XHCI_PORT_POLLING_LFPS_TIME 36 + /** * struct xhci_intr_reg - Interrupt Register Set * @irq_pending: IMAN - Interrupt Management Register.
Used to enable diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 4d72b7d1d383..04684849d683 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, */ hub->port_swap = USB251XB_DEF_PORT_SWAP; of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) { - if ((port >= 0) && (port <= data->port_cnt)) + if (port <= data->port_cnt) hub->port_swap |= BIT(port); } @@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub) dev); int err; - if (np) { + if (np && of_id) { err = usb251xb_get_ofdata(hub, (struct usb251xb_data *)of_id->data); if (err) { diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 6d9fd5f64903..7b306aa22d25 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -314,6 +314,7 @@ static void yurex_disconnect(struct usb_interface *interface) usb_deregister_dev(interface, &yurex_class); /* prevent more I/O from starting */ + usb_poison_urb(dev->urb); mutex_lock(&dev->io_mutex); dev->interface = NULL; mutex_unlock(&dev->io_mutex); diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig index bcc23486c4ed..928c2cd6fc00 100644 --- a/drivers/usb/mtu3/Kconfig +++ b/drivers/usb/mtu3/Kconfig @@ -6,6 +6,7 @@ config USB_MTU3 tristate "MediaTek USB3 Dual Role controller" depends on USB || USB_GADGET depends on ARCH_MEDIATEK || COMPILE_TEST + depends on EXTCON || !EXTCON select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD help Say Y or M here if your system runs on MediaTek SoCs with diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index fffe23ab0189..979bef9bfb6b 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ + { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */ { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8f5b17471759..1d8461ae2c34 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index b863bedb55a1..5755f0df0025 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -567,7 +567,9 @@ /* * NovaTech product ids (FTDI_VID) */ -#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ +#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ +#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */ +#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */ /* * Synapse Wireless product 
ids (FTDI_VID) diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index fc52ac75fbf6..18110225d506 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, if (!urbtrack) return -ENOMEM; - kref_get(&mos_parport->ref_count); - urbtrack->mos_parport = mos_parport; urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urbtrack->urb) { kfree(urbtrack); @@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, usb_sndctrlpipe(usbdev, 0), (unsigned char *)urbtrack->setup, NULL, 0, async_complete, urbtrack); + kref_get(&mos_parport->ref_count); + urbtrack->mos_parport = mos_parport; kref_init(&urbtrack->ref_count); INIT_LIST_HEAD(&urbtrack->urblist_entry); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 11b21d9410f3..83869065b802 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EC25 0x0125 #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 +#define QUECTEL_PRODUCT_EM12 0x0512 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */ + .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) }, /* Quectel products using Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), @@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), @@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ .driver_info = RSVD(4) }, - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ - { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 
*/ + .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 31b024441938..cc794e25a0b6 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -763,18 +763,16 @@ static void rts51x_suspend_timer_fn(struct timer_list *t) break; case RTS51X_STAT_IDLE: case RTS51X_STAT_SS: - usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n", - atomic_read(&us->pusb_intf->pm_usage_cnt), + usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); - if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) { + if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) { usb_stor_dbg(us, "Ready to enter SS state\n"); rts51x_set_stat(chip, RTS51X_STAT_SS); /* ignore mass storage interface's children */ pm_suspend_ignore_children(&us->pusb_intf->dev, true); usb_autopm_put_interface_async(us->pusb_intf); - usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n", - atomic_read(&us->pusb_intf->pm_usage_cnt), + usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); } break; @@ -807,11 +805,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) int ret; if (working_scsi(srb)) { - usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n", - atomic_read(&us->pusb_intf->pm_usage_cnt), + usb_stor_dbg(us, "working scsi, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); - if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) { + if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) { ret = usb_autopm_get_interface(us->pusb_intf); usb_stor_dbg(us, "working scsi, ret=%d\n", ret); } diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 0f62db091d8d..a2233d72ae7c 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -37,6 +37,7 @@ S(SRC_ATTACHED), \ S(SRC_STARTUP), \ S(SRC_SEND_CAPABILITIES), \ + S(SRC_SEND_CAPABILITIES_TIMEOUT), \ S(SRC_NEGOTIATE_CAPABILITIES), \ S(SRC_TRANSITION_SUPPLY), \ S(SRC_READY), \ @@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port) /* port->hard_reset_count = 0; */ port->caps_count = 0; port->pd_capable = true; - tcpm_set_state_cond(port, hard_reset_state(port), + tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT, PD_T_SEND_SOURCE_CAP); } break; + case SRC_SEND_CAPABILITIES_TIMEOUT: + /* + * Error recovery for a PD_DATA_SOURCE_CAP reply timeout. + * + * PD 2.0 sinks are supposed to accept src-capabilities with a + * 3.0 header and simply ignore any src PDOs which the sink does + * not understand such as PPS but some 2.0 sinks instead ignore + * the entire PD_DATA_SOURCE_CAP message, causing contract + * negotiation to fail. + * + * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try + * sending src-capabilities with a lower PD revision to + * make these broken sinks work. 
+ */ + if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) { + tcpm_set_state(port, HARD_RESET_SEND, 0); + } else if (port->negotiated_rev > PD_REV20) { + port->negotiated_rev--; + port->hard_reset_count = 0; + tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); + } else { + tcpm_set_state(port, hard_reset_state(port), 0); + } + break; case SRC_NEGOTIATE_CAPABILITIES: ret = tcpm_pd_check_request(port); if (ret < 0) { diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c index 423208e19383..6770afd40765 100644 --- a/drivers/usb/typec/tcpm/wcove.c +++ b/drivers/usb/typec/tcpm/wcove.c @@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev) wcove->dev = &pdev->dev; wcove->regmap = pmic->regmap; - irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, - platform_get_irq(pdev, 0)); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq); + return irq; + } + + irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq); if (irq < 0) return irq; diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 97b09a42a10c..dbfb2f24d71e 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -361,16 +361,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) } if (usb_endpoint_xfer_isoc(epd)) { - /* validate packet size and number of packets */ - unsigned int maxp, packets, bytes; - - maxp = usb_endpoint_maxp(epd); - maxp *= usb_endpoint_maxp_mult(epd); - bytes = pdu->u.cmd_submit.transfer_buffer_length; - packets = DIV_ROUND_UP(bytes, maxp); - + /* validate number of packets */ if (pdu->u.cmd_submit.number_of_packets < 0 || - pdu->u.cmd_submit.number_of_packets > packets) { + pdu->u.cmd_submit.number_of_packets > + USBIP_MAX_ISO_PACKETS) { dev_err(&sdev->udev->dev, "CMD_SUBMIT: isoc invalid num packets %d\n", pdu->u.cmd_submit.number_of_packets); diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index bf8afe9b5883..8be857a4fa13 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -121,6 +121,13 @@ extern struct device_attribute dev_attr_usbip_debug; #define USBIP_DIR_OUT 0x00 #define USBIP_DIR_IN 0x01 +/* + * Arbitrary limit for the maximum number of isochronous packets in an URB, + * compare for example the uhci_submit_isochronous function in + * drivers/usb/host/uhci-q.c + */ +#define USBIP_MAX_ISO_PACKETS 1024 + /** * struct usbip_header_basic - data pertinent to every request * @command: the usbip request type diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index a25659b5a5d1..3fa20e95a6bb 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void) rc = pci_add_dynid(&vfio_pci_driver, vendor, device, subvendor, subdevice, class, class_mask, 0); if (rc) - pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n", + pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n", vendor, device, subvendor, subdevice, class, class_mask, rc); else - pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n", + pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n", vendor, device, subvendor, subdevice, class, class_mask); } diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 8dbb270998f4..6b64e45a5269 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -1398,7 
+1398,7 @@ unlock_exit: mutex_unlock(&container->lock); } -const struct vfio_iommu_driver_ops tce_iommu_driver_ops = { +static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = { .name = "iommu-vfio-powerpc", .owner = THIS_MODULE, .open = tce_iommu_open, diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 73652e21efec..d0f731c9920a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -58,12 +58,18 @@ module_param_named(disable_hugepages, MODULE_PARM_DESC(disable_hugepages, "Disable VFIO IOMMU support for IOMMU hugepages."); +static unsigned int dma_entry_limit __read_mostly = U16_MAX; +module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644); +MODULE_PARM_DESC(dma_entry_limit, + "Maximum number of user DMA mappings per container (65535)."); + struct vfio_iommu { struct list_head domain_list; struct vfio_domain *external_domain; /* domain for external user */ struct mutex lock; struct rb_root dma_list; struct blocking_notifier_head notifier; + unsigned int dma_avail; bool v2; bool nesting; }; @@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) vfio_unlink_dma(iommu, dma); put_task_struct(dma->task); kfree(dma); + iommu->dma_avail++; } static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) @@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, goto out_unlock; } + if (!iommu->dma_avail) { + ret = -ENOSPC; + goto out_unlock; + } + dma = kzalloc(sizeof(*dma), GFP_KERNEL); if (!dma) { ret = -ENOMEM; goto out_unlock; } + iommu->dma_avail--; dma->iova = iova; dma->vaddr = vaddr; dma->prot = prot; @@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg) INIT_LIST_HEAD(&iommu->domain_list); iommu->dma_list = RB_ROOT; + iommu->dma_avail = dma_entry_limit; mutex_init(&iommu->lock); BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 5ace833de746..351af88231ad 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem, u64 start, u64 size, u64 end, u64 userspace_addr, int perm) { - struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC); + struct vhost_umem_node *tmp, *node; + if (!size) + return -EFAULT; + + node = kmalloc(sizeof(*node), GFP_ATOMIC); if (!node) return -ENOMEM; diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index df7d09409efe..8ca333f21292 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -27,6 +27,10 @@ #define GUEST_MAPPINGS_TRIES 5 +#define VBG_KERNEL_REQUEST \ + (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \ + VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN) + /** * Reserves memory in which the VMM can relocate any guest mappings * that are floating around. @@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev) int i, rc; /* Query the required space. */ - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO); + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO, + VBG_KERNEL_REQUEST); if (!req) return; @@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev) * Tell the host that we're going to free the memory we reserved for * it, the free it up. (Leak the memory if anything goes wrong here.) 
*/ - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO); + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO, + VBG_KERNEL_REQUEST); if (!req) return; @@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev) struct vmmdev_guest_info2 *req2 = NULL; int rc, ret = -ENOMEM; - req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO); - req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2); + req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO, + VBG_KERNEL_REQUEST); + req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2, + VBG_KERNEL_REQUEST); if (!req1 || !req2) goto out_free; @@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev) req2->additions_minor = VBG_VERSION_MINOR; req2->additions_build = VBG_VERSION_BUILD; req2->additions_revision = VBG_SVN_REV; - /* (no features defined yet) */ - req2->additions_features = 0; + req2->additions_features = + VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO; strlcpy(req2->name, VBG_VERSION_STRING, sizeof(req2->name)); @@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active) struct vmmdev_guest_status *req; int rc; - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS); + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS, + VBG_KERNEL_REQUEST); if (!req) return -ENOMEM; @@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled) struct vmmdev_heartbeat *req; int rc; - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE); + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE, + VBG_KERNEL_REQUEST); if (!req) return -ENOMEM; @@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev) gdev->guest_heartbeat_req = vbg_req_alloc( sizeof(*gdev->guest_heartbeat_req), - VMMDEVREQ_GUEST_HEARTBEAT); + VMMDEVREQ_GUEST_HEARTBEAT, + VBG_KERNEL_REQUEST); if (!gdev->guest_heartbeat_req) return -ENOMEM; @@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev, struct vmmdev_mask *req; int rc; - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK, + VBG_KERNEL_REQUEST); if (!req) return -ENOMEM; @@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev, u32 changed, previous; int rc, ret = 0; - /* Allocate a request buffer before taking the spinlock */ - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); + /* + * Allocate a request buffer before taking the spinlock, when + * the session is being terminated the requestor is the kernel, + * as we're cleaning up. + */ + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK, + session_termination ? 
VBG_KERNEL_REQUEST : + session->requestor); if (!req) { if (!session_termination) return -ENOMEM; @@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev) struct vmmdev_mask *req; int rc; - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES, + VBG_KERNEL_REQUEST); if (!req) return -ENOMEM; @@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev, u32 changed, previous; int rc, ret = 0; - /* Allocate a request buffer before taking the spinlock */ - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); + /* + * Allocate a request buffer before taking the spinlock, when + * the session is being terminated the requestor is the kernel, + * as we're cleaning up. + */ + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES, + session_termination ? VBG_KERNEL_REQUEST : + session->requestor); if (!req) { if (!session_termination) return -ENOMEM; @@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev) struct vmmdev_host_version *req; int rc, ret; - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION); + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION, + VBG_KERNEL_REQUEST); if (!req) return -ENOMEM; @@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events) gdev->mem_balloon.get_req = vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req), - VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ); + VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ, + VBG_KERNEL_REQUEST); gdev->mem_balloon.change_req = vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req), - VMMDEVREQ_CHANGE_MEMBALLOON); + VMMDEVREQ_CHANGE_MEMBALLOON, + VBG_KERNEL_REQUEST); gdev->cancel_req = vbg_req_alloc(sizeof(*(gdev->cancel_req)), - VMMDEVREQ_HGCM_CANCEL2); + VMMDEVREQ_HGCM_CANCEL2, + VBG_KERNEL_REQUEST); gdev->ack_events_req = vbg_req_alloc(sizeof(*gdev->ack_events_req), - VMMDEVREQ_ACKNOWLEDGE_EVENTS); + VMMDEVREQ_ACKNOWLEDGE_EVENTS, + VBG_KERNEL_REQUEST); gdev->mouse_status_req = vbg_req_alloc(sizeof(*gdev->mouse_status_req), - VMMDEVREQ_GET_MOUSE_STATUS); + VMMDEVREQ_GET_MOUSE_STATUS, + VBG_KERNEL_REQUEST); if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req || !gdev->cancel_req || !gdev->ack_events_req || @@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev) * vboxguest_linux.c calls this when userspace opens the char-device. * Return: A pointer to the new session or an ERR_PTR on error. * @gdev: The Guest extension device. - * @user: Set if this is a session for the vboxuser device. + * @requestor: VMMDEV_REQUESTOR_* flags */ -struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user) +struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor) { struct vbg_session *session; @@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user) return ERR_PTR(-ENOMEM); session->gdev = gdev; - session->user_session = user; + session->requestor = requestor; return session; } @@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session) if (!session->hgcm_client_ids[i]) continue; - vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc); + /* requestor is kernel here, as we're cleaning up. 
*/ + vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST, + session->hgcm_client_ids[i], &rc); } kfree(session); @@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session, return -EPERM; } - if (trusted_apps_only && session->user_session) { + if (trusted_apps_only && + (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) { vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n", req->request_type); return -EPERM; @@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev, if (i >= ARRAY_SIZE(session->hgcm_client_ids)) return -EMFILE; - ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id, - &conn->hdr.rc); + ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc, + &client_id, &conn->hdr.rc); mutex_lock(&gdev->session_mutex); if (ret == 0 && conn->hdr.rc >= 0) { @@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev, if (i >= ARRAY_SIZE(session->hgcm_client_ids)) return -EINVAL; - ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc); + ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id, + &disconn->hdr.rc); mutex_lock(&gdev->session_mutex); if (ret == 0 && disconn->hdr.rc >= 0) @@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev, } if (IS_ENABLED(CONFIG_COMPAT) && f32bit) - ret = vbg_hgcm_call32(gdev, client_id, + ret = vbg_hgcm_call32(gdev, session->requestor, client_id, call->function, call->timeout_ms, VBG_IOCTL_HGCM_CALL_PARMS32(call), call->parm_count, &call->hdr.rc); else - ret = vbg_hgcm_call(gdev, client_id, + ret = vbg_hgcm_call(gdev, session->requestor, client_id, call->function, call->timeout_ms, VBG_IOCTL_HGCM_CALL_PARMS(call), call->parm_count, &call->hdr.rc); @@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev, } static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, + struct vbg_session *session, struct vbg_ioctl_write_coredump *dump) { struct vmmdev_write_core_dump *req; @@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0)) return -EINVAL; - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP); + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP, + session->requestor); if (!req) return -ENOMEM; @@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) case VBG_IOCTL_CHECK_BALLOON: return vbg_ioctl_check_balloon(gdev, data); case VBG_IOCTL_WRITE_CORE_DUMP: - return vbg_ioctl_write_core_dump(gdev, data); + return vbg_ioctl_write_core_dump(gdev, session, data); } /* Variable sized requests. */ @@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features) struct vmmdev_mouse_status *req; int rc; - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS); + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS, + VBG_KERNEL_REQUEST); if (!req) return -ENOMEM; diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h index 7ad9ec45bfa9..4188c12b839f 100644 --- a/drivers/virt/vboxguest/vboxguest_core.h +++ b/drivers/virt/vboxguest/vboxguest_core.h @@ -154,15 +154,15 @@ struct vbg_session { * host. Protected by vbg_gdev.session_mutex. */ u32 guest_caps; - /** Does this session belong to a root process or a user one? */ - bool user_session; + /** VMMDEV_REQUESTOR_* flags */ + u32 requestor; /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. 
*/ bool cancel_waiters; }; int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events); void vbg_core_exit(struct vbg_dev *gdev); -struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user); +struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor); void vbg_core_close_session(struct vbg_session *session); int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data); int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features); @@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id); void vbg_linux_mouse_event(struct vbg_dev *gdev); /* Private (non exported) functions form vboxguest_utils.c */ -void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); +void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type, + u32 requestor); void vbg_req_free(void *req, size_t len); int vbg_req_perform(struct vbg_dev *gdev, void *req); int vbg_hgcm_call32( - struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, - struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, - int *vbox_status); + struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function, + u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32, + u32 parm_count, int *vbox_status); #endif diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index 6e2a9619192d..6e8c0f1c1056 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c @@ -5,6 +5,7 @@ * Copyright (C) 2006-2016 Oracle Corporation */ +#include <linux/cred.h> #include <linux/input.h> #include <linux/kernel.h> #include <linux/miscdevice.h> @@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex); /** Global vbg_gdev pointer used by vbg_get/put_gdev. 
*/ static struct vbg_dev *vbg_gdev; +static u32 vbg_misc_device_requestor(struct inode *inode) +{ + u32 requestor = VMMDEV_REQUESTOR_USERMODE | + VMMDEV_REQUESTOR_CON_DONT_KNOW | + VMMDEV_REQUESTOR_TRUST_NOT_GIVEN; + + if (from_kuid(current_user_ns(), current->cred->uid) == 0) + requestor |= VMMDEV_REQUESTOR_USR_ROOT; + else + requestor |= VMMDEV_REQUESTOR_USR_USER; + + if (in_egroup_p(inode->i_gid)) + requestor |= VMMDEV_REQUESTOR_GRP_VBOX; + + return requestor; +} + static int vbg_misc_device_open(struct inode *inode, struct file *filp) { struct vbg_session *session; @@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp) /* misc_open sets filp->private_data to our misc device */ gdev = container_of(filp->private_data, struct vbg_dev, misc_device); - session = vbg_core_open_session(gdev, false); + session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode)); if (IS_ERR(session)) return PTR_ERR(session); @@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp) gdev = container_of(filp->private_data, struct vbg_dev, misc_device_user); - session = vbg_core_open_session(gdev, false); + session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) | + VMMDEV_REQUESTOR_USER_DEVICE); if (IS_ERR(session)) return PTR_ERR(session); @@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, req == VBG_IOCTL_VMMDEV_REQUEST_BIG; if (is_vmmdev_req) - buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); + buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT, + session->requestor); else buf = kmalloc(size, GFP_KERNEL); if (!buf) diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index bf4474214b4d..75fd140b02ff 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err); VBG_LOG(vbg_debug, pr_debug); #endif -void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) +void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type, + u32 requestor) { struct vmmdev_request_header *req; int order = get_order(PAGE_ALIGN(len)); @@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) req->request_type = req_type; req->rc = VERR_GENERAL_FAILURE; req->reserved1 = 0; - req->reserved2 = 0; + req->requestor = requestor; return req; } @@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev, return done; } -int vbg_hgcm_connect(struct vbg_dev *gdev, +int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor, struct vmmdev_hgcm_service_location *loc, u32 *client_id, int *vbox_status) { @@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev, int rc; hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect), - VMMDEVREQ_HGCM_CONNECT); + VMMDEVREQ_HGCM_CONNECT, requestor); if (!hgcm_connect) return -ENOMEM; @@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev, } EXPORT_SYMBOL(vbg_hgcm_connect); -int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status) +int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor, + u32 client_id, int *vbox_status) { struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL; int rc; hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect), - VMMDEVREQ_HGCM_DISCONNECT); + VMMDEVREQ_HGCM_DISCONNECT, + requestor); if (!hgcm_disconnect) return -ENOMEM; @@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result( return 0; } -int vbg_hgcm_call(struct vbg_dev *gdev, 
u32 client_id, u32 function, - u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, - u32 parm_count, int *vbox_status) +int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id, + u32 function, u32 timeout_ms, + struct vmmdev_hgcm_function_parameter *parms, u32 parm_count, + int *vbox_status) { struct vmmdev_hgcm_call *call; void **bounce_bufs = NULL; @@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, goto free_bounce_bufs; } - call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL); + call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor); if (!call) { ret = -ENOMEM; goto free_bounce_bufs; @@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call); #ifdef CONFIG_COMPAT int vbg_hgcm_call32( - struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, - struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, - int *vbox_status) + struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function, + u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32, + u32 parm_count, int *vbox_status) { struct vmmdev_hgcm_function_parameter *parm64 = NULL; u32 i, size; @@ -689,7 +693,7 @@ int vbg_hgcm_call32( goto out_free; } - ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms, + ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms, parm64, parm_count, vbox_status); if (ret < 0) goto out_free; diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h index 77f0c8f8a231..84834dad38d5 100644 --- a/drivers/virt/vboxguest/vboxguest_version.h +++ b/drivers/virt/vboxguest/vboxguest_version.h @@ -9,11 +9,10 @@ #ifndef __VBOX_VERSION_H__ #define __VBOX_VERSION_H__ -/* Last synced October 4th 2017 */ -#define VBG_VERSION_MAJOR 5 -#define VBG_VERSION_MINOR 2 +#define VBG_VERSION_MAJOR 6 +#define VBG_VERSION_MINOR 0 #define VBG_VERSION_BUILD 0 -#define VBG_SVN_REV 68940 -#define VBG_VERSION_STRING "5.2.0" +#define VBG_SVN_REV 127566 +#define VBG_VERSION_STRING "6.0.0" #endif diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h index 5e2ae978935d..6337b8d75d96 100644 --- a/drivers/virt/vboxguest/vmmdev.h +++ b/drivers/virt/vboxguest/vmmdev.h @@ -98,8 +98,8 @@ struct vmmdev_request_header { s32 rc; /** Reserved field no.1. MBZ. */ u32 reserved1; - /** Reserved field no.2. MBZ. */ - u32 reserved2; + /** IN: Requestor information (VMMDEV_REQUESTOR_*) */ + u32 requestor; }; VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24); @@ -247,6 +247,8 @@ struct vmmdev_guest_info { }; VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8); +#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO BIT(0) + /** struct vmmdev_guestinfo2 - Guest information report, version 2. */ struct vmmdev_guest_info2 { /** Header. */ @@ -259,7 +261,7 @@ struct vmmdev_guest_info2 { u32 additions_build; /** SVN revision. */ u32 additions_revision; - /** Feature mask, currently unused. */ + /** Feature mask. 
*/ u32 additions_features; /** * The intentional meaning of this field was: diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index d0584c040c60..7a0398bb84f7 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev) for (i = 0; i < vp_dev->msix_used_vectors; ++i) free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev); - for (i = 0; i < vp_dev->msix_vectors; i++) - if (vp_dev->msix_affinity_masks[i]) - free_cpumask_var(vp_dev->msix_affinity_masks[i]); + if (vp_dev->msix_affinity_masks) { + for (i = 0; i < vp_dev->msix_vectors; i++) + if (vp_dev->msix_affinity_masks[i]) + free_cpumask_var(vp_dev->msix_affinity_masks[i]); + } if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 18846afb39da..5df92c308286 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -882,6 +882,8 @@ static struct virtqueue *vring_create_virtqueue_split( GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO); if (queue) break; + if (!may_reduce_num) + return NULL; } if (!num) diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c index 0f4ecfcdb549..a9fb77585272 100644 --- a/drivers/w1/masters/ds2490.c +++ b/drivers/w1/masters/ds2490.c @@ -1016,15 +1016,15 @@ static int ds_probe(struct usb_interface *intf, /* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */ alt = 3; err = usb_set_interface(dev->udev, - intf->altsetting[alt].desc.bInterfaceNumber, alt); + intf->cur_altsetting->desc.bInterfaceNumber, alt); if (err) { dev_err(&dev->udev->dev, "Failed to set alternative setting %d " "for %d interface: err=%d.\n", alt, - intf->altsetting[alt].desc.bInterfaceNumber, err); + intf->cur_altsetting->desc.bInterfaceNumber, err); goto err_out_clear; } - iface_desc = &intf->altsetting[alt]; + iface_desc = intf->cur_altsetting; if (iface_desc->desc.bNumEndpoints != NUM_EP-1) { pr_info("Num endpoints=%d. It is not DS9490R.\n", iface_desc->desc.bNumEndpoints); diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c index de01a6d0059d..a1c61e351d3f 100644 --- a/drivers/xen/privcmd-buf.c +++ b/drivers/xen/privcmd-buf.c @@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; - vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *), - GFP_KERNEL); + vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL); if (!vma_priv) return -ENOMEM; diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index c3e201025ef0..0782ff3c2273 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp) if (xen_store_evtchn == 0) return -ENOENT; - nonseekable_open(inode, filp); - - filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */ + stream_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL)