Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_processor.c | 9
-rw-r--r--  drivers/acpi/acpi_video.c | 9
-rw-r--r--  drivers/acpi/acpica/hwregs.c | 23
-rw-r--r--  drivers/acpi/processor_throttling.c | 9
-rw-r--r--  drivers/atm/firestream.c | 6
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c | 17
-rw-r--r--  drivers/crypto/omap-sham.c | 2
-rw-r--r--  drivers/dma-buf/dma-buf.c | 7
-rw-r--r--  drivers/dma-buf/reservation.c | 72
-rw-r--r--  drivers/gpio/gpio-lpc32xx.c | 48
-rw-r--r--  drivers/gpio/gpiolib.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 21
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | 54
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 7
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 32
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 14
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 16
-rw-r--r--  drivers/gpu/drm/vc4/vc4_regs.h | 4
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 49
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r--  drivers/irqchip/irq-pic32-evic.c | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 4
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 9
-rw-r--r--  drivers/net/ethernet/arc/emac_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/alx/alx.h | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 48
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 17
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 52
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 18
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 32
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 102
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2
-rw-r--r--  drivers/net/team/team.c | 9
-rw-r--r--  drivers/net/usb/pegasus.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 51
-rw-r--r--  drivers/net/virtio_net.c | 18
-rw-r--r--  drivers/net/vxlan.c | 3
-rw-r--r--  drivers/perf/arm_pmu.c | 12
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 5
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-nomadik.c | 2
-rw-r--r--  drivers/ptp/ptp_chardev.c | 12
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 5
-rw-r--r--  drivers/scsi/aacraid/linit.c | 11
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 3
-rw-r--r--  drivers/scsi/scsi_lib.c | 7
-rw-r--r--  drivers/scsi/sd.c | 9
-rw-r--r--  drivers/thermal/int340x_thermal/int3406_thermal.c | 2
-rw-r--r--  drivers/tty/Kconfig | 11
-rw-r--r--  drivers/tty/pty.c | 15
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c | 3
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 6
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 2
125 files changed, 1035 insertions(+), 518 deletions(-)
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 0d92d0f915e9..c7ba948d253c 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
pr->pblk = object.processor.pblk_address;
-
- /*
- * We don't care about error returns - we just try to mark
- * these reserved so that nobody else is confused into thinking
- * that this region might be unused..
- *
- * (In particular, allocating the IO range for Cardbus)
- */
- request_region(pr->throttling.address, 6, "ACPI CPU throttle");
}
/*
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 3d5b8a099351..c1d138e128cb 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
}
int acpi_video_get_levels(struct acpi_device *device,
- struct acpi_video_device_brightness **dev_br)
+ struct acpi_video_device_brightness **dev_br,
+ int *pmax_level)
{
union acpi_object *obj = NULL;
int i, max_level = 0, count = 0, level_ac_battery = 0;
@@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device,
br->count = count;
*dev_br = br;
+ if (pmax_level)
+ *pmax_level = max_level;
out:
kfree(obj);
@@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
struct acpi_video_device_brightness *br = NULL;
int result = -EINVAL;
- result = acpi_video_get_levels(device->dev, &br);
+ result = acpi_video_get_levels(device->dev, &br, &max_level);
if (result)
return result;
device->brightness = br;
@@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry) {
- if (!acpi_video_device_lcd_query_levels(dev, &levels))
+ if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
kfree(levels);
}
mutex_unlock(&video->device_list_lock);
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 0f18dbc9a37f..daceb80022b0 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value,
static u8
acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
{
- u64 address;
-
if (!reg->access_width) {
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ max_bit_width = 32;
+ }
+
/*
* Detect old register descriptors where only the bit_width field
- * makes senses. The target address is copied to handle possible
- * alignment issues.
+ * makes sense.
*/
- ACPI_MOVE_64_TO_64(&address, &reg->address);
- if (!reg->bit_offset && reg->bit_width &&
+ if (reg->bit_width < max_bit_width &&
+ !reg->bit_offset && reg->bit_width &&
ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
- ACPI_IS_ALIGNED(reg->bit_width, 8) &&
- ACPI_IS_ALIGNED(address, reg->bit_width)) {
+ ACPI_IS_ALIGNED(reg->bit_width, 8)) {
return (reg->bit_width);
- } else {
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- return (32);
- } else {
- return (max_bit_width);
- }
}
+ return (max_bit_width);
} else {
return (1 << (reg->access_width + 2));
}
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index f170d746336d..c72e64893d03 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
if (!pr->flags.throttling)
return -ENODEV;
+ /*
+ * We don't care about error returns - we just try to mark
+ * these reserved so that nobody else is confused into thinking
+ * that this region might be unused..
+ *
+ * (In particular, allocating the IO range for Cardbus)
+ */
+ request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
pr->throttling.state = 0;
duty_mask = pr->throttling.state_count - 1;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index a969a7e443be..85aaf2222587 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -181,13 +181,17 @@ static char *res_strings[] = {
"reserved 27",
"reserved 28",
"reserved 29",
- "reserved 30",
+ "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
"reassembly abort: no buffers",
"receive buffer overflow",
"change in GFC",
"receive buffer full",
"low priority discard - no receive descriptor",
"low priority discard - missing end of packet",
+ "reserved 37",
+ "reserved 38",
+ "reserved 39",
+ "reseverd 40",
"reserved 41",
"reserved 42",
"reserved 43",
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 7d00f2994738..809dd1e02091 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev)
/* make the ptr point to the corresponding buffer desc entry */
buf_desc_ptr += desc;
if (!desc || (desc > iadev->num_rx_desc) ||
- ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
+ ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
free_desc(dev, desc);
IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
return -1;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 36bc11a106aa..9009295f5134 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
return cpufreq_driver->fast_switch(policy, target_freq);
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3a9c4325d6e2..0d159b513469 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -449,7 +449,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
cpu->acpi_perf_data.states[0].core_frequency =
policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
- pr_info("_PPC limits will be enforced\n");
+ pr_debug("_PPC limits will be enforced\n");
return;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 52c7395cb8d8..0d0d4529ee36 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
unsigned int unit;
+ u32 unit_size;
int ret;
if (!ctx->u.aes.key_len)
@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!req->info)
return -EINVAL;
- for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
- if (!(req->nbytes & (unit_size_map[unit].size - 1)))
- break;
+ unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+ if (req->nbytes <= unit_size_map[0].size) {
+ for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+ if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+ unit_size = unit_size_map[unit].value;
+ break;
+ }
+ }
+ }
- if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+ if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) {
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
- rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+ rctx->cmd.u.xts.unit_size = unit_size;
rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
rctx->cmd.u.xts.iv = &rctx->iv_sg;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6eefaa2fe58f..63464e86f2b1 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1986,7 +1986,7 @@ err_algs:
&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
pm_runtime_disable(dev);
- if (dd->polling_mode)
+ if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
data_err:
dev_err(dev, "initialization failed.\n");
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4a2c07ee6677..6355ab38d630 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -33,6 +33,7 @@
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
+#include <linux/mm.h>
#include <uapi/linux/dma-buf.h>
@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
dmabuf = file->private_data;
/* check for overflowing the buffer's size */
- if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ if (vma->vm_pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
@@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* check for offset overflow */
- if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+ if (pgoff + vma_pages(vma) < pgoff)
return -EOVERFLOW;
/* check for overflowing the buffer's size */
- if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ if (pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c0bd5722c997..9566a62ad8e3 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -35,6 +35,17 @@
#include <linux/reservation.h>
#include <linux/export.h>
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer. A reservation object
+ * can have attached one exclusive fence (normally associated with
+ * write operations) or N shared fences (read operations). The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);
const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
-/*
- * Reserve space to add a shared fence to a reservation_object,
- * must be called with obj->lock held.
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence(). Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
*/
int reservation_object_reserve_shared(struct reservation_object *obj)
{
@@ -180,7 +199,11 @@ done:
fence_put(old_fence);
}
-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
* Add a fence to a shared slot, obj->lock must be held, and
* reservation_object_reserve_shared_fence has been called.
*/
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
+ * Add a fence to the exclusive slot. The obj->lock must be held.
+ */
void reservation_object_add_excl_fence(struct reservation_object *obj,
struct fence *fence)
{
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without update side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
struct fence **pfence_excl,
unsigned *pshared_count,
@@ -319,6 +361,18 @@ unlock:
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+/**
+ * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
return ret;
}
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index d39014daeef9..fc5f197906ac 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -29,7 +29,6 @@
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <mach/irqs.h>
#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
{
- return IRQ_LPC32XX_P0_P1_IRQ;
+ return -ENXIO;
}
-static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
- IRQ_LPC32XX_GPIO_00,
- IRQ_LPC32XX_GPIO_01,
- IRQ_LPC32XX_GPIO_02,
- IRQ_LPC32XX_GPIO_03,
- IRQ_LPC32XX_GPIO_04,
- IRQ_LPC32XX_GPIO_05,
-};
-
static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
{
- if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
- return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
return -ENXIO;
}
-static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
- IRQ_LPC32XX_GPI_00,
- IRQ_LPC32XX_GPI_01,
- IRQ_LPC32XX_GPI_02,
- IRQ_LPC32XX_GPI_03,
- IRQ_LPC32XX_GPI_04,
- IRQ_LPC32XX_GPI_05,
- IRQ_LPC32XX_GPI_06,
- IRQ_LPC32XX_GPI_07,
- IRQ_LPC32XX_GPI_08,
- IRQ_LPC32XX_GPI_09,
- -ENXIO, /* 10 */
- -ENXIO, /* 11 */
- -ENXIO, /* 12 */
- -ENXIO, /* 13 */
- -ENXIO, /* 14 */
- -ENXIO, /* 15 */
- -ENXIO, /* 16 */
- -ENXIO, /* 17 */
- -ENXIO, /* 18 */
- IRQ_LPC32XX_GPI_19,
- -ENXIO, /* 20 */
- -ENXIO, /* 21 */
- -ENXIO, /* 22 */
- -ENXIO, /* 23 */
- -ENXIO, /* 24 */
- -ENXIO, /* 25 */
- -ENXIO, /* 26 */
- -ENXIO, /* 27 */
- IRQ_LPC32XX_GPI_28,
-};
-
static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
{
- if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
- return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
return -ENXIO;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d407f904a31c..24f60d28f0c0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -20,6 +20,7 @@
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include <linux/compat.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
@@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct gpio_device *gdev = filp->private_data;
struct gpio_chip *chip = gdev->chip;
- int __user *ip = (int __user *)arg;
+ void __user *ip = (void __user *)arg;
/* We fail any subsequent ioctl():s when the chip is gone */
if (!chip)
@@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
+#ifdef CONFIG_COMPAT
+static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
/**
* gpio_chrdev_open() - open the chardev for ioctl operations
* @inode: inode for this chardev
@@ -431,7 +440,9 @@ static const struct file_operations gpio_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = gpio_ioctl,
- .compat_ioctl = gpio_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = gpio_ioctl_compat,
+#endif
};
static void gpiodevice_release(struct device *dev)
@@ -618,6 +629,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
goto err_free_label;
}
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
for (i = 0; i < chip->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
@@ -649,8 +662,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
}
}
- spin_unlock_irqrestore(&gpio_lock, flags);
-
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@@ -1356,10 +1367,13 @@ done:
/*
* This descriptor validation needs to be inserted verbatim into each
* function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication.
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
*/
#define VALIDATE_DESC(desc) do { \
- if (!desc || !desc->gdev) { \
+ if (!desc) \
+ return 0; \
+ if (!desc->gdev) { \
pr_warn("%s: invalid GPIO\n", __func__); \
return -EINVAL; \
} \
@@ -1370,7 +1384,9 @@ done:
} } while (0)
#define VALIDATE_DESC_VOID(desc) do { \
- if (!desc || !desc->gdev) { \
+ if (!desc) \
+ return; \
+ if (!desc->gdev) { \
pr_warn("%s: invalid GPIO\n", __func__); \
return; \
} \
@@ -2066,17 +2082,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
*/
int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
- return -EINVAL;
+ struct gpio_desc *desc;
+
+ desc = gpiochip_get_desc(chip, offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ /* Flush direction if something changed behind our back */
+ if (chip->get_direction) {
+ int dir = chip->get_direction(chip, offset);
+
+ if (dir)
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ }
- if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
+ if (test_bit(FLAG_IS_OUT, &desc->flags)) {
chip_err(chip,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+ set_bit(FLAG_USED_AS_IRQ, &desc->flags);
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 992f00b65be4..01c36b8d6222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -799,6 +799,7 @@ struct amdgpu_ring {
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
+ int vmid;
};
/*
@@ -936,7 +937,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
+ uint32_t oa_base, uint32_t oa_size,
+ bool vmid_switch);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 199f76baf22c..8943099eb135 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
return result;
}
+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+ CGS_FUNC_ADEV;
+ if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+ release_firmware(adev->pm.fw);
+ return 0;
+ }
+ /* cannot release other firmware because they are not created by cgs */
+ return -EINVAL;
+}
+
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
@@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_pm_query_clock_limits,
amdgpu_cgs_set_camera_voltages,
amdgpu_cgs_get_firmware_info,
+ amdgpu_cgs_rel_firmware,
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state,
amdgpu_cgs_get_active_displays_info,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bb8b149786d7..964f31404f17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
*/
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
- if (adev->mode_info.atom_context)
+ if (adev->mode_info.atom_context) {
kfree(adev->mode_info.atom_context->scratch);
+ kfree(adev->mode_info.atom_context->iio);
+ }
kfree(adev->mode_info.atom_context);
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
@@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_block_status[i].valid = false;
}
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (adev->ip_blocks[i].funcs->late_fini)
+ adev->ip_blocks[i].funcs->late_fini((void *)adev);
+ }
+
return 0;
}
@@ -1513,8 +1520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_atombios_has_gpu_virtualization_table(adev);
/* Post card if necessary */
- if (!amdgpu_card_posted(adev) ||
- adev->virtualization.supports_sr_iov) {
+ if (!amdgpu_card_posted(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 34e35423b78e..7a0b1e50f293 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -122,6 +122,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
+ int vmid = 0, old_vmid = ring->vmid;
struct fence *hwf;
uint64_t ctx;
@@ -135,9 +136,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (job) {
vm = job->vm;
ctx = job->ctx;
+ vmid = job->vm_id;
} else {
vm = NULL;
ctx = 0;
+ vmid = 0;
}
if (!ring->ready) {
@@ -163,7 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
job->gds_base, job->gds_size,
job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
+ job->oa_base, job->oa_size,
+ (ring->current_ctx == ctx) && (old_vmid != vmid));
if (r) {
amdgpu_ring_undo(ring);
return r;
@@ -180,7 +184,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
need_ctx_switch = ring->current_ctx != ctx;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
-
/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
continue;
@@ -188,6 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
need_ctx_switch);
need_ctx_switch = false;
+ ring->vmid = vmid;
}
if (ring->funcs->emit_hdp_invalidate)
@@ -198,6 +202,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vm_id)
amdgpu_vm_reset_id(adev, job->vm_id);
+ ring->vmid = old_vmid;
amdgpu_ring_undo(ring);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 6bd961fb43dc..82256558e0f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle)
if (ret)
return ret;
-#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
- amdgpu_pm_sysfs_fini(adev);
- amd_powerplay_fini(adev->powerplay.pp_handle);
- }
-#endif
-
return ret;
}
@@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle)
return ret;
}
+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pp_enabled) {
+ amdgpu_pm_sysfs_fini(adev);
+ amd_powerplay_fini(adev->powerplay.pp_handle);
+ }
+
+ if (adev->powerplay.ip_funcs->late_fini)
+ adev->powerplay.ip_funcs->late_fini(
+ adev->powerplay.pp_handle);
+#endif
+}
+
static int amdgpu_pp_suspend(void *handle)
{
int ret = 0;
@@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.sw_fini = amdgpu_pp_sw_fini,
.hw_init = amdgpu_pp_hw_init,
.hw_fini = amdgpu_pp_hw_fini,
+ .late_fini = amdgpu_pp_late_fini,
.suspend = amdgpu_pp_suspend,
.resume = amdgpu_pp_resume,
.is_idle = amdgpu_pp_is_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3b02272db678..870f9494252c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
ring->ring = NULL;
ring->ring_obj = NULL;
+ amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8bf84efafb04..48618ee324eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
return r;
}
r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+ memset(sa_manager->cpu_ptr, 0, sa_manager->size);
amdgpu_bo_unreserve(sa_manager->bo);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 01abfc21b4a2..e19520c4b4b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int r;
- if (adev->uvd.vcpu_bo == NULL)
- return 0;
+ kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
- r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
- if (!r) {
- amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
- amdgpu_bo_unpin(adev->uvd.vcpu_bo);
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
- }
+ if (adev->uvd.vcpu_bo) {
+ r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+ if (!r) {
+ amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+ amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+ amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+ }
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+ amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+ }
amdgpu_ring_fini(&adev->uvd.ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9f36ed30ba11..62a4c127620f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -298,7 +298,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size)
+ uint32_t oa_base, uint32_t oa_size,
+ bool vmid_switch)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -312,8 +313,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
int r;
if (ring->funcs->emit_pipeline_sync && (
- pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
- ring->type == AMDGPU_RING_TYPE_COMPUTE))
+ pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
amdgpu_ring_emit_pipeline_sync(ring);
if (ring->funcs->emit_vm_flush &&
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index ea407db1fbcf..5ec1f1e9c983 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle)
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 518dca43b133..9dc4e24e31e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
+static void cik_sdma_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
@@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+
+ cik_sdma_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
if (r)
return r;
- /* unhalt the MEs */
- cik_sdma_enable(adev, true);
+ /* halt the engine before programing */
+ cik_sdma_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev);
@@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ cik_sdma_free_microcode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
index 245cabf06575..ed03b75175d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle)
static int fiji_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 7f18a53ab53a..8c6ad1e72f02 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -991,6 +991,22 @@ out:
return err;
}
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+}
+
/**
* gfx_v7_0_tiling_mode_table_init - init the hw tiling table
*
@@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle)
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ gfx_v7_0_free_microcode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index f19bab68fd83..9f6f8669edc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -836,6 +836,26 @@ err1:
return r;
}
+
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ if ((adev->asic_type != CHIP_STONEY) &&
+ (adev->asic_type != CHIP_TOPAZ))
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+
+ kfree(adev->gfx.rlc.register_list_format);
+}
+
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -1983,7 +2003,7 @@ static int gfx_v8_0_sw_fini(void *handle)
gfx_v8_0_rlc_fini(adev);
- kfree(adev->gfx.rlc.register_list_format);
+ gfx_v8_0_free_microcode(adev);
return 0;
}
@@ -3974,11 +3994,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x3a00161a);
amdgpu_ring_write(ring, 0x0000002e);
break;
- case CHIP_TOPAZ:
case CHIP_CARRIZO:
amdgpu_ring_write(ring, 0x00000002);
amdgpu_ring_write(ring, 0x00000000);
break;
+ case CHIP_TOPAZ:
+ amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
+ 0x00000000 : 0x00000002);
+ amdgpu_ring_write(ring, 0x00000000);
+ break;
case CHIP_STONEY:
amdgpu_ring_write(ring, 0x00000000);
amdgpu_ring_write(ring, 0x00000000);
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
index 460bc8ad37e6..825ccd63f2dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
@@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle)
static int iceland_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index f4c3130d3fdb..b556bd0a8797 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
}
}
+static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/**
* sdma_v2_4_init_microcode - load ucode images from disk
*
@@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+ sdma_v2_4_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
return -EINVAL;
}
- /* unhalt the MEs */
- sdma_v2_4_enable(adev, true);
+ /* halt the engine before programing */
+ sdma_v2_4_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v2_4_gfx_resume(adev);
@@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ sdma_v2_4_free_microcode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 31d99b0010f7..532ea88da66a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
}
}
+static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/**
* sdma_v3_0_init_microcode - load ucode images from disk
*
@@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+
+ /* unhalt the MEs */
+ sdma_v3_0_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v3_0_ctx_switch_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
}
}
- /* unhalt the MEs */
- sdma_v3_0_enable(adev, true);
- /* enable sdma ring preemption */
- sdma_v3_0_ctx_switch_enable(adev, true);
+ /* disble sdma engine before programing it */
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v3_0_gfx_resume(adev);
@@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ sdma_v3_0_free_microcode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
index b7615cefcac4..f06f6f4dc3a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
@@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle)
static int tonga_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 6080951d539d..afce1edbe250 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -157,6 +157,7 @@ struct amd_ip_funcs {
int (*hw_init)(void *handle);
/* tears down the hw state */
int (*hw_fini)(void *handle);
+ void (*late_fini)(void *handle);
/* handles IP specific hw/sw changes for suspend */
int (*suspend)(void *handle);
/* handles IP specific hw/sw changes for resume */
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index a461e155a160..7464daf89ca1 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info);
+typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
+ enum cgs_ucode_id type);
+
typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
@@ -645,6 +648,7 @@ struct cgs_ops {
cgs_set_camera_voltages_t set_camera_voltages;
/* Firmware Info */
cgs_get_firmware_info get_firmware_info;
+ cgs_rel_firmware rel_firmware;
/* cg pg interface*/
cgs_set_powergating_state set_powergating_state;
cgs_set_clockgating_state set_clockgating_state;
@@ -738,6 +742,8 @@ struct cgs_device
CGS_CALL(set_camera_voltages,dev,mask,voltages)
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_rel_firmware(dev, type) \
+ CGS_CALL(rel_firmware, dev, type)
#define cgs_set_powergating_state(dev, block_type, state) \
CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 8e345bfddb69..e629f8a9fe93 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -73,11 +73,14 @@ static int pp_sw_init(void *handle)
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
- goto err;
+ goto err1;
pr_info("amdgpu: powerplay initialized\n");
return 0;
+err1:
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
err:
pr_err("amdgpu: powerplay initialization failed\n");
return ret;
@@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle)
if (hwmgr->hwmgr_func->backend_fini != NULL)
ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 46410e3c7349..fb88e4e5d625 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
pem_unregister_interrupts(eventmgr);
pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-
- if (eventmgr != NULL)
- kfree(eventmgr);
}
int eventmgr_init(struct pp_instance *handle)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 24a16e49b571..586f73276226 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -1830,7 +1830,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i].value);
+ return vddci_table->entries[i-1].value);
}
static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 1c48917da3cf..20f20e075588 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
if (hwmgr == NULL || hwmgr->ps == NULL)
return -EINVAL;
+ /* do hwmgr finish*/
+ kfree(hwmgr->backend);
+
+ kfree(hwmgr->start_thermal_controller.function_list);
+
+ kfree(hwmgr->set_temperature_range.function_list);
+
kfree(hwmgr->ps);
kfree(hwmgr);
return 0;
@@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i].value);
+ return vddci_table->entries[i-1].value);
}
int phm_find_boot_level(void *table,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
index 0b99ab3ba0c5..ae96f14b827c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
@@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
(uint8_t *)&data->power_tune_table,
- sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
+ (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to download PmFuseTable Failed!",
return -EINVAL);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 16fed487973b..d27e8c40602a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
}
- /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
- for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
- data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
- /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
- /* param1 is for corresponding std voltage */
- data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
-
- if (NULL != allowed_vdd_mclk_table) {
- /* Initialize Vddci DPM table based on allow Mclk values */
- for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
- data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
- data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
- data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
- data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
- data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
- }
-
/* setup PCIE gen speed levels*/
tonga_setup_default_pcie_tables(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 10e3630ee39d..296ec7ef6d45 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -1040,48 +1040,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- if (NULL != hwmgr->soft_pp_table) {
- kfree(hwmgr->soft_pp_table);
+ if (NULL != hwmgr->soft_pp_table)
hwmgr->soft_pp_table = NULL;
- }
- if (NULL != pp_table_information->vdd_dep_on_sclk)
- pp_table_information->vdd_dep_on_sclk = NULL;
+ kfree(pp_table_information->vdd_dep_on_sclk);
+ pp_table_information->vdd_dep_on_sclk = NULL;
- if (NULL != pp_table_information->vdd_dep_on_mclk)
- pp_table_information->vdd_dep_on_mclk = NULL;
+ kfree(pp_table_information->vdd_dep_on_mclk);
+ pp_table_information->vdd_dep_on_mclk = NULL;
- if (NULL != pp_table_information->valid_mclk_values)
- pp_table_information->valid_mclk_values = NULL;
+ kfree(pp_table_information->valid_mclk_values);
+ pp_table_information->valid_mclk_values = NULL;
- if (NULL != pp_table_information->valid_sclk_values)
- pp_table_information->valid_sclk_values = NULL;
+ kfree(pp_table_information->valid_sclk_values);
+ pp_table_information->valid_sclk_values = NULL;
- if (NULL != pp_table_information->vddc_lookup_table)
- pp_table_information->vddc_lookup_table = NULL;
+ kfree(pp_table_information->vddc_lookup_table);
+ pp_table_information->vddc_lookup_table = NULL;
- if (NULL != pp_table_information->vddgfx_lookup_table)
- pp_table_information->vddgfx_lookup_table = NULL;
+ kfree(pp_table_information->vddgfx_lookup_table);
+ pp_table_information->vddgfx_lookup_table = NULL;
- if (NULL != pp_table_information->mm_dep_table)
- pp_table_information->mm_dep_table = NULL;
+ kfree(pp_table_information->mm_dep_table);
+ pp_table_information->mm_dep_table = NULL;
- if (NULL != pp_table_information->cac_dtp_table)
- pp_table_information->cac_dtp_table = NULL;
+ kfree(pp_table_information->cac_dtp_table);
+ pp_table_information->cac_dtp_table = NULL;
- if (NULL != hwmgr->dyn_state.cac_dtp_table)
- hwmgr->dyn_state.cac_dtp_table = NULL;
+ kfree(hwmgr->dyn_state.cac_dtp_table);
+ hwmgr->dyn_state.cac_dtp_table = NULL;
- if (NULL != pp_table_information->ppm_parameter_table)
- pp_table_information->ppm_parameter_table = NULL;
+ kfree(pp_table_information->ppm_parameter_table);
+ pp_table_information->ppm_parameter_table = NULL;
- if (NULL != pp_table_information->pcie_table)
- pp_table_information->pcie_table = NULL;
+ kfree(pp_table_information->pcie_table);
+ pp_table_information->pcie_table = NULL;
- if (NULL != hwmgr->pptable) {
- kfree(hwmgr->pptable);
- hwmgr->pptable = NULL;
- }
+ kfree(hwmgr->pptable);
+ hwmgr->pptable = NULL;
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 673a75c74e18..8e52a2e82db5 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
static int fiji_smu_fini(struct pp_smumgr *smumgr)
{
+ struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+
+ smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
if (smumgr->backend) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index de618ead9db8..043b6ac09d5f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -469,6 +469,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr)
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c483baf6b4fb..0728c1e3d97a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
int smum_fini(struct pp_smumgr *smumgr)
{
+ kfree(smumgr->device);
kfree(smumgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 32820b680d88..b22722eabafc 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
static int tonga_smu_fini(struct pp_smumgr *smumgr)
{
+ struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
+
+ smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
+ smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
if (smumgr->backend != NULL) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0ec1ad961e0d..dc723f7ead7d 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
.volatile_reg = fsl_dcu_drm_is_volatile_reg,
+ .max_register = 0x11fc,
};
static int fsl_dcu_drm_irq_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index fbe304ee6c80..2aec27dbb5bb 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
}
adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
- if (!adreno_gpu->memptrs) {
+ if (IS_ERR(adreno_gpu->memptrs)) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index d9759bf3482e..c6cf837c5193 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr;
fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ if (IS_ERR(fbi->screen_base)) {
+ ret = PTR_ERR(fbi->screen_base);
+ goto fail_unlock;
+ }
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = fbdev->bo->size;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 7daf4054dd2b..69836f5685b1 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
return ERR_CAST(pages);
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (msm_obj->vaddr == NULL)
+ return ERR_PTR(-ENOMEM);
}
return msm_obj->vaddr;
}
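The msm changes above switch msm_gem_vaddr()'s failure convention from a bare NULL to an encoded error pointer, so callers test IS_ERR()/PTR_ERR() instead of !ptr. A standalone sketch of that encoding, with simplified stand-ins for the kernel's helpers (the real macros live in linux/err.h):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Simplified versions of the kernel helpers: small negative errno
 * values are packed into the very top of the address space, where no
 * valid pointer can live. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

/* Stand-in for msm_gem_vaddr(): map (here, allocate) a buffer and
 * return an error pointer rather than NULL on failure. */
static void *get_vaddr(size_t size)
{
	void *vaddr = malloc(size);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);
	return vaddr;
}

int main(void)
{
	void *buf = get_vaddr(64);

	if (IS_ERR(buf)) {
		fprintf(stderr, "map failed: %ld\n", PTR_ERR(buf));
		return 1;
	}
	memset(buf, 0, 64);
	free(buf);
	return 0;
}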
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b89ca5174863..eb4bb8b2f3a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->dev = dev;
submit->gpu = gpu;
+ submit->fence = NULL;
submit->pid = get_pid(task_pid(current));
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
submit->nr_cmds = 0;
+ INIT_LIST_HEAD(&submit->node);
INIT_LIST_HEAD(&submit->bo_list);
ww_acquire_init(&submit->ticket, &reservation_ww_class);
@@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+ /* make sure we don't have garbage flags, in case we hit
+ * error path before flags is initialized:
+ */
+ submit->bos[i].flags = 0;
+
ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
if (ret) {
ret = -EFAULT;
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index b48f73ac6389..0857710c2ff2 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf = msm_gem_vaddr_locked(&obj->base);
+ if (IS_ERR(buf))
+ continue;
+
buf += iova - submit->bos[idx].iova;
rd_write_section(rd, RD_GPUADDR,
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 1f14b908b221..42f5359cf988 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->start = msm_gem_vaddr_locked(ring->bo);
+ if (IS_ERR(ring->start)) {
+ ret = PTR_ERR(ring->start);
+ goto fail;
+ }
ring->end = ring->start + (size / 4);
ring->cur = ring->start;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index c612dc1f1eb4..126a85cc81bc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -16,9 +16,9 @@ enum nvkm_devidx {
NVKM_SUBDEV_MC,
NVKM_SUBDEV_BUS,
NVKM_SUBDEV_TIMER,
+ NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_FB,
NVKM_SUBDEV_LTC,
- NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_MMU,
NVKM_SUBDEV_BAR,
NVKM_SUBDEV_PMU,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
index db10c11f0595..c5a6ebd5a478 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
@@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
struct nvbios_ocfg {
- u16 match;
+ u8 proto;
+ u8 flags;
u16 clkcmp[2];
};
@@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 57aaf98a26f9..300ea03be8f0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -552,6 +552,7 @@ nouveau_fbcon_init(struct drm_device *dev)
if (ret)
goto fini;
+ fbcon->helper.fbdev->pixmap.buf_align = 4;
return 0;
fini:
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 0f3e4bb411cc..7d9248b8c664 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t fg;
uint32_t bg;
uint32_t dsize;
- uint32_t width;
uint32_t *data = (uint32_t *)image->data;
int ret;
@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 8);
- dsize = ALIGN(width * image->height, 32) >> 5;
-
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | width);
+ OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ dsize = ALIGN(image->width * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
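The imageblit fixes above stop padding the width before sizing the source bitmap: the glyph data is width * height bits, rounded up to whole 32-bit words. A quick arithmetic sketch of that dword count, with ALIGN written out as the usual power-of-two round-up:

#include <stdio.h>
#include <stdint.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * matching the kernel's ALIGN() macro. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))

/* Number of 32-bit words needed for a 1-bit-per-pixel image. */
static uint32_t image_dwords(uint32_t width, uint32_t height)
{
	return ALIGN(width * height, 32) >> 5;
}

int main(void)
{
	/* An 8x16 glyph is 128 bits -> 4 dwords; a 12x22 glyph is
	 * 264 bits -> 9 dwords once rounded up. */
	printf("%u %u\n", (unsigned)image_dwords(8, 16),
	       (unsigned)image_dwords(12, 22));
	return 0;
}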
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 33d9ee0fac40..1aeb698e9707 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, 0);
OUT_RING(chan, image->dy);
+ dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index a0913359ac05..839f4c8c1805 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, 0);
OUT_RING (chan, image->dy);
+ dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index a74c5dd27dc0..e2a64ed14b22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
nvkm-y += nvkm/engine/disp/sornv50.o
nvkm-y += nvkm/engine/disp/sorg94.o
nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/dport.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index f0314664349c..5dd34382f55a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
mask |= 0x0001 << or;
mask |= 0x0100 << head;
+
list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
@@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
if (!outp)
return NULL;
+ *conf = (ctrl & 0x00000f00) >> 8;
switch (outp->info.type) {
case DCB_OUTPUT_TMDS:
- *conf = (ctrl & 0x00000f00) >> 8;
if (*conf == 5)
*conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- *conf = disp->sor.lvdsconf;
- break;
- case DCB_OUTPUT_DP:
- *conf = (ctrl & 0x00000f00) >> 8;
+ *conf |= disp->sor.lvdsconf;
break;
- case DCB_OUTPUT_ANALOG:
default:
- *conf = 0x00ff;
break;
}
- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+ data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+ &ver, &hdr, &cnt, &len, &info2);
if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index b6944142d616..f4b9cf8574be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -36,7 +36,7 @@ gm107_disp = {
.outp.internal.crt = nv50_dac_output_new,
.outp.internal.tmds = nv50_sor_output_new,
.outp.internal.lvds = nv50_sor_output_new,
- .outp.internal.dp = gf119_sor_dp_new,
+ .outp.internal.dp = gm107_sor_dp_new,
.dac.nr = 3,
.dac.power = nv50_dac_power,
.dac.sense = nv50_dac_sense,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 4226d2153b9c..fcb1b0c46d64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
if (!outp)
return NULL;
+ *conf = (ctrl & 0x00000f00) >> 8;
if (outp->info.location == 0) {
switch (outp->info.type) {
case DCB_OUTPUT_TMDS:
- *conf = (ctrl & 0x00000f00) >> 8;
if (*conf == 5)
*conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- *conf = disp->sor.lvdsconf;
+ *conf |= disp->sor.lvdsconf;
break;
- case DCB_OUTPUT_DP:
- *conf = (ctrl & 0x00000f00) >> 8;
- break;
- case DCB_OUTPUT_ANALOG:
default:
- *conf = 0x00ff;
break;
}
} else {
@@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
pclk = pclk / 2;
}
- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+ data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+ &ver, &hdr, &cnt, &len, &info2);
if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index e9067ba4e179..4e983f6d7032 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
-int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
- struct nvkm_output **);
+int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
+
+int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index b4b41b135643..22706c0a54b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -40,8 +40,7 @@ static int
gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 loff = gf119_sor_loff(outp);
- nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+ nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
@@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
return 0;
}
-static int
+int
gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
int ln, int vs, int pe, int pc)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
new file mode 100644
index 000000000000..37790b2617c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+int
+gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ const u32 soff = outp->base.or * 0x800;
+ const u32 data = 0x01010101 * pattern;
+ if (outp->base.info.sorconf.link & 1)
+ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+ else
+ nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+ return 0;
+}
+
+static const struct nvkm_output_dp_func
+gm107_sor_dp_func = {
+ .pattern = gm107_sor_dp_pattern,
+ .lnk_pwr = g94_sor_dp_lnk_pwr,
+ .lnk_ctl = gf119_sor_dp_lnk_ctl,
+ .drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gm107_sor_dp_new(struct nvkm_disp *disp, int index,
+ struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+ return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 2cfbef9c344f..c44fa7ea672a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
}
static int
-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
- struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 soff = gm200_sor_soff(outp);
- const u32 data = 0x01010101 * pattern;
- if (outp->base.info.sorconf.link & 1)
- nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
- else
- nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
- return 0;
-}
-
-static int
gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
@@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
static const struct nvkm_output_dp_func
gm200_sor_dp_func = {
- .pattern = gm200_sor_dp_pattern,
+ .pattern = gm107_sor_dp_pattern,
.lnk_pwr = gm200_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gm200_sor_dp_drv_ctl,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 9513badb8220..ae9ab5b1ab97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
}
static const struct nvkm_enum gf100_mp_warp_error[] = {
- { 0x00, "NO_ERROR" },
- { 0x01, "STACK_MISMATCH" },
+ { 0x01, "STACK_ERROR" },
+ { 0x02, "API_STACK_ERROR" },
+ { 0x03, "RET_EMPTY_STACK_ERROR" },
+ { 0x04, "PC_WRAP" },
{ 0x05, "MISALIGNED_PC" },
- { 0x08, "MISALIGNED_GPR" },
- { 0x09, "INVALID_OPCODE" },
- { 0x0d, "GPR_OUT_OF_BOUNDS" },
- { 0x0e, "MEM_OUT_OF_BOUNDS" },
- { 0x0f, "UNALIGNED_MEM_ACCESS" },
+ { 0x06, "PC_OVERFLOW" },
+ { 0x07, "MISALIGNED_IMMC_ADDR" },
+ { 0x08, "MISALIGNED_REG" },
+ { 0x09, "ILLEGAL_INSTR_ENCODING" },
+ { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
+ { 0x0b, "ILLEGAL_INSTR_PARAM" },
+ { 0x0c, "INVALID_CONST_ADDR" },
+ { 0x0d, "OOR_REG" },
+ { 0x0e, "OOR_ADDR" },
+ { 0x0f, "MISALIGNED_ADDR" },
{ 0x10, "INVALID_ADDR_SPACE" },
- { 0x11, "INVALID_PARAM" },
+ { 0x11, "ILLEGAL_INSTR_PARAM2" },
+ { 0x12, "INVALID_CONST_ADDR_LDC" },
+ { 0x13, "GEOMETRY_SM_ERROR" },
+ { 0x14, "DIVERGENT" },
+ { 0x15, "WARP_EXIT" },
{}
};
static const struct nvkm_bitfield gf100_mp_global_error[] = {
+ { 0x00000001, "SM_TO_SM_FAULT" },
+ { 0x00000002, "L1_ERROR" },
{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
- { 0x00000008, "OUT_OF_STACK_SPACE" },
+ { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
+ { 0x00000010, "BPT_INT" },
+ { 0x00000020, "BPT_PAUSE" },
+ { 0x00000040, "SINGLE_STEP_COMPLETE" },
+ { 0x20000000, "ECC_SEC_ERROR" },
+ { 0x40000000, "ECC_DED_ERROR" },
+ { 0x80000000, "TIMEOUT" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
index a5e92135cd77..9efb1b48cd54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
@@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
{
u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
if (data) {
- info->match = nvbios_rd16(bios, data + 0x00);
+ info->proto = nvbios_rd08(bios, data + 0x00);
+ info->flags = nvbios_rd16(bios, data + 0x01);
info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
}
@@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
}
u16
-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
+nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
{
u16 data, idx = 0;
while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
- if (info->match == type)
+ if ((info->proto == proto || info->proto == 0xff) &&
+ (info->flags == flags))
break;
}
return data;
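nvbios_ocfg_match() now splits the old 16-bit match value into a protocol byte and a flags byte, treating proto 0xff as a wildcard. A standalone sketch of that lookup over an in-memory table; the entry values below are made up for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct ocfg {
	uint8_t  proto;		/* output protocol, 0xff matches anything */
	uint8_t  flags;
	uint16_t clkcmp[2];
};

/* Return the first entry whose proto/flags match, honouring the
 * 0xff "any protocol" wildcard, or NULL if nothing matches. */
static const struct ocfg *ocfg_match(const struct ocfg *tbl, size_t n,
				     uint8_t proto, uint8_t flags)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if ((tbl[i].proto == proto || tbl[i].proto == 0xff) &&
		    tbl[i].flags == flags)
			return &tbl[i];
	}
	return NULL;
}

int main(void)
{
	static const struct ocfg tbl[] = {
		{ 0x05, 0x01, { 0x1000, 0x2000 } },
		{ 0xff, 0x00, { 0x3000, 0x4000 } },	/* wildcard proto */
	};
	const struct ocfg *hit = ocfg_match(tbl, 2, 0x02, 0x00);

	printf("clkcmp[0] = 0x%04x\n", hit ? hit->clkcmp[0] : 0);
	return 0;
}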
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index e292f5679418..389fb13a1998 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
}
static void
-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
+gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
{
struct nvkm_subdev *subdev = &ltc->subdev;
struct nvkm_device *device = subdev->device;
- u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
+ u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
u32 stat = nvkm_rd32(device, base + 0x00c);
if (stat) {
@@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc)
while (mask) {
u32 s, c = __ffs(mask);
for (s = 0; s < ltc->lts_nr; s++)
- gm107_ltc_lts_isr(ltc, c, s);
+ gm107_ltc_intr_lts(ltc, c, s);
mask &= ~(1 << c);
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
index 2a29bfd5125a..e18e0dc19ec8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
@@ -46,7 +46,7 @@ static const struct nvkm_ltc_func
gm200_ltc = {
.oneinit = gm200_ltc_oneinit,
.init = gm200_ltc_init,
- .intr = gm107_ltc_intr, /*XXX: not validated */
+ .intr = gm107_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc = 16,
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 9ed8272e54ae..56c43f355ce3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct regulator *vdds_dsi;
- int r;
if (dsi->vdds_dsi_reg != NULL)
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index e129245eb8a9..9255c0e1e4a7 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -120,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
static int hdmi_init_regulator(void)
{
- int r;
struct regulator *reg;
if (hdmi.vdda_reg != NULL)
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 904d0754ad78..0f18b76c7906 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -456,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
-
- if (debug_dump_regs) {
- DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
- vc4_hvs_dump_state(dev);
- }
-
if (crtc->state->event) {
unsigned long flags;
@@ -473,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock_irqsave(&dev->event_lock, flags);
vc4_crtc->event = crtc->state->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->state->event = NULL;
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+ }
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+ vc4_hvs_dump_state(dev);
}
}
@@ -500,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
{
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ u32 chan = vc4_crtc->channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (vc4_crtc->event) {
+ if (vc4_crtc->event &&
+ (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -556,6 +565,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+ drm_crtc_vblank_put(crtc);
drm_framebuffer_unreference(flip_state->fb);
kfree(flip_state);
@@ -598,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
return ret;
}
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
/* Immediately update the plane's legacy fb pointer, so that later
* modeset prep sees the state that will be present when the semaphore
* is released.
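The vc4 flip path now takes a vblank reference before queuing the async flip and drops it once the flip completes or the event has been sent, so vblank interrupts stay enabled while a flip is outstanding. A bare-bones get/put sketch of that pairing; the plain counter below stands in for the DRM vblank refcount:

#include <stdio.h>
#include <assert.h>

/* Stand-in for the per-CRTC vblank reference count: interrupts stay
 * enabled while the count is non-zero. */
static int vblank_refs;

static int vblank_get(void)
{
	vblank_refs++;
	return 0;
}

static void vblank_put(void)
{
	assert(vblank_refs > 0);
	vblank_refs--;
}

static void queue_async_flip(void)
{
	/* Taken before the flip is queued... */
	vblank_get();
	printf("flip queued, refs=%d\n", vblank_refs);
}

static void async_flip_complete(void)
{
	/* ...and released only once the flip has completed. */
	vblank_put();
	printf("flip done,   refs=%d\n", vblank_refs);
}

int main(void)
{
	queue_async_flip();
	async_flip_complete();
	return 0;
}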
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 3446ece21b4a..250ed7e3754c 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = {
};
static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
DRM_ROOT_ONLY),
};
@@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = {
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index cb37751bc99f..861a623bc185 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev,
return -ENOMEM;
/* Make sure that any outstanding modesets have finished. */
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- kfree(c);
- return ret;
+ if (nonblock) {
+ ret = down_trylock(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return -EBUSY;
+ }
+ } else {
+ ret = down_interruptible(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return ret;
+ }
}
ret = drm_atomic_helper_prepare_planes(dev, state);
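The commit path above distinguishes nonblocking commits, which must not wait, from blocking ones: trylock and return -EBUSY versus an interruptible wait on the same semaphore. A POSIX-semaphore sketch of the same split, with sem_trywait/sem_wait standing in for down_trylock/down_interruptible:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>
#include <semaphore.h>

/* Serialize "modesets": nonblocking callers bail out with -EBUSY,
 * blocking callers wait until the previous one completes. */
static int acquire_modeset(sem_t *sem, bool nonblock)
{
	if (nonblock) {
		if (sem_trywait(sem))
			return -EBUSY;
		return 0;
	}
	if (sem_wait(sem))
		return -errno;
	return 0;
}

int main(void)
{
	sem_t sem;

	sem_init(&sem, 0, 1);
	printf("first:  %d\n", acquire_modeset(&sem, true));	/* 0      */
	printf("second: %d\n", acquire_modeset(&sem, true));	/* -EBUSY */
	sem_post(&sem);
	sem_destroy(&sem);
	return 0;
}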
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 6163b95c5411..f99eece4cc97 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -341,6 +341,10 @@
#define SCALER_DISPLACT0 0x00000030
#define SCALER_DISPLACT1 0x00000034
#define SCALER_DISPLACT2 0x00000038
+#define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \
+ (x) * (SCALER_DISPLACT1 - \
+ SCALER_DISPLACT0))
+
#define SCALER_DISPCTRL0 0x00000040
# define SCALER_DISPCTRLX_ENABLE BIT(31)
# define SCALER_DISPCTRLX_RESET BIT(30)
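The new SCALER_DISPLACTX() helper indexes the banked DISPLACT registers by deriving the stride from the first two fixed offsets rather than hard-coding it. A tiny sketch of that stride-based indexing, reusing the 0x30/0x34 offsets from the header above:

#include <stdio.h>

/* Fixed offsets of the first two instances of a banked register. */
#define REG_DISPLACT0	0x00000030u
#define REG_DISPLACT1	0x00000034u

/* Address of instance x, using the distance between instance 0 and 1
 * as the stride, the same pattern as SCALER_DISPLACTX() above. */
#define REG_DISPLACTX(x) \
	(REG_DISPLACT0 + (x) * (REG_DISPLACT1 - REG_DISPLACT0))

int main(void)
{
	unsigned int chan;

	for (chan = 0; chan < 3; chan++)
		printf("DISPLACT%u at 0x%08x\n", chan, REG_DISPLACTX(chan));
	return 0;
}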
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 6bd881be24ea..5eb1f9e17a98 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,6 +41,7 @@
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
@@ -82,6 +83,7 @@ struct its_node {
u64 flags;
u32 ite_size;
u32 device_ids;
+ int numa_node;
};
#define ITS_ITT_ALIGN SZ_256
@@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d)
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ unsigned int cpu;
+ const struct cpumask *cpu_mask = cpu_online_mask;
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_collection *target_col;
u32 id = its_get_event_id(d);
+ /* lpi cannot be routed to a redistributor that is on a foreign node */
+ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ if (its_dev->its->numa_node >= 0) {
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+ if (!cpumask_intersects(mask_val, cpu_mask))
+ return -EINVAL;
+ }
+ }
+
+ cpu = cpumask_any_and(mask_val, cpu_mask);
+
if (cpu >= nr_cpu_ids)
return -EINVAL;
@@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void)
list_for_each_entry(its, &its_nodes, entry) {
u64 target;
+ /* avoid cross node collections and its mapping */
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ struct device_node *cpu_node;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (its->numa_node != NUMA_NO_NODE &&
+ its->numa_node != of_node_to_nid(cpu_node))
+ continue;
+ }
+
/*
* We now have to bind each collection to its target
* redistributor.
@@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ const struct cpumask *cpu_mask = cpu_online_mask;
+
+ /* get the cpu_mask of local node */
+ if (its_dev->its->numa_node >= 0)
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
/* Bind the LPI to the first possible CPU */
- its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+ its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
/* Map the GIC IRQ and event to the device */
its_send_mapvi(its_dev, d->hwirq, event);
@@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+ struct its_node *its = data;
+
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -1452,6 +1488,14 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_cavium_22375,
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+ {
+ .desc = "ITS: Cavium erratum 23144",
+ .iidr = 0xa100034c, /* ThunderX pass 1.x */
+ .mask = 0xffff0fff,
+ .init = its_enable_quirk_cavium_23144,
+ },
+#endif
{
}
};
@@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node,
its->base = its_base;
its->phys_base = res.start;
its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->numa_node = of_node_to_nid(node);
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
if (!its->cmd_base) {
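The ITS changes above restrict LPI routing to CPUs on the ITS's own NUMA node when the Cavium 23144 erratum applies: the requested affinity mask is intersected with the node's mask, and the request is rejected if nothing remains. A minimal bitmask sketch of that check, with cpumasks reduced to a 64-bit word for illustration:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Pick a target CPU from 'requested', but only among CPUs that sit on
 * the local node ('node_mask'); fail if the two sets do not intersect. */
static int pick_cpu(uint64_t requested, uint64_t node_mask)
{
	uint64_t allowed = requested & node_mask;
	int cpu;

	if (!allowed)
		return -EINVAL;

	for (cpu = 0; cpu < 64; cpu++)
		if (allowed & (1ULL << cpu))
			return cpu;
	return -EINVAL;
}

int main(void)
{
	uint64_t node0 = 0x0fULL;	/* CPUs 0-3 on the local node */

	printf("%d\n", pick_cpu(0x30ULL, node0));	/* no overlap: -EINVAL */
	printf("%d\n", pick_cpu(0x0aULL, node0));	/* overlap: CPU 1      */
	return 0;
}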
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index fb042ba9a3db..2c5ba0e704bf 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable)
while (count--) {
val = readl_relaxed(rbase + GICR_WAKER);
- if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
+ if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
break;
cpu_relax();
udelay(1);
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index e7155db01d55..73addb4b625b 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data,
/* set polarity for external interrupts only */
for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
if (priv->ext_irqs[i] == data->hwirq) {
- ret = pic32_set_ext_polarity(i + 1, flow_type);
+ ret = pic32_set_ext_polarity(i, flow_type);
if (ret)
return ret;
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c984321d1881..5d438ad3ee32 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card)
* switch to HS200 mode if bus width is set successfully.
*/
err = mmc_select_bus_width(card);
- if (!err) {
+ if (err >= 0) {
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
} else if (mmc_card_hs(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
- if (!err) {
+ if (err >= 0) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 7fc8b7aa83f0..2ee4c21ec55e 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
[SDXC_CLK_400K] = { .output = 180, .sample = 180 },
[SDXC_CLK_25M] = { .output = 180, .sample = 75 },
[SDXC_CLK_50M] = { .output = 150, .sample = 120 },
- [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 },
- [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 120 },
+ [SDXC_CLK_50M_DDR] = { .output = 54, .sample = 36 },
+ [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 },
};
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
MMC_CAP_1_8V_DDR |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
- /* TODO MMC DDR is not working on A80 */
- if (of_device_is_compatible(pdev->dev.of_node,
- "allwinner,sun9i-a80-mmc"))
- mmc->caps &= ~MMC_CAP_1_8V_DDR;
-
ret = mmc_of_parse(mmc);
if (ret)
goto error_free_dma;
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 16419f550eff..058460bdd5a6 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus",
+ bus->name = "Synopsys MII Bus";
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 8fc93c5f6abc..d02c4240b7df 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -96,6 +96,10 @@ struct alx_priv {
unsigned int rx_ringsz;
unsigned int rxbuf_size;
+ struct page *rx_page;
+ unsigned int rx_page_offset;
+ unsigned int rx_frag_size;
+
struct napi_struct napi;
struct alx_tx_queue txq;
struct alx_rx_queue rxq;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 9fe8b5e310d1..c98acdc0d14f 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
}
}
+static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ struct page *page;
+
+ if (alx->rx_frag_size > PAGE_SIZE)
+ return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+
+ page = alx->rx_page;
+ if (!page) {
+ alx->rx_page = page = alloc_page(gfp);
+ if (unlikely(!page))
+ return NULL;
+ alx->rx_page_offset = 0;
+ }
+
+ skb = build_skb(page_address(page) + alx->rx_page_offset,
+ alx->rx_frag_size);
+ if (likely(skb)) {
+ alx->rx_page_offset += alx->rx_frag_size;
+ if (alx->rx_page_offset >= PAGE_SIZE)
+ alx->rx_page = NULL;
+ else
+ get_page(page);
+ }
+ return skb;
+}
+
+
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
struct alx_rx_queue *rxq = &alx->rxq;
@@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
while (!cur_buf->skb && next != rxq->read_idx) {
struct alx_rfd *rfd = &rxq->rfd[cur];
- skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+ skb = alx_alloc_skb(alx, gfp);
if (!skb)
break;
dma = dma_map_single(&alx->hw.pdev->dev,
@@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
}
+
return count;
}
@@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx)
kfree(alx->txq.bufs);
kfree(alx->rxq.bufs);
+ if (alx->rx_page) {
+ put_page(alx->rx_page);
+ alx->rx_page = NULL;
+ }
+
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,
@@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx)
alx->dev->name, alx);
if (!err)
goto out;
+
/* fall back to legacy interrupt */
pci_disable_msi(alx->hw.pdev);
}
@@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx)
struct pci_dev *pdev = alx->hw.pdev;
struct alx_hw *hw = &alx->hw;
int err;
+ unsigned int head_size;
err = alx_identify_hw(alx);
if (err) {
@@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx)
hw->smb_timer = 400;
hw->mtu = alx->dev->mtu;
+
alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
+ head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ alx->rx_frag_size = roundup_pow_of_two(head_size);
+
alx->tx_ringsz = 256;
alx->rx_ringsz = 512;
hw->imt = 200;
@@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
{
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
+ unsigned int head_size;
if ((max_frame < ALX_MIN_FRAME_SIZE) ||
(max_frame > ALX_MAX_FRAME_SIZE))
@@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
netdev->mtu = mtu;
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
+ head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ alx->rx_frag_size = roundup_pow_of_two(head_size);
netdev_update_features(netdev);
if (netif_running(netdev))
alx_reinit(alx);
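The alx change sizes each receive fragment as the aligned buffer length plus a shared-info footer, rounded up to a power of two, and only carves fragments out of a page when that size still fits in one page. A standalone sketch of the sizing arithmetic; the alignment and footer sizes below are picked arbitrarily for illustration:

#include <stdio.h>

#define PAGE_SIZE		4096u
#define CACHELINE		64u
#define SHARED_INFO_SIZE	320u	/* stand-in for skb_shared_info */

/* Round v up to a multiple of the cache line, like SKB_DATA_ALIGN(). */
static unsigned int data_align(unsigned int v)
{
	return (v + CACHELINE - 1) & ~(CACHELINE - 1);
}

/* Smallest power of two >= v. */
static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int rxbuf_size = 1536;	/* typical 1500-byte-MTU buffer */
	unsigned int head = data_align(rxbuf_size) +
			    data_align(SHARED_INFO_SIZE);
	unsigned int frag = roundup_pow_of_two(head);

	printf("frag size %u, %s\n", frag,
	       frag <= PAGE_SIZE ? "page fragments" : "full allocation");
	return 0;
}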
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0a5b770cefaa..c5fe915870ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13941,14 +13941,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bp->doorbells = bnx2x_vf_doorbells(bp);
rc = bnx2x_vf_pci_alloc(bp);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
} else {
doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
if (doorbell_size > pci_resource_len(pdev, 2)) {
dev_err(&bp->pdev->dev,
"Cannot map doorbells, bar size too small, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
doorbell_size);
@@ -13957,19 +13957,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
if (IS_VF(bp)) {
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
}
/* Enable SRIOV if capability found in configuration space */
rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
@@ -13988,7 +13988,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = bnx2x_set_int_mode(bp);
if (rc) {
dev_err(&pdev->dev, "Cannot set interrupts\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("set interrupts successfully\n");
@@ -13996,7 +13996,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
@@ -14029,6 +14029,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
+init_one_freemem:
+ bnx2x_free_mem_bp(bp);
+
init_one_exit:
bnx2x_disable_pcie_error_reporting(bp);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 085f9125cf42..06f031715b57 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
* re-adding ourselves to the poll list.
*/
- if (priv->tx_skb && !tx_ctrl_ct)
+ if (priv->tx_skb && !tx_ctrl_ct) {
+ nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
napi_reschedule(napi);
+ }
}
return work_done;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ca2cccc594fd..3c0255e98535 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (!skb) {
- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
- continue;
- }
+ if (!skb)
+ goto skb_done;
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
-
+skb_done:
/* Make sure the update to bdp and tx_skbuff are performed
* before dirty_tx
*/
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3d746c887873..67a648c7d3a9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
u32 link_stat = priv->link;
struct hnae_handle *h;
- assert(priv && priv->ae_handle);
h = priv->ae_handle;
if (priv->phy) {
@@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
- assert(priv);
-
strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
sizeof(drvinfo->version));
drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev,
struct hnae_handle *h;
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
h = priv->ae_handle;
ops = h->dev->ops;
@@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev,
struct hnae_ae_ops *ops;
int ret;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
@@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
cmd->version = HNS_CHIP_VERSION;
@@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev)
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (!ops->get_regs_len) {
netdev_err(net_dev, "ops->get_regs_len is null!\n");
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 01fccec632ec..466939f8f0cf 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
hwbm_pool->construct = mvneta_bm_construct;
hwbm_pool->priv = new_pool;
+ spin_lock_init(&hwbm_pool->lock);
/* Create new pool */
err = mvneta_bm_pool_create(priv, new_pool);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c761194bb323..fc95affaf76b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- data[index++] = ((unsigned long *)&priv->stats)[i];
+ data[index++] = ((unsigned long *)&dev->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 92e0624f4cf0..19ceced6736c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1296,15 +1296,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
}
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
spin_lock_bh(&priv->stats_lock);
- memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+ netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
- return &priv->ret_stats;
+ return stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
- memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
@@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->tx_ring[i]->bytes = 0;
priv->tx_ring[i]->packets = 0;
priv->tx_ring[i]->tx_csum = 0;
+ priv->tx_ring[i]->tx_dropped = 0;
+ priv->tx_ring[i]->queue_stopped = 0;
+ priv->tx_ring[i]->wake_queue = 0;
+ priv->tx_ring[i]->tso_packets = 0;
+ priv->tx_ring[i]->xmit_more = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
@@ -2482,7 +2487,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
@@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 20b6c2e678b8..5aa8b751f417 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
- struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device *dev = mdev->pndev[port];
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
+ stats->tx_dropped = 0;
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->tx_packets += ring->packets;
stats->tx_bytes += ring->bytes;
+ stats->tx_dropped += ring->tx_dropped;
priv->port_stats.tx_chksum_offload += ring->tx_csum;
priv->port_stats.queue_stopped += ring->queue_stopped;
priv->port_stats.wake_queue += ring->wake_queue;
@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
- stats->collisions = 0;
stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
- stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
- stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->rx_missed_errors = 0;
- stats->tx_aborted_errors = 0;
- stats->tx_carrier_errors = 0;
- stats->tx_fifo_errors = 0;
- stats->tx_heartbeat_errors = 0;
- stats->tx_window_errors = 0;
- stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
/* RX stats */
priv->pkstats.rx_multicast_packets = stats->multicast;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f6e61570cb2c..76aa4d27183c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool inline_ok;
u32 ring_cons;
- if (!priv->port_up)
- goto tx_drop;
-
tx_ind = skb_get_queue_mapping(skb);
ring = priv->tx_ring[tx_ind];
+ if (!priv->port_up)
+ goto tx_drop;
+
/* fetch ring->cons far ahead before needing it to avoid stall */
ring_cons = ACCESS_ONCE(ring->cons);
@@ -1030,7 +1030,7 @@ tx_drop_unmap:
tx_drop:
dev_kfree_skb_any(skb);
- priv->stats.tx_dropped++;
+ ring->tx_dropped++;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index cc84e09f324a..467d47ed2c39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -270,6 +270,7 @@ struct mlx4_en_tx_ring {
unsigned long tx_csum;
unsigned long tso_packets;
unsigned long xmit_more;
+ unsigned int tx_dropped;
struct mlx4_bf bf;
unsigned long queue_stopped;
@@ -482,8 +483,6 @@ struct mlx4_en_priv {
struct mlx4_en_port_profile *prof;
struct net_device *dev;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct net_device_stats stats;
- struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
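The mlx4 rework moves software drop accounting out of a driver-private net_device_stats copy and into per-TX-ring counters that are summed when statistics are read. A simplified sketch of that aggregation step; the structures below are cut-down stand-ins, not the driver's real types:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Per-ring software counters, updated only by that ring's TX path. */
struct tx_ring {
	uint64_t packets;
	uint64_t bytes;
	uint64_t tx_dropped;
};

struct dev_stats {
	uint64_t tx_packets;
	uint64_t tx_bytes;
	uint64_t tx_dropped;
};

/* Fold the per-ring counters into one device-wide snapshot, the way
 * the DUMP_ETH_STATS path above sums ring->tx_dropped and friends. */
static void fill_stats(const struct tx_ring *rings, size_t n,
		       struct dev_stats *out)
{
	size_t i;

	out->tx_packets = out->tx_bytes = out->tx_dropped = 0;
	for (i = 0; i < n; i++) {
		out->tx_packets += rings[i].packets;
		out->tx_bytes   += rings[i].bytes;
		out->tx_dropped += rings[i].tx_dropped;
	}
}

int main(void)
{
	struct tx_ring rings[2] = { { 10, 6400, 1 }, { 4, 2100, 0 } };
	struct dev_stats st;

	fill_stats(rings, 2, &st);
	printf("%llu packets, %llu bytes, %llu dropped\n",
	       (unsigned long long)st.tx_packets,
	       (unsigned long long)st.tx_bytes,
	       (unsigned long long)st.tx_dropped);
	return 0;
}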
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index cbf58e1f9333..21ec1c2df2c7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_entry *p_tbl,
u32 pri_tc_tbl, int count, bool dcbx_enabled)
{
- u8 tc, priority, priority_map;
+ u8 tc, priority_map;
enum dcbx_protocol_type type;
u16 protocol_id;
+ int priority;
bool enable;
int i;
@@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
- enable = !!(type == DCBX_PROTOCOL_ETH);
+ enable = !(type == DCBX_PROTOCOL_ETH);
qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
priority, tc, type);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 089016f46f26..2d89e8c16b32 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev)
}
}
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
u16 num_pqs, multi_cos_tcs = 1;
+ u8 pf_wfq = qm_info->pf_wfq;
+ u32 pf_rl = qm_info->pf_rl;
u16 num_vfs = 0;
#ifdef CONFIG_QED_SRIOV
@@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
*/
- qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
- num_pqs, GFP_KERNEL);
+ qm_info->qm_pq_params = kcalloc(num_pqs,
+ sizeof(struct init_qm_pq_params),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->qm_pq_params)
goto alloc_err;
- qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
- num_vports, GFP_KERNEL);
+ qm_info->qm_vport_params = kcalloc(num_vports,
+ sizeof(struct init_qm_vport_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_vport_params)
goto alloc_err;
- qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- MAX_NUM_PORTS, GFP_KERNEL);
+ qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+ sizeof(struct init_qm_port_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_port_params)
goto alloc_err;
- qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
- GFP_KERNEL);
+ qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->wfq_data)
goto alloc_err;
@@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
for (i = 0; i < qm_info->num_vports; i++)
qm_info->qm_vport_params[i].vport_wfq = 1;
- qm_info->pf_wfq = 0;
- qm_info->pf_rl = 0;
qm_info->vport_rl_en = 1;
qm_info->vport_wfq_en = 1;
+ qm_info->pf_rl = pf_rl;
+ qm_info->pf_wfq = pf_wfq;
return 0;
@@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_qm_info_free(p_hwfn);
/* initialize qed's qm data structure */
- rc = qed_init_qm_info(p_hwfn);
+ rc = qed_init_qm_info(p_hwfn, false);
if (rc)
return rc;
@@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
/* Prepare and process QM requirements */
- rc = qed_init_qm_info(p_hwfn);
+ rc = qed_init_qm_info(p_hwfn, true);
if (rc)
goto alloc_err;
@@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
hw_mode |= 1 << MODE_ASIC;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ hw_mode |= 1 << MODE_100G;
+
p_hwfn->hw_info.hw_mode = hw_mode;
+
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+ "Configuring function for hw_mode: 0x%08x\n",
+ p_hwfn->hw_info.hw_mode);
}
/* Init run time data for all PFs on an engine. */
@@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev,
u32 load_code, param;
int rc, mfw_rc, i;
+ if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0)
@@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{
int i;
+ if (cdev->num_hwfns > 1) {
+ DP_VERBOSE(cdev,
+ NETIF_MSG_LINK,
+ "WFQ configuration is not supported for this device\n");
+ return;
+ }
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
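qed_init_qm_info() now takes a b_sleepable flag so the same allocation path can serve both the probe path, which may sleep and use GFP_KERNEL, and the reconfiguration path, which runs in atomic context and must use GFP_ATOMIC. A compact sketch of threading that choice through an allocator, with a userspace stand-in for the two flag values:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Userspace stand-ins for the kernel's allocation contexts. */
enum gfp { GFP_KERNEL, GFP_ATOMIC };

static void *alloc_array(size_t n, size_t size, enum gfp flags)
{
	/* A real kernel allocator behaves differently per flag; here we
	 * only record which context was requested. */
	printf("allocating %zu x %zu bytes (%s)\n", n, size,
	       flags == GFP_KERNEL ? "may sleep" : "atomic");
	return calloc(n, size);
}

/* Mirror of the b_sleepable plumbing: one init path, two contexts. */
static int init_qm_info(bool sleepable)
{
	enum gfp flags = sleepable ? GFP_KERNEL : GFP_ATOMIC;
	void *pq_params = alloc_array(8, 16, flags);

	if (!pq_params)
		return -1;
	free(pq_params);
	return 0;
}

int main(void)
{
	init_qm_info(true);	/* probe path         */
	init_qm_info(false);	/* reconfigure path   */
	return 0;
}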
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 8b22f87033ce..753064679bde 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
/* Fallthrough */
case QED_INT_MODE_MSI:
- rc = pci_enable_msi(cdev->pdev);
- if (!rc) {
- int_params->out.int_mode = QED_INT_MODE_MSI;
- goto out;
- }
+ if (cdev->num_hwfns == 1) {
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
- DP_NOTICE(cdev, "Failed to enable MSI\n");
- if (force_mode)
- goto out;
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ }
/* Fallthrough */
case QED_INT_MODE_INTA:
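
Taken together with the qed_dev.c check, this hunk restricts MSI to single-function devices: on CMT (two hwfns per port, 100G) parts the switch now falls through from MSI to INTA rather than enabling MSI. A hedged sketch of that selection order, using simplified names that are not the driver's:

#include <linux/pci.h>

/* Illustrative only: attempt MSI on single-function devices, otherwise
 * fall back to legacy INTA. Returns 1 for MSI, 0 for INTA.
 */
static int pick_int_mode(struct pci_dev *pdev, int num_hwfns)
{
	if (num_hwfns == 1 && !pci_enable_msi(pdev))
		return 1;	/* MSI enabled */
	return 0;		/* fall back to INTA */
}
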
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 1bc75358cbc4..ad3cae3b7243 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST:
- return QEDE_ETHTOOL_TEST_MAX;
+ if (!IS_VF(edev))
+ return QEDE_ETHTOOL_TEST_MAX;
+ else
+ return 0;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 337e839ca586..5d00d1404bfc 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1824,7 +1824,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx,
{
struct qede_dev *edev = netdev_priv(dev);
- return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
+ return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
max_tx_rate);
}
@@ -2091,6 +2091,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ netdev_features_t changes = features ^ dev->features;
+ bool need_reload = false;
+
+ /* No action needed if hardware GRO is disabled during driver load */
+ if (changes & NETIF_F_GRO) {
+ if (dev->features & NETIF_F_GRO)
+ need_reload = !edev->gro_disable;
+ else
+ need_reload = edev->gro_disable;
+ }
+
+ if (need_reload && netif_running(edev->ndev)) {
+ dev->features = features;
+ qede_reload(edev, NULL, NULL);
+ return 1;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_QEDE_VXLAN
static void qede_add_vxlan_port(struct net_device *dev,
sa_family_t sa_family, __be16 port)
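
qede_set_features() above decides whether toggling NETIF_F_GRO requires a reload by XOR-ing the requested mask with the current one, which leaves only the bits that changed. A small sketch of that check, limited to the GRO bit (names are illustrative, not the driver's):

#include <linux/netdevice.h>

/* True when the GRO bit differs between the current and requested masks. */
static bool gro_bit_toggled(netdev_features_t cur, netdev_features_t req)
{
	return (cur ^ req) & NETIF_F_GRO;
}
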
@@ -2175,6 +2198,7 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_link_state = qede_set_vf_link_state,
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 83d72106471c..fd5d1c93b55b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev)
}
/* Disabling the timer */
- del_timer_sync(&qdev->timer);
ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
if (netif_running(ndev))
ql_eeh_close(ndev);
pci_disable_device(pdev);
@@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
+ del_timer_sync(&qdev->timer);
ql_eeh_close(ndev);
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1681084cc96f..1f309127457d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -619,6 +619,17 @@ fail:
return rc;
}
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ /* All our existing PIO buffers went away */
+ efx_for_each_channel(channel, efx)
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->piobuf = NULL;
+}
+
#else /* !EFX_USE_PIO */
static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+}
+
#endif /* EFX_USE_PIO */
static void efx_ef10_remove(struct efx_nic *efx)
@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
+ efx_ef10_forget_old_piobufs(efx);
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0705ec869487..097f363f1630 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
- efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
- sizeof(*efx->rps_flow_id),
- GFP_KERNEL);
- if (!efx->rps_flow_id) {
+ struct efx_channel *channel;
+ int i, success = 1;
+
+ efx_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ }
+
+ if (!success) {
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
efx->type->filter_table_remove(efx);
rc = -ENOMEM;
goto out_unlock;
}
+
+ efx->rps_expire_index = efx->rps_expire_channel = 0;
}
#endif
out_unlock:
@@ -1744,7 +1763,10 @@ out_unlock:
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
- kfree(efx->rps_flow_id);
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
#endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
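
The efx.c hunks above move the RFS flow-ID table from the NIC structure to each channel: one kcalloc() per channel, every slot pre-set to RPS_FLOW_ID_INVALID, and all tables freed again if any allocation fails. A rough sketch of that allocate-all-or-roll-back pattern, with simplified types and names:

#include <linux/slab.h>
#include <linux/types.h>

#define FLOW_ID_INVALID 0xFFFFFFFF	/* stand-in for RPS_FLOW_ID_INVALID */

static u32 **alloc_flow_tables(unsigned int n_channels, unsigned int n_filters)
{
	u32 **tables;
	unsigned int c, i;

	tables = kcalloc(n_channels, sizeof(*tables), GFP_KERNEL);
	if (!tables)
		return NULL;

	for (c = 0; c < n_channels; c++) {
		tables[c] = kcalloc(n_filters, sizeof(**tables), GFP_KERNEL);
		if (!tables[c])
			goto roll_back;
		for (i = 0; i < n_filters; i++)
			tables[c][i] = FLOW_ID_INVALID;
	}
	return tables;

roll_back:
	while (c--)
		kfree(tables[c]);
	kfree(tables);
	return NULL;
}
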
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 38c422321cda..d13ddf9703ff 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@ struct efx_channel {
unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+ u32 *rps_flow_id;
#endif
unsigned n_rx_tobe_disc;
@@ -889,9 +893,9 @@ struct vfdi_status;
* @filter_sem: Filter table rw_semaphore, for freeing the table
* @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- * indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ * @rps_expire_channel's @rps_flow_id
* @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
@@ -1035,7 +1039,7 @@ struct efx_nic {
spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
- u32 *rps_flow_id;
+ unsigned int rps_expire_channel;
unsigned int rps_expire_index;
#endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8956995b2fe7..02b0b5272c14 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_spec spec;
- const __be16 *ports;
- __be16 ether_type;
- int nhoff;
+ struct flow_keys fk;
int rc;
- /* The core RPS/RFS code has already parsed and validated
- * VLAN, IP and transport headers. We assume they are in the
- * header area.
- */
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- const struct vlan_hdr *vh =
- (const struct vlan_hdr *)skb->data;
+ if (flow_id == RPS_FLOW_ID_INVALID)
+ return -EINVAL;
- /* We can't filter on the IP 5-tuple and the vlan
- * together, so just strip the vlan header and filter
- * on the IP part.
- */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
- ether_type = vh->h_vlan_encapsulated_proto;
- nhoff = sizeof(struct vlan_hdr);
- } else {
- ether_type = skb->protocol;
- nhoff = 0;
- }
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
- if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+ if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+ if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
return -EPROTONOSUPPORT;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
- spec.ether_type = ether_type;
-
- if (ether_type == htons(ETH_P_IP)) {
- const struct iphdr *ip =
- (const struct iphdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- if (ip_is_fragment(ip))
- return -EPROTONOSUPPORT;
- spec.ip_proto = ip->protocol;
- spec.rem_host[0] = ip->saddr;
- spec.loc_host[0] = ip->daddr;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ spec.ether_type = fk.basic.n_proto;
+ spec.ip_proto = fk.basic.ip_proto;
+
+ if (fk.basic.n_proto == htons(ETH_P_IP)) {
+ spec.rem_host[0] = fk.addrs.v4addrs.src;
+ spec.loc_host[0] = fk.addrs.v4addrs.dst;
} else {
- const struct ipv6hdr *ip6 =
- (const struct ipv6hdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(*ip6) + 4);
- spec.ip_proto = ip6->nexthdr;
- memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
- memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
- ports = (const __be16 *)(ip6 + 1);
+ memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+ memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
}
- spec.rem_port = ports[0];
- spec.loc_port = ports[1];
+ spec.rem_port = fk.ports.src;
+ spec.loc_port = fk.ports.dst;
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
- efx->rps_flow_id[rc] = flow_id;
- channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ channel = efx_get_channel(efx, rxq_index);
+ channel->rps_flow_id[rc] = flow_id;
++channel->rfs_filters_added;
- if (ether_type == htons(ETH_P_IP))
+ if (spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
else
netif_info(efx, rx_status, efx->net_dev,
"steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
return rc;
}
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
- unsigned int index, size;
+ unsigned int channel_idx, index, size;
u32 flow_id;
if (!spin_trylock_bh(&efx->filter_lock))
return false;
expire_one = efx->type->filter_rfs_expire_one;
+ channel_idx = efx->rps_expire_channel;
index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters;
while (quota--) {
- flow_id = efx->rps_flow_id[index];
- if (expire_one(efx, flow_id, index))
+ struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+ flow_id = channel->rps_flow_id[index];
+
+ if (flow_id != RPS_FLOW_ID_INVALID &&
+ expire_one(efx, flow_id, index)) {
netif_info(efx, rx_status, efx->net_dev,
- "expired filter %d [flow %u]\n",
- index, flow_id);
- if (++index == size)
+ "expired filter %d [queue %u flow %u]\n",
+ index, channel_idx, flow_id);
+ channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ }
+ if (++index == size) {
+ if (++channel_idx == efx->n_channels)
+ channel_idx = 0;
index = 0;
+ }
}
+ efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock);
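
The rewritten efx_filter_rfs() above drops the hand-rolled VLAN/IP header parsing in favour of the generic flow dissector, then records the flow ID in the per-channel table indexed by the returned filter ID. A trimmed sketch of the IPv4 case of that extraction, assuming only the flow-dissector API already shown in the hunk:

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/errno.h>
#include <net/flow_dissector.h>

/* Pull the IPv4 5-tuple out of an skb with the generic dissector. */
static int dissect_v4_tuple(const struct sk_buff *skb,
			    __be32 *saddr, __be32 *daddr,
			    __be16 *sport, __be16 *dport)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;
	if (fk.basic.n_proto != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	*saddr = fk.addrs.v4addrs.src;
	*daddr = fk.addrs.v4addrs.dst;
	*sport = fk.ports.src;
	*dport = fk.ports.dst;
	return 0;
}
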
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 3f83c369f56c..ec295851812b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev)
return -ENOMEM;
if (mdio_bus_data->irqs)
- memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq));
+ memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
#ifdef CONFIG_OF
if (priv->device->of_node)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a0f64cba86ba..2ace126533cd 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
-static void __team_compute_features(struct team *team)
+static void ___team_compute_features(struct team *team)
{
struct team_port *port;
u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team)
team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
+}
+static void __team_compute_features(struct team *team)
+{
+ ___team_compute_features(team);
netdev_change_features(team->dev);
}
static void team_compute_features(struct team *team)
{
mutex_lock(&team->lock);
- __team_compute_features(team);
+ ___team_compute_features(team);
mutex_unlock(&team->lock);
+ netdev_change_features(team->dev);
}
static int team_port_enter(struct team *team, struct team_port *port)
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 36cd7f016a8d..9bbe0161a2f4 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb)
goto goon;
}
- if (!count || count < 4)
+ if (count < 4)
goto goon;
rx_status = buf[count - 2];
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d9d2806a47b1..dc989a8b5afb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -61,6 +61,8 @@
#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
+#define CARRIER_CHECK_DELAY (2 * HZ)
+
struct smsc95xx_priv {
u32 mac_cr;
u32 hash_hi;
@@ -69,6 +71,9 @@ struct smsc95xx_priv {
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
+ bool link_ok;
+ struct delayed_work carrier_check;
+ struct usbnet *dev;
};
static bool turbo_mode = true;
@@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
intdata);
}
+static void set_carrier(struct usbnet *dev, bool link)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+ if (pdata->link_ok == link)
+ return;
+
+ pdata->link_ok = link;
+
+ if (link)
+ usbnet_link_change(dev, 1, 0);
+ else
+ usbnet_link_change(dev, 0, 0);
+}
+
+static void check_carrier(struct work_struct *work)
+{
+ struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
+ carrier_check.work);
+ struct usbnet *dev = pdata->dev;
+ int ret;
+
+ if (pdata->suspend_flags != 0)
+ return;
+
+ ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MII_BMSR\n");
+ return;
+ }
+ if (ret & BMSR_LSTATUS)
+ set_carrier(dev, 1);
+ else
+ set_carrier(dev, 0);
+
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+}
+
/* Enable or disable Tx & Rx checksum offload engines */
static int smsc95xx_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ pdata->dev = dev;
+ INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+
return 0;
}
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
if (pdata) {
+ cancel_delayed_work(&pdata->carrier_check);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
@@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
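
The smsc95xx hunks above add a self-rearming delayed work item that polls MII_BMSR every two seconds, updates the carrier state, stays idle while suspend flags are set, and is cancelled at unbind. A stripped-down sketch of that polling pattern, using illustrative names rather than the driver's:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define POLL_DELAY (2 * HZ)

struct carrier_poller {
	struct delayed_work work;
};

static void carrier_poll_fn(struct work_struct *work)
{
	struct carrier_poller *p =
		container_of(work, struct carrier_poller, work.work);

	/* ... read the PHY status register and update the carrier here ... */

	schedule_delayed_work(&p->work, POLL_DELAY);	/* re-arm */
}

static void carrier_poller_start(struct carrier_poller *p)
{
	INIT_DELAYED_WORK(&p->work, carrier_poll_fn);
	schedule_delayed_work(&p->work, POLL_DELAY);
}

static void carrier_poller_stop(struct carrier_poller *p)
{
	cancel_delayed_work_sync(&p->work);
}
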
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 49d84e540343..e0638e556fe7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- /* Last of all, set up some receive buffers. */
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
-
- /* If we didn't even get one input buffer, we're useless. */
- if (vi->rq[i].vq->num_free ==
- virtqueue_get_vring_size(vi->rq[i].vq)) {
- free_unused_bufs(vi);
- err = -ENOMEM;
- goto free_recv_bufs;
- }
- }
-
vi->nb.notifier_call = &virtnet_cpu_callback;
err = register_hotcpu_notifier(&vi->nb);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
- goto free_recv_bufs;
+ goto free_unregister_netdev;
}
/* Assume link up if device can't report link status,
@@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
-free_recv_bufs:
+free_unregister_netdev:
vi->vdev->config->reset(vdev);
- free_receive_bufs(vi);
unregister_netdev(dev);
free_vqs:
cancel_delayed_work_sync(&vi->refill);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8ff30c3bdfce..f999db2f97b4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+ if (tb[IFLA_MTU])
+ conf.mtu = nla_get_u32(tb[IFLA_MTU]);
+
err = vxlan_dev_configure(src_net, dev, &conf);
switch (err) {
case -ENODEV:
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index f2d01d4d9364..1b8304e1efaa 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -950,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
/* For SPIs, we need to track the affinity per IRQ */
if (using_spi) {
- if (i >= pdev->num_resources) {
- of_node_put(dn);
+ if (i >= pdev->num_resources)
break;
- }
irqs[i] = cpu;
}
/* Keep track of the CPUs containing this PMU type */
cpumask_set_cpu(cpu, &pmu->supported_cpus);
- of_node_put(dn);
i++;
} while (1);
@@ -995,9 +992,6 @@ int arm_pmu_device_probe(struct platform_device *pdev,
armpmu_init(pmu);
- if (!__oprofile_cpu_pmu)
- __oprofile_cpu_pmu = pmu;
-
pmu->plat_device = pdev;
if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
@@ -1033,6 +1027,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (ret)
goto out_destroy;
+ if (!__oprofile_cpu_pmu)
+ __oprofile_cpu_pmu = pmu;
+
pr_info("enabled with %s PMU driver, %d counters available\n",
pmu->name, pmu->num_events);
@@ -1043,6 +1040,7 @@ out_destroy:
out_free:
pr_info("%s: failed to register PMU devices!\n",
of_node_full_name(node));
+ kfree(pmu->irq_affinity);
kfree(pmu);
return ret;
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 207b13b618cf..a607655d7830 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
const struct mtk_desc_pin *pin;
chained_irq_enter(chip, desc);
- for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
+ for (eint_num = 0;
+ eint_num < pctl->devdata->ap_num;
+ eint_num += 32, reg += 4) {
status = readl(reg);
- reg += 4;
while (status) {
offset = __ffs(status);
index = eint_num + offset;
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index ccbfc325c778..38faceff2f08 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
clk_enable(nmk_chip->clk);
- dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+ dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
clk_disable(nmk_chip->clk);
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 579fd65299a0..d637c933c8a9 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_SYS_OFFSET:
- sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
- if (!sysoff) {
- err = -ENOMEM;
- break;
- }
- if (copy_from_user(sysoff, (void __user *)arg,
- sizeof(*sysoff))) {
- err = -EFAULT;
+ sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
+ if (IS_ERR(sysoff)) {
+ err = PTR_ERR(sysoff);
+ sysoff = NULL;
break;
}
if (sysoff->n_samples > PTP_MAX_SAMPLES) {
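
The PTP_SYS_OFFSET branch above replaces a kmalloc()/copy_from_user() pair with memdup_user(), which allocates and copies in one step and reports failure through ERR_PTR(). A minimal illustration of that idiom, with a hypothetical helper name:

#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Copy a fixed-size request from userspace; returns 0 or a negative errno. */
static long fetch_user_req(void __user *uarg, size_t len, void **out)
{
	void *req = memdup_user(uarg, len);

	if (IS_ERR(req))
		return PTR_ERR(req);
	*out = req;	/* caller kfree()s this when done */
	return 0;
}
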
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 8f90d9e77104..969c312de1be 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -621,6 +621,11 @@ struct aac_driver_ident
#define AAC_QUIRK_SCSI_32 0x0020
/*
+ * SRC based adapters support the AifReqEvent functions
+ */
+#define AAC_QUIRK_SRC 0x0040
+
+/*
* The adapter interface specs all queues to be located in the same
* physically contiguous block. The host structure that defines the
* communication queues will assume they are each a separate physically
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a943bd230bc2..79871f3519ff 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
- { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */
+ { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
};
/**
@@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
shost->this_id = shost->max_id;
- aac_intr_normal(aac, 0, 2, 0, NULL);
+ if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+ aac_intr_normal(aac, 0, 2, 0, NULL);
/*
* dmb - we may need to move the setting of these parms somewhere else once
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6a4df5a315e9..6bff13e7afc7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
ActiveCableEventData =
(Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
if (ActiveCableEventData->ReasonCode ==
- MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER)
+ MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
ioc->name, ActiveCableEventData->ReceptacleID);
pr_info("cannot be powered and devices connected to this active cable");
pr_info("will not be seen. This active cable");
pr_info("requires %d mW of power",
ActiveCableEventData->ActiveCablePowerRequirement);
+ }
break;
default: /* ignore the rest */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b2e332af0f51..c71344aebdbb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
/*
- * If we finished all bytes in the request we are done now.
+ * special case: failed zero length commands always need to
+ * drop down into the retry code. Otherwise, if we finished
+ * all bytes in the request we are done now.
*/
- if (!scsi_end_request(req, error, good_bytes, 0))
+ if (!(blk_rq_bytes(req) == 0 && error) &&
+ !scsi_end_request(req, error, good_bytes, 0))
return;
/*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 428c03ef02b2..f459dff30512 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp,
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk(disk);
- struct scsi_device *sdp = sdkp->device;
+ struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_device *sdp;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
+ if (!sdkp)
+ return 0;
+
+ sdp = sdkp->device;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
/*
@@ -1459,6 +1463,7 @@ out:
kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp->changed = 0;
+ scsi_disk_put(sdkp);
return retval;
}
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c
index 13d431cbd29e..a578cd257db4 100644
--- a/drivers/thermal/int340x_thermal/int3406_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3406_thermal.c
@@ -177,7 +177,7 @@ static int int3406_thermal_probe(struct platform_device *pdev)
return -ENODEV;
d->raw_bd = bd;
- ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br);
+ ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL);
if (ret)
return ret;
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 82c4d2e45319..95103054c0e4 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -120,17 +120,6 @@ config UNIX98_PTYS
All modern Linux systems use the Unix98 ptys. Say Y unless
you're on an embedded system and want to conserve memory.
-config DEVPTS_MULTIPLE_INSTANCES
- bool "Support multiple instances of devpts"
- depends on UNIX98_PTYS
- default n
- ---help---
- Enable support for multiple instances of devpts filesystem.
- If you want to have isolated PTY namespaces (eg: in containers),
- say Y here. Otherwise, say N. If enabled, each mount of devpts
- filesystem with the '-o newinstance' option will create an
- independent PTY namespace.
-
config LEGACY_PTYS
bool "Legacy (BSD) PTY support"
default y
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index dd4b8417e7f4..f856c4544eea 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -668,7 +668,7 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
else
fsi = tty->link->driver_data;
devpts_kill_index(fsi, tty->index);
- devpts_put_ref(fsi);
+ devpts_release(fsi);
}
static const struct tty_operations ptm_unix98_ops = {
@@ -733,10 +733,11 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
return retval;
- fsi = devpts_get_ref(inode, filp);
- retval = -ENODEV;
- if (!fsi)
+ fsi = devpts_acquire(filp);
+ if (IS_ERR(fsi)) {
+ retval = PTR_ERR(fsi);
goto out_free_file;
+ }
/* find a device that is not in use. */
mutex_lock(&devpts_mutex);
@@ -745,7 +746,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
retval = index;
if (index < 0)
- goto out_put_ref;
+ goto out_put_fsi;
mutex_lock(&tty_mutex);
@@ -789,8 +790,8 @@ err_release:
return retval;
out:
devpts_kill_index(fsi, index);
-out_put_ref:
- devpts_put_ref(fsi);
+out_put_fsi:
+ devpts_release(fsi);
out_free_file:
tty_free_file(filp);
return retval;
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 93601407dab8..688691d9058d 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -749,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos,
if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
return count;
} else {
- if (pci_read_vpd(pdev, addr, 4, &data) != 4)
+ data = 0;
+ if (pci_read_vpd(pdev, addr, 4, &data) < 0)
return count;
*pdata = cpu_to_le32(data);
}
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index e9ea3fef144a..15ecfc9c5f6c 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
- vfio_intx_set_signal(vdev, -1);
vfio_virqfd_disable(&vdev->ctx[0].unmask);
vfio_virqfd_disable(&vdev->ctx[0].mask);
+ vfio_intx_set_signal(vdev, -1);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
vdev->num_ctx = 0;
kfree(vdev->ctx);
@@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
struct pci_dev *pdev = vdev->pdev;
int i;
- vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
-
for (i = 0; i < vdev->num_ctx; i++) {
vfio_virqfd_disable(&vdev->ctx[i].unmask);
vfio_virqfd_disable(&vdev->ctx[i].mask);
}
+ vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+
if (msix) {
pci_disable_msix(vdev->pdev);
kfree(vdev->msix);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 15a65823aad9..2ba19424e4a1 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
unsigned long pfn, long npage, int prot)
{
long i;
- int ret;
+ int ret = 0;
for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
ret = iommu_map(domain->domain, iova,