Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ec.c | 39
-rw-r--r--  drivers/acpi/internal.h | 4
-rw-r--r--  drivers/acpi/nfit/core.c | 10
-rw-r--r--  drivers/acpi/numa.c | 2
-rw-r--r--  drivers/acpi/sleep.c | 6
-rw-r--r--  drivers/android/binder.c | 17
-rw-r--r--  drivers/atm/zatm.c | 2
-rw-r--r--  drivers/base/dma-coherent.c | 164
-rw-r--r--  drivers/base/dma-mapping.c | 2
-rw-r--r--  drivers/base/power/domain.c | 8
-rw-r--r--  drivers/base/regmap/regmap-w1.c | 4
-rw-r--r--  drivers/block/nbd.c | 20
-rw-r--r--  drivers/block/virtio_blk.c | 7
-rw-r--r--  drivers/block/xen-blkfront.c | 25
-rw-r--r--  drivers/clocksource/timer-of.c | 12
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 21
-rw-r--r--  drivers/crypto/Kconfig | 2
-rw-r--r--  drivers/crypto/bcm/spu2.c | 1
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 3
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 5
-rw-r--r--  drivers/dax/device-dax.h | 2
-rw-r--r--  drivers/dax/device.c | 33
-rw-r--r--  drivers/dax/pmem.c | 12
-rw-r--r--  drivers/dax/super.c | 6
-rw-r--r--  drivers/dma-buf/dma-fence.c | 17
-rw-r--r--  drivers/dma-buf/sync_debug.c | 2
-rw-r--r--  drivers/dma-buf/sync_file.c | 8
-rw-r--r--  drivers/fsi/fsi-core.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 12
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 41
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 24
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/Kconfig | 19
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 66
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 4
-rw-r--r--  drivers/gpu/host1x/dev.c | 8
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 3
-rw-r--r--  drivers/hid/hid-multitouch.c | 16
-rw-r--r--  drivers/hid/hid-ortek.c | 6
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 16
-rw-r--r--  drivers/hv/channel.c | 2
-rw-r--r--  drivers/hwmon/applesmc.c | 13
-rw-r--r--  drivers/ide/ide-timings.c | 18
-rw-r--r--  drivers/infiniband/core/addr.c | 46
-rw-r--r--  drivers/infiniband/core/cma.c | 34
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c | 11
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 36
-rw-r--r--  drivers/infiniband/core/verbs.c | 51
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h | 9
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 119
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h | 3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 29
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 16
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h | 3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 9
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.h | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 86
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 60
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c | 60
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 19
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/cm.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 40
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 38
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 4
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 16
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 15
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h | 4
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 52
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 20
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 32
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 11
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 6
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 10
-rw-r--r--  drivers/irqchip/irq-digicolor.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-realview.c | 2
-rw-r--r--  drivers/irqchip/irq-mips-cpu.c | 2
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 2
-rw-r--r--  drivers/isdn/divert/isdn_divert.c | 25
-rw-r--r--  drivers/isdn/hardware/avm/c4.c | 2
-rw-r--r--  drivers/isdn/hardware/eicon/divasmain.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/avmfritz.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcmulti.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcpci.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/netjet.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/w6692.c | 2
-rw-r--r--  drivers/isdn/hisax/config.c | 2
-rw-r--r--  drivers/isdn/hisax/hfc4s8s_l1.c | 2
-rw-r--r--  drivers/isdn/hisax/hisax_fcpcipnp.c | 2
-rw-r--r--  drivers/lightnvm/pblk-rb.c | 4
-rw-r--r--  drivers/lightnvm/pblk-read.c | 23
-rw-r--r--  drivers/lightnvm/pblk.h | 2
-rw-r--r--  drivers/md/bitmap.c | 3
-rw-r--r--  drivers/md/dm-bufio.c | 3
-rw-r--r--  drivers/md/dm-integrity.c | 22
-rw-r--r--  drivers/md/dm-raid.c | 29
-rw-r--r--  drivers/md/dm-table.c | 35
-rw-r--r--  drivers/md/dm-verity-fec.c | 21
-rw-r--r--  drivers/md/dm-zoned-metadata.c | 12
-rw-r--r--  drivers/md/dm-zoned-reclaim.c | 2
-rw-r--r--  drivers/md/dm-zoned-target.c | 8
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/md.h | 58
-rw-r--r--  drivers/md/raid1-10.c | 81
-rw-r--r--  drivers/md/raid1.c | 68
-rw-r--r--  drivers/md/raid10.c | 25
-rw-r--r--  drivers/md/raid5-ppl.c | 2
-rw-r--r--  drivers/md/raid5.c | 15
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 11
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 8
-rw-r--r--  drivers/mux/Kconfig | 19
-rw-r--r--  drivers/mux/mux-core.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 70
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 299
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/alloc.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c | 4
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 10
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 4
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 49
-rw-r--r--  drivers/net/phy/mdio-mux.c | 4
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 30
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 28
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c | 6
-rw-r--r--  drivers/net/usb/smsc95xx.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2
-rw-r--r--  drivers/nvdimm/core.c | 7
-rw-r--r--  drivers/nvme/host/core.c | 8
-rw-r--r--  drivers/nvme/host/fc.c | 121
-rw-r--r--  drivers/nvme/host/pci.c | 49
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 16
-rw-r--r--  drivers/nvme/target/configfs.c | 30
-rw-r--r--  drivers/nvme/target/core.c | 5
-rw-r--r--  drivers/nvme/target/fc.c | 109
-rw-r--r--  drivers/nvme/target/nvmet.h | 2
-rw-r--r--  drivers/nvmem/rockchip-efuse.c | 2
-rw-r--r--  drivers/of/irq.c | 2
-rw-r--r--  drivers/parisc/pdc_stable.c | 2
-rw-r--r--  drivers/perf/arm_pmu.c | 41
-rw-r--r--  drivers/perf/arm_pmu_platform.c | 9
-rw-r--r--  drivers/perf/qcom_l2_pmu.c | 2
-rw-r--r--  drivers/s390/cio/chp.c | 1
-rw-r--r--  drivers/scsi/cxlflash/main.c | 11
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 10
-rw-r--r--  drivers/scsi/hpsa.c | 2
-rw-r--r--  drivers/scsi/isci/request.c | 14
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi.h | 17
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 419
-rw-r--r--  drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h | 210
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r--  drivers/scsi/sg.c | 8
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 2
-rw-r--r--  drivers/scsi/virtio_scsi.c | 1
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c | 17
-rw-r--r--  drivers/spmi/spmi.c | 12
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_common.c | 3
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 19
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_cmd.c | 2
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_intf.c | 1
-rw-r--r--  drivers/staging/sm750fb/ddk750_chip.c | 2
-rw-r--r--  drivers/staging/sm750fb/sm750.c | 24
-rw-r--r--  drivers/staging/speakup/main.c | 2
-rw-r--r--  drivers/staging/speakup/spk_priv.h | 2
-rw-r--r--  drivers/staging/speakup/spk_ttyio.c | 22
-rw-r--r--  drivers/staging/vboxvideo/Kconfig | 12
-rw-r--r--  drivers/staging/vboxvideo/Makefile | 7
-rw-r--r--  drivers/staging/vboxvideo/TODO | 9
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_base.c | 246
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_ch_setup.h | 66
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_channels.h | 53
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_defs.h | 92
-rw-r--r--  drivers/staging/vboxvideo/modesetting.c | 142
-rw-r--r--  drivers/staging/vboxvideo/vbox_drv.c | 286
-rw-r--r--  drivers/staging/vboxvideo/vbox_drv.h | 296
-rw-r--r--  drivers/staging/vboxvideo/vbox_err.h | 50
-rw-r--r--  drivers/staging/vboxvideo/vbox_fb.c | 412
-rw-r--r--  drivers/staging/vboxvideo/vbox_hgsmi.c | 115
-rw-r--r--  drivers/staging/vboxvideo/vbox_irq.c | 197
-rw-r--r--  drivers/staging/vboxvideo/vbox_main.c | 534
-rw-r--r--  drivers/staging/vboxvideo/vbox_mode.c | 877
-rw-r--r--  drivers/staging/vboxvideo/vbox_prime.c | 74
-rw-r--r--  drivers/staging/vboxvideo/vbox_ttm.c | 472
-rw-r--r--  drivers/staging/vboxvideo/vboxvideo.h | 491
-rw-r--r--  drivers/staging/vboxvideo/vboxvideo_guest.h | 95
-rw-r--r--  drivers/staging/vboxvideo/vboxvideo_vbe.h | 84
-rw-r--r--  drivers/staging/vboxvideo/vbva_base.c | 233
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 10
-rw-r--r--  drivers/thunderbolt/switch.c | 11
-rw-r--r--  drivers/thunderbolt/tb.h | 4
-rw-r--r--  drivers/thunderbolt/tb_msgs.h | 12
-rw-r--r--  drivers/tty/pty.c | 85
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 24
-rw-r--r--  drivers/tty/serial/imx.c | 27
-rw-r--r--  drivers/tty/serial/sh-sci.c | 12
-rw-r--r--  drivers/tty/serial/st-asc.c | 1
-rw-r--r--  drivers/usb/class/cdc-acm.c | 3
-rw-r--r--  drivers/usb/dwc2/gadget.c | 3
-rw-r--r--  drivers/usb/dwc3/core.c | 6
-rw-r--r--  drivers/usb/dwc3/dwc3-omap.c | 18
-rw-r--r--  drivers/usb/dwc3/gadget.c | 8
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_uac1.c | 20
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 25
-rw-r--r--  drivers/usb/gadget/udc/Kconfig | 5
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 14
-rw-r--r--  drivers/usb/gadget/udc/snps_udc_plat.c | 6
-rw-r--r--  drivers/usb/host/pci-quirks.c | 54
-rw-r--r--  drivers/usb/host/pci-quirks.h | 2
-rw-r--r--  drivers/usb/host/xhci-hub.c | 14
-rw-r--r--  drivers/usb/host/xhci-pci.c | 6
-rw-r--r--  drivers/usb/host/xhci-ring.c | 11
-rw-r--r--  drivers/usb/host/xhci.c | 10
-rw-r--r--  drivers/usb/host/xhci.h | 1
-rw-r--r--  drivers/usb/renesas_usbhs/common.c | 4
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 31
-rw-r--r--  drivers/usb/storage/isd200.c | 5
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.h | 1
-rw-r--r--  drivers/virtio/virtio_balloon.c | 28
-rw-r--r--  drivers/w1/masters/omap_hdq.c | 3
-rw-r--r--  drivers/w1/w1.c | 4
-rw-r--r--  drivers/xen/balloon.c | 3
-rw-r--r--  drivers/xen/events/events_base.c | 13
-rw-r--r--  drivers/xen/grant-table.c | 9
-rw-r--r--  drivers/xen/xen-balloon.c | 22
-rw-r--r--  drivers/xen/xen-selfballoon.c | 4
-rw-r--r--  drivers/xen/xenfs/super.c | 1
336 files changed, 8184 insertions, 1771 deletions
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ddb01e9fa5b2..62068a5e814f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -151,6 +151,10 @@ static bool ec_freeze_events __read_mostly = false;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
+static bool ec_no_wakeup __read_mostly;
+module_param(ec_no_wakeup, bool, 0644);
+MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
+
struct acpi_ec_query_handler {
struct list_head node;
acpi_ec_query_func func;
@@ -535,6 +539,14 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
spin_unlock_irqrestore(&ec->lock, flags);
__acpi_ec_flush_event(ec);
}
+
+void acpi_ec_flush_work(void)
+{
+ if (first_ec)
+ __acpi_ec_flush_event(first_ec);
+
+ flush_scheduled_work();
+}
#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
@@ -1880,6 +1892,32 @@ static int acpi_ec_suspend(struct device *dev)
return 0;
}
+static int acpi_ec_suspend_noirq(struct device *dev)
+{
+ struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+ /*
+ * The SCI handler doesn't run at this point, so the GPE can be
+ * masked at the low level without side effects.
+ */
+ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+
+ return 0;
+}
+
+static int acpi_ec_resume_noirq(struct device *dev)
+{
+ struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
+
+ return 0;
+}
+
static int acpi_ec_resume(struct device *dev)
{
struct acpi_ec *ec =
@@ -1891,6 +1929,7 @@ static int acpi_ec_resume(struct device *dev)
#endif
static const struct dev_pm_ops acpi_ec_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
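The combination of SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() and SET_SYSTEM_SLEEP_PM_OPS() used here is the standard way to hook both suspend phases: the regular callbacks run with interrupts enabled, while the noirq pair runs after device interrupts are disabled on suspend and before they are re-enabled on resume, which is what makes masking the EC GPE safe at that point. A minimal sketch of the shape, with generic driver names standing in for the EC's:

    static int example_suspend(struct device *dev)       { return 0; }
    static int example_resume(struct device *dev)        { return 0; }
    static int example_suspend_noirq(struct device *dev) { return 0; }
    static int example_resume_noirq(struct device *dev)  { return 0; }

    static const struct dev_pm_ops example_pm = {
            SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(example_suspend_noirq,
                                          example_resume_noirq)
            SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
    };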
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9531d3276f65..58dd7ab3c653 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -193,6 +193,10 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
void *data);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+#ifdef CONFIG_PM_SLEEP
+void acpi_ec_flush_work(void);
+#endif
+
/*--------------------------------------------------------------------------
Suspend/Resume
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b75b734ee73a..19182d091587 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3160,6 +3160,8 @@ static struct acpi_driver acpi_nfit_driver = {
static __init int nfit_init(void)
{
+ int ret;
+
BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
@@ -3187,8 +3189,14 @@ static __init int nfit_init(void)
return -ENOMEM;
nfit_mce_register();
+ ret = acpi_bus_register_driver(&acpi_nfit_driver);
+ if (ret) {
+ nfit_mce_unregister();
+ destroy_workqueue(nfit_wq);
+ }
+
+ return ret;
- return acpi_bus_register_driver(&acpi_nfit_driver);
}
static __exit void nfit_exit(void)
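The nfit_init() change closes a leak: if acpi_bus_register_driver() fails, the MCE notifier and the workqueue created earlier are now torn down instead of being left behind. A minimal sketch of the unwind-on-failure pattern, with hypothetical names (example_wq, example_notifier_*, example_driver_register) standing in for the nfit specifics:

    static struct workqueue_struct *example_wq;

    static __init int example_init(void)
    {
            int ret;

            example_wq = create_singlethread_workqueue("example");
            if (!example_wq)
                    return -ENOMEM;

            example_notifier_register();

            ret = example_driver_register();
            if (ret) {
                    /* unwind in reverse order of setup */
                    example_notifier_unregister();
                    destroy_workqueue(example_wq);
            }
            return ret;
    }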
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index edb0c79f7c64..917f1cc0fda4 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -443,7 +443,7 @@ int __init acpi_numa_init(void)
* So go over all cpu entries in SRAT to get apicid to node mapping.
*/
- /* SRAT: Static Resource Affinity Table */
+ /* SRAT: System Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
struct acpi_subtable_proc srat_proc[3];
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index be17664736b2..fa8243c5c062 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -777,11 +777,11 @@ static void acpi_freeze_sync(void)
/*
* Process all pending events in case there are any wakeup ones.
*
- * The EC driver uses the system workqueue, so that one needs to be
- * flushed too.
+ * The EC driver uses the system workqueue and an additional special
+ * one, so those need to be flushed too.
*/
+ acpi_ec_flush_work();
acpi_os_wait_events_complete();
- flush_scheduled_work();
s2idle_wakeup = false;
}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index aae4d8d4be36..f7665c31feca 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2200,8 +2200,12 @@ static void binder_transaction(struct binder_proc *proc,
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait)
- wake_up_interruptible(target_wait);
+ if (target_wait) {
+ if (reply || !(t->flags & TF_ONE_WAY))
+ wake_up_interruptible_sync(target_wait);
+ else
+ wake_up_interruptible(target_wait);
+ }
return;
err_translate_failed:
@@ -3247,10 +3251,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
- if (unlikely(current->mm != proc->vma_vm_mm)) {
- pr_err("current mm mismatch proc mm\n");
- return -EINVAL;
- }
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -3464,9 +3464,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
- get_task_struct(current);
- proc->tsk = current;
- proc->vma_vm_mm = current->mm;
+ get_task_struct(current->group_leader);
+ proc->tsk = current->group_leader;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
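Two things happen in this binder diff: open() now pins the thread group leader rather than the opening thread, and the wakeup path distinguishes synchronous work. For replies and non-one-way transactions the sender is about to block waiting for a response, so the _sync wakeup variant is used; it hints to the scheduler that the waker will sleep immediately, favouring the local CPU for the woken task. A generic sketch of that distinction, with hypothetical names:

    static DECLARE_WAIT_QUEUE_HEAD(example_wq);

    static void example_wake(bool waker_will_block_for_reply)
    {
            if (waker_will_block_for_reply)
                    wake_up_interruptible_sync(&example_wq);
            else
                    wake_up_interruptible(&example_wq);
    }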
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 292dec18ffb8..07bdd51b3b9a 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1613,7 +1613,7 @@ static int zatm_init_one(struct pci_dev *pci_dev,
ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
if (ret < 0)
- goto out_disable;
+ goto out_release;
zatm_dev->pci_dev = pci_dev;
dev->dev_data = zatm_dev;
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c..1c152aed6b82 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
{
if (dev && dev->dma_mem)
return dev->dma_mem;
- return dma_coherent_default_memory;
+ return NULL;
}
static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev: device from which we allocate memory
- * @size: size of requested memory area
- * @dma_handle: This will be filled with the correct dma handle
- * @ret: This pointer will be filled with the virtual address
- * to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+ ssize_t size, dma_addr_t *dma_handle)
{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
int order = get_order(size);
unsigned long flags;
int pageno;
int dma_memory_map;
+ void *ret;
- if (!mem)
- return 0;
-
- *ret = NULL;
spin_lock_irqsave(&mem->spinlock, flags);
if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
goto err;
/*
- * Memory was found in the per-device area.
+ * Memory was found in the coherent area.
*/
- *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
- *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ ret = mem->virt_base + (pageno << PAGE_SHIFT);
dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
spin_unlock_irqrestore(&mem->spinlock, flags);
if (dma_memory_map)
- memset(*ret, 0, size);
+ memset(ret, 0, size);
else
- memset_io(*ret, 0, size);
+ memset_io(ret, 0, size);
- return 1;
+ return ret;
err:
spin_unlock_irqrestore(&mem->spinlock, flags);
+ return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev: device from which we allocate memory
+ * @size: size of requested memory area
+ * @dma_handle: This will be filled with the correct dma handle
+ * @ret: This pointer will be filled with the virtual address
+ * to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret)
+{
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ if (!mem)
+ return 0;
+
+ *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+ if (*ret)
+ return 1;
+
/*
* In the case where the allocation can not be satisfied from the
* per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
*/
return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev: device from which the memory was allocated
- * @order: the order of pages allocated
- * @vaddr: virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+ if (!dma_coherent_default_memory)
+ return NULL;
+
+ return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+ dma_handle);
+}
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+ int order, void *vaddr)
+{
if (mem && vaddr >= mem->virt_base && vaddr <
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
}
return 0;
}
-EXPORT_SYMBOL(dma_release_from_coherent);
/**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
* @dev: device from which the memory was allocated
- * @vma: vm_area for the userspace memory
- * @vaddr: cpu address returned by dma_alloc_from_coherent
- * @size: size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret: result from remap_pfn_range()
+ * @order: the order of pages allocated
+ * @vaddr: virtual address of allocated pages
*
* This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
*
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
*/
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
- void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+ return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ if (!dma_coherent_default_memory)
+ return 0;
+
+ return __dma_release_from_coherent(dma_coherent_default_memory, order,
+ vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+ struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
if (mem && vaddr >= mem->virt_base && vaddr + size <=
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
}
return 0;
}
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
+ * @size: size of the memory buffer allocated
+ * @ret: result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *vaddr, size_t size, int *ret)
+{
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+ size_t size, int *ret)
+{
+ if (!dma_coherent_default_memory)
+ return 0;
+
+ return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+ vaddr, size, ret);
+}
/*
* Support for reserved memory regions defined in device tree
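The refactor above splits the coherent-pool helpers in two: dma_alloc_from_dev_coherent() and its release/mmap counterparts now touch only a device's own pool, while the new *_global_coherent() entry points serve the default pool explicitly, so a device without a pool no longer falls through to the global one silently. A sketch of how an architecture's allocation path is expected to consume the device-pool helper; example_dma_alloc() and example_generic_alloc() are illustrative, not part of this patch:

    static void *example_dma_alloc(struct device *dev, size_t size,
                                   dma_addr_t *dma_handle, gfp_t gfp)
    {
            void *ret;

            /*
             * A nonzero return means the device pool handled the request;
             * ret may still be NULL for a DMA_MEMORY_EXCLUSIVE pool that
             * is exhausted, in which case the allocation must fail.
             */
            if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
                    return ret;

            return example_generic_alloc(dev, size, dma_handle, gfp);
    }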
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 5096755d185e..b555ff9dd8fc 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 3b8210ebb50e..60303aa28587 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1222,8 +1222,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
spin_unlock_irq(&dev->power.lock);
- dev_pm_domain_set(dev, &genpd->domain);
-
return gpd_data;
err_free:
@@ -1237,8 +1235,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
static void genpd_free_dev_data(struct device *dev,
struct generic_pm_domain_data *gpd_data)
{
- dev_pm_domain_set(dev, NULL);
-
spin_lock_irq(&dev->power.lock);
dev->power.subsys_data->domain_data = NULL;
@@ -1275,6 +1271,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (ret)
goto out;
+ dev_pm_domain_set(dev, &genpd->domain);
+
genpd->device_count++;
genpd->max_off_time_changed = true;
@@ -1336,6 +1334,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
if (genpd->detach_dev)
genpd->detach_dev(genpd, dev);
+ dev_pm_domain_set(dev, NULL);
+
list_del_init(&pdd->list_node);
genpd_unlock(genpd);
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 5f04e7bf063e..e6c64b0be5b2 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -1,7 +1,7 @@
/*
* Register map access API - W1 (1-Wire) support
*
- * Copyright (C) 2017 OAO Radioavionica
+ * Copyright (c) 2017 Radioavionica Corporation
* Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
*
* This program is free software; you can redistribute it and/or modify
@@ -11,7 +11,7 @@
#include <linux/regmap.h>
#include <linux/module.h>
-#include "../../w1/w1.h"
+#include <linux/w1.h>
#include "internal.h"
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index dea7d85134ee..5bdf923294a5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -626,7 +626,6 @@ static void recv_work(struct work_struct *work)
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
struct nbd_cmd *cmd;
- int ret = 0;
while (1) {
cmd = nbd_read_stat(nbd, args->index);
@@ -636,7 +635,6 @@ static void recv_work(struct work_struct *work)
mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
- ret = PTR_ERR(cmd);
break;
}
@@ -910,7 +908,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
continue;
}
sk_set_memalloc(sock->sk);
- sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+ if (nbd->tag_set.timeout)
+ sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
old = nsock->sock;
@@ -924,6 +923,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
mutex_unlock(&nsock->tx_lock);
sockfd_put(old);
+ clear_bit(NBD_DISCONNECTED, &config->runtime_flags);
+
/* We take the tx_mutex in an error path in the recv_work, so we
* need to queue_work outside of the tx_mutex.
*/
@@ -980,11 +981,15 @@ static void send_disconnects(struct nbd_device *nbd)
int i, ret;
for (i = 0; i < config->num_connections; i++) {
+ struct nbd_sock *nsock = config->socks[i];
+
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+ mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
if (ret <= 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
+ mutex_unlock(&nsock->tx_lock);
}
}
@@ -993,9 +998,8 @@ static int nbd_disconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
- if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
- &config->runtime_flags))
- send_disconnects(nbd);
+ set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
+ send_disconnects(nbd);
return 0;
}
@@ -1076,7 +1080,9 @@ static int nbd_start_device(struct nbd_device *nbd)
return -ENOMEM;
}
sk_set_memalloc(config->socks[i]->sock->sk);
- config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+ if (nbd->tag_set.timeout)
+ config->socks[i]->sock->sk->sk_sndtimeo =
+ nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4e02aa5fdac0..1498b899a593 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -541,12 +541,9 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
int i;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
- for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
- if (sysfs_streq(buf, virtblk_cache_types[i]))
- break;
-
+ i = sysfs_match_string(virtblk_cache_types, buf);
if (i < 0)
- return -EINVAL;
+ return i;
virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
virtblk_update_cache_mode(vdev);
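sysfs_match_string() folds the open-coded reverse scan and the -EINVAL translation into one call: it compares each array entry with sysfs_streq(), which tolerates the trailing newline of a sysfs write, and returns the matching index or -EINVAL. A minimal usage sketch with an illustrative table:

    static const char *const example_modes[] = { "write back", "write through" };

    static int example_parse_mode(const char *buf)
    {
            int i = sysfs_match_string(example_modes, buf);

            /* i is the index into example_modes, or -EINVAL on no match */
            return i;
    }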
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c852ed3c01d5..98e34e4c62b8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -111,7 +111,7 @@ struct blk_shadow {
};
struct blkif_req {
- int error;
+ blk_status_t error;
};
static inline struct blkif_req *blkif_req(struct request *rq)
@@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* existing persistent grants, or if we have to get new grants,
* as there are not sufficiently many free.
*/
+ bool new_persistent_gnts = false;
struct scatterlist *sg;
int num_sg, max_grefs, num_grant;
@@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
*/
max_grefs += INDIRECT_GREFS(max_grefs);
- /*
- * We have to reserve 'max_grefs' grants because persistent
- * grants are shared by all rings.
- */
- if (max_grefs > 0)
- if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
+ /* Check if we have enough persistent grants to allocate a request */
+ if (rinfo->persistent_gnts_c < max_grefs) {
+ new_persistent_gnts = true;
+
+ if (gnttab_alloc_grant_references(
+ max_grefs - rinfo->persistent_gnts_c,
+ &setup.gref_head) < 0) {
gnttab_request_free_callback(
&rinfo->callback,
blkif_restart_queue_callback,
rinfo,
- max_grefs);
+ max_grefs - rinfo->persistent_gnts_c);
return 1;
}
+ }
/* Fill out a communications ring structure. */
id = blkif_ring_get_request(rinfo, req, &ring_req);
@@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
if (unlikely(require_extra_req))
rinfo->shadow[extra_id].req = *extra_ring_req;
- if (max_grefs > 0)
+ if (new_persistent_gnts)
gnttab_free_grant_references(setup.gref_head);
return 0;
@@ -906,8 +909,8 @@ out_err:
return BLK_STS_IOERR;
out_busy:
- spin_unlock_irqrestore(&rinfo->ring_lock, flags);
blk_mq_stop_hw_queue(hctx);
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_RESOURCE;
}
@@ -1616,7 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- blkif_req(req)->error = -EOPNOTSUPP;
+ blkif_req(req)->error = BLK_STS_NOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index f6e7491c873c..d509b500a7b5 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -41,8 +41,16 @@ static __init int timer_irq_init(struct device_node *np,
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
struct clock_event_device *clkevt = &to->clkevt;
- of_irq->irq = of_irq->name ? of_irq_get_byname(np, of_irq->name):
- irq_of_parse_and_map(np, of_irq->index);
+ if (of_irq->name) {
+ of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
+ if (ret < 0) {
+ pr_err("Failed to get interrupt %s for %s\n",
+ of_irq->name, np->full_name);
+ return ret;
+ }
+ } else {
+ of_irq->irq = irq_of_parse_and_map(np, of_irq->index);
+ }
if (!of_irq->irq) {
pr_err("Failed to map interrupt for %s\n", np->full_name);
return -EINVAL;
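The rewritten branch matters because the two OF helpers report failure differently: of_irq_get_byname() returns a negative errno (including -EPROBE_DEFER, which must be propagated), while irq_of_parse_and_map() returns 0 when no mapping exists. A sketch of the two conventions side by side; the "timer" name and index 0 are illustrative:

    static int example_get_irq(struct device_node *np)
    {
            int irq = of_irq_get_byname(np, "timer");

            if (irq < 0)
                    return irq;             /* errno, possibly -EPROBE_DEFER */
            if (irq > 0)
                    return irq;

            irq = irq_of_parse_and_map(np, 0);
            return irq ? irq : -EINVAL;     /* 0 means "no mapping" */
    }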
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b7fb8b7c980d..6cd503525638 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -225,6 +225,9 @@ struct global_params {
* @vid: Stores VID limits for this CPU
* @pid: Stores PID parameters for this CPU
* @last_sample_time: Last Sample time
+ * @aperf_mperf_shift: Number of clock cycles after which aperf/mperf are incremented.
+ * This shift is a multiplier to mperf delta to
+ * calculate CPU busy.
* @prev_aperf: Last APERF value read from APERF MSR
* @prev_mperf: Last MPERF value read from MPERF MSR
* @prev_tsc: Last timestamp counter (TSC) value
@@ -259,6 +262,7 @@ struct cpudata {
u64 last_update;
u64 last_sample_time;
+ u64 aperf_mperf_shift;
u64 prev_aperf;
u64 prev_mperf;
u64 prev_tsc;
@@ -321,6 +325,7 @@ struct pstate_funcs {
int (*get_min)(void);
int (*get_turbo)(void);
int (*get_scaling)(void);
+ int (*get_aperf_mperf_shift)(void);
u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
void (*update_util)(struct update_util_data *data, u64 time,
@@ -1486,6 +1491,11 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
return val;
}
+static int knl_get_aperf_mperf_shift(void)
+{
+ return 10;
+}
+
static int knl_get_turbo_pstate(void)
{
u64 value;
@@ -1543,6 +1553,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ if (pstate_funcs.get_aperf_mperf_shift)
+ cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+
if (pstate_funcs.get_vid)
pstate_funcs.get_vid(cpu);
@@ -1616,7 +1629,8 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
int32_t busy_frac, boost;
int target, avg_pstate;
- busy_frac = div_fp(sample->mperf, sample->tsc);
+ busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
+ sample->tsc);
boost = cpu->iowait_boost;
cpu->iowait_boost >>= 1;
@@ -1675,7 +1689,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
perf_scaled = mul_fp(perf_scaled, sample_ratio);
} else {
- sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+ sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift),
+ cpu->sample.tsc);
if (sample_ratio < int_tofp(1))
perf_scaled = 0;
}
@@ -1807,6 +1822,7 @@ static const struct pstate_funcs knl_funcs = {
.get_max_physical = core_get_max_pstate_physical,
.get_min = core_get_min_pstate,
.get_turbo = knl_get_turbo_pstate,
+ .get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
.get_scaling = core_get_scaling,
.get_val = core_get_val,
.update_util = intel_pstate_update_util_pid,
@@ -2403,6 +2419,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_val = funcs->get_val;
pstate_funcs.get_vid = funcs->get_vid;
pstate_funcs.update_util = funcs->update_util;
+ pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
intel_pstate_use_acpi_profile();
}
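The shift exists because Knights Landing increments MPERF only once every 1024 (2^10) TSC-domain cycles rather than every cycle, so the raw mperf/tsc ratio under-reports utilization by three orders of magnitude. As a worked example: a core that is fully busy across an interval of 1,024,000 TSC cycles accumulates an MPERF delta of just 1,000, and busy_frac = div_fp(1000 << 10, 1024000) comes out to 1.0 in fixed point, whereas the unshifted ratio would suggest roughly 0.1% load. On CPUs without a get_aperf_mperf_shift() callback, aperf_mperf_shift stays 0 and the shift is a no-op.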
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 193204dfbf3a..4b75084fabad 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -655,7 +655,7 @@ source "drivers/crypto/virtio/Kconfig"
config CRYPTO_DEV_BCM_SPU
tristate "Broadcom symmetric crypto/hash acceleration support"
depends on ARCH_BCM_IPROC
- depends on BCM_PDC_MBOX
+ depends on MAILBOX
default m
select CRYPTO_DES
select CRYPTO_MD5
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index ef04c9748317..bf7ac621c591 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
break;
case HASH_ALG_SHA3_512:
*spu2_type = SPU2_HASH_TYPE_SHA3_512;
+ break;
case HASH_ALG_LAST:
default:
err = -EINVAL;
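The one-line spu2.c fix adds a missing break: without it, a valid SHA3-512 translation fell through into the default arm and the request was rejected with -EINVAL even though *spu2_type had already been set. A generic illustration of the bug class, with made-up enum values:

    static int example_xlate(int alg, int *type)
    {
            int err = 0;

            switch (alg) {
            case 1:
                    *type = 100;
                    break;  /* omit this and control falls into the
                             * error arm below, failing a valid input */
            default:
                    err = -EINVAL;
                    break;
            }
            return err;
    }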
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index ae44a464cd2d..9ccefb9b7232 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -18,8 +18,9 @@
#define SE_GROUP 0
#define DRIVER_VERSION "1.0"
+#define FW_DIR "cavium/"
/* SE microcode */
-#define SE_FW "cnn55xx_se.fw"
+#define SE_FW FW_DIR "cnn55xx_se.fw"
static const char nitrox_driver_name[] = "CNN55XX";
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index e7f87ac12685..1fabd4aee81b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -773,7 +773,6 @@ static int safexcel_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct safexcel_crypto_priv *priv;
- u64 dma_mask;
int i, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -802,9 +801,7 @@ static int safexcel_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask))
- dma_mask = DMA_BIT_MASK(64);
- ret = dma_set_mask_and_coherent(dev, dma_mask);
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
goto err_clk;
diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h
index fdcd9769ffde..688b051750bd 100644
--- a/drivers/dax/device-dax.h
+++ b/drivers/dax/device-dax.h
@@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(struct device *parent,
int region_id, struct resource *res, unsigned int align,
void *addr, unsigned long flags);
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
- struct resource *res, int count);
+ int id, struct resource *res, int count);
#endif /* __DEVICE_DAX_H__ */
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 12943d19bfc4..e9f3b3e4bbf4 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -529,7 +529,8 @@ static void dev_dax_release(struct device *dev)
struct dax_region *dax_region = dev_dax->region;
struct dax_device *dax_dev = dev_dax->dax_dev;
- ida_simple_remove(&dax_region->ida, dev_dax->id);
+ if (dev_dax->id >= 0)
+ ida_simple_remove(&dax_region->ida, dev_dax->id);
dax_region_put(dax_region);
put_dax(dax_dev);
kfree(dev_dax);
@@ -559,7 +560,7 @@ static void unregister_dev_dax(void *dev)
}
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
- struct resource *res, int count)
+ int id, struct resource *res, int count)
{
struct device *parent = dax_region->dev;
struct dax_device *dax_dev;
@@ -567,7 +568,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
struct inode *inode;
struct device *dev;
struct cdev *cdev;
- int rc = 0, i;
+ int rc, i;
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
if (!dev_dax)
@@ -587,10 +591,16 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
if (i < count)
goto err_id;
- dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
- if (dev_dax->id < 0) {
- rc = dev_dax->id;
- goto err_id;
+ if (id < 0) {
+ id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+ dev_dax->id = id;
+ if (id < 0) {
+ rc = id;
+ goto err_id;
+ }
+ } else {
+ /* region provider owns @id lifetime */
+ dev_dax->id = -1;
}
/*
@@ -598,8 +608,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
* device outside of mmap of the resulting character device.
*/
dax_dev = alloc_dax(dev_dax, NULL, NULL);
- if (!dax_dev)
+ if (!dax_dev) {
+ rc = -ENOMEM;
goto err_dax;
+ }
/* from here on we're committed to teardown via dax_dev_release() */
dev = &dev_dax->dev;
@@ -620,7 +632,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
dev->parent = parent;
dev->groups = dax_attribute_groups;
dev->release = dev_dax_release;
- dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
+ dev_set_name(dev, "dax%d.%d", dax_region->id, id);
rc = cdev_device_add(cdev, dev);
if (rc) {
@@ -636,7 +648,8 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
return dev_dax;
err_dax:
- ida_simple_remove(&dax_region->ida, dev_dax->id);
+ if (dev_dax->id >= 0)
+ ida_simple_remove(&dax_region->ida, dev_dax->id);
err_id:
kfree(dev_dax);
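The device.c changes introduce a split ownership model for device ids: a negative id passed to devm_create_dev_dax() means "allocate one from the region's ida and free it in the release path", while a non-negative id belongs to the region provider and is recorded as dev_dax->id == -1 so the release path never returns it to the ida. A condensed sketch of the convention with hypothetical names:

    struct example_dev {
            struct ida *ida;
            int id;         /* >= 0: ida-allocated; -1: provider-owned */
    };

    static void example_release(struct example_dev *edev)
    {
            if (edev->id >= 0)
                    ida_simple_remove(edev->ida, edev->id);
            kfree(edev);
    }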
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9f2a0b4fd801..8d8c852ba8f2 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *data)
static int dax_pmem_probe(struct device *dev)
{
- int rc;
void *addr;
struct resource res;
+ int rc, id, region_id;
struct nd_pfn_sb *pfn_sb;
struct dev_dax *dev_dax;
struct dax_pmem *dax_pmem;
- struct nd_region *nd_region;
struct nd_namespace_io *nsio;
struct dax_region *dax_region;
struct nd_namespace_common *ndns;
@@ -123,14 +122,17 @@ static int dax_pmem_probe(struct device *dev)
/* adjust the dax_region resource to the start of data */
res.start += le64_to_cpu(pfn_sb->dataoff);
- nd_region = to_nd_region(dev->parent);
- dax_region = alloc_dax_region(dev, nd_region->id, &res,
+ rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
+ if (rc != 2)
+ return -EINVAL;
+
+ dax_region = alloc_dax_region(dev, region_id, &res,
le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
if (!dax_region)
return -ENOMEM;
/* TODO: support for subdividing a dax region... */
- dev_dax = devm_create_dev_dax(dax_region, &res, 1);
+ dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);
/* child dev_dax instances now own the lifetime of the dax_region */
dax_region_put(dax_region);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ce9e563e6e1d..938eb4868f7f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -278,6 +278,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc)
}
EXPORT_SYMBOL_GPL(dax_write_cache);
+bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+ return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 57da14c15987..56e0a0e1b600 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence)
if (WARN_ON(!fence))
return -EINVAL;
- if (!ktime_to_ns(fence->timestamp)) {
- fence->timestamp = ktime_get();
- smp_mb__before_atomic();
- }
-
if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
ret = -EINVAL;
@@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence)
* we might have raced with the unlocked dma_fence_signal,
* still run through all callbacks
*/
- } else
+ } else {
+ fence->timestamp = ktime_get();
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
trace_dma_fence_signaled(fence);
+ }
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
@@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence)
if (!fence)
return -EINVAL;
- if (!ktime_to_ns(fence->timestamp)) {
- fence->timestamp = ktime_get();
- smp_mb__before_atomic();
- }
-
if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return -EINVAL;
+ fence->timestamp = ktime_get();
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
trace_dma_fence_signaled(fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 82a6e7f6d37f..59a3b2f8ee91 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -84,7 +84,7 @@ static void sync_print_fence(struct seq_file *s,
show ? "_" : "",
sync_status_str(status));
- if (status) {
+ if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
struct timespec64 ts64 =
ktime_to_timespec64(fence->timestamp);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 545e2c5c4815..d7e219d2669d 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -391,7 +391,13 @@ static void sync_fill_fence_info(struct dma_fence *fence,
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
- info->timestamp_ns = ktime_to_ns(fence->timestamp);
+ while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+ !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+ cpu_relax();
+ info->timestamp_ns =
+ test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+ ktime_to_ns(fence->timestamp) :
+ ktime_set(0, 0);
}
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
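Across the dma-fence, sync_debug and sync_file hunks, the timestamp moves from "written by whoever signals first, guarded by a barrier" to "written only by the thread that wins test_and_set_bit(SIGNALED), then published via a dedicated TIMESTAMP_BIT". A reader that observes SIGNALED but not yet TIMESTAMP therefore knows the signaler is between the two stores and can spin briefly, as sync_fill_fence_info() now does. The pairing, simplified from the hunks above:

    static void example_publish(struct dma_fence *fence)
    {
            /* only the test_and_set_bit(SIGNALED) winner gets here */
            fence->timestamp = ktime_get();
            set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
    }

    static ktime_t example_read_timestamp(struct dma_fence *fence)
    {
            while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
                   !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
                    cpu_relax();    /* signaler is mid-publication */
            return fence->timestamp;
    }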
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index a485864cb512..06432d84cbf8 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -532,7 +532,7 @@ static inline uint32_t fsi_smode_sid(int x)
return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
}
-static const uint32_t fsi_slave_smode(int id)
+static uint32_t fsi_slave_smode(int id)
{
return FSI_SMODE_WSC | FSI_SMODE_ECRC
| fsi_smode_sid(id)
@@ -883,17 +883,16 @@ struct bus_type fsi_bus_type = {
};
EXPORT_SYMBOL_GPL(fsi_bus_type);
-static int fsi_init(void)
+static int __init fsi_init(void)
{
return bus_register(&fsi_bus_type);
}
+postcore_initcall(fsi_init);
static void fsi_exit(void)
{
bus_unregister(&fsi_bus_type);
}
-
-module_init(fsi_init);
module_exit(fsi_exit);
module_param(discard_errors, int, 0664);
MODULE_LICENSE("GPL");
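Switching fsi_init() from module_init() to postcore_initcall() is about built-in link order: when everything is compiled in, a bus must be registered before the subsys- and device-level initcalls that attach drivers and devices to it, and the postcore level runs early enough to guarantee that. A generic sketch of the idiom:

    static struct bus_type example_bus_type = {
            .name = "example",
    };

    static int __init example_bus_init(void)
    {
            return bus_register(&example_bus_type);
    }
    /* runs before device_initcall()/module_init() code when built in */
    postcore_initcall(example_bus_init);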
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5f8ada1d872b..37971d9402e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -101,7 +101,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
if (adev->kfd) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = 0xFF00,
- .num_mec = adev->gfx.mec.num_mec,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
};
@@ -122,7 +121,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
* nbits is not compile time constant */
- last_valid_bit = adev->gfx.mec.num_mec
+ last_valid_bit = 1 /* only first MEC can have compute queues */
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index f621ee115c98..5e771bc11b00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -198,12 +198,16 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
result = idr_find(&fpriv->bo_list_handles, id);
if (result) {
- if (kref_get_unless_zero(&result->refcount))
+ if (kref_get_unless_zero(&result->refcount)) {
+ rcu_read_unlock();
mutex_lock(&result->lock);
- else
+ } else {
+ rcu_read_unlock();
result = NULL;
+ }
+ } else {
+ rcu_read_unlock();
}
- rcu_read_unlock();
return result;
}
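The reshuffled amdgpu_bo_list_get() exists because the old code could call mutex_lock(), which may sleep, while still inside rcu_read_lock(). The fix leaves the RCU read-side section on every path before touching the mutex, while keeping the kref_get_unless_zero() ref-or-drop step inside it. The canonical form of this lookup, with generic names:

    struct example_obj {
            struct kref refcount;
            struct mutex lock;
    };

    static struct example_obj *example_lookup(struct idr *idr, int id)
    {
            struct example_obj *obj;

            rcu_read_lock();
            obj = idr_find(idr, id);
            if (obj && !kref_get_unless_zero(&obj->refcount))
                    obj = NULL;     /* raced with the final kref_put() */
            rcu_read_unlock();      /* leave RCU before anything that sleeps */

            if (obj)
                    mutex_lock(&obj->lock);
            return obj;
    }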
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3a0b69b09ed6..c9b9c88231aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1475,21 +1475,23 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ u32 data;
- if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (se_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
+
+ if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (sh_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- } else {
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- }
+
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 88187bfc5ea3..3f95f7cb4019 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -226,10 +226,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
- /* We only use the first MEC */
- if (kfd->shared_resources.num_mec > 1)
- kfd->shared_resources.num_mec = 1;
-
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 955aa304ff48..602769ced3bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -77,13 +77,6 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
return false;
}
-unsigned int get_mec_num(struct device_queue_manager *dqm)
-{
- BUG_ON(!dqm || !dqm->dev);
-
- return dqm->dev->shared_resources.num_mec;
-}
-
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
BUG_ON(!dqm || !dqm->dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 66b9615bc3c1..faf820a06400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,7 +180,6 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-unsigned int get_mec_num(struct device_queue_manager *dqm);
unsigned int get_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 91ef1484b3bb..36f376677a53 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -63,9 +63,6 @@ struct kgd2kfd_shared_resources {
/* Bit n == 1 means VMID n is available for KFD. */
unsigned int compute_vmid_bitmap;
- /* number of mec available from the hardware */
- uint32_t num_mec;
-
/* number of pipes per mec */
uint32_t num_pipe_per_mec;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d6f097f44b6c..197174e562d2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2128,15 +2128,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->AvfsGbCksOff.m2_shift = 12;
pp_table->AvfsGbCksOff.b_shift = 0;
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].sclk_offset == 0)
- pp_table->StaticVoltageOffsetVid[i] = 248;
- else
- pp_table->StaticVoltageOffsetVid[i] =
- (uint8_t)(dep_table->entries[i].sclk_offset *
- VOLTAGE_VID_OFFSET_SCALE2 /
- VOLTAGE_VID_OFFSET_SCALE1);
- }
+ for (i = 0; i < dep_table->count; i++)
+ pp_table->StaticVoltageOffsetVid[i] =
+ convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->disp_clk_quad_eqn_a) &&
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 213fb837e1c4..08af8d6b844b 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -544,7 +544,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
DP_DETAILED_CAP_INFO_AVAILABLE;
int clk;
int bpc;
- char id[6];
+ char id[7];
int len;
uint8_t rev[2];
int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
@@ -583,6 +583,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
seq_puts(m, "\t\tType: N/A\n");
}
+ memset(id, 0, sizeof(id));
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
@@ -591,7 +592,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2);
+ len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
if (len > 0)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
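
Two small bugs are fixed in drm_dp_downstream_debug(): the branch-device ID read from DPCD is 6 raw bytes with no terminator, so printing it with %s needs a 7-byte, pre-zeroed buffer, and the software-revision read passed &rev (a pointer to the array) where the array itself was meant. The string half, reduced to a sketch (read_id() standing in for drm_dp_downstream_id()):

    char id[7];                 /* 6 ID bytes + 1 for the terminator */

    memset(id, 0, sizeof(id));  /* guarantees id[6] == '\0' */
    read_id(id);                /* fills id[0..5] only */
    printf("ID: %s\n", id);     /* safe: always NUL-terminated */
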
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index bfd237c15e76..ae5f06895562 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
return false;
}
+ /*
+ * ignore out-of-order messages or messages that are part of a
+ * failed transaction
+ */
+ if (!recv_hdr.somt && !msg->have_somt)
+ return false;
+
/* get length contained in this portion */
msg->curchunk_len = recv_hdr.msg_len;
msg->curchunk_hdrlen = hdrlen;
@@ -2164,7 +2171,7 @@ out_unlock:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
@@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
- return;
+ return false;
}
ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
- return;
+ return false;
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
@@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
replyblock, len);
if (ret != len) {
- DRM_DEBUG_KMS("failed to read a chunk\n");
+ DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+ len, ret);
+ return false;
}
+
ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
- if (ret == false)
+ if (!ret) {
DRM_DEBUG_KMS("failed to build sideband msg\n");
+ return false;
+ }
+
curreply += len;
replylen -= len;
}
+ return true;
}
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, false);
+ if (!drm_dp_get_one_sb_msg(mgr, false)) {
+ memset(&mgr->down_rep_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->down_rep_recv.have_eomt) {
struct drm_dp_sideband_msg_tx *txmsg;
@@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, true);
+
+ if (!drm_dp_get_one_sb_msg(mgr, true)) {
+ memset(&mgr->up_req_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
- drm_dp_put_mst_branch_device(mstb);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
return ret;
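
drm_dp_get_one_sb_msg() used to return void, so a failed or truncated DPCD read left a half-built message in mgr->down_rep_recv / mgr->up_req_recv to corrupt the next interrupt's parsing. Returning bool lets both handlers throw the partial state away. The control flow, condensed around a hypothetical struct mgr:

    static int handle_reply(struct mgr *mgr)
    {
        if (!read_one_msg(mgr)) {           /* any chunk failed */
            /* discard the partial message so the next IRQ starts
             * from a clean slate instead of appending to garbage */
            memset(&mgr->recv, 0, sizeof(mgr->recv));
            return 0;
        }
        if (mgr->recv.have_eomt) {
            /* ... process the now-complete sideband message ... */
        }
        return 0;
    }
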
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 1d185347c64c..305dc3d4ff77 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -75,6 +75,7 @@ config DRM_EXYNOS_DP
config DRM_EXYNOS_HDMI
bool "HDMI"
depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you want to use Exynos HDMI for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 35a8dfc93836..242bd50faa26 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -453,7 +453,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
struct component_match *match;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
match = exynos_drm_match_add(&pdev->dev);
if (IS_ERR(match))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a11b79596e2f..b6a46d9a016e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1651,8 +1651,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
return ret;
dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
- if (!dsi->bridge_node)
- return -EINVAL;
return 0;
}
@@ -1687,9 +1685,11 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
return ret;
}
- bridge = of_drm_find_bridge(dsi->bridge_node);
- if (bridge)
- drm_bridge_attach(encoder, bridge, NULL);
+ if (dsi->bridge_node) {
+ bridge = of_drm_find_bridge(dsi->bridge_node);
+ if (bridge)
+ drm_bridge_attach(encoder, bridge, NULL);
+ }
return mipi_dsi_host_register(&dsi->dsi_host);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index e45720543a45..16bbee897e0d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -340,16 +340,10 @@ static int exynos_mic_bind(struct device *dev, struct device *master,
void *data)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
- int ret;
- mic->bridge.funcs = &mic_bridge_funcs;
- mic->bridge.of_node = dev->of_node;
mic->bridge.driver_private = mic;
- ret = drm_bridge_add(&mic->bridge);
- if (ret)
- DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
- return ret;
+ return 0;
}
static void exynos_mic_unbind(struct device *dev, struct device *master,
@@ -365,8 +359,6 @@ static void exynos_mic_unbind(struct device *dev, struct device *master,
already_disabled:
mutex_unlock(&mic_mutex);
-
- drm_bridge_remove(&mic->bridge);
}
static const struct component_ops exynos_mic_component_ops = {
@@ -461,6 +453,15 @@ static int exynos_mic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mic);
+ mic->bridge.funcs = &mic_bridge_funcs;
+ mic->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&mic->bridge);
+ if (ret) {
+ DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
+ return ret;
+ }
+
pm_runtime_enable(dev);
ret = component_add(dev, &exynos_mic_component_ops);
@@ -479,8 +480,13 @@ err:
static int exynos_mic_remove(struct platform_device *pdev)
{
+ struct exynos_mic *mic = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &exynos_mic_component_ops);
pm_runtime_disable(&pdev->dev);
+
+ drm_bridge_remove(&mic->bridge);
+
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 06bfbe400cf1..d3b69d66736f 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1501,8 +1501,6 @@ static void hdmi_disable(struct drm_encoder *encoder)
*/
cancel_delayed_work(&hdata->hotplug_work);
cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
-
- hdmiphy_disable(hdata);
}
static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
@@ -1676,7 +1674,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
return hdmi_bridge_init(hdata);
}
-static struct of_device_id hdmi_match_types[] = {
+static const struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos4210-hdmi",
.data = &exynos4210_hdmi_driver_data,
@@ -1934,8 +1932,7 @@ static int hdmi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int exynos_hdmi_suspend(struct device *dev)
+static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
@@ -1944,7 +1941,7 @@ static int exynos_hdmi_suspend(struct device *dev)
return 0;
}
-static int exynos_hdmi_resume(struct device *dev)
+static int __maybe_unused exynos_hdmi_resume(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
int ret;
@@ -1955,7 +1952,6 @@ static int exynos_hdmi_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops exynos_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
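
Replacing the #ifdef CONFIG_PM guard with __maybe_unused is the preferred kernel pattern: the callbacks are always compiled (so configuration-specific bitrot is caught in every build) and the compiler discards them when the SET_*_PM_OPS() macros expand to nothing. Schematically, with hypothetical names:

    static int __maybe_unused foo_suspend(struct device *dev)
    {
        /* ... quiesce the hardware ... */
        return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
        /* ... bring the hardware back up ... */
        return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
        SET_RUNTIME_PM_OPS(foo_suspend, foo_resume, NULL)
    };
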
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6bed4f3ffcd6..a998a8dd783c 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1094,28 +1094,28 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
.atomic_check = mixer_atomic_check,
};
-static struct mixer_drv_data exynos5420_mxr_drv_data = {
+static const struct mixer_drv_data exynos5420_mxr_drv_data = {
.version = MXR_VER_128_0_0_184,
.is_vp_enabled = 0,
};
-static struct mixer_drv_data exynos5250_mxr_drv_data = {
+static const struct mixer_drv_data exynos5250_mxr_drv_data = {
.version = MXR_VER_16_0_33_0,
.is_vp_enabled = 0,
};
-static struct mixer_drv_data exynos4212_mxr_drv_data = {
+static const struct mixer_drv_data exynos4212_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
};
-static struct mixer_drv_data exynos4210_mxr_drv_data = {
+static const struct mixer_drv_data exynos4210_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
.has_sclk = 1,
};
-static struct of_device_id mixer_match_types[] = {
+static const struct of_device_id mixer_match_types[] = {
{
.compatible = "samsung,exynos4210-mixer",
.data = &exynos4210_mxr_drv_data,
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 2deb05f618fb..7cb0818a13de 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
- bool have_enabled_pipe = false;
int pipe, id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
- hrtimer_cancel(&irq->vblank_timer.timer);
-
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- have_enabled_pipe =
- pipe_is_enabled(vgpu, pipe);
- if (have_enabled_pipe)
- break;
+ if (pipe_is_enabled(vgpu, pipe))
+ goto out;
}
}
- if (have_enabled_pipe)
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
+ /* all the pipes are disabled */
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ return;
+
+out:
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index 152f16c11878..348b29a845c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence,
return NOTIFY_DONE;
}
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags)
{
struct clflush *clflush;
@@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
*/
if (!i915_gem_object_has_struct_page(obj)) {
obj->cache_dirty = false;
- return;
+ return false;
}
/* If the GPU is snooping the contents of the CPU cache,
@@ -140,7 +140,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* tracking.
*/
if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
- return;
+ return false;
trace_i915_gem_object_clflush(obj);
@@ -179,4 +179,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
}
obj->cache_dirty = false;
+ return true;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h
index 2455a7820937..f390247561b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.h
@@ -28,7 +28,7 @@
struct drm_i915_private;
struct drm_i915_gem_object;
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags);
#define I915_CLFLUSH_FORCE BIT(0)
#define I915_CLFLUSH_SYNC BIT(1)
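
The new bool return feeds the execbuffer change further down: when a clflush was actually emitted, the object can no longer honour the user's EXEC_OBJECT_ASYNC hint, because the request must wait for the cache writeback. The caller-side pattern, as used in eb_move_to_gpu():

    if (unlikely(obj->cache_dirty && !obj->cache_coherent)) {
        if (i915_gem_clflush_object(obj, 0))
            entry->flags &= ~EXEC_OBJECT_ASYNC; /* must serialise now */
    }
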
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 054b2e54cdaf..e9503f6d1100 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -560,9 +560,6 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
eb->args->flags |= __EXEC_HAS_RELOC;
}
- entry->flags |= __EXEC_OBJECT_HAS_PIN;
- GEM_BUG_ON(eb_vma_misplaced(entry, vma));
-
if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_get_fence(vma);
if (unlikely(err)) {
@@ -574,6 +571,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
}
+ entry->flags |= __EXEC_OBJECT_HAS_PIN;
+ GEM_BUG_ON(eb_vma_misplaced(entry, vma));
+
return 0;
}
@@ -1458,7 +1458,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* to read. However, if the array is not writable the user loses
* the updated relocation values.
*/
- if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs))))
+ if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
return -EFAULT;
do {
@@ -1775,7 +1775,7 @@ out:
}
}
- return err ?: have_copy;
+ return err;
}
static int eb_relocate(struct i915_execbuffer *eb)
@@ -1825,7 +1825,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
int err;
for (i = 0; i < count; i++) {
- const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+ struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct i915_vma *vma = exec_to_vma(entry);
struct drm_i915_gem_object *obj = vma->obj;
@@ -1841,12 +1841,14 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
eb->request->capture_list = capture;
}
+ if (unlikely(obj->cache_dirty && !obj->cache_coherent)) {
+ if (i915_gem_clflush_object(obj, 0))
+ entry->flags &= ~EXEC_OBJECT_ASYNC;
+ }
+
if (entry->flags & EXEC_OBJECT_ASYNC)
goto skip_flushes;
- if (unlikely(obj->cache_dirty && !obj->cache_coherent))
- i915_gem_clflush_object(obj, 0);
-
err = i915_gem_request_await_object
(eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
if (err)
@@ -2209,7 +2211,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_unlock;
err = eb_relocate(&eb);
- if (err)
+ if (err) {
/*
* If the user expects the execobject.offset and
* reloc.presumed_offset to be an exact match,
@@ -2218,8 +2220,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* relocation.
*/
args->flags &= ~__EXEC_HAS_RELOC;
- if (err < 0)
goto err_vma;
+ }
if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4a673fc1a432..20cf272c97b1 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -284,12 +284,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma)
static inline void __i915_vma_unpin(struct i915_vma *vma)
{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
vma->flags--;
}
static inline void i915_vma_unpin(struct i915_vma *vma)
{
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
__i915_vma_unpin(vma);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 80e96f1f49d2..9edeaaef77ad 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1896,8 +1896,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
val &= ~LOADGEN_SELECT;
- if (((rate < 600000) && (width == 4) && (ln >= 1)) ||
- ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) {
+ if ((rate <= 600000 && width == 4 && ln >= 1) ||
+ (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dec9e58545a1..9471c88d449e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3427,26 +3427,6 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}
-static void intel_update_primary_planes(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
-
- for_each_crtc(dev, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->base.visible) {
- trace_intel_update_plane(&plane->base,
- to_intel_crtc(crtc));
-
- plane->update_plane(plane,
- to_intel_crtc_state(crtc->state),
- plane_state);
- }
- }
-}
-
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -3499,6 +3479,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
struct drm_atomic_state *state;
int ret;
+
+ /* reset doesn't touch the display */
+ if (!i915.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
+ return;
+
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
@@ -3512,12 +3498,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
drm_modeset_backoff(ctx);
}
-
- /* reset doesn't touch the display, but flips might get nuked anyway, */
- if (!i915.force_reset_modeset_test &&
- !gpu_reset_clobbers_display(dev_priv))
- return;
-
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
@@ -3547,6 +3527,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
struct drm_atomic_state *state = dev_priv->modeset_restore_state;
int ret;
+ /* reset doesn't touch the display */
+ if (!i915.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
+ return;
+
+ if (!state)
+ goto unlock;
+
/*
* Flips in the rings will be nuked by the reset,
* so complete all pending flips so that user space
@@ -3558,22 +3546,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(dev_priv)) {
- if (!state) {
- /*
- * Flips in the rings have been nuked by the reset,
- * so update the base address of all primary
- * planes to the the last fb to make sure we're
- * showing the correct fb after a reset.
- *
- * FIXME: Atomic will make this obsolete since we won't schedule
- * CS-based flips (which might get lost in gpu resets) any more.
- */
- intel_update_primary_planes(dev);
- } else {
- ret = __intel_display_resume(dev, state, ctx);
+ /* for testing only restore the display */
+ ret = __intel_display_resume(dev, state, ctx);
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- }
} else {
/*
* The display has been reset as well,
@@ -3597,8 +3573,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
}
- if (state)
- drm_atomic_state_put(state);
+ drm_atomic_state_put(state);
+unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
@@ -9117,6 +9093,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
u64 power_domain_mask;
bool active;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ intel_crtc_init_scalers(crtc, pipe_config);
+
+ pipe_config->scaler_state.scaler_id = -1;
+ pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
+ }
+
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
@@ -9145,13 +9128,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (INTEL_GEN(dev_priv) >= 9) {
- intel_crtc_init_scalers(crtc, pipe_config);
-
- pipe_config->scaler_state.scaler_id = -1;
- pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
- }
-
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT_ULL(power_domain);
@@ -9540,7 +9516,16 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* On some platforms writing CURCNTR first will also
* cause CURPOS to be armed by the CURBASE write.
* Without the CURCNTR write the CURPOS write would
- * arm itself.
+ * arm itself. Thus we always start the full update
+ * with a CURCNTR write.
+ *
+ * On other platforms CURPOS always requires the
+ * CURBASE write to arm the update. Additonally
+ * a write to any of the cursor register will cancel
+ * an already armed cursor update. Thus leaving out
+ * the CURBASE write after CURPOS could lead to a
+ * cursor that doesn't appear to move, or even change
+ * shape. Thus we always write CURBASE.
*
* CURCNTR and CUR_FBC_CTL are always
* armed by the CURBASE write only.
@@ -9559,6 +9544,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
plane->cursor.cntl = cntl;
} else {
I915_WRITE_FW(CURPOS(pipe), pos);
+ I915_WRITE_FW(CURBASE(pipe), base);
}
POSTING_READ_FW(CURBASE(pipe));
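
The extended comment and the extra CURBASE write encode a hardware rule: the cursor registers are latched, and only a specific write arms the pending update. On the platforms covered by the else branch, CURPOS alone never arms, so even a position-only move must end with CURBASE. A condensed sketch of that path (dev_priv must be in scope for the I915_WRITE_FW() macros):

    /* Position-only cursor move: CURBASE is written even though the
     * base address is unchanged, purely as the arming write. */
    static void cursor_move_only(struct drm_i915_private *dev_priv,
                                 enum pipe pipe, u32 pos, u32 base)
    {
        I915_WRITE_FW(CURPOS(pipe), pos);
        I915_WRITE_FW(CURBASE(pipe), base);   /* arms the update */
        POSTING_READ_FW(CURBASE(pipe));
    }
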
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 52d5b82790d9..c17ed0e62b67 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_SKYLAKE(dev_priv))
return true;
- if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+ if (IS_KABYLAKE(dev_priv))
return true;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 48ea0fca1f72..40b224b44d1b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4463,8 +4463,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
(plane_bytes_per_line / 512 < 1))
selected_result = method2;
- else if ((ddb_allocation && ddb_allocation /
- fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
+ else if (ddb_allocation >=
+ fixed_16_16_to_u32_round_up(plane_blocks_per_line))
selected_result = min_fixed_16_16(method1, method2);
else if (latency >= linetime_us)
selected_result = min_fixed_16_16(method1, method2);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 627e2aa09766..8cdec455cf7d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -206,7 +206,7 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->ring_mask = BIT(0);
i915->engine[RCS] = mock_engine(i915, "mock");
if (!i915->engine[RCS])
- goto err_dependencies;
+ goto err_priorities;
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 49546222c6d3..6276bb834b4f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -54,7 +54,7 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRA8888,
- DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 636031a30e17..8aca20209cb8 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
/* port@1 is the output port */
ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
- if (ret)
+ if (ret && ret != -ENODEV)
return ret;
imxpd->dev = dev;
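
imx_pd_bind() now tolerates -ENODEV from drm_of_find_panel_or_bridge(): a parallel display with neither a panel nor a bridge described in the device tree is a legal configuration here, so only real errors (notably -EPROBE_DEFER) abort the bind. The idiom in isolation:

    /* Distinguish "optional resource absent" (-ENODEV) from genuine
     * failures such as -EPROBE_DEFER, which must propagate. */
    ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
                                      &imxpd->bridge);
    if (ret && ret != -ENODEV)
        return ret;
    /* ret == -ENODEV: no panel, no bridge - carry on regardless */
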
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 147b22163f9f..dab78c660dd6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1158,8 +1158,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
return -ENODEV;
if (WARN_ON(msg->size > 16))
return -E2BIG;
- if (msg->size == 0)
- return msg->size;
ret = nvkm_i2c_aux_acquire(aux);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8d1df5678eaa..f362c9fa8b3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -409,7 +409,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
- struct drm_crtc *crtc;
if (!suspend) {
if (drm_drv_uses_atomic_modeset(dev))
@@ -418,10 +417,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
drm_crtc_force_disable_all(dev);
}
- /* Make sure that drm and hw vblank irqs get properly disabled. */
- drm_for_each_crtc(crtc, dev)
- drm_crtc_vblank_off(crtc);
-
/* disable flip completion events */
nvif_notify_put(&drm->flip);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index e3132a2ce34d..2bc0dc985214 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3674,15 +3674,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_mode_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
if (aux) {
- nv_encoder->i2c = &nv_connector->aux.ddc;
+ if (disp->disp->oclass < GF110_DISP) {
+ /* HW has no support for address-only
+ * transactions, so we're required to
+ * use custom I2C-over-AUX code.
+ */
+ nv_encoder->i2c = &aux->i2c;
+ } else {
+ nv_encoder->i2c = &nv_connector->aux.ddc;
+ }
nv_encoder->aux = aux;
}
/*TODO: Use DP Info Table to check for support. */
- if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
+ if (disp->disp->oclass >= GF110_DISP) {
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
nv_connector->base.base.id,
&nv_encoder->dp.mstm);
@@ -3931,6 +3940,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
asyh->clr.mask, asyh->set.mask);
+ if (crtc_state->active && !asyh->state.active)
+ drm_crtc_vblank_off(crtc);
if (asyh->clr.mask) {
nv50_head_flush_clr(head, asyh, atom->flush_disable);
@@ -4016,11 +4027,13 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
nv50_head_flush_set(head, asyh);
interlock_core = 1;
}
- }
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc->state->event)
- drm_crtc_vblank_get(crtc);
+ if (asyh->state.active) {
+ if (!crtc_state->active)
+ drm_crtc_vblank_on(crtc);
+ if (asyh->state.event)
+ drm_crtc_vblank_get(crtc);
+ }
}
/* Update plane(s). */
@@ -4067,12 +4080,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
if (crtc->state->event) {
unsigned long flags;
/* Get correct count/ts if racing with vblank irq */
- drm_accurate_vblank_count(crtc);
+ if (crtc->state->active)
+ drm_accurate_vblank_count(crtc);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
crtc->state->event = NULL;
- drm_crtc_vblank_put(crtc);
+ if (crtc->state->active)
+ drm_crtc_vblank_put(crtc);
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index a24312fb0228..a1e8bf48b778 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -22,6 +22,7 @@ struct nvkm_ior {
unsigned proto_evo:4;
enum nvkm_ior_proto {
CRT,
+ TV,
TMDS,
LVDS,
DP,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 19c635663399..6ea19466f436 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -22,7 +22,7 @@ struct nv50_disp {
u8 type[3];
} pior;
- struct nv50_disp_chan *chan[17];
+ struct nv50_disp_chan *chan[21];
};
void nv50_disp_super_1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 85aff85394ac..be9e7f8c3b23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -62,6 +62,7 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type)
case 0:
switch (outp->info.type) {
case DCB_OUTPUT_ANALOG: *type = DAC; return CRT;
+ case DCB_OUTPUT_TV : *type = DAC; return TV;
case DCB_OUTPUT_TMDS : *type = SOR; return TMDS;
case DCB_OUTPUT_LVDS : *type = SOR; return LVDS;
case DCB_OUTPUT_DP : *type = SOR; return DP;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index c794b2c2d21e..6d8f21290aa2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base)
if (bar->bar[0].mem) {
addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
- nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
+ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 48f01e40b8fc..b768e66a472b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o
nvkm-y += nvkm/subdev/i2c/aux.o
nvkm-y += nvkm/subdev/i2c/auxg94.o
+nvkm-y += nvkm/subdev/i2c/auxgf119.o
nvkm-y += nvkm/subdev/i2c/auxgm200.o
nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index d172e42dd228..4c1f547da463 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -117,6 +117,10 @@ int
nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
u32 addr, u8 *data, u8 *size)
{
+ if (!*size && !aux->func->address_only) {
+ AUX_ERR(aux, "address-only transaction dropped");
+ return -ENOSYS;
+ }
return aux->func->xfer(aux, retry, type, addr, data, size);
}
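
An address-only I2C-over-AUX transaction carries no payload (*size == 0) and is used to generate start/stop conditions; hardware before GF119 cannot issue one, hence the new .address_only capability flag and the guard above. In the g94/gm200 xfer hunks that follow, the zero length also gets its own encoding, since the old size-1 arithmetic would underflow:

    /* Length encoding in the AUX ctrl word: normal transfers store
     * size-1 in the low bits; an address-only transfer sets bit 8
     * (0x00000100) instead, hence the widened field mask. */
    ctrl &= ~0x0001f1ff;                       /* was ~0x0001f0ff */
    ctrl |= type << 12;
    ctrl |= *size ? (*size - 1) : 0x00000100;
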
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 27a4a39c87f0..9587ab456d9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -3,6 +3,7 @@
#include "pad.h"
struct nvkm_i2c_aux_func {
+ bool address_only;
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
@@ -17,7 +18,12 @@ void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
+int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+ int, u8, struct nvkm_i2c_aux **);
+
int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *);
+int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
#define AUX_MSG(b,l,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index ab8cb196c34e..c8ab1b5741a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux)
return 0;
}
-static int
+int
g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
@@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
- ctrl &= ~0x0001f0ff;
+ ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
- ctrl |= *size - 1;
+ ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
@@ -160,14 +160,10 @@ out:
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
-static const struct nvkm_i2c_aux_func
-g94_i2c_aux_func = {
- .xfer = g94_i2c_aux_xfer,
-};
-
int
-g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
- struct nvkm_i2c_aux **paux)
+g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
+ struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
{
struct g94_i2c_aux *aux;
@@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
return -ENOMEM;
*paux = &aux->base;
- nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
+ nvkm_i2c_aux_ctor(func, pad, index, &aux->base);
aux->ch = drive;
aux->base.intr = 1 << aux->ch;
return 0;
}
+
+static const struct nvkm_i2c_aux_func
+g94_i2c_aux = {
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
new file mode 100644
index 000000000000..dab40cd8fe3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "aux.h"
+
+static const struct nvkm_i2c_aux_func
+gf119_i2c_aux = {
+ .address_only = true,
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index ee091fa79628..7ef60895f43a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
ctrl = nvkm_rd32(device, 0x00d954 + base);
- ctrl &= ~0x0001f0ff;
+ ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
- ctrl |= *size - 1;
+ ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
@@ -162,6 +162,7 @@ out:
static const struct nvkm_i2c_aux_func
gm200_i2c_aux_func = {
+ .address_only = true,
.xfer = gm200_i2c_aux_xfer,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
index d53212f1aa52..3bc4d0310076 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -28,7 +28,7 @@
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_s_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = g94_i2c_aux_new,
+ .aux_new_6 = gf119_i2c_aux_new,
.mode = g94_i2c_pad_mode,
};
@@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_x_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = g94_i2c_aux_new,
+ .aux_new_6 = gf119_i2c_aux_new,
};
int
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 699fe7f9b8bf..a2ab6dcdf4a2 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -184,7 +184,6 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
if (rdev->kfd) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = 0xFF00,
- .num_mec = 1,
.num_pipe_per_mec = 4,
.num_queue_per_pipe = 8
};
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 50c41c0a50ef..dcc539ba85d6 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -5,6 +5,10 @@ config DRM_ROCKCHIP
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
+ select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+ select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
+ select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
help
Choose this option if you have a Rockchip soc chipset.
This driver provides kernel mode setting and buffer
@@ -12,10 +16,10 @@ config DRM_ROCKCHIP
2D or 3D acceleration; acceleration is performed by other
IP found on the SoC.
+if DRM_ROCKCHIP
+
config ROCKCHIP_ANALOGIX_DP
bool "Rockchip specific extensions for Analogix DP driver"
- depends on DRM_ROCKCHIP
- select DRM_ANALOGIX_DP
help
This selects support for Rockchip SoC specific extensions
for the Analogix Core DP driver. If you want to enable DP
@@ -23,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP
config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
- depends on DRM_ROCKCHIP
- depends on EXTCON
- select SND_SOC_HDMI_CODEC if SND_SOC
+ depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
help
This selects support for Rockchip SoC specific extensions
for the cdn DP driver. If you want to enable Dp on
@@ -34,8 +36,6 @@ config ROCKCHIP_CDN_DP
config ROCKCHIP_DW_HDMI
bool "Rockchip specific extensions for Synopsys DW HDMI"
- depends on DRM_ROCKCHIP
- select DRM_DW_HDMI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
@@ -44,8 +44,6 @@ config ROCKCHIP_DW_HDMI
config ROCKCHIP_DW_MIPI_DSI
bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
- depends on DRM_ROCKCHIP
- select DRM_MIPI_DSI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare MIPI DSI driver. If you want to
@@ -54,8 +52,9 @@ config ROCKCHIP_DW_MIPI_DSI
config ROCKCHIP_INNO_HDMI
bool "Rockchip specific extensions for Innosilicon HDMI"
- depends on DRM_ROCKCHIP
help
This selects support for Rockchip SoC specific extensions
for the Innosilicon HDMI driver. If you want to enable
HDMI on RK3036 based SoC, you should select this option.
+
+endif
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 403bbd5f99a9..a12cc7ea99b6 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -520,6 +520,34 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
SCALER_DISPSTATX_EMPTY);
}
+static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+
+ if (crtc->state->event) {
+ unsigned long flags;
+
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ vc4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+ }
+}
+
static void vc4_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -530,6 +558,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
require_hvs_enabled(dev);
+ /* Enable vblank irq handling before the crtc is started, otherwise
+ * drm_crtc_vblank_get() fails in vc4_crtc_update_dlist().
+ */
+ drm_crtc_vblank_on(crtc);
+ vc4_crtc_update_dlist(crtc);
+
/* Turn on the scaler, which will wait for vstart to start
* compositing.
*/
@@ -541,9 +575,6 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
/* Turn on the pixel valve, which will emit the vstart signal. */
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
-
- /* Enable vblank irq handling after crtc is started. */
- drm_crtc_vblank_on(crtc);
}
static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -598,7 +629,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
struct drm_plane *plane;
bool debug_dump_regs = false;
@@ -620,25 +650,15 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- if (crtc->state->event) {
- unsigned long flags;
-
- crtc->state->event->pipe = drm_crtc_index(crtc);
-
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
- spin_lock_irqsave(&dev->event_lock, flags);
- vc4_crtc->event = crtc->state->event;
- crtc->state->event = NULL;
-
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
- } else {
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
- }
+ /* Only update DISPLIST if the CRTC was already running and is not
+ * being disabled.
+ * vc4_crtc_enable() takes care of updating the dlist just after
+ * re-enabling VBLANK interrupts and before enabling the engine.
+ * If the CRTC is being disabled, there's no point in updating this
+ * information.
+ */
+ if (crtc->state->active && old_state->active)
+ vc4_crtc_update_dlist(crtc);
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 35bf781e418e..c7056322211c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -30,49 +30,49 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
-static struct ttm_place vram_placement_flags = {
+static const struct ttm_place vram_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};
-static struct ttm_place vram_ne_placement_flags = {
+static const struct ttm_place vram_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place sys_placement_flags = {
+static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};
-static struct ttm_place sys_ne_placement_flags = {
+static const struct ttm_place sys_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place gmr_placement_flags = {
+static const struct ttm_place gmr_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
-static struct ttm_place gmr_ne_placement_flags = {
+static const struct ttm_place gmr_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place mob_placement_flags = {
+static const struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
-static struct ttm_place mob_ne_placement_flags = {
+static const struct ttm_place mob_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
@@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = {
.busy_placement = &vram_placement_flags
};
-static struct ttm_place vram_gmr_placement_flags[] = {
+static const struct ttm_place vram_gmr_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = {
}
};
-static struct ttm_place gmr_vram_placement_flags[] = {
+static const struct ttm_place gmr_vram_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags
};
-static struct ttm_place vram_gmr_ne_placement_flags[] = {
+static const struct ttm_place vram_gmr_ne_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = {
.busy_placement = &sys_ne_placement_flags
};
-static struct ttm_place evictable_placement_flags[] = {
+static const struct ttm_place evictable_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 99a7f4ab7d97..86178796de6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -779,8 +779,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
if (ret)
return ret;
- header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
- &header->handle);
+ header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
+ &header->handle);
if (!header->cb_header) {
ret = -ENOMEM;
goto out_no_cb_header;
@@ -790,7 +790,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
cb_hdr = header->cb_header;
offset = header->node.start << PAGE_SHIFT;
header->cmd = man->map + offset;
- memset(cb_hdr, 0, sizeof(*cb_hdr));
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
@@ -827,8 +826,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
return -ENOMEM;
- dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
- &header->handle);
+ dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
+ &header->handle);
if (!dheader)
return -ENOMEM;
@@ -837,7 +836,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
cb_hdr = &dheader->cb_header;
header->cb_header = cb_hdr;
header->cmd = dheader->cmd;
- memset(dheader, 0, sizeof(*dheader));
cb_hdr->status = SVGA_CB_STATUS_NONE;
cb_hdr->flags = SVGA_CB_FLAG_NONE;
cb_hdr->ptr.pa = (u64)header->handle +
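
dma_pool_zalloc() is the allocate-and-zero form of dma_pool_alloc(), so the separate memset() of the freshly allocated header can be dropped, and the zeroing can never be forgotten on a new code path. Usage in miniature:

    /* One step instead of dma_pool_alloc() + memset(). */
    cb_hdr = dma_pool_zalloc(man->headers, GFP_KERNEL, &header->handle);
    if (!cb_hdr)
        return -ENOMEM;
    /* every field starts at zero; set only what differs */
    cb_hdr->status = SVGA_CB_STATUS_NONE;
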
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 1f013d45c9e9..36c7b6c839c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
int ret;
cres = kzalloc(sizeof(*cres), GFP_KERNEL);
- if (unlikely(cres == NULL))
+ if (unlikely(!cres))
return -ENOMEM;
cres->hash.key = user_key | (res_type << 24);
@@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
int ret;
man = kzalloc(sizeof(*man), GFP_KERNEL);
- if (man == NULL)
+ if (!man)
return ERR_PTR(-ENOMEM);
man->dev_priv = dev_priv;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index bcc6d4136c87..4212b3e673bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
- if (unlikely(uctx->cotables[i] == NULL)) {
- ret = -ENOMEM;
+ if (unlikely(IS_ERR(uctx->cotables[i]))) {
+ ret = PTR_ERR(uctx->cotables[i]);
goto out_cotables;
}
}
@@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(ctx == NULL)) {
+ if (unlikely(!ctx)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
ret = -ENOMEM;
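
vmw_cotable_alloc() reports failure as an ERR_PTR-encoded pointer, so the old NULL check both missed every failure and then reported -ENOMEM regardless of the real cause. The standard idiom:

    /* ERR_PTR convention: a failing allocator encodes the negative
     * errno in the returned pointer instead of returning NULL. */
    uctx->cotables[i] = vmw_cotable_alloc(dev_priv, &uctx->res, i);
    if (IS_ERR(uctx->cotables[i])) {
        ret = PTR_ERR(uctx->cotables[i]);   /* recover the errno */
        goto out_cotables;
    }
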
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 6c026d75c180..d87861bbe971 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
return ERR_PTR(ret);
vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
- if (unlikely(vcotbl == NULL)) {
+ if (unlikely(!vcotbl)) {
ret = -ENOMEM;
goto out_no_alloc;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4a641555b960..4436d53ae16c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -227,7 +227,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_AUTH | DRM_RENDER_ALLOW),
};
-static struct pci_device_id vmw_pci_id_list[] = {
+static const struct pci_device_id vmw_pci_id_list[] = {
{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
{0, 0, 0}
};
@@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
char host_log[100] = {0};
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (unlikely(dev_priv == NULL)) {
+ if (unlikely(!dev_priv)) {
DRM_ERROR("Failed allocating a device private struct.\n");
return -ENOMEM;
}
@@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
int ret = -ENOMEM;
vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
- if (unlikely(vmw_fp == NULL))
+ if (unlikely(!vmw_fp))
return ret;
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
@@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev,
struct vmw_master *vmaster;
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
- if (unlikely(vmaster == NULL))
+ if (unlikely(!vmaster))
return -ENOMEM;
vmw_master_init(vmaster);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c7b53d987f06..2cfb3c93f42a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(node == NULL)) {
+ if (unlikely(!node)) {
DRM_ERROR("Failed to allocate a resource validation "
"entry.\n");
return -ENOMEM;
@@ -452,7 +452,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
struct vmw_resource_relocation *rel;
rel = kmalloc(sizeof(*rel), GFP_KERNEL);
- if (unlikely(rel == NULL)) {
+ if (unlikely(!rel)) {
DRM_ERROR("Failed to allocate a resource relocation.\n");
return -ENOMEM;
}
@@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+ return -EINVAL;
}
static int vmw_cmd_ok(struct vmw_private *dev_priv,
@@ -2584,7 +2584,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
/**
* vmw_cmd_dx_ia_set_vertex_buffers - Validate an
- * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command.
+ * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6b2708b4eafe..b8bc5bc7de7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
- if (unlikely(fman == NULL))
+ if (unlikely(!fman))
return NULL;
fman->dev_priv = dev_priv;
@@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (unlikely(fence == NULL))
+ if (unlikely(!fence))
return -ENOMEM;
ret = vmw_fence_obj_init(fman, fence, seqno,
@@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
return ret;
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
- if (unlikely(ufence == NULL)) {
+ if (unlikely(!ufence)) {
ret = -ENOMEM;
goto out_no_object;
}
@@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
struct vmw_fence_manager *fman = fman_from_fence(fence);
eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
- if (unlikely(eaction == NULL))
+ if (unlikely(!eaction))
return -ENOMEM;
eaction->event = event;
@@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
int ret;
event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (unlikely(event == NULL)) {
+ if (unlikely(!event)) {
DRM_ERROR("Failed to allocate an event.\n");
ret = -ENOMEM;
goto out_no_space;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c1900f4390a4..d2b03d4a3c86 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);
- if (unlikely(gman == NULL))
+ if (unlikely(!gman))
return -ENOMEM;
spin_lock_init(&gman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3d94ea67a825..61e06f0e8cd3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x = du->hotspot_x;
hotspot_y = du->hotspot_y;
+
+ if (plane->fb) {
+ hotspot_x += plane->fb->hot_x;
+ hotspot_y += plane->fb->hot_y;
+ }
+
du->cursor_surface = vps->surf;
du->cursor_dmabuf = vps->dmabuf;
@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + hotspot_x,
du->cursor_y + hotspot_y);
+
+ du->core_hotspot_x = hotspot_x - du->hotspot_x;
+ du->core_hotspot_y = hotspot_y - du->hotspot_y;
} else {
DRM_ERROR("Failed to update cursor image\n");
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 941bcfd131ff..b17f08fc50d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
if (dev_priv->has_dx) {
*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
- if (*otables == NULL)
+ if (!(*otables))
return -ENOMEM;
dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
} else {
*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
GFP_KERNEL);
- if (*otables == NULL)
+ if (!(*otables))
return -ENOMEM;
dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
@@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
- if (unlikely(mob == NULL))
+ if (unlikely(!mob))
return NULL;
mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 6063c9636d4a..97000996b8dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
reply_len = ebx;
reply = kzalloc(reply_len + 1, GFP_KERNEL);
- if (reply == NULL) {
+ if (!reply) {
DRM_ERROR("Cannot allocate memory for reply\n");
return -ENOMEM;
}
@@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
msg = kzalloc(msg_len, GFP_KERNEL);
- if (msg == NULL) {
+ if (!msg) {
DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
return -ENOMEM;
}
@@ -400,7 +400,7 @@ int vmw_host_log(const char *log)
msg_len = strlen(log) + strlen("log ") + 1;
msg = kzalloc(msg_len, GFP_KERNEL);
- if (msg == NULL) {
+ if (!msg) {
DRM_ERROR("Cannot allocate memory for log message\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7d591f653dfa..a96f90f017d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
int ret;
user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
- if (unlikely(user_bo == NULL)) {
+ if (unlikely(!user_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
return -ENOMEM;
}
@@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
}
backup = kzalloc(sizeof(*backup), GFP_KERNEL);
- if (unlikely(backup == NULL))
+ if (unlikely(!backup))
return -ENOMEM;
ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 68f135c5b0d8..9b832f136813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
}
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
- if (unlikely(ushader == NULL)) {
+ if (unlikely(!ushader)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM;
@@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
}
shader = kzalloc(sizeof(*shader), GFP_KERNEL);
- if (unlikely(shader == NULL)) {
+ if (unlikely(!shader)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_shader_size);
ret = -ENOMEM;
@@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
/* Allocate and pin a DMA buffer */
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (unlikely(buf == NULL))
+ if (unlikely(!buf))
return -ENOMEM;
ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 50be1f034f9e..5284e8d2f7ba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
* something arbitrarily large and we will reject any layout
* that doesn't fit prim_bb_mem later
*/
- dev->mode_config.max_width = 16384;
- dev->mode_config.max_height = 16384;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
}
vmw_kms_create_implicit_placement_property(dev_priv, false);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 2c58a390123a..778272514164 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -186,8 +186,13 @@ static int host1x_probe(struct platform_device *pdev)
return -ENOMEM;
err = iommu_attach_device(host->domain, &pdev->dev);
- if (err)
+ if (err == -ENODEV) {
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+ goto skip_iommu;
+ } else if (err) {
goto fail_free_domain;
+ }
geometry = &host->domain->geometry;
@@ -198,6 +203,7 @@ static int host1x_probe(struct platform_device *pdev)
host->iova_end = geometry->aperture_end;
}
+skip_iommu:
err = host1x_channel_list_init(&host->channel_list,
host->info->nb_channels);
if (err) {
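The host1x hunk above turns -ENODEV from iommu_attach_device() into a soft fallback rather than a probe failure. A minimal standalone sketch of that triage pattern, with the attach call stubbed out (nothing here is host1x API):

#include <errno.h>
#include <stdio.h>

/* Stub standing in for iommu_attach_device(); returns a simulated
 * error code so both branches of the triage can be exercised.
 */
static int fake_attach(int simulated_err)
{
	return simulated_err;
}

static int probe(int simulated_err)
{
	int err = fake_attach(simulated_err);

	if (err == -ENODEV) {
		/* No IOMMU available: release resources tied to the
		 * domain and continue probing without one.
		 */
		printf("no IOMMU, continuing without translation\n");
	} else if (err) {
		/* Any other error is still fatal. */
		printf("attach failed (%d), aborting probe\n", err);
		return err;
	}
	return 0;
}

int main(void)
{
	probe(-ENODEV); /* soft fallback */
	probe(-EINVAL); /* hard failure */
	return 0;
}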
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6fd01a692197..9017dcc14502 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2216,6 +2216,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_ORTEK)
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
#endif
#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3d911bfd91cf..c9ba4c6db74c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -824,6 +824,7 @@
#define USB_VENDOR_ID_ORTEK 0x05a4
#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
+#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
#define USB_VENDOR_ID_PLANTRONICS 0x047f
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 41b39464ded8..501e16a9227d 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2732,6 +2732,9 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
hidpp_battery_props,
sizeof(hidpp_battery_props),
GFP_KERNEL);
+ if (!battery_props)
+ return -ENOMEM;
+
num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f3e35e7a189d..aff20f4b6d97 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -620,16 +620,6 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
-static int mt_touch_input_mapped(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- if (usage->type == EV_KEY || usage->type == EV_ABS)
- set_bit(usage->type, hi->input->evbit);
-
- return -1;
-}
-
static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
{
__s32 quirks = td->mtclass.quirks;
@@ -969,8 +959,10 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
return 0;
if (field->application == HID_DG_TOUCHSCREEN ||
- field->application == HID_DG_TOUCHPAD)
- return mt_touch_input_mapped(hdev, hi, field, usage, bit, max);
+ field->application == HID_DG_TOUCHPAD) {
+ /* We own these mappings, tell hid-input to ignore them */
+ return -1;
+ }
/* let hid-core decide for the others */
return 0;
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 6620f15fec22..8783a064cdcf 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -5,6 +5,7 @@
*
* Ortek PKB-1700
* Ortek WKB-2000
+ * iHome IMAC-A210S
* Skycable wireless presenter
*
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
@@ -28,10 +29,10 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
- hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n");
+ hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n");
rdesc[55] = 0x92;
} else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
- hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n");
+ hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n");
rdesc[53] = 0x65;
}
return rdesc;
@@ -40,6 +41,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id ortek_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
{ }
};
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 76013eb5cb7f..c008847e0b20 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -680,18 +680,21 @@ static int usbhid_open(struct hid_device *hid)
struct usbhid_device *usbhid = hid->driver_data;
int res;
+ set_bit(HID_OPENED, &usbhid->iofl);
+
if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
return 0;
res = usb_autopm_get_interface(usbhid->intf);
/* the device must be awake to reliably request remote wakeup */
- if (res < 0)
+ if (res < 0) {
+ clear_bit(HID_OPENED, &usbhid->iofl);
return -EIO;
+ }
usbhid->intf->needs_remote_wakeup = 1;
set_bit(HID_RESUME_RUNNING, &usbhid->iofl);
- set_bit(HID_OPENED, &usbhid->iofl);
set_bit(HID_IN_POLLING, &usbhid->iofl);
res = hid_start_in(hid);
@@ -727,19 +730,20 @@ static void usbhid_close(struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
- return;
-
/*
* Make sure we don't restart data acquisition due to
* a resumption we no longer care about by avoiding racing
* with hid_start_in().
*/
spin_lock_irq(&usbhid->lock);
- clear_bit(HID_IN_POLLING, &usbhid->iofl);
clear_bit(HID_OPENED, &usbhid->iofl);
+ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL))
+ clear_bit(HID_IN_POLLING, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
+ return;
+
hid_cancel_delayed_stuff(usbhid);
usb_kill_urb(usbhid->urbin);
usbhid->intf->needs_remote_wakeup = 0;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index e9bf0bb87ac4..e57cc40cb768 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -606,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
out:
+ /* re-enable tasklet for use on re-open */
+ tasklet_enable(&channel->callback_event);
return ret;
}
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0af7fd311979..76c34f4fde13 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -566,6 +566,8 @@ static int applesmc_init_smcreg_try(void)
if (ret)
return ret;
s->fan_count = tmp[0];
+ if (s->fan_count > 10)
+ s->fan_count = 10;
ret = applesmc_get_lower_bound(&s->temp_begin, "T");
if (ret)
@@ -811,7 +813,8 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
char newkey[5];
u8 buffer[2];
- sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+ scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)],
+ to_index(attr));
ret = applesmc_read_key(newkey, buffer, 2);
speed = ((buffer[0] << 8 | buffer[1]) >> 2);
@@ -834,7 +837,8 @@ static ssize_t applesmc_store_fan_speed(struct device *dev,
if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
return -EINVAL; /* Bigger than a 14-bit value */
- sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+ scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)],
+ to_index(attr));
buffer[0] = (speed >> 6) & 0xff;
buffer[1] = (speed << 2) & 0xff;
@@ -903,7 +907,7 @@ static ssize_t applesmc_show_fan_position(struct device *dev,
char newkey[5];
u8 buffer[17];
- sprintf(newkey, FAN_ID_FMT, to_index(attr));
+ scnprintf(newkey, sizeof(newkey), FAN_ID_FMT, to_index(attr));
ret = applesmc_read_key(newkey, buffer, 16);
buffer[16] = 0;
@@ -1116,7 +1120,8 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
}
for (i = 0; i < num; i++) {
node = &grp->nodes[i];
- sprintf(node->name, grp->format, i + 1);
+ scnprintf(node->name, sizeof(node->name), grp->format,
+ i + 1);
node->sda.index = (grp->option << 16) | (i & 0xffff);
node->sda.dev_attr.show = grp->show;
node->sda.dev_attr.store = grp->store;
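The applesmc changes above replace unbounded sprintf() calls with scnprintf() when building four-character SMC keys. A userspace sketch of the difference, using snprintf() as a stand-in for the kernel's scnprintf() (the FAN_ID_FMT value is assumed here to mirror the driver):

#include <stdio.h>

#define FAN_ID_FMT "F%dID" /* assumed to match the driver's format */

int main(void)
{
	char newkey[5]; /* four-character SMC key plus NUL */

	/* Single-digit index: "F3ID" fits exactly. */
	snprintf(newkey, sizeof(newkey), FAN_ID_FMT, 3);
	printf("key: %s\n", newkey);

	/* A two-digit index needs five characters; the bounded call
	 * truncates to "F12I" instead of writing past the buffer the
	 * way sprintf() would.  The companion fan_count clamp keeps
	 * real indices from getting this large.
	 */
	snprintf(newkey, sizeof(newkey), FAN_ID_FMT, 12);
	printf("key: %s\n", newkey);
	return 0;
}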
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
index 0e05f75934c9..1858e3ce3993 100644
--- a/drivers/ide/ide-timings.c
+++ b/drivers/ide/ide-timings.c
@@ -104,19 +104,19 @@ u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
-#define EZ(v, unit) ((v) ? ENOUGH(v, unit) : 0)
+#define EZ(v, unit) ((v) ? ENOUGH((v) * 1000, unit) : 0)
static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
int T, int UT)
{
- q->setup = EZ(t->setup * 1000, T);
- q->act8b = EZ(t->act8b * 1000, T);
- q->rec8b = EZ(t->rec8b * 1000, T);
- q->cyc8b = EZ(t->cyc8b * 1000, T);
- q->active = EZ(t->active * 1000, T);
- q->recover = EZ(t->recover * 1000, T);
- q->cycle = EZ(t->cycle * 1000, T);
- q->udma = EZ(t->udma * 1000, UT);
+ q->setup = EZ(t->setup, T);
+ q->act8b = EZ(t->act8b, T);
+ q->rec8b = EZ(t->rec8b, T);
+ q->cyc8b = EZ(t->cyc8b, T);
+ q->active = EZ(t->active, T);
+ q->recover = EZ(t->recover, T);
+ q->cycle = EZ(t->cycle, T);
+ q->udma = EZ(t->udma, UT);
}
void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
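The EZ()/ENOUGH() pair above quantizes a timing value into whole clock periods: ENOUGH() is a ceiling division, and EZ() keeps a zero field at zero instead of rounding it up to one period. The patch only moves the unit scaling (* 1000) inside the macro; the arithmetic is unchanged, as this standalone check illustrates (values are made up):

#include <assert.h>
#include <stdio.h>

#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
#define EZ(v, unit)     ((v) ? ENOUGH((v) * 1000, unit) : 0)

int main(void)
{
	/* ceil(120 * 1000 / 25000) = 5 clock periods */
	assert(EZ(120, 25000) == 5);

	/* a zero field stays zero rather than becoming one period */
	assert(EZ(0, 25000) == 0);

	printf("EZ(120, 25000) = %d, EZ(0, 25000) = %d\n",
	       EZ(120, 25000), EZ(0, 25000));
	return 0;
}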
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a6cb379a4ebc..01236cef7bfb 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -268,6 +268,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
return ret;
ret = rdma_copy_addr(dev_addr, dev, NULL);
+ dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
dev_put(dev);
@@ -280,6 +281,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
&((const struct sockaddr_in6 *)addr)->sin6_addr,
dev, 1)) {
ret = rdma_copy_addr(dev_addr, dev, NULL);
+ dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
break;
@@ -405,10 +407,10 @@ static int addr4_resolve(struct sockaddr_in *src_in,
fl4.saddr = src_ip;
fl4.flowi4_oif = addr->bound_dev_if;
rt = ip_route_output_key(addr->net, &fl4);
- if (IS_ERR(rt)) {
- ret = PTR_ERR(rt);
- goto out;
- }
+ ret = PTR_ERR_OR_ZERO(rt);
+ if (ret)
+ return ret;
+
src_in->sin_family = AF_INET;
src_in->sin_addr.s_addr = fl4.saddr;
@@ -423,8 +425,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
*prt = rt;
return 0;
-out:
- return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -509,6 +509,11 @@ static int addr_resolve(struct sockaddr *src_in,
struct dst_entry *dst;
int ret;
+ if (!addr->net) {
+ pr_warn_ratelimited("%s: missing namespace\n", __func__);
+ return -EINVAL;
+ }
+
if (src_in->sa_family == AF_INET) {
struct rtable *rt = NULL;
const struct sockaddr_in *dst_in4 =
@@ -522,8 +527,12 @@ static int addr_resolve(struct sockaddr *src_in,
if (resolve_neigh)
ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
- ndev = rt->dst.dev;
- dev_hold(ndev);
+ if (addr->bound_dev_if) {
+ ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+ } else {
+ ndev = rt->dst.dev;
+ dev_hold(ndev);
+ }
ip_rt_put(rt);
} else {
@@ -539,14 +548,27 @@ static int addr_resolve(struct sockaddr *src_in,
if (resolve_neigh)
ret = addr_resolve_neigh(dst, dst_in, addr, seq);
- ndev = dst->dev;
- dev_hold(ndev);
+ if (addr->bound_dev_if) {
+ ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+ } else {
+ ndev = dst->dev;
+ dev_hold(ndev);
+ }
dst_release(dst);
}
- addr->bound_dev_if = ndev->ifindex;
- addr->net = dev_net(ndev);
+ if (ndev->flags & IFF_LOOPBACK) {
+ ret = rdma_translate_ip(dst_in, addr, NULL);
+ /*
+ * Put the loopback device and get the translated
+ * device instead.
+ */
+ dev_put(ndev);
+ ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+ } else {
+ addr->bound_dev_if = ndev->ifindex;
+ }
dev_put(ndev);
return ret;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 31bb82d8ecd7..0eb393237ba2 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -623,22 +623,11 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
return ret;
- if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
ndev = dev_get_by_index(&init_net, bound_if_index);
- if (ndev && ndev->flags & IFF_LOOPBACK) {
- pr_info("detected loopback device\n");
- dev_put(ndev);
-
- if (!device->get_netdev)
- return -EOPNOTSUPP;
-
- ndev = device->get_netdev(device, port);
- if (!ndev)
- return -ENODEV;
- }
- } else {
+ else
gid_type = IB_GID_TYPE_IB;
- }
+
ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
ndev, NULL);
@@ -1044,6 +1033,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
qp_attr_mask);
+ qp_attr->port_num = id_priv->id.port_num;
+ *qp_attr_mask |= IB_QP_PORT;
} else
ret = -ENOSYS;
@@ -2569,21 +2560,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- if (ndev->flags & IFF_LOOPBACK) {
- dev_put(ndev);
- if (!id_priv->id.device->get_netdev) {
- ret = -EOPNOTSUPP;
- goto err2;
- }
-
- ndev = id_priv->id.device->get_netdev(id_priv->id.device,
- id_priv->id.port_num);
- if (!ndev) {
- ret = -ENODEV;
- goto err2;
- }
- }
-
supported_gids = roce_gid_type_mask_support(id_priv->id.device,
id_priv->id.port_num);
gid_type = cma_route_gid_type(addr->dev_addr.network,
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index db958d3207ef..94a9eefb3cfc 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -42,6 +42,8 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
+static struct workqueue_struct *gid_cache_wq;
+
enum gid_op_type {
GID_DEL = 0,
GID_ADD
@@ -560,7 +562,7 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
}
INIT_WORK(&ndev_work->work, netdevice_event_work_handler);
- queue_work(ib_wq, &ndev_work->work);
+ queue_work(gid_cache_wq, &ndev_work->work);
return NOTIFY_DONE;
}
@@ -693,7 +695,7 @@ static int addr_event(struct notifier_block *this, unsigned long event,
dev_hold(ndev);
work->gid_attr.ndev = ndev;
- queue_work(ib_wq, &work->work);
+ queue_work(gid_cache_wq, &work->work);
return NOTIFY_DONE;
}
@@ -740,6 +742,10 @@ static struct notifier_block nb_inet6addr = {
int __init roce_gid_mgmt_init(void)
{
+ gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
+ if (!gid_cache_wq)
+ return -ENOMEM;
+
register_inetaddr_notifier(&nb_inetaddr);
if (IS_ENABLED(CONFIG_IPV6))
register_inet6addr_notifier(&nb_inet6addr);
@@ -764,4 +770,5 @@ void __exit roce_gid_mgmt_cleanup(void)
* ib-core is removed, all physical devices have been removed,
* so no issue with remaining hardware contexts.
*/
+ destroy_workqueue(gid_cache_wq);
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8ba9bfb073d1..2c98533a0203 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1296,7 +1296,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
struct ib_cq *cq;
struct ib_ucq_object *obj;
- struct ib_uverbs_event_queue *ev_queue;
int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1313,7 +1312,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
*/
uverbs_uobject_get(uobj);
cq = uobj->object;
- ev_queue = cq->cq_context;
obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
memset(&resp, 0, sizeof(resp));
@@ -1935,7 +1933,8 @@ static int modify_qp(struct ib_uverbs_file *file,
goto out;
}
- if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+ if ((cmd->base.attr_mask & IB_QP_PORT) &&
+ !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
ret = -EINVAL;
goto release_qp;
}
@@ -2005,28 +2004,13 @@ static int modify_qp(struct ib_uverbs_file *file,
rdma_ah_set_port_num(&attr->alt_ah_attr,
cmd->base.alt_dest.port_num);
- if (qp->real_qp == qp) {
- if (cmd->base.attr_mask & IB_QP_AV) {
- ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
- if (ret)
- goto release_qp;
- }
- ret = ib_security_modify_qp(qp,
- attr,
- modify_qp_mask(qp->qp_type,
- cmd->base.attr_mask),
- udata);
- } else {
- ret = ib_security_modify_qp(qp,
- attr,
- modify_qp_mask(qp->qp_type,
- cmd->base.attr_mask),
- NULL);
- }
+ ret = ib_modify_qp_with_udata(qp, attr,
+ modify_qp_mask(qp->qp_type,
+ cmd->base.attr_mask),
+ udata);
release_qp:
uobj_put_obj_read(qp);
-
out:
kfree(attr);
@@ -2103,7 +2087,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
struct ib_uverbs_destroy_qp cmd;
struct ib_uverbs_destroy_qp_resp resp;
struct ib_uobject *uobj;
- struct ib_qp *qp;
struct ib_uqp_object *obj;
int ret = -EINVAL;
@@ -2117,7 +2100,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- qp = uobj->object;
obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
/*
* Make sure we don't free the memory in remove_commit as we still
@@ -3019,7 +3001,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
{
struct ib_uverbs_ex_destroy_wq cmd = {};
struct ib_uverbs_ex_destroy_wq_resp resp = {};
- struct ib_wq *wq;
struct ib_uobject *uobj;
struct ib_uwq_object *obj;
size_t required_cmd_sz;
@@ -3053,7 +3034,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- wq = uobj->object;
obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
/*
* Make sure we don't free the memory in remove_commit as we still
@@ -3743,10 +3723,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
struct ib_uverbs_destroy_srq cmd;
struct ib_uverbs_destroy_srq_resp resp;
struct ib_uobject *uobj;
- struct ib_srq *srq;
struct ib_uevent_object *obj;
int ret = -EINVAL;
- enum ib_srq_type srq_type;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
@@ -3756,9 +3734,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- srq = uobj->object;
obj = container_of(uobj, struct ib_uevent_object, uobject);
- srq_type = srq->srq_type;
/*
* Make sure we don't free the memory in remove_commit as we still
* need the uobject memory to create the response.
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c973a83c898b..fb98ed67d5bc 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -452,6 +452,19 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
+/*
+ * This function creates an ah from the incoming packet.
+ * The incoming packet's dgid is the GID of the receiver node on which
+ * this code is executing, and its sgid is the GID of the sender.
+ *
+ * When resolving the destination's mac address, the arrived dgid is
+ * used as the sgid and the arrived sgid as the dgid, because the sgid
+ * holds the destination's GID to respond to.
+ *
+ * This is why, when calling rdma_addr_find_l2_eth_by_grh(), the
+ * positions of the dgid and sgid arguments do not match the order of
+ * the parameters.
+ */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
struct rdma_ah_attr *ah_attr)
@@ -507,11 +520,6 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
}
resolved_dev = dev_get_by_index(&init_net, if_index);
- if (resolved_dev->flags & IFF_LOOPBACK) {
- dev_put(resolved_dev);
- resolved_dev = idev;
- dev_hold(resolved_dev);
- }
rcu_read_lock();
if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
resolved_dev))
@@ -887,6 +895,7 @@ static const struct {
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
[IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
[IB_QPS_INIT] = {
.valid = 1,
.req_param = {
@@ -1268,20 +1277,36 @@ out:
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);
-int ib_modify_qp(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr,
- int qp_attr_mask)
+/**
+ * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
+ * @qp: The QP to modify.
+ * @attr: On input, specifies the QP attributes to modify. On output,
+ * the current values of selected QP attributes are returned.
+ * @attr_mask: A bit-mask used to specify which attributes of the QP
+ * are being modified.
+ * @udata: pointer to the user's input/output buffer information
+ *
+ * Returns 0 on success, or an appropriate error code on error.
+ */
+int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
{
+ int ret;
- if (qp_attr_mask & IB_QP_AV) {
- int ret;
-
- ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+ if (attr_mask & IB_QP_AV) {
+ ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
if (ret)
return ret;
}
+ return ib_security_modify_qp(qp, attr, attr_mask, udata);
+}
+EXPORT_SYMBOL(ib_modify_qp_with_udata);
- return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
+int ib_modify_qp(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask)
+{
+ return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 08772836fded..85527532c49d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -51,6 +51,8 @@
#define BNXT_RE_PAGE_SIZE_8M BIT(23)
#define BNXT_RE_PAGE_SIZE_1G BIT(30)
+#define BNXT_RE_MAX_MR_SIZE BIT(30)
+
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
@@ -60,6 +62,13 @@
#define BNXT_RE_RQ_WQE_THRESHOLD 32
+/*
+ * Setting the default ack delay value to 16, which means
+ * the default timeout is approx. 260ms (4 usec * 2^timeout)
+ */
+
+#define BNXT_RE_DEFAULT_ACK_DELAY 16
+
struct bnxt_re_work {
struct work_struct work;
unsigned long event;
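A quick check of the arithmetic in the new BNXT_RE_DEFAULT_ACK_DELAY comment, using the 4 usec * 2^timeout formula the comment itself cites: a value of 16 gives 262144 usec, i.e. roughly 260 ms.

#include <stdio.h>

int main(void)
{
	unsigned int ack_delay = 16; /* BNXT_RE_DEFAULT_ACK_DELAY */
	unsigned long timeout_us = 4UL << ack_delay; /* 4 usec * 2^16 */

	printf("%lu usec (~%lu ms)\n", timeout_us, timeout_us / 1000);
	return 0;
}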
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index c7bd68311d0c..f0e01b3ac711 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -145,10 +145,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
bnxt_qplib_get_guid(rdev->netdev->dev_addr,
(u8 *)&ib_attr->sys_image_guid);
- ib_attr->max_mr_size = ~0ull;
- ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
- BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
- BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;
+ ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
+ ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -174,9 +172,11 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_mr = dev_attr->max_mr;
ib_attr->max_pd = dev_attr->max_pd;
ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
- ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
- ib_attr->atomic_cap = IB_ATOMIC_HCA;
- ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+ ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
+ if (dev_attr->is_atomic) {
+ ib_attr->atomic_cap = IB_ATOMIC_HCA;
+ ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+ }
ib_attr->max_ee_rd_atom = 0;
ib_attr->max_res_rd_atom = 0;
@@ -201,7 +201,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
ib_attr->max_pkeys = 1;
- ib_attr->local_ca_ack_delay = 0;
+ ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
return 0;
}
@@ -390,15 +390,17 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
return -EINVAL;
ctx->refcnt--;
if (!ctx->refcnt) {
- rc = bnxt_qplib_del_sgid
- (sgid_tbl,
- &sgid_tbl->tbl[ctx->idx], true);
- if (rc)
+ rc = bnxt_qplib_del_sgid(sgid_tbl,
+ &sgid_tbl->tbl[ctx->idx],
+ true);
+ if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to remove GID: %#x", rc);
- ctx_tbl = sgid_tbl->ctx;
- ctx_tbl[ctx->idx] = NULL;
- kfree(ctx);
+ } else {
+ ctx_tbl = sgid_tbl->ctx;
+ ctx_tbl[ctx->idx] = NULL;
+ kfree(ctx);
+ }
}
} else {
return -EINVAL;
@@ -588,10 +590,10 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
/* Create a fence MW only for kernel consumers */
mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
- if (!mw) {
+ if (IS_ERR(mw)) {
dev_err(rdev_to_dev(rdev),
"Failed to create fence-MW for PD: %p\n", pd);
- rc = -EINVAL;
+ rc = PTR_ERR(mw);
goto fail;
}
fence->mw = mw;
@@ -612,30 +614,13 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
int rc;
bnxt_re_destroy_fence_mr(pd);
- if (ib_pd->uobject && pd->dpi.dbr) {
- struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
- struct bnxt_re_ucontext *ucntx;
- /* Free DPI only if this is the first PD allocated by the
- * application and mark the context dpi as NULL
- */
- ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
-
- rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
- &rdev->qplib_res.dpi_tbl,
- &pd->dpi);
+ if (pd->qplib_pd.id) {
+ rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+ &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd);
if (rc)
- dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
- /* Don't fail, continue*/
- ucntx->dpi = NULL;
- }
-
- rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
- &rdev->qplib_res.pd_tbl,
- &pd->qplib_pd);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
- return rc;
+ dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
}
kfree(pd);
@@ -667,23 +652,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
if (udata) {
struct bnxt_re_pd_resp resp;
- if (!ucntx->dpi) {
+ if (!ucntx->dpi.dbr) {
/* Allocate DPI in alloc_pd to avoid failing of
* ibv_devinfo and family of application when DPIs
* are depleted.
*/
if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
- &pd->dpi, ucntx)) {
+ &ucntx->dpi, ucntx)) {
rc = -ENOMEM;
goto dbfail;
}
- ucntx->dpi = &pd->dpi;
}
resp.pdid = pd->qplib_pd.id;
/* Still allow mapping this DBR to the new user PD. */
- resp.dpi = ucntx->dpi->dpi;
- resp.dbr = (u64)ucntx->dpi->umdbr;
+ resp.dpi = ucntx->dpi.dpi;
+ resp.dbr = (u64)ucntx->dpi.umdbr;
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
@@ -960,7 +944,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
qplib_qp->rq.nmap = umem->nmap;
}
- qplib_qp->dpi = cntx->dpi;
+ qplib_qp->dpi = &cntx->dpi;
return 0;
rqfail:
ib_umem_release(qp->sumem);
@@ -1530,13 +1514,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
- qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
+ /* Cap the max_rd_atomic to device max */
+ qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+ dev_attr->max_qp_rd_atom);
}
if (qp_attr_mask & IB_QP_SQ_PSN) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
qp->qplib_qp.sq.psn = qp_attr->sq_psn;
}
if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (qp_attr->max_dest_rd_atomic >
+ dev_attr->max_qp_init_rd_atom) {
+ dev_err(rdev_to_dev(rdev),
+ "max_dest_rd_atomic requested%d is > dev_max%d",
+ qp_attr->max_dest_rd_atomic,
+ dev_attr->max_qp_init_rd_atom);
+ return -EINVAL;
+ }
+
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
@@ -2403,7 +2398,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
}
cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
cq->qplib_cq.nmap = cq->umem->nmap;
- cq->qplib_cq.dpi = uctx->dpi;
+ cq->qplib_cq.dpi = &uctx->dpi;
} else {
cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
@@ -2905,6 +2900,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->cq_lock, flags);
budget = min_t(u32, num_entries, cq->max_cql);
+ num_entries = budget;
if (!cq->cql) {
dev_err(rdev_to_dev(cq->rdev), "POLL CQ: no CQL to use");
goto exit;
@@ -3031,6 +3027,11 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
else if (ib_cqn_flags & IB_CQ_SOLICITED)
type = DBR_DBR_TYPE_CQ_ARMSE;
+ /* Poll to see if there are missed events */
+ if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
+ return 1;
+
bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
return 0;
@@ -3245,6 +3246,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct scatterlist *sg;
int entry;
+ if (length > BNXT_RE_MAX_MR_SIZE) {
+ dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+ length, BNXT_RE_MAX_MR_SIZE);
+ return ERR_PTR(-ENOMEM);
+ }
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -3388,8 +3395,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
struct bnxt_re_ucontext,
ib_uctx);
+
+ struct bnxt_re_dev *rdev = uctx->rdev;
+ int rc = 0;
+
if (uctx->shpg)
free_page((unsigned long)uctx->shpg);
+
+ if (uctx->dpi.dbr) {
+ /* Free DPI only if this is the first PD allocated by the
+ * application and mark the context dpi as NULL
+ */
+ rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ &rdev->qplib_res.dpi_tbl,
+ &uctx->dpi);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!");
+ /* Don't fail, continue*/
+ uctx->dpi.dbr = NULL;
+ }
+
kfree(uctx);
return 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 6c160f6a5398..a0bb7e33d7ca 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -59,7 +59,6 @@ struct bnxt_re_pd {
struct bnxt_re_dev *rdev;
struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd;
- struct bnxt_qplib_dpi dpi;
struct bnxt_re_fence_data fence;
};
@@ -127,7 +126,7 @@ struct bnxt_re_mw {
struct bnxt_re_ucontext {
struct bnxt_re_dev *rdev;
struct ib_ucontext ib_uctx;
- struct bnxt_qplib_dpi *dpi;
+ struct bnxt_qplib_dpi dpi;
void *shpg;
spinlock_t sh_lock; /* protect shpg */
};
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 1fce5e73216b..ceae2d92fb08 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -333,6 +333,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
req.stats_dma_addr = cpu_to_le64(dma_map);
+ req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index f05500bcdcf1..9af1514e5944 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1128,6 +1128,11 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}
/* Each SGE entry = 1 WQE size16 */
wqe_size16 = wqe->num_sge;
+ /* HW requires the wqe size to have room for at least one SGE
+ * even if none was supplied by the ULP
+ */
+ if (!wqe->num_sge)
+ wqe_size16++;
}
/* Specifics */
@@ -1364,6 +1369,11 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
rqe->flags = wqe->flags;
rqe->wqe_size = wqe->num_sge +
((offsetof(typeof(*rqe), data) + 15) >> 4);
+ /* HW requires the wqe size to have room for at least one SGE
+ * even if none was supplied by the ULP
+ */
+ if (!wqe->num_sge)
+ rqe->wqe_size++;
/* Supply the rqe->wr_id index to the wr_id_tbl for now */
rqe->wr_id[0] = cpu_to_le32(sw_prod);
@@ -1885,6 +1895,25 @@ flush_rq:
return rc;
}
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
+{
+ struct cq_base *hw_cqe, **hw_cqe_ptr;
+ unsigned long flags;
+ u32 sw_cons, raw_cons;
+ bool rc = true;
+
+ spin_lock_irqsave(&cq->hwq.lock, flags);
+ raw_cons = cq->hwq.cons;
+ sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
+ hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+ hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
+
+ /* Check for Valid bit. If the CQE is valid, return false */
+ rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
+ spin_unlock_irqrestore(&cq->hwq.lock, flags);
+ return rc;
+}
+
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
struct cq_res_raweth_qp1 *hwcqe,
struct bnxt_qplib_cqe **pcqe,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 36b7b7db0e3f..19176e06c98a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -449,6 +449,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num, struct bnxt_qplib_qp **qp);
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
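bnxt_qplib_is_cq_empty() above peeks at the CQE the consumer index points to and tests its toggle bit with CQE_CMP_VALID(). Below is a much-simplified standalone sketch of that phase-bit scheme (the ring size, field names, and starting phase are assumptions for illustration, not the driver's actual layout):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8 /* power of two, like the hardware queues */

struct cqe {
	unsigned int toggle; /* phase bit written by "hardware" */
};

/* An entry is valid only when its phase matches the phase implied by
 * the consumer index; the expected phase flips on every ring wrap,
 * starting at 1 because ring memory begins zeroed.
 */
static bool cqe_valid(const struct cqe *e, unsigned int raw_cons)
{
	unsigned int expect = !((raw_cons / RING_SIZE) & 1);

	return (e->toggle & 1) == expect;
}

int main(void)
{
	struct cqe ring[RING_SIZE] = { { 0 } };
	unsigned int raw_cons = 0;
	const struct cqe *head = &ring[raw_cons % RING_SIZE];

	printf("cq empty: %s\n", cqe_valid(head, raw_cons) ? "no" : "yes");

	ring[0].toggle = 1; /* "hardware" posts a first-pass entry */
	printf("cq empty: %s\n", cqe_valid(head, raw_cons) ? "no" : "yes");
	return 0;
}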
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index fde18cf0e406..ef91ab786dd4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -51,6 +51,19 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 } };
/* Device */
+
+static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+{
+ int rc;
+ u16 pcie_ctl2;
+
+ rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
+ &pcie_ctl2);
+ if (rc)
+ return false;
+ return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
+
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr)
{
@@ -81,6 +94,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
+ /* max_qp value reported by FW for PF doesn't include the QP1 for PF */
+ attr->max_qp += 1;
attr->max_qp_rd_atom =
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -129,6 +144,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
}
+ attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index a543f959098b..2ce7e2a32cf0 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -42,6 +42,8 @@
#define BNXT_QPLIB_RESERVED_QP_WRS 128
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040
+
struct bnxt_qplib_dev_attr {
char fw_ver[32];
u16 max_sgid;
@@ -70,6 +72,7 @@ struct bnxt_qplib_dev_attr {
u32 max_inline_data;
u32 l2_db_size;
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
+ bool is_atomic;
};
struct bnxt_qplib_pd {
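The new is_atomic probe reads PCIe Device Control 2 and tests the AtomicOp Requester Enable bit that the PCI_EXP_DEVCTL2_ATOMIC_REQ definition (0x0040, bit 6) names. A standalone sketch of the bit test on canned register values (real code reads the register with pcie_capability_read_word(), as in the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* AtomicOp Requester Enable */

static bool is_atomic_cap(uint16_t pcie_ctl2)
{
	return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}

int main(void)
{
	/* canned Device Control 2 values for illustration */
	printf("devctl2=0x0040 -> atomic %d\n", is_atomic_cap(0x0040));
	printf("devctl2=0x0000 -> atomic %d\n", is_atomic_cap(0x0000));
	return 0;
}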
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 29d30744d6c9..0cd0c1fa27d4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -718,7 +718,7 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
struct iwch_mr *mhp;
u32 mmid;
u32 stag = 0;
- int ret = 0;
+ int ret = -ENOMEM;
if (mr_type != IB_MR_TYPE_MEM_REG ||
max_num_sg > T3_MAX_FASTREG_DEPTH)
@@ -731,10 +731,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
goto err;
mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
- if (!mhp->pages) {
- ret = -ENOMEM;
+ if (!mhp->pages)
goto pl_err;
- }
mhp->rhp = rhp;
ret = iwch_alloc_pbl(mhp, max_num_sg);
@@ -751,7 +749,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
mhp->attr.state = 1;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+ ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+ if (ret)
goto err3;
pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e16fcaf6b5a3..be07da1997e6 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -963,6 +963,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
goto err3;
if (ucontext) {
+ ret = -ENOMEM;
mm = kmalloc(sizeof *mm, GFP_KERNEL);
if (!mm)
goto err4;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bfc77596acbe..cb7fc0d35d1d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -569,7 +569,7 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
if (wr->num_sge > 1)
return -EINVAL;
- if (wr->num_sge) {
+ if (wr->num_sge && wr->sg_list[0].length) {
wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
>> 32));
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2ba00b89df6a..94b54850ec75 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12847,7 +12847,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
/* clear from the handled mask of the general interrupt */
m = isrc / 64;
n = isrc % 64;
- dd->gi_mask[m] &= ~((u64)1 << n);
+ if (likely(m < CCE_NUM_INT_CSRS)) {
+ dd->gi_mask[m] &= ~((u64)1 << n);
+ } else {
+ dd_dev_err(dd, "remap interrupt err\n");
+ return;
+ }
/* direct the chip source to the given MSI-X interrupt */
m = isrc / 8;
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 650305cc0373..1a7af9f60c13 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -647,18 +647,17 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
qp->pid);
}
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp)
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv;
- priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
+ priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->owner = qp;
- priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
+ priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
rdi->dparms.node);
if (!priv->s_ahg) {
kfree(priv);
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 1eb9cd7b8c19..6fe542b6a927 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -123,8 +123,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp);
/*
* Functions provided by hfi1 driver for rdmavt to use
*/
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp);
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
unsigned free_all_qps(struct rvt_dev_info *rdi);
void notify_qp_reset(struct rvt_qp *qp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 37d5d29597a4..23fad6d96944 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -228,14 +228,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_READ:
ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
- set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
- atomic_wr(wr)->rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
break;
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
- set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
- atomic_wr(wr)->rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
break;
case IB_WR_SEND:
case IB_WR_SEND_WITH_INV:
@@ -661,9 +661,11 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
union ib_gid dgid;
u64 subnet_prefix;
int attr_mask = 0;
- int i;
+ int i, j;
int ret;
+ u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
u8 phy_port;
+ u8 port = 0;
u8 sl;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
@@ -709,11 +711,27 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
attr.rnr_retry = 7;
attr.timeout = 0x12;
attr.path_mtu = IB_MTU_256;
+ attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
rdma_ah_set_static_rate(&attr.ah_attr, 3);
subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+ phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
+ (i % HNS_ROCE_MAX_PORTS);
+ sl = i / HNS_ROCE_MAX_PORTS;
+
+ for (j = 0; j < caps->num_ports; j++) {
+ if (hr_dev->iboe.phy_port[j] == phy_port) {
+ queue_en[i] = 1;
+ port = j;
+ break;
+ }
+ }
+
+ if (!queue_en[i])
+ continue;
+
free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
if (IS_ERR(free_mr->mr_free_qp[i])) {
dev_err(dev, "Create loop qp failed!\n");
@@ -721,15 +739,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
}
hr_qp = free_mr->mr_free_qp[i];
- sl = i / caps->num_ports;
-
- if (caps->num_ports == HNS_ROCE_MAX_PORTS)
- phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
- (i % caps->num_ports);
- else
- phy_port = i % caps->num_ports;
-
- hr_qp->port = phy_port + 1;
+ hr_qp->port = port;
hr_qp->phy_port = phy_port;
hr_qp->ibqp.qp_type = IB_QPT_RC;
hr_qp->ibqp.device = &hr_dev->ib_dev;
@@ -739,23 +749,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
hr_qp->ibqp.recv_cq = cq;
hr_qp->ibqp.send_cq = cq;
- rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1);
- rdma_ah_set_sl(&attr.ah_attr, phy_port + 1);
- attr.port_num = phy_port + 1;
+ rdma_ah_set_port_num(&attr.ah_attr, port + 1);
+ rdma_ah_set_sl(&attr.ah_attr, sl);
+ attr.port_num = port + 1;
attr.dest_qp_num = hr_qp->qpn;
memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
- hr_dev->dev_addr[phy_port],
+ hr_dev->dev_addr[port],
MAC_ADDR_OCTET_NUM);
memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
- memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3);
- memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3);
+ memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
+ memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
dgid.raw[11] = 0xff;
dgid.raw[12] = 0xfe;
dgid.raw[8] ^= 2;
rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
- attr_mask |= IB_QP_PORT;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
IB_QPS_RESET, IB_QPS_INIT);
@@ -812,6 +821,9 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
+ if (!hr_qp)
+ continue;
+
ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
if (ret)
dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
@@ -963,7 +975,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
int i;
int ret;
- int ne;
+ int ne = 0;
mr_work = container_of(work, struct hns_roce_mr_free_work, work);
hr_mr = (struct hns_roce_mr *)mr_work->mr;
@@ -976,6 +988,10 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
+ if (!hr_qp)
+ continue;
+ ne++;
+
ret = hns_roce_v1_send_lp_wqe(hr_qp);
if (ret) {
dev_err(dev,
@@ -985,7 +1001,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
}
}
- ne = HNS_ROCE_V1_RESV_QP;
do {
ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
if (ret < 0) {
@@ -995,7 +1010,8 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
goto free_work;
}
ne -= ret;
- msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+ usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
+ (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
} while (ne && time_before_eq(jiffies, end));
if (ne != 0)
@@ -2181,7 +2197,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
}
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
- } else {
+ } else {
/* RQ corresponds to CQE */
wc->byte_len = le32_to_cpu(cqe->byte_cnt);
opcode = roce_get_field(cqe->cqe_byte_4,
@@ -3533,10 +3549,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
old_cnt = roce_get_field(old_send,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
- if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+ if (cur_cnt - old_cnt >
+ SDB_ST_CMP_VAL) {
success_flags = 1;
- else {
- send_ptr = roce_get_field(old_send,
+ } else {
+ send_ptr =
+ roce_get_field(old_send,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
roce_get_field(sdb_retry_cnt,
@@ -3641,6 +3659,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
struct hns_roce_dev *hr_dev;
struct hns_roce_qp *hr_qp;
struct device *dev;
+ unsigned long qpn;
int ret;
qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
@@ -3648,8 +3667,9 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
dev = &hr_dev->pdev->dev;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
hr_qp = qp_work_entry->qp;
+ qpn = hr_qp->qpn;
- dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
+ dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
qp_work_entry->sche_cnt++;
@@ -3660,7 +3680,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
&qp_work_entry->db_wait_stage);
if (ret) {
dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
- hr_qp->qpn);
+ qpn);
return;
}
@@ -3674,7 +3694,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
IB_QPS_RESET);
if (ret) {
- dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
+ dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
return;
}
@@ -3683,14 +3703,14 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
/* RC QP, release QPN */
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+ hns_roce_release_range_qp(hr_dev, qpn, 1);
kfree(hr_qp);
} else
kfree(hr_to_hr_sqp(hr_qp));
kfree(qp_work_entry);
- dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
+ dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
}
int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c3b41f95e70a..d9777b662eba 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -125,8 +125,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
return -ENODEV;
}
- spin_lock_bh(&hr_dev->iboe.lock);
-
switch (event) {
case NETDEV_UP:
case NETDEV_CHANGE:
@@ -144,7 +142,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
break;
}
- spin_unlock_bh(&hr_dev->iboe.lock);
return 0;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index da2eb5a281fa..9b1566468744 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -527,6 +527,7 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
void i40iw_rem_devusecount(struct i40iw_device *iwdev);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 6ae98aa7f74e..5a2fa743676c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -3487,7 +3487,8 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
(original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
(last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
- (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+ (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
+ iwdev->reset)) {
issue_close = 1;
iwqp->cm_id = NULL;
if (!iwqp->flush_issued) {
@@ -4265,6 +4266,8 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
attr.qp_state = IB_QPS_ERR;
i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (iwdev->reset)
+ i40iw_cm_disconn(cm_node->iwqp);
i40iw_rem_ref_cm_node(cm_node);
}
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index a027e2072477..9ec1ae9a82c9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1970,6 +1970,8 @@ static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
}
+ cqp->process_cqp_sds = i40iw_update_sds_noccq;
+
return ret_code;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index e0f47cc2effc..ae8463ff59a7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -243,6 +243,8 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
if (free_hwcqp)
dev->cqp_ops->cqp_destroy(dev->cqp);
+ i40iw_cleanup_pending_cqp_op(iwdev);
+
i40iw_free_dma_mem(dev->hw, &cqp->sq);
kfree(cqp->scratch_array);
iwdev->cqp.scratch_array = NULL;
@@ -274,13 +276,12 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
/**
* i40iw_destroy_aeq - destroy aeq
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Issue a destroy aeq request and
* free the resources associated with the aeq
* The function is called during driver unload
*/
-static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
enum i40iw_status_code status = I40IW_ERR_NOT_READY;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -288,7 +289,7 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
if (!iwdev->msix_shared)
i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
- if (reset)
+ if (iwdev->reset)
goto exit;
if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
@@ -304,19 +305,17 @@ exit:
* i40iw_destroy_ceq - destroy ceq
* @iwdev: iwarp device
* @iwceq: ceq to be destroyed
- * @reset: true if called before reset
*
* Issue a destroy ceq request and
* free the resources associated with the ceq
*/
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
- struct i40iw_ceq *iwceq,
- bool reset)
+ struct i40iw_ceq *iwceq)
{
enum i40iw_status_code status;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- if (reset)
+ if (iwdev->reset)
goto exit;
status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
@@ -335,12 +334,11 @@ exit:
/**
* i40iw_dele_ceqs - destroy all ceq's
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Go through all of the device ceq's and for each ceq
* disable the ceq interrupt and destroy the ceq
*/
-static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
+static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
u32 i = 0;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -349,32 +347,31 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
if (iwdev->msix_shared) {
i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
- i40iw_destroy_ceq(iwdev, iwceq, reset);
+ i40iw_destroy_ceq(iwdev, iwceq);
iwceq++;
i++;
}
for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
- i40iw_destroy_ceq(iwdev, iwceq, reset);
+ i40iw_destroy_ceq(iwdev, iwceq);
}
}
/**
* i40iw_destroy_ccq - destroy control cq
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Issue destroy ccq request and
* free the resources associated with the ccq
*/
-static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
{
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_ccq *ccq = &iwdev->ccq;
enum i40iw_status_code status = 0;
- if (!reset)
+ if (!iwdev->reset)
status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
if (status)
i40iw_pr_err("ccq destroy failed %d\n", status);
@@ -810,7 +807,7 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
iwceq->msix_idx = msix_vec->idx;
status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
if (status) {
- i40iw_destroy_ceq(iwdev, iwceq, false);
+ i40iw_destroy_ceq(iwdev, iwceq);
break;
}
i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
@@ -912,7 +909,7 @@ static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
status = i40iw_configure_aeq_vector(iwdev);
if (status) {
- i40iw_destroy_aeq(iwdev, false);
+ i40iw_destroy_aeq(iwdev);
return status;
}
@@ -1442,12 +1439,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
/**
* i40iw_deinit_device - clean up the device resources
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
* destroy the device queues and free the pble and the hmc objects
*/
-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
struct i40e_info *ldev = iwdev->ldev;
@@ -1464,7 +1460,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
i40iw_destroy_rdma_device(iwdev->iwibdev);
/* fallthrough */
case IP_ADDR_REGISTERED:
- if (!reset)
+ if (!iwdev->reset)
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
/* fallthrough */
case INET_NOTIFIER:
@@ -1474,26 +1470,26 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
}
/* fallthrough */
+ case PBLE_CHUNK_MEM:
+ i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+ /* fallthrough */
case CEQ_CREATED:
- i40iw_dele_ceqs(iwdev, reset);
+ i40iw_dele_ceqs(iwdev);
/* fallthrough */
case AEQ_CREATED:
- i40iw_destroy_aeq(iwdev, reset);
+ i40iw_destroy_aeq(iwdev);
/* fallthrough */
case IEQ_CREATED:
- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
/* fallthrough */
case ILQ_CREATED:
- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
/* fallthrough */
case CCQ_CREATED:
- i40iw_destroy_ccq(iwdev, reset);
- /* fallthrough */
- case PBLE_CHUNK_MEM:
- i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+ i40iw_destroy_ccq(iwdev);
/* fallthrough */
case HMC_OBJS_CREATED:
- i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+ i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
/* fallthrough */
case CQP_CREATED:
i40iw_destroy_cqp(iwdev, true);
@@ -1670,6 +1666,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
if (status)
break;
+ iwdev->init_state = PBLE_CHUNK_MEM;
iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
i40iw_register_notifiers();
iwdev->init_state = INET_NOTIFIER;
@@ -1693,7 +1690,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
} while (0);
i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
- i40iw_deinit_device(iwdev, false);
+ i40iw_deinit_device(iwdev);
return -ERESTART;
}
@@ -1774,9 +1771,12 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
iwdev = &hdl->device;
iwdev->closing = true;
+ if (reset)
+ iwdev->reset = true;
+
i40iw_cm_disconnect_all(iwdev);
destroy_workqueue(iwdev->virtchnl_wq);
- i40iw_deinit_device(iwdev, reset);
+ i40iw_deinit_device(iwdev);
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index db41ab40da9c..71050c5d29a0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -408,6 +408,9 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
set_64bit_val(wqe, 0, info->paddr);
set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
set_64bit_val(wqe, 16, header[0]);
+
+ /* Ensure all data is written before writing valid bit */
+ wmb();
set_64bit_val(wqe, 24, header[1]);
i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
@@ -1411,10 +1414,10 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
if (!list_empty(rxlist)) {
tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
- plist = &tmpbuf->list;
while ((struct list_head *)tmpbuf != rxlist) {
if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
break;
+ plist = &tmpbuf->list;
tmpbuf = (struct i40iw_puda_buf *)plist->next;
}
/* Insert buf before tmpbuf */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 56d986924a4c..e311ec559f4e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -337,6 +337,7 @@ struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait
*/
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
{
+ struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
unsigned long flags;
if (cqp_request->dynamic) {
@@ -350,6 +351,7 @@ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp
list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
spin_unlock_irqrestore(&cqp->req_lock, flags);
}
+ wake_up(&iwdev->close_wq);
}
/**
@@ -365,6 +367,56 @@ void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
}
/**
+ * i40iw_free_pending_cqp_request - free pending cqp request objs
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
+ struct i40iw_cqp_request *cqp_request)
+{
+ struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
+
+ if (cqp_request->waiting) {
+ cqp_request->compl_info.error = true;
+ cqp_request->request_done = true;
+ wake_up(&cqp_request->waitq);
+ }
+ i40iw_put_cqp_request(cqp, cqp_request);
+ wait_event_timeout(iwdev->close_wq,
+ !atomic_read(&cqp_request->refcount),
+ 1000);
+}
+
+/**
+ * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
+ * @iwdev: iwarp device
+ */
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cqp *cqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request = NULL;
+ struct cqp_commands_info *pcmdinfo = NULL;
+ u32 i, pending_work, wqe_idx;
+
+ pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
+ wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
+ for (i = 0; i < pending_work; i++) {
+ cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
+ if (cqp_request)
+ i40iw_free_pending_cqp_request(cqp, cqp_request);
+ wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
+ }
+
+ while (!list_empty(&dev->cqp_cmd_head)) {
+ pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
+ cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
+ if (cqp_request)
+ i40iw_free_pending_cqp_request(cqp, cqp_request);
+ }
+}
+
+/**
* i40iw_free_qp - callback after destroy cqp completes
* @cqp_request: cqp request for destroy qp
* @num: not used
@@ -546,8 +598,12 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
cqp_info->in.u.qp_destroy.remove_hash_idx = true;
status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Destroy QP fail");
+ if (!status)
+ return;
+
+ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+ i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+ i40iw_rem_devusecount(iwdev);
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4dbe61ec7a77..02d871db7ca5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -426,9 +426,13 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
struct i40iw_qp *iwqp,
u32 qp_num)
{
+ struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+
i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
if (qp_num)
i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+ if (iwpbl->pbl_allocated)
+ i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
kfree(iwqp->kqp.wrid_mem);
@@ -483,7 +487,7 @@ static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
struct i40iw_qp *iwqp,
struct i40iw_qp_init_info *init_info)
{
- struct i40iw_pbl *iwpbl = iwqp->iwpbl;
+ struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
iwqp->page = qpmr->sq_page;
@@ -688,19 +692,22 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
ucontext = to_ucontext(ibpd->uobject->context);
if (req.user_wqe_buffers) {
+ struct i40iw_pbl *iwpbl;
+
spin_lock_irqsave(
&ucontext->qp_reg_mem_list_lock, flags);
- iwqp->iwpbl = i40iw_get_pbl(
+ iwpbl = i40iw_get_pbl(
(unsigned long)req.user_wqe_buffers,
&ucontext->qp_reg_mem_list);
spin_unlock_irqrestore(
&ucontext->qp_reg_mem_list_lock, flags);
- if (!iwqp->iwpbl) {
+ if (!iwpbl) {
err_code = -ENODATA;
i40iw_pr_err("no pbl info\n");
goto error;
}
+ memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
}
}
err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
@@ -1161,8 +1168,10 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
memset(&req, 0, sizeof(req));
iwcq->user_mode = true;
ucontext = to_ucontext(context);
- if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
+ if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
+ err_code = -EFAULT;
goto cq_free_resources;
+ }
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
@@ -2063,7 +2072,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
ucontext = to_ucontext(ibpd->uobject->context);
i40iw_del_memlist(iwmr, ucontext);
}
- if (iwpbl->pbl_allocated)
+ if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
i40iw_free_pble(iwdev->pble_rsrc, palloc);
kfree(iwmr);
return 0;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 07c3fec77de6..9067443cd311 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -170,7 +170,7 @@ struct i40iw_qp {
struct i40iw_qp_kmode kqp;
struct i40iw_dma_mem host_ctx;
struct timer_list terminate_timer;
- struct i40iw_pbl *iwpbl;
+ struct i40iw_pbl iwpbl;
struct i40iw_dma_mem q2_ctx_mem;
struct i40iw_dma_mem ietf_mem;
struct completion sq_drained;
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 1e6c526450d9..fedaf8260105 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -323,6 +323,9 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
sl_cm_id = get_local_comm_id(mad);
+ id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
+ if (id)
+ goto cont;
id = id_map_alloc(ibdev, slave_id, sl_cm_id);
if (IS_ERR(id)) {
mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
@@ -343,6 +346,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
return -EINVAL;
}
+cont:
set_local_comm_id(mad, id->pv_cm_id);
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 4f5a143fc0a7..ff931c580557 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
int err;
err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
- PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+ PAGE_SIZE * 2, &buf->buf);
if (err)
goto out;
@@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
if (err)
goto err_mtt;
@@ -219,7 +219,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
uar = &to_mucontext(context)->uar;
} else {
- err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
+ err = mlx4_db_alloc(dev->dev, &cq->db, 1);
if (err)
goto err_cq;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75b2f7d4cd95..d1b43cbbfea7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1155,7 +1155,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
* call to mlx4_ib_vma_close.
*/
put_task_struct(owning_process);
- msleep(1);
+ usleep_range(1000, 2000);
owning_process = get_pid_task(ibcontext->tgid,
PIDTYPE_PID);
if (!owning_process ||
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 3405e947dc1e..b73f89700ef9 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
if (!count)
break;
- msleep(1);
+ usleep_range(1000, 2000);
} while (time_after(end, jiffies));
flush_workqueue(ctx->mcg_wq);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c2b9cbf4da05..9db82e67e959 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -185,7 +185,6 @@ enum mlx4_ib_qp_flags {
MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
- MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 996e9058e515..75c0e6c5dd56 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -634,8 +634,8 @@ static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
- gfp_t gfp)
+ struct ib_udata *udata, int sqpn,
+ struct mlx4_ib_qp **caller_qp)
{
int qpn;
int err;
@@ -691,14 +691,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
(qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
- sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
+ sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
if (!sqp)
return -ENOMEM;
qp = &sqp->qp;
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
} else {
- qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
+ qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
qp->pri.vid = 0xFFFF;
@@ -780,7 +780,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
if (qp_has_rq(init_attr)) {
- err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
+ err = mlx4_db_alloc(dev->dev, &qp->db, 0);
if (err)
goto err;
@@ -788,7 +788,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
}
if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
- &qp->buf, gfp)) {
+ &qp->buf)) {
memcpy(&init_attr->cap, &backup_cap,
sizeof(backup_cap));
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
@@ -797,7 +797,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_db;
if (mlx4_buf_alloc(dev->dev, qp->buf_size,
- PAGE_SIZE * 2, &qp->buf, gfp)) {
+ PAGE_SIZE * 2, &qp->buf)) {
err = -ENOMEM;
goto err_db;
}
@@ -808,20 +808,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
+ err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
if (err)
goto err_mtt;
qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
- gfp | __GFP_NOWARN);
+ GFP_KERNEL | __GFP_NOWARN);
if (!qp->sq.wrid)
qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
- gfp, PAGE_KERNEL);
+ GFP_KERNEL, PAGE_KERNEL);
qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
- gfp | __GFP_NOWARN);
+ GFP_KERNEL | __GFP_NOWARN);
if (!qp->rq.wrid)
qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
- gfp, PAGE_KERNEL);
+ GFP_KERNEL, PAGE_KERNEL);
if (!qp->sq.wrid || !qp->rq.wrid) {
err = -ENOMEM;
goto err_wrid;
@@ -859,7 +859,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
- err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
if (err)
goto err_qpn;
@@ -1127,10 +1127,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
int err;
int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
u16 xrcdn = 0;
- gfp_t gfp;
- gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
- GFP_NOIO : GFP_KERNEL;
/*
* We only support LSO, vendor flag1, and multicast loopback blocking,
* and only for kernel UD QPs.
@@ -1140,8 +1137,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
MLX4_IB_SRIOV_TUNNEL_QP |
MLX4_IB_SRIOV_SQP |
MLX4_IB_QP_NETIF |
- MLX4_IB_QP_CREATE_ROCE_V2_GSI |
- MLX4_IB_QP_CREATE_USE_GFP_NOIO))
+ MLX4_IB_QP_CREATE_ROCE_V2_GSI))
return ERR_PTR(-EINVAL);
if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
@@ -1154,7 +1150,6 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
- MLX4_IB_QP_CREATE_USE_GFP_NOIO |
MLX4_IB_QP_CREATE_ROCE_V2_GSI |
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
init_attr->qp_type != IB_QPT_UD) ||
@@ -1179,7 +1174,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_RAW_PACKET:
- qp = kzalloc(sizeof *qp, gfp);
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
qp->pri.vid = 0xFFFF;
@@ -1188,7 +1183,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
case IB_QPT_UD:
{
err = create_qp_common(to_mdev(pd->device), pd, init_attr,
- udata, 0, &qp, gfp);
+ udata, 0, &qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
@@ -1217,8 +1212,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
}
err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
- sqpn,
- &qp, gfp);
+ sqpn, &qp);
if (err)
return ERR_PTR(err);
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index e32dd58937a8..0facaf5f6d23 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -135,14 +135,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
} else {
- err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
+ err = mlx4_db_alloc(dev->dev, &srq->db, 0);
if (err)
goto err_srq;
*srq->db.db = 0;
- if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
- GFP_KERNEL)) {
+ if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
+ &srq->buf)) {
err = -ENOMEM;
goto err_db;
}
@@ -167,7 +167,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
if (err)
goto err_mtt;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 763bb5b36144..2c40a2e989d2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -582,6 +582,15 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
}
}
+static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_debugfs_root)
+ return;
+
+ debugfs_remove_recursive(dev->cache.root);
+ dev->cache.root = NULL;
+}
+
static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mr_cache *cache = &dev->cache;
@@ -600,38 +609,34 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
sprintf(ent->name, "%d", ent->order);
ent->dir = debugfs_create_dir(ent->name, cache->root);
if (!ent->dir)
- return -ENOMEM;
+ goto err;
ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
&size_fops);
if (!ent->fsize)
- return -ENOMEM;
+ goto err;
ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
&limit_fops);
if (!ent->flimit)
- return -ENOMEM;
+ goto err;
ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
&ent->cur);
if (!ent->fcur)
- return -ENOMEM;
+ goto err;
ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
&ent->miss);
if (!ent->fmiss)
- return -ENOMEM;
+ goto err;
}
return 0;
-}
-
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
-{
- if (!mlx5_debugfs_root)
- return;
+err:
+ mlx5_mr_cache_debugfs_cleanup(dev);
- debugfs_remove_recursive(dev->cache.root);
+ return -ENOMEM;
}
static void delay_time_func(unsigned long ctx)
@@ -692,6 +697,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
if (err)
mlx5_ib_warn(dev, "cache debugfs failure\n");
+ /*
+ * We don't want to fail the driver if debugfs fails to initialize,
+ * so we are not forwarding the error to the user.
+ */
+
return 0;
}
@@ -825,7 +835,7 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
access_flags, 0);
err = PTR_ERR_OR_ZERO(*umem);
if (err < 0) {
- mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
+ mlx5_ib_err(dev, "umem get failed (%d)\n", err);
return err;
}
@@ -1779,7 +1789,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
mr->ndescs = sg_nents;
for_each_sg(sgl, sg, sg_nents, i) {
- if (unlikely(i > mr->max_descs))
+ if (unlikely(i >= mr->max_descs))
break;
klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8f9d8b4ad583..b0adf65e4bdb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -551,7 +551,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
|| (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
int_cnt++;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (int_cnt > 1) {
spin_lock_irqsave(&nesadapter->phy_lock, flags);
@@ -592,7 +592,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
break;
}
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 2f30bda8457a..27d5e8d9f08d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -744,7 +744,8 @@ err:
if (is_uctx_pd) {
ocrdma_release_ucontext_pd(uctx);
} else {
- status = _ocrdma_dealloc_pd(dev, pd);
+ if (_ocrdma_dealloc_pd(dev, pd))
+ pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
}
exit:
return ERR_PTR(status);
@@ -1901,6 +1902,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
goto err;
if (udata == NULL) {
+ status = -ENOMEM;
srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
GFP_KERNEL);
if (srq->rqe_wr_id_tbl == NULL)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 548e4d1e998f..2ae71b8f1ba8 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -53,6 +53,14 @@
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
+ size_t len)
+{
+ size_t min_len = min_t(size_t, len, udata->outlen);
+
+ return ib_copy_to_udata(udata, src, min_len);
+}
+
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
if (index > QEDR_ROCE_PKEY_TABLE_LEN)
@@ -378,7 +386,7 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
uresp.max_cqes = QEDR_MAX_CQES;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
goto err;
@@ -499,7 +507,7 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
uresp.pd_id = pd_id;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc) {
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
@@ -729,7 +737,7 @@ static int qedr_copy_cq_uresp(struct qedr_dev *dev,
uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
uresp.icid = cq->icid;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
@@ -1238,7 +1246,7 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
uresp.qp_id = qp->qp_id;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
DP_ERR(dev,
"create qp: failed a copy to user space with qp icid=0x%x.\n",
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 5984981e7dd4..a343e3b5d4cb 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -104,10 +104,9 @@ const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
};
-static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
- gfp_t gfp)
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
- unsigned long page = get_zeroed_page(gfp);
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
/*
* Free the page if someone raced with us installing it.
@@ -126,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
*/
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port, gfp_t gfp)
+ enum ib_qp_type type, u8 port)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
@@ -160,7 +159,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
- get_map_page(qpt, map, gfp);
+ get_map_page(qpt, map);
if (unlikely(!map->page))
break;
}
@@ -317,16 +316,16 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
return ib_mtu_enum_to_int(pmtu);
}
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
struct qib_qp_priv *priv;
- priv = kzalloc(sizeof(*priv), gfp);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->owner = qp;
- priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+ priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
if (!priv->s_hdr) {
kfree(priv);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index da0db5485ddc..a52fc67b40d7 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -274,11 +274,11 @@ int qib_get_counters(struct qib_pportdata *ppd,
* Functions provided by qib driver for rdmavt to use
*/
unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp);
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_notify_qp_reset(struct rvt_qp *qp);
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port, gfp_t gfp);
+ enum ib_qp_type type, u8 port);
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 727e81cc2c8f..8876ee7bc326 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -118,10 +118,9 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
EXPORT_SYMBOL(ib_rvt_state_ops);
static void get_map_page(struct rvt_qpn_table *qpt,
- struct rvt_qpn_map *map,
- gfp_t gfp)
+ struct rvt_qpn_map *map)
{
- unsigned long page = get_zeroed_page(gfp);
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
/*
* Free the page if someone raced with us installing it.
@@ -173,7 +172,7 @@ static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
if (!map->page) {
- get_map_page(qpt, map, GFP_KERNEL);
+ get_map_page(qpt, map);
if (!map->page) {
ret = -ENOMEM;
break;
@@ -342,14 +341,14 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
* Return: The queue pair number
*/
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num, gfp_t gfp)
+ enum ib_qp_type type, u8 port_num)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
u32 ret;
if (rdi->driver_f.alloc_qpn)
- return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);
+ return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
unsigned n;
@@ -374,7 +373,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
- get_map_page(qpt, map, gfp);
+ get_map_page(qpt, map);
if (unlikely(!map->page))
break;
}
@@ -672,7 +671,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct ib_qp *ret = ERR_PTR(-ENOMEM);
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL;
- gfp_t gfp;
size_t sqsize;
if (!rdi)
@@ -680,18 +678,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
- init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
+ init_attr->create_flags)
return ERR_PTR(-EINVAL);
- /* GFP_NOIO is applicable to RC QP's only */
-
- if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
- init_attr->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
-
- gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
- GFP_NOIO : GFP_KERNEL;
-
/* Check receive queue parameters if no SRQ is specified. */
if (!init_attr->srq) {
if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
@@ -719,14 +708,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
sz = sizeof(struct rvt_sge) *
init_attr->cap.max_send_sge +
sizeof(struct rvt_swqe);
- if (gfp == GFP_NOIO)
- swq = __vmalloc(
- sqsize * sz,
- gfp | __GFP_ZERO, PAGE_KERNEL);
- else
- swq = vzalloc_node(
- sqsize * sz,
- rdi->dparms.node);
+ swq = vzalloc_node(sqsize * sz, rdi->dparms.node);
if (!swq)
return ERR_PTR(-ENOMEM);
@@ -741,7 +723,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
} else if (init_attr->cap.max_recv_sge > 1)
sg_list_sz = sizeof(*qp->r_sg_list) *
(init_attr->cap.max_recv_sge - 1);
- qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
+ qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
+ rdi->dparms.node);
if (!qp)
goto bail_swq;
@@ -751,7 +734,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
kzalloc_node(
sizeof(*qp->s_ack_queue) *
rvt_max_atomic(rdi),
- gfp,
+ GFP_KERNEL,
rdi->dparms.node);
if (!qp->s_ack_queue)
goto bail_qp;
@@ -766,7 +749,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
* Driver needs to set up its private QP structure and do any
* initialization that is needed.
*/
- priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
+ priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
if (IS_ERR(priv)) {
ret = priv;
goto bail_qp;
@@ -786,11 +769,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->r_rq.wq = vmalloc_user(
sizeof(struct rvt_rwq) +
qp->r_rq.size * sz);
- else if (gfp == GFP_NOIO)
- qp->r_rq.wq = __vmalloc(
- sizeof(struct rvt_rwq) +
- qp->r_rq.size * sz,
- gfp | __GFP_ZERO, PAGE_KERNEL);
else
qp->r_rq.wq = vzalloc_node(
sizeof(struct rvt_rwq) +
@@ -824,7 +802,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type,
- init_attr->port_num, gfp);
+ init_attr->port_num);
if (err < 0) {
ret = ERR_PTR(err);
goto bail_rq_wq;
@@ -1280,9 +1258,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_TIMEOUT) {
qp->timeout = attr->timeout;
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
+ qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
}
if (attr_mask & IB_QP_QKEY)
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c3a140ed4df2..08f3f90d2912 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -441,6 +441,8 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
if (unlikely(qp->need_req_skb &&
skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
rxe_run_task(&qp->req.task, 1);
+
+ rxe_drop_ref(qp);
}
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
@@ -473,6 +475,7 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
return -EAGAIN;
}
+ rxe_add_ref(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
kfree_skb(skb);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index be944d5aa9af..a958ee918a49 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1219,6 +1219,9 @@ void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
kfree_skb(skb);
}
+ if (notify)
+ return;
+
while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
advance_consumer(qp->rq.queue);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 073e66783f1d..af90a7d42b96 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -914,6 +914,9 @@ static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_unlock_irqrestore(&rq->producer_lock, flags);
+ if (qp->resp.state == QP_STATE_ERROR)
+ rxe_run_task(&qp->resp.task, 1);
+
err1:
return err;
}
@@ -1240,6 +1243,8 @@ int rxe_register_device(struct rxe_dev *rxe)
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
dev->dev.dma_ops = &dma_virt_ops;
+ dma_coerce_mask_and_coherent(&dev->dev,
+ dma_get_required_mask(dev->dev.parent));
dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7cbcfdac6529..f87d104837dc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -39,6 +39,7 @@
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include "ipoib.h"
@@ -954,7 +955,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
break;
}
spin_unlock_irq(&priv->lock);
- msleep(1);
+ usleep_range(1000, 2000);
ipoib_drain_cq(dev);
spin_lock_irq(&priv->lock);
}
@@ -1047,9 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
.qp_context = tx,
- .create_flags = IB_QP_CREATE_USE_GFP_NOIO
+ .create_flags = 0
};
-
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
@@ -1057,10 +1057,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
- if (PTR_ERR(tx_qp) == -EINVAL) {
- attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
- tx_qp = ib_create_qp(priv->pd, &attr);
- }
tx->max_send_sge = attr.cap.max_send_sge;
return tx_qp;
}
@@ -1131,10 +1127,11 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
struct sa_path_rec *pathrec)
{
struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ unsigned int noio_flag;
int ret;
- p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
- GFP_NOIO, PAGE_KERNEL);
+ noio_flag = memalloc_noio_save();
+ p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
if (!p->tx_ring) {
ret = -ENOMEM;
goto err_tx;
@@ -1142,9 +1139,10 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
+ memalloc_noio_restore(noio_flag);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
- ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
+ ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
goto err_qp;
}
@@ -1206,7 +1204,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
goto timeout;
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index efe7402f4885..57a9655e844d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -770,7 +770,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
ipoib_drain_cq(dev);
- msleep(1);
+ usleep_range(1000, 2000);
}
ipoib_dbg(priv, "All sends and receives done.\n");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6e86eeee370e..4ce315c92b48 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -233,6 +233,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret = 0;
/* dev->mtu > 2K ==> connected mode */
if (ipoib_cm_admin_enabled(dev)) {
@@ -256,9 +257,34 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
ipoib_dbg(priv, "MTU must be smaller than the underlying "
"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
- dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
+ new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
- return 0;
+ if (priv->rn_ops->ndo_change_mtu) {
+ bool carrier_status = netif_carrier_ok(dev);
+
+ netif_carrier_off(dev);
+
+ /* notify the lower level of the real MTU */
+ ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
+
+ if (carrier_status)
+ netif_carrier_on(dev);
+ } else {
+ dev->mtu = new_mtu;
+ }
+
+ return ret;
+}
+
+static void ipoib_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (priv->rn_ops->ndo_get_stats64)
+ priv->rn_ops->ndo_get_stats64(dev, stats);
+ else
+ netdev_stats_to_stats64(stats, &dev->stats);
}
/* Called with an RCU read lock taken */
@@ -1808,6 +1834,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_get_vf_stats = ipoib_get_vf_stats,
.ndo_set_vf_guid = ipoib_set_vf_guid,
.ndo_set_mac_address = ipoib_set_mac,
+ .ndo_get_stats64 = ipoib_get_stats,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -2212,6 +2239,7 @@ static struct net_device *ipoib_add_port(const char *format,
goto register_failed;
}
+ result = -ENOMEM;
if (ipoib_cm_add_mode_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(priv->dev))
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5a887efb4bdf..37b33d708c2d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -83,6 +83,7 @@ static struct scsi_host_template iscsi_iser_sht;
static struct iscsi_transport iscsi_iser_transport;
static struct scsi_transport_template *iscsi_iser_scsi_transport;
static struct workqueue_struct *release_wq;
+static DEFINE_MUTEX(unbind_iser_conn_mutex);
struct iser_global ig;
int iser_debug_level = 0;
@@ -550,12 +551,14 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*/
if (iser_conn) {
mutex_lock(&iser_conn->state_mutex);
+ mutex_lock(&unbind_iser_conn_mutex);
iser_conn_terminate(iser_conn);
iscsi_conn_stop(cls_conn, flag);
/* unbind */
iser_conn->iscsi_conn = NULL;
conn->dd_data = NULL;
+ mutex_unlock(&unbind_iser_conn_mutex);
complete(&iser_conn->stop_completion);
mutex_unlock(&iser_conn->state_mutex);
@@ -977,13 +980,21 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
struct iser_conn *iser_conn;
struct ib_device *ib_dev;
+ mutex_lock(&unbind_iser_conn_mutex);
+
session = starget_to_session(scsi_target(sdev))->dd_data;
iser_conn = session->leadconn->dd_data;
+ if (!iser_conn) {
+ mutex_unlock(&unbind_iser_conn_mutex);
+ return -ENOTCONN;
+ }
ib_dev = iser_conn->ib_conn.device->ib_device;
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
+ mutex_unlock(&unbind_iser_conn_mutex);
+
return 0;
}
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 12ed62ce9ff7..2a07692007bd 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -137,8 +137,10 @@ iser_prepare_write_cmd(struct iscsi_task *task,
if (unsol_sz < edtl) {
hdr->flags |= ISER_WSV;
- hdr->write_stag = cpu_to_be32(mem_reg->rkey);
- hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+ if (buf_out->data_len > imm_sz) {
+ hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+ hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+ }
iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
"VA:%#llX + unsol:%d\n",
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index c538a38c91ce..26a004e97ae0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -708,8 +708,14 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
unsigned short sg_tablesize, sup_sg_tablesize;
sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
- sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
- device->ib_device->attrs.max_fast_reg_page_list_len);
+ if (device->ib_device->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)
+ sup_sg_tablesize =
+ min_t(
+ uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+ device->ib_device->attrs.max_fast_reg_page_list_len);
+ else
+ sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
}
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index dad85e74c37c..3aae015469a5 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -71,7 +71,7 @@ static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base,
static int __init digicolor_of_init(struct device_node *node,
struct device_node *parent)
{
- static void __iomem *reg_base;
+ void __iomem *reg_base;
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct regmap *ucregs;
int ret;
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index 54c296401525..18d58d2b4ffe 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -43,7 +43,7 @@ static const struct of_device_id syscon_pldset_of_match[] = {
static int __init
realview_gic_of_init(struct device_node *node, struct device_node *parent)
{
- static struct regmap *map;
+ struct regmap *map;
struct device_node *np;
const struct of_device_id *gic_id;
u32 pld1_ctrl;
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 0a8ed1c05518..14461cbfab2f 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -154,7 +154,7 @@ asmlinkage void __weak plat_irq_dispatch(void)
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
- static struct irq_chip *chip;
+ struct irq_chip *chip;
if (hw < 2 && cpu_has_mipsmt) {
/* Software interrupts are used for MT/CMT IPI */
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 832ebf4062f7..6ab1d3afec02 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -950,7 +950,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
&gic_irq_domain_ops, NULL);
if (!gic_irq_domain)
panic("Failed to add GIC IRQ domain");
- gic_irq_domain->name = "mips-gic-irq";
gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
IRQ_DOMAIN_FLAG_IPI_PER_CPU,
@@ -959,7 +958,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
if (!gic_ipi_domain)
panic("Failed to add GIC IPI domain");
- gic_ipi_domain->name = "mips-gic-ipi";
irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
if (node &&
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 060d357f107f..6f423bc49d0d 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -485,18 +485,19 @@ static int isdn_divert_icall(isdn_ctrl *ic)
cs->deflect_dest[0] = '\0';
retval = 4; /* only proceed */
}
- sprintf(cs->info, "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
- cs->akt_state,
- cs->divert_id,
- divert_if.drv_to_name(cs->ics.driver),
- (ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
- cs->ics.parm.setup.phone,
- cs->ics.parm.setup.eazmsn,
- cs->ics.parm.setup.si1,
- cs->ics.parm.setup.si2,
- cs->ics.parm.setup.screen,
- dv->rule.waittime,
- cs->deflect_dest);
+ snprintf(cs->info, sizeof(cs->info),
+ "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
+ cs->akt_state,
+ cs->divert_id,
+ divert_if.drv_to_name(cs->ics.driver),
+ (ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
+ cs->ics.parm.setup.phone,
+ cs->ics.parm.setup.eazmsn,
+ cs->ics.parm.setup.si1,
+ cs->ics.parm.setup.si2,
+ cs->ics.parm.setup.screen,
+ dv->rule.waittime,
+ cs->deflect_dest);
if ((dv->rule.action == DEFLECT_REPORT) ||
(dv->rule.action == DEFLECT_REJECT)) {
put_info_buffer(cs->info);
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 40c7e2cf423b..034cabac699d 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -42,7 +42,7 @@ static char *revision = "$Revision: 1.1.2.2 $";
static bool suppress_pollack;
-static struct pci_device_id c4_pci_tbl[] = {
+static const struct pci_device_id c4_pci_tbl[] = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 },
{ } /* Terminating entry */
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 8b7ad4f1ab01..b2023e08dcd2 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -110,7 +110,7 @@ typedef struct _diva_os_thread_dpc {
/*
This table should be sorted by PCI device ID
*/
-static struct pci_device_id divas_pci_tbl[] = {
+static const struct pci_device_id divas_pci_tbl[] = {
/* Diva Server BRI-2M PCI 0xE010 */
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA),
CARDTYPE_MAESTRA_PCI },
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index e3fa1cd64470..dce6632daae1 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -1142,7 +1142,7 @@ fritz_remove_pci(struct pci_dev *pdev)
pr_info("%s: drvdata already removed\n", __func__);
}
-static struct pci_device_id fcpci_ids[] = {
+static const struct pci_device_id fcpci_ids[] = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID,
0, 0, (unsigned long) "Fritz!Card PCI"},
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index aea0c9616ea5..3cf07b8ced1c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5348,7 +5348,7 @@ static const struct hm_map hfcm_map[] = {
#undef H
#define H(x) ((unsigned long)&hfcm_map[x])
-static struct pci_device_id hfmultipci_ids[] = {
+static const struct pci_device_id hfmultipci_ids[] = {
/* Cards with HFC-4S Chip */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 5dc246d71c16..d2e401a8090e 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2161,7 +2161,7 @@ static const struct _hfc_map hfc_map[] =
{},
};
-static struct pci_device_id hfc_ids[] =
+static const struct pci_device_id hfc_ids[] =
{
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
(unsigned long) &hfc_map[0] },
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index afde4edef9ae..6a6d848bd18e 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1137,7 +1137,7 @@ static void nj_remove(struct pci_dev *pdev)
/* We cannot select cards with PCI_SUB... IDs, since there are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject
 * other known cards which do not work with this driver - see probe function */
-static struct pci_device_id nj_pci_ids[] = {
+static const struct pci_device_id nj_pci_ids[] = {
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 3052c836b89f..d80072fef434 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1398,7 +1398,7 @@ w6692_remove_pci(struct pci_dev *pdev)
pr_notice("%s: drvdata already removed\n", __func__);
}
-static struct pci_device_id w6692_ids[] = {
+static const struct pci_device_id w6692_ids[] = {
{ PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (ulong)&w6692_map[0]},
{ PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692,
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index c7d68675b028..7108bdb8742e 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1909,7 +1909,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
#ifdef CONFIG_PCI
#include <linux/pci.h>
-static struct pci_device_id hisax_pci_tbl[] __used = {
+static const struct pci_device_id hisax_pci_tbl[] __used = {
#ifdef CONFIG_HISAX_FRITZPCI
{PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) },
#endif
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 90f051ce0259..9090cc1e1f29 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -86,7 +86,7 @@ typedef struct {
char *device_name;
} hfc4s8s_param;
-static struct pci_device_id hfc4s8s_ids[] = {
+static const struct pci_device_id hfc4s8s_ids[] = {
{.vendor = PCI_VENDOR_ID_CCD,
.device = PCI_DEVICE_ID_4S,
.subvendor = 0x1397,
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 5a9f39ed1d5d..e4f7573ba9bf 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -52,7 +52,7 @@ module_param(debug, int, 0);
MODULE_AUTHOR("Kai Germaschewski <kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>");
MODULE_DESCRIPTION("AVM Fritz!PCI/PnP ISDN driver");
-static struct pci_device_id fcpci_ids[] = {
+static const struct pci_device_id fcpci_ids[] = {
{ .vendor = PCI_VENDOR_ID_AVM,
.device = PCI_DEVICE_ID_AVM_A1,
.subvendor = PCI_ANY_ID,
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 5ecc154f6831..9bc32578a766 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -657,7 +657,7 @@ try:
* be directed to disk.
*/
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter)
+ struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entry;
@@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
* filled with data from the cache). If part of the data resides on the
* media, we will read it later on
*/
- if (unlikely(!bio->bi_iter.bi_idx))
+ if (unlikely(!advanced_bio))
bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
data = bio_data(bio);
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 4e5c48f3de62..d682e89e6493 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -26,7 +26,7 @@
*/
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
sector_t lba, struct ppa_addr ppa,
- int bio_iter)
+ int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a cache address */
@@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
BUG_ON(!pblk_addr_in_cache(ppa));
#endif
- return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter);
+ return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
+ bio_iter, advanced_bio);
}
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
sector_t blba = pblk_get_lba(bio);
int nr_secs = rqd->nr_ppas;
- int advanced_bio = 0;
+ bool advanced_bio = false;
int i, j = 0;
/* logic error: lba out-of-bounds. Ignore read request */
@@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
retry:
if (pblk_ppa_empty(p)) {
WARN_ON(test_and_set_bit(i, read_bitmap));
- continue;
+
+ if (unlikely(!advanced_bio)) {
+ bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
+ advanced_bio = true;
+ }
+
+ goto next;
}
/* Try to read from write buffer. The address is later checked
* on the write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(p)) {
- if (!pblk_read_from_cache(pblk, bio, lba, p, i)) {
+ if (!pblk_read_from_cache(pblk, bio, lba, p, i,
+ advanced_bio)) {
pblk_lookup_l2p_seq(pblk, &p, lba, 1);
goto retry;
}
WARN_ON(test_and_set_bit(i, read_bitmap));
- advanced_bio = 1;
+ advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
@@ -83,6 +91,7 @@ retry:
rqd->ppa_list[j++] = p;
}
+next:
if (advanced_bio)
bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
}
@@ -282,7 +291,7 @@ retry:
* write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(ppa)) {
- if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) {
+ if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
goto retry;
}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 0c5692cc2f60..67e623bd5c2d 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
struct list_head *list,
unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter);
+ struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f4eace5ea184..40f3cd7eab0f 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -156,7 +156,8 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
rdev_for_each(rdev, mddev) {
if (! test_bit(In_sync, &rdev->flags)
- || test_bit(Faulty, &rdev->flags))
+ || test_bit(Faulty, &rdev->flags)
+ || test_bit(Bitmap_sync, &rdev->flags))
continue;
target = offset + index * (PAGE_SIZE/512);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 850ff6c67994..44f4a8ac95bd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1258,8 +1258,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
*/
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
- blk_status_t a;
- int f;
+ int a, f;
unsigned long buffers_processed = 0;
struct dm_buffer *b, *tmp;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1b224aa9cf15..3acce09bba35 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1587,16 +1587,18 @@ retry:
if (likely(ic->mode == 'J')) {
if (dio->write) {
unsigned next_entry, i, pos;
- unsigned ws, we;
+ unsigned ws, we, range_sectors;
- dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
+ dio->range.n_sectors = min(dio->range.n_sectors,
+ ic->free_sectors << ic->sb->log2_sectors_per_block);
if (unlikely(!dio->range.n_sectors))
goto sleep;
- ic->free_sectors -= dio->range.n_sectors;
+ range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
+ ic->free_sectors -= range_sectors;
journal_section = ic->free_section;
journal_entry = ic->free_section_entry;
- next_entry = ic->free_section_entry + dio->range.n_sectors;
+ next_entry = ic->free_section_entry + range_sectors;
ic->free_section_entry = next_entry % ic->journal_section_entries;
ic->free_section += next_entry / ic->journal_section_entries;
ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
@@ -1727,6 +1729,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
wraparound_section(ic, &ic->free_section);
ic->n_uncommitted_sections++;
}
+ WARN_ON(ic->journal_sections * ic->journal_section_entries !=
+ (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
}
static void integrity_commit(struct work_struct *w)
@@ -1821,6 +1825,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
{
unsigned i, j, n;
struct journal_completion comp;
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
comp.ic = ic;
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
@@ -1945,6 +1952,8 @@ skip_io:
dm_bufio_write_dirty_buffers_async(ic->bufio);
+ blk_finish_plug(&plug);
+
complete_journal_op(&comp);
wait_for_completion_io(&comp.comp);
@@ -3019,6 +3028,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Block size doesn't match the information in superblock";
goto bad;
}
+ if (!le32_to_cpu(ic->sb->journal_sections)) {
+ r = -EINVAL;
+ ti->error = "Corrupted superblock, journal_sections is 0";
+ goto bad;
+ }
/* make sure that ti->max_io_len doesn't overflow */
if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
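The dm-integrity fix above separates two units the old code conflated: dio->range.n_sectors counts 512-byte sectors, while ic->free_sectors and the journal-entry bookkeeping count blocks of 1 << log2_sectors_per_block sectors. A small, self-contained demonstration of the two shift conversions, with illustrative numbers only:

#include <stdio.h>

int main(void)
{
        unsigned log2_spb = 3;        /* 8 sectors per block (4 KiB) */
        unsigned free_blocks = 100;   /* models ic->free_sectors (block units) */
        unsigned n_sectors = 1000;    /* requested range, in 512 B sectors */

        /* clamp the range to journal space, converting blocks -> sectors */
        unsigned limit = free_blocks << log2_spb;        /* 800 sectors */
        if (n_sectors > limit)
                n_sectors = limit;

        /* consume journal entries in block units: sectors -> blocks */
        unsigned range_blocks = n_sectors >> log2_spb;   /* 100 blocks */
        free_blocks -= range_blocks;

        printf("n_sectors=%u range_blocks=%u free_blocks=%u\n",
               n_sectors, range_blocks, free_blocks);
        return 0;
}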
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2e10c2f13a34..5bfe285ea9d1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -208,6 +208,7 @@ struct raid_dev {
#define RT_FLAG_RS_BITMAP_LOADED 2
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
+#define RT_FLAG_RS_SUSPENDED 5
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -564,9 +565,10 @@ static const char *raid10_md_layout_to_format(int layout)
if (__raid10_near_copies(layout) > 1)
return "near";
- WARN_ON(__raid10_far_copies(layout) < 2);
+ if (__raid10_far_copies(layout) > 1)
+ return "far";
- return "far";
+ return "unknown";
}
/* Return md raid10 algorithm for @name */
@@ -2540,11 +2542,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (!freshest)
return 0;
- if (validate_raid_redundancy(rs)) {
- rs->ti->error = "Insufficient redundancy to activate array";
- return -EINVAL;
- }
-
/*
* Validation of the freshest device provides the source of
* validation for the remaining devices.
@@ -2553,6 +2550,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (super_validate(rs, freshest))
return -EINVAL;
+ if (validate_raid_redundancy(rs)) {
+ rs->ti->error = "Insufficient redundancy to activate array";
+ return -EINVAL;
+ }
+
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
rdev != freshest &&
@@ -3168,6 +3170,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
mddev_suspend(&rs->md);
+ set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
if (rs_is_raid456(rs)) {
@@ -3625,7 +3628,7 @@ static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- if (!rs->md.suspended)
+ if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_suspend(&rs->md);
rs->md.ro = 1;
@@ -3759,7 +3762,7 @@ static int rs_start_reshape(struct raid_set *rs)
return r;
/* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
- if (mddev->suspended)
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_resume(mddev);
/*
@@ -3786,8 +3789,8 @@ static int rs_start_reshape(struct raid_set *rs)
}
/* Suspend because a resume will happen in raid_resume() */
- if (!mddev->suspended)
- mddev_suspend(mddev);
+ set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
+ mddev_suspend(mddev);
/*
* Now reshape got set up, update superblocks to
@@ -3883,13 +3886,13 @@ static void raid_resume(struct dm_target *ti)
if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- if (mddev->suspended)
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_resume(mddev);
}
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 11, 1},
+ .version = {1, 12, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
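Rather than peeking at mddev->suspended (internal md state), the dm-raid target now tracks its own suspend/resume pairing in RT_FLAG_RS_SUSPENDED, and the test_and_set_bit()/test_and_clear_bit() calls make each transition happen at most once even if callers repeat or race. A userspace sketch of the same idempotent pairing, using C11 atomics in place of the kernel bitops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool rs_suspended;   /* models RT_FLAG_RS_SUSPENDED */

static void mddev_suspend_sim(void) { puts("mddev_suspend()"); }
static void mddev_resume_sim(void)  { puts("mddev_resume()"); }

static void target_suspend(void)
{
        /* like test_and_set_bit(): act only on the 0 -> 1 transition */
        if (!atomic_exchange(&rs_suspended, true))
                mddev_suspend_sim();
}

static void target_resume(void)
{
        /* like test_and_clear_bit(): act only on the 1 -> 0 transition */
        if (atomic_exchange(&rs_suspended, false))
                mddev_resume_sim();
}

int main(void)
{
        target_suspend();
        target_suspend();   /* duplicate call: no second suspend */
        target_resume();
        target_resume();    /* duplicate call: no second resume */
        return 0;
}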
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a39bcd9b982a..28a4071cdf85 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
+#include <linux/dax.h>
#define DM_MSG_PREFIX "table"
@@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
return false;
}
+static int device_dax_write_cache_enabled(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct dax_device *dax_dev = dev->dax_dev;
+
+ if (!dax_dev)
+ return false;
+
+ if (dax_write_cache_enabled(dax_dev))
+ return true;
+ return false;
+}
+
+static int dm_table_supports_dax_write_cache(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti,
+ device_dax_write_cache_enabled, NULL))
+ return true;
+ }
+
+ return false;
+}
+
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
+ if (dm_table_supports_dax_write_cache(t))
+ dax_write_cache(t->md->dax_dev, true);
+
/* Ensure that all underlying devices are non-rotational. */
if (dm_table_all_devices_attribute(t, device_is_nonrot))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
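dm_table_supports_dax_write_cache() follows the usual device-mapper shape: a per-device predicate is handed to each target's iterate_devices hook, and the table-level answer is the OR over all targets. A minimal model of that callback reduction, with all types simplified and names hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct dev_model { bool dax_write_cache; };

/* models the device_dax_write_cache_enabled() predicate */
static bool pred(const struct dev_model *d)
{
        return d->dax_write_cache;
}

/* models iterate_devices(): true if any device matches the predicate */
static bool iterate(const struct dev_model *devs, int n,
                    bool (*fn)(const struct dev_model *))
{
        for (int i = 0; i < n; i++)
                if (fn(&devs[i]))
                        return true;
        return false;
}

int main(void)
{
        struct dev_model table[] = { { false }, { true }, { false } };

        printf("supports dax write cache: %d\n", iterate(table, 3, pred));
        return 0;
}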
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 504ba3fa328b..e13f90832b6b 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -308,19 +308,14 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
unsigned n;
- if (!fio->rs) {
- fio->rs = mempool_alloc(v->fec->rs_pool, 0);
- if (unlikely(!fio->rs)) {
- DMERR("failed to allocate RS");
- return -ENOMEM;
- }
- }
+ if (!fio->rs)
+ fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO);
fec_for_each_prealloc_buffer(n) {
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO);
+ fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT);
if (unlikely(!fio->bufs[n])) {
DMERR("failed to allocate FEC buffer");
return -ENOMEM;
@@ -332,22 +327,16 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO);
+ fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT);
/* we can manage with even one buffer if necessary */
if (unlikely(!fio->bufs[n]))
break;
}
fio->nbufs = n;
- if (!fio->output) {
+ if (!fio->output)
fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
- if (!fio->output) {
- DMERR("failed to allocate FEC page");
- return -ENOMEM;
- }
- }
-
return 0;
}
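The switch from GFP_NOIO to GFP_NOWAIT above matters because the prealloc/extra buffers are optional: a failed opportunistic allocation is tolerated ("we can manage with even one buffer"), whereas the mandatory rs/output allocations keep GFP_NOIO, which for a mempool effectively never returns NULL. A sketch of the same mandatory-versus-optional split, with malloc standing in for the mempools:

#include <stdlib.h>
#include <stdio.h>

#define NBUFS 4

int main(void)
{
        void *out = malloc(4096);       /* mandatory: treat failure as fatal */
        if (!out)
                return 1;

        void *bufs[NBUFS];
        int n = 0;
        for (; n < NBUFS; n++) {
                bufs[n] = malloc(4096); /* optional: best effort only */
                if (!bufs[n])
                        break;          /* degrade instead of failing */
        }
        printf("got %d of %d optional buffers\n", n, NBUFS);

        while (n-- > 0)
                free(bufs[n]);
        free(out);
        return 0;
}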
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 884ff7c170a0..a4fa2ada6883 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
return ret;
}
@@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
return ret;
}
@@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
goto out;
}
@@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
}
- page = alloc_page(GFP_KERNEL);
+ page = alloc_page(GFP_NOIO);
if (!page)
return -ENOMEM;
@@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
/* Get zone information from disk */
ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
- &blkz, &nr_blkz, GFP_KERNEL);
+ &blkz, &nr_blkz, GFP_NOIO);
if (ret) {
dmz_dev_err(zmd->dev, "Get zone %u report failed",
dmz_id(zmd, zone));
@@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
ret = blkdev_reset_zones(dev->bdev,
dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_KERNEL);
+ dev->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
dmz_id(zmd, zone), ret);
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 05c0a126f5c8..44a119e12f1a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
nr_blocks = block - wp_block;
ret = blkdev_issue_zeroout(zrc->dev->bdev,
dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
- dmz_blk2sect(nr_blocks), GFP_NOFS, false);
+ dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
if (ret) {
dmz_dev_err(zrc->dev,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 2b538fa817f4..b08bbbd4d902 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
int ret;
/* Create a new chunk work */
- cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS);
+ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
if (!cw)
goto out;
@@ -588,7 +588,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = dev->bdev;
- if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE))
+ if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
return DM_MAPIO_REMAPPED;
/* The BIO should be block aligned */
@@ -603,7 +603,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
- if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) {
+ if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
spin_lock(&dmz->flush_lock);
bio_list_add(&dmz->flush_list, bio);
spin_unlock(&dmz->flush_lock);
@@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Chunk BIO work */
mutex_init(&dmz->chunk_lock);
- INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS);
+ INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
0, dev->name);
if (!dmz->chunk_wq) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cdca0296749..c99634612fc4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2287,7 +2287,7 @@ static void export_array(struct mddev *mddev)
static bool set_in_sync(struct mddev *mddev)
{
- WARN_ON_ONCE(!spin_is_locked(&mddev->lock));
+ WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
if (!mddev->in_sync) {
mddev->sync_checkers++;
spin_unlock(&mddev->lock);
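The extra NR_CPUS != 1 guard in set_in_sync() exists because uniprocessor builds compile spinlocks down to nothing, so spin_is_locked() reports the lock as never held and the bare assertion would fire on every call. A simplified model of why the check is meaningless on UP (the real kernel stubs are more involved):

#include <stdio.h>

/* #define CONFIG_SMP */                 /* define to model an SMP build */

struct spinlock_model { int locked; };

#ifdef CONFIG_SMP
#define spin_is_locked_model(l) ((l)->locked)
#else
#define spin_is_locked_model(l) ((void)(l), 0)  /* UP: no lock state exists */
#endif

int main(void)
{
        struct spinlock_model lock = { .locked = 1 };  /* "held" by us */

        /* On the UP build this prints 1 although the lock is held, which
         * is why the assertion must be skipped when NR_CPUS == 1. */
        printf("!spin_is_locked() = %d\n", !spin_is_locked_model(&lock));
        return 0;
}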
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 991f0fe2dcc6..09db03455801 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -134,7 +134,9 @@ enum flag_bits {
Faulty, /* device is known to have a fault */
In_sync, /* device is in_sync with rest of array */
Bitmap_sync, /* ..actually, not quite In_sync. Need a
- * bitmap-based recovery to get fully in sync
+ * bitmap-based recovery to get fully in sync.
+ * The bit is only meaningful before the device
+ * has been passed to pers->hot_add_disk.
*/
WriteMostly, /* Avoid reading if at all possible */
AutoDetected, /* added by auto-detect */
@@ -729,58 +731,4 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
!bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
mddev->queue->limits.max_write_zeroes_sectors = 0;
}
-
-/* Maximum size of each resync request */
-#define RESYNC_BLOCK_SIZE (64*1024)
-#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-
-/* for managing resync I/O pages */
-struct resync_pages {
- unsigned idx; /* for get/put page from the pool */
- void *raid_bio;
- struct page *pages[RESYNC_PAGES];
-};
-
-static inline int resync_alloc_pages(struct resync_pages *rp,
- gfp_t gfp_flags)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++) {
- rp->pages[i] = alloc_page(gfp_flags);
- if (!rp->pages[i])
- goto out_free;
- }
-
- return 0;
-
-out_free:
- while (--i >= 0)
- put_page(rp->pages[i]);
- return -ENOMEM;
-}
-
-static inline void resync_free_pages(struct resync_pages *rp)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++)
- put_page(rp->pages[i]);
-}
-
-static inline void resync_get_all_pages(struct resync_pages *rp)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++)
- get_page(rp->pages[i]);
-}
-
-static inline struct page *resync_fetch_page(struct resync_pages *rp,
- unsigned idx)
-{
- if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
- return NULL;
- return rp->pages[idx];
-}
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
new file mode 100644
index 000000000000..9f2670b45f31
--- /dev/null
+++ b/drivers/md/raid1-10.c
@@ -0,0 +1,81 @@
+/* Maximum size of each resync request */
+#define RESYNC_BLOCK_SIZE (64*1024)
+#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
+
+/* for managing resync I/O pages */
+struct resync_pages {
+ void *raid_bio;
+ struct page *pages[RESYNC_PAGES];
+};
+
+static inline int resync_alloc_pages(struct resync_pages *rp,
+ gfp_t gfp_flags)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++) {
+ rp->pages[i] = alloc_page(gfp_flags);
+ if (!rp->pages[i])
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ while (--i >= 0)
+ put_page(rp->pages[i]);
+ return -ENOMEM;
+}
+
+static inline void resync_free_pages(struct resync_pages *rp)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++)
+ put_page(rp->pages[i]);
+}
+
+static inline void resync_get_all_pages(struct resync_pages *rp)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++)
+ get_page(rp->pages[i]);
+}
+
+static inline struct page *resync_fetch_page(struct resync_pages *rp,
+ unsigned idx)
+{
+ if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
+ return NULL;
+ return rp->pages[idx];
+}
+
+/*
+ * 'struct resync_pages' stores the actual pages used for doing the resync
+ * I/O; it is per-bio, so make .bi_private point to it.
+ */
+static inline struct resync_pages *get_resync_pages(struct bio *bio)
+{
+ return bio->bi_private;
+}
+
+/* generally called after bio_reset() for resetting the bvec table */
+static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
+ int size)
+{
+ int idx = 0;
+
+ /* initialize bvec table again */
+ do {
+ struct page *page = resync_fetch_page(rp, idx);
+ int len = min_t(int, size, PAGE_SIZE);
+
+ /*
+ * won't fail because the vec table is big
+ * enough to hold all these pages
+ */
+ bio_add_page(bio, page, len, 0);
+ size -= len;
+ } while (idx++ < RESYNC_PAGES && size > 0);
+}
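resync_alloc_pages(), now shared via raid1-10.c, is the classic partial-allocation unwind: on the first failure it walks the already-filled slots backwards and releases them, so the caller always sees an all-or-nothing result. The same shape in plain userspace C, with malloc standing in for alloc_page():

#include <stdlib.h>
#include <stdio.h>

#define NPAGES 16

/* all-or-nothing allocation: unwind on the first failure */
static int alloc_all(void *pages[NPAGES])
{
        int i;

        for (i = 0; i < NPAGES; i++) {
                pages[i] = malloc(4096);
                if (!pages[i])
                        goto out_free;
        }
        return 0;

out_free:
        while (--i >= 0)        /* release only what was obtained */
                free(pages[i]);
        return -1;
}

int main(void)
{
        void *pages[NPAGES];

        if (alloc_all(pages))
                return 1;
        /* ... use the pages ... */
        for (int i = 0; i < NPAGES; i++)
                free(pages[i]);
        return 0;
}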
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3febfc8391fb..f50958ded9f0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
#define raid1_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
- return bio->bi_private;
-}
+#include "raid1-10.c"
/*
* for resync bio, r1bio pointer can be retrieved from the per-bio
@@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
resync_get_all_pages(rp);
}
- rp->idx = 0;
rp->raid_bio = r1_bio;
bio->bi_private = rp;
}
@@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio)
}
if (behind) {
- /* we release behind master bio when all write are done */
- if (r1_bio->behind_master_bio == bio)
- to_put = NULL;
-
if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
@@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
@@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf)
wake_up(&conf->wait_barrier);
}
-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+static void alloc_behind_master_bio(struct r1bio *r1_bio,
struct bio *bio)
{
int size = bio->bi_iter.bi_size;
@@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
if (!behind_bio)
- goto fail;
+ return;
/* discard op, we don't support writezero/writesame yet */
- if (!bio_has_data(bio))
+ if (!bio_has_data(bio)) {
+ behind_bio->bi_iter.bi_size = size;
goto skip_copy;
+ }
while (i < vcnt && size) {
struct page *page;
@@ -1123,14 +1112,13 @@ skip_copy:
r1_bio->behind_master_bio = behind_bio;
set_bit(R1BIO_BehindIO, &r1_bio->state);
- return behind_bio;
+ return;
free_pages:
pr_debug("%dB behind alloc failed, doing sync I/O\n",
bio->bi_iter.bi_size);
bio_free_pages(behind_bio);
-fail:
- return behind_bio;
+ bio_put(behind_bio);
}
struct raid1_plug_cb {
@@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
(atomic_read(&bitmap->behind_writes)
< mddev->bitmap_info.max_write_behind) &&
!waitqueue_active(&bitmap->behind_wait)) {
- mbio = alloc_behind_master_bio(r1_bio, bio);
+ alloc_behind_master_bio(r1_bio, bio);
}
bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
first_clone = 0;
}
- if (!mbio) {
- if (r1_bio->behind_master_bio)
- mbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO,
- mddev->bio_set);
- else
- mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
- }
+ if (r1_bio->behind_master_bio)
+ mbio = bio_clone_fast(r1_bio->behind_master_bio,
+ GFP_NOIO, mddev->bio_set);
+ else
+ mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
if (r1_bio->behind_master_bio) {
if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio)
/* Fix variable parts of all bios */
vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
for (i = 0; i < conf->raid_disks * 2; i++) {
- int j;
- int size;
blk_status_t status;
- struct bio_vec *bi;
struct bio *b = r1_bio->bios[i];
struct resync_pages *rp = get_resync_pages(b);
if (b->bi_end_io != end_sync_read)
@@ -2098,8 +2080,6 @@ static void process_checks(struct r1bio *r1_bio)
status = b->bi_status;
bio_reset(b);
b->bi_status = status;
- b->bi_vcnt = vcnt;
- b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
b->bi_bdev = conf->mirrors[i].rdev->bdev;
@@ -2107,15 +2087,8 @@ static void process_checks(struct r1bio *r1_bio)
rp->raid_bio = r1_bio;
b->bi_private = rp;
- size = b->bi_iter.bi_size;
- bio_for_each_segment_all(bi, b, j) {
- bi->bv_offset = 0;
- if (size > PAGE_SIZE)
- bi->bv_len = PAGE_SIZE;
- else
- bi->bv_len = size;
- size -= PAGE_SIZE;
- }
+ /* initialize bvec table again */
+ md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
}
for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
wbio = bio_clone_fast(r1_bio->behind_master_bio,
GFP_NOIO,
mddev->bio_set);
- /* We really need a _all clone */
- wbio->bi_iter = (struct bvec_iter){ 0 };
} else {
wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
mddev->bio_set);
@@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
int good_sectors = RESYNC_SECTORS;
int min_bad = 0; /* number of sectors that are bad in all devices */
int idx = sector_to_idx(sector_nr);
+ int page_idx = 0;
if (!conf->r1buf_pool)
if (init_resync(conf))
@@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r1_bio->bios[i];
rp = get_resync_pages(bio);
if (bio->bi_end_io) {
- page = resync_fetch_page(rp, rp->idx++);
+ page = resync_fetch_page(rp, page_idx);
/*
* won't fail because the vec table is big
@@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
nr_sectors += len>>9;
sector_nr += len>>9;
sync_blocks -= (len>>9);
- } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
+ } while (++page_idx < RESYNC_PAGES);
r1_bio->sectors = nr_sectors;
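Removing rp->idx takes mutable iteration state out of the shared resync_pages and keeps it in a stack-local page_idx, so the loop bound now depends only on the caller's own counter rather than on whichever bio last bumped the shared field. A reduced model of the new loop shape, names hypothetical:

#include <stdio.h>

#define RESYNC_PAGES 16

struct resync_pages_model { int pages[RESYNC_PAGES]; }; /* no idx member */

static void fill_bio(struct resync_pages_model *rp)
{
        int page_idx = 0;            /* per-call cursor, as in the fix */

        do {
                rp->pages[page_idx] = page_idx;   /* "fetch" one page */
        } while (++page_idx < RESYNC_PAGES);
}

int main(void)
{
        struct resync_pages_model rp;

        fill_bio(&rp);   /* repeated walks cannot corrupt a shared cursor */
        fill_bio(&rp);
        printf("last page index: %d\n", rp.pages[RESYNC_PAGES - 1]);
        return 0;
}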
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5026e7ad51d3..f55d4cc085f6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf);
#define raid10_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
- return bio->bi_private;
-}
+#include "raid1-10.c"
/*
* for resync bio, r10bio pointer can be retrieved from the per-bio
@@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
resync_get_all_pages(rp);
}
- rp->idx = 0;
rp->raid_bio = r10_bio;
bio->bi_private = rp;
if (rbio) {
@@ -913,8 +905,7 @@ static void flush_pending_writes(struct r10conf *conf)
bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
@@ -1098,8 +1089,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev;
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
@@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
rp = get_resync_pages(tbio);
bio_reset(tbio);
- tbio->bi_vcnt = vcnt;
- tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
+ md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
+
rp->raid_bio = r10_bio;
tbio->bi_private = rp;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
@@ -2853,6 +2843,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
sector_t chunk_mask = conf->geo.chunk_mask;
+ int page_idx = 0;
if (!conf->r10buf_pool)
if (init_resync(conf))
@@ -3355,7 +3346,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
break;
for (bio= biolist ; bio ; bio=bio->bi_next) {
struct resync_pages *rp = get_resync_pages(bio);
- page = resync_fetch_page(rp, rp->idx++);
+ page = resync_fetch_page(rp, page_idx);
/*
* won't fail because the vec table is big enough
* to hold all these pages
@@ -3364,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
nr_sectors += len>>9;
sector_nr += len>>9;
- } while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
+ } while (++page_idx < RESYNC_PAGES);
r10_bio->sectors = nr_sectors;
while (biolist) {
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 77cce3573aa8..44ad5baf3206 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1150,7 +1150,7 @@ int ppl_init_log(struct r5conf *conf)
goto err;
}
- ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0);
+ ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
if (!ppl_conf->bs) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2ceb338b094b..0fc2748aaf95 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3381,9 +3381,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
md_write_end(conf->mddev);
- bio_endio(bi);
+ bio_io_error(bi);
bi = nextbi;
}
if (bitmap_end)
@@ -3403,9 +3402,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
md_write_end(conf->mddev);
- bio_endio(bi);
+ bio_io_error(bi);
bi = bi2;
}
@@ -3429,8 +3427,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
- bio_endio(bi);
+ bio_io_error(bi);
bi = nextbi;
}
}
@@ -6237,6 +6234,8 @@ static void raid5_do_work(struct work_struct *work)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
+
+ async_tx_issue_pending_all();
blk_finish_plug(&plug);
pr_debug("--- raid5worker inactive\n");
@@ -7951,12 +7950,10 @@ static void end_reshape(struct r5conf *conf)
{
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
- struct md_rdev *rdev;
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
- rdev_for_each(rdev, conf->mddev)
- rdev->data_offset = rdev->new_data_offset;
+ md_finish_reshape(conf->mddev);
smp_wmb();
conf->reshape_progress = MaxSector;
conf->mddev->reshape_position = MaxSector;
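Each two-line sequence deleted in the raid1/raid10/raid5 hunks above (set bi_status to BLK_STS_IOERR, then complete the bio) is exactly what the bio_io_error() helper does; folding the call sites onto the helper removes the chance of the two steps drifting apart. For reference, the helper amounts to the following paraphrase (kernel code, not a standalone program):

static inline void bio_io_error(struct bio *bio)
{
        bio->bi_status = BLK_STS_IOERR;
        bio_endio(bio);
}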
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index a9dfb26972f2..250dc6ec4c82 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2957,7 +2957,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
}
/* find out number of slots supported */
- if (device_property_read_u32(dev, "num-slots", &pdata->num_slots))
+ if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots))
dev_info(dev, "'num-slots' was deprecated.\n");
if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 7c12f3715676..04ff3c97a535 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -356,9 +356,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
struct mmc_host *mmc = host->mmc;
int ret = 0;
- if (mmc_pdata(host)->set_power)
- return mmc_pdata(host)->set_power(host->dev, power_on, vdd);
-
/*
* If we don't see a Vcc regulator, assume it's a fixed
* voltage always-on regulator.
@@ -366,9 +363,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (IS_ERR(mmc->supply.vmmc))
return 0;
- if (mmc_pdata(host)->before_set_reg)
- mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd);
-
ret = omap_hsmmc_set_pbias(host, false, 0);
if (ret)
return ret;
@@ -400,9 +394,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
return ret;
}
- if (mmc_pdata(host)->after_set_reg)
- mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd);
-
return 0;
err_set_voltage:
@@ -469,8 +460,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
int ret;
struct mmc_host *mmc = host->mmc;
- if (mmc_pdata(host)->set_power)
- return 0;
ret = mmc_regulator_get_supply(mmc);
if (ret == -EPROBE_DEFER)
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d6fa2214aaae..0fb4e4c119e1 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
}
mmc_writel(host, REG_CLKCR, rval);
- if (host->cfg->needs_new_timings)
- mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE);
+ if (host->cfg->needs_new_timings) {
+ /* Don't touch the delay bits */
+ rval = mmc_readl(host, REG_SD_NTSR);
+ rval |= SDXC_2X_TIMING_MODE;
+ mmc_writel(host, REG_SD_NTSR, rval);
+ }
ret = sunxi_mmc_clk_set_phase(host, ios, rate);
if (ret)
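The sunxi-mmc fix is a textbook read-modify-write: a blind write of SDXC_2X_TIMING_MODE was clobbering the delay fields that share the register, so the value is now read back, OR-ed, and rewritten. The same pattern on a plain variable standing in for the MMIO register (bit positions here are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define TIMING_MODE_BIT (1u << 31)   /* models SDXC_2X_TIMING_MODE */
#define DELAY_BITS      0x0000003fu  /* other fields in the register */

int main(void)
{
        uint32_t reg = 0x2a & DELAY_BITS;  /* delay bits set by firmware */

        /* wrong: reg = TIMING_MODE_BIT;  -- would wipe the delay bits */

        uint32_t val = reg;                /* read   */
        val |= TIMING_MODE_BIT;            /* modify: touch only our bit */
        reg = val;                         /* write  */

        printf("reg=%#x (delay bits intact: %#x)\n", reg, reg & DELAY_BITS);
        return 0;
}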
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 7c754a0f14bb..19e4e904c9bf 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -2,20 +2,11 @@
# Multiplexer devices
#
-menuconfig MULTIPLEXER
- tristate "Multiplexer subsystem"
- help
- Multiplexer controller subsystem. Multiplexers are used in a
- variety of settings, and this subsystem abstracts their use
- so that the rest of the kernel sees a common interface. When
- multiple parallel multiplexers are controlled by one single
- multiplexer controller, this subsystem also coordinates the
- multiplexer accesses.
-
- To compile the subsystem as a module, choose M here: the module will
- be called mux-core.
+config MULTIPLEXER
+ tristate
-if MULTIPLEXER
+menu "Multiplexer drivers"
+ depends on MULTIPLEXER
config MUX_ADG792A
tristate "Analog Devices ADG792A/ADG792G Multiplexers"
@@ -56,4 +47,4 @@ config MUX_MMIO
To compile the driver as a module, choose M here: the module will
be called mux-mmio.
-endif
+endmenu
diff --git a/drivers/mux/mux-core.c b/drivers/mux/mux-core.c
index 90b8995f07cb..2fe96c470112 100644
--- a/drivers/mux/mux-core.c
+++ b/drivers/mux/mux-core.c
@@ -46,7 +46,7 @@ static int __init mux_init(void)
static void __exit mux_exit(void)
{
- class_register(&mux_class);
+ class_unregister(&mux_class);
ida_destroy(&mux_ida);
}
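The one-character mux-core fix (class_register() called from the exit path instead of class_unregister()) is the classic copy-paste hazard of asymmetric setup and teardown. Keeping init and exit as mirror images makes such slips visually obvious; a sketch with simulated registration calls:

#include <stdio.h>

static void class_register_sim(void)   { puts("register class");   }
static void class_unregister_sim(void) { puts("unregister class"); }
static void ida_destroy_sim(void)      { puts("destroy ida");      }

static int mux_init_sim(void)
{
        class_register_sim();
        return 0;
}

/* exit must undo init in reverse order, call for call */
static void mux_exit_sim(void)
{
        class_unregister_sim();   /* not class_register_sim()! */
        ida_destroy_sim();
}

int main(void)
{
        mux_init_sim();
        mux_exit_sim();
        return 0;
}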
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 14ff622190a5..181839d6fbea 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4596,7 +4596,7 @@ static int bond_check_params(struct bond_params *params)
}
ad_user_port_key = valptr->value;
- if (bond_mode == BOND_MODE_TLB) {
+ if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
bond_opt_initstr(&newval, "default");
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
&newval);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index e68d368e20ac..7f36d3e3c98b 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1665,6 +1665,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.dev_name = "BCM53125",
.vlans = 4096,
.enabled_ports = 0xff,
+ .arl_entries = 4,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 53b088166c28..5bcdd33101b0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3178,6 +3178,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d3906f6b01bd..86058a9f3417 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1785,16 +1785,18 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
xgene_enet_gpiod_get(pdata);
- pdata->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pdata->clk)) {
- /* Abort if the clock is defined but couldn't be retrived.
- * Always abort if the clock is missing on DT system as
- * the driver can't cope with this case.
- */
- if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
- return PTR_ERR(pdata->clk);
- /* Firmware may have set up the clock already. */
- dev_info(dev, "clocks have been setup already\n");
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
+ pdata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pdata->clk)) {
+ /* Abort if the clock is defined but couldn't be
+ * retrived. Always abort if the clock is missing on
+ * DT system as the driver can't cope with this case.
+ */
+ if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
+ return PTR_ERR(pdata->clk);
+ /* Firmware may have set up the clock already. */
+ dev_info(dev, "clocks have been setup already\n");
+ }
}
if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 73aca97a96bc..d937083db9a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -50,11 +50,14 @@ static u32 platform_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
{
- return writel(value, bgmac->plat.idm_base + offset);
+ writel(value, bgmac->plat.idm_base + offset);
}
static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
{
+ if (!bgmac->plat.idm_base)
+ return true;
+
if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN)
return false;
if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
@@ -66,6 +69,9 @@ static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
{
u32 val;
+ if (!bgmac->plat.idm_base)
+ return;
+
/* The Reset Control register only contains a single bit to show if the
* controller is currently in reset. Do a sanity check here, just in
* case the bootloader happened to leave the device in reset.
@@ -180,6 +186,7 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;
bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP;
bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP;
+ bgmac->feature_flags |= BGMAC_FEAT_IDM_MASK;
bgmac->dev = &pdev->dev;
bgmac->dma_dev = &pdev->dev;
@@ -207,15 +214,13 @@ static int bgmac_probe(struct platform_device *pdev)
return PTR_ERR(bgmac->plat.base);
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
- if (!regs) {
- dev_err(&pdev->dev, "Unable to obtain idm resource\n");
- return -EINVAL;
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
+ bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
}
- bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
-
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
if (regs) {
bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index ba4d2e145bb9..48d672b204a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -622,9 +622,11 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
- if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
- dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
- return -ENOTSUPP;
+ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+ if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
+ dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
+ return -ENOTSUPP;
+ }
}
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -855,9 +857,11 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
static void bgmac_miiconfig(struct bgmac *bgmac)
{
if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
- bgmac_idm_write(bgmac, BCMA_IOCTL,
- bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
- BGMAC_BCMA_IOCTL_SW_CLKEN);
+ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+ bgmac_idm_write(bgmac, BCMA_IOCTL,
+ bgmac_idm_read(bgmac, BCMA_IOCTL) |
+ 0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN);
+ }
bgmac->mac_speed = SPEED_2500;
bgmac->mac_duplex = DUPLEX_FULL;
bgmac_mac_speed(bgmac);
@@ -874,11 +878,36 @@ static void bgmac_miiconfig(struct bgmac *bgmac)
}
}
+static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
+{
+ u32 iost;
+
+ iost = bgmac_idm_read(bgmac, BCMA_IOST);
+ if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
+ iost &= ~BGMAC_BCMA_IOST_ATTACHED;
+
+ /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
+ if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
+ u32 flags = 0;
+
+ if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+ if (!bgmac->has_robosw)
+ flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ }
+ bgmac_clk_enable(bgmac, flags);
+ }
+
+ if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+ bgmac_idm_write(bgmac, BCMA_IOCTL,
+ bgmac_idm_read(bgmac, BCMA_IOCTL) &
+ ~BGMAC_BCMA_IOCTL_SW_RESET);
+}
+
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
u32 cmdcfg_sr;
- u32 iost;
int i;
if (bgmac_clk_enabled(bgmac)) {
@@ -899,20 +928,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
/* TODO: Clear software multicast filter list */
}
- iost = bgmac_idm_read(bgmac, BCMA_IOST);
- if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
- iost &= ~BGMAC_BCMA_IOST_ATTACHED;
-
- /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
- if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
- u32 flags = 0;
- if (iost & BGMAC_BCMA_IOST_ATTACHED) {
- flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
- flags |= BGMAC_BCMA_IOCTL_SW_RESET;
- }
- bgmac_clk_enable(bgmac, flags);
- }
+ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK))
+ bgmac_chip_reset_idm_config(bgmac);
/* Request Misc PLL for corerev > 2 */
if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
@@ -970,11 +987,6 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
}
- if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
- bgmac_idm_write(bgmac, BCMA_IOCTL,
- bgmac_idm_read(bgmac, BCMA_IOCTL) &
- ~BGMAC_BCMA_IOCTL_SW_RESET);
-
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
* Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
* BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
@@ -1497,8 +1509,10 @@ int bgmac_enet_probe(struct bgmac *bgmac)
bgmac_clk_enable(bgmac, 0);
/* This seems to be fixing IRQ by assigning OOB #6 to the core */
- if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
- bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
+ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+ if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
+ bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
+ }
bgmac_chip_reset(bgmac);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index c1818766c501..443d57b10264 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -425,6 +425,7 @@
#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17)
#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18)
#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19)
+#define BGMAC_FEAT_IDM_MASK BIT(20)
struct bgmac_slot_info {
union {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 43423744fdfa..1e33abde4a3e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2886,7 +2886,7 @@ static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
static int bnx2x_test_nvram(struct bnx2x *bp)
{
- const struct crc_pair nvram_tbl[] = {
+ static const struct crc_pair nvram_tbl[] = {
{ 0, 0x14 }, /* bootstrap */
{ 0x14, 0xec }, /* dir */
{ 0x100, 0x350 }, /* manuf_info */
@@ -2895,7 +2895,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x708, 0x70 }, /* manuf_key_info */
{ 0, 0 }
};
- const struct crc_pair nvram_tbl2[] = {
+ static const struct crc_pair nvram_tbl2[] = {
{ 0x7e8, 0x350 }, /* manuf_info2 */
{ 0xb38, 0xf0 }, /* feature_info */
{ 0, 0 }
@@ -3162,7 +3162,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
memset(queue_name, 0, sizeof(queue_name));
- sprintf(queue_name, "%d", i);
+ snprintf(queue_name, sizeof(queue_name),
+ "%d", i);
for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
snprintf(buf + (k + j)*ETH_GSTRING_LEN,
ETH_GSTRING_LEN,
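The bnx2x string change is about bounding writes into a fixed buffer: snprintf() truncates instead of overrunning if the formatted queue index ever outgrows queue_name. A two-line refresher, with a deliberately tiny buffer:

#include <stdio.h>

int main(void)
{
        char queue_name[4];

        /* sprintf(queue_name, "%d", 123456) would overflow the buffer;
         * snprintf stops at sizeof(queue_name) - 1 and NUL-terminates. */
        snprintf(queue_name, sizeof(queue_name), "%d", 123456);
        printf("queue_name=\"%s\"\n", queue_name);  /* prints "123" */
        return 0;
}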
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index daca1c9d254b..7b0b399aaedd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1202,12 +1202,21 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
return tx_cb_ptr;
}
-/* Simple helper to free a control block's resources */
-static void bcmgenet_free_cb(struct enet_cb *cb)
+static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
{
- dev_kfree_skb_any(cb->skb);
- cb->skb = NULL;
- dma_unmap_addr_set(cb, dma_addr, 0);
+ struct enet_cb *tx_cb_ptr;
+
+ tx_cb_ptr = ring->cbs;
+ tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+
+ /* Rewinding local write pointer */
+ if (ring->write_ptr == ring->cb_ptr)
+ ring->write_ptr = ring->end_ptr;
+ else
+ ring->write_ptr--;
+
+ return tx_cb_ptr;
}
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
@@ -1260,18 +1269,72 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
INTRL2_CPU_MASK_SET);
}
+/* Simple helper to free a transmit control block's resources
+ * Returns an skb when the last transmit control block associated with the
+ * skb is freed. The skb should be freed by the caller if necessary.
+ */
+static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
+ struct enet_cb *cb)
+{
+ struct sk_buff *skb;
+
+ skb = cb->skb;
+
+ if (skb) {
+ cb->skb = NULL;
+ if (cb == GENET_CB(skb)->first_cb)
+ dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+
+ if (cb == GENET_CB(skb)->last_cb)
+ return skb;
+
+ } else if (dma_unmap_addr(cb, dma_addr)) {
+ dma_unmap_page(dev,
+ dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+
+ return NULL;
+}
+
+/* Simple helper to free a receive control block's resources */
+static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
+ struct enet_cb *cb)
+{
+ struct sk_buff *skb;
+
+ skb = cb->skb;
+ cb->skb = NULL;
+
+ if (dma_unmap_addr(cb, dma_addr)) {
+ dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+
+ return skb;
+}
+
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
- struct enet_cb *tx_cb_ptr;
- unsigned int pkts_compl = 0;
+ unsigned int txbds_processed = 0;
unsigned int bytes_compl = 0;
- unsigned int c_index;
+ unsigned int pkts_compl = 0;
unsigned int txbds_ready;
- unsigned int txbds_processed = 0;
+ unsigned int c_index;
+ struct sk_buff *skb;
/* Clear status before servicing to reduce spurious interrupts */
if (ring->index == DESC_INDEX)
@@ -1292,21 +1355,12 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
/* Reclaim transmitted buffers */
while (txbds_processed < txbds_ready) {
- tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
- if (tx_cb_ptr->skb) {
+ skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
+ &priv->tx_cbs[ring->clean_ptr]);
+ if (skb) {
pkts_compl++;
- bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
- dma_unmap_single(kdev,
- dma_unmap_addr(tx_cb_ptr, dma_addr),
- dma_unmap_len(tx_cb_ptr, dma_len),
- DMA_TO_DEVICE);
- bcmgenet_free_cb(tx_cb_ptr);
- } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
- dma_unmap_page(kdev,
- dma_unmap_addr(tx_cb_ptr, dma_addr),
- dma_unmap_len(tx_cb_ptr, dma_len),
- DMA_TO_DEVICE);
- dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+ bytes_compl += GENET_CB(skb)->bytes_sent;
+ dev_kfree_skb_any(skb);
}
txbds_processed++;
@@ -1380,95 +1434,6 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
-/* Transmits a single SKB (either head of a fragment or a single SKB)
- * caller must hold priv->lock
- */
-static int bcmgenet_xmit_single(struct net_device *dev,
- struct sk_buff *skb,
- u16 dma_desc_flags,
- struct bcmgenet_tx_ring *ring)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
- struct enet_cb *tx_cb_ptr;
- unsigned int skb_len;
- dma_addr_t mapping;
- u32 length_status;
- int ret;
-
- tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
- if (unlikely(!tx_cb_ptr))
- BUG();
-
- tx_cb_ptr->skb = skb;
-
- skb_len = skb_headlen(skb);
-
- mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
- priv->mib.tx_dma_failed++;
- netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
- dev_kfree_skb(skb);
- return ret;
- }
-
- dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
- length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
- (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
- DMA_TX_APPEND_CRC;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- length_status |= DMA_TX_DO_CSUM;
-
- dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
-
- return 0;
-}
-
-/* Transmit a SKB fragment */
-static int bcmgenet_xmit_frag(struct net_device *dev,
- skb_frag_t *frag,
- u16 dma_desc_flags,
- struct bcmgenet_tx_ring *ring)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
- struct enet_cb *tx_cb_ptr;
- unsigned int frag_size;
- dma_addr_t mapping;
- int ret;
-
- tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
- if (unlikely(!tx_cb_ptr))
- BUG();
-
- tx_cb_ptr->skb = NULL;
-
- frag_size = skb_frag_size(frag);
-
- mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
- priv->mib.tx_dma_failed++;
- netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
- __func__);
- return ret;
- }
-
- dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
-
- dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
- (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
- (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
-
- return 0;
-}
-
/* Reallocate the SKB to put enough headroom in front of it and insert
* the transmit checksum offsets in the descriptors
*/
@@ -1535,11 +1500,16 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
struct bcmgenet_tx_ring *ring = NULL;
+ struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
unsigned long flags = 0;
int nr_frags, index;
- u16 dma_desc_flags;
+ dma_addr_t mapping;
+ unsigned int size;
+ skb_frag_t *frag;
+ u32 len_stat;
int ret;
int i;
@@ -1592,29 +1562,53 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- dma_desc_flags = DMA_SOP;
- if (nr_frags == 0)
- dma_desc_flags |= DMA_EOP;
+ for (i = 0; i <= nr_frags; i++) {
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
- /* Transmit single SKB or head of fragment list */
- ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
- if (ret) {
- ret = NETDEV_TX_OK;
- goto out;
- }
+ if (unlikely(!tx_cb_ptr))
+ BUG();
+
+ if (!i) {
+ /* Transmit single SKB or head of fragment list */
+ GENET_CB(skb)->first_cb = tx_cb_ptr;
+ size = skb_headlen(skb);
+ mapping = dma_map_single(kdev, skb->data, size,
+ DMA_TO_DEVICE);
+ } else {
+ /* xmit fragment */
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ size = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(kdev, frag, 0, size,
+ DMA_TO_DEVICE);
+ }
- /* xmit fragment */
- for (i = 0; i < nr_frags; i++) {
- ret = bcmgenet_xmit_frag(dev,
- &skb_shinfo(skb)->frags[i],
- (i == nr_frags - 1) ? DMA_EOP : 0,
- ring);
+ ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.tx_dma_failed++;
+ netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
ret = NETDEV_TX_OK;
- goto out;
+ goto out_unmap_frags;
+ }
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, size);
+
+ tx_cb_ptr->skb = skb;
+
+ len_stat = (size << DMA_BUFLENGTH_SHIFT) |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+
+ if (!i) {
+ len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ len_stat |= DMA_TX_DO_CSUM;
}
+ if (i == nr_frags)
+ len_stat |= DMA_EOP;
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
}
+ GENET_CB(skb)->last_cb = tx_cb_ptr;
skb_tx_timestamp(skb);
/* Decrement total BD count and advance our write pointer */
@@ -1635,6 +1629,19 @@ out:
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
+
+out_unmap_frags:
+ /* Back up for failed control block mapping */
+ bcmgenet_put_txcb(priv, ring);
+
+ /* Unmap successfully mapped control blocks */
+ while (i-- > 0) {
+ tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
+ bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
+ }
+
+ dev_kfree_skb(skb);
+ goto out;
}
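The new out_unmap_frags path pairs bcmgenet_put_txcb() (which rewinds the ring's write pointer) with bcmgenet_free_tx_cb(), so a DMA-mapping failure in the middle of a multi-fragment transmit releases exactly the descriptors already consumed. The rollback skeleton, reduced to its loop structure with stub map/unmap functions:

#include <stdio.h>

#define NFRAGS 5

static int map_frag(int i)    { return i == 3 ? -1 : 0; } /* frag 3 fails */
static void unmap_frag(int i) { printf("unmap frag %d\n", i); }

int main(void)
{
        int i;

        for (i = 0; i <= NFRAGS; i++) {
                /* models get_txcb(): claim a descriptor, then try to map */
                if (map_frag(i) < 0)
                        goto out_unmap;
        }
        puts("all fragments mapped");
        return 0;

out_unmap:
        /* back up over the descriptor claimed for the failed fragment */
        printf("put back descriptor %d\n", i);

        /* then rewind and unmap every previously mapped fragment */
        while (i-- > 0)
                unmap_frag(i);
        return 1;
}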
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
@@ -1666,14 +1673,12 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
}
/* Grab the current Rx skb from the ring and DMA-unmap it */
- rx_skb = cb->skb;
- if (likely(rx_skb))
- dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
- priv->rx_buf_len, DMA_FROM_DEVICE);
+ rx_skb = bcmgenet_free_rx_cb(kdev, cb);
/* Put the new Rx skb on the ring */
cb->skb = skb;
dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
dmadesc_set_addr(priv, cb->bd_addr, mapping);
/* Return the current Rx skb to caller */
@@ -1880,22 +1885,16 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
- struct device *kdev = &priv->pdev->dev;
+ struct sk_buff *skb;
struct enet_cb *cb;
int i;
for (i = 0; i < priv->num_rx_bds; i++) {
cb = &priv->rx_cbs[i];
- if (dma_unmap_addr(cb, dma_addr)) {
- dma_unmap_single(kdev,
- dma_unmap_addr(cb, dma_addr),
- priv->rx_buf_len, DMA_FROM_DEVICE);
- dma_unmap_addr_set(cb, dma_addr, 0);
- }
-
- if (cb->skb)
- bcmgenet_free_cb(cb);
+ skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
+ if (skb)
+ dev_kfree_skb_any(skb);
}
}
@@ -2479,8 +2478,10 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
- int i;
struct netdev_queue *txq;
+ struct sk_buff *skb;
+ struct enet_cb *cb;
+ int i;
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
@@ -2489,10 +2490,10 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_dma_teardown(priv);
for (i = 0; i < priv->num_tx_bds; i++) {
- if (priv->tx_cbs[i].skb != NULL) {
- dev_kfree_skb(priv->tx_cbs[i].skb);
- priv->tx_cbs[i].skb = NULL;
- }
+ cb = priv->tx_cbs + i;
+ skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
+ if (skb)
+ dev_kfree_skb(skb);
}
for (i = 0; i < priv->hw_params->tx_queues; i++) {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index efd07020b89f..b9344de669f8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -544,6 +544,8 @@ struct bcmgenet_hw_params {
};
struct bcmgenet_skb_cb {
+ struct enet_cb *first_cb; /* First control block of SKB */
+ struct enet_cb *last_cb; /* Last control block of SKB */
unsigned int bytes_sent; /* bytes on the wire (no TSB) */
};
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 28ecda3d3404..ebd353bc78ff 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -335,7 +335,7 @@ lio_ethtool_get_channels(struct net_device *dev,
static int lio_get_eeprom_len(struct net_device *netdev)
{
- u8 buf[128];
+ u8 buf[192];
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
struct octeon_board_info *board_info;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index a0ca68ce3fbb..79112563a25a 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1008,7 +1008,7 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
struct device *dev = &bgx->pdev->dev;
struct lmac *lmac;
- char str[20];
+ char str[27];
if (!bgx->is_dlm && lmacid)
return;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 50517cfd9671..9f9d6cae39d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -441,7 +441,8 @@ void cxgb4_ptp_init(struct adapter *adapter)
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
&adapter->pdev->dev);
- if (!adapter->ptp_clock) {
+ if (IS_ERR_OR_NULL(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
dev_err(adapter->pdev_dev,
"PTP %s Clock registration has failed\n", __func__);
return;
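ptp_clock_register() can hand back either NULL (when PTP support is compiled out) or an ERR_PTR-encoded errno, so testing only for NULL, as the old code did, misses the error encoding; IS_ERR_OR_NULL() catches both, and the fix then normalizes the field to NULL. A userspace re-creation of the convention, with deliberately simplified macros:

#include <stdio.h>

/* simplified re-creation of the kernel's ERR_PTR encoding */
#define MAX_ERRNO 4095
#define ERR_PTR(err)      ((void *)(long)(err))
#define IS_ERR(p)         ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(p) (!(p) || IS_ERR(p))

static void *register_clock(int mode)
{
        if (mode == 0)
                return NULL;             /* feature compiled out */
        if (mode == 1)
                return ERR_PTR(-19);     /* -ENODEV-style failure */
        static int clock;
        return &clock;                   /* success */
}

int main(void)
{
        for (int mode = 0; mode < 3; mode++) {
                void *clk = register_clock(mode);

                if (IS_ERR_OR_NULL(clk)) {
                        clk = NULL;      /* normalize, as the fix does */
                        printf("mode %d: registration failed\n", mode);
                } else {
                        printf("mode %d: ok\n", mode);
                }
        }
        return 0;
}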
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 99987d8e437e..aa28299aef5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -174,6 +174,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */
+ CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */
+ CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ff864a187d5a..a37166ee577b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -776,8 +776,9 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
assert(handle);
mac_cb = hns_get_mac_cb(handle);
- if (!mac_cb->cpld_ctrl)
+ if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
return;
+
hns_set_led_opt(mac_cb);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 7a8addda726e..408b63faf9a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -53,6 +53,34 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
return ret;
}
+static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
+ u32 link, u32 port, u32 act)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = link;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+ obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[2].integer.value = act;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_warn(mac_cb->dev, "ledctrl fail, link:%d port:%d act:%d!\n",
+ link, port, act);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
u16 speed, int data)
{
@@ -93,6 +121,18 @@ static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
}
}
+static void hns_cpld_set_led_acpi(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
+{
+ if (!mac_cb) {
+ pr_err("cpld_led_set mac_cb is null!\n");
+ return;
+ }
+
+ hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+ link_status, mac_cb->mac_id, data);
+}
+
static void cpld_led_reset(struct hns_mac_cb *mac_cb)
{
if (!mac_cb || !mac_cb->cpld_ctrl)
@@ -103,6 +143,20 @@ static void cpld_led_reset(struct hns_mac_cb *mac_cb)
mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
}
+static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
+{
+ if (!mac_cb) {
+ pr_err("cpld_led_reset mac_cb is null!\n");
+ return;
+ }
+
+ if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
+ return;
+
+ hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+ 0, mac_cb->mac_id, 0);
+}
+
static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
@@ -604,8 +658,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
} else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
- misc_op->cpld_set_led = hns_cpld_set_led;
- misc_op->cpld_reset_led = cpld_led_reset;
+ misc_op->cpld_set_led = hns_cpld_set_led_acpi;
+ misc_op->cpld_reset_led = cpld_led_reset_acpi;
misc_op->cpld_set_led_id = cpld_set_led_id;
misc_op->dsaf_reset = hns_dsaf_rst_acpi;
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 249a4584401a..b651c1210555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -283,7 +283,7 @@ int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
}
/* Should be called under a lock */
-static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
+static void __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
{
struct mlx4_zone_allocator *zone_alloc = entry->allocator;
@@ -315,8 +315,6 @@ static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
}
zone_alloc->mask = mask;
}
-
- return 0;
}
void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
@@ -457,7 +455,7 @@ struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
{
struct mlx4_zone_entry *zone;
- int res;
+ int res = 0;
spin_lock(&zones->lock);
@@ -468,7 +466,7 @@ int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
goto out;
}
- res = __mlx4_zone_remove_one_entry(zone);
+ __mlx4_zone_remove_one_entry(zone);
out:
spin_unlock(&zones->lock);
@@ -578,7 +576,7 @@ out:
}
static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
- struct mlx4_buf *buf, gfp_t gfp)
+ struct mlx4_buf *buf)
{
dma_addr_t t;
@@ -587,7 +585,7 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
buf->page_shift = get_order(size) + PAGE_SHIFT;
buf->direct.buf =
dma_zalloc_coherent(&dev->persist->pdev->dev,
- size, &t, gfp);
+ size, &t, GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
@@ -607,10 +605,10 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
* multiple pages, so we don't require too much contiguous memory.
*/
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp)
+ struct mlx4_buf *buf)
{
if (size <= max_direct) {
- return mlx4_buf_direct_alloc(dev, size, buf, gfp);
+ return mlx4_buf_direct_alloc(dev, size, buf);
} else {
dma_addr_t t;
int i;
@@ -620,14 +618,14 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- gfp);
+ GFP_KERNEL);
if (!buf->page_list)
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
dma_zalloc_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE, &t, gfp);
+ PAGE_SIZE, &t, GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
@@ -663,12 +661,11 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
-static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
- gfp_t gfp)
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
struct mlx4_db_pgdir *pgdir;
- pgdir = kzalloc(sizeof *pgdir, gfp);
+ pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
@@ -676,7 +673,7 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
pgdir->bits[0] = pgdir->order0;
pgdir->bits[1] = pgdir->order1;
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
- &pgdir->db_dma, gfp);
+ &pgdir->db_dma, GFP_KERNEL);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
@@ -716,7 +713,7 @@ found:
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
@@ -728,7 +725,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp
if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
goto out;
- pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
+ pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev);
if (!pgdir) {
ret = -ENOMEM;
goto out;
@@ -780,13 +777,13 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
{
int err;
- err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
+ err = mlx4_db_alloc(dev, &wqres->db, 1);
if (err)
return err;
*wqres->db.db = 0;
- err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL);
+ err = mlx4_buf_direct_alloc(dev, size, &wqres->buf);
if (err)
goto err_db;
@@ -795,7 +792,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
if (err)
goto err_mtt;
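Across this mlx4 series every caller ends up passing GFP_KERNEL, so threading a gfp_t through the buffer, doorbell, and ICM helpers carried no information; the parameter is dropped and the flag is spelled once at each allocation site. The shape of the simplification, reduced to a sketch with illustrative names:

	#include <linux/slab.h>
	#include <linux/types.h>

	/* All callers run in sleepable process context, so the former
	 * gfp_t parameter is gone and GFP_KERNEL appears exactly once.
	 */
	static u64 *example_alloc_page_list(int npages)
	{
		return kcalloc(npages, sizeof(u64), GFP_KERNEL);
	}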
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index fa6d2354a0e9..c56a511b918e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -224,11 +224,11 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
if (*cqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &cq_table->table, *cqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
if (err)
goto err_put;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e5fb89505a13..436f7689a032 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1042,7 +1042,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
if (!context)
return -ENOMEM;
- err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
en_err(priv, "Failed to allocate qp #%x\n", qpn);
goto out;
@@ -1086,7 +1086,7 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
en_err(priv, "Failed reserving drop qpn\n");
return err;
}
- err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
if (err) {
en_err(priv, "Failed allocating drop qp\n");
mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
@@ -1158,8 +1158,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
}
/* Configure RSS indirection qp */
- err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp,
- GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
goto rss_err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4f3a9b27ce4a..73faa3d77921 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -111,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
goto err_hwq_res;
}
- err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index e1f9e7cebf8f..5a7816e7c7b4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -251,8 +251,7 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
- gfp_t gfp)
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
u32 i = (obj & (table->num_obj - 1)) /
(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
@@ -266,7 +265,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
}
table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
- (table->lowmem ? gfp : GFP_HIGHUSER) |
+ (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
__GFP_NOWARN, table->coherent);
if (!table->icm[i]) {
ret = -ENOMEM;
@@ -363,7 +362,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u32 i;
for (i = start; i <= end; i += inc) {
- err = mlx4_table_get(dev, table, i, GFP_KERNEL);
+ err = mlx4_table_get(dev, table, i);
if (err)
goto fail;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index 0c7364550150..dee67fa39107 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -71,8 +71,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
gfp_t gfp_mask, int coherent);
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
- gfp_t gfp);
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u32 start, u32 end);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 30616cd0140d..706d7f21ac5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -969,7 +969,7 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
-int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
@@ -977,7 +977,7 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
int __mlx4_mpt_reserve(struct mlx4_dev *dev);
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
-int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp);
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index ce852ca22a96..24282cd017d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -479,14 +479,14 @@ static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
__mlx4_mpt_release(dev, index);
}
-int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
- return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
+ return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}
-static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
+static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
u64 param = 0;
@@ -497,7 +497,7 @@ static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
- return __mlx4_mpt_alloc_icm(dev, index, gfp);
+ return __mlx4_mpt_alloc_icm(dev, index);
}
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
@@ -629,7 +629,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
@@ -787,14 +787,13 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- struct mlx4_buf *buf, gfp_t gfp)
+ struct mlx4_buf *buf)
{
u64 *page_list;
int err;
int i;
- page_list = kmalloc(buf->npages * sizeof *page_list,
- gfp);
+ page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
return -ENOMEM;
@@ -841,7 +840,7 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 5a310d313e94..26747212526b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -301,29 +301,29 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
-int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
if (err)
goto err_put_qp;
- err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
if (err)
goto err_put_auxc;
- err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
if (err)
goto err_put_altc;
- err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
if (err)
goto err_put_rdmarc;
@@ -345,7 +345,7 @@ err_out:
return err;
}
-static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
u64 param = 0;
@@ -355,7 +355,7 @@ static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
- return __mlx4_qp_alloc_icm(dev, qpn, gfp);
+ return __mlx4_qp_alloc_icm(dev, qpn);
}
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
@@ -397,7 +397,7 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
return qp;
}
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
@@ -408,7 +408,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
qp->qpn = qpn;
- err = mlx4_qp_alloc_icm(dev, qpn, gfp);
+ err = mlx4_qp_alloc_icm(dev, qpn);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 812783865205..215e21c3dc8a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1822,7 +1822,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
if (!fw_reserved(dev, qpn)) {
- err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
+ err = __mlx4_qp_alloc_icm(dev, qpn);
if (err) {
res_abort_move(dev, slave, RES_QP, qpn);
return err;
@@ -1909,7 +1909,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
return err;
- err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
+ err = __mlx4_mpt_alloc_icm(dev, mpt->key);
if (err) {
res_abort_move(dev, slave, RES_MPT, id);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index f44d089e2ca6..bedf52126824 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -100,11 +100,11 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
if (*srqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &srq_table->table, *srqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
if (err)
goto err_put;
return 0;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 746d94e28470..60850bfa3d32 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -766,11 +766,13 @@ static void emac_shutdown(struct platform_device *pdev)
struct emac_adapter *adpt = netdev_priv(netdev);
struct emac_sgmii *sgmii = &adpt->phy;
- /* Closing the SGMII turns off its interrupts */
- sgmii->close(adpt);
+ if (netdev->flags & IFF_UP) {
+ /* Closing the SGMII turns off its interrupts */
+ sgmii->close(adpt);
- /* Resetting the MAC turns off all DMA and its interrupts */
- emac_mac_reset(adpt);
+ /* Resetting the MAC turns off all DMA and its interrupts */
+ emac_mac_reset(adpt);
+ }
}
static struct platform_driver emac_platform_driver = {
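emac_shutdown() previously closed the SGMII and reset the MAC even when the interface had never been opened, touching hardware state that only open() sets up. Gating the work on IFF_UP avoids that; a sketch with a hypothetical stop helper:

	#include <linux/netdevice.h>
	#include <linux/platform_device.h>

	static void example_stop_hw(void *priv)
	{
		/* mask device interrupts, reset DMA (hypothetical) */
	}

	static void example_shutdown(struct platform_device *pdev)
	{
		struct net_device *ndev = dev_get_drvdata(&pdev->dev);

		/* only quiesce hardware that open() actually brought up */
		if (ndev->flags & IFF_UP)
			example_stop_hw(netdev_priv(ndev));
	}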
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index b607936e1b3e..9c0488e0f08e 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -90,17 +90,13 @@ struct ioc3_private {
spinlock_t ioc3_lock;
struct mii_if_info mii;
+ struct net_device *dev;
struct pci_dev *pdev;
/* Members used by autonegotiation */
struct timer_list ioc3_timer;
};
-static inline struct net_device *priv_netdev(struct ioc3_private *dev)
-{
- return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
-}
-
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -427,7 +423,7 @@ static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
nic[i] = nic_read_byte(ioc3);
for (i = 2; i < 8; i++)
- priv_netdev(ip)->dev_addr[i - 2] = nic[i];
+ ip->dev->dev_addr[i - 2] = nic[i];
}
/*
@@ -439,7 +435,7 @@ static void ioc3_get_eaddr(struct ioc3_private *ip)
{
ioc3_get_eaddr_nic(ip);
- printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr);
+ printk("Ethernet address is %pM.\n", ip->dev->dev_addr);
}
static void __ioc3_set_mac_address(struct net_device *dev)
@@ -790,13 +786,12 @@ static void ioc3_timer(unsigned long data)
*/
static int ioc3_mii_init(struct ioc3_private *ip)
{
- struct net_device *dev = priv_netdev(ip);
int i, found = 0, res = 0;
int ioc3_phy_workaround = 1;
u16 word;
for (i = 0; i < 32; i++) {
- word = ioc3_mdio_read(dev, i, MII_PHYSID1);
+ word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);
if (word != 0xffff && word != 0x0000) {
found = 1;
@@ -1276,6 +1271,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(dev, &pdev->dev);
ip = netdev_priv(dev);
+ ip->dev = dev;
dev->irq = pdev->irq;
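The removed priv_netdev() recovered the net_device by subtracting a hand-computed 32-byte-aligned offset from the private area, an assumption about alloc_etherdev() internals that can silently rot. Keeping an explicit back-pointer assigned once at probe time is the robust form; a sketch:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	struct example_priv {
		struct net_device *dev;	/* back-pointer, set once at probe */
	};

	static struct net_device *example_create(void)
	{
		struct net_device *dev = alloc_etherdev(sizeof(struct example_priv));
		struct example_priv *ip;

		if (!dev)
			return NULL;
		ip = netdev_priv(dev);
		ip->dev = dev;	/* every later ip->dev use replaces the math */
		return dev;
	}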
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f233bf8b4ebb..c4407e8e39a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -117,7 +117,7 @@ static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
void __iomem *ioaddr = hw->pcsr;
u32 value;
- const struct stmmac_rx_routing route_possibilities[] = {
+ static const struct stmmac_rx_routing route_possibilities[] = {
{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
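Declaring the routing table static const moves it into .rodata and stops the compiler from rebuilding it on the stack at every call; the same one-word change applies to any lookup table that never varies:

	#include <linux/types.h>

	struct example_route {
		u32 mask;
		u32 shift;
	};

	/* built at compile time, shared read-only by all callers */
	static const struct example_route example_routes[] = {
		{ 0x3, 0 },
		{ 0xc, 2 },
	};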
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1853f7ff6657..1763e48c84e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4120,8 +4120,15 @@ int stmmac_dvr_probe(struct device *device,
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
- if (priv->plat->stmmac_rst)
+ if (priv->plat->stmmac_rst) {
+ ret = reset_control_assert(priv->plat->stmmac_rst);
reset_control_deassert(priv->plat->stmmac_rst);
+ /* Some reset controllers provide only a reset callback instead of an
+ * assert + deassert callback pair.
+ */
+ if (ret == -ENOTSUPP)
+ reset_control_reset(priv->plat->stmmac_rst);
+ }
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
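Some reset controllers implement only the .reset operation, so reset_control_assert() on them fails with -ENOTSUPP; the hunk above detects that and falls back to a pulsed reset_control_reset(). The fallback in isolation, assuming an already-acquired reset line:

	#include <linux/errno.h>
	#include <linux/reset.h>

	static void example_hw_reset(struct reset_control *rst)
	{
		int ret = reset_control_assert(rst);

		reset_control_deassert(rst);
		/* controller only has a .reset callback: pulse it instead */
		if (ret == -ENOTSUPP)
			reset_control_reset(rst);
	}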
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 46cb7f8955a2..4bb04aaf9650 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9532,7 +9532,7 @@ static struct niu_parent *niu_get_parent(struct niu *np,
p = niu_new_parent(np, id, ptype);
if (p) {
- char port_name[6];
+ char port_name[8];
int err;
sprintf(port_name, "port%d", port);
@@ -9553,7 +9553,7 @@ static void niu_put_parent(struct niu *np)
{
struct niu_parent *p = np->parent;
u8 port = np->port;
- char port_name[6];
+ char port_name[8];
BUG_ON(!p || p->ports[port] != np);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 711fbbbc4b1f..163d8d16bc24 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -654,6 +654,8 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
RET(-EFAULT);
}
DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
+ } else {
+ return -EOPNOTSUPP;
}
if (!capable(CAP_SYS_RAWIO))
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1850e348f555..badd0a8caeb9 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -3089,6 +3089,31 @@ static int cpsw_probe(struct platform_device *pdev)
cpsw->quirk_irq = true;
}
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
+ netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
+ cpsw_split_res(ndev);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->dev, "error registering net device\n");
+ ret = -ENODEV;
+ goto clean_ale_ret;
+ }
+
+ if (cpsw->data.dual_emac) {
+ ret = cpsw_probe_dual_emac(priv);
+ if (ret) {
+ cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
+ goto clean_unregister_netdev_ret;
+ }
+ }
+
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
* MISC IRQs which are always kept disabled with this driver so
* we will not request them.
@@ -3127,33 +3152,9 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
- ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
- ndev->netdev_ops = &cpsw_netdev_ops;
- ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
- netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
- cpsw_split_res(ndev);
-
- /* register the network device */
- SET_NETDEV_DEV(ndev, &pdev->dev);
- ret = register_netdev(ndev);
- if (ret) {
- dev_err(priv->dev, "error registering net device\n");
- ret = -ENODEV;
- goto clean_ale_ret;
- }
-
cpsw_notice(priv, probe,
"initialized device (regs %pa, irq %d, pool size %d)\n",
&ss_res->start, ndev->irq, dma_params.descs_pool_size);
- if (cpsw->data.dual_emac) {
- ret = cpsw_probe_dual_emac(priv);
- if (ret) {
- cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
- goto clean_unregister_netdev_ret;
- }
- }
pm_runtime_put(&pdev->dev);
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 00755b6a42cf..c608e1dfaf09 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -135,8 +135,8 @@ int mdio_mux_init(struct device *dev,
for_each_available_child_of_node(dev->of_node, child_bus_node) {
int v;
- v = of_mdio_parse_addr(dev, child_bus_node);
- if (v < 0) {
+ r = of_property_read_u32(child_bus_node, "reg", &v);
+ if (r) {
dev_err(dev,
"Error: Failed to find reg for child %s\n",
of_node_full_name(child_bus_node));
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 13028833bee3..bd4303944e44 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -120,6 +120,7 @@ struct ppp {
int n_channels; /* how many channels are attached 54 */
spinlock_t rlock; /* lock for receive side 58 */
spinlock_t wlock; /* lock for transmit side 5c */
+ int __percpu *xmit_recursion; /* xmit recursion detect */
int mru; /* max receive unit 60 */
unsigned int flags; /* control bits 64 */
unsigned int xstate; /* transmit state bits 68 */
@@ -1025,6 +1026,7 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
struct ppp *ppp = netdev_priv(dev);
int indx;
int err;
+ int cpu;
ppp->dev = dev;
ppp->ppp_net = src_net;
@@ -1039,6 +1041,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
INIT_LIST_HEAD(&ppp->channels);
spin_lock_init(&ppp->rlock);
spin_lock_init(&ppp->wlock);
+
+ ppp->xmit_recursion = alloc_percpu(int);
+ if (!ppp->xmit_recursion) {
+ err = -ENOMEM;
+ goto err1;
+ }
+ for_each_possible_cpu(cpu)
+ (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
+
#ifdef CONFIG_PPP_MULTILINK
ppp->minseq = -1;
skb_queue_head_init(&ppp->mrq);
@@ -1050,11 +1061,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
if (err < 0)
- return err;
+ goto err2;
conf->file->private_data = &ppp->file;
return 0;
+err2:
+ free_percpu(ppp->xmit_recursion);
+err1:
+ return err;
}
static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
@@ -1400,18 +1415,16 @@ static void __ppp_xmit_process(struct ppp *ppp)
ppp_xmit_unlock(ppp);
}
-static DEFINE_PER_CPU(int, ppp_xmit_recursion);
-
static void ppp_xmit_process(struct ppp *ppp)
{
local_bh_disable();
- if (unlikely(__this_cpu_read(ppp_xmit_recursion)))
+ if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
goto err;
- __this_cpu_inc(ppp_xmit_recursion);
+ (*this_cpu_ptr(ppp->xmit_recursion))++;
__ppp_xmit_process(ppp);
- __this_cpu_dec(ppp_xmit_recursion);
+ (*this_cpu_ptr(ppp->xmit_recursion))--;
local_bh_enable();
@@ -1905,7 +1918,7 @@ static void __ppp_channel_push(struct channel *pch)
read_lock(&pch->upl);
ppp = pch->ppp;
if (ppp)
- __ppp_xmit_process(ppp);
+ ppp_xmit_process(ppp);
read_unlock(&pch->upl);
}
}
@@ -1914,9 +1927,7 @@ static void ppp_channel_push(struct channel *pch)
{
local_bh_disable();
- __this_cpu_inc(ppp_xmit_recursion);
__ppp_channel_push(pch);
- __this_cpu_dec(ppp_xmit_recursion);
local_bh_enable();
}
@@ -3057,6 +3068,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
#endif /* CONFIG_PPP_FILTER */
kfree_skb(ppp->xmit_pending);
+ free_percpu(ppp->xmit_recursion);
free_netdev(ppp->dev);
}
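Moving the recursion counter from a single static per-CPU variable into each ppp struct means stacked ppp units (one device transmitting through another) no longer mistake the lower device's transmit for recursion into the upper one. The guard, reduced to a sketch with illustrative names:

	#include <linux/bottom_half.h>
	#include <linux/percpu.h>

	struct example_ppp {
		int __percpu *xmit_recursion;	/* alloc_percpu(int) at init */
	};

	static void example_xmit(struct example_ppp *ppp)
	{
		local_bh_disable();
		if (likely(!*this_cpu_ptr(ppp->xmit_recursion))) {
			(*this_cpu_ptr(ppp->xmit_recursion))++;
			/* ... push the frame to the lower layer ... */
			(*this_cpu_ptr(ppp->xmit_recursion))--;
		}
		local_bh_enable();
	}

The counter is released with free_percpu() on destroy, which is exactly the pairing the new error labels in ppp_dev_configure() above establish.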
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d103a1d4fb36..8f572b9f3625 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -768,8 +768,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
u8 *buf;
int len;
int temp;
+ int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
+ u16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -874,6 +876,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
+ /*
+ * Some Huawei devices have been observed to come out of reset in NTB32
+ * format mode. Check whether this is the case, and if so set the device
+ * back to NTB16 format.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
+ err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 0, iface_no, &curr_ntb_format, 2);
+ if (err < 0) {
+ goto error2;
+ }
+
+ if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
+ dev_info(&intf->dev, "resetting NTB format to 16-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+
+ if (err < 0)
+ goto error2;
+ }
+ }
+
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out || !dev->status) {
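One portability note on the new check: USB control transfers are little-endian on the wire, so a strictly endian-safe variant reads the format into a __le16 and converts before comparing. A hedged sketch using the same control requests:

	#include <linux/usb/cdc.h>
	#include <linux/usb/usbnet.h>

	static int example_force_ntb16(struct usbnet *dev, u8 iface_no)
	{
		__le16 fmt;
		int err;

		err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
				      USB_TYPE_CLASS | USB_DIR_IN |
				      USB_RECIP_INTERFACE,
				      0, iface_no, &fmt, sizeof(fmt));
		if (err < 0)
			return err;

		if (le16_to_cpu(fmt) != USB_CDC_NCM_NTB32_FORMAT)
			return 0;	/* already 16-bit, nothing to do */

		return usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
					USB_TYPE_CLASS | USB_DIR_OUT |
					USB_RECIP_INTERFACE,
					USB_CDC_NCM_NTB16_FORMAT,
					iface_no, NULL, 0);
	}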
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 2680a65cd5e4..63f28908afda 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
* be at the end of the frame.
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
+
+ /* Additionally, it has been reported that some Huawei E3372H devices, with
+ * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence
+ * needing to be set to the NTB16 one again.
+ */
+ drvflags |= CDC_NCM_FLAG_RESET_NTB16;
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2dfca96a63b6..340c13484e5c 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -898,6 +898,7 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.set_wol = smsc95xx_ethtool_set_wol,
.get_link_ksettings = smsc95xx_get_link_ksettings,
.set_link_ksettings = smsc95xx_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 99a26a9efec1..f41ab0ea942a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2743,9 +2743,9 @@ module_init(virtio_net_driver_init);
static __exit void virtio_net_driver_exit(void)
{
+ unregister_virtio_driver(&virtio_net_driver);
cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
cpuhp_remove_multi_state(virtionet_online);
- unregister_virtio_driver(&virtio_net_driver);
}
module_exit(virtio_net_driver_exit);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index ba1c9f93592b..9c51b8be0038 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -311,7 +311,7 @@ struct vmxnet3_intr {
u8 num_intrs; /* # of intr vectors */
u8 event_intr_idx; /* idx of the intr vector for event */
u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
- char event_msi_vector_name[IFNAMSIZ+11];
+ char event_msi_vector_name[IFNAMSIZ+17];
#ifdef CONFIG_PCI_MSI
struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 6e2e760d98b1..0b75def39c6c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -5704,7 +5704,7 @@ static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev)
static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev)
{
- const u8 glrt_table[] = {
+ static const u8 glrt_table[] = {
0xE0, 0x1F, 0X38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */
0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */
0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7cd99b1f8596..75bc08c6838c 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -421,14 +421,15 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num)
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
const unsigned int sector_size = 512;
- sector_t start_sector;
+ sector_t start_sector, end_sector;
u64 num_sectors;
u32 rem;
start_sector = div_u64(ns_offset, sector_size);
- num_sectors = div_u64_rem(len, sector_size, &rem);
+ end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
if (rem)
- num_sectors++;
+ end_sector++;
+ num_sectors = end_sector - start_sector;
if (unlikely(num_sectors > (u64)INT_MAX)) {
u64 remaining = num_sectors;
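The off-by-one is easiest to see with concrete numbers, since the old code sized the range from len alone and ignored where ns_offset falls within a sector. A worked example at the fixed 512-byte sector size:

	/* ns_offset = 256, len = 512: poison covers bytes 256..767,
	 * i.e. part of sector 0 AND part of sector 1.
	 *
	 * old: num_sectors = DIV_ROUND_UP(512, 512)        = 1   (misses one)
	 * new: start_sector = 256 / 512                    = 0
	 *      end_sector   = DIV_ROUND_UP(256 + 512, 512) = 2
	 *      num_sectors  = end_sector - start_sector    = 2   (correct)
	 */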
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cb96f4a7ae3a..c49f1f8b2e57 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
c.directive.opcode = nvme_admin_directive_recv;
c.directive.nsid = cpu_to_le32(nsid);
- c.directive.numd = sizeof(*s);
+ c.directive.numd = cpu_to_le32(sizeof(*s));
c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
c.directive.dtype = NVME_DIR_STREAMS;
@@ -1995,6 +1995,9 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
int serial_len = sizeof(ctrl->serial);
int model_len = sizeof(ctrl->model);
+ if (!uuid_is_null(&ns->uuid))
+ return sprintf(buf, "uuid.%pU\n", &ns->uuid);
+
if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
return sprintf(buf, "eui.%16phN\n", ns->nguid);
@@ -2709,7 +2712,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
mutex_lock(&ctrl->namespaces_mutex);
/* Forcibly unquiesce queues to avoid blocking dispatch */
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ if (ctrl->admin_q)
+ blk_mq_unquiesce_queue(ctrl->admin_q);
list_for_each_entry(ns, &ctrl->namespaces, list) {
/*
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d666ada39a9b..5c2a08ef08ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1888,7 +1888,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
* the target device is present
*/
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
- return BLK_STS_IOERR;
+ goto busy;
if (!nvme_fc_ctrl_get(ctrl))
return BLK_STS_IOERR;
@@ -1958,22 +1958,25 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
queue->lldd_handle, &op->fcp_req);
if (ret) {
- if (op->rq) /* normal request */
+ if (!(op->flags & FCOP_FLAGS_AEN))
nvme_fc_unmap_data(ctrl, op->rq, op);
- /* else - aen. no cleanup needed */
nvme_fc_ctrl_put(ctrl);
- if (ret != -EBUSY)
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
+ ret != -EBUSY)
return BLK_STS_IOERR;
- if (op->rq)
- blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
-
- return BLK_STS_RESOURCE;
+ goto busy;
}
return BLK_STS_OK;
+
+busy:
+ if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
+ blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
+
+ return BLK_STS_RESOURCE;
}
static blk_status_t
@@ -2802,66 +2805,70 @@ out_fail:
return ERR_PTR(ret);
}
-enum {
- FCT_TRADDR_ERR = 0,
- FCT_TRADDR_WWNN = 1 << 0,
- FCT_TRADDR_WWPN = 1 << 1,
-};
struct nvmet_fc_traddr {
u64 nn;
u64 pn;
};
-static const match_table_t traddr_opt_tokens = {
- { FCT_TRADDR_WWNN, "nn-%s" },
- { FCT_TRADDR_WWPN, "pn-%s" },
- { FCT_TRADDR_ERR, NULL }
-};
-
static int
-nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
- substring_t args[MAX_OPT_ARGS];
- char *options, *o, *p;
- int token, ret = 0;
u64 token64;
- options = o = kstrdup(buf, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
+ if (match_u64(sstr, &token64))
+ return -EINVAL;
+ *val = token64;
- while ((p = strsep(&o, ":\n")) != NULL) {
- if (!*p)
- continue;
+ return 0;
+}
- token = match_token(p, traddr_opt_tokens, args);
- switch (token) {
- case FCT_TRADDR_WWNN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->nn = token64;
- break;
- case FCT_TRADDR_WWPN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->pn = token64;
- break;
- default:
- pr_warn("unknown traddr token or missing value '%s'\n",
- p);
- ret = -EINVAL;
- goto out;
- }
- }
+/*
+ * This routine validates and extracts the WWNs from the TRADDR string.
+ * As kernel parsers need a 0x prefix to determine the number base, the
+ * string to parse is always built with a 0x prefix before parsing.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+ char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+ substring_t wwn = { name, &name[sizeof(name)-1] };
+ int nnoffset, pnoffset;
+
+ /* validate the string is one of the 2 allowed formats */
+ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+ !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+ "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+ nnoffset = NVME_FC_TRADDR_OXNNLEN;
+ pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+ NVME_FC_TRADDR_OXNNLEN;
+ } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+ !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+ "pn-", NVME_FC_TRADDR_NNLEN))) {
+ nnoffset = NVME_FC_TRADDR_NNLEN;
+ pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+ } else
+ goto out_einval;
-out:
- kfree(options);
- return ret;
+ name[0] = '0';
+ name[1] = 'x';
+ name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+ memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+ goto out_einval;
+
+ memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+ goto out_einval;
+
+ return 0;
+
+out_einval:
+ pr_warn("%s: bad traddr string\n", __func__);
+ return -EINVAL;
}
static struct nvme_ctrl *
@@ -2875,11 +2882,11 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
unsigned long flags;
int ret;
- ret = nvme_fc_parse_address(&raddr, opts->traddr);
+ ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
if (ret || !raddr.nn || !raddr.pn)
return ERR_PTR(-EINVAL);
- ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
+ ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
if (ret || !laddr.nn || !laddr.pn)
return ERR_PTR(-EINVAL);
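The rewritten parser accepts exactly two fixed-width layouts, e.g. "nn-0x20000090fac7e5cc:pn-0x10000090fac7e5cc" or the same without the 0x prefixes (the WWNs here are made up), and the string length alone selects which. Because match_u64() needs a 0x prefix to pick base 16, the digits are always copied into a prefixed scratch buffer first; that step in isolation:

	#include <linux/parser.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Parse exactly 16 hex digits into a WWN value. */
	static int example_parse_wwn(const char *hex16, u64 *val)
	{
		char name[2 + 16 + 1] = "0x";
		substring_t s = { name, &name[sizeof(name) - 1] };

		memcpy(&name[2], hex16, 16);	/* "0x" + digits, NUL kept */
		return match_u64(&s, val) ? -EINVAL : 0;
	}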
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d10d2f279d19..cd888a47d0fc 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -539,7 +539,7 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
}
#endif
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
@@ -556,7 +556,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
length -= (page_size - offset);
if (length <= 0)
- return true;
+ return BLK_STS_OK;
dma_len -= (page_size - offset);
if (dma_len) {
@@ -569,7 +569,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
if (length <= page_size) {
iod->first_dma = dma_addr;
- return true;
+ return BLK_STS_OK;
}
nprps = DIV_ROUND_UP(length, page_size);
@@ -585,7 +585,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
if (!prp_list) {
iod->first_dma = dma_addr;
iod->npages = -1;
- return false;
+ return BLK_STS_RESOURCE;
}
list[0] = prp_list;
iod->first_dma = prp_dma;
@@ -595,7 +595,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
- return false;
+ return BLK_STS_RESOURCE;
list[iod->npages++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -609,13 +609,29 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
break;
if (dma_len > 0)
continue;
- BUG_ON(dma_len < 0);
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
sg = sg_next(sg);
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
- return true;
+ return BLK_STS_OK;
+
+ bad_sgl:
+ if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
+ blk_rq_payload_bytes(req), iod->nents)) {
+ for_each_sg(iod->sg, sg, iod->nents, i) {
+ dma_addr_t phys = sg_phys(sg);
+ pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+ "dma_address:%pad dma_length:%d\n", i, &phys,
+ sg->offset, sg->length,
+ &sg_dma_address(sg),
+ sg_dma_len(sg));
+ }
+ }
+ return BLK_STS_IOERR;
+
}
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -637,7 +653,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_ATTR_NO_WARN))
goto out;
- if (!nvme_setup_prps(dev, req))
+ ret = nvme_setup_prps(dev, req);
+ if (ret != BLK_STS_OK)
goto out_unmap;
ret = BLK_STS_IOERR;
@@ -1602,7 +1619,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
struct nvme_host_mem_buf_desc *descs;
- u32 chunk_size, max_entries;
+ u32 chunk_size, max_entries, len;
int i = 0;
void **bufs;
u64 size = 0, tmp;
@@ -1621,10 +1638,10 @@ retry:
if (!bufs)
goto out_free_descs;
- for (size = 0; size < preferred; size += chunk_size) {
- u32 len = min_t(u64, chunk_size, preferred - size);
+ for (size = 0; size < preferred; size += len) {
dma_addr_t dma_addr;
+ len = min_t(u64, chunk_size, preferred - size);
bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
if (!bufs[i])
@@ -2282,7 +2299,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
result = nvme_dev_map(dev);
if (result)
- goto free;
+ goto put_pci;
INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
@@ -2291,7 +2308,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
result = nvme_setup_prp_pools(dev);
if (result)
- goto put_pci;
+ goto unmap;
quirks |= check_dell_samsung_bug(pdev);
@@ -2308,9 +2325,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
release_pools:
nvme_release_prp_pools(dev);
+ unmap:
+ nvme_dev_unmap(dev);
put_pci:
put_device(dev->dev);
- nvme_dev_unmap(dev);
free:
kfree(dev->queues);
kfree(dev);
@@ -2466,6 +2484,9 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0a54),
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0x0a55),
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
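The probe reordering gives the BAR mapping its own unwind label: each goto target now undoes precisely the resources acquired before the failing step, newest first. The idiom in miniature, with hypothetical steps:

	static int example_map(void) { return 0; }
	static int example_pools(void) { return 0; }
	static void example_unmap(void) { }
	static void example_put(void) { }

	static int example_probe(void)
	{
		int ret;

		ret = example_map();
		if (ret)
			goto put_ref;	/* nothing mapped yet */
		ret = example_pools();
		if (ret)
			goto unmap;	/* mapping must be undone */
		return 0;

	unmap:
		example_unmap();
	put_ref:
		example_put();
		return ret;
	}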
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 35f930db3c02..2d7a98ab53fb 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -168,11 +168,21 @@ out:
nvmet_req_complete(req, status);
}
+static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
+{
+ int len = min(src_len, dst_len);
+
+ memcpy(dst, src, len);
+ if (dst_len > len)
+ memset(dst + len, ' ', dst_len - len);
+}
+
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
u16 status = 0;
+ const char model[] = "Linux";
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
@@ -184,8 +194,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->vid = 0;
id->ssvid = 0;
- memset(id->sn, ' ', sizeof(id->sn));
- snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
+ bin2hex(id->sn, &ctrl->subsys->serial,
+ min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+ copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
+ copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
- memset(id->mn, ' ', sizeof(id->mn));
- strncpy((char *)id->mn, "Linux", sizeof(id->mn));
- memset(id->fr, ' ', sizeof(id->fr));
- strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index a358ecd93e11..0a0067e771f5 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -650,7 +650,7 @@ out_unlock:
CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
-static ssize_t nvmet_subsys_version_show(struct config_item *item,
+static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
char *page)
{
struct nvmet_subsys *subsys = to_subsys(item);
@@ -666,7 +666,7 @@ static ssize_t nvmet_subsys_version_show(struct config_item *item,
(int)NVME_MINOR(subsys->ver));
}
-static ssize_t nvmet_subsys_version_store(struct config_item *item,
+static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_subsys *subsys = to_subsys(item);
@@ -684,11 +684,33 @@ static ssize_t nvmet_subsys_version_store(struct config_item *item,
return count;
}
-CONFIGFS_ATTR(nvmet_subsys_, version);
+CONFIGFS_ATTR(nvmet_subsys_, attr_version);
+
+static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
+}
+
+static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ down_write(&nvmet_config_sem);
+ sscanf(page, "%llx\n", &subsys->serial);
+ up_write(&nvmet_config_sem);
+
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
- &nvmet_subsys_attr_version,
+ &nvmet_subsys_attr_attr_version,
+ &nvmet_subsys_attr_attr_serial,
NULL,
};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b5b4ac103748..f4b02bb4a1a8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -767,9 +767,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
- /* generate a random serial number as our controllers are ephemeral: */
- get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
-
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
@@ -928,6 +925,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
return NULL;
subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+ /* generate a random serial number as our controllers are ephemeral: */
+ get_random_bytes(&subsys->serial, sizeof(subsys->serial));
switch (type) {
case NVME_NQN_NVME:
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1e6dcc241b3c..31ca55dfcb1d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1174,14 +1174,14 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
*/
if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
ret = VERR_CR_ASSOC_LEN;
- else if (rqst->desc_list_len <
- cpu_to_be32(FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN))
+ else if (be32_to_cpu(rqst->desc_list_len) <
+ FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
ret = VERR_CR_ASSOC_RQST_LEN;
else if (rqst->assoc_cmd.desc_tag !=
cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
ret = VERR_CR_ASSOC_CMD;
- else if (rqst->assoc_cmd.desc_len <
- cpu_to_be32(FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN))
+ else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
+ FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
ret = VERR_CR_ASSOC_CMD_LEN;
else if (!rqst->assoc_cmd.ersp_ratio ||
(be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
@@ -2293,66 +2293,70 @@ nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
-enum {
- FCT_TRADDR_ERR = 0,
- FCT_TRADDR_WWNN = 1 << 0,
- FCT_TRADDR_WWPN = 1 << 1,
-};
struct nvmet_fc_traddr {
u64 nn;
u64 pn;
};
-static const match_table_t traddr_opt_tokens = {
- { FCT_TRADDR_WWNN, "nn-%s" },
- { FCT_TRADDR_WWPN, "pn-%s" },
- { FCT_TRADDR_ERR, NULL }
-};
-
static int
-nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
- substring_t args[MAX_OPT_ARGS];
- char *options, *o, *p;
- int token, ret = 0;
u64 token64;
- options = o = kstrdup(buf, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
+ if (match_u64(sstr, &token64))
+ return -EINVAL;
+ *val = token64;
- while ((p = strsep(&o, ":\n")) != NULL) {
- if (!*p)
- continue;
+ return 0;
+}
- token = match_token(p, traddr_opt_tokens, args);
- switch (token) {
- case FCT_TRADDR_WWNN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->nn = token64;
- break;
- case FCT_TRADDR_WWPN:
- if (match_u64(args, &token64)) {
- ret = -EINVAL;
- goto out;
- }
- traddr->pn = token64;
- break;
- default:
- pr_warn("unknown traddr token or missing value '%s'\n",
- p);
- ret = -EINVAL;
- goto out;
- }
- }
+/*
+ * This routine validates and extracts the WWNs from the TRADDR string.
+ * As kernel parsers need a 0x prefix to determine the number base, the
+ * string to parse is always built with a 0x prefix before parsing.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+ char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+ substring_t wwn = { name, &name[sizeof(name)-1] };
+ int nnoffset, pnoffset;
+
+ /* validate the string is one of the 2 allowed formats */
+ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+ !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+ "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+ nnoffset = NVME_FC_TRADDR_OXNNLEN;
+ pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+ NVME_FC_TRADDR_OXNNLEN;
+ } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+ !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+ !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+ "pn-", NVME_FC_TRADDR_NNLEN))) {
+ nnoffset = NVME_FC_TRADDR_NNLEN;
+ pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+ } else
+ goto out_einval;
+
+ name[0] = '0';
+ name[1] = 'x';
+ name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+ memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+ goto out_einval;
+
+ memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+ if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+ goto out_einval;
-out:
- kfree(options);
- return ret;
+ return 0;
+
+out_einval:
+ pr_warn("%s: bad traddr string\n", __func__);
+ return -EINVAL;
}
static int
@@ -2370,7 +2374,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
/* map the traddr address info to a target port */
- ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
+ ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
+ sizeof(port->disc_addr.traddr));
if (ret)
return ret;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 747bbdb4f9c6..e3b244c7e443 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -112,7 +112,6 @@ struct nvmet_ctrl {
struct mutex lock;
u64 cap;
- u64 serial;
u32 cc;
u32 csts;
@@ -152,6 +151,7 @@ struct nvmet_subsys {
u16 max_qid;
u64 ver;
+ u64 serial;
char *subsysnqn;
struct config_group group;
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index a0d4ede9b8fc..63e3eb55f3ac 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -170,7 +170,7 @@ static const struct of_device_id rockchip_efuse_match[] = {
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
- .compatible = "rockchip,rk322x-efuse",
+ .compatible = "rockchip,rk3228-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6ce72aa65425..ab21c846eb27 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -476,7 +476,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int i;
for (i = 0; i < nr_irqs; i++, res++)
- if (!of_irq_to_resource(dev, i, res))
+ if (of_irq_to_resource(dev, i, res) <= 0)
break;
return i;
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 055f83fddc18..7147aa53e9a2 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -954,7 +954,7 @@ static struct attribute *pdcs_subsys_attrs[] = {
NULL,
};
-static struct attribute_group pdcs_attr_group = {
+static const struct attribute_group pdcs_attr_group = {
.attrs = pdcs_subsys_attrs,
};
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index dc459eb1246b..1c5e0f333779 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -569,22 +569,41 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
if (irq != other_irq) {
pr_warn("mismatched PPIs detected.\n");
err = -EINVAL;
+ goto err_out;
}
} else {
- err = request_irq(irq, handler,
- IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+ struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
+ unsigned long irq_flags;
+
+ err = irq_force_affinity(irq, cpumask_of(cpu));
+
+ if (err && num_possible_cpus() > 1) {
+ pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, cpu);
+ goto err_out;
+ }
+
+ if (platdata && platdata->irq_flags) {
+ irq_flags = platdata->irq_flags;
+ } else {
+ irq_flags = IRQF_PERCPU |
+ IRQF_NOBALANCING |
+ IRQF_NO_THREAD;
+ }
+
+ err = request_irq(irq, handler, irq_flags, "arm-pmu",
per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
- return err;
- }
+ if (err)
+ goto err_out;
cpumask_set_cpu(cpu, &armpmu->active_irqs);
-
return 0;
+
+err_out:
+ pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
+ return err;
}
int armpmu_request_irqs(struct arm_pmu *armpmu)
@@ -628,12 +647,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
enable_percpu_irq(irq, IRQ_TYPE_NONE);
return 0;
}
-
- if (irq_force_affinity(irq, cpumask_of(cpu)) &&
- num_possible_cpus() > 1) {
- pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, cpu);
- }
}
return 0;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 69255f53057a..4eafa7a42e52 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -131,8 +131,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
}
if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
- pr_warn("no interrupt-affinity property for %s, guessing.\n",
- of_node_full_name(pdev->dev.of_node));
+ pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
+ pdev->dev.of_node);
}
/*
@@ -211,7 +211,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
}
if (ret) {
- pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
+ pr_info("%pOF: failed to probe PMU!\n", node);
goto out_free;
}
@@ -228,8 +228,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
out_free_irqs:
armpmu_free_irqs(pmu);
out_free:
- pr_info("%s: failed to register PMU devices!\n",
- of_node_full_name(node));
+ pr_info("%pOF: failed to register PMU devices!\n", node);
armpmu_free(pmu);
return ret;
}
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index c259848228b4..b242cce10468 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -546,6 +546,7 @@ static int l2_cache_event_init(struct perf_event *event)
}
if ((event != event->group_leader) &&
+ !is_software_event(event->group_leader) &&
(L2_EVT_GROUP(event->group_leader->attr.config) ==
L2_EVT_GROUP(event->attr.config))) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
@@ -558,6 +559,7 @@ static int l2_cache_event_init(struct perf_event *event)
list_for_each_entry(sibling, &event->group_leader->sibling_list,
group_entry) {
if ((sibling != event) &&
+ !is_software_event(sibling) &&
(L2_EVT_GROUP(sibling->attr.config) ==
L2_EVT_GROUP(event->attr.config))) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 7e0d4f724dda..432fc40990bd 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -559,6 +559,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1,
chpid.id = crw0->rsid;
switch (crw0->erc) {
case CRW_ERC_IPARM: /* Path has come. */
+ case CRW_ERC_INIT:
if (!chp_is_registered(chpid))
chp_new(chpid);
chsc_chp_online(chpid);
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 077f62e208aa..6a4367cc9caa 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3401,9 +3401,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
if (is_write) {
req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
- rc = copy_from_user(kbuf, ubuf, ulen);
- if (unlikely(rc))
+ if (copy_from_user(kbuf, ubuf, ulen)) {
+ rc = -EFAULT;
goto out;
+ }
}
}
@@ -3431,8 +3432,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
goto out;
}
- if (ulen && !is_write)
- rc = copy_to_user(ubuf, kbuf, ulen);
+ if (ulen && !is_write) {
+ if (copy_to_user(ubuf, kbuf, ulen))
+ rc = -EFAULT;
+ }
out:
kfree(buf);
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 551d103c27f1..2bfea7082e3a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1693,7 +1693,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
{
- const u8 trans_tx_err_code_prio[] = {
+ static const u8 trans_tx_err_code_prio[] = {
TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS,
TRANS_TX_ERR_PHY_NOT_ENABLE,
TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION,
@@ -1738,7 +1738,7 @@ static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
{
- const u8 trans_rx_err_code_prio[] = {
+ static const u8 trans_rx_err_code_prio[] = {
TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR,
TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR,
TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM,
@@ -1784,7 +1784,7 @@ static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
{
- const u8 dma_tx_err_code_prio[] = {
+ static const u8 dma_tx_err_code_prio[] = {
DMA_TX_UNEXP_XFER_ERR,
DMA_TX_UNEXP_RETRANS_ERR,
DMA_TX_XFER_LEN_OVERFLOW,
@@ -1810,7 +1810,7 @@ static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
{
- const u8 sipc_rx_err_code_prio[] = {
+ static const u8 sipc_rx_err_code_prio[] = {
SIPC_RX_FIS_STATUS_ERR_BIT_VLD,
SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR,
SIPC_RX_FIS_STATUS_BSY_BIT_ERR,
@@ -1836,7 +1836,7 @@ static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
static int parse_dma_rx_err_code_v2_hw(u32 err_msk)
{
- const u8 dma_rx_err_code_prio[] = {
+ static const u8 dma_rx_err_code_prio[] = {
DMA_RX_UNKNOWN_FRM_ERR,
DMA_RX_DATA_LEN_OVERFLOW,
DMA_RX_DATA_LEN_UNDERFLOW,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8914eab84337..4f7cdb28bd38 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -938,7 +938,7 @@ static struct scsi_host_template hpsa_driver_template = {
#endif
.sdev_attrs = hpsa_sdev_attrs,
.shost_attrs = hpsa_shost_attrs,
- .max_sectors = 8192,
+ .max_sectors = 1024,
.no_write_same = 1,
};
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 47f66e949745..ed197bc8e801 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -213,7 +213,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
* @task_context:
*
*/
-static void scu_ssp_reqeust_construct_task_context(
+static void scu_ssp_request_construct_task_context(
struct isci_request *ireq,
struct scu_task_context *task_context)
{
@@ -425,7 +425,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
u8 prot_type = scsi_get_prot_type(scmd);
u8 prot_op = scsi_get_prot_op(scmd);
- scu_ssp_reqeust_construct_task_context(ireq, task_context);
+ scu_ssp_request_construct_task_context(ireq, task_context);
task_context->ssp_command_iu_length =
sizeof(struct ssp_cmd_iu) / sizeof(u32);
@@ -472,7 +472,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
{
struct scu_task_context *task_context = ireq->tc;
- scu_ssp_reqeust_construct_task_context(ireq, task_context);
+ scu_ssp_request_construct_task_context(ireq, task_context);
task_context->control_frame = 1;
task_context->priority = SCU_TASK_PRIORITY_HIGH;
@@ -495,7 +495,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
 * the command buffer is complete. Revisit task context construction to
* determine what is common for SSP/SMP/STP task context structures.
*/
-static void scu_sata_reqeust_construct_task_context(
+static void scu_sata_request_construct_task_context(
struct isci_request *ireq,
struct scu_task_context *task_context)
{
@@ -562,7 +562,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq
{
struct scu_task_context *task_context = ireq->tc;
- scu_sata_reqeust_construct_task_context(ireq, task_context);
+ scu_sata_request_construct_task_context(ireq, task_context);
task_context->control_frame = 0;
task_context->priority = SCU_TASK_PRIORITY_NORMAL;
@@ -613,7 +613,7 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
struct scu_task_context *task_context = ireq->tc;
/* Build the STP task context structure */
- scu_sata_reqeust_construct_task_context(ireq, task_context);
+ scu_sata_request_construct_task_context(ireq, task_context);
/* Copy over the SGL elements */
sci_request_build_sgl(ireq);
@@ -1401,7 +1401,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
* @data_buffer: The buffer of data to be copied.
* @length: The length of the data transfer.
*
- * Copy the data from the buffer for the length specified to the IO reqeust SGL
+ * Copy the data from the buffer for the length specified to the IO request SGL
* specified data region. enum sci_status
*/
static enum sci_status
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index fd501f8dbb11..8660f923ace0 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -573,7 +573,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
event = DISC_EV_FAILED;
}
if (error)
- fc_disc_error(disc, fp);
+ fc_disc_error(disc, ERR_PTR(error));
else if (event != DISC_EV_NONE)
fc_disc_done(disc, event);
fc_frame_free(fp);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index b58bba4604e8..7786c97e033f 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1227,7 +1227,7 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
if (rdata->spp_type != FC_TYPE_FCP) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
- "Not offlading since since spp type isn't FCP\n");
+ "Not offloading since spp type isn't FCP\n");
break;
}
if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 32632c9b2276..91d2f51c351b 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -23,11 +23,17 @@
#include <linux/qed/qed_iscsi_if.h>
#include <linux/qed/qed_ll2_if.h>
#include "qedi_version.h"
+#include "qedi_nvm_iscsi_cfg.h"
#define QEDI_MODULE_NAME "qedi"
struct qedi_endpoint;
+#ifndef GET_FIELD2
+#define GET_FIELD2(value, name) \
+ (((value) & (name ## _MASK)) >> (name ## _OFFSET))
+#endif
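+/*
+ * e.g. GET_FIELD2(initiator->generic_cont0, NVM_ISCSI_CFG_INITIATOR_VLAN)
+ * extracts the VLAN id via the NVM_ISCSI_CFG_INITIATOR_VLAN_MASK/_OFFSET
+ * pair defined in qedi_nvm_iscsi_cfg.h.
+ */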
+
/*
* PCI function probe defines
*/
@@ -66,6 +72,11 @@ struct qedi_endpoint;
#define QEDI_HW_DMA_BOUNDARY 0xfff
#define QEDI_PATH_HANDLE 0xFE0000000UL
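+/* Primary/secondary boot-target slots in nvm_iscsi_block.target[] */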
+enum qedi_nvm_tgts {
+ QEDI_NVM_TGT_PRI,
+ QEDI_NVM_TGT_SEC,
+};
+
struct qedi_uio_ctrl {
/* meta data */
u32 uio_hsi_version;
@@ -283,6 +294,8 @@ struct qedi_ctx {
void *bdq_pbl_list;
dma_addr_t bdq_pbl_list_dma;
u8 bdq_pbl_list_num_entries;
+ struct nvm_iscsi_cfg *iscsi_cfg;
+ dma_addr_t nvm_buf_dma;
void __iomem *bdq_primary_prod;
void __iomem *bdq_secondary_prod;
u16 bdq_prod_idx;
@@ -337,6 +350,10 @@ struct qedi_ctx {
bool use_fast_sge;
atomic_t num_offloads;
+#define SYSFS_FLAG_FW_SEL_BOOT 2
+#define IPV6_LEN 41
+#define IPV4_LEN 17
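+/*
+ * IPV6_LEN and IPV4_LEN fit the longest textual address (39 and 15
+ * characters respectively) plus a trailing '\n' and a NUL.
+ */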
+ struct iscsi_boot_kset *boot_kset;
};
struct qedi_work {
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 19254bd739d9..93d54acd4a22 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1411,7 +1411,7 @@ static void qedi_tmf_work(struct work_struct *work)
list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
if (!list_work) {
- QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n");
+ QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
goto abort_ret;
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5f5a4ef2e529..2c3783684815 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
+#include <linux/iscsi_boot_sysfs.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -1143,6 +1144,30 @@ exit_setup_int:
return rc;
}
+static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
+{
+ if (qedi->iscsi_cfg)
+ dma_free_coherent(&qedi->pdev->dev,
+ sizeof(struct nvm_iscsi_cfg),
+ qedi->iscsi_cfg, qedi->nvm_buf_dma);
+}
+
+static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
+{
+ qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
+ sizeof(struct nvm_iscsi_cfg),
+ &qedi->nvm_buf_dma, GFP_KERNEL);
+ if (!qedi->iscsi_cfg) {
+ QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
+ return -ENOMEM;
+ }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
+ qedi->nvm_buf_dma);
+
+ return 0;
+}
+
static void qedi_free_bdq(struct qedi_ctx *qedi)
{
int i;
@@ -1183,6 +1208,7 @@ static void qedi_free_global_queues(struct qedi_ctx *qedi)
kfree(gl[i]);
}
qedi_free_bdq(qedi);
+ qedi_free_nvm_iscsi_cfg(qedi);
}
static int qedi_alloc_bdq(struct qedi_ctx *qedi)
@@ -1309,6 +1335,11 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
if (rc)
goto mem_alloc_failure;
+ /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
+ rc = qedi_alloc_nvm_iscsi_cfg(qedi);
+ if (rc)
+ goto mem_alloc_failure;
+
/* Allocate a CQ and an associated PBL for each MSI-X
* vector.
*/
@@ -1671,6 +1702,387 @@ void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
qedi_ops->ll2->start(qedi->cdev, &params);
}
+/**
+ * qedi_get_nvram_block - Scan through the iSCSI NVRAM block (while accounting
+ * for gaps) for the block matching the QEDI device's absolute PF id.
+ */
+static struct nvm_iscsi_block *
+qedi_get_nvram_block(struct qedi_ctx *qedi)
+{
+ int i;
+ u8 pf;
+ u32 flags;
+ struct nvm_iscsi_block *block;
+
+ pf = qedi->dev_info.common.abs_pf_id;
+ block = &qedi->iscsi_cfg->block[0];
+ for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
+ flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
+ NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
+ if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY |
+ NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) &&
+ (pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK)
+ >> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET))
+ return block;
+ }
+ return NULL;
+}
+
+static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
+{
+ struct qedi_ctx *qedi = data;
+ struct nvm_iscsi_initiator *initiator;
+ char *str = buf;
+ int rc = 1;
+ u32 ipv6_en, dhcp_en, ip_len;
+ struct nvm_iscsi_block *block;
+ char *fmt, *ip, *sub, *gw;
+
+ block = qedi_get_nvram_block(qedi);
+ if (!block)
+ return 0;
+
+ initiator = &block->initiator;
+ ipv6_en = block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
+ dhcp_en = block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED;
+ /* Static IP assignments. */
+ fmt = ipv6_en ? "%pI6\n" : "%pI4\n";
+ ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte;
+ ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
+ sub = ipv6_en ? initiator->ipv6.subnet_mask.byte :
+ initiator->ipv4.subnet_mask.byte;
+ gw = ipv6_en ? initiator->ipv6.gateway.byte :
+ initiator->ipv4.gateway.byte;
+ /* DHCP IP adjustments. */
+ fmt = dhcp_en ? "%s\n" : fmt;
+ if (dhcp_en) {
+ ip = ipv6_en ? "0::0" : "0.0.0.0";
+ sub = ip;
+ gw = ip;
+ ip_len = ipv6_en ? 5 : 8;
+ }
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_IP_ADDR:
+ rc = snprintf(str, ip_len, fmt, ip);
+ break;
+ case ISCSI_BOOT_ETH_SUBNET_MASK:
+ rc = snprintf(str, ip_len, fmt, sub);
+ break;
+ case ISCSI_BOOT_ETH_GATEWAY:
+ rc = snprintf(str, ip_len, fmt, gw);
+ break;
+ case ISCSI_BOOT_ETH_FLAGS:
+ rc = snprintf(str, 3, "%hhd\n",
+ SYSFS_FLAG_FW_SEL_BOOT);
+ break;
+ case ISCSI_BOOT_ETH_INDEX:
+ rc = snprintf(str, 3, "0\n");
+ break;
+ case ISCSI_BOOT_ETH_MAC:
+ rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN);
+ break;
+ case ISCSI_BOOT_ETH_VLAN:
+ rc = snprintf(str, 12, "%d\n",
+ GET_FIELD2(initiator->generic_cont0,
+ NVM_ISCSI_CFG_INITIATOR_VLAN));
+ break;
+ case ISCSI_BOOT_ETH_ORIGIN:
+ if (dhcp_en)
+ rc = snprintf(str, 3, "3\n");
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+static umode_t qedi_eth_get_attr_visibility(void *data, int type)
+{
+ int rc = 1;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_FLAGS:
+ case ISCSI_BOOT_ETH_MAC:
+ case ISCSI_BOOT_ETH_INDEX:
+ case ISCSI_BOOT_ETH_IP_ADDR:
+ case ISCSI_BOOT_ETH_SUBNET_MASK:
+ case ISCSI_BOOT_ETH_GATEWAY:
+ case ISCSI_BOOT_ETH_ORIGIN:
+ case ISCSI_BOOT_ETH_VLAN:
+ rc = 0444;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
+{
+ struct qedi_ctx *qedi = data;
+ struct nvm_iscsi_initiator *initiator;
+ char *str = buf;
+ int rc;
+ struct nvm_iscsi_block *block;
+
+ block = qedi_get_nvram_block(qedi);
+ if (!block)
+ return 0;
+
+ initiator = &block->initiator;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+ initiator->initiator_name.byte);
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static umode_t qedi_ini_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ rc = 0444;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static ssize_t
+qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
+ char *buf, enum qedi_nvm_tgts idx)
+{
+ char *str = buf;
+ int rc = 1;
+ u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
+ struct nvm_iscsi_block *block;
+ char *chap_name, *chap_secret;
+ char *mchap_name, *mchap_secret;
+
+ block = qedi_get_nvram_block(qedi);
+ if (!block)
+ goto exit_show_tgt_info;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
+ "Port:%d, tgt_idx:%d\n",
+ GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx);
+
+ ctrl_flags = block->target[idx].ctrl_flags &
+ NVM_ISCSI_CFG_TARGET_ENABLED;
+
+ if (!ctrl_flags) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
+ "Target disabled\n");
+ goto exit_show_tgt_info;
+ }
+
+ ipv6_en = block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
+ ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
+ chap_en = block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_CHAP_ENABLED;
+ chap_name = chap_en ? block->initiator.chap_name.byte : NULL;
+ chap_secret = chap_en ? block->initiator.chap_password.byte : NULL;
+
+ mchap_en = block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED;
+ mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL;
+ mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+ block->target[idx].target_name.byte);
+ break;
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ if (ipv6_en)
+ rc = snprintf(str, ip_len, "%pI6\n",
+ block->target[idx].ipv6_addr.byte);
+ else
+ rc = snprintf(str, ip_len, "%pI4\n",
+ block->target[idx].ipv4_addr.byte);
+ break;
+ case ISCSI_BOOT_TGT_PORT:
+ rc = snprintf(str, 12, "%d\n",
+ GET_FIELD2(block->target[idx].generic_cont0,
+ NVM_ISCSI_CFG_TARGET_TCP_PORT));
+ break;
+ case ISCSI_BOOT_TGT_LUN:
+ rc = snprintf(str, 22, "%.*d\n",
+ block->target[idx].lun.value[1],
+ block->target[idx].lun.value[0]);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
+ chap_name);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
+ chap_secret);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
+ mchap_name);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
+ mchap_secret);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+ break;
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ rc = snprintf(str, 3, "0\n");
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+
+exit_show_tgt_info:
+ return rc;
+}
+
+static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf)
+{
+ struct qedi_ctx *qedi = data;
+
+ return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI);
+}
+
+static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf)
+{
+ struct qedi_ctx *qedi = data;
+
+ return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC);
+}
+
+static umode_t qedi_tgt_get_attr_visibility(void *data, int type)
+{
+ int rc;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ case ISCSI_BOOT_TGT_PORT:
+ case ISCSI_BOOT_TGT_LUN:
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = 0444;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static void qedi_boot_release(void *data)
+{
+ struct qedi_ctx *qedi = data;
+
+ scsi_host_put(qedi->shost);
+}
+
+static int qedi_get_boot_info(struct qedi_ctx *qedi)
+{
+ int ret = 1;
+ u16 len;
+
+ len = sizeof(struct nvm_iscsi_cfg);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Get NVM iSCSI CFG image\n");
+ ret = qedi_ops->common->nvm_get_image(qedi->cdev,
+ QED_NVM_IMAGE_ISCSI_CFG,
+ (char *)qedi->iscsi_cfg, len);
+ if (ret)
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not get NVM image. ret = %d\n", ret);
+
+ return ret;
+}
+
+static int qedi_setup_boot_info(struct qedi_ctx *qedi)
+{
+ struct iscsi_boot_kobj *boot_kobj;
+
+ if (qedi_get_boot_info(qedi))
+ return -EPERM;
+
+ qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no);
+ if (!qedi->boot_kset)
+ goto kset_free;
+
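+	/*
+	 * Take a host reference for each boot kobject created below;
+	 * qedi_boot_release() drops it when the kobject is released.
+	 */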
+ if (!scsi_host_get(qedi->shost))
+ goto kset_free;
+
+ boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi,
+ qedi_show_boot_tgt_pri_info,
+ qedi_tgt_get_attr_visibility,
+ qedi_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(qedi->shost))
+ goto kset_free;
+
+ boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi,
+ qedi_show_boot_tgt_sec_info,
+ qedi_tgt_get_attr_visibility,
+ qedi_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(qedi->shost))
+ goto kset_free;
+
+ boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi,
+ qedi_show_boot_ini_info,
+ qedi_ini_get_attr_visibility,
+ qedi_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ if (!scsi_host_get(qedi->shost))
+ goto kset_free;
+
+ boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi,
+ qedi_show_boot_eth_info,
+ qedi_eth_get_attr_visibility,
+ qedi_boot_release);
+ if (!boot_kobj)
+ goto put_host;
+
+ return 0;
+
+put_host:
+ scsi_host_put(qedi->shost);
+kset_free:
+ iscsi_boot_destroy_kset(qedi->boot_kset);
+ return -ENOMEM;
+}
+
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
@@ -1724,6 +2136,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi->ll2_recv_thread = NULL;
}
qedi_ll2_free_skbs(qedi);
+
+ if (qedi->boot_kset)
+ iscsi_boot_destroy_kset(qedi->boot_kset);
}
}
@@ -1967,6 +2382,10 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
/* F/w needs 1st task context memory entry for performance */
set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
atomic_set(&qedi->num_offloads, 0);
+
+ if (qedi_setup_boot_info(qedi))
+ QEDI_ERR(&qedi->dbg_ctx,
+ "No iSCSI boot target configured\n");
}
return 0;
diff --git a/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
new file mode 100644
index 000000000000..df39b69b366d
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
@@ -0,0 +1,210 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef NVM_ISCSI_CFG_H
+#define NVM_ISCSI_CFG_H
+
+#define NUM_OF_ISCSI_TARGET_PER_PF 4 /* Defined as per the
+ * ISCSI IBFT constraint
+ */
+#define NUM_OF_ISCSI_PF_SUPPORTED 4 /* One PF per Port -
+ * assuming 4 port card
+ */
+
+#define NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN 256
+
+union nvm_iscsi_dhcp_vendor_id {
+ u32 value[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN / 4];
+ u8 byte[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN];
+};
+
+#define NVM_ISCSI_IPV4_ADDR_BYTE_LEN 4
+union nvm_iscsi_ipv4_addr {
+ u32 addr;
+ u8 byte[NVM_ISCSI_IPV4_ADDR_BYTE_LEN];
+};
+
+#define NVM_ISCSI_IPV6_ADDR_BYTE_LEN 16
+union nvm_iscsi_ipv6_addr {
+ u32 addr[4];
+ u8 byte[NVM_ISCSI_IPV6_ADDR_BYTE_LEN];
+};
+
+struct nvm_iscsi_initiator_ipv4 {
+ union nvm_iscsi_ipv4_addr addr; /* 0x0 */
+ union nvm_iscsi_ipv4_addr subnet_mask; /* 0x4 */
+ union nvm_iscsi_ipv4_addr gateway; /* 0x8 */
+ union nvm_iscsi_ipv4_addr primary_dns; /* 0xC */
+ union nvm_iscsi_ipv4_addr secondary_dns; /* 0x10 */
+ union nvm_iscsi_ipv4_addr dhcp_addr; /* 0x14 */
+
+ union nvm_iscsi_ipv4_addr isns_server; /* 0x18 */
+ union nvm_iscsi_ipv4_addr slp_server; /* 0x1C */
+	union nvm_iscsi_ipv4_addr primary_radius_server;	/* 0x20 */
+ union nvm_iscsi_ipv4_addr secondary_radius_server; /* 0x24 */
+
+ union nvm_iscsi_ipv4_addr rsvd[4]; /* 0x28 */
+};
+
+struct nvm_iscsi_initiator_ipv6 {
+ union nvm_iscsi_ipv6_addr addr; /* 0x0 */
+ union nvm_iscsi_ipv6_addr subnet_mask; /* 0x10 */
+ union nvm_iscsi_ipv6_addr gateway; /* 0x20 */
+ union nvm_iscsi_ipv6_addr primary_dns; /* 0x30 */
+ union nvm_iscsi_ipv6_addr secondary_dns; /* 0x40 */
+ union nvm_iscsi_ipv6_addr dhcp_addr; /* 0x50 */
+
+ union nvm_iscsi_ipv6_addr isns_server; /* 0x60 */
+ union nvm_iscsi_ipv6_addr slp_server; /* 0x70 */
+	union nvm_iscsi_ipv6_addr primary_radius_server;	/* 0x80 */
+ union nvm_iscsi_ipv6_addr secondary_radius_server; /* 0x90 */
+
+ union nvm_iscsi_ipv6_addr rsvd[3]; /* 0xA0 */
+
+ u32 config; /* 0xD0 */
+#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_MASK 0x000000FF
+#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_OFFSET 0
+
+ u32 rsvd_1[3];
+};
+
+#define NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN 256
+union nvm_iscsi_name {
+ u32 value[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN / 4];
+ u8 byte[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN];
+};
+
+#define NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN 256
+union nvm_iscsi_chap_name {
+ u32 value[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN / 4];
+ u8 byte[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN];
+};
+
+#define NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN 16 /* the MD5 digest used by
+                                           * CHAP (RFC 1994) is 16 octets
+ */
+union nvm_iscsi_chap_password {
+ u32 value[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN / 4];
+ u8 byte[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN];
+};
+
+union nvm_iscsi_lun {
+ u8 byte[8];
+ u32 value[2];
+};
+
+struct nvm_iscsi_generic {
+ u32 ctrl_flags; /* 0x0 */
+#define NVM_ISCSI_CFG_GEN_CHAP_ENABLED BIT(0)
+#define NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED BIT(1)
+#define NVM_ISCSI_CFG_GEN_DHCP_ISCSI_CONFIG_ENABLED BIT(2)
+#define NVM_ISCSI_CFG_GEN_IPV6_ENABLED BIT(3)
+#define NVM_ISCSI_CFG_GEN_IPV4_FALLBACK_ENABLED BIT(4)
+#define NVM_ISCSI_CFG_GEN_ISNS_WORLD_LOGIN BIT(5)
+#define NVM_ISCSI_CFG_GEN_ISNS_SELECTIVE_LOGIN BIT(6)
+#define NVM_ISCSI_CFG_GEN_ADDR_REDIRECT_ENABLED BIT(7)
+#define NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED BIT(8)
+
+ u32 timeout; /* 0x4 */
+#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_MASK 0x0000FFFF
+#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_OFFSET 0
+#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_MASK 0xFFFF0000
+#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_OFFSET 16
+
+ union nvm_iscsi_dhcp_vendor_id dhcp_vendor_id; /* 0x8 */
+ u32 rsvd[62]; /* 0x108 */
+};
+
+struct nvm_iscsi_initiator {
+ struct nvm_iscsi_initiator_ipv4 ipv4; /* 0x0 */
+ struct nvm_iscsi_initiator_ipv6 ipv6; /* 0x38 */
+
+ union nvm_iscsi_name initiator_name; /* 0x118 */
+ union nvm_iscsi_chap_name chap_name; /* 0x218 */
+ union nvm_iscsi_chap_password chap_password; /* 0x318 */
+
+ u32 generic_cont0; /* 0x398 */
+#define NVM_ISCSI_CFG_INITIATOR_VLAN_MASK 0x0000FFFF
+#define NVM_ISCSI_CFG_INITIATOR_VLAN_OFFSET 0
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_MASK 0x00030000
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_OFFSET 16
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4 1
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_6 2
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4_AND_6 3
+
+ u32 ctrl_flags;
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_PRIORITY_V6 BIT(0)
+#define NVM_ISCSI_CFG_INITIATOR_VLAN_ENABLED BIT(1)
+
+ u32 rsvd[116]; /* 0x32C */
+};
+
+struct nvm_iscsi_target {
+ u32 ctrl_flags; /* 0x0 */
+#define NVM_ISCSI_CFG_TARGET_ENABLED BIT(0)
+#define NVM_ISCSI_CFG_BOOT_TIME_LOGIN_STATUS BIT(1)
+
+ u32 generic_cont0; /* 0x4 */
+#define NVM_ISCSI_CFG_TARGET_TCP_PORT_MASK 0x0000FFFF
+#define NVM_ISCSI_CFG_TARGET_TCP_PORT_OFFSET 0
+
+ u32 ip_ver;
+#define NVM_ISCSI_CFG_IPv4 4
+#define NVM_ISCSI_CFG_IPv6 6
+
+ u32 rsvd_1[7]; /* 0x24 */
+ union nvm_iscsi_ipv4_addr ipv4_addr; /* 0x28 */
+ union nvm_iscsi_ipv6_addr ipv6_addr; /* 0x2C */
+ union nvm_iscsi_lun lun; /* 0x3C */
+
+ union nvm_iscsi_name target_name; /* 0x44 */
+ union nvm_iscsi_chap_name chap_name; /* 0x144 */
+ union nvm_iscsi_chap_password chap_password; /* 0x244 */
+
+ u32 rsvd_2[107]; /* 0x2C4 */
+};
+
+struct nvm_iscsi_block {
+ u32 id; /* 0x0 */
+#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK 0x0000000F
+#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET 0
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK 0x00000FF0
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET 4
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY BIT(0)
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED BIT(1)
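+/*
+ * e.g. GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID) recovers the
+ * absolute PF id a block is mapped to; see qedi_get_nvram_block().
+ */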
+
+ u32 rsvd_1[5]; /* 0x4 */
+
+ struct nvm_iscsi_generic generic; /* 0x18 */
+ struct nvm_iscsi_initiator initiator; /* 0x218 */
+ struct nvm_iscsi_target target[NUM_OF_ISCSI_TARGET_PER_PF];
+ /* 0x718 */
+
+ u32 rsvd_2[58]; /* 0x1718 */
+ /* total size - 0x1800 - 6K block */
+};
+
+struct nvm_iscsi_cfg {
+ u32 id; /* 0x0 */
+#define NVM_ISCSI_CFG_BLK_VERSION_MINOR_MASK 0x000000FF
+#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR_MASK 0x0000FF00
+#define NVM_ISCSI_CFG_BLK_SIGNATURE_MASK 0xFFFF0000
+#define NVM_ISCSI_CFG_BLK_SIGNATURE	0x49430000	/* "IC" - iSCSI
+							 * Config
+ */
+
+#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR 0
+#define NVM_ISCSI_CFG_BLK_VERSION_MINOR 10
+#define NVM_ISCSI_CFG_BLK_VERSION ((NVM_ISCSI_CFG_BLK_VERSION_MAJOR << 8) | \
+ NVM_ISCSI_CFG_BLK_VERSION_MINOR)
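+/* i.e. version 0.10 encodes as (0 << 8) | 10 = 0x000a */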
+
+ struct nvm_iscsi_block block[NUM_OF_ISCSI_PF_SUPPORTED]; /* 0x4 */
+};
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index c2dc836dc484..e101cd3043b9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3727,7 +3727,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
h &= QLA_CMD_HANDLE_MASK;
if (h != QLA_TGT_NULL_HANDLE) {
- if (unlikely(h > req->num_outstanding_cmds)) {
+ if (unlikely(h >= req->num_outstanding_cmds)) {
ql_dbg(ql_dbg_tgt, vha, 0xe052,
"qla_target(%d): Wrong handle %x received\n",
vha->vp_idx, handle);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 21225d62b0c1..4fe606b000b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -758,8 +758,14 @@ static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
if (hp->dxferp || hp->dxfer_len > 0)
return false;
return true;
- case SG_DXFER_TO_DEV:
case SG_DXFER_FROM_DEV:
+ /*
+	 * For SG_DXFER_FROM_DEV, dxfer_len is always set to > 0, and
+	 * dxferp may be either NULL or non-NULL, so there is no point
+	 * in checking either. Just return true.
+ */
+ return true;
+ case SG_DXFER_TO_DEV:
case SG_DXFER_TO_FROM_DEV:
if (!hp->dxferp || hp->dxfer_len == 0)
return false;
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07ec8a8877de..e164ffade38a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -690,7 +690,7 @@ struct pqi_config_table_heartbeat {
#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32
-#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U)
#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
#define RAID_MAP_MAX_ENTRIES 1024
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8b93197daefe..9be211d68b15 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -837,6 +837,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
.eh_timed_out = virtscsi_eh_timed_out,
+ .slave_alloc = virtscsi_device_alloc,
.can_queue = 1024,
.dma_boundary = UINT_MAX,
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 2afe3597982e..f4b7a98a7913 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -134,7 +134,6 @@ struct apid_data {
* @spmic: SPMI controller object
* @ver_ops: version dependent operations.
 * @ppid_to_apid:	in-memory copy of PPID -> channel (APID) mapping table.
- * v2 only.
*/
struct spmi_pmic_arb {
void __iomem *rd_base;
@@ -1016,6 +1015,13 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
+ pa->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
+ sizeof(*pa->ppid_to_apid), GFP_KERNEL);
+ if (!pa->ppid_to_apid) {
+ err = -ENOMEM;
+ goto err_put_ctrl;
+ }
+
hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
@@ -1048,15 +1054,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
err = PTR_ERR(pa->wr_base);
goto err_put_ctrl;
}
-
- pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
- PMIC_ARB_MAX_PPID,
- sizeof(*pa->ppid_to_apid),
- GFP_KERNEL);
- if (!pa->ppid_to_apid) {
- err = -ENOMEM;
- goto err_put_ctrl;
- }
}
dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 2b9b0941d9eb..6d23226e5f69 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -365,11 +365,23 @@ static int spmi_drv_remove(struct device *dev)
return 0;
}
+static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
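+	/*
+	 * -ENODEV from of_device_uevent_modalias() just means there is no
+	 * OF modalias to add for this device; treat that as success.
+	 */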
+ return 0;
+}
+
static struct bus_type spmi_bus_type = {
.name = "spmi",
.match = spmi_device_match,
.probe = spmi_drv_probe,
.remove = spmi_drv_remove,
+ .uevent = spmi_drv_uevent,
};
/**
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 268d4e6ef48a..ef28a1cb64ae 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -110,4 +110,6 @@ source "drivers/staging/ccree/Kconfig"
source "drivers/staging/typec/Kconfig"
+source "drivers/staging/vboxvideo/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index b93e6f5f0f6e..2918580bdb9e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index b2e382888981..2f7bfc1c59e5 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3116,8 +3116,7 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
/* following line: 2-1 per STC */
ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
- /* following line: N-1 per STC */
- ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
+ ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
} else { /* TRIG_EXT */
/* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 85b242ec5f9b..8fc191d99927 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1640,8 +1640,13 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
ibmsg = tx->tx_msg;
ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
- copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE,
- &from);
+ rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob,
+ &from);
+ if (rc != payload_nob) {
+ kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
+ return -EFAULT;
+ }
+
nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
@@ -1741,8 +1746,14 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
break;
}
- copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload,
- IBLND_MSG_SIZE, to);
+ rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen,
+ to);
+ if (rc != rlen) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = 0;
lnet_finalize(ni, lntmsg, 0);
break;
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 002d09159896..a69007ef77bf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -132,7 +132,7 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
kfree(pcmd->parmbuf);
}
- if (!pcmd->rsp) {
+ if (pcmd->rsp) {
if (pcmd->rspsz != 0) {
/* free rsp in cmd_obj */
kfree(pcmd->rsp);
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 963235fd7292..d283341cfe43 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -43,6 +43,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{} /* Terminating entry */
};
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 944dd25924be..4754f7a20684 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -40,7 +40,7 @@ static unsigned int get_mxclk_freq(void)
pll_reg = peek32(MXCLK_PLL_CTRL);
M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT;
- N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_M_SHIFT;
+ N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_N_SHIFT;
OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT;
POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT;
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 3aa4128703d5..67207b0554cd 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1053,6 +1053,26 @@ release_fb:
return err;
}
+static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
+ kfree(ap);
+ return 0;
+}
+
static int lynxfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1061,6 +1081,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
int fbidx;
int err;
+ err = lynxfb_kick_out_firmware_fb(pdev);
+ if (err)
+ return err;
+
/* enable device */
err = pcim_enable_device(pdev);
if (err)
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 82e5de248947..67956e24779c 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2314,6 +2314,7 @@ static void __exit speakup_exit(void)
mutex_lock(&spk_mutex);
synth_release();
mutex_unlock(&spk_mutex);
+ spk_ttyio_unregister_ldisc();
speakup_kobj_exit();
@@ -2376,6 +2377,7 @@ static int __init speakup_init(void)
if (err)
goto error_kobjects;
+ spk_ttyio_register_ldisc();
synth_init(synth_name);
speakup_register_devsynth();
/*
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 87b6a0a4c54d..046040ac074c 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -48,6 +48,8 @@ void spk_stop_serial_interrupt(void);
int spk_wait_for_xmitr(struct spk_synth *in_synth);
void spk_serial_release(void);
void spk_ttyio_release(void);
+void spk_ttyio_register_ldisc(void);
+void spk_ttyio_unregister_ldisc(void);
void synth_buffer_skip_nonlatin1(void);
u16 synth_buffer_getc(void);
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index ed8e96b06ead..fe340b07c482 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -154,12 +154,6 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
struct ktermios tmp_termios;
dev_t dev;
- ret = tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops);
- if (ret) {
- pr_err("Error registering line discipline.\n");
- return ret;
- }
-
ret = get_dev_to_use(synth, &dev);
if (ret)
return ret;
@@ -196,10 +190,24 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
tty_unlock(tty);
ret = tty_set_ldisc(tty, N_SPEAKUP);
+ if (ret)
+ pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
return ret;
}
+void spk_ttyio_register_ldisc(void)
+{
+ if (tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops))
+ pr_warn("speakup: Error registering line discipline. Most synths won't work.\n");
+}
+
+void spk_ttyio_unregister_ldisc(void)
+{
+ if (tty_unregister_ldisc(N_SPEAKUP))
+ pr_warn("speakup: Couldn't unregister ldisc\n");
+}
+
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
{
if (in_synth->alive && speakup_tty && speakup_tty->ops->write) {
@@ -300,7 +308,7 @@ void spk_ttyio_release(void)
tty_ldisc_flush(speakup_tty);
tty_unlock(speakup_tty);
- tty_ldisc_release(speakup_tty);
+ tty_release_struct(speakup_tty, speakup_tty->index);
}
EXPORT_SYMBOL_GPL(spk_ttyio_release);
diff --git a/drivers/staging/vboxvideo/Kconfig b/drivers/staging/vboxvideo/Kconfig
new file mode 100644
index 000000000000..a52746f9a670
--- /dev/null
+++ b/drivers/staging/vboxvideo/Kconfig
@@ -0,0 +1,12 @@
+config DRM_VBOXVIDEO
+ tristate "Virtual Box Graphics Card"
+ depends on DRM && X86 && PCI
+ select DRM_KMS_HELPER
+ help
+ This is a KMS driver for the virtual Graphics Card used in
+ Virtual Box virtual machines.
+
+	  Although it is possible to build this driver into the kernel, it
+	  is advised to build it as a module, so that it can be updated
+	  independently of the kernel. Select M to build this driver as a
+	  module and add support for these devices via drm/kms interfaces.
diff --git a/drivers/staging/vboxvideo/Makefile b/drivers/staging/vboxvideo/Makefile
new file mode 100644
index 000000000000..2d0b3bc7ad73
--- /dev/null
+++ b/drivers/staging/vboxvideo/Makefile
@@ -0,0 +1,7 @@
+ccflags-y := -Iinclude/drm
+
+vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
+ vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
+ vbox_mode.o vbox_prime.o vbox_ttm.o
+
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/staging/vboxvideo/TODO b/drivers/staging/vboxvideo/TODO
new file mode 100644
index 000000000000..ce764309b079
--- /dev/null
+++ b/drivers/staging/vboxvideo/TODO
@@ -0,0 +1,9 @@
+TODO:
+-Move the driver over to the atomic API
+-Stop using old load / unload drm_driver hooks
+-Get a full review of this driver from the drm maintainers on dri-devel
+-Extend this TODO with the results of that review
+
+Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
+Hans de Goede <hdegoede@redhat.com> and
+Michael Thayer <michael.thayer@oracle.com>.
diff --git a/drivers/staging/vboxvideo/hgsmi_base.c b/drivers/staging/vboxvideo/hgsmi_base.c
new file mode 100644
index 000000000000..15ff5f42e2cd
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_base.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_channels.h"
+#include "hgsmi_ch_setup.h"
+
+/**
+ * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
+ * @param ctx the context of the guest heap to use.
+ * @param location the offset chosen for the flags within guest VRAM.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
+{
+ struct hgsmi_buffer_location *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
+ HGSMI_CC_HOST_FLAGS_LOCATION);
+ if (!p)
+ return -ENOMEM;
+
+ p->buf_location = location;
+ p->buf_len = sizeof(struct hgsmi_host_flags);
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+/**
+ * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
+ * @param ctx the context of the guest heap to use.
+ * @param caps the capabilities to report, see vbva_caps.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps)
+{
+ struct vbva_caps *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
+ if (!p)
+ return -ENOMEM;
+
+ p->rc = VERR_NOT_IMPLEMENTED;
+ p->caps = caps;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ WARN_ON_ONCE(RT_FAILURE(p->rc));
+
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+int hgsmi_test_query_conf(struct gen_pool *ctx)
+{
+ u32 value = 0;
+ int ret;
+
+ ret = hgsmi_query_conf(ctx, U32_MAX, &value);
+ if (ret)
+ return ret;
+
+ return value == U32_MAX ? 0 : -EIO;
+}
+
+/**
+ * Query the host for an HGSMI configuration parameter via an HGSMI command.
+ * @param ctx the context containing the heap used
+ * @param index the index of the parameter to query,
+ * @see vbva_conf32::index
+ * @param value_ret where to store the value of the parameter on success
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
+{
+ struct vbva_conf32 *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_QUERY_CONF32);
+ if (!p)
+ return -ENOMEM;
+
+ p->index = index;
+ p->value = U32_MAX;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ *value_ret = p->value;
+
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+/**
+ * Pass the host a new mouse pointer shape via an HGSMI command.
+ *
+ * @param ctx the context containing the heap to be used
+ * @param flags cursor flags, @see VMMDevReqMousePointer::flags
+ * @param hot_x horizontal position of the hot spot
+ * @param hot_y vertical position of the hot spot
+ * @param width width in pixels of the cursor
+ * @param height height in pixels of the cursor
+ * @param pixels pixel data, @see VMMDevReqMousePointer for the format
+ * @param len size in bytes of the pixel data
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
+ u32 hot_x, u32 hot_y, u32 width, u32 height,
+ u8 *pixels, u32 len)
+{
+ struct vbva_mouse_pointer_shape *p;
+ u32 pixel_len = 0;
+ int rc;
+
+ if (flags & VBOX_MOUSE_POINTER_SHAPE) {
+ /*
+ * Size of the pointer data:
+ * sizeof (AND mask) + sizeof (XOR_MASK)
+ */
+ pixel_len = ((((width + 7) / 8) * height + 3) & ~3) +
+ width * 4 * height;
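+		/*
+		 * e.g. a 16x16 cursor: AND mask ((16 + 7) / 8) * 16 = 32
+		 * bytes (already 4-byte aligned) plus a 16 * 4 * 16 = 1024
+		 * byte XOR mask, i.e. pixel_len = 1056.
+		 */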
+ if (pixel_len > len)
+ return -EINVAL;
+
+ /*
+		 * If a shape is supplied, always make the pointer visible.
+ * See comments in 'vboxUpdatePointerShape'
+ */
+ flags |= VBOX_MOUSE_POINTER_VISIBLE;
+ }
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
+ VBVA_MOUSE_POINTER_SHAPE);
+ if (!p)
+ return -ENOMEM;
+
+ p->result = VINF_SUCCESS;
+ p->flags = flags;
+	p->hot_x = hot_x;
+ p->hot_y = hot_y;
+ p->width = width;
+ p->height = height;
+ if (pixel_len)
+ memcpy(p->data, pixels, pixel_len);
+
+ hgsmi_buffer_submit(ctx, p);
+
+ switch (p->result) {
+ case VINF_SUCCESS:
+ rc = 0;
+ break;
+ case VERR_NO_MEMORY:
+ rc = -ENOMEM;
+ break;
+ case VERR_NOT_SUPPORTED:
+ rc = -EBUSY;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ hgsmi_buffer_free(ctx, p);
+
+ return rc;
+}
+
+/**
+ * Report the guest cursor position. The host may wish to use this information
+ * to re-position its own cursor (though this is currently unlikely). The
+ * current host cursor position is returned.
+ * @param ctx The context containing the heap used.
+ * @param report_position Are we reporting a position?
+ * @param x Guest cursor X position.
+ * @param y Guest cursor Y position.
+ * @param x_host Host cursor X position is stored here. Optional.
+ * @param y_host Host cursor Y position is stored here. Optional.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
+ u32 x, u32 y, u32 *x_host, u32 *y_host)
+{
+ struct vbva_cursor_position *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_CURSOR_POSITION);
+ if (!p)
+ return -ENOMEM;
+
+ p->report_position = report_position;
+ p->x = x;
+ p->y = y;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ *x_host = p->x;
+ *y_host = p->y;
+
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+/**
+ * @todo Mouse pointer position to be read from VMMDev memory, address of the
+ * memory region can be queried from VMMDev via an IOCTL. This VMMDev memory
+ * region will contain host information which is needed by the guest.
+ *
+ * Reading will not cause a switch to the host.
+ *
+ * Have to take into account:
+ * * synchronization: host must write to the memory only from EMT,
+ * large structures must be read under flag, which tells the host
+ * that the guest is currently reading the memory (OWNER flag?).
+ * * guest writes: may be allocate a page for the host info and make
+ * the page readonly for the guest.
+ * * the information should be available only for additions drivers.
+ * * VMMDev additions driver will inform the host which version of the info
+ * it expects, host must support all versions.
+ */
diff --git a/drivers/staging/vboxvideo/hgsmi_ch_setup.h b/drivers/staging/vboxvideo/hgsmi_ch_setup.h
new file mode 100644
index 000000000000..8e6d9e11a69c
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_ch_setup.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __HGSMI_CH_SETUP_H__
+#define __HGSMI_CH_SETUP_H__
+
+/*
+ * Tell the host the location of the hgsmi_host_flags structure, where the
+ * host can write information about pending buffers, etc., and which can be
+ * quickly polled by the guest without needing port I/O.
+ */
+#define HGSMI_CC_HOST_FLAGS_LOCATION 0
+
+struct hgsmi_buffer_location {
+ u32 buf_location;
+ u32 buf_len;
+} __packed;
+
+/* HGSMI setup and configuration data structures. */
+/* host->guest commands pending, should be accessed under FIFO lock only */
+#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x01u
+/* IRQ is fired, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_IRQ 0x02u
+/* vsync interrupt flag, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_VSYNC 0x10u
+/* monitor hotplug flag, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_HOTPLUG 0x20u
+/**
+ * Cursor capability state change flag, should be accessed under
+ * VGAState::lock only. @see vbva_conf32.
+ */
+#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u
+
+struct hgsmi_host_flags {
+ /*
+ * Host flags can be accessed and modified in multiple threads
+ * concurrently, e.g. CrOpenGL HGCM and GUI threads when completing
+ * HGSMI 3D and Video Accel respectively, EMT thread when dealing with
+ * HGSMI command processing, etc.
+ * Besides settings/cleaning flags atomically, some flags have their
+ * own special sync restrictions, see comments for flags above.
+ */
+ u32 host_flags;
+ u32 reserved[3];
+} __packed;
+
+#endif
diff --git a/drivers/staging/vboxvideo/hgsmi_channels.h b/drivers/staging/vboxvideo/hgsmi_channels.h
new file mode 100644
index 000000000000..a2a34b2167b4
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_channels.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __HGSMI_CHANNELS_H__
+#define __HGSMI_CHANNELS_H__
+
+/*
+ * Each channel has an 8 bit identifier. There are a number of predefined
+ * (hardcoded) channels.
+ *
+ * HGSMI_CH_HGSMI channel can be used to map a string channel identifier
+ * to a free 16 bit numerical value. Values are allocated in the range
+ * [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
+ */
+
+/* A reserved channel value */
+#define HGSMI_CH_RESERVED 0x00
+/* HGCMI: setup and configuration */
+#define HGSMI_CH_HGSMI 0x01
+/* Graphics: VBVA */
+#define HGSMI_CH_VBVA 0x02
+/* Graphics: Seamless with a single guest region */
+#define HGSMI_CH_SEAMLESS 0x03
+/* Graphics: Seamless with separate host windows */
+#define HGSMI_CH_SEAMLESS2 0x04
+/* Graphics: OpenGL HW acceleration */
+#define HGSMI_CH_OPENGL 0x05
+
+/* The first channel index to be used for string mappings (inclusive) */
+#define HGSMI_CH_STRING_FIRST 0x20
+/* The last channel index for string mappings (inclusive) */
+#define HGSMI_CH_STRING_LAST 0xff
+
+#endif
diff --git a/drivers/staging/vboxvideo/hgsmi_defs.h b/drivers/staging/vboxvideo/hgsmi_defs.h
new file mode 100644
index 000000000000..5b21fb974d20
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_defs.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __HGSMI_DEFS_H__
+#define __HGSMI_DEFS_H__
+
+/* Buffer sequence type mask. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03
+/* Single buffer, not a part of a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00
+/* The first buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01
+/* A middle buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02
+/* The last buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03
+
+/* 16-byte buffer header. */
+struct hgsmi_buffer_header {
+ u32 data_size; /* Size of data that follows the header. */
+ u8 flags; /* HGSMI_BUFFER_HEADER_F_* */
+ u8 channel; /* The channel the data must be routed to. */
+ u16 channel_info; /* Opaque to the HGSMI, used by the channel. */
+
+ union {
+ /* Opaque placeholder to make the union 8 bytes. */
+ u8 header_data[8];
+
+ /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */
+ struct {
+ u32 reserved1; /* A reserved field, initialize to 0. */
+ u32 reserved2; /* A reserved field, initialize to 0. */
+ } buffer;
+
+ /* HGSMI_BUFFER_HEADER_F_SEQ_START */
+ struct {
+ /* Must be the same for all buffers in the sequence. */
+ u32 sequence_number;
+ /* The total size of the sequence. */
+ u32 sequence_size;
+ } sequence_start;
+
+ /*
+ * HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and
+ * HGSMI_BUFFER_HEADER_F_SEQ_END
+ */
+ struct {
+ /* Must be the same for all buffers in the sequence. */
+ u32 sequence_number;
+ /* Data offset in the entire sequence. */
+ u32 sequence_offset;
+ } sequence_continue;
+ } u;
+} __packed;
+
+/* 8-byte buffer tail. */
+struct hgsmi_buffer_tail {
+ /* Reserved, must be initialized to 0. */
+ u32 reserved;
+ /*
+ * One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html
+	 * Over the offset, the header and the first 4 bytes of the tail.
+ */
+ u32 checksum;
+} __packed;
+
+/*
+ * The size of the array of channels. Array indexes are u8.
+ * Note: the value must not be changed.
+ */
+#define HGSMI_NUMBER_OF_CHANNELS 0x100
+
+#endif
diff --git a/drivers/staging/vboxvideo/modesetting.c b/drivers/staging/vboxvideo/modesetting.c
new file mode 100644
index 000000000000..7616b8aab23a
--- /dev/null
+++ b/drivers/staging/vboxvideo/modesetting.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_channels.h"
+
+/**
+ * Set a video mode via an HGSMI request. The views must have been
+ * initialised first using VBoxHGSMISendViewInfo, and if the mode is being
+ * set on the first display then it must be set first using registers.
+ * @param ctx The context containing the heap to use
+ * @param display The screen number
+ * @param origin_x The horizontal displacement relative to the first screen
+ * @param origin_y The vertical displacement relative to the first screen
+ * @param start_offset The offset of the visible area of the framebuffer
+ * relative to the framebuffer start
+ * @param pitch The offset in bytes between the starts of two adjacent
+ * scan lines in video RAM
+ * @param width The mode width
+ * @param height The mode height
+ * @param bpp The colour depth of the mode
+ * @param flags Flags
+ */
+void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
+ s32 origin_x, s32 origin_y, u32 start_offset,
+ u32 pitch, u32 width, u32 height,
+ u16 bpp, u16 flags)
+{
+ struct vbva_infoscreen *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_INFO_SCREEN);
+ if (!p)
+ return;
+
+ p->view_index = display;
+ p->origin_x = origin_x;
+ p->origin_y = origin_y;
+ p->start_offset = start_offset;
+ p->line_size = pitch;
+ p->width = width;
+ p->height = height;
+ p->bits_per_pixel = bpp;
+ p->flags = flags;
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+}
+
+/**
+ * Report the rectangle relative to which absolute pointer events should be
+ * expressed. This information remains valid until the next VBVA resize event
+ * for any screen, at which time it is reset to the bounding rectangle of all
+ * virtual screens.
+ * @param ctx The context containing the heap to use.
+ * @param origin_x Upper left X co-ordinate relative to the first screen.
+ * @param origin_y Upper left Y co-ordinate relative to the first screen.
+ * @param width Rectangle width.
+ * @param height Rectangle height.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
+ u32 width, u32 height)
+{
+ struct vbva_report_input_mapping *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_REPORT_INPUT_MAPPING);
+ if (!p)
+ return -ENOMEM;
+
+ p->x = origin_x;
+ p->y = origin_y;
+ p->cx = width;
+ p->cy = height;
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+/**
+ * Get most recent video mode hints.
+ * @param ctx The context containing the heap to use.
+ * @param screens The number of screens to query hints for, starting at 0.
+ * @param hints Array of vbva_modehint structures for receiving the hints.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
+ struct vbva_modehint *hints)
+{
+ struct vbva_query_mode_hints *p;
+ size_t size;
+
+ if (WARN_ON(!hints))
+ return -EINVAL;
+
+ size = screens * sizeof(struct vbva_modehint);
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA,
+ VBVA_QUERY_MODE_HINTS);
+ if (!p)
+ return -ENOMEM;
+
+ p->hints_queried_count = screens;
+ p->hint_structure_guest_size = sizeof(struct vbva_modehint);
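+	/* Pre-set a failure code in case an old host ignores the request. */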
+ p->rc = VERR_NOT_SUPPORTED;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ if (RT_FAILURE(p->rc)) {
+ hgsmi_buffer_free(ctx, p);
+ return -EIO;
+ }
+
+ memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size);
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
new file mode 100644
index 000000000000..92ae1560a16d
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/vt_kern.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+
+int vbox_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, vbox_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static const struct pci_device_id pciidlist[] = {
+ { 0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0, 0, 0},
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void vbox_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int vbox_drm_freeze(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ drm_kms_helper_poll_disable(dev);
+
+ pci_save_state(dev->pdev);
+
+ drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, true);
+
+ return 0;
+}
+
+static int vbox_drm_thaw(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ drm_mode_config_reset(dev);
+ drm_helper_resume_force_mode(dev);
+ drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, false);
+
+ return 0;
+}
+
+static int vbox_drm_resume(struct drm_device *dev)
+{
+ int ret;
+
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ ret = vbox_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+
+ return 0;
+}
+
+static int vbox_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ int error;
+
+ error = vbox_drm_freeze(ddev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int vbox_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+ return vbox_drm_resume(ddev);
+}
+
+static int vbox_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ if (!ddev || !ddev->dev_private)
+ return -ENODEV;
+
+ return vbox_drm_freeze(ddev);
+}
+
+static int vbox_pm_thaw(struct device *dev)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+ return vbox_drm_thaw(ddev);
+}
+
+static int vbox_pm_poweroff(struct device *dev)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+ return vbox_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops vbox_pm_ops = {
+ .suspend = vbox_pm_suspend,
+ .resume = vbox_pm_resume,
+ .freeze = vbox_pm_freeze,
+ .thaw = vbox_pm_thaw,
+ .poweroff = vbox_pm_poweroff,
+ .restore = vbox_pm_resume,
+};
+
+static struct pci_driver vbox_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = vbox_pci_probe,
+ .remove = vbox_pci_remove,
+ .driver.pm = &vbox_pm_ops,
+};
+
+static const struct file_operations vbox_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = vbox_mmap,
+ .poll = drm_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .read = drm_read,
+};
+
+static int vbox_master_set(struct drm_device *dev,
+ struct drm_file *file_priv, bool from_open)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ /*
+ * We do not yet know whether the new owner can handle hotplug, so we
+	 * do not advertise dynamic modes on the first query; after that we
+	 * send a tentative hotplug notification to see if they query again.
+ */
+ vbox->initial_mode_queried = false;
+
+ mutex_lock(&vbox->hw_mutex);
+ /*
+ * Disable VBVA when someone releases master in case the next person
+	 * tries to do VESA.
+ */
+	/** @todo work out if anyone is likely to do this and whether it will work. */
+ /*
+ * Update: we also disable it because if the new master does not do
+ * dirty rectangle reporting (e.g. old versions of Plymouth) then at
+ * least the first screen will still be updated. We enable it as soon
+ * as we receive a dirty rectangle report.
+ */
+ vbox_disable_accel(vbox);
+ mutex_unlock(&vbox->hw_mutex);
+
+ return 0;
+}
+
+static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ /* See vbox_master_set() */
+ vbox->initial_mode_queried = false;
+
+ mutex_lock(&vbox->hw_mutex);
+ vbox_disable_accel(vbox);
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_PRIME,
+ .dev_priv_size = 0,
+
+ .load = vbox_driver_load,
+ .unload = vbox_driver_unload,
+ .lastclose = vbox_driver_lastclose,
+ .master_set = vbox_master_set,
+ .master_drop = vbox_master_drop,
+ .set_busid = drm_pci_set_busid,
+
+ .fops = &vbox_fops,
+ .irq_handler = vbox_irq_handler,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_free_object = vbox_gem_free_object,
+ .dumb_create = vbox_dumb_create,
+ .dumb_map_offset = vbox_dumb_mmap_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = vbox_gem_prime_pin,
+ .gem_prime_unpin = vbox_gem_prime_unpin,
+ .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
+ .gem_prime_vmap = vbox_gem_prime_vmap,
+ .gem_prime_vunmap = vbox_gem_prime_vunmap,
+ .gem_prime_mmap = vbox_gem_prime_mmap,
+};
+
+static int __init vbox_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && vbox_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (vbox_modeset == 0)
+ return -EINVAL;
+
+ return drm_pci_init(&driver, &vbox_pci_driver);
+}
+
+static void __exit vbox_exit(void)
+{
+ drm_pci_exit(&driver, &vbox_pci_driver);
+}
+
+module_init(vbox_init);
+module_exit(vbox_exit);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
new file mode 100644
index 000000000000..4b9302703b36
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.h
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#ifndef __VBOX_DRV_H__
+#define __VBOX_DRV_H__
+
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_ch_setup.h"
+
+#define DRIVER_NAME "vboxvideo"
+#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
+#define DRIVER_DATE "20130823"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define VBOX_MAX_CURSOR_WIDTH 64
+#define VBOX_MAX_CURSOR_HEIGHT 64
+#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
+#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
+
+#define VBOX_MAX_SCREENS 32
+
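+/*
+ * The HGSMI guest heap occupies the last VBVA_ADAPTER_INFORMATION_SIZE
+ * bytes of VRAM; the hgsmi_host_flags struct sits at the end of that area.
+ */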
+#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \
+ VBVA_ADAPTER_INFORMATION_SIZE)
+#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE
+#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \
+ sizeof(struct hgsmi_host_flags))
+#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
+
+struct vbox_fbdev;
+
+struct vbox_private {
+ struct drm_device *dev;
+
+ u8 __iomem *guest_heap;
+ u8 __iomem *vbva_buffers;
+ struct gen_pool *guest_pool;
+ struct vbva_buf_ctx *vbva_info;
+ bool any_pitch;
+ u32 num_crtcs;
+ /** Amount of available VRAM, including space used for buffers. */
+ u32 full_vram_size;
+ /** Amount of available VRAM, not including space used for buffers. */
+ u32 available_vram_size;
+ /** Array of structures for receiving mode hints. */
+ struct vbva_modehint *last_mode_hints;
+
+ struct vbox_fbdev *fbdev;
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ } ttm;
+
+ struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
+ /**
+ * We decide whether or not user-space supports display hot-plug
+ * depending on whether they react to a hot-plug event after the initial
+ * mode query.
+ */
+ bool initial_mode_queried;
+ struct work_struct hotplug_work;
+ u32 input_mapping_width;
+ u32 input_mapping_height;
+ /**
+ * Is user-space using an X.Org-style layout of one large frame-buffer
+	 * encompassing all screens, or is the fbdev console active?
+ */
+ bool single_framebuffer;
+ u32 cursor_width;
+ u32 cursor_height;
+ u32 cursor_hot_x;
+ u32 cursor_hot_y;
+ size_t cursor_data_size;
+ u8 cursor_data[CURSOR_DATA_SIZE];
+};
+
+#undef CURSOR_PIXEL_COUNT
+#undef CURSOR_DATA_SIZE
+
+int vbox_driver_load(struct drm_device *dev, unsigned long flags);
+void vbox_driver_unload(struct drm_device *dev);
+void vbox_driver_lastclose(struct drm_device *dev);
+
+struct vbox_gem_object;
+
+struct vbox_connector {
+ struct drm_connector base;
+ char name[32];
+ struct vbox_crtc *vbox_crtc;
+ struct {
+ u16 width;
+ u16 height;
+ bool disconnected;
+ } mode_hint;
+};
+
+struct vbox_crtc {
+ struct drm_crtc base;
+ bool blanked;
+ bool disconnected;
+ unsigned int crtc_id;
+ u32 fb_offset;
+ bool cursor_enabled;
+ u16 x_hint;
+ u16 y_hint;
+};
+
+struct vbox_encoder {
+ struct drm_encoder base;
+};
+
+struct vbox_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct vbox_fbdev {
+ struct drm_fb_helper helper;
+ struct vbox_framebuffer afb;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
+};
+
+#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
+#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
+#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
+#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
+
+int vbox_mode_init(struct drm_device *dev);
+void vbox_mode_fini(struct drm_device *dev);
+
+#define DRM_MODE_FB_CMD drm_mode_fb_cmd2
+#define CRTC_FB(crtc) ((crtc)->primary->fb)
+
+void vbox_enable_accel(struct vbox_private *vbox);
+void vbox_disable_accel(struct vbox_private *vbox);
+void vbox_report_caps(struct vbox_private *vbox);
+
+void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects);
+
+int vbox_framebuffer_init(struct drm_device *dev,
+ struct vbox_framebuffer *vbox_fb,
+ const struct DRM_MODE_FB_CMD *mode_cmd,
+ struct drm_gem_object *obj);
+
+int vbox_fbdev_init(struct drm_device *dev);
+void vbox_fbdev_fini(struct drm_device *dev);
+void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr);
+
+struct vbox_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ struct ttm_place placements[3];
+ int pin_count;
+};
+
+#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)
+
+static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vbox_bo, bo);
+}
+
+#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
+
+int vbox_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+void vbox_gem_free_object(struct drm_gem_object *obj);
+int vbox_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle, u64 *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)
+
+int vbox_mm_init(struct vbox_private *vbox);
+void vbox_mm_fini(struct vbox_private *vbox);
+
+int vbox_bo_create(struct drm_device *dev, int size, int align,
+ u32 flags, struct vbox_bo **pvboxbo);
+
+int vbox_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel, struct drm_gem_object **obj);
+
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int vbox_bo_unpin(struct vbox_bo *bo);
+
+static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
+ if (ret) {
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+static inline void vbox_bo_unreserve(struct vbox_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+void vbox_ttm_placement(struct vbox_bo *bo, int domain);
+int vbox_bo_push_sysram(struct vbox_bo *bo);
+int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* vbox_prime.c */
+int vbox_gem_prime_pin(struct drm_gem_object *obj);
+void vbox_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *vbox_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table);
+void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
+void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int vbox_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *area);
+
+/* vbox_irq.c */
+int vbox_irq_init(struct vbox_private *vbox);
+void vbox_irq_fini(struct vbox_private *vbox);
+void vbox_report_hotplug(struct vbox_private *vbox);
+irqreturn_t vbox_irq_handler(int irq, void *arg);
+
+/* vbox_hgsmi.c */
+void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
+ u8 channel, u16 channel_info);
+void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf);
+int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf);
+
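+/* Program a VBE DISPI register through the index/data I/O port pair. */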
+static inline void vbox_write_ioport(u16 index, u16 data)
+{
+ outw(index, VBE_DISPI_IOPORT_INDEX);
+ outw(data, VBE_DISPI_IOPORT_DATA);
+}
+
+#endif
diff --git a/drivers/staging/vboxvideo/vbox_err.h b/drivers/staging/vboxvideo/vbox_err.h
new file mode 100644
index 000000000000..562db8630eb0
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_err.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __VBOX_ERR_H__
+#define __VBOX_ERR_H__
+
+/**
+ * @name VirtualBox virtual-hardware error macros
+ * @{
+ */
+
+#define VINF_SUCCESS 0
+#define VERR_INVALID_PARAMETER (-2)
+#define VERR_INVALID_POINTER (-6)
+#define VERR_NO_MEMORY (-8)
+#define VERR_NOT_IMPLEMENTED (-12)
+#define VERR_INVALID_FUNCTION (-36)
+#define VERR_NOT_SUPPORTED (-37)
+#define VERR_TOO_MUCH_DATA (-42)
+#define VERR_INVALID_STATE (-79)
+#define VERR_OUT_OF_RESOURCES (-80)
+#define VERR_ALREADY_EXISTS (-105)
+#define VERR_INTERNAL_ERROR (-225)
+
+#define RT_SUCCESS_NP(rc) ((int)(rc) >= VINF_SUCCESS)
+#define RT_SUCCESS(rc) (likely(RT_SUCCESS_NP(rc)))
+#define RT_FAILURE(rc) (unlikely(!RT_SUCCESS_NP(rc)))
+
+/** @} */
+
+#endif
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
new file mode 100644
index 000000000000..35f6d9f8c203
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_fb.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+
+#define VBOX_DIRTY_DELAY (HZ / 30)
+/**
+ * Tell the host about dirty rectangles to update.
+ */
+static void vbox_dirty_update(struct vbox_fbdev *fbdev,
+ int x, int y, int width, int height)
+{
+ struct drm_gem_object *obj;
+ struct vbox_bo *bo;
+ int ret = -EBUSY;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;
+ struct drm_clip_rect rect;
+
+ obj = fbdev->afb.obj;
+ bo = gem_to_vbox_bo(obj);
+
+ /*
+	 * Try to reserve the BO; if we fail with -EBUSY then the BO is
+	 * being moved and we should store up the damage until later.
+ */
+ if (drm_can_sleep())
+ ret = vbox_bo_reserve(bo, true);
+ if (ret) {
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&fbdev->dirty_lock, flags);
+
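+	/* Grow the rectangle to cover any damage stored from earlier calls. */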
+ if (fbdev->y1 < y)
+ y = fbdev->y1;
+ if (fbdev->y2 > y2)
+ y2 = fbdev->y2;
+ if (fbdev->x1 < x)
+ x = fbdev->x1;
+ if (fbdev->x2 > x2)
+ x2 = fbdev->x2;
+
+ if (store_for_later) {
+ fbdev->x1 = x;
+ fbdev->x2 = x2;
+ fbdev->y1 = y;
+ fbdev->y2 = y2;
+ spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
+ return;
+ }
+
+ fbdev->x1 = INT_MAX;
+ fbdev->y1 = INT_MAX;
+ fbdev->x2 = 0;
+ fbdev->y2 = 0;
+
+ spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
+
+ /*
+ * Not sure why the original code subtracted 1 here, but I will keep
+ * it that way to avoid unnecessary differences.
+ */
+ rect.x1 = x;
+ rect.x2 = x2 + 1;
+ rect.y1 = y;
+ rect.y2 = y2 + 1;
+ vbox_framebuffer_dirty_rectangles(&fbdev->afb.base, &rect, 1);
+
+ vbox_bo_unreserve(bo);
+}
+
+#ifdef CONFIG_FB_DEFERRED_IO
+static void vbox_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+ struct vbox_fbdev *fbdev = info->par;
+ unsigned long start, end, min, max;
+ struct page *page;
+ int y1, y2;
+
+ min = ULONG_MAX;
+ max = 0;
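+	/* Find the byte range spanned by the touched pages. */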
+ list_for_each_entry(page, pagelist, lru) {
+ start = page->index << PAGE_SHIFT;
+ end = start + PAGE_SIZE - 1;
+ min = min(min, start);
+ max = max(max, end);
+ }
+
+ if (min < max) {
+ y1 = min / info->fix.line_length;
+ y2 = (max / info->fix.line_length) + 1;
+ DRM_INFO("%s: Calling dirty update: 0, %d, %d, %d\n",
+ __func__, y1, info->var.xres, y2 - y1 - 1);
+ vbox_dirty_update(fbdev, 0, y1, info->var.xres, y2 - y1 - 1);
+ }
+}
+
+static struct fb_deferred_io vbox_defio = {
+ .delay = VBOX_DIRTY_DELAY,
+ .deferred_io = vbox_deferred_io,
+};
+#endif
+
+static void vbox_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct vbox_fbdev *fbdev = info->par;
+
+ sys_fillrect(info, rect);
+ vbox_dirty_update(fbdev, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void vbox_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct vbox_fbdev *fbdev = info->par;
+
+ sys_copyarea(info, area);
+ vbox_dirty_update(fbdev, area->dx, area->dy, area->width, area->height);
+}
+
+static void vbox_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct vbox_fbdev *fbdev = info->par;
+
+ sys_imageblit(info, image);
+ vbox_dirty_update(fbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+static struct fb_ops vboxfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = vbox_fillrect,
+ .fb_copyarea = vbox_copyarea,
+ .fb_imageblit = vbox_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int vboxfb_create_object(struct vbox_fbdev *fbdev,
+ struct DRM_MODE_FB_CMD *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = fbdev->helper.dev;
+ u32 size;
+ struct drm_gem_object *gobj;
+ u32 pitch = mode_cmd->pitches[0];
+ int ret;
+
+ size = pitch * mode_cmd->height;
+ ret = vbox_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+
+ return 0;
+}
+
+static int vboxfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct vbox_fbdev *fbdev =
+ container_of(helper, struct vbox_fbdev, helper);
+ struct drm_device *dev = fbdev->helper.dev;
+ struct DRM_MODE_FB_CMD mode_cmd;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ struct device *device = &dev->pdev->dev;
+ struct drm_gem_object *gobj;
+ struct vbox_bo *bo;
+ int size, ret;
+ u32 pitch;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ mode_cmd.pitches[0] = pitch;
+
+ size = pitch * mode_cmd.height;
+
+ ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ bo = gem_to_vbox_bo(gobj);
+
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+ if (ret) {
+ vbox_bo_unreserve(bo);
+ return ret;
+ }
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ vbox_bo_unreserve(bo);
+ if (ret) {
+ DRM_ERROR("failed to kmap fbcon\n");
+ return ret;
+ }
+
+ info = framebuffer_alloc(0, device);
+ if (!info)
+ return -ENOMEM;
+ info->par = fbdev;
+
+ fbdev->size = size;
+
+ fb = &fbdev->afb.base;
+ fbdev->helper.fb = fb;
+ fbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "vboxdrmfb");
+
+ /*
+ * The last flag forces a mode set on VT switches even if the kernel
+ * does not think it is needed.
+ */
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT |
+ FBINFO_MISC_ALWAYS_SETPAR;
+ info->fbops = &vboxfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret)
+ return -ENOMEM;
+
+ /*
+	 * This seems to be done as a safety check that the framebuffer
+	 * is not registered twice by different drivers.
+ */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures)
+ return -ENOMEM;
+ info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = bo->kmap.virtual;
+ info->screen_size = size;
+
+#ifdef CONFIG_FB_DEFERRED_IO
+ info->fbdefio = &vbox_defio;
+ fb_deferred_io_init(info);
+#endif
+
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
+
+ return 0;
+}
+
+static void vbox_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+}
+
+static void vbox_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ *red = regno;
+ *green = regno;
+ *blue = regno;
+}
+
+static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
+ .gamma_set = vbox_fb_gamma_set,
+ .gamma_get = vbox_fb_gamma_get,
+ .fb_probe = vboxfb_create,
+};
+
+void vbox_fbdev_fini(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct vbox_fbdev *fbdev = vbox->fbdev;
+ struct vbox_framebuffer *afb = &fbdev->afb;
+
+ drm_fb_helper_unregister_fbi(&fbdev->helper);
+
+ if (afb->obj) {
+ struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);
+
+ if (!vbox_bo_reserve(bo, false)) {
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+ /*
+ * QXL does this, but is it really needed before
+ * freeing?
+ */
+ if (bo->pin_count)
+ vbox_bo_unpin(bo);
+ vbox_bo_unreserve(bo);
+ }
+ drm_gem_object_unreference_unlocked(afb->obj);
+ afb->obj = NULL;
+ }
+ drm_fb_helper_fini(&fbdev->helper);
+
+ drm_framebuffer_unregister_private(&afb->base);
+ drm_framebuffer_cleanup(&afb->base);
+}
+
+int vbox_fbdev_init(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct vbox_fbdev *fbdev;
+ int ret;
+
+ fbdev = devm_kzalloc(dev->dev, sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ return -ENOMEM;
+
+ vbox->fbdev = fbdev;
+ spin_lock_init(&fbdev->dirty_lock);
+
+ drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
+ ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs);
+ if (ret)
+ return ret;
+
+ ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
+ if (ret)
+ goto err_fini;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
+ if (ret)
+ goto err_fini;
+
+ return 0;
+
+err_fini:
+ drm_fb_helper_fini(&fbdev->helper);
+ return ret;
+}
+
+void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr)
+{
+ struct fb_info *fbdev = vbox->fbdev->helper.fbdev;
+
+ fbdev->fix.smem_start = fbdev->apertures->ranges[0].base + gpu_addr;
+ fbdev->fix.smem_len = vbox->available_vram_size - gpu_addr;
+}
diff --git a/drivers/staging/vboxvideo/vbox_hgsmi.c b/drivers/staging/vboxvideo/vbox_hgsmi.c
new file mode 100644
index 000000000000..822fd31121cb
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_hgsmi.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include "vbox_drv.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_defs.h"
+
+/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */
+static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size)
+{
+ while (size--) {
+ hash += *data++;
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ return hash;
+}
+
+static u32 hgsmi_hash_end(u32 hash)
+{
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash;
+}
+
+/* Not really a checksum but that is the naming used in all vbox code */
+static u32 hgsmi_checksum(u32 offset,
+ const struct hgsmi_buffer_header *header,
+ const struct hgsmi_buffer_tail *tail)
+{
+ u32 checksum;
+
+ checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset));
+ checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header));
+	/* Hash only the first 4 bytes of the tail, not the checksum itself. */
+ checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4);
+
+ return hgsmi_hash_end(checksum);
+}
+
+void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
+ u8 channel, u16 channel_info)
+{
+ struct hgsmi_buffer_header *h;
+ struct hgsmi_buffer_tail *t;
+ size_t total_size;
+ dma_addr_t offset;
+
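+	/* An HGSMI buffer is laid out as: header, data, tail. */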
+ total_size = size + sizeof(*h) + sizeof(*t);
+ h = gen_pool_dma_alloc(guest_pool, total_size, &offset);
+ if (!h)
+ return NULL;
+
+ t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size);
+
+ h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
+ h->data_size = size;
+ h->channel = channel;
+ h->channel_info = channel_info;
+ memset(&h->u.header_data, 0, sizeof(h->u.header_data));
+
+ t->reserved = 0;
+ t->checksum = hgsmi_checksum(offset, h, t);
+
+ return (u8 *)h + sizeof(*h);
+}
+
+void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
+{
+ struct hgsmi_buffer_header *h =
+ (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h));
+ size_t total_size = h->data_size + sizeof(*h) +
+ sizeof(struct hgsmi_buffer_tail);
+
+ gen_pool_free(guest_pool, (unsigned long)h, total_size);
+}
+
+int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
+{
+ phys_addr_t offset;
+
+ offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
+ sizeof(struct hgsmi_buffer_header));
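+	/* Pass the physical offset of the buffer to the host via the HGSMI port. */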
+ outl(offset, VGA_PORT_HGSMI_GUEST);
+	/* Make sure the compiler and CPU re-read any memory the host changed. */
+ mb();
+
+ return 0;
+}
diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c
new file mode 100644
index 000000000000..3ca8bec62ac4
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_irq.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2016-2017 Oracle Corporation
+ * This file is based on qxl_irq.c
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+
+static void vbox_clear_irq(void)
+{
+ outl((u32)~0, VGA_PORT_HGSMI_HOST);
+}
+
+static u32 vbox_get_flags(struct vbox_private *vbox)
+{
+ return readl(vbox->guest_heap + HOST_FLAGS_OFFSET);
+}
+
+void vbox_report_hotplug(struct vbox_private *vbox)
+{
+ schedule_work(&vbox->hotplug_work);
+}
+
+irqreturn_t vbox_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
+ u32 host_flags = vbox_get_flags(vbox);
+
+ if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
+ return IRQ_NONE;
+
+ /*
+ * Due to a bug in the initial host implementation of hot-plug irqs,
+ * the hot-plug and cursor capability flags were never cleared.
+ * Fortunately we can tell when they would have been set by checking
+ * that the VSYNC flag is not set.
+ */
+ if (host_flags &
+ (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) &&
+ !(host_flags & HGSMIHOSTFLAGS_VSYNC))
+ vbox_report_hotplug(vbox);
+
+ vbox_clear_irq();
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Check that the position hints provided by the host are suitable for GNOME
+ * shell (i.e. all screens disjoint and hints for all enabled screens) and if
+ * not replace them with default ones. Providing valid hints improves the
+ * chances that we will get a known screen layout for pointer mapping.
+ */
+static void validate_or_set_position_hints(struct vbox_private *vbox)
+{
+ struct vbva_modehint *hintsi, *hintsj;
+ bool valid = true;
+ u16 currentx = 0;
+ int i, j;
+
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ for (j = 0; j < i; ++j) {
+ hintsi = &vbox->last_mode_hints[i];
+ hintsj = &vbox->last_mode_hints[j];
+
+ if (hintsi->enabled && hintsj->enabled) {
+ if (hintsi->dx >= 0xffff ||
+ hintsi->dy >= 0xffff ||
+ hintsj->dx >= 0xffff ||
+ hintsj->dy >= 0xffff ||
+ (hintsi->dx <
+ hintsj->dx + (hintsj->cx & 0x8fff) &&
+ hintsi->dx + (hintsi->cx & 0x8fff) >
+ hintsj->dx) ||
+ (hintsi->dy <
+ hintsj->dy + (hintsj->cy & 0x8fff) &&
+ hintsi->dy + (hintsi->cy & 0x8fff) >
+ hintsj->dy))
+ valid = false;
+ }
+ }
+ }
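+	/* Fall back to a simple left-to-right layout of all enabled screens. */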
+ if (!valid)
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ if (vbox->last_mode_hints[i].enabled) {
+ vbox->last_mode_hints[i].dx = currentx;
+ vbox->last_mode_hints[i].dy = 0;
+ currentx +=
+ vbox->last_mode_hints[i].cx & 0x8fff;
+ }
+ }
+}
+
+/**
+ * Query the host for the most recent video mode hints.
+ */
+static void vbox_update_mode_hints(struct vbox_private *vbox)
+{
+ struct drm_device *dev = vbox->dev;
+ struct drm_connector *connector;
+ struct vbox_connector *vbox_conn;
+ struct vbva_modehint *hints;
+ u16 flags;
+ bool disconnected;
+ unsigned int crtc_id;
+ int ret;
+
+ ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs,
+ vbox->last_mode_hints);
+ if (ret) {
+ DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret);
+ return;
+ }
+
+ validate_or_set_position_hints(vbox);
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ vbox_conn = to_vbox_connector(connector);
+
+ hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id];
+ if (hints->magic != VBVAMODEHINT_MAGIC)
+ continue;
+
+ disconnected = !(hints->enabled);
+ crtc_id = vbox_conn->vbox_crtc->crtc_id;
+ vbox_conn->mode_hint.width = hints->cx & 0x8fff;
+ vbox_conn->mode_hint.height = hints->cy & 0x8fff;
+ vbox_conn->vbox_crtc->x_hint = hints->dx;
+ vbox_conn->vbox_crtc->y_hint = hints->dy;
+ vbox_conn->mode_hint.disconnected = disconnected;
+
+ if (vbox_conn->vbox_crtc->disconnected == disconnected)
+ continue;
+
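+		/* Tell the host whether the screen should be disabled or blanked. */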
+ if (disconnected)
+ flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
+ else
+ flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK;
+
+ hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0,
+ hints->cx * 4, hints->cx,
+ hints->cy, 0, flags);
+
+ vbox_conn->vbox_crtc->disconnected = disconnected;
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static void vbox_hotplug_worker(struct work_struct *work)
+{
+ struct vbox_private *vbox = container_of(work, struct vbox_private,
+ hotplug_work);
+
+ vbox_update_mode_hints(vbox);
+ drm_kms_helper_hotplug_event(vbox->dev);
+}
+
+int vbox_irq_init(struct vbox_private *vbox)
+{
+ INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
+ vbox_update_mode_hints(vbox);
+
+ return drm_irq_install(vbox->dev, vbox->dev->pdev->irq);
+}
+
+void vbox_irq_fini(struct vbox_private *vbox)
+{
+ drm_irq_uninstall(vbox->dev);
+ flush_work(&vbox->hotplug_work);
+}
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
new file mode 100644
index 000000000000..d0c6ec75a3c7
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_main.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>,
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+
+static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
+
+ if (vbox_fb->obj)
+ drm_gem_object_unreference_unlocked(vbox_fb->obj);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+void vbox_enable_accel(struct vbox_private *vbox)
+{
+ unsigned int i;
+ struct vbva_buffer *vbva;
+
+ if (!vbox->vbva_info || !vbox->vbva_buffers) {
+ /* Should never happen... */
+ DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
+ return;
+ }
+
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ if (vbox->vbva_info[i].vbva)
+ continue;
+
+ vbva = (void *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE;
+ if (!vbva_enable(&vbox->vbva_info[i],
+ vbox->guest_pool, vbva, i)) {
+			/* Very old host or driver error. */
+ DRM_ERROR("vboxvideo: vbva_enable failed\n");
+ return;
+ }
+ }
+}
+
+void vbox_disable_accel(struct vbox_private *vbox)
+{
+ unsigned int i;
+
+ for (i = 0; i < vbox->num_crtcs; ++i)
+ vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
+}
+
+void vbox_report_caps(struct vbox_private *vbox)
+{
+ u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
+ VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
+
+ if (vbox->initial_mode_queried)
+ caps |= VBVACAPS_VIDEO_MODE_HINTS;
+
+ hgsmi_send_caps_info(vbox->guest_pool, caps);
+}
+
+/**
+ * Send information about dirty rectangles to VBVA. If necessary we enable
+ * VBVA first, as this is normally disabled after a change of master in case
+ * the new master does not send dirty rectangle information (is this even
+ * allowed?).
+ */
+void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects)
+{
+ struct vbox_private *vbox = fb->dev->dev_private;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ mutex_lock(&vbox->hw_mutex);
+ list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
+ if (CRTC_FB(crtc) != fb)
+ continue;
+
+ vbox_enable_accel(vbox);
+
+ for (i = 0; i < num_rects; ++i) {
+ struct vbva_cmd_hdr cmd_hdr;
+ unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
+
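+			/* Skip rectangles which do not intersect this CRTC. */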
+ if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
+ (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
+ (rects[i].x2 < crtc->x) ||
+ (rects[i].y2 < crtc->y))
+ continue;
+
+ cmd_hdr.x = (s16)rects[i].x1;
+ cmd_hdr.y = (s16)rects[i].y1;
+ cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
+ cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
+
+ if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
+ vbox->guest_pool))
+ continue;
+
+ vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
+ &cmd_hdr, sizeof(cmd_hdr));
+ vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
+ }
+ }
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects)
+{
+ vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs vbox_fb_funcs = {
+ .destroy = vbox_user_framebuffer_destroy,
+ .dirty = vbox_user_framebuffer_dirty,
+};
+
+int vbox_framebuffer_init(struct drm_device *dev,
+ struct vbox_framebuffer *vbox_fb,
+ const struct DRM_MODE_FB_CMD *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
+ vbox_fb->obj = obj;
+ ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct drm_framebuffer *vbox_user_framebuffer_create(
+ struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct vbox_framebuffer *vbox_fb;
+ int ret = -ENOMEM;
+
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
+ if (!vbox_fb)
+ goto err_unref_obj;
+
+ ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
+ if (ret)
+ goto err_free_vbox_fb;
+
+ return &vbox_fb->base;
+
+err_free_vbox_fb:
+ kfree(vbox_fb);
+err_unref_obj:
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(ret);
+}
+
+static const struct drm_mode_config_funcs vbox_mode_funcs = {
+ .fb_create = vbox_user_framebuffer_create,
+};
+
+static int vbox_accel_init(struct vbox_private *vbox)
+{
+ unsigned int i;
+
+ vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
+ sizeof(*vbox->vbva_info), GFP_KERNEL);
+ if (!vbox->vbva_info)
+ return -ENOMEM;
+
+ /* Take a command buffer for each screen from the end of usable VRAM. */
+ vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
+
+ vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
+ vbox->available_vram_size,
+ vbox->num_crtcs *
+ VBVA_MIN_BUFFER_SIZE);
+ if (!vbox->vbva_buffers)
+ return -ENOMEM;
+
+ for (i = 0; i < vbox->num_crtcs; ++i)
+ vbva_setup_buffer_context(&vbox->vbva_info[i],
+ vbox->available_vram_size +
+ i * VBVA_MIN_BUFFER_SIZE,
+ VBVA_MIN_BUFFER_SIZE);
+
+ return 0;
+}
+
+static void vbox_accel_fini(struct vbox_private *vbox)
+{
+ vbox_disable_accel(vbox);
+ pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
+}
+
+/** Do we support the VirtualBox 4.3+ mode hint reporting interface? */
+static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
+{
+ u32 have_hints, have_cursor;
+ int ret;
+
+ ret = hgsmi_query_conf(vbox->guest_pool,
+ VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
+ &have_hints);
+ if (ret)
+ return false;
+
+ ret = hgsmi_query_conf(vbox->guest_pool,
+ VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
+ &have_cursor);
+ if (ret)
+ return false;
+
+ return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
+}
+
+static bool vbox_check_supported(u16 id)
+{
+ u16 dispi_id;
+
+ vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
+ dispi_id = inw(VBE_DISPI_IOPORT_DATA);
+
+ return dispi_id == id;
+}
+
+/**
+ * Set up our heaps and data exchange buffers in VRAM before handing the rest
+ * to the memory manager.
+ */
+static int vbox_hw_init(struct vbox_private *vbox)
+{
+ int ret = -ENOMEM;
+
+ vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
+ vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);
+
+ DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
+
+ /* Map guest-heap at end of vram */
+ vbox->guest_heap =
+ pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
+ GUEST_HEAP_SIZE);
+ if (!vbox->guest_heap)
+ return -ENOMEM;
+
+	/* Create guest-heap mem-pool using 2^4 = 16 byte chunks */
+ vbox->guest_pool = gen_pool_create(4, -1);
+ if (!vbox->guest_pool)
+ goto err_unmap_guest_heap;
+
+ ret = gen_pool_add_virt(vbox->guest_pool,
+ (unsigned long)vbox->guest_heap,
+ GUEST_HEAP_OFFSET(vbox),
+ GUEST_HEAP_USABLE_SIZE, -1);
+ if (ret)
+ goto err_destroy_guest_pool;
+
+ ret = hgsmi_test_query_conf(vbox->guest_pool);
+ if (ret) {
+ DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
+ goto err_destroy_guest_pool;
+ }
+
+ /* Reduce available VRAM size to reflect the guest heap. */
+ vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
+	/* Linux drm represents monitors as a 32-bit array, so use at most 32. */
+ hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
+ &vbox->num_crtcs);
+ vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);
+
+ if (!have_hgsmi_mode_hints(vbox)) {
+ ret = -ENOTSUPP;
+ goto err_destroy_guest_pool;
+ }
+
+ vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
+ sizeof(struct vbva_modehint),
+ GFP_KERNEL);
+ if (!vbox->last_mode_hints) {
+ ret = -ENOMEM;
+ goto err_destroy_guest_pool;
+ }
+
+ ret = vbox_accel_init(vbox);
+ if (ret)
+ goto err_destroy_guest_pool;
+
+ return 0;
+
+err_destroy_guest_pool:
+ gen_pool_destroy(vbox->guest_pool);
+err_unmap_guest_heap:
+ pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
+ return ret;
+}
+
+static void vbox_hw_fini(struct vbox_private *vbox)
+{
+ vbox_accel_fini(vbox);
+ gen_pool_destroy(vbox->guest_pool);
+ pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
+}
+
+int vbox_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct vbox_private *vbox;
+ int ret = 0;
+
+ if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
+ return -ENODEV;
+
+ vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
+ if (!vbox)
+ return -ENOMEM;
+
+ dev->dev_private = vbox;
+ vbox->dev = dev;
+
+ mutex_init(&vbox->hw_mutex);
+
+ ret = vbox_hw_init(vbox);
+ if (ret)
+ return ret;
+
+ ret = vbox_mm_init(vbox);
+ if (ret)
+ goto err_hw_fini;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.funcs = (void *)&vbox_mode_funcs;
+ dev->mode_config.min_width = 64;
+ dev->mode_config.min_height = 64;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
+ dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
+
+ ret = vbox_mode_init(dev);
+ if (ret)
+ goto err_drm_mode_cleanup;
+
+ ret = vbox_irq_init(vbox);
+ if (ret)
+ goto err_mode_fini;
+
+ ret = vbox_fbdev_init(dev);
+ if (ret)
+ goto err_irq_fini;
+
+ return 0;
+
+err_irq_fini:
+ vbox_irq_fini(vbox);
+err_mode_fini:
+ vbox_mode_fini(dev);
+err_drm_mode_cleanup:
+ drm_mode_config_cleanup(dev);
+ vbox_mm_fini(vbox);
+err_hw_fini:
+ vbox_hw_fini(vbox);
+ return ret;
+}
+
+void vbox_driver_unload(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ vbox_fbdev_fini(dev);
+ vbox_irq_fini(vbox);
+ vbox_mode_fini(dev);
+ drm_mode_config_cleanup(dev);
+ vbox_mm_fini(vbox);
+ vbox_hw_fini(vbox);
+}
+
+/**
+ * @note this is described in the DRM framework documentation. AST does not
+ * have it, but we get an oops on driver unload if it is not present.
+ */
+void vbox_driver_lastclose(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+
+ if (vbox->fbdev)
+ drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
+}
+
+int vbox_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel, struct drm_gem_object **obj)
+{
+ struct vbox_bo *vboxbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+
+ *obj = &vboxbo->gem;
+
+ return 0;
+}
+
+int vbox_dumb_create(struct drm_file *file,
+ struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
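+ /* Round bpp up to a whole number of bytes when computing the pitch. */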
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = vbox_gem_create(dev, args->size, false, &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+
+ return 0;
+}
+
+static void vbox_bo_unref(struct vbox_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
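+ /* ttm_bo_unref() drops our reference and clears the tbo pointer. */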
+ ttm_bo_unref(&tbo);
+ if (!tbo)
+ *bo = NULL;
+}
+
+void vbox_gem_free_object(struct drm_gem_object *obj)
+{
+ struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
+
+ vbox_bo_unref(&vbox_bo);
+}
+
+static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int
+vbox_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct vbox_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_vbox_bo(obj);
+ *offset = vbox_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
new file mode 100644
index 000000000000..f2b85f3256fa
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -0,0 +1,877 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_mode.c
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include <linux/export.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+#include "hgsmi_channels.h"
+
+static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+ u32 handle, u32 width, u32 height,
+ s32 hot_x, s32 hot_y);
+static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y);
+
+/**
+ * Set a graphics mode. Poke any required values into registers, do an HGSMI
+ * mode set and tell the host we support advanced graphics functions.
+ */
+static void vbox_do_modeset(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox;
+ int width, height, bpp, pitch;
+ unsigned int crtc_id;
+ u16 flags;
+ s32 x_offset, y_offset;
+
+ vbox = crtc->dev->dev_private;
+ width = mode->hdisplay ? mode->hdisplay : 640;
+ height = mode->vdisplay ? mode->vdisplay : 480;
+ crtc_id = vbox_crtc->crtc_id;
+ bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32;
+ pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8;
+ x_offset = vbox->single_framebuffer ? crtc->x : vbox_crtc->x_hint;
+ y_offset = vbox->single_framebuffer ? crtc->y : vbox_crtc->y_hint;
+
+ /*
+ * This is the old way of setting graphics modes. It assumed one screen
+ * and a frame-buffer at the start of video RAM. On older versions of
+ * VirtualBox, certain parts of the code still assume that the first
+ * screen is programmed this way, so try to fake it.
+ */
+ if (vbox_crtc->crtc_id == 0 && crtc->enabled &&
+ vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
+ vbox_crtc->fb_offset % (bpp / 8) == 0) {
+ vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
+ vbox_write_ioport(VBE_DISPI_INDEX_YRES, height);
+ vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
+ vbox_write_ioport(VBE_DISPI_INDEX_BPP,
+ CRTC_FB(crtc)->format->cpp[0] * 8);
+ vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
+ vbox_write_ioport(
+ VBE_DISPI_INDEX_X_OFFSET,
+ vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x);
+ vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
+ vbox_crtc->fb_offset / pitch + crtc->y);
+ }
+
+ flags = VBVA_SCREEN_F_ACTIVE;
+ flags |= (crtc->enabled && !vbox_crtc->blanked) ?
+ 0 : VBVA_SCREEN_F_BLANK;
+ flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
+ hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id,
+ x_offset, y_offset,
+ crtc->x * bpp / 8 + crtc->y * pitch,
+ pitch, width, height,
+ vbox_crtc->blanked ? 0 : bpp, flags);
+}
+
+static int vbox_set_view(struct drm_crtc *crtc)
+{
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbva_infoview *p;
+
+ /*
+ * Tell the host about the view. This design originally targeted the
+ * Windows XP driver architecture and assumed that each screen would
+ * have a dedicated frame buffer with the command buffer following it,
+ * the whole being a "view". The host works out which screen a command
+ * buffer belongs to by checking whether it is in the first view, then
+ * whether it is in the second and so on. The first match wins. We
+ * cheat around this by making the first view be the managed memory
+ * plus the first command buffer, the second the same plus the second
+ * buffer and so on.
+ */
+ p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
+ HGSMI_CH_VBVA, VBVA_INFO_VIEW);
+ if (!p)
+ return -ENOMEM;
+
+ p->view_index = vbox_crtc->crtc_id;
+ p->view_offset = vbox_crtc->fb_offset;
+ p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset +
+ vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
+ p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset;
+
+ hgsmi_buffer_submit(vbox->guest_pool, p);
+ hgsmi_buffer_free(vbox->guest_pool, p);
+
+ return 0;
+}
+
+static void vbox_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox = crtc->dev->dev_private;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ vbox_crtc->blanked = false;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ vbox_crtc->blanked = true;
+ break;
+ }
+
+ mutex_lock(&vbox->hw_mutex);
+ vbox_do_modeset(crtc, &crtc->hwmode);
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/*
+ * Try to map the layout of virtual screens to the range of the input device.
+ * Return true if we need to re-set the crtc modes due to screen offset
+ * changes.
+ */
+static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
+{
+ struct drm_crtc *crtci;
+ struct drm_connector *connectori;
+ struct drm_framebuffer *fb1 = NULL;
+ bool single_framebuffer = true;
+ bool old_single_framebuffer = vbox->single_framebuffer;
+ u16 width = 0, height = 0;
+
+ /*
+ * Are we using an X.Org-style single large frame-buffer for all crtcs?
+ * If so then screen layout can be deduced from the crtc offsets.
+ * Same fall-back if this is the fbdev frame-buffer.
+ */
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
+ if (!fb1) {
+ fb1 = CRTC_FB(crtci);
+ if (to_vbox_framebuffer(fb1) == &vbox->fbdev->afb)
+ break;
+ } else if (CRTC_FB(crtci) && fb1 != CRTC_FB(crtci)) {
+ single_framebuffer = false;
+ }
+ }
+ if (single_framebuffer) {
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+ head) {
+ if (to_vbox_crtc(crtci)->crtc_id != 0)
+ continue;
+
+ vbox->single_framebuffer = true;
+ vbox->input_mapping_width = CRTC_FB(crtci)->width;
+ vbox->input_mapping_height = CRTC_FB(crtci)->height;
+ return old_single_framebuffer !=
+ vbox->single_framebuffer;
+ }
+ }
+ /* Otherwise calculate the total span of all screens. */
+ list_for_each_entry(connectori, &vbox->dev->mode_config.connector_list,
+ head) {
+ struct vbox_connector *vbox_connector =
+ to_vbox_connector(connectori);
+ struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc;
+
+ width = max_t(u16, width, vbox_crtc->x_hint +
+ vbox_connector->mode_hint.width);
+ height = max_t(u16, height, vbox_crtc->y_hint +
+ vbox_connector->mode_hint.height);
+ }
+
+ vbox->single_framebuffer = false;
+ vbox->input_mapping_width = width;
+ vbox->input_mapping_height = height;
+
+ return old_single_framebuffer != vbox->single_framebuffer;
+}
+
+static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *old_fb, int x, int y)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct vbox_framebuffer *vbox_fb;
+ struct vbox_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* Unpin the previous fb. */
+ if (old_fb) {
+ vbox_fb = to_vbox_framebuffer(old_fb);
+ obj = vbox_fb->obj;
+ bo = gem_to_vbox_bo(obj);
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ vbox_bo_unpin(bo);
+ vbox_bo_unreserve(bo);
+ }
+
+ vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc));
+ obj = vbox_fb->obj;
+ bo = gem_to_vbox_bo(obj);
+
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ vbox_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&vbox->fbdev->afb == vbox_fb)
+ vbox_fbdev_set_base(vbox, gpu_addr);
+ vbox_bo_unreserve(bo);
+
+ /* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */
+ vbox_crtc->fb_offset = gpu_addr;
+ if (vbox_set_up_input_mapping(vbox)) {
+ struct drm_crtc *crtci;
+
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+ head) {
+ vbox_set_view(crtc);
+ vbox_do_modeset(crtci, &crtci->mode);
+ }
+ }
+
+ return 0;
+}
+
+static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return vbox_crtc_do_set_base(crtc, old_fb, x, y);
+}
+
+static int vbox_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ int ret;
+
+ vbox_crtc_mode_set_base(crtc, x, y, old_fb);
+
+ mutex_lock(&vbox->hw_mutex);
+ ret = vbox_set_view(crtc);
+ if (!ret)
+ vbox_do_modeset(crtc, mode);
+ hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
+ vbox->input_mapping_width,
+ vbox->input_mapping_height);
+ mutex_unlock(&vbox->hw_mutex);
+
+ return ret;
+}
+
+static void vbox_crtc_disable(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
+ .dpms = vbox_crtc_dpms,
+ .mode_fixup = vbox_crtc_mode_fixup,
+ .mode_set = vbox_crtc_mode_set,
+ /* .mode_set_base = vbox_crtc_mode_set_base, */
+ .disable = vbox_crtc_disable,
+ .load_lut = vbox_crtc_load_lut,
+ .prepare = vbox_crtc_prepare,
+ .commit = vbox_crtc_commit,
+};
+
+static void vbox_crtc_reset(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_funcs vbox_crtc_funcs = {
+ .cursor_move = vbox_cursor_move,
+ .cursor_set2 = vbox_cursor_set2,
+ .reset = vbox_crtc_reset,
+ .set_config = drm_crtc_helper_set_config,
+ /* .gamma_set = vbox_crtc_gamma_set, */
+ .destroy = vbox_crtc_destroy,
+};
+
+static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
+{
+ struct vbox_crtc *vbox_crtc;
+
+ vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL);
+ if (!vbox_crtc)
+ return NULL;
+
+ vbox_crtc->crtc_id = i;
+
+ drm_crtc_init(dev, &vbox_crtc->base, &vbox_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
+ drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
+
+ return vbox_crtc;
+}
+
+static void vbox_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+
+ /* pick the encoder ids */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+
+ return NULL;
+}
+
+static const struct drm_encoder_funcs vbox_enc_funcs = {
+ .destroy = vbox_encoder_destroy,
+};
+
+static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool vbox_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void vbox_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void vbox_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void vbox_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = {
+ .dpms = vbox_encoder_dpms,
+ .mode_fixup = vbox_mode_fixup,
+ .prepare = vbox_encoder_prepare,
+ .commit = vbox_encoder_commit,
+ .mode_set = vbox_encoder_mode_set,
+};
+
+static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
+ unsigned int i)
+{
+ struct vbox_encoder *vbox_encoder;
+
+ vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL);
+ if (!vbox_encoder)
+ return NULL;
+
+ drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ drm_encoder_helper_add(&vbox_encoder->base, &vbox_enc_helper_funcs);
+
+ vbox_encoder->base.possible_crtcs = 1 << i;
+ return &vbox_encoder->base;
+}
+
+/**
+ * Generate EDID data with a mode-unique serial number for the virtual
+ * monitor to try to persuade Unity that different modes correspond to
+ * different monitors and it should not try to force the same resolution on
+ * them.
+ */
+static void vbox_set_edid(struct drm_connector *connector, int width,
+ int height)
+{
+ enum { EDID_SIZE = 128 };
+ unsigned char edid[EDID_SIZE] = {
+ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */
+ 0x58, 0x58, /* manufacturer (VBX) */
+ 0x00, 0x00, /* product code */
+ 0x00, 0x00, 0x00, 0x00, /* serial number goes here */
+ 0x01, /* week of manufacture */
+ 0x00, /* year of manufacture */
+ 0x01, 0x03, /* EDID version */
+ 0x80, /* capabilities - digital */
+ 0x00, /* horiz. res in cm, zero for projectors */
+ 0x00, /* vert. res in cm */
+ 0x78, /* display gamma (120 == 2.2). */
+ 0xEE, /* features (standby, suspend, off, RGB, std */
+ /* colour space, preferred timing mode) */
+ 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
+ /* chromaticity for standard colour space. */
+ 0x00, 0x00, 0x00, /* no default timings */
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, /* no standard timings */
+ 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02,
+ 0x02, 0x02,
+ /* descriptor block 1 goes below */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* descriptor block 2, monitor ranges */
+ 0x00, 0x00, 0x00, 0xFD, 0x00,
+ 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20,
+ /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */
+ 0x20,
+ /* descriptor block 3, monitor name */
+ 0x00, 0x00, 0x00, 0xFC, 0x00,
+ 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r',
+ '\n',
+ /* descriptor block 4: dummy data */
+ 0x00, 0x00, 0x00, 0x10, 0x00,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20,
+ 0x00, /* number of extensions */
+ 0x00 /* checksum goes here */
+ };
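+ /*
+ * EDID stores the detailed-timing pixel clock in units of 10 kHz;
+ * approximate a 60 Hz mode with 6 pixels/lines of blanking per axis.
+ */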
+ int clock = (width + 6) * (height + 6) * 60 / 10000;
+ unsigned int i, sum = 0;
+
+ edid[12] = width & 0xff;
+ edid[13] = width >> 8;
+ edid[14] = height & 0xff;
+ edid[15] = height >> 8;
+ edid[54] = clock & 0xff;
+ edid[55] = clock >> 8;
+ edid[56] = width & 0xff;
+ edid[58] = (width >> 4) & 0xf0;
+ edid[59] = height & 0xff;
+ edid[61] = (height >> 4) & 0xf0;
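+ /* The checksum byte makes all 128 EDID bytes sum to 0 modulo 256. */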
+ for (i = 0; i < EDID_SIZE - 1; ++i)
+ sum += edid[i];
+ edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
+ drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
+}
+
+static int vbox_get_modes(struct drm_connector *connector)
+{
+ struct vbox_connector *vbox_connector = NULL;
+ struct drm_display_mode *mode = NULL;
+ struct vbox_private *vbox = NULL;
+ unsigned int num_modes = 0;
+ int preferred_width, preferred_height;
+
+ vbox_connector = to_vbox_connector(connector);
+ vbox = connector->dev->dev_private;
+ /*
+ * Heuristic: we do not want to tell the host that we support dynamic
+ * resizing unless we feel confident that the user space client using
+ * the video driver can handle hot-plug events. So the first time modes
+ * are queried after a "master" switch we tell the host that we do not,
+ * and immediately after we send the client a hot-plug notification as
+ * a test to see if they will respond and query again.
+ * That is also the reason why capabilities are reported to the host at
+ * this place in the code rather than elsewhere.
+ * We need to report the flags location before reporting the IRQ
+ * capability.
+ */
+ hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
+ HOST_FLAGS_OFFSET);
+ if (vbox_connector->vbox_crtc->crtc_id == 0)
+ vbox_report_caps(vbox);
+ if (!vbox->initial_mode_queried) {
+ if (vbox_connector->vbox_crtc->crtc_id == 0) {
+ vbox->initial_mode_queried = true;
+ vbox_report_hotplug(vbox);
+ }
+ return drm_add_modes_noedid(connector, 800, 600);
+ }
+ num_modes = drm_add_modes_noedid(connector, 2560, 1600);
+ preferred_width = vbox_connector->mode_hint.width ?
+ vbox_connector->mode_hint.width : 1024;
+ preferred_height = vbox_connector->mode_hint.height ?
+ vbox_connector->mode_hint.height : 768;
+ mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height,
+ 60, false, false, false);
+ if (mode) {
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ ++num_modes;
+ }
+ vbox_set_edid(connector, preferred_width, preferred_height);
+ drm_object_property_set_value(
+ &connector->base, vbox->dev->mode_config.suggested_x_property,
+ vbox_connector->vbox_crtc->x_hint);
+ drm_object_property_set_value(
+ &connector->base, vbox->dev->mode_config.suggested_y_property,
+ vbox_connector->vbox_crtc->y_hint);
+
+ return num_modes;
+}
+
+static int vbox_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void vbox_connector_destroy(struct drm_connector *connector)
+{
+ struct vbox_connector *vbox_connector;
+
+ vbox_connector = to_vbox_connector(connector);
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static enum drm_connector_status
+vbox_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct vbox_connector *vbox_connector;
+
+ vbox_connector = to_vbox_connector(connector);
+
+ return vbox_connector->mode_hint.disconnected ?
+ connector_status_disconnected : connector_status_connected;
+}
+
+static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
+ u32 max_y)
+{
+ struct vbox_connector *vbox_connector;
+ struct drm_device *dev;
+ struct drm_display_mode *mode, *iterator;
+
+ vbox_connector = to_vbox_connector(connector);
+ dev = vbox_connector->base.dev;
+ list_for_each_entry_safe(mode, iterator, &connector->modes, head) {
+ list_del(&mode->head);
+ drm_mode_destroy(dev, mode);
+ }
+
+ return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
+}
+
+static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
+ .mode_valid = vbox_mode_valid,
+ .get_modes = vbox_get_modes,
+ .best_encoder = vbox_best_single_encoder,
+};
+
+static const struct drm_connector_funcs vbox_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = vbox_connector_detect,
+ .fill_modes = vbox_fill_modes,
+ .destroy = vbox_connector_destroy,
+};
+
+static int vbox_connector_init(struct drm_device *dev,
+ struct vbox_crtc *vbox_crtc,
+ struct drm_encoder *encoder)
+{
+ struct vbox_connector *vbox_connector;
+ struct drm_connector *connector;
+
+ vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL);
+ if (!vbox_connector)
+ return -ENOMEM;
+
+ connector = &vbox_connector->base;
+ vbox_connector->vbox_crtc = vbox_crtc;
+
+ drm_connector_init(dev, connector, &vbox_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ drm_connector_helper_add(connector, &vbox_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_mode_create_suggested_offset_properties(dev);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, -1);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, -1);
+ drm_connector_register(connector);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+int vbox_mode_init(struct drm_device *dev)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct vbox_crtc *vbox_crtc;
+ unsigned int i;
+ int ret;
+
+ /* vbox_cursor_init(dev); */
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ vbox_crtc = vbox_crtc_init(dev, i);
+ if (!vbox_crtc)
+ return -ENOMEM;
+ encoder = vbox_encoder_init(dev, i);
+ if (!encoder)
+ return -ENOMEM;
+ ret = vbox_connector_init(dev, vbox_crtc, encoder);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void vbox_mode_fini(struct drm_device *dev)
+{
+ /* vbox_cursor_fini(dev); */
+}
+
+/**
+ * Copy the ARGB image and generate the mask, which is needed in case the host
+ * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set
+ * if the corresponding alpha value in the ARGB image is greater than 0xF0.
+ */
+static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
+ size_t mask_size)
+{
+ size_t line_size = (width + 7) / 8;
+ u32 i, j;
+
+ memcpy(dst + mask_size, src, width * height * 4);
+ for (i = 0; i < height; ++i)
+ for (j = 0; j < width; ++j)
+ if (((u32 *)src)[i * width + j] > 0xf0000000)
+ dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
+}
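+
+/*
+ * Worked example for the mask layout above, not part of the driver: a
+ * 64x64 cursor gives line_size = (64 + 7) / 8 = 8 bytes, so the 1BPP AND
+ * mask occupies 8 * 64 = 512 bytes at the start of dst, and the ARGB image
+ * (64 * 64 * 4 = 16384 bytes) follows at dst + mask_size.
+ */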
+
+static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+ u32 handle, u32 width, u32 height,
+ s32 hot_x, s32 hot_y)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct ttm_bo_kmap_obj uobj_map;
+ size_t data_size, mask_size;
+ struct drm_gem_object *obj;
+ u32 flags, caps = 0;
+ struct vbox_bo *bo;
+ bool src_isiomem;
+ u8 *dst = NULL;
+ u8 *src;
+ int ret;
+
+ /*
+ * Re-set this regularly as in 5.0.20 and earlier the information was
+ * lost on save and restore.
+ */
+ hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
+ vbox->input_mapping_width,
+ vbox->input_mapping_height);
+ if (!handle) {
+ bool cursor_enabled = false;
+ struct drm_crtc *crtci;
+
+ /* Hide cursor. */
+ vbox_crtc->cursor_enabled = false;
+ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+ head) {
+ if (to_vbox_crtc(crtci)->cursor_enabled)
+ cursor_enabled = true;
+ }
+
+ if (!cursor_enabled)
+ hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
+ 0, 0, NULL, 0);
+ return 0;
+ }
+
+ vbox_crtc->cursor_enabled = true;
+
+ if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
+ width == 0 || height == 0)
+ return -EINVAL;
+
+ ret = hgsmi_query_conf(vbox->guest_pool,
+ VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
+ if (ret)
+ return ret;
+
+ if (!(caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
+ /*
+ * -EINVAL means cursor_set2() not supported, -EAGAIN means
+ * retry at once.
+ */
+ return -EBUSY;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+ return -ENOENT;
+ }
+
+ bo = gem_to_vbox_bo(obj);
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ goto out_unref_obj;
+
+ /*
+ * The mask must be calculated based on the alpha
+ * channel, one bit per ARGB word, and must be 32-bit
+ * padded.
+ */
+ mask_size = ((width + 7) / 8 * height + 3) & ~3;
+ data_size = width * height * 4 + mask_size;
+ vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width);
+ vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height);
+ vbox->cursor_width = width;
+ vbox->cursor_height = height;
+ vbox->cursor_data_size = data_size;
+ dst = vbox->cursor_data;
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
+ if (ret) {
+ vbox->cursor_data_size = 0;
+ goto out_unreserve_bo;
+ }
+
+ src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
+ if (src_isiomem) {
+ DRM_ERROR("src cursor bo not in main memory\n");
+ ret = -EIO;
+ goto out_unmap_bo;
+ }
+
+ copy_cursor_image(src, dst, width, height, mask_size);
+
+ flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
+ VBOX_MOUSE_POINTER_ALPHA;
+ ret = hgsmi_update_pointer_shape(vbox->guest_pool, flags,
+ vbox->cursor_hot_x, vbox->cursor_hot_y,
+ width, height, dst, data_size);
+out_unmap_bo:
+ ttm_bo_kunmap(&uobj_map);
+out_unreserve_bo:
+ vbox_bo_unreserve(bo);
+out_unref_obj:
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ u32 flags = VBOX_MOUSE_POINTER_VISIBLE |
+ VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA;
+ s32 crtc_x =
+ vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint;
+ s32 crtc_y =
+ vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint;
+ u32 host_x, host_y;
+ u32 hot_x = 0;
+ u32 hot_y = 0;
+ int ret;
+
+ /*
+ * We compare these to unsigned values later and so do not
+ * need to handle negatives here.
+ */
+ if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0)
+ return 0;
+
+ ret = hgsmi_cursor_position(vbox->guest_pool, true, x + crtc_x,
+ y + crtc_y, &host_x, &host_y);
+
+ /*
+ * The only reason we have vbox_cursor_move() is that some older clients
+ * might use DRM_IOCTL_MODE_CURSOR instead of DRM_IOCTL_MODE_CURSOR2 and
+ * use DRM_MODE_CURSOR_MOVE to set the hot-spot.
+ *
+ * However VirtualBox 5.0.20 and earlier has a bug causing it to return
+ * 0,0 as host cursor location after a save and restore.
+ *
+ * To work around this we ignore a 0, 0 return, since missing the odd
+ * time when it legitimately happens is not going to hurt much.
+ */
+ if (ret || (host_x == 0 && host_y == 0))
+ return ret;
+
+ if (x + crtc_x < host_x)
+ hot_x = min(host_x - x - crtc_x, vbox->cursor_width);
+ if (y + crtc_y < host_y)
+ hot_y = min(host_y - y - crtc_y, vbox->cursor_height);
+
+ if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y)
+ return 0;
+
+ vbox->cursor_hot_x = hot_x;
+ vbox->cursor_hot_y = hot_y;
+
+ return hgsmi_update_pointer_shape(vbox->guest_pool, flags,
+ hot_x, hot_y, vbox->cursor_width, vbox->cursor_height,
+ vbox->cursor_data, vbox->cursor_data_size);
+}
diff --git a/drivers/staging/vboxvideo/vbox_prime.c b/drivers/staging/vboxvideo/vbox_prime.c
new file mode 100644
index 000000000000..b7453e427a1d
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_prime.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ * Copyright 2017 Canonical
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andreas Pokorny
+ */
+
+#include "vbox_drv.h"
+
+/*
+ * Based on qxl_prime.c:
+ * Empty Implementations as there should not be any other driver for a virtual
+ * device that might share buffers with vboxvideo
+ */
+
+int vbox_gem_prime_pin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENOSYS;
+}
+
+void vbox_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+struct drm_gem_object *vbox_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENOSYS);
+}
+
+void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENOSYS;
+}
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
new file mode 100644
index 000000000000..34a905d40735
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_ttm.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>
+ */
+#include "vbox_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct vbox_private, ttm.bdev);
+}
+
+static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+/**
+ * Adds the vbox memory manager object/structures to the global memory manager.
+ */
+static int vbox_ttm_global_init(struct vbox_private *vbox)
+{
+ struct drm_global_reference *global_ref;
+ int ret;
+
+ global_ref = &vbox->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &vbox_ttm_mem_global_init;
+ global_ref->release = &vbox_ttm_mem_global_release;
+ ret = drm_global_item_ref(global_ref);
+ if (ret) {
+ DRM_ERROR("Failed setting up TTM memory subsystem.\n");
+ return ret;
+ }
+
+ vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
+ global_ref = &vbox->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+
+ ret = drm_global_item_ref(global_ref);
+ if (ret) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&vbox->ttm.mem_global_ref);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * Removes the vbox memory manager object from the global memory manager.
+ */
+static void vbox_ttm_global_release(struct vbox_private *vbox)
+{
+ drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&vbox->ttm.mem_global_ref);
+}
+
+static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct vbox_bo *bo;
+
+ bo = container_of(tbo, struct vbox_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &vbox_bo_ttm_destroy)
+ return true;
+
+ return false;
+}
+
+static int
+vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct vbox_bo *vboxbo = vbox_bo(bo);
+
+ if (!vbox_ttm_bo_is_vbox_bo(bo))
+ return;
+
+ vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
+ *pl = vboxbo->placement;
+}
+
+static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ return 0;
+}
+
+static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct vbox_private *vbox = vbox_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+static int vbox_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+ return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
+}
+
+static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func vbox_tt_backend_func = {
+ .destroy = &vbox_ttm_backend_destroy,
+};
+
+static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ u32 page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ tt->func = &vbox_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+
+ return tt;
+}
+
+static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver vbox_bo_driver = {
+ .ttm_tt_create = vbox_ttm_tt_create,
+ .ttm_tt_populate = vbox_ttm_tt_populate,
+ .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
+ .init_mem_type = vbox_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = vbox_bo_evict_flags,
+ .move = vbox_bo_move,
+ .verify_access = vbox_bo_verify_access,
+ .io_mem_reserve = &vbox_ttm_io_mem_reserve,
+ .io_mem_free = &vbox_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
+};
+
+int vbox_mm_init(struct vbox_private *vbox)
+{
+ int ret;
+ struct drm_device *dev = vbox->dev;
+ struct ttm_bo_device *bdev = &vbox->ttm.bdev;
+
+ ret = vbox_ttm_global_init(vbox);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&vbox->ttm.bdev,
+ vbox->ttm.bo_global_ref.ref.object,
+ &vbox_bo_driver,
+ dev->anon_inode->i_mapping,
+ DRM_FILE_PAGE_OFFSET, true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ goto err_ttm_global_release;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ vbox->available_vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ goto err_device_release;
+ }
+
+#ifdef DRM_MTRR_WC
+ vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+#else
+ vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+#endif
+ return 0;
+
+err_device_release:
+ ttm_bo_device_release(&vbox->ttm.bdev);
+err_ttm_global_release:
+ vbox_ttm_global_release(vbox);
+ return ret;
+}
+
+void vbox_mm_fini(struct vbox_private *vbox)
+{
+#ifdef DRM_MTRR_WC
+ drm_mtrr_del(vbox->fb_mtrr,
+ pci_resource_start(vbox->dev->pdev, 0),
+ pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC);
+#else
+ arch_phys_wc_del(vbox->fb_mtrr);
+#endif
+ ttm_bo_device_release(&vbox->ttm.bdev);
+ vbox_ttm_global_release(vbox);
+}
+
+void vbox_ttm_placement(struct vbox_bo *bo, int domain)
+{
+ unsigned int i;
+ u32 c = 0;
+
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++].flags =
+ TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++].flags =
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++].flags =
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+
+ for (i = 0; i < c; ++i) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
+ }
+}
+
+int vbox_bo_create(struct drm_device *dev, int size, int align,
+ u32 flags, struct vbox_bo **pvboxbo)
+{
+ struct vbox_private *vbox = dev->dev_private;
+ struct vbox_bo *vboxbo;
+ size_t acc_size;
+ int ret;
+
+ vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
+ if (!vboxbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &vboxbo->gem, size);
+ if (ret)
+ goto err_free_vboxbo;
+
+ vboxbo->bo.bdev = &vbox->ttm.bdev;
+
+ vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
+ sizeof(struct vbox_bo));
+
+ ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
+ ttm_bo_type_device, &vboxbo->placement,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
+ NULL, NULL, vbox_bo_ttm_destroy);
+ if (ret)
+ goto err_free_vboxbo;
+
+ *pvboxbo = vboxbo;
+
+ return 0;
+
+err_free_vboxbo:
+ kfree(vboxbo);
+ return ret;
+}
+
+static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = vbox_bo_gpu_offset(bo);
+
+ return 0;
+ }
+
+ vbox_ttm_placement(bo, pl_flag);
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+
+ if (gpu_addr)
+ *gpu_addr = vbox_bo_gpu_offset(bo);
+
+ return 0;
+}
+
+int vbox_bo_unpin(struct vbox_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Move a vbox-owned buffer object to system memory if no one else has it
+ * pinned. The caller must have pinned it previously, and this call will
+ * release the caller's pin.
+ */
+int vbox_bo_push_sysram(struct vbox_bo *bo)
+{
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct vbox_private *vbox;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return -EINVAL;
+
+ file_priv = filp->private_data;
+ vbox = file_priv->minor->dev->dev_private;
+
+ return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
+}
diff --git a/drivers/staging/vboxvideo/vboxvideo.h b/drivers/staging/vboxvideo/vboxvideo.h
new file mode 100644
index 000000000000..d835d75d761c
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo.h
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ */
+
+#ifndef __VBOXVIDEO_H__
+#define __VBOXVIDEO_H__
+
+/*
+ * This should be in sync with monitorCount <xsd:maxInclusive value="64"/> in
+ * src/VBox/Main/xml/VirtualBox-settings-common.xsd
+ */
+#define VBOX_VIDEO_MAX_SCREENS 64
+
+/*
+ * The last 4096 bytes of the guest VRAM contain the generic info for all
+ * DualView chunks: sizes and offsets of chunks. This is filled in by the
+ * miniport.
+ *
+ * The last 4096 bytes of each chunk contain chunk-specific data: framebuffer
+ * info, etc. This is used exclusively by the corresponding instance of a
+ * display driver.
+ *
+ * The VRAM layout:
+ * Last 4096 bytes - Adapter information area.
+ * 4096 bytes aligned miniport heap (value specified in the config rounded up).
+ * Slack - what is left after dividing the VRAM.
+ * 4096 bytes aligned framebuffers:
+ * the last 4096 bytes of each framebuffer is the display information area.
+ *
+ * The Virtual Graphics Adapter information in the guest VRAM is stored by the
+ * guest video driver using structures prepended by VBOXVIDEOINFOHDR.
+ *
+ * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO
+ * the host starts to process the info. The first element at the start of
+ * the 4096 bytes region should normally be a LINK that points to the
+ * actual information chain. That way the guest driver can have some
+ * fixed layout of the information memory block and just rewrite
+ * the link to point to the relevant memory chain.
+ *
+ * The processing stops at the END element.
+ *
+ * The host can access the memory only while the port IO is being processed.
+ * All data that will be needed later must be copied from these 4096 bytes.
+ * The rest of the VRAM can be used by the host until the mode is disabled.
+ *
+ * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO
+ * to disable the mode.
+ *
+ * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
+ * from the host and issue commands to the host.
+ *
+ * Once the guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, the
+ * following operations with the VBE data register can be performed:
+ *
+ * Operation            Result
+ * write 16 bit value   NOP
+ * read 16 bit value    count of monitors
+ * write 32 bit value   set the vbox cmd value; the cmd is processed by the host
+ * read 32 bit value    result of the last vbox command is returned
+ */
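+
+/*
+ * Illustrative sketch, not part of this header: a 32 bit command write as
+ * described above (the port constants come from the companion
+ * vboxvideo_vbe.h header and are assumptions here):
+ *
+ *	outw(VBE_DISPI_INDEX_VBOX_VIDEO, VBE_DISPI_IOPORT_INDEX);
+ *	outl(cmd, VBE_DISPI_IOPORT_DATA);
+ *	result = inl(VBE_DISPI_IOPORT_DATA);
+ */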
+
+/**
+ * VBVA command header.
+ *
+ * @todo Where does this fit in?
+ */
+struct vbva_cmd_hdr {
+ /** Coordinates of affected rectangle. */
+ s16 x;
+ s16 y;
+ u16 w;
+ u16 h;
+} __packed;
+
+/** @name VBVA ring defines.
+ *
+ * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts of
+ * data. For example big bitmaps which do not fit in the buffer.
+ *
+ * Guest starts writing to the buffer by initializing a record entry in the
+ * records queue. VBVA_F_RECORD_PARTIAL indicates that the record is being
+ * written. As data is written to the ring buffer, the guest increases
+ * free_offset.
+ *
+ * The host reads the records on flushes and processes all completed records.
+ * When the host encounters a situation where only a partial record is present
+ * and len_and_flags & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE -
+ * VBVA_RING_BUFFER_THRESHOLD, the host fetches all available record data and
+ * updates data_offset. After that, on each flush the host continues fetching
+ * the data
+ * until the record is completed.
+ *
+ */
+#define VBVA_RING_BUFFER_SIZE (4194304 - 1024)
+#define VBVA_RING_BUFFER_THRESHOLD (4096)
+
+#define VBVA_MAX_RECORDS (64)
+
+#define VBVA_F_MODE_ENABLED 0x00000001u
+#define VBVA_F_MODE_VRDP 0x00000002u
+#define VBVA_F_MODE_VRDP_RESET 0x00000004u
+#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u
+
+#define VBVA_F_STATE_PROCESSING 0x00010000u
+
+#define VBVA_F_RECORD_PARTIAL 0x80000000u
+
+/**
+ * VBVA record.
+ */
+struct vbva_record {
+ /** The length of the record. Changed by guest. */
+ u32 len_and_flags;
+} __packed;
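+
+/*
+ * Illustrative sketch of the guest-side write flow described above, not part
+ * of this header (the surrounding helpers and names are hypothetical):
+ *
+ *	rec = &vbva->records[vbva->record_free_index++ % VBVA_MAX_RECORDS];
+ *	rec->len_and_flags = VBVA_F_RECORD_PARTIAL;
+ *	...copy data at vbva->free_offset, adding each chunk's length to
+ *	   rec->len_and_flags and advancing free_offset modulo the ring size...
+ *	rec->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
+ */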
+
+/*
+ * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of
+ * the runtime heapsimple API. Use a minimum of 2 pages here, because the info
+ * area may also contain other data (for example the hgsmi_host_flags
+ * structure).
+ */
+#define VBVA_ADAPTER_INFORMATION_SIZE 65536
+#define VBVA_MIN_BUFFER_SIZE 65536
+
+/* The port IO value to stop the adapter interpreting the adapter memory. */
+#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF
+
+/* The port IO value to let the adapter interpret the adapter memory. */
+#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000
+
+/* The port IO value to let the adapter interpret the display memory.
+ * The display number is encoded in the low 16 bits.
+ */
+#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000
+
+struct vbva_host_flags {
+ u32 host_events;
+ u32 supported_orders;
+} __packed;
+
+struct vbva_buffer {
+ struct vbva_host_flags host_flags;
+
+ /* The offset where the data start in the buffer. */
+ u32 data_offset;
+ /* The offset where next data must be placed in the buffer. */
+ u32 free_offset;
+
+ /* The queue of record descriptions. */
+ struct vbva_record records[VBVA_MAX_RECORDS];
+ u32 record_first_index;
+ u32 record_free_index;
+
+ /* Space to leave free when large partial records are transferred. */
+ u32 partial_write_tresh;
+
+ u32 data_len;
+ /* variable size for the rest of the vbva_buffer area in VRAM. */
+ u8 data[0];
+} __packed;
+
+#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
+
+/* guest->host commands */
+#define VBVA_QUERY_CONF32 1
+#define VBVA_SET_CONF32 2
+#define VBVA_INFO_VIEW 3
+#define VBVA_INFO_HEAP 4
+#define VBVA_FLUSH 5
+#define VBVA_INFO_SCREEN 6
+#define VBVA_ENABLE 7
+#define VBVA_MOUSE_POINTER_SHAPE 8
+/* informs host about HGSMI caps. see vbva_caps below */
+#define VBVA_INFO_CAPS 12
+/* configures scanline, see VBVASCANLINECFG below */
+#define VBVA_SCANLINE_CFG 13
+/* requests scanline info, see VBVASCANLINEINFO below */
+#define VBVA_SCANLINE_INFO 14
+/* inform host about VBVA Command submission */
+#define VBVA_CMDVBVA_SUBMIT 16
+/* inform host about VBVA Command submission */
+#define VBVA_CMDVBVA_FLUSH 17
+/* G->H DMA command */
+#define VBVA_CMDVBVA_CTL 18
+/* Query most recent mode hints sent */
+#define VBVA_QUERY_MODE_HINTS 19
+/**
+ * Report the guest virtual desktop position and size for mapping host and
+ * guest pointer positions.
+ */
+#define VBVA_REPORT_INPUT_MAPPING 20
+/** Report the guest cursor position and query the host position. */
+#define VBVA_CURSOR_POSITION 21
+
+/* host->guest commands */
+#define VBVAHG_EVENT 1
+#define VBVAHG_DISPLAY_CUSTOM 2
+
+/* vbva_conf32::index */
+#define VBOX_VBVA_CONF32_MONITOR_COUNT 0
+#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1
+/**
+ * Returns VINF_SUCCESS if the host can report mode hints via VBVA.
+ * Set value to VERR_NOT_SUPPORTED before calling.
+ */
+#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2
+/**
+ * Returns VINF_SUCCESS if the host can report guest cursor enabled status via
+ * VBVA. Set value to VERR_NOT_SUPPORTED before calling.
+ */
+#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3
+/**
+ * Returns the currently available host cursor capabilities. Available if
+ * vbva_conf32::VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success.
+ * @see VMMDevReqMouseStatus::mouseFeatures.
+ */
+#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4
+/** Returns the supported flags in vbva_infoscreen::flags. */
+#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5
+/** Returns the max size of VBVA record. */
+#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6
+
+struct vbva_conf32 {
+ u32 index;
+ u32 value;
+} __packed;
+
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 BIT(0)
+/**
+ * Guest cursor capability: can the host show a hardware cursor at the host
+ * pointer location?
+ */
+#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE BIT(1)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 BIT(2)
+/** Reserved for historical reasons. Must always be unset. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 BIT(3)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 BIT(4)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 BIT(5)
+
+struct vbva_infoview {
+ /* Index of the screen, assigned by the guest. */
+ u32 view_index;
+
+ /* The screen offset in VRAM, the framebuffer starts here. */
+ u32 view_offset;
+
+ /* The size of the VRAM memory that can be used for the view. */
+ u32 view_size;
+
+ /* The recommended maximum size of the VRAM memory for the screen. */
+ u32 max_screen_size;
+} __packed;
+
+struct vbva_flush {
+ u32 reserved;
+} __packed;
+
+/* vbva_infoscreen::flags */
+#define VBVA_SCREEN_F_NONE 0x0000
+#define VBVA_SCREEN_F_ACTIVE 0x0001
+/**
+ * The virtual monitor has been disabled by the guest and should be removed
+ * by the host and ignored for purposes of pointer position calculation.
+ */
+#define VBVA_SCREEN_F_DISABLED 0x0002
+/**
+ * The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using width, height, etc values from the vbva_infoscreen
+ * request.
+ */
+#define VBVA_SCREEN_F_BLANK 0x0004
+/**
+ * The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using the previous mode values for width, height, etc.
+ */
+#define VBVA_SCREEN_F_BLANK2 0x0008
+
+struct vbva_infoscreen {
+ /* Which view contains the screen. */
+ u32 view_index;
+
+ /* Physical X origin relative to the primary screen. */
+ s32 origin_x;
+
+ /* Physical Y origin relative to the primary screen. */
+ s32 origin_y;
+
+ /* Offset of visible framebuffer relative to the framebuffer start. */
+ u32 start_offset;
+
+ /* The scan line size in bytes. */
+ u32 line_size;
+
+ /* Width of the screen. */
+ u32 width;
+
+ /* Height of the screen. */
+ u32 height;
+
+ /* Color depth. */
+ u16 bits_per_pixel;
+
+ /* VBVA_SCREEN_F_* */
+ u16 flags;
+} __packed;
+
+/* vbva_enable::flags */
+#define VBVA_F_NONE 0x00000000
+#define VBVA_F_ENABLE 0x00000001
+#define VBVA_F_DISABLE 0x00000002
+/* extended VBVA to be used with WDDM */
+#define VBVA_F_EXTENDED 0x00000004
+/* vbva offset is absolute VRAM offset */
+#define VBVA_F_ABSOFFSET 0x00000008
+
+struct vbva_enable {
+ u32 flags;
+ u32 offset;
+ s32 result;
+} __packed;
+
+struct vbva_enable_ex {
+ struct vbva_enable base;
+ u32 screen_id;
+} __packed;
+
+struct vbva_mouse_pointer_shape {
+ /* The host result. */
+ s32 result;
+
+ /* VBOX_MOUSE_POINTER_* bit flags. */
+ u32 flags;
+
+ /* X coordinate of the hot spot. */
+ u32 hot_x;
+
+ /* Y coordinate of the hot spot. */
+ u32 hot_y;
+
+ /* Width of the pointer in pixels. */
+ u32 width;
+
+ /* Height of the pointer in scanlines. */
+ u32 height;
+
+	/* Pointer data.
+	 *
+	 * The data consists of a 1 bpp AND mask followed by a 32 bpp XOR
+	 * (color) mask.
+	 *
+	 * For pointers without an alpha channel the XOR mask pixels are
+	 * 32 bit values: (lsb)BGR0(msb). For pointers with an alpha channel
+	 * the XOR mask consists of (lsb)BGRA(msb) 32 bit values.
+	 *
+	 * The guest driver must create the AND mask for pointers with an
+	 * alpha channel, so that if the host does not support alpha the
+	 * pointer can be displayed as a normal color pointer. The AND mask
+	 * can be constructed from the alpha values; for example, an alpha
+	 * value >= 0xf0 means bit 0 in the AND mask.
+	 *
+	 * The AND mask is a 1 bpp bitmap with byte-aligned scanlines, so its
+	 * size is and_len = (width + 7) / 8 * height. The padding bits at the
+	 * end of each scanline are undefined.
+	 *
+	 * The XOR mask follows the AND mask at the next 4-byte aligned
+	 * offset: u8 *xor = and + ((and_len + 3) & ~3).
+	 * Bytes in the gap between the AND and the XOR mask are undefined.
+	 * XOR mask scanlines have no gap between them and the size of the
+	 * XOR mask is xor_len = width * 4 * height.
+	 *
+	 * Preallocate 4 bytes for accessing the actual data as p->data.
+	 */
+ u8 data[4];
+} __packed;
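
[Editorial sketch, not part of the patch: the mask layout described in the
comment above reduces to a little offset arithmetic. The helper name is an
assumption.]

static u32 vbox_pointer_data_size(u32 width, u32 height)
{
	u32 and_len = ((width + 7) / 8) * height; /* 1 bpp, byte-aligned rows */
	u32 xor_off = (and_len + 3) & ~3U;        /* XOR mask is 4-byte aligned */
	u32 xor_len = width * 4 * height;         /* 32 bpp, no row padding */

	return xor_off + xor_len;                 /* total size of data[] */
}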
+
+/**
+ * @name vbva_mouse_pointer_shape::flags
+ * @note The VBOX_MOUSE_POINTER_* flags are used by the guest video driver;
+ * the values must be <= 0x8000 and must not be changed.
+ * @{
+ */
+
+/** pointer is visible */
+#define VBOX_MOUSE_POINTER_VISIBLE 0x0001
+/** pointer has alpha channel */
+#define VBOX_MOUSE_POINTER_ALPHA 0x0002
+/** pointerData contains new pointer shape */
+#define VBOX_MOUSE_POINTER_SHAPE 0x0004
+
+/** @} */
+
+/*
+ * The guest driver can handle asynchronous guest command completion by
+ * reading the command offset from an I/O port.
+ */
+#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001
+/* the guest driver can handle video adapter IRQs */
+#define VBVACAPS_IRQ 0x00000002
+/** The guest can read video mode hints sent via VBVA. */
+#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004
+/** The guest can switch to a software cursor on demand. */
+#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008
+/** The guest does not depend on host handling the VBE registers. */
+#define VBVACAPS_USE_VBVA_ONLY 0x00000010
+
+struct vbva_caps {
+ s32 rc;
+ u32 caps;
+} __packed;
+
+/** Query the most recent mode hints received from the host. */
+struct vbva_query_mode_hints {
+ /** The maximum number of screens to return hints for. */
+ u16 hints_queried_count;
+ /** The size of the mode hint structures directly following this one. */
+ u16 hint_structure_guest_size;
+ /** Return code for the operation. Initialise to VERR_NOT_SUPPORTED. */
+ s32 rc;
+} __packed;
+
+/**
+ * Structure in which a mode hint is returned. The guest allocates an array
+ * of these immediately after the vbva_query_mode_hints structure.
+ * To accommodate future extensions, the vbva_query_mode_hints structure
+ * specifies the size of the vbva_modehint structures allocated by the guest,
+ * and the host only fills out structure elements which fit into that size. The
+ * host should fill any unused members (e.g. dx, dy) or structure space on the
+ * end with ~0. The whole structure can legally be set to ~0 to skip a screen.
+ */
+struct vbva_modehint {
+ u32 magic;
+ u32 cx;
+ u32 cy;
+ u32 bpp; /* Which has never been used... */
+ u32 display;
+ u32 dx; /**< X offset into the virtual frame-buffer. */
+ u32 dy; /**< Y offset into the virtual frame-buffer. */
+ u32 enabled; /* Not flags. Add new members for new flags. */
+} __packed;
+
+#define VBVAMODEHINT_MAGIC 0x0801add9u
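
[Editorial sketch, not part of the patch: consuming a hints array assumed to
have been filled in by the host, e.g. via hgsmi_get_mode_hints() declared in
vboxvideo_guest.h below. It illustrates the skip-screen convention.]

static void log_mode_hints(const struct vbva_modehint *hints, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* A screen skipped by the host has the whole entry set to ~0 */
		if (hints[i].magic != VBVAMODEHINT_MAGIC)
			continue;
		if (hints[i].enabled)
			pr_info("screen %u: %ux%u\n", hints[i].display,
				hints[i].cx, hints[i].cy);
	}
}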
+
+/**
+ * Report the rectangle relative to which absolute pointer events should be
+ * expressed. This information remains valid until the next VBVA resize event
+ * for any screen, at which time it is reset to the bounding rectangle of all
+ * virtual screens and must be re-set.
+ * @see VBVA_REPORT_INPUT_MAPPING.
+ */
+struct vbva_report_input_mapping {
+ s32 x; /**< Upper left X co-ordinate relative to the first screen. */
+ s32 y; /**< Upper left Y co-ordinate relative to the first screen. */
+ u32 cx; /**< Rectangle width. */
+ u32 cy; /**< Rectangle height. */
+} __packed;
+
+/**
+ * Report the guest cursor position and query the host one. The host may wish
+ * to use the guest information to re-position its own cursor (though this is
+ * currently unlikely).
+ * @see VBVA_CURSOR_POSITION
+ */
+struct vbva_cursor_position {
+ u32 report_position; /**< Are we reporting a position? */
+ u32 x; /**< Guest cursor X position */
+ u32 y; /**< Guest cursor Y position */
+} __packed;
+
+#endif
diff --git a/drivers/staging/vboxvideo/vboxvideo_guest.h b/drivers/staging/vboxvideo/vboxvideo_guest.h
new file mode 100644
index 000000000000..d09da841711a
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo_guest.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __VBOXVIDEO_GUEST_H__
+#define __VBOXVIDEO_GUEST_H__
+
+#include <linux/genalloc.h>
+#include "vboxvideo.h"
+
+/**
+ * Structure grouping the context needed for sending graphics acceleration
+ * information to the host via VBVA. Each screen has its own VBVA buffer.
+ */
+struct vbva_buf_ctx {
+ /** Offset of the buffer in the VRAM section for the screen */
+ u32 buffer_offset;
+ /** Length of the buffer in bytes */
+ u32 buffer_length;
+ /** Set if we wrote to the buffer faster than the host could read it */
+ bool buffer_overflow;
+ /** VBVA record that we are currently preparing for the host, or NULL */
+ struct vbva_record *record;
+ /**
+ * Pointer to the VBVA buffer mapped into the current address space.
+ * Will be NULL if VBVA is not enabled.
+ */
+ struct vbva_buffer *vbva;
+};
+
+/**
+ * @name Base HGSMI APIs
+ * @{
+ */
+int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location);
+int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps);
+int hgsmi_test_query_conf(struct gen_pool *ctx);
+int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret);
+int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
+ u32 hot_x, u32 hot_y, u32 width, u32 height,
+ u8 *pixels, u32 len);
+int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
+ u32 x, u32 y, u32 *x_host, u32 *y_host);
+/** @} */
+
+/**
+ * @name VBVA APIs
+ * @{
+ */
+bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ struct vbva_buffer *vbva, s32 screen);
+void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ s32 screen);
+bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
+ struct gen_pool *ctx);
+void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx);
+bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ const void *p, u32 len);
+void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
+ u32 buffer_offset, u32 buffer_length);
+/** @} */
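
[Editorial sketch, not part of the patch: the intended calling sequence for
the VBVA APIs above, with error handling trimmed; cmd and len are
placeholders.]

static void vbva_send_command(struct vbva_buf_ctx *vbva_ctx,
			      struct gen_pool *guest_pool,
			      const void *cmd, u32 len)
{
	if (!vbva_buffer_begin_update(vbva_ctx, guest_pool))
		return;	/* VBVA disabled or record queue full */

	vbva_write(vbva_ctx, guest_pool, cmd, len);
	vbva_buffer_end_update(vbva_ctx);
}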
+
+/**
+ * @name Modesetting APIs
+ * @{
+ */
+void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
+ s32 origin_x, s32 origin_y, u32 start_offset,
+ u32 pitch, u32 width, u32 height,
+ u16 bpp, u16 flags);
+int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
+ u32 width, u32 height);
+int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
+ struct vbva_modehint *hints);
+/** @} */
+
+#endif
diff --git a/drivers/staging/vboxvideo/vboxvideo_vbe.h b/drivers/staging/vboxvideo/vboxvideo_vbe.h
new file mode 100644
index 000000000000..f842f4d9c80a
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo_vbe.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __VBOXVIDEO_VBE_H__
+#define __VBOXVIDEO_VBE_H__
+
+/* GUEST <-> HOST Communication API */
+
+/**
+ * @todo FIXME: Either dynamically ask the host for this or put it somewhere
+ * high in physical memory like 0xE0000000.
+ */
+
+#define VBE_DISPI_BANK_ADDRESS 0xA0000
+#define VBE_DISPI_BANK_SIZE_KB 64
+
+#define VBE_DISPI_MAX_XRES 16384
+#define VBE_DISPI_MAX_YRES 16384
+#define VBE_DISPI_MAX_BPP 32
+
+#define VBE_DISPI_IOPORT_INDEX 0x01CE
+#define VBE_DISPI_IOPORT_DATA 0x01CF
+
+#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8
+#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9
+
+#define VBE_DISPI_INDEX_ID 0x0
+#define VBE_DISPI_INDEX_XRES 0x1
+#define VBE_DISPI_INDEX_YRES 0x2
+#define VBE_DISPI_INDEX_BPP 0x3
+#define VBE_DISPI_INDEX_ENABLE 0x4
+#define VBE_DISPI_INDEX_BANK 0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
+#define VBE_DISPI_INDEX_X_OFFSET 0x8
+#define VBE_DISPI_INDEX_Y_OFFSET 0x9
+#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa
+#define VBE_DISPI_INDEX_FB_BASE_HI 0xb
+
+#define VBE_DISPI_ID0 0xB0C0
+#define VBE_DISPI_ID1 0xB0C1
+#define VBE_DISPI_ID2 0xB0C2
+#define VBE_DISPI_ID3 0xB0C3
+#define VBE_DISPI_ID4 0xB0C4
+
+#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00
+/* The VBOX interface id. Indicates support for VBVA shared memory interface. */
+#define VBE_DISPI_ID_HGSMI 0xBE01
+#define VBE_DISPI_ID_ANYX 0xBE02
+
+#define VBE_DISPI_DISABLED 0x00
+#define VBE_DISPI_ENABLED 0x01
+#define VBE_DISPI_GETCAPS 0x02
+#define VBE_DISPI_8BIT_DAC 0x20
+/**
+ * @note This definition is a Bochs legacy, used only in the video BIOS
+ * code and ignored by the emulated hardware.
+ */
+#define VBE_DISPI_LFB_ENABLED 0x40
+#define VBE_DISPI_NOCLEARMEM 0x80
+
+#define VGA_PORT_HGSMI_HOST 0x3b0
+#define VGA_PORT_HGSMI_GUEST 0x3d0
+
+#endif
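
[Editorial sketch, not part of the patch: the VBE_DISPI registers above are
reached through the index/data port pair; this shows the access pattern.]

#include <asm/io.h>

static void vbe_dispi_write(u16 index, u16 value)
{
	outw(index, VBE_DISPI_IOPORT_INDEX);	/* select the register */
	outw(value, VBE_DISPI_IOPORT_DATA);	/* write its value */
}

/* e.g. vbe_dispi_write(VBE_DISPI_INDEX_XRES, 1024); */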
diff --git a/drivers/staging/vboxvideo/vbva_base.c b/drivers/staging/vboxvideo/vbva_base.c
new file mode 100644
index 000000000000..c10c782f94e1
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbva_base.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "hgsmi_channels.h"
+
+/*
+ * There is a hardware ring buffer in the graphics device video RAM, formerly
+ * in the VBox VMMDev PCI memory space.
+ * All graphics commands go there, serialized by vbva_buffer_begin_update()
+ * and vbva_buffer_end_update().
+ *
+ * free_offset is the write position, data_offset is the read position.
+ * free_offset == data_offset means the buffer is empty.
+ * There must always be a gap between data_offset and free_offset when data
+ * is in the buffer.
+ * The guest only changes free_offset, the host only changes data_offset.
+ */
+
+static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
+{
+ s32 diff = vbva->data_offset - vbva->free_offset;
+
+ return diff > 0 ? diff : vbva->data_len + diff;
+}
+
+static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
+ const void *p, u32 len, u32 offset)
+{
+ struct vbva_buffer *vbva = vbva_ctx->vbva;
+ u32 bytes_till_boundary = vbva->data_len - offset;
+ u8 *dst = &vbva->data[offset];
+ s32 diff = len - bytes_till_boundary;
+
+ if (diff <= 0) {
+ /* Chunk will not cross buffer boundary. */
+ memcpy(dst, p, len);
+ } else {
+ /* Chunk crosses buffer boundary. */
+ memcpy(dst, p, bytes_till_boundary);
+ memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
+ }
+}
+
+static void vbva_buffer_flush(struct gen_pool *ctx)
+{
+ struct vbva_flush *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
+ if (!p)
+ return;
+
+ p->reserved = 0;
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+}
+
+bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ const void *p, u32 len)
+{
+ struct vbva_record *record;
+ struct vbva_buffer *vbva;
+ u32 available;
+
+ vbva = vbva_ctx->vbva;
+ record = vbva_ctx->record;
+
+ if (!vbva || vbva_ctx->buffer_overflow ||
+ !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
+ return false;
+
+ available = vbva_buffer_available(vbva);
+
+ while (len > 0) {
+ u32 chunk = len;
+
+ if (chunk >= available) {
+ vbva_buffer_flush(ctx);
+ available = vbva_buffer_available(vbva);
+ }
+
+ if (chunk >= available) {
+ if (WARN_ON(available <= vbva->partial_write_tresh)) {
+ vbva_ctx->buffer_overflow = true;
+ return false;
+ }
+ chunk = available - vbva->partial_write_tresh;
+ }
+
+ vbva_buffer_place_data_at(vbva_ctx, p, chunk,
+ vbva->free_offset);
+
+ vbva->free_offset = (vbva->free_offset + chunk) %
+ vbva->data_len;
+ record->len_and_flags += chunk;
+ available -= chunk;
+ len -= chunk;
+ p += chunk;
+ }
+
+ return true;
+}
+
+static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
+ struct gen_pool *ctx, s32 screen, bool enable)
+{
+ struct vbva_enable_ex *p;
+ bool ret;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
+ if (!p)
+ return false;
+
+ p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
+ p->base.offset = vbva_ctx->buffer_offset;
+ p->base.result = VERR_NOT_SUPPORTED;
+ if (screen >= 0) {
+ p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
+ p->screen_id = screen;
+ }
+
+ hgsmi_buffer_submit(ctx, p);
+
+ if (enable)
+ ret = RT_SUCCESS(p->base.result);
+ else
+ ret = true;
+
+ hgsmi_buffer_free(ctx, p);
+
+ return ret;
+}
+
+bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ struct vbva_buffer *vbva, s32 screen)
+{
+ bool ret = false;
+
+ memset(vbva, 0, sizeof(*vbva));
+ vbva->partial_write_tresh = 256;
+ vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
+ vbva_ctx->vbva = vbva;
+
+ ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
+ if (!ret)
+ vbva_disable(vbva_ctx, ctx, screen);
+
+ return ret;
+}
+
+void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ s32 screen)
+{
+ vbva_ctx->buffer_overflow = false;
+ vbva_ctx->record = NULL;
+ vbva_ctx->vbva = NULL;
+
+ vbva_inform_host(vbva_ctx, ctx, screen, false);
+}
+
+bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
+ struct gen_pool *ctx)
+{
+ struct vbva_record *record;
+ u32 next;
+
+ if (!vbva_ctx->vbva ||
+ !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
+ return false;
+
+ WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);
+
+ next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;
+
+ /* Flush if all slots in the records queue are used */
+ if (next == vbva_ctx->vbva->record_first_index)
+ vbva_buffer_flush(ctx);
+
+	/* If there is still no free slot after the flush, fail the request */
+ if (next == vbva_ctx->vbva->record_first_index)
+ return false;
+
+ record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
+ record->len_and_flags = VBVA_F_RECORD_PARTIAL;
+ vbva_ctx->vbva->record_free_index = next;
+ /* Remember which record we are using. */
+ vbva_ctx->record = record;
+
+ return true;
+}
+
+void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
+{
+ struct vbva_record *record = vbva_ctx->record;
+
+ WARN_ON(!vbva_ctx->vbva || !record ||
+ !(record->len_and_flags & VBVA_F_RECORD_PARTIAL));
+
+ /* Mark the record completed. */
+ record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
+
+ vbva_ctx->buffer_overflow = false;
+ vbva_ctx->record = NULL;
+}
+
+void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
+ u32 buffer_offset, u32 buffer_length)
+{
+ vbva_ctx->buffer_offset = buffer_offset;
+ vbva_ctx->buffer_length = buffer_length;
+}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 030bec855d86..314ffac50bb8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3391,7 +3391,6 @@ static int vchiq_probe(struct platform_device *pdev)
struct device_node *fw_node;
struct rpi_firmware *fw;
int err;
- void *ptr_err;
fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
if (!fw_node) {
@@ -3427,14 +3426,14 @@ static int vchiq_probe(struct platform_device *pdev)
/* create sysfs entries */
vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
- ptr_err = vchiq_class;
- if (IS_ERR(ptr_err))
+ err = PTR_ERR(vchiq_class);
+ if (IS_ERR(vchiq_class))
goto failed_class_create;
vchiq_dev = device_create(vchiq_class, NULL,
vchiq_devid, NULL, "vchiq");
- ptr_err = vchiq_dev;
- if (IS_ERR(ptr_err))
+ err = PTR_ERR(vchiq_dev);
+ if (IS_ERR(vchiq_dev))
goto failed_device_create;
/* create debugfs entries */
@@ -3455,7 +3454,6 @@ failed_device_create:
class_destroy(vchiq_class);
failed_class_create:
cdev_del(&vchiq_cdev);
- err = PTR_ERR(ptr_err);
failed_cdev_add:
unregister_chrdev_region(vchiq_devid, 1);
failed_platform_init:
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ab3e8f410444..e9391bbd4036 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -30,7 +30,7 @@ static DEFINE_IDA(nvm_ida);
struct nvm_auth_status {
struct list_head list;
- uuid_be uuid;
+ uuid_t uuid;
u32 status;
};
@@ -47,7 +47,7 @@ static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
struct nvm_auth_status *st;
list_for_each_entry(st, &nvm_auth_status_cache, list) {
- if (!uuid_be_cmp(st->uuid, *sw->uuid))
+ if (uuid_equal(&st->uuid, sw->uuid))
return st;
}
@@ -281,9 +281,11 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
if (active) {
config.name = "nvm_active";
config.reg_read = tb_switch_nvm_read;
+ config.read_only = true;
} else {
config.name = "nvm_non_active";
config.reg_write = tb_switch_nvm_write;
+ config.root_only = true;
}
config.id = id;
@@ -292,7 +294,6 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
config.size = size;
config.dev = &sw->dev;
config.owner = THIS_MODULE;
- config.root_only = true;
config.priv = sw;
return nvmem_register(&config);
@@ -1460,7 +1461,7 @@ struct tb_sw_lookup {
struct tb *tb;
u8 link;
u8 depth;
- const uuid_be *uuid;
+ const uuid_t *uuid;
};
static int tb_switch_match(struct device *dev, void *data)
@@ -1517,7 +1518,7 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
* Returned switch has reference count increased so the caller needs to
* call tb_switch_put() when done with the switch.
*/
-struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
struct tb_sw_lookup lookup;
struct device *dev;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 3d9f64676e58..e0deee4f1eb0 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -101,7 +101,7 @@ struct tb_switch {
struct tb_dma_port *dma_port;
struct tb *tb;
u64 uid;
- uuid_be *uuid;
+ uuid_t *uuid;
u16 vendor;
u16 device;
const char *vendor_name;
@@ -407,7 +407,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
-struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid);
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
{
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 85b6d33c0919..de6441e4a060 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -179,7 +179,7 @@ struct icm_fr_pkg_get_topology_response {
struct icm_fr_event_device_connected {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 link_info;
@@ -193,7 +193,7 @@ struct icm_fr_event_device_connected {
struct icm_fr_pkg_approve_device {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -207,7 +207,7 @@ struct icm_fr_event_device_disconnected {
struct icm_fr_pkg_add_device_key {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -216,7 +216,7 @@ struct icm_fr_pkg_add_device_key {
struct icm_fr_pkg_add_device_key_response {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -224,7 +224,7 @@ struct icm_fr_pkg_add_device_key_response {
struct icm_fr_pkg_challenge_device {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
@@ -233,7 +233,7 @@ struct icm_fr_pkg_challenge_device {
struct icm_fr_pkg_challenge_device_response {
struct icm_pkg_header hdr;
- uuid_be ep_uuid;
+ uuid_t ep_uuid;
u8 connection_key;
u8 connection_id;
u16 reserved;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index d1399aac05a1..284749fb0f6b 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -448,48 +448,6 @@ err:
return retval;
}
-/**
- * pty_open_peer - open the peer of a pty
- * @tty: the peer of the pty being opened
- *
- * Open the cached dentry in tty->link, providing a safe way for userspace
- * to get the slave end of a pty (where they have the master fd and cannot
- * access or trust the mount namespace /dev/pts was mounted inside).
- */
-static struct file *pty_open_peer(struct tty_struct *tty, int flags)
-{
- if (tty->driver->subtype != PTY_TYPE_MASTER)
- return ERR_PTR(-EIO);
- return dentry_open(tty->link->driver_data, flags, current_cred());
-}
-
-static int pty_get_peer(struct tty_struct *tty, int flags)
-{
- int fd = -1;
- struct file *filp = NULL;
- int retval = -EINVAL;
-
- fd = get_unused_fd_flags(0);
- if (fd < 0) {
- retval = fd;
- goto err;
- }
-
- filp = pty_open_peer(tty, flags);
- if (IS_ERR(filp)) {
- retval = PTR_ERR(filp);
- goto err_put;
- }
-
- fd_install(fd, filp);
- return fd;
-
-err_put:
- put_unused_fd(fd);
-err:
- return retval;
-}
-
static void pty_cleanup(struct tty_struct *tty)
{
tty_port_put(tty->port);
@@ -646,9 +604,50 @@ static inline void legacy_pty_init(void) { }
/* Unix98 devices */
#ifdef CONFIG_UNIX98_PTYS
-
static struct cdev ptmx_cdev;
+/**
+ * pty_open_peer - open the peer of a pty
+ * @tty: the peer of the pty being opened
+ *
+ * Open the cached dentry in tty->link, providing a safe way for userspace
+ * to get the slave end of a pty (where they have the master fd and cannot
+ * access or trust the mount namespace /dev/pts was mounted inside).
+ */
+static struct file *pty_open_peer(struct tty_struct *tty, int flags)
+{
+ if (tty->driver->subtype != PTY_TYPE_MASTER)
+ return ERR_PTR(-EIO);
+ return dentry_open(tty->link->driver_data, flags, current_cred());
+}
+
+static int pty_get_peer(struct tty_struct *tty, int flags)
+{
+ int fd = -1;
+ struct file *filp = NULL;
+ int retval = -EINVAL;
+
+ fd = get_unused_fd_flags(0);
+ if (fd < 0) {
+ retval = fd;
+ goto err;
+ }
+
+ filp = pty_open_peer(tty, flags);
+ if (IS_ERR(filp)) {
+ retval = PTR_ERR(filp);
+ goto err_put;
+ }
+
+ fd_install(fd, filp);
+ return fd;
+
+err_put:
+ put_unused_fd(fd);
+err:
+ return retval;
+}
+
static int pty_unix98_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 343de8c384b0..898dcb091a27 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -619,6 +619,12 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
TIOCSER_TEMT : 0;
}
+static bool lpuart_is_32(struct lpuart_port *sport)
+{
+ return sport->port.iotype == UPIO_MEM32 ||
+ sport->port.iotype == UPIO_MEM32BE;
+}
+
static irqreturn_t lpuart_txint(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
@@ -627,7 +633,7 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id)
spin_lock_irqsave(&sport->port.lock, flags);
if (sport->port.x_char) {
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart32_write(&sport->port, sport->port.x_char, UARTDATA);
else
writeb(sport->port.x_char, sport->port.membase + UARTDR);
@@ -635,14 +641,14 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id)
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart32_stop_tx(&sport->port);
else
lpuart_stop_tx(&sport->port);
goto out;
}
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart32_transmit_buffer(sport);
else
lpuart_transmit_buffer(sport);
@@ -1978,12 +1984,12 @@ static int __init lpuart_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart32_console_get_options(sport, &baud, &parity, &bits);
else
lpuart_console_get_options(sport, &baud, &parity, &bits);
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart32_setup_watermark(sport);
else
lpuart_setup_watermark(sport);
@@ -2118,7 +2124,7 @@ static int lpuart_probe(struct platform_device *pdev)
}
sport->port.irq = ret;
sport->port.iotype = sdata->iotype;
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
sport->port.ops = &lpuart32_pops;
else
sport->port.ops = &lpuart_pops;
@@ -2145,7 +2151,7 @@ static int lpuart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &sport->port);
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+ if (lpuart_is_32(sport))
lpuart_reg.cons = LPUART32_CONSOLE;
else
lpuart_reg.cons = LPUART_CONSOLE;
@@ -2198,7 +2204,7 @@ static int lpuart_suspend(struct device *dev)
struct lpuart_port *sport = dev_get_drvdata(dev);
unsigned long temp;
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) {
+ if (lpuart_is_32(sport)) {
/* disable Rx/Tx and interrupts */
temp = lpuart32_read(&sport->port, UARTCTRL);
temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE);
@@ -2249,7 +2255,7 @@ static int lpuart_resume(struct device *dev)
if (sport->port.suspended && !sport->port.irq_wake)
clk_prepare_enable(sport->clk);
- if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) {
+ if (lpuart_is_32(sport)) {
lpuart32_setup_watermark(sport);
temp = lpuart32_read(&sport->port, UARTCTRL);
temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9e3162bf3bd1..80934e7bd67f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -186,11 +186,6 @@
#define UART_NR 8
-/* RX DMA buffer periods */
-#define RX_DMA_PERIODS 4
-#define RX_BUF_SIZE (PAGE_SIZE)
-
-
/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
enum imx_uart_type {
IMX1_UART,
@@ -226,7 +221,6 @@ struct imx_port {
struct dma_chan *dma_chan_rx, *dma_chan_tx;
struct scatterlist rx_sgl, tx_sgl[2];
void *rx_buf;
- unsigned int rx_buf_size;
struct circ_buf rx_ring;
unsigned int rx_periods;
dma_cookie_t rx_cookie;
@@ -464,7 +458,7 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
}
}
- while (!uart_circ_empty(xmit) &&
+ while (!uart_circ_empty(xmit) && !sport->dma_is_txing &&
!(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) {
/* send xmit->buf[xmit->tail]
* out the port here */
@@ -967,6 +961,8 @@ static void imx_timeout(unsigned long data)
}
}
+#define RX_BUF_SIZE (PAGE_SIZE)
+
/*
* There are two kinds of RX DMA interrupts(such as in the MX6Q):
* [1] the RX DMA buffer is full.
@@ -1049,6 +1045,9 @@ static void dma_rx_callback(void *data)
}
}
+/* RX DMA buffer periods */
+#define RX_DMA_PERIODS 4
+
static int start_rx_dma(struct imx_port *sport)
{
struct scatterlist *sgl = &sport->rx_sgl;
@@ -1059,8 +1058,9 @@ static int start_rx_dma(struct imx_port *sport)
sport->rx_ring.head = 0;
sport->rx_ring.tail = 0;
+ sport->rx_periods = RX_DMA_PERIODS;
- sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size);
+ sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
if (ret == 0) {
dev_err(dev, "DMA mapping error for RX.\n");
@@ -1171,7 +1171,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
goto err;
}
- sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL);
+ sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sport->rx_buf) {
ret = -ENOMEM;
goto err;
@@ -2036,7 +2036,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
{
struct device_node *np = pdev->dev.of_node;
int ret;
- u32 dma_buf_size[2];
sport->devdata = of_device_get_match_data(&pdev->dev);
if (!sport->devdata)
@@ -2060,14 +2059,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
if (of_get_property(np, "rts-gpios", NULL))
sport->have_rtsgpio = 1;
- if (!of_property_read_u32_array(np, "fsl,dma-size", dma_buf_size, 2)) {
- sport->rx_buf_size = dma_buf_size[0] * dma_buf_size[1];
- sport->rx_periods = dma_buf_size[1];
- } else {
- sport->rx_buf_size = RX_BUF_SIZE;
- sport->rx_periods = RX_DMA_PERIODS;
- }
-
return 0;
}
#else
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index da5ddfc14778..e08b16b070c0 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1085,10 +1085,12 @@ static ssize_t rx_trigger_store(struct device *dev,
{
struct uart_port *port = dev_get_drvdata(dev);
struct sci_port *sci = to_sci_port(port);
+ int ret;
long r;
- if (kstrtol(buf, 0, &r) == -EINVAL)
- return -EINVAL;
+ ret = kstrtol(buf, 0, &r);
+ if (ret)
+ return ret;
sci->rx_trigger = scif_set_rtrg(port, r);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
@@ -1116,10 +1118,12 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
{
struct uart_port *port = dev_get_drvdata(dev);
struct sci_port *sci = to_sci_port(port);
+ int ret;
long r;
- if (kstrtol(buf, 0, &r) == -EINVAL)
- return -EINVAL;
+ ret = kstrtol(buf, 0, &r);
+ if (ret)
+ return ret;
sci->rx_fifo_timeout = r;
scif_set_rtrg(port, 1);
if (r > 0)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index f5335be344f6..6b0ca65027d0 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -758,6 +758,7 @@ static int asc_init_port(struct asc_port *ascport,
if (IS_ERR(ascport->pinctrl)) {
ret = PTR_ERR(ascport->pinctrl);
dev_err(&pdev->dev, "Failed to get Pinctrl: %d\n", ret);
+ return ret;
}
ascport->states[DEFAULT] =
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5357d83bbda2..5e056064259c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1829,6 +1829,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index bc3b3fda5000..c4066cd77e47 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3573,6 +3573,9 @@ irq_retry:
/* Report disconnection if it is not already done. */
dwc2_hsotg_disconnect(hsotg);
+ /* Reset device address to zero */
+ __bic32(hsotg->regs + DCFG, DCFG_DEVADDR_MASK);
+
if (usb_status & GOTGCTL_BSESVLD && connected)
dwc2_hsotg_core_init_disconnected(hsotg, true);
}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 326b302fc440..03474d3575ab 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -766,15 +766,15 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc->maximum_speed = USB_SPEED_HIGH;
}
- ret = dwc3_core_soft_reset(dwc);
+ ret = dwc3_core_get_phy(dwc);
if (ret)
goto err0;
- ret = dwc3_phy_setup(dwc);
+ ret = dwc3_core_soft_reset(dwc);
if (ret)
goto err0;
- ret = dwc3_core_get_phy(dwc);
+ ret = dwc3_phy_setup(dwc);
if (ret)
goto err0;
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 98926504b55b..f5aaa0cf3873 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -512,15 +512,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
/* check the DMA Status */
reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
- irq_set_status_flags(omap->irq, IRQ_NOAUTOEN);
- ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
- dwc3_omap_interrupt_thread, IRQF_SHARED,
- "dwc3-omap", omap);
- if (ret) {
- dev_err(dev, "failed to request IRQ #%d --> %d\n",
- omap->irq, ret);
- goto err1;
- }
ret = dwc3_omap_extcon_register(omap);
if (ret < 0)
@@ -532,8 +523,15 @@ static int dwc3_omap_probe(struct platform_device *pdev)
goto err1;
}
+ ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
+ dwc3_omap_interrupt_thread, IRQF_SHARED,
+ "dwc3-omap", omap);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ #%d --> %d\n",
+ omap->irq, ret);
+ goto err1;
+ }
dwc3_omap_enable_irqs(omap);
- enable_irq(omap->irq);
return 0;
err1:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9e41605a276b..6b299c7b7656 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -191,14 +191,16 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
req->started = false;
list_del(&req->list);
- req->trb = NULL;
req->remaining = 0;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- usb_gadget_unmap_request_by_dev(dwc->sysdev,
- &req->request, req->direction);
+ if (req->trb)
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
+
+ req->trb = NULL;
trace_dwc3_gadget_giveback(req);
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index e80b9c123a9d..f95bddd6513f 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2490,7 +2490,7 @@ static int fsg_main_thread(void *common_)
int i;
down_write(&common->filesem);
- for (i = 0; i < ARRAY_SIZE(common->luns); --i) {
+ for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
struct fsg_lun *curlun = common->luns[i];
if (!curlun || !fsg_lun_is_open(curlun))
continue;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 8656f84e17d9..29efbedc91f9 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -92,9 +92,9 @@ static struct uac_input_terminal_descriptor usb_out_it_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
.bTerminalID = USB_OUT_IT_ID,
- .wTerminalType = UAC_TERMINAL_STREAMING,
+ .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
- .wChannelConfig = 0x3,
+ .wChannelConfig = cpu_to_le16(0x3),
};
#define IO_OUT_OT_ID 2
@@ -103,7 +103,7 @@ static struct uac1_output_terminal_descriptor io_out_ot_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
.bTerminalID = IO_OUT_OT_ID,
- .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER,
+ .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
.bAssocTerminal = 0,
.bSourceID = USB_OUT_IT_ID,
};
@@ -114,9 +114,9 @@ static struct uac_input_terminal_descriptor io_in_it_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
.bTerminalID = IO_IN_IT_ID,
- .wTerminalType = UAC_INPUT_TERMINAL_MICROPHONE,
+ .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
.bAssocTerminal = 0,
- .wChannelConfig = 0x3,
+ .wChannelConfig = cpu_to_le16(0x3),
};
#define USB_IN_OT_ID 4
@@ -125,7 +125,7 @@ static struct uac1_output_terminal_descriptor usb_in_ot_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
.bTerminalID = USB_IN_OT_ID,
- .wTerminalType = UAC_TERMINAL_STREAMING,
+ .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
.bSourceID = IO_IN_IT_ID,
};
@@ -174,7 +174,7 @@ static struct uac1_as_header_descriptor as_out_header_desc = {
.bDescriptorSubtype = UAC_AS_GENERAL,
.bTerminalLink = USB_OUT_IT_ID,
.bDelay = 1,
- .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+ .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
};
static struct uac1_as_header_descriptor as_in_header_desc = {
@@ -183,7 +183,7 @@ static struct uac1_as_header_descriptor as_in_header_desc = {
.bDescriptorSubtype = UAC_AS_GENERAL,
.bTerminalLink = USB_IN_OT_ID,
.bDelay = 1,
- .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+ .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
};
DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
@@ -606,8 +606,8 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
if (status)
goto fail;
- audio->out_ep_maxpsize = as_out_ep_desc.wMaxPacketSize;
- audio->in_ep_maxpsize = as_in_ep_desc.wMaxPacketSize;
+ audio->out_ep_maxpsize = le16_to_cpu(as_out_ep_desc.wMaxPacketSize);
+ audio->in_ep_maxpsize = le16_to_cpu(as_in_ep_desc.wMaxPacketSize);
audio->params.c_chmask = audio_opts->c_chmask;
audio->params.c_srate = audio_opts->c_srate;
audio->params.c_ssize = audio_opts->c_ssize;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 9082ce261e70..f05c3f3e6103 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -168,7 +168,7 @@ static struct uac2_input_terminal_descriptor usb_out_it_desc = {
.bAssocTerminal = 0,
.bCSourceID = USB_OUT_CLK_ID,
.iChannelNames = 0,
- .bmControls = (CONTROL_RDWR << COPY_CTRL),
+ .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
/* Input Terminal for I/O-In */
@@ -182,7 +182,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
.bAssocTerminal = 0,
.bCSourceID = USB_IN_CLK_ID,
.iChannelNames = 0,
- .bmControls = (CONTROL_RDWR << COPY_CTRL),
+ .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
/* Ouput Terminal for USB_IN */
@@ -196,7 +196,7 @@ static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
.bAssocTerminal = 0,
.bSourceID = IO_IN_IT_ID,
.bCSourceID = USB_IN_CLK_ID,
- .bmControls = (CONTROL_RDWR << COPY_CTRL),
+ .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
/* Ouput Terminal for I/O-Out */
@@ -210,7 +210,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
.bAssocTerminal = 0,
.bSourceID = USB_OUT_IT_ID,
.bCSourceID = USB_OUT_CLK_ID,
- .bmControls = (CONTROL_RDWR << COPY_CTRL),
+ .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
static struct uac2_ac_header_descriptor ac_hdr_desc = {
@@ -220,9 +220,10 @@ static struct uac2_ac_header_descriptor ac_hdr_desc = {
.bDescriptorSubtype = UAC_MS_HEADER,
.bcdADC = cpu_to_le16(0x200),
.bCategory = UAC2_FUNCTION_IO_BOX,
- .wTotalLength = sizeof in_clk_src_desc + sizeof out_clk_src_desc
- + sizeof usb_out_it_desc + sizeof io_in_it_desc
- + sizeof usb_in_ot_desc + sizeof io_out_ot_desc,
+ .wTotalLength = cpu_to_le16(sizeof in_clk_src_desc
+ + sizeof out_clk_src_desc + sizeof usb_out_it_desc
+ + sizeof io_in_it_desc + sizeof usb_in_ot_desc
+ + sizeof io_out_ot_desc),
.bmControls = 0,
};
@@ -569,10 +570,12 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
return ret;
}
- agdev->in_ep_maxpsize = max(fs_epin_desc.wMaxPacketSize,
- hs_epin_desc.wMaxPacketSize);
- agdev->out_ep_maxpsize = max(fs_epout_desc.wMaxPacketSize,
- hs_epout_desc.wMaxPacketSize);
+ agdev->in_ep_maxpsize = max_t(u16,
+ le16_to_cpu(fs_epin_desc.wMaxPacketSize),
+ le16_to_cpu(hs_epin_desc.wMaxPacketSize));
+ agdev->out_ep_maxpsize = max_t(u16,
+ le16_to_cpu(fs_epout_desc.wMaxPacketSize),
+ le16_to_cpu(hs_epout_desc.wMaxPacketSize));
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 9ffb11ec9ed9..7cd5c969fcbe 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -192,7 +192,7 @@ config USB_RENESAS_USBHS_UDC
config USB_RENESAS_USB3
tristate 'Renesas USB3.0 Peripheral controller'
depends on ARCH_RENESAS || COMPILE_TEST
- depends on EXTCON
+ depends on EXTCON && HAS_DMA
help
Renesas USB3.0 Peripheral controller is a USB peripheral controller
that supports super, high, and full speed USB 3.0 data transfers.
@@ -257,6 +257,7 @@ config USB_MV_U3D
config USB_SNP_CORE
depends on (USB_AMD5536UDC || USB_SNP_UDC_PLAT)
+ depends on HAS_DMA
tristate
help
This enables core driver support for Synopsys USB 2.0 Device
@@ -271,7 +272,7 @@ config USB_SNP_CORE
config USB_SNP_UDC_PLAT
tristate "Synopsys USB 2.0 Device controller"
- depends on (USB_GADGET && OF)
+ depends on USB_GADGET && OF && HAS_DMA
select USB_GADGET_DUALSPEED
select USB_SNP_CORE
default ARCH_BCM_IPROC
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index d8278322d5ac..62dc9c7798e7 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -89,6 +89,9 @@
/* USB_COM_CON */
#define USB_COM_CON_CONF BIT(24)
+#define USB_COM_CON_PN_WDATAIF_NL BIT(23)
+#define USB_COM_CON_PN_RDATAIF_NL BIT(22)
+#define USB_COM_CON_PN_LSTTR_PP BIT(21)
#define USB_COM_CON_SPD_MODE BIT(17)
#define USB_COM_CON_EP0_EN BIT(16)
#define USB_COM_CON_DEV_ADDR_SHIFT 8
@@ -686,6 +689,9 @@ static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
{
usb3_init_axi_bridge(usb3);
usb3_init_epc_registers(usb3);
+ usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL |
+ USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP,
+ USB3_USB_COM_CON);
usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_STA);
usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_ENA);
@@ -1369,7 +1375,7 @@ static int renesas_usb3_dma_free_prd(struct renesas_usb3 *usb3,
usb3_for_each_dma(usb3, dma, i) {
if (dma->prd) {
- dma_free_coherent(dev, USB3_DMA_MAX_XFER_SIZE,
+ dma_free_coherent(dev, USB3_DMA_PRD_SIZE,
dma->prd, dma->prd_dma);
dma->prd = NULL;
}
@@ -1409,12 +1415,12 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
int ret = -EAGAIN;
u32 enable_bits = 0;
+ spin_lock_irqsave(&usb3->lock, flags);
if (usb3_ep->halt || usb3_ep->started)
- return;
+ goto out;
if (usb3_req != usb3_req_first)
- return;
+ goto out;
- spin_lock_irqsave(&usb3->lock, flags);
if (usb3_pn_change(usb3, usb3_ep->num) < 0)
goto out;
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 2e11f19e07ae..f7b4d0f159e4 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -28,7 +28,7 @@
/* description */
#define UDC_MOD_DESCRIPTION "Synopsys UDC platform driver"
-void start_udc(struct udc *udc)
+static void start_udc(struct udc *udc)
{
if (udc->driver) {
dev_info(udc->dev, "Connecting...\n");
@@ -38,7 +38,7 @@ void start_udc(struct udc *udc)
}
}
-void stop_udc(struct udc *udc)
+static void stop_udc(struct udc *udc)
{
int tmp;
u32 reg;
@@ -76,7 +76,7 @@ void stop_udc(struct udc *udc)
dev_info(udc->dev, "Device disconnected\n");
}
-void udc_drd_work(struct work_struct *work)
+static void udc_drd_work(struct work_struct *work)
{
struct udc *udc;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a9a1e4c40480..c8989c62a262 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -77,6 +77,16 @@
#define USB_INTEL_USB3_PSSEN 0xD8
#define USB_INTEL_USB3PRM 0xDC
+/* Registers used by the ASMedia flow control quirk */
+#define ASMT_DATA_WRITE0_REG 0xF8
+#define ASMT_DATA_WRITE1_REG 0xFC
+#define ASMT_CONTROL_REG 0xE0
+#define ASMT_CONTROL_WRITE_BIT 0x02
+#define ASMT_WRITEREG_CMD 0x10423
+#define ASMT_FLOWCTL_ADDR 0xFA30
+#define ASMT_FLOWCTL_DATA 0xBA
+#define ASMT_PSEUDO_DATA 0
+
/*
* amd_chipset_gen values represent AMD different chipset generations
*/
@@ -412,6 +422,50 @@ void usb_amd_quirk_pll_disable(void)
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
+static int usb_asmedia_wait_write(struct pci_dev *pdev)
+{
+ unsigned long retry_count;
+ unsigned char value;
+
+ for (retry_count = 1000; retry_count > 0; --retry_count) {
+
+ pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
+
+ if (value == 0xff) {
+ dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
+ return -EIO;
+ }
+
+ if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
+ return 0;
+
+ usleep_range(40, 60);
+ }
+
+ dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
+ return -ETIMEDOUT;
+}
+
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
+{
+ if (usb_asmedia_wait_write(pdev) != 0)
+ return;
+
+ /* send command and address to device */
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
+ pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+
+ if (usb_asmedia_wait_write(pdev) != 0)
+ return;
+
+ /* send data to device */
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
+ pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+}
+EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
+
void usb_amd_quirk_pll_enable(void)
{
usb_amd_quirk_pll(0);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 0222195bd5b0..655994480198 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -11,6 +11,7 @@ bool usb_amd_prefetch_quirk(void);
void usb_amd_dev_put(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
@@ -18,6 +19,7 @@ void sb800_prefetch(struct device *dev, int on);
struct pci_dev;
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
+static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
static inline void usb_amd_dev_put(void) {}
static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
static inline void sb800_prefetch(struct device *dev, int on) {}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1adae9eab831..00721e8807ab 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -398,14 +398,21 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
spin_lock_irqsave(&xhci->lock, flags);
for (i = LAST_EP_INDEX; i > 0; i--) {
if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
+ struct xhci_ep_ctx *ep_ctx;
struct xhci_command *command;
+
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i);
+
+ /* Check ep is running, required by AMD SNPS 3.1 xHC */
+ if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING)
+ continue;
+
command = xhci_alloc_command(xhci, false, false,
GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, cmd);
return -ENOMEM;
-
}
xhci_queue_stop_endpoint(xhci, command, slot_id, i,
suspend);
@@ -603,12 +610,14 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
/* Disable all Device Slots */
xhci_dbg(xhci, "Disable all slots\n");
+ spin_unlock_irqrestore(&xhci->lock, *flags);
for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
retval = xhci_disable_slot(xhci, NULL, i);
if (retval)
xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
i, retval);
}
+ spin_lock_irqsave(&xhci->lock, *flags);
/* Put all ports to the Disable state by clear PP */
xhci_dbg(xhci, "Disable all port (PP = 0)\n");
/* Power off USB3 ports*/
@@ -897,6 +906,9 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
clear_bit(wIndex, &bus_state->resuming_ports);
set_bit(wIndex, &bus_state->rexit_ports);
+
+ xhci_test_and_clear_bit(xhci, port_array, wIndex,
+ PORT_PLC);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 53882e2babbb..5b0fa553c8bc 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,8 @@
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
+#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
+
static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -217,6 +219,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x1142)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
+ xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL;
+
if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c50c902d009e..cc368ad2b51e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -864,13 +864,16 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;
- for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+ for (stream_id = 1; stream_id < ep->stream_info->num_streams;
stream_id++) {
+ ring = ep->stream_info->stream_rings[stream_id];
+ if (!ring)
+ continue;
+
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u, stream %u",
- slot_id, ep_index, stream_id + 1);
- xhci_kill_ring_urbs(xhci,
- ep->stream_info->stream_rings[stream_id]);
+ slot_id, ep_index, stream_id);
+ xhci_kill_ring_urbs(xhci, ring);
}
} else {
ring = ep->ring;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 56f85df013db..b2ff1ff1a02f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -198,6 +198,9 @@ int xhci_reset(struct xhci_hcd *xhci)
if (ret)
return ret;
+ if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+ usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
+
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wait for controller to be ready for doorbell rings");
/*
@@ -622,8 +625,10 @@ int xhci_run(struct usb_hcd *hcd)
if (!command)
return -ENOMEM;
- xhci_queue_vendor_command(xhci, command, 0, 0, 0,
+ ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
+ if (ret)
+ xhci_free_command(xhci, command);
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
@@ -1085,6 +1090,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
compliance_mode_recovery_timer_init(xhci);
+ if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+ usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
+
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3c6da1f93c84..e3e935291ed6 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1820,6 +1820,7 @@ struct xhci_hcd {
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
#define XHCI_U2_DISABLE_WAKE (1 << 27)
+#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 623c51300393..f0ce304c5aaf 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -752,8 +752,10 @@ static int usbhsc_resume(struct device *dev)
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
- if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
+ if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
usbhsc_power_ctrl(priv, 1);
+ usbhs_mod_autonomy_mode(priv);
+ }
usbhs_platform_call(priv, phy_reset, pdev);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 5bc7a6138855..93fba9033b00 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -37,6 +37,7 @@ struct usbhsg_gpriv;
struct usbhsg_uep {
struct usb_ep ep;
struct usbhs_pipe *pipe;
+ spinlock_t lock; /* protect the pipe */
char ep_name[EP_NAME_SIZE];
@@ -636,10 +637,16 @@ usbhsg_ep_enable_end:
static int usbhsg_ep_disable(struct usb_ep *ep)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pipe *pipe;
+ unsigned long flags;
+ int ret = 0;
- if (!pipe)
- return -EINVAL;
+ spin_lock_irqsave(&uep->lock, flags);
+ pipe = usbhsg_uep_to_pipe(uep);
+ if (!pipe) {
+ ret = -EINVAL;
+ goto out;
+ }
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
@@ -647,6 +654,9 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
uep->pipe->mod_private = NULL;
uep->pipe = NULL;
+out:
+ spin_unlock_irqrestore(&uep->lock, flags);
+
return 0;
}
@@ -696,8 +706,11 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pipe *pipe;
+ unsigned long flags;
+ spin_lock_irqsave(&uep->lock, flags);
+ pipe = usbhsg_uep_to_pipe(uep);
if (pipe)
usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
@@ -706,6 +719,7 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
* even if the pipe is NULL.
*/
usbhsg_queue_pop(uep, ureq, -ECONNRESET);
+ spin_unlock_irqrestore(&uep->lock, flags);
return 0;
}
@@ -852,10 +866,10 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
- struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
+ struct usbhsg_uep *uep;
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
- int ret = 0;
+ int ret = 0, i;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
@@ -887,7 +901,9 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
- usbhsg_ep_disable(&dcp->ep);
+ /* disable all eps */
+ usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
+ usbhsg_ep_disable(&uep->ep);
dev_dbg(dev, "stop gadget\n");
@@ -1069,6 +1085,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
ret = -ENOMEM;
goto usbhs_mod_gadget_probe_err_gpriv;
}
+ spin_lock_init(&uep->lock);
gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
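The mod_gadget changes all follow one locking pattern: the shared uep->pipe pointer is read, used, and cleared only while uep->lock is held, with a single unlock site reached by goto so no early return can leak the lock. A compilable pthread analogue of that pattern; the types and names are illustrative, not the driver's:

    #include <pthread.h>
    #include <stddef.h>

    struct endpoint {
        pthread_mutex_t lock;
        void *pipe;                  /* protected by lock */
    };

    static int ep_disable(struct endpoint *ep)
    {
        int ret = 0;

        pthread_mutex_lock(&ep->lock);
        if (!ep->pipe) {
            ret = -1;                /* stand-in for -EINVAL */
            goto out;
        }
        /* ... tear down and free the pipe under the lock ... */
        ep->pipe = NULL;
    out:
        pthread_mutex_unlock(&ep->lock);
        return ret;
    }

    int main(void)
    {
        struct endpoint ep = { PTHREAD_MUTEX_INITIALIZER, "pipe" };

        return ep_disable(&ep);      /* 0: pipe cleared under the lock */
    }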
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index fba4005dd737..6a7720e66595 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1529,8 +1529,11 @@ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)
/* Make sure driver was initialized */
- if (us->extra == NULL)
+ if (us->extra == NULL) {
usb_stor_dbg(us, "ERROR Driver not initialized\n");
+ srb->result = DID_ERROR << 16;
+ return;
+ }
scsi_set_resid(srb, 0);
/* scsi_bufflen might change in protocol translation to ata */
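For context on the `DID_ERROR << 16` added above: the SCSI midlayer packs several status bytes into scsi_cmnd.result, and the host byte (the DID_* codes) occupies bits 16..23, which is why a transport-level failure is reported with a shift by 16. A small standalone illustration:

    #include <stdio.h>

    #define DID_ERROR 0x07                        /* host-byte code, as in <scsi/scsi.h> */
    #define host_byte(result) (((result) >> 16) & 0xff)

    int main(void)
    {
        int result = DID_ERROR << 16;             /* what the early return stores */

        printf("host byte = 0x%02x\n", host_byte(result));
        return 0;
    }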
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 6b0d2f0918c6..8a88f45822e3 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -3,6 +3,7 @@
#define __DRIVER_USB_TYPEC_UCSI_H
#include <linux/bitops.h>
+#include <linux/device.h>
#include <linux/types.h>
/* -------------------------------------------------------------------------- */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 22caf808bfab..f0b3a0b9d42f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -104,12 +104,6 @@ static u32 page_to_balloon_pfn(struct page *page)
return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}
-static struct page *balloon_pfn_to_page(u32 pfn)
-{
- BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
- return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
-}
-
static void balloon_ack(struct virtqueue *vq)
{
struct virtio_balloon *vb = vq->vdev->priv;
@@ -138,8 +132,10 @@ static void set_page_pfns(struct virtio_balloon *vb,
{
unsigned int i;
- /* Set balloon pfns pointing at this page.
- * Note that the first pfn points at start of the page. */
+ /*
+ * Set balloon pfns pointing at this page.
+ * Note that the first pfn points at start of the page.
+ */
for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
pfns[i] = cpu_to_virtio32(vb->vdev,
page_to_balloon_pfn(page) + i);
@@ -182,18 +178,16 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
return num_allocated_pages;
}
-static void release_pages_balloon(struct virtio_balloon *vb)
+static void release_pages_balloon(struct virtio_balloon *vb,
+ struct list_head *pages)
{
- unsigned int i;
- struct page *page;
+ struct page *page, *next;
- /* Find pfns pointing at start of each page, get pages and free them. */
- for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
- vb->pfns[i]));
+ list_for_each_entry_safe(page, next, pages, lru) {
if (!virtio_has_feature(vb->vdev,
VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
adjust_managed_page_count(page, 1);
+ list_del(&page->lru);
put_page(page); /* balloon reference */
}
}
@@ -203,6 +197,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
unsigned num_freed_pages;
struct page *page;
struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
+ LIST_HEAD(pages);
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
@@ -216,6 +211,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
if (!page)
break;
set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+ list_add(&page->lru, &pages);
vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
}
@@ -227,7 +223,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
*/
if (vb->num_pfns != 0)
tell_host(vb, vb->deflate_vq);
- release_pages_balloon(vb);
+ release_pages_balloon(vb, &pages);
mutex_unlock(&vb->balloon_lock);
return num_freed_pages;
}
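The release_pages_balloon() rework above drops the pfn-to-page conversion in favor of a private list built under the balloon lock, which is then walked with list_for_each_entry_safe(), the iterator variant that caches the next node so the current one may be unlinked and freed mid-walk. A compilable userspace analogue of that safe-iteration idea:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int id;
    };

    static void release_all(struct node **head)
    {
        struct node *n = *head, *next;

        while (n) {
            next = n->next;          /* cache before freeing, like _safe */
            printf("releasing %d\n", n->id);
            free(n);
            n = next;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));

            n->id = i;
            n->next = head;
            head = n;
        }
        release_all(&head);
        return 0;
    }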
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3612542b6044..83fc9aab34e8 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -704,7 +704,8 @@ static int omap_hdq_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- ret = -ENXIO;
+ dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ ret = irq;
goto err_irq;
}
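The omap_hdq hunk replaces a hard-coded -ENXIO with whatever negative errno platform_get_irq() actually returned, preserving distinctions such as probe deferral. A small sketch of the same propagate-don't-flatten rule; get_irq() is a made-up stand-in, with -EAGAIN standing in for -EPROBE_DEFER:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for platform_get_irq(): IRQ number, or a negative errno. */
    static int get_irq(int fail)
    {
        return fail ? -EAGAIN : 42;
    }

    static int probe(int fail)
    {
        int irq = get_irq(fail);

        if (irq < 0) {
            fprintf(stderr, "failed to get IRQ: %s\n", strerror(-irq));
            return irq;              /* propagate, don't flatten to -ENXIO */
        }
        printf("got IRQ %d\n", irq);
        return 0;
    }

    int main(void)
    {
        probe(0);
        probe(1);
        return 0;
    }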
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 95ea7e6b1d99..74471e7aa5cc 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -728,6 +728,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
atomic_set(&sl->refcnt, 1);
atomic_inc(&sl->master->refcnt);
+ dev->slave_count++;
/* slave modules need to be loaded in a context with unlocked mutex */
mutex_unlock(&dev->mutex);
@@ -747,11 +748,11 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
sl->family = f;
-
err = __w1_attach_slave_device(sl);
if (err < 0) {
dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__,
sl->name);
+ dev->slave_count--;
w1_family_put(sl->family);
atomic_dec(&sl->master->refcnt);
kfree(sl);
@@ -759,7 +760,6 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
}
sl->ttl = dev->slave_ttl;
- dev->slave_count++;
memcpy(msg.id.id, rn, sizeof(msg.id));
msg.type = W1_SLAVE_ADD;
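The w1 change moves the slave_count increment to before the mutex is dropped (slave modules may load and look at the master in the meantime) and rolls it back if the attach later fails, so the count never under-reports an attached slave. A sketch of that account-early, roll-back-on-error ordering, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    struct master {
        pthread_mutex_t mutex;
        int slave_count;
    };

    static int attach_slave(struct master *m, int (*do_attach)(void))
    {
        int err;

        pthread_mutex_lock(&m->mutex);
        m->slave_count++;            /* account before the lock is dropped */
        pthread_mutex_unlock(&m->mutex);

        err = do_attach();           /* may sleep, load modules, etc. */
        if (err < 0) {
            pthread_mutex_lock(&m->mutex);
            m->slave_count--;        /* roll back on failure */
            pthread_mutex_unlock(&m->mutex);
        }
        return err;
    }

    static int fake_attach(void) { return -1; }

    int main(void)
    {
        struct master m = { PTHREAD_MUTEX_INITIALIZER, 0 };

        attach_slave(&m, fake_attach);
        printf("slave_count = %d\n", m.slave_count);   /* back to 0 */
        return 0;
    }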
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 50dcb68d8070..ab609255a0f3 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -780,6 +780,9 @@ static int __init balloon_init(void)
}
#endif
+ /* Init the xen-balloon driver. */
+ xen_balloon_init();
+
return 0;
}
subsys_initcall(balloon_init);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b241bfa529ce..bae1f5d36c26 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -343,14 +343,6 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
info->cpu = cpu;
}
-static void xen_evtchn_mask_all(void)
-{
- unsigned int evtchn;
-
- for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
- mask_evtchn(evtchn);
-}
-
/**
* notify_remote_via_irq - send event to remote end of event channel via irq
* @irq: irq of event channel to send event to
@@ -1573,7 +1565,6 @@ void xen_irq_resume(void)
struct irq_info *info;
/* New event-channel space is not 'live' yet. */
- xen_evtchn_mask_all();
xen_evtchn_resume();
/* No IRQ <-> event-channel mappings. */
@@ -1681,6 +1672,7 @@ module_param(fifo_events, bool, 0);
void __init xen_init_IRQ(void)
{
int ret = -EINVAL;
+ unsigned int evtchn;
if (fifo_events)
ret = xen_evtchn_fifo_init();
@@ -1692,7 +1684,8 @@ void __init xen_init_IRQ(void)
BUG_ON(!evtchn_to_irq);
/* No event channels are 'live' right now. */
- xen_evtchn_mask_all();
+ for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
+ mask_evtchn(evtchn);
pirq_needs_eoi = pirq_needs_eoi_flag;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index d6786b87e13b..2c6a9114d332 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -42,6 +42,7 @@
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
+#include <linux/ratelimit.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -1072,8 +1073,14 @@ static int gnttab_expand(unsigned int req_entries)
cur = nr_grant_frames;
extra = ((req_entries + (grefs_per_grant_frame-1)) /
grefs_per_grant_frame);
- if (cur + extra > gnttab_max_grant_frames())
+ if (cur + extra > gnttab_max_grant_frames()) {
+ pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
+ " cur=%u extra=%u limit=%u"
+ " gnttab_free_count=%u req_entries=%u\n",
+ cur, extra, gnttab_max_grant_frames(),
+ gnttab_free_count, req_entries);
return -ENOSPC;
+ }
rc = gnttab_map(cur, cur + extra - 1);
if (rc == 0)
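pr_warn_ratelimited() is used above because gnttab_expand() can fail repeatedly in a tight loop once the limit is hit; the kernel's default rate limit allows a burst of 10 messages per 5 seconds and silently drops the rest. A crude userspace analogue of that limiter:

    #include <stdio.h>
    #include <time.h>

    /* Allow a burst of 10 messages per 5-second window, like the
     * kernel's DEFAULT_RATELIMIT_BURST / DEFAULT_RATELIMIT_INTERVAL. */
    static int ratelimit(void)
    {
        static time_t window_start;
        static int printed;
        time_t now = time(NULL);

        if (now - window_start >= 5) {
            window_start = now;
            printed = 0;
        }
        return printed++ < 10;
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            if (ratelimit())
                fprintf(stderr, "max_grant_frames reached (%d)\n", i);
        return 0;
    }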
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e7715cb62eef..e89136ab851e 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -59,6 +59,8 @@ static void watch_target(struct xenbus_watch *watch,
{
unsigned long long new_target;
int err;
+ static bool watch_fired;
+ static long target_diff;
err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
if (err != 1) {
@@ -69,7 +71,14 @@ static void watch_target(struct xenbus_watch *watch,
/* The given memory/target value is in KiB, so it needs converting to
* pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
*/
- balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+ new_target >>= PAGE_SHIFT - 10;
+ if (watch_fired) {
+ balloon_set_new_target(new_target - target_diff);
+ return;
+ }
+
+ watch_fired = true;
+ target_diff = new_target - balloon_stats.target_pages;
}
static struct xenbus_watch target_watch = {
.node = "memory/target",
@@ -94,22 +103,15 @@ static struct notifier_block xenstore_notifier = {
.notifier_call = balloon_init_watcher,
};
-static int __init balloon_init(void)
+void xen_balloon_init(void)
{
- if (!xen_domain())
- return -ENODEV;
-
- pr_info("Initialising balloon driver\n");
-
register_balloon(&balloon_dev);
register_xen_selfballooning(&balloon_dev);
register_xenstore_notifier(&xenstore_notifier);
-
- return 0;
}
-subsys_initcall(balloon_init);
+EXPORT_SYMBOL_GPL(xen_balloon_init);
#define BALLOON_SHOW(name, format, args...) \
static ssize_t show_##name(struct device *dev, \
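The watch_target() logic above is easiest to see with numbers: the first watch event only records the offset between Xen's requested target and the driver's current target_pages, and every later event subtracts that offset, so memory the toolstack assigned at boot but the kernel never populated is not onlined by mistake. A worked example with made-up values:

    #include <stdbool.h>
    #include <stdio.h>

    static long current_target = 1000;   /* balloon_stats.target_pages */
    static bool watch_fired;
    static long target_diff;

    static void watch_target(long new_target /* already in pages */)
    {
        if (watch_fired) {
            current_target = new_target - target_diff;
            return;
        }
        watch_fired = true;              /* first event: record offset only */
        target_diff = new_target - current_target;
    }

    int main(void)
    {
        watch_target(1200);              /* diff = 200, no resize yet */
        watch_target(1300);              /* target = 1300 - 200 */
        printf("target = %ld pages\n", current_target);   /* 1100 */
        return 0;
    }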
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 66620713242a..a67e955cacd1 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -151,8 +151,8 @@ static unsigned long frontswap_inertia_counter;
static void frontswap_selfshrink(void)
{
static unsigned long cur_frontswap_pages;
- static unsigned long last_frontswap_pages;
- static unsigned long tgt_frontswap_pages;
+ unsigned long last_frontswap_pages;
+ unsigned long tgt_frontswap_pages;
last_frontswap_pages = cur_frontswap_pages;
cur_frontswap_pages = frontswap_curr_pages();
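The xen-selfballoon fix works because both variables are assigned before their first use on every invocation of frontswap_selfshrink(), so static storage bought nothing and wrongly suggested cross-call state; only cur_frontswap_pages genuinely persists between calls. In miniature:

    #include <stdio.h>

    static void selfshrink(unsigned long now)
    {
        static unsigned long cur;        /* genuinely persists across calls */
        unsigned long last = cur;        /* recomputed on every call */

        cur = now;
        printf("last=%lu cur=%lu\n", last, cur);
    }

    int main(void)
    {
        selfshrink(100);                 /* last=0   cur=100 */
        selfshrink(80);                  /* last=100 cur=80  */
        return 0;
    }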
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 967f069385d0..71ddfb4cf61c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -87,7 +87,6 @@ static int __init xenfs_init(void)
if (xen_domain())
return register_filesystem(&xenfs_type);
- pr_info("not registering filesystem on non-xen platform\n");
return 0;
}