319 files changed, 11583 insertions, 3656 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 7df3134ebc0e..4890d94ec062 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1610,7 +1610,7 @@ int max_width, max_height;</synopsis>
         The connector is then registered with a call to
         <function>drm_connector_init</function> with a pointer to the connector
         functions and a connector type, and exposed through sysfs with a call to
-        <function>drm_sysfs_connector_add</function>.
+        <function>drm_connector_register</function>.
       </para>
       <para>
         Supported connector types are
@@ -1768,7 +1768,7 @@ int max_width, max_height;</synopsis>
         (<function>drm_encoder_cleanup</function>) and connectors
         (<function>drm_connector_cleanup</function>). Furthermore, connectors that
         have been added to sysfs must be removed by a call to
-        <function>drm_sysfs_connector_remove</function> before calling
+        <function>drm_connector_unregister</function> before calling
         <function>drm_connector_cleanup</function>.
       </para>
       <para>
@@ -1813,7 +1813,7 @@ void intel_crt_init(struct drm_device *dev)
     drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
     drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);

-    drm_sysfs_connector_add(connector);
+    drm_connector_register(connector);
 }]]></programlisting>
       <para>
         In the example above (taken from the i915 driver), a CRTC, connector and
@@ -2338,6 +2338,12 @@ void intel_crt_init(struct drm_device *dev)
 !Edrivers/gpu/drm/drm_dp_helper.c
     </sect2>
     <sect2>
+      <title>Display Port MST Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
+!Iinclude/drm/drm_dp_mst_helper.h
+!Edrivers/gpu/drm/drm_dp_mst_topology.c
+    </sect2>
+    <sect2>
       <title>EDID Helper Functions Reference</title>
 !Edrivers/gpu/drm/drm_edid.c
     </sect2>
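The rename above is API-level, not just documentation: registration now covers more than sysfs (the drm_crtc.c hunks later in this diff put a debugfs interface behind the same call). A minimal sketch of the driver-side pattern under the new names; my_output_init, my_connector_funcs and my_connector_helper_funcs are hypothetical driver tables, not part of this patch:

    static int my_output_init(struct drm_device *dev,
                              struct drm_connector *connector)
    {
        int ret;

        ret = drm_connector_init(dev, connector, &my_connector_funcs,
                                 DRM_MODE_CONNECTOR_VGA);
        if (ret)
            return ret;

        drm_connector_helper_add(connector, &my_connector_helper_funcs);

        /* Exposes the userspace interfaces (sysfs, and now debugfs). */
        return drm_connector_register(connector);
    }

    static void my_output_fini(struct drm_connector *connector)
    {
        /* Unregister before cleanup, per the documented ordering. */
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
    }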
diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
index 00e425faa2fd..78c9a7b2b58f 100644
--- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
+++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
@@ -47,7 +47,6 @@ use constant HIGH_KSWAPD_REWAKEUP => 21;
 use constant HIGH_NR_SCANNED => 22;
 use constant HIGH_NR_TAKEN => 23;
 use constant HIGH_NR_RECLAIMED => 24;
-use constant HIGH_NR_CONTIG_DIRTY => 25;

 my %perprocesspid;
 my %perprocess;
@@ -105,7 +104,7 @@ my $regex_direct_end_default = 'nr_reclaimed=([0-9]*)';
 my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
 my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
 my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
-my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) contig_taken=([0-9]*) contig_dirty=([0-9]*) contig_failed=([0-9]*)';
+my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) file=([0-9]*)';
 my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) zid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
 my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
 my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
@@ -200,7 +199,7 @@ $regex_lru_isolate = generate_traceevent_regex(
             $regex_lru_isolate_default,
             "isolate_mode", "order",
             "nr_requested", "nr_scanned", "nr_taken",
-            "contig_taken", "contig_dirty", "contig_failed");
+            "file");
 $regex_lru_shrink_inactive = generate_traceevent_regex(
             "vmscan/mm_vmscan_lru_shrink_inactive",
             $regex_lru_shrink_inactive_default,
@@ -375,7 +374,6 @@ EVENT_PROCESS:
             }
             my $isolate_mode = $1;
             my $nr_scanned = $4;
-            my $nr_contig_dirty = $7;

             # To closer match vmstat scanning statistics, only count isolate_both
             # and isolate_inactive as scanning. isolate_active is rotation
@@ -385,7 +383,6 @@ EVENT_PROCESS:
             if ($isolate_mode != 2) {
                 $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
             }
-            $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
         } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
             $details = $6;
             if ($details !~ /$regex_lru_shrink_inactive/o) {
@@ -539,13 +536,6 @@ sub dump_stats {
                 }
             }
         }
-        if ($stats{$process_pid}->{HIGH_NR_CONTIG_DIRTY}) {
-            print "      ";
-            my $count = $stats{$process_pid}->{HIGH_NR_CONTIG_DIRTY};
-            if ($count != 0) {
-                print "contig-dirty=$count ";
-            }
-        }
         print "\n";
     }
diff --git a/MAINTAINERS b/MAINTAINERS
index 702ca10a5a6c..6813d0aa5ecf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -943,16 +943,10 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
 F: arch/arm/mach-imx/
+F: arch/arm/mach-mxs/
 F: arch/arm/boot/dts/imx*
 F: arch/arm/configs/imx*_defconfig

-ARM/FREESCALE MXS ARM ARCHITECTURE
-M: Shawn Guo <shawn.guo@linaro.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
-F: arch/arm/mach-mxs/
-
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 M: Lennert Buytenhek <kernel@wantstofly.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1052,9 +1046,33 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-keystone/
-F: drivers/clk/keystone/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git

+ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
+M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/clk/keystone/
+
+ARM/TEXAS INSTRUMENT KEYSTONE CLOCKSOURCE
+M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/clocksource/timer-keystone.c
+
+ARM/TEXAS INSTRUMENT KEYSTONE RESET DRIVER
+M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/power/reset/keystone-reset.c
+
+ARM/TEXAS INSTRUMENT AEMIF/EMIF DRIVERS
+M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/memory/*emif*
+
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M: Lennert Buytenhek <kernel@wantstofly.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -5512,10 +5530,11 @@ S: Maintained
 F: arch/arm/mach-lpc32xx/

 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
-M: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
-M: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
-M: support@lsi.com
-L: DL-MPTFusionLinux@lsi.com
+M: Nagalakshmi Nandigama <nagalakshmi.nandigama@avagotech.com>
+M: Praveen Krishnamoorthy <praveen.krishnamoorthy@avagotech.com>
+M: Sreekanth Reddy <sreekanth.reddy@avagotech.com>
+M: Abhijit Mahajan <abhijit.mahajan@avagotech.com>
+L: MPT-FusionLinux.pdl@avagotech.com
 L: linux-scsi@vger.kernel.org
 W: http://www.lsilogic.com/support
 S: Supported
@@ -9406,12 +9425,6 @@ S: Maintained
 F: drivers/usb/host/isp116x*
 F: include/linux/usb/isp116x.h

-USB KAWASAKI LSI DRIVER
-M: Oliver Neukum <oliver@neukum.org>
-L: linux-usb@vger.kernel.org
-S: Maintained
-F: drivers/usb/serial/kl5kusb105.*
-
 USB MASS STORAGE DRIVER
 M: Matthew Dharm <mdharm-usb@one-eyed-alien.net>
 L: linux-usb@vger.kernel.org
@@ -9439,12 +9452,6 @@ S: Maintained
 F: Documentation/usb/ohci.txt
 F: drivers/usb/host/ohci*

-USB OPTION-CARD DRIVER
-M: Matthias Urlichs <smurf@smurf.noris.de>
-L: linux-usb@vger.kernel.org
-S: Maintained
-F: drivers/usb/serial/option.c
-
 USB PEGASUS DRIVER
 M: Petko Manolov <petkan@nucleusys.com>
 L: linux-usb@vger.kernel.org
@@ -9477,7 +9484,7 @@ S: Maintained
 F: drivers/net/usb/rtl8150.c

 USB SERIAL SUBSYSTEM
-M: Johan Hovold <jhovold@gmail.com>
+M: Johan Hovold <johan@kernel.org>
 L: linux-usb@vger.kernel.org
 S: Maintained
 F: Documentation/usb/usb-serial.txt
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Shuffling Zombie Juror

 # *DOCUMENTATION*
@@ -126,7 +126,10 @@ PHONY += $(MAKECMDGOALS) sub-make
 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
     @:

+# Fake the "Entering directory" message once, so that IDEs/editors are
+# able to understand relative filenames.
 sub-make: FORCE
+    @echo "make[1]: Entering directory \`$(KBUILD_OUTPUT)'"
     $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
     KBUILD_SRC=$(CURDIR) \
     KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile \
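The Makefile comment above explains itself, but the mechanism is worth spelling out: editors and IDEs resolve relative filenames in build diagnostics by tracking GNU make's "Entering/Leaving directory" messages, and an O= build otherwise never prints one for $(KBUILD_OUTPUT). A toy consumer of that convention (plain C, hypothetical input; the heuristic is deliberately crude):

    #include <stdio.h>
    #include <string.h>

    /* Feed make output on stdin; prefixes relative-looking diagnostics
     * with the directory make last claimed to enter. */
    int main(void)
    {
        char line[1024], dir[512] = ".";

        while (fgets(line, sizeof(line), stdin)) {
            char entered[512];

            if (sscanf(line, "make[%*d]: Entering directory '%511[^']'",
                       entered) == 1 ||
                sscanf(line, "make[%*d]: Entering directory `%511[^']'",
                       entered) == 1)
                strcpy(dir, entered);       /* remember current dir */
            else if (strchr(line, ':') && line[0] != '/')
                printf("%s/%s", dir, line); /* resolve relative path */
            else
                fputs(line, stdout);
        }
        return 0;
    }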
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 19f1f7e87597..90098f98a5c8 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -319,6 +319,10 @@
     phy-mode = "rmii";
 };

+&phy_sel {
+    rmii-clock-ext;
+};
+
 &i2c0 {
     status = "okay";
     pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index c29945e07c5a..80127638b379 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -773,7 +773,6 @@
             clocks = <&qspi_gfclk_div>;
             clock-names = "fck";
             num-cs = <4>;
-            interrupts = <0 343 0x4>;
             status = "disabled";
         };
@@ -984,6 +983,17 @@
             #size-cells = <1>;
             status = "disabled";
         };
+
+        atl: atl@4843c000 {
+            compatible = "ti,dra7-atl";
+            reg = <0x4843c000 0x3ff>;
+            ti,hwmods = "atl";
+            ti,provided-clocks = <&atl_clkin0_ck>, <&atl_clkin1_ck>,
+                                 <&atl_clkin2_ck>, <&atl_clkin3_ck>;
+            clocks = <&atl_gfclk_mux>;
+            clock-names = "fck";
+            status = "disabled";
+        };
     };
 };
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index b03cfe49d22b..c90c76de84d6 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -10,26 +10,26 @@
 &cm_core_aon_clocks {
     atl_clkin0_ck: atl_clkin0_ck {
         #clock-cells = <0>;
-        compatible = "fixed-clock";
-        clock-frequency = <0>;
+        compatible = "ti,dra7-atl-clock";
+        clocks = <&atl_gfclk_mux>;
     };

     atl_clkin1_ck: atl_clkin1_ck {
         #clock-cells = <0>;
-        compatible = "fixed-clock";
-        clock-frequency = <0>;
+        compatible = "ti,dra7-atl-clock";
+        clocks = <&atl_gfclk_mux>;
     };

     atl_clkin2_ck: atl_clkin2_ck {
         #clock-cells = <0>;
-        compatible = "fixed-clock";
-        clock-frequency = <0>;
+        compatible = "ti,dra7-atl-clock";
+        clocks = <&atl_gfclk_mux>;
     };

     atl_clkin3_ck: atl_clkin3_ck {
         #clock-cells = <0>;
-        compatible = "fixed-clock";
-        clock-frequency = <0>;
+        compatible = "ti,dra7-atl-clock";
+        clocks = <&atl_gfclk_mux>;
     };

     hdmi_clkin_ck: hdmi_clkin_ck {
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index cf0be662297e..1becefce821b 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -251,6 +251,11 @@
             codec {
             };
         };
+
+        twl_power: power {
+            compatible = "ti,twl4030-power-beagleboard-xm", "ti,twl4030-power-idle-osc-off";
+            ti,use_poweroff;
+        };
     };
 };
@@ -301,6 +306,7 @@
 };

 &uart3 {
+    interrupts-extended = <&intc 74 &omap3_pmx_core OMAP3_UART3_RX>;
     pinctrl-names = "default";
     pinctrl-0 = <&uart3_pins>;
 };
diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi
index 8ae8f007c8ad..c8747c7f1cc8 100644
--- a/arch/arm/boot/dts/omap3-evm-common.dtsi
+++ b/arch/arm/boot/dts/omap3-evm-common.dtsi
@@ -50,6 +50,13 @@
     gpios = <&twl_gpio 18 GPIO_ACTIVE_LOW>;
 };

+&twl {
+    twl_power: power {
+        compatible = "ti,twl4030-power-omap3-evm", "ti,twl4030-power-idle";
+        ti,use_poweroff;
+    };
+};
+
 &i2c2 {
     clock-frequency = <400000>;
 };
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index ae8ae3f4f9bf..1fe45d1f75ec 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -351,6 +351,11 @@
         compatible = "ti,twl4030-audio";
         ti,enable-vibra = <1>;
     };
+
+    twl_power: power {
+        compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
+        ti,use_poweroff;
+    };
 };

 &twl_keypad {
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 3bfda16c8b52..a4ed54988866 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -45,7 +45,6 @@
             operating-points = <
                 /* kHz    uV */
-                500000  880000
                 1000000 1060000
                 1500000 1250000
             >;
diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig
index 9d13dae99125..4bf72264b175 100644
--- a/arch/arm/configs/bcm_defconfig
+++ b/arch/arm/configs/bcm_defconfig
@@ -94,10 +94,10 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_BCM_KONA=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index be1a3455a9fe..534836497998 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -223,12 +223,12 @@ CONFIG_POWER_RESET_GPIO=y
 CONFIG_POWER_RESET_SUN6I=y
 CONFIG_SENSORS_LM90=y
 CONFIG_THERMAL=y
-CONFIG_DOVE_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_ORION_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
 CONFIG_MFD_AS3722=y
+CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_MAX8907=y
@@ -240,6 +240,7 @@ CONFIG_MFD_TPS65910=y
 CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
 CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_AS3722=y
+CONFIG_REGULATOR_BCM590XX=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_MAX8907=y
 CONFIG_REGULATOR_PALMAS=y
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
index 2ecb828e4a8b..1636cdbef01a 100644
--- a/arch/arm/mach-mvebu/Makefile
+++ b/arch/arm/mach-mvebu/Makefile
@@ -7,7 +7,7 @@ CFLAGS_pmsu.o := -march=armv7-a
 obj-y += system-controller.o mvebu-soc-id.o

 ifeq ($(CONFIG_MACH_MVEBU_V7),y)
-obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o
+obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
 obj-$(CONFIG_SMP) += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
 endif
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index 8bb742fdf5ca..b2524d689f21 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -23,6 +23,7 @@
 #include <linux/mbus.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
+#include <linux/irqchip.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -71,17 +72,23 @@ static int armada_375_external_abort_wa(unsigned long addr, unsigned int fsr,
     return 1;
 }

-static void __init mvebu_timer_and_clk_init(void)
+static void __init mvebu_init_irq(void)
 {
-    of_clk_init(NULL);
-    clocksource_of_init();
+    irqchip_init();
     mvebu_scu_enable();
     coherency_init();
     BUG_ON(mvebu_mbus_dt_init(coherency_available()));
+}
+
+static void __init external_abort_quirk(void)
+{
+    u32 dev, rev;

-    if (of_machine_is_compatible("marvell,armada375"))
-        hook_fault_code(16 + 6, armada_375_external_abort_wa, SIGBUS, 0,
-                "imprecise external abort");
+    if (mvebu_get_soc_id(&dev, &rev) == 0 && rev > ARMADA_375_Z1_REV)
+        return;
+
+    hook_fault_code(16 + 6, armada_375_external_abort_wa, SIGBUS, 0,
+            "imprecise external abort");
 }

 static void __init i2c_quirk(void)
@@ -169,8 +176,10 @@ static void __init mvebu_dt_init(void)
 {
     if (of_machine_is_compatible("plathome,openblocks-ax3-4"))
         i2c_quirk();
-    if (of_machine_is_compatible("marvell,a375-db"))
+    if (of_machine_is_compatible("marvell,a375-db")) {
+        external_abort_quirk();
         thermal_quirk();
+    }

     of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
@@ -185,7 +194,7 @@ DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
     .l2c_aux_mask   = ~0,
     .smp        = smp_ops(armada_xp_smp_ops),
     .init_machine   = mvebu_dt_init,
-    .init_time  = mvebu_timer_and_clk_init,
+    .init_irq   = mvebu_init_irq,
     .restart    = mvebu_restart,
     .dt_compat  = armada_370_xp_dt_compat,
 MACHINE_END
@@ -198,7 +207,7 @@ static const char * const armada_375_dt_compat[] = {
 DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
     .l2c_aux_val    = 0,
     .l2c_aux_mask   = ~0,
-    .init_time  = mvebu_timer_and_clk_init,
+    .init_irq   = mvebu_init_irq,
     .init_machine   = mvebu_dt_init,
     .restart    = mvebu_restart,
     .dt_compat  = armada_375_dt_compat,
@@ -213,7 +222,7 @@ static const char * const armada_38x_dt_compat[] = {
 DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
     .l2c_aux_val    = 0,
     .l2c_aux_mask   = ~0,
-    .init_time  = mvebu_timer_and_clk_init,
+    .init_irq   = mvebu_init_irq,
     .restart    = mvebu_restart,
     .dt_compat  = armada_38x_dt_compat,
 MACHINE_END
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 53a55c8520bf..a1d407c0febe 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -66,6 +66,8 @@ static void __iomem *pmsu_mp_base;
 extern void ll_disable_coherency(void);
 extern void ll_enable_coherency(void);

+extern void armada_370_xp_cpu_resume(void);
+
 static struct platform_device armada_xp_cpuidle_device = {
     .name = "cpuidle-armada-370-xp",
 };
@@ -140,13 +142,6 @@ static void armada_370_xp_pmsu_enable_l2_powerdown_onidle(void)
     writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
 }

-static void armada_370_xp_cpu_resume(void)
-{
-    asm volatile("bl ll_add_cpu_to_smp_group\n\t"
-             "bl ll_enable_coherency\n\t"
-             "b cpu_resume\n\t");
-}
-
 /* No locking is needed because we only access per-CPU registers */
 void armada_370_xp_pmsu_idle_prepare(bool deepidle)
 {
diff --git a/arch/arm/mach-mvebu/pmsu_ll.S b/arch/arm/mach-mvebu/pmsu_ll.S
new file mode 100644
index 000000000000..fc3de68d8c54
--- /dev/null
+++ b/arch/arm/mach-mvebu/pmsu_ll.S
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * This is the entry point through which CPUs exiting cpuidle deep
+ * idle state are going.
+ */
+ENTRY(armada_370_xp_cpu_resume)
+ARM_BE8(setend be )         @ go BE8 if entered LE
+    bl  ll_add_cpu_to_smp_group
+    bl  ll_enable_coherency
+    b   cpu_resume
+ENDPROC(armada_370_xp_cpu_resume)
+
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 8421f38cf445..8ca99e9321e3 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -110,14 +110,16 @@ obj-y += prm_common.o cm_common.o
 obj-$(CONFIG_ARCH_OMAP2)    += prm2xxx_3xxx.o prm2xxx.o cm2xxx.o
 obj-$(CONFIG_ARCH_OMAP3)    += prm2xxx_3xxx.o prm3xxx.o cm3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)    += vc3xxx_data.o vp3xxx_data.o
-obj-$(CONFIG_SOC_AM33XX)    += prm33xx.o cm33xx.o
 omap-prcm-4-5-common        =  cminst44xx.o cm44xx.o prm44xx.o \
                    prcm_mpu44xx.o prminst44xx.o \
                    vc44xx_data.o vp44xx_data.o
 obj-$(CONFIG_ARCH_OMAP4)    += $(omap-prcm-4-5-common)
 obj-$(CONFIG_SOC_OMAP5)     += $(omap-prcm-4-5-common)
 obj-$(CONFIG_SOC_DRA7XX)    += $(omap-prcm-4-5-common)
-obj-$(CONFIG_SOC_AM43XX)    += $(omap-prcm-4-5-common)
+am33xx-43xx-prcm-common     += prm33xx.o cm33xx.o
+obj-$(CONFIG_SOC_AM33XX)    += $(am33xx-43xx-prcm-common)
+obj-$(CONFIG_SOC_AM43XX)    += $(omap-prcm-4-5-common) \
+                   $(am33xx-43xx-prcm-common)

 # OMAP voltage domains
 voltagedomain-common        := voltage.o vc.o vp.o
diff --git a/arch/arm/mach-omap2/cm33xx.h b/arch/arm/mach-omap2/cm33xx.h
index 15a778ce7707..bd2441790779 100644
--- a/arch/arm/mach-omap2/cm33xx.h
+++ b/arch/arm/mach-omap2/cm33xx.h
@@ -380,7 +380,7 @@ void am33xx_cm_clkdm_disable_hwsup(u16 inst, u16 cdoffs);
 void am33xx_cm_clkdm_force_sleep(u16 inst, u16 cdoffs);
 void am33xx_cm_clkdm_force_wakeup(u16 inst, u16 cdoffs);

-#ifdef CONFIG_SOC_AM33XX
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
 extern int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs,
                     u16 clkctrl_offs);
 extern void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs,
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index a373d508799a..b2d252bf4a53 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -248,7 +248,6 @@ static inline void __iomem *omap4_get_scu_base(void)
 }
 #endif

-extern void __init gic_init_irq(void);
 extern void gic_dist_disable(void);
 extern void gic_dist_enable(void);
 extern bool gic_dist_disabled(void);
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 43969da5d50b..d42022f2a71e 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -649,6 +649,18 @@ void __init dra7xxx_check_revision(void)
         }
         break;

+    case 0xb9bc:
+        switch (rev) {
+        case 0:
+            omap_revision = DRA722_REV_ES1_0;
+            break;
+        default:
+            /* If we have no new revisions */
+            omap_revision = DRA722_REV_ES1_0;
+            break;
+        }
+        break;
+
     default:
         /* Unknown default to latest silicon rev as default*/
         pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%d)\n",
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index fd88edeb027f..f62f7537d899 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -183,8 +183,10 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
         m0_entry = mux->muxnames[0];

         /* First check for full name in mode0.muxmode format */
-        if (mode0_len && strncmp(muxname, m0_entry, mode0_len))
-            continue;
+        if (mode0_len)
+            if (strncmp(muxname, m0_entry, mode0_len) ||
+                (strlen(m0_entry) != mode0_len))
+                continue;

         /* Then check for muxmode only */
         for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
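The mux.c hunk above fixes a prefix-matching bug: strncmp(a, b, n) compares at most n bytes, so a request for a short mode0 name also matched any longer entry sharing that prefix; the added strlen() test demands an exact-length match. A standalone illustration of the pitfall (hypothetical names, plain C):

    #include <stdio.h>
    #include <string.h>

    /* Match only when entry equals name exactly, not merely starts with it. */
    static int mux_name_matches(const char *entry, const char *name)
    {
        size_t len = strlen(name);

        /* strncmp() alone would accept "dss_data0" for "dss_data". */
        return strncmp(name, entry, len) == 0 && strlen(entry) == len;
    }

    int main(void)
    {
        printf("%d\n", mux_name_matches("dss_data0", "dss_data")); /* 0 */
        printf("%d\n", mux_name_matches("dss_data", "dss_data"));  /* 1 */
        return 0;
    }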
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 326cd982a3cb..539e8106eb96 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -102,26 +102,6 @@ void __init omap_barriers_init(void)
 {}
 #endif

-void __init gic_init_irq(void)
-{
-    void __iomem *omap_irq_base;
-
-    /* Static mapping, never released */
-    gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
-    BUG_ON(!gic_dist_base_addr);
-
-    twd_base = ioremap(OMAP44XX_LOCAL_TWD_BASE, SZ_4K);
-    BUG_ON(!twd_base);
-
-    /* Static mapping, never released */
-    omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
-    BUG_ON(!omap_irq_base);
-
-    omap_wakeupgen_init();
-
-    gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
-}
-
 void gic_dist_disable(void)
 {
     if (gic_dist_base_addr)
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index f7bb435bb543..6c074f37cdd2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -4251,9 +4251,9 @@ void __init omap_hwmod_init(void)
         soc_ops.enable_module = _omap4_enable_module;
         soc_ops.disable_module = _omap4_disable_module;
         soc_ops.wait_target_ready = _omap4_wait_target_ready;
-        soc_ops.assert_hardreset = _omap4_assert_hardreset;
-        soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
-        soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
+        soc_ops.assert_hardreset = _am33xx_assert_hardreset;
+        soc_ops.deassert_hardreset = _am33xx_deassert_hardreset;
+        soc_ops.is_hardreset_asserted = _am33xx_is_hardreset_asserted;
         soc_ops.init_clkdm = _init_clkdm;
     } else if (soc_is_am33xx()) {
         soc_ops.enable_module = _am33xx_enable_module;
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 290213f2cbe3..1103aa0e0d29 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -2020,6 +2020,77 @@ static struct omap_hwmod omap54xx_wd_timer2_hwmod = {
     },
 };

+/*
+ * 'ocp2scp' class
+ * bridge to transform ocp interface protocol to scp (serial control port)
+ * protocol
+ */
+/* ocp2scp3 */
+static struct omap_hwmod omap54xx_ocp2scp3_hwmod;
+/* l4_cfg -> ocp2scp3 */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__ocp2scp3 = {
+    .master     = &omap54xx_l4_cfg_hwmod,
+    .slave      = &omap54xx_ocp2scp3_hwmod,
+    .clk        = "l4_root_clk_div",
+    .user       = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod omap54xx_ocp2scp3_hwmod = {
+    .name       = "ocp2scp3",
+    .class      = &omap54xx_ocp2scp_hwmod_class,
+    .clkdm_name = "l3init_clkdm",
+    .prcm = {
+        .omap4 = {
+            .clkctrl_offs = OMAP54XX_CM_L3INIT_OCP2SCP3_CLKCTRL_OFFSET,
+            .context_offs = OMAP54XX_RM_L3INIT_OCP2SCP3_CONTEXT_OFFSET,
+            .modulemode   = MODULEMODE_HWCTRL,
+        },
+    },
+};
+
+/*
+ * 'sata' class
+ * sata: serial ata interface gen2 compliant ( 1 rx/ 1 tx)
+ */
+
+static struct omap_hwmod_class_sysconfig omap54xx_sata_sysc = {
+    .sysc_offs  = 0x0000,
+    .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
+    .idlemodes  = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+               SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+               MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+    .sysc_fields    = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap54xx_sata_hwmod_class = {
+    .name   = "sata",
+    .sysc   = &omap54xx_sata_sysc,
+};
+
+/* sata */
+static struct omap_hwmod omap54xx_sata_hwmod = {
+    .name       = "sata",
+    .class      = &omap54xx_sata_hwmod_class,
+    .clkdm_name = "l3init_clkdm",
+    .flags      = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+    .main_clk   = "func_48m_fclk",
+    .mpu_rt_idx = 1,
+    .prcm = {
+        .omap4 = {
+            .clkctrl_offs = OMAP54XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
+            .context_offs = OMAP54XX_RM_L3INIT_SATA_CONTEXT_OFFSET,
+            .modulemode   = MODULEMODE_SWCTRL,
+        },
+    },
+};
+
+/* l4_cfg -> sata */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__sata = {
+    .master     = &omap54xx_l4_cfg_hwmod,
+    .slave      = &omap54xx_sata_hwmod,
+    .clk        = "l3_iclk_div",
+    .user       = OCP_USER_MPU | OCP_USER_SDMA,
+};

 /*
  * Interfaces
@@ -2765,6 +2836,8 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
     &omap54xx_l4_cfg__usb_tll_hs,
     &omap54xx_l4_cfg__usb_otg_ss,
     &omap54xx_l4_wkup__wd_timer2,
+    &omap54xx_l4_cfg__ocp2scp3,
+    &omap54xx_l4_cfg__sata,
     NULL,
 };
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index de2a34c423a7..01ca8086fb6c 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -462,6 +462,7 @@ IS_OMAP_TYPE(3430, 0x3430)
 #define DRA7XX_CLASS        0x07000000
 #define DRA752_REV_ES1_0    (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8))
 #define DRA752_REV_ES1_1    (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8))
+#define DRA722_REV_ES1_0    (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))

 void omap2xxx_check_revision(void);
 void omap3xxx_check_revision(void);
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 3f9587bb51f6..b6085084e0ff 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -12,8 +12,81 @@

 #include <linux/clk-provider.h>
 #include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/reboot.h>

 #include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/system_misc.h>
+
+#define SUN4I_WATCHDOG_CTRL_REG         0x00
+#define SUN4I_WATCHDOG_CTRL_RESTART     BIT(0)
+#define SUN4I_WATCHDOG_MODE_REG         0x04
+#define SUN4I_WATCHDOG_MODE_ENABLE      BIT(0)
+#define SUN4I_WATCHDOG_MODE_RESET_ENABLE    BIT(1)
+
+#define SUN6I_WATCHDOG1_IRQ_REG         0x00
+#define SUN6I_WATCHDOG1_CTRL_REG        0x10
+#define SUN6I_WATCHDOG1_CTRL_RESTART        BIT(0)
+#define SUN6I_WATCHDOG1_CONFIG_REG      0x14
+#define SUN6I_WATCHDOG1_CONFIG_RESTART      BIT(0)
+#define SUN6I_WATCHDOG1_CONFIG_IRQ      BIT(1)
+#define SUN6I_WATCHDOG1_MODE_REG        0x18
+#define SUN6I_WATCHDOG1_MODE_ENABLE     BIT(0)
+
+static void __iomem *wdt_base;
+
+static void sun4i_restart(enum reboot_mode mode, const char *cmd)
+{
+    if (!wdt_base)
+        return;
+
+    /* Enable timer and set reset bit in the watchdog */
+    writel(SUN4I_WATCHDOG_MODE_ENABLE | SUN4I_WATCHDOG_MODE_RESET_ENABLE,
+           wdt_base + SUN4I_WATCHDOG_MODE_REG);
+
+    /*
+     * Restart the watchdog. The default (and lowest) interval
+     * value for the watchdog is 0.5s.
+     */
+    writel(SUN4I_WATCHDOG_CTRL_RESTART, wdt_base + SUN4I_WATCHDOG_CTRL_REG);
+
+    while (1) {
+        mdelay(5);
+        writel(SUN4I_WATCHDOG_MODE_ENABLE | SUN4I_WATCHDOG_MODE_RESET_ENABLE,
+               wdt_base + SUN4I_WATCHDOG_MODE_REG);
+    }
+}
+
+static struct of_device_id sunxi_restart_ids[] = {
+    { .compatible = "allwinner,sun4i-a10-wdt" },
+    { /*sentinel*/ }
+};
+
+static void sunxi_setup_restart(void)
+{
+    struct device_node *np;
+
+    np = of_find_matching_node(NULL, sunxi_restart_ids);
+    if (WARN(!np, "unable to setup watchdog restart"))
+        return;
+
+    wdt_base = of_iomap(np, 0);
+    WARN(!wdt_base, "failed to map watchdog base address");
+}
+
+static void __init sunxi_dt_init(void)
+{
+    sunxi_setup_restart();
+
+    of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}

 static const char * const sunxi_board_dt_compat[] = {
     "allwinner,sun4i-a10",
@@ -23,7 +96,9 @@ static const char * const sunxi_board_dt_compat[] = {
 };

 DT_MACHINE_START(SUNXI_DT, "Allwinner A1X (Device Tree)")
+    .init_machine   = sunxi_dt_init,
     .dt_compat  = sunxi_board_dt_compat,
+    .restart    = sun4i_restart,
 MACHINE_END

 static const char * const sun6i_board_dt_compat[] = {
@@ -51,5 +126,7 @@ static const char * const sun7i_board_dt_compat[] = {
 };

 DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
+    .init_machine   = sunxi_dt_init,
     .dt_compat  = sun7i_board_dt_compat,
+    .restart    = sun4i_restart,
 MACHINE_END
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 579702086488..e0ccceb317d9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -292,7 +292,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pmd_sect(pmd)       ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                  PMD_TYPE_SECT)

-#ifdef ARM64_64K_PAGES
+#ifdef CONFIG_ARM64_64K_PAGES
 #define pud_sect(pud)       (0)
 #else
 #define pud_sect(pud)       ((pud_val(pud) & PUD_TYPE_MASK) == \
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index a429b5940be2..501000fadb6f 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -21,6 +21,10 @@

 #include <uapi/asm/ptrace.h>

+/* Current Exception Level values, as contained in CurrentEL */
+#define CurrentEL_EL1   (1 << 2)
+#define CurrentEL_EL2   (2 << 2)
+
 /* AArch32-specific ptrace requests */
 #define COMPAT_PTRACE_GETREGS       12
 #define COMPAT_PTRACE_SETREGS       13
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 66716c9b9e5f..619b1dd7bcde 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -78,8 +78,7 @@ ENTRY(efi_stub_entry)

     /* Turn off Dcache and MMU */
     mrs x0, CurrentEL
-    cmp x0, #PSR_MODE_EL2t
-    ccmp    x0, #PSR_MODE_EL2h, #0x4, ne
+    cmp x0, #CurrentEL_EL2
     b.ne    1f
     mrs x0, sctlr_el2
     bic x0, x0, #1 << 0 // clear SCTLR.M
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a96d3a6a63f6..a2c1195abb7f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -270,8 +270,7 @@ ENDPROC(stext)
  */
 ENTRY(el2_setup)
     mrs x0, CurrentEL
-    cmp x0, #PSR_MODE_EL2t
-    ccmp    x0, #PSR_MODE_EL2h, #0x4, ne
+    cmp x0, #CurrentEL_EL2
     b.ne    1f
     mrs x0, sctlr_el2
 CPU_BE( orr x0, x0, #(1 << 25) )    // Set the EE bit for EL2
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index e4193e3adc7f..0d64089d28b5 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -79,7 +79,8 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
         return;

     if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
-        __flush_dcache_area(page_address(page), PAGE_SIZE);
+        __flush_dcache_area(page_address(page),
+                PAGE_SIZE << compound_order(page));
         __flush_icache_all();
     } else if (icache_is_aivivt()) {
         __flush_icache_all();
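The arm64 pgtable.h hunk above is worth pausing on: a misspelled preprocessor guard fails silently. ARM64_64K_PAGES is never defined anywhere (Kconfig symbols reach C code only with the CONFIG_ prefix), so pud_sect() unconditionally took the #else branch regardless of configuration. A minimal sketch of the failure mode; CONFIG_FEATURE_X is a made-up symbol:

    /* Build twice, with and without -DCONFIG_FEATURE_X: the misspelled
     * guard below never fires either way, mirroring the pgtable.h bug. */
    #include <stdio.h>

    int main(void)
    {
    #ifdef FEATURE_X            /* wrong: missing the CONFIG_ prefix */
        printf("misspelled guard: enabled\n");
    #else
        printf("misspelled guard: silently disabled\n");
    #endif
    #ifdef CONFIG_FEATURE_X     /* right: what the build system defines */
        printf("correct guard: enabled\n");
    #else
        printf("correct guard: disabled\n");
    #endif
        return 0;
    }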
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index cd5e4f568439..f3c56a182fd8 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -384,6 +384,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)

     kfree(vcpu->arch.guest_ebase);
     kfree(vcpu->arch.kseg0_commpage);
+    kfree(vcpu);
 }

 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 6a9a9eb645f5..736637363d31 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -36,6 +36,7 @@ header-y += signal.h
 header-y += socket.h
 header-y += sockios.h
 header-y += sclp_ctl.h
+header-y += sie.h
 header-y += stat.h
 header-y += statfs.h
 header-y += swab.h
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index 3d97f610198d..5d9cc19462c4 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -1,8 +1,6 @@
 #ifndef _UAPI_ASM_S390_SIE_H
 #define _UAPI_ASM_S390_SIE_H

-#include <asm/sigp.h>
-
 #define diagnose_codes                      \
     { 0x10, "DIAG (0x10) release pages" },          \
     { 0x44, "DIAG (0x44) time slice end" },         \
@@ -13,18 +11,18 @@
     { 0x500, "DIAG (0x500) KVM virtio functions" },     \
     { 0x501, "DIAG (0x501) KVM breakpoint" }

-#define sigp_order_codes                    \
-    { SIGP_SENSE, "SIGP sense" },               \
-    { SIGP_EXTERNAL_CALL, "SIGP external call" },       \
-    { SIGP_EMERGENCY_SIGNAL, "SIGP emergency signal" }, \
-    { SIGP_STOP, "SIGP stop" },             \
-    { SIGP_STOP_AND_STORE_STATUS, "SIGP stop and store status" }, \
-    { SIGP_SET_ARCHITECTURE, "SIGP set architecture" }, \
-    { SIGP_SET_PREFIX, "SIGP set prefix" },         \
-    { SIGP_SENSE_RUNNING, "SIGP sense running" },       \
-    { SIGP_RESTART, "SIGP restart" },           \
-    { SIGP_INITIAL_CPU_RESET, "SIGP initial cpu reset" },   \
-    { SIGP_STORE_STATUS_AT_ADDRESS, "SIGP store status at address" }
+#define sigp_order_codes                    \
+    { 0x01, "SIGP sense" },                 \
+    { 0x02, "SIGP external call" },             \
+    { 0x03, "SIGP emergency signal" },          \
+    { 0x05, "SIGP stop" },                  \
+    { 0x06, "SIGP restart" },               \
+    { 0x09, "SIGP stop and store status" },         \
+    { 0x0b, "SIGP initial cpu reset" },         \
+    { 0x0d, "SIGP set prefix" },                \
+    { 0x0e, "SIGP store status at address" },       \
+    { 0x12, "SIGP set architecture" },          \
+    { 0x15, "SIGP sense running" }

 #define icpt_prog_codes                     \
     { 0x0001, "Prog Operation" },               \
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 49314155b66c..49205d01b9ad 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 8
+#define KVM_NR_VAR_MTRR 10

 #define ASYNC_PF_PER_VCPU 64

@@ -461,7 +461,7 @@ struct kvm_vcpu_arch {
     bool nmi_injected;    /* Trying to inject an NMI this entry */

     struct mtrr_state_type mtrr_state;
-    u32 pat;
+    u64 pat;

     unsigned switch_db_regs;
     unsigned long db[KVM_NR_DB_REGS];
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 14fd6fd75a19..6205f0c434db 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -231,6 +231,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,

 #define ARCH_HAS_USER_SINGLE_STEP_INFO

+/*
+ * When hitting ptrace_stop(), we cannot return using SYSRET because
+ * that does not restore the full CPU state, only a minimal set. The
+ * ptracer can change arbitrary register values, which is usually okay
+ * because the usual ptrace stops run off the signal delivery path which
+ * forces IRET; however, ptrace_event() stops happen in arbitrary places
+ * in the kernel and don't force IRET path.
+ *
+ * So force IRET path after a ptrace stop.
+ */
+#define arch_ptrace_stop_needed(code, info)             \
+({                                  \
+    set_thread_flag(TIF_NOTIFY_RESUME);             \
+    false;                              \
+})
+
 struct user_desc;
 extern int do_get_thread_area(struct task_struct *p, int idx,
                   struct user_desc __user *info);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ec8366c5cfea..b5e994ad0135 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1462,6 +1462,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
          */
         if (var->unusable)
             var->db = 0;
+        var->dpl = to_svm(vcpu)->vmcb->save.cpl;
         break;
     }
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f32a02578c0d..f6449334ec45 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1898,7 +1898,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
         if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
             break;
         gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-        if (kvm_write_guest(kvm, data,
+        if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
             &tsc_ref, sizeof(tsc_ref)))
             return 1;
         mark_page_dirty(kvm, gfn);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 48eccb350180..089e72cd37be 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,8 +622,10 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
     memset(&zram->stats, 0, sizeof(zram->stats));

     zram->disksize = 0;
-    if (reset_capacity)
+    if (reset_capacity) {
         set_capacity(zram->disk, 0);
+        revalidate_disk(zram->disk);
+    }
     up_write(&zram->init_lock);
 }

@@ -664,6 +666,7 @@ static ssize_t disksize_store(struct device *dev,
     zram->comp = comp;
     zram->disksize = disksize;
     set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+    revalidate_disk(zram->disk);
     up_write(&zram->init_lock);

     return len;
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 533d011eab3e..61d9e9c6bc10 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -25,7 +25,7 @@ drm-$(CONFIG_OF) += drm_of.o
 drm-usb-y   := drm_usb.o

 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
-        drm_plane_helper.o
+        drm_plane_helper.o drm_dp_mst_topology.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index fd166f532ab9..7838e731b0de 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -131,7 +131,7 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
     return ret;
 }

-static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
     .gamma_set  = armada_drm_crtc_gamma_set,
     .gamma_get  = armada_drm_crtc_gamma_get,
     .fb_probe   = armada_fb_probe,
@@ -149,7 +149,7 @@ int armada_fbdev_init(struct drm_device *dev)

     priv->fbdev = fbh;

-    fbh->funcs = &armada_fb_helper_funcs;
+    drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);

     ret = drm_fb_helper_init(dev, fbh, 1, 1);
     if (ret) {
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
index d685a5421485..abbc309fe539 100644
--- a/drivers/gpu/drm/armada/armada_output.c
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -48,7 +48,7 @@ static void armada_drm_connector_destroy(struct drm_connector *conn)
 {
     struct armada_connector *dconn = drm_to_armada_conn(conn);

-    drm_sysfs_connector_remove(conn);
+    drm_connector_unregister(conn);
     drm_connector_cleanup(conn);
     kfree(dconn);
 }
@@ -141,7 +141,7 @@ int armada_output_create(struct drm_device *dev,
     if (ret)
         goto err_conn;

-    ret = drm_sysfs_connector_add(&dconn->conn);
+    ret = drm_connector_register(&dconn->conn);
     if (ret)
         goto err_sysfs;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5d6a87573c33..957d4fabf1e1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -362,7 +362,7 @@ static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
 {
     int ret;

-    ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+    ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
     if (ret) {
         if (ret != -ERESTARTSYS && ret != -EBUSY)
             DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index a28640f47c27..cba45c774552 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -287,7 +287,7 @@ static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
     *blue = ast_crtc->lut_b[regno] << 8;
 }

-static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
     .gamma_set = ast_fb_gamma_set,
     .gamma_get = ast_fb_gamma_get,
     .fb_probe = astfb_create,
@@ -328,8 +328,10 @@ int ast_fbdev_init(struct drm_device *dev)
         return -ENOMEM;

     ast->fbdev = afbdev;
-    afbdev->helper.funcs = &ast_fb_helper_funcs;
     spin_lock_init(&afbdev->dirty_lock);
+
+    drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
+
     ret = drm_fb_helper_init(dev, &afbdev->helper, 1, 1);
     if (ret) {
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 114aee941d46..5389350244f2 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -667,17 +667,9 @@ static void ast_encoder_destroy(struct drm_encoder *encoder)
 static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
 {
     int enc_id = connector->encoder_ids[0];
-    struct drm_mode_object *obj;
-    struct drm_encoder *encoder;

     /* pick the encoder ids */
-    if (enc_id) {
-        obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
-        if (!obj)
-            return NULL;
-        encoder = obj_to_encoder(obj);
-        return encoder;
-    }
+    if (enc_id)
+        return drm_encoder_find(connector->dev, enc_id);
     return NULL;
 }
@@ -829,7 +821,7 @@ static void ast_connector_destroy(struct drm_connector *connector)
 {
     struct ast_connector *ast_connector = to_ast_connector(connector);
     ast_i2c_destroy(ast_connector->i2c);
-    drm_sysfs_connector_remove(connector);
+    drm_connector_unregister(connector);
     drm_connector_cleanup(connector);
     kfree(connector);
 }
@@ -871,7 +863,7 @@ static int ast_connector_init(struct drm_device *dev)
     connector->interlace_allowed = 0;
     connector->doublescan_allowed = 0;

-    drm_sysfs_connector_add(connector);
+    drm_connector_register(connector);

     connector->polled = DRM_CONNECTOR_POLL_CONNECT;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 9c13df29fd20..f5e0ead974a6 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -97,6 +97,7 @@ static struct drm_driver bochs_driver = {
 /* ---------------------------------------------------------------------- */
 /* pm interface                                                           */

+#ifdef CONFIG_PM_SLEEP
 static int bochs_pm_suspend(struct device *dev)
 {
     struct pci_dev *pdev = to_pci_dev(dev);
@@ -131,6 +132,7 @@ static int bochs_pm_resume(struct device *dev)
     drm_kms_helper_poll_enable(drm_dev);
     return 0;
 }
+#endif

 static const struct dev_pm_ops bochs_pm_ops = {
     SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend,
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 561b84474122..fe95d31cd110 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -72,7 +72,7 @@ static int bochsfb_create(struct drm_fb_helper *helper,

     bo = gem_to_bochs_bo(gobj);

-    ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+    ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
     if (ret)
         return ret;
@@ -179,7 +179,7 @@ void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
     *blue = regno;
 }

-static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
     .gamma_set = bochs_fb_gamma_set,
     .gamma_get = bochs_fb_gamma_get,
     .fb_probe = bochsfb_create,
@@ -189,7 +189,8 @@ int bochs_fbdev_init(struct bochs_device *bochs)
 {
     int ret;

-    bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
+    drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
+                  &bochs_fb_helper_funcs);

     ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
                  1, 1);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index dcf2e55f4ae9..9d7346b92653 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -53,7 +53,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
     if (old_fb) {
         bochs_fb = to_bochs_framebuffer(old_fb);
         bo = gem_to_bochs_bo(bochs_fb->obj);
-        ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+        ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
         if (ret) {
             DRM_ERROR("failed to reserve old_fb bo\n");
         } else {
@@ -67,7 +67,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,

     bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
     bo = gem_to_bochs_bo(bochs_fb->obj);
-    ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+    ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
     if (ret)
         return ret;
@@ -216,18 +216,9 @@ static struct drm_encoder *
 bochs_connector_best_encoder(struct drm_connector *connector)
 {
     int enc_id = connector->encoder_ids[0];
-    struct drm_mode_object *obj;
-    struct drm_encoder *encoder;

     /* pick the encoder ids */
-    if (enc_id) {
-        obj = drm_mode_object_find(connector->dev, enc_id,
-                       DRM_MODE_OBJECT_ENCODER);
-        if (!obj)
-            return NULL;
-        encoder = obj_to_encoder(obj);
-        return encoder;
-    }
+    if (enc_id)
+        return drm_encoder_find(connector->dev, enc_id);
     return NULL;
 }
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index b9a695d92792..1728a1b0b813 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -387,7 +387,7 @@ int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,

     *obj = NULL;

-    size = ALIGN(size, PAGE_SIZE);
+    size = PAGE_ALIGN(size);
     if (size == 0)
         return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index 98fd17ae4916..d466696ed5e8 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -328,7 +328,7 @@ int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
     }
     drm_connector_helper_add(&ptn_bridge->connector,
             &ptn3460_connector_helper_funcs);
-    drm_sysfs_connector_add(&ptn_bridge->connector);
+    drm_connector_register(&ptn_bridge->connector);
     drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder);

     return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 08ce520f61a5..4516b052cc67 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -76,6 +76,7 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
     drm_put_dev(dev);
 }

+#ifdef CONFIG_PM_SLEEP
 static int cirrus_pm_suspend(struct device *dev)
 {
     struct pci_dev *pdev = to_pci_dev(dev);
@@ -110,6 +111,7 @@ static int cirrus_pm_resume(struct device *dev)
     drm_kms_helper_poll_enable(drm_dev);
     return 0;
 }
+#endif

 static const struct file_operations cirrus_driver_fops = {
     .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 117d3eca5e37..401c890b6c6a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -241,7 +241,7 @@ static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
 {
     int ret;

-    ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+    ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
     if (ret) {
         if (ret != -ERESTARTSYS && ret != -EBUSY)
             DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 32bbba0a787b..2a135f253e29 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -288,7 +288,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
     return 0;
 }

-static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
     .gamma_set = cirrus_crtc_fb_gamma_set,
     .gamma_get = cirrus_crtc_fb_gamma_get,
     .fb_probe = cirrusfb_create,
@@ -306,9 +306,11 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
         return -ENOMEM;

     cdev->mode_info.gfbdev = gfbdev;
-    gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
     spin_lock_init(&gfbdev->dirty_lock);

+    drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
+                  &cirrus_fb_helper_funcs);
+
     ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
                  cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
     if (ret) {
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 49332c5fe35b..e1c5c3222129 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -509,19 +509,9 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
                               *connector)
 {
     int enc_id = connector->encoder_ids[0];
-    struct drm_mode_object *obj;
-    struct drm_encoder *encoder;

     /* pick the encoder ids */
-    if (enc_id) {
-        obj =
-            drm_mode_object_find(connector->dev, enc_id,
-                     DRM_MODE_OBJECT_ENCODER);
-        if (!obj)
-            return NULL;
-        encoder = obj_to_encoder(obj);
-        return encoder;
-    }
+    if (enc_id)
+        return drm_encoder_find(connector->dev, enc_id);
     return NULL;
 }
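The CONFIG_PM_SLEEP guards added to bochs and cirrus above address a silent warning source: SET_SYSTEM_SLEEP_PM_OPS() expands to nothing when CONFIG_PM_SLEEP is off, leaving the suspend/resume callbacks defined but unreferenced. A minimal, self-contained sketch of the pattern with generic names (CONFIG_SLEEP, SLEEP_OPS, my_suspend are made up, not the kernel's):

    #include <stdio.h>

    /* When sleep support is compiled out, the ops table drops its refs. */
    #ifdef CONFIG_SLEEP
    #define SLEEP_OPS(s, r) .suspend = (s), .resume = (r),
    #else
    #define SLEEP_OPS(s, r)
    #endif

    struct pm_ops {
        const char *name;
        int (*suspend)(void);
        int (*resume)(void);
    };

    #ifdef CONFIG_SLEEP /* same guard, so the callbacks are never unused */
    static int my_suspend(void) { puts("suspend"); return 0; }
    static int my_resume(void)  { puts("resume");  return 0; }
    #endif

    static const struct pm_ops my_pm_ops = {
        .name = "mydrv",
        SLEEP_OPS(my_suspend, my_resume)
    };

    int main(void)
    {
        printf("%s: %s\n", my_pm_ops.name,
               my_pm_ops.suspend ? "sleep-capable" : "no sleep support");
        if (my_pm_ops.suspend)
            my_pm_ops.suspend();
        return 0;
    }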
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 0406110f83ed..86a4a4a60afc 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -80,11 +80,7 @@ int drm_buffer_alloc(struct drm_buffer **buf, int size)

 error_out:

-    /* Only last element can be null pointer so check for it first. */
-    if ((*buf)->data[idx])
-        kfree((*buf)->data[idx]);
-
-    for (--idx; idx >= 0; --idx)
+    for (; idx >= 0; --idx)
         kfree((*buf)->data[idx]);

     kfree(*buf);
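The drm_crtc.c rework below routes both the setplane ioctl and the legacy cursor ioctls through a common setplane_internal(), whose contract is that the caller has already taken a reference on the incoming framebuffer: on failure that reference is dropped, on success the displaced framebuffer's reference is dropped instead. A toy model of that ownership handoff (illustrative names only, not the DRM API):

    #include <stdio.h>

    /* Stand-ins for drm_framebuffer and its refcounting. */
    struct fb { int refs; };

    static void fb_unref(struct fb *fb)
    {
        if (--fb->refs == 0)
            printf("fb freed\n");
    }

    struct plane { struct fb *fb; };

    /* Caller has already referenced new_fb; consume it on every path. */
    static int update_plane_internal(struct plane *plane, struct fb *new_fb,
                                     int hw_ok)
    {
        struct fb *old_fb = plane->fb;

        if (!hw_ok) {                   /* hardware update failed */
            if (new_fb)
                fb_unref(new_fb);       /* give back caller's ref */
            return -1;
        }

        plane->fb = new_fb;             /* plane adopts new_fb's ref */
        if (old_fb)
            fb_unref(old_fb);           /* release displaced buffer */
        return 0;
    }

    int main(void)
    {
        struct fb a = { 1 }, b = { 1 };
        struct plane p = { &a };

        update_plane_internal(&p, &b, 1);    /* success: frees a */
        update_plane_internal(&p, NULL, 1);  /* disable: frees b */
        return 0;
    }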
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fe94cc10cd35..1ccf5cb9aa86 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -41,6 +41,10 @@

 #include "drm_crtc_internal.h"

+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+                            struct drm_mode_fb_cmd2 *r,
+                            struct drm_file *file_priv);
+
 /**
  * drm_modeset_lock_all - take all modeset locks
  * @dev: drm device
@@ -723,7 +727,7 @@ DEFINE_WW_CLASS(crtc_ww_class);
  */
 int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
                   struct drm_plane *primary,
-                  void *cursor,
+                  struct drm_plane *cursor,
                   const struct drm_crtc_funcs *funcs)
 {
     struct drm_mode_config *config = &dev->mode_config;
@@ -748,8 +752,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
     config->num_crtc++;

     crtc->primary = primary;
+    crtc->cursor = cursor;
     if (primary)
         primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+    if (cursor)
+        cursor->possible_crtcs = 1 << drm_crtc_index(crtc);

 out:
     drm_modeset_unlock_all(dev);
@@ -881,6 +888,8 @@ int drm_connector_init(struct drm_device *dev,
     drm_object_attach_property(&connector->base,
                       dev->mode_config.dpms_property, 0);

+    connector->debugfs_entry = NULL;
+
 out_put:
     if (ret)
         drm_mode_object_put(dev, &connector->base);
@@ -921,6 +930,47 @@ void drm_connector_cleanup(struct drm_connector *connector)
 EXPORT_SYMBOL(drm_connector_cleanup);

 /**
+ * drm_connector_register - register a connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_register(struct drm_connector *connector)
+{
+    int ret;
+
+    ret = drm_sysfs_connector_add(connector);
+    if (ret)
+        return ret;
+
+    ret = drm_debugfs_connector_add(connector);
+    if (ret) {
+        drm_sysfs_connector_remove(connector);
+        return ret;
+    }
+
+    return 0;
+}
+EXPORT_SYMBOL(drm_connector_register);
+
+/**
+ * drm_connector_unregister - unregister a connector
+ * @connector: the connector to unregister
+ *
+ * Unregister userspace interfaces for a connector
+ */
+void drm_connector_unregister(struct drm_connector *connector)
+{
+    drm_sysfs_connector_remove(connector);
+    drm_debugfs_connector_remove(connector);
+}
+EXPORT_SYMBOL(drm_connector_unregister);
+
+
+/**
  * drm_connector_unplug_all - unregister connector userspace interfaces
  * @dev: drm device
  *
@@ -934,7 +984,7 @@ void drm_connector_unplug_all(struct drm_device *dev)

     /* taking the mode config mutex ends up in a clash with sysfs */
     list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-        drm_sysfs_connector_remove(connector);
+        drm_connector_unregister(connector);

 }
 EXPORT_SYMBOL(drm_connector_unplug_all);
@@ -1214,6 +1264,7 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
 {
     struct drm_property *edid;
     struct drm_property *dpms;
+    struct drm_property *dev_path;

     /*
      * Standard properties (apply to all connectors)
@@ -1228,6 +1279,12 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
             ARRAY_SIZE(drm_dpms_enum_list));
     dev->mode_config.dpms_property = dpms;

+    dev_path = drm_property_create(dev,
+                       DRM_MODE_PROP_BLOB |
+                       DRM_MODE_PROP_IMMUTABLE,
+                       "PATH", 0);
+    dev->mode_config.path_property = dev_path;
+
     return 0;
 }
@@ -1470,6 +1527,15 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_mode_group_init_legacy_group);

+void drm_reinit_primary_mode_group(struct drm_device *dev)
+{
+    drm_modeset_lock_all(dev);
+    drm_mode_group_destroy(&dev->primary->mode_group);
+    drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+    drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_reinit_primary_mode_group);
+
 /**
  * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
  * @out: drm_mode_modeinfo struct to return to the user
@@ -2118,45 +2184,32 @@ out:
     return ret;
 }

-/**
- * drm_mode_setplane - configure a plane's configuration
- * @dev: DRM device
- * @data: ioctl data*
- * @file_priv: DRM file info
+/*
+ * setplane_internal - setplane handler for internal callers
  *
- * Set plane configuration, including placement, fb, scaling, and other factors.
- * Or pass a NULL fb to disable.
+ * Note that we assume an extra reference has already been taken on fb. If the
+ * update fails, this reference will be dropped before return; if it succeeds,
+ * the previous framebuffer (if any) will be unreferenced instead.
  *
- * Returns:
- * Zero on success, errno on failure.
+ * src_{x,y,w,h} are provided in 16.16 fixed point format
  */
-int drm_mode_setplane(struct drm_device *dev, void *data,
-              struct drm_file *file_priv)
+static int setplane_internal(struct drm_plane *plane,
+                 struct drm_crtc *crtc,
+                 struct drm_framebuffer *fb,
+                 int32_t crtc_x, int32_t crtc_y,
+                 uint32_t crtc_w, uint32_t crtc_h,
+                 /* src_{x,y,w,h} values are 16.16 fixed point */
+                 uint32_t src_x, uint32_t src_y,
+                 uint32_t src_w, uint32_t src_h)
 {
-    struct drm_mode_set_plane *plane_req = data;
-    struct drm_plane *plane;
-    struct drm_crtc *crtc;
-    struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+    struct drm_device *dev = plane->dev;
+    struct drm_framebuffer *old_fb = NULL;
     int ret = 0;
     unsigned int fb_width, fb_height;
     int i;

-    if (!drm_core_check_feature(dev, DRIVER_MODESET))
-        return -EINVAL;
-
-    /*
-     * First, find the plane, crtc, and fb objects. If not available,
-     * we don't bother to call the driver.
-     */
-    plane = drm_plane_find(dev, plane_req->plane_id);
-    if (!plane) {
-        DRM_DEBUG_KMS("Unknown plane ID %d\n",
-                  plane_req->plane_id);
-        return -ENOENT;
-    }
-
     /* No fb means shut it down */
-    if (!plane_req->fb_id) {
+    if (!fb) {
         drm_modeset_lock_all(dev);
         old_fb = plane->fb;
         ret = plane->funcs->disable_plane(plane);
@@ -2170,14 +2223,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
         goto out;
     }

-    crtc = drm_crtc_find(dev, plane_req->crtc_id);
-    if (!crtc) {
-        DRM_DEBUG_KMS("Unknown crtc ID %d\n",
-                  plane_req->crtc_id);
-        ret = -ENOENT;
-        goto out;
-    }
-
     /* Check whether this plane is usable on this CRTC */
     if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
         DRM_DEBUG_KMS("Invalid crtc for plane\n");
@@ -2185,14 +2230,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
         goto out;
     }

-    fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
-    if (!fb) {
-        DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
-                  plane_req->fb_id);
-        ret = -ENOENT;
-        goto out;
-    }
-
     /* Check whether this plane supports the fb pixel format. */
     for (i = 0; i < plane->format_count; i++)
         if (fb->pixel_format == plane->format_types[i])
@@ -2208,43 +2245,25 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
     fb_height = fb->height << 16;

     /* Make sure source coordinates are inside the fb. */
-    if (plane_req->src_w > fb_width ||
-        plane_req->src_x > fb_width - plane_req->src_w ||
-        plane_req->src_h > fb_height ||
-        plane_req->src_y > fb_height - plane_req->src_h) {
+    if (src_w > fb_width ||
+        src_x > fb_width - src_w ||
+        src_h > fb_height ||
+        src_y > fb_height - src_h) {
         DRM_DEBUG_KMS("Invalid source coordinates "
                   "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
-                  plane_req->src_w >> 16,
-                  ((plane_req->src_w & 0xffff) * 15625) >> 10,
-                  plane_req->src_h >> 16,
-                  ((plane_req->src_h & 0xffff) * 15625) >> 10,
-                  plane_req->src_x >> 16,
-                  ((plane_req->src_x & 0xffff) * 15625) >> 10,
-                  plane_req->src_y >> 16,
-                  ((plane_req->src_y & 0xffff) * 15625) >> 10);
+                  src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+                  src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+                  src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+                  src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
         ret = -ENOSPC;
         goto out;
     }

-    /* Give drivers some help against integer overflows */
-    if (plane_req->crtc_w > INT_MAX ||
-        plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
-        plane_req->crtc_h > INT_MAX ||
-        plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
-        DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
-                  plane_req->crtc_w, plane_req->crtc_h,
-                  plane_req->crtc_x, plane_req->crtc_y);
-        ret = -ERANGE;
-        goto out;
-    }
-
     drm_modeset_lock_all(dev);
     old_fb = plane->fb;
     ret = plane->funcs->update_plane(plane, crtc, fb,
-                     plane_req->crtc_x, plane_req->crtc_y,
-                     plane_req->crtc_w, plane_req->crtc_h,
-                     plane_req->src_x, plane_req->src_y,
-                     plane_req->src_w, plane_req->src_h);
+                     crtc_x, crtc_y, crtc_w, crtc_h,
+                     src_x, src_y, src_w, src_h);
     if (!ret) {
         plane->crtc = crtc;
         plane->fb = fb;
@@ -2261,6 +2280,85 @@ out:
         drm_framebuffer_unreference(old_fb);

     return ret;
+
+}
+
+/**
+ * drm_mode_setplane - configure a plane's configuration
+ * @dev: DRM device
+ * @data: ioctl data*
+ * @file_priv: DRM file info
+ *
+ * Set plane configuration, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable (planes may be disabled without providing a
+ * valid crtc).
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+              struct drm_file *file_priv)
+{
+    struct drm_mode_set_plane *plane_req = data;
+    struct drm_mode_object *obj;
+    struct drm_plane *plane;
+    struct drm_crtc *crtc = NULL;
+    struct drm_framebuffer *fb = NULL;
+
+    if (!drm_core_check_feature(dev, DRIVER_MODESET))
+        return -EINVAL;
+
+    /* Give drivers some help against integer overflows */
+    if (plane_req->crtc_w > INT_MAX ||
+        plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+        plane_req->crtc_h > INT_MAX ||
+        plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+        DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+                  plane_req->crtc_w, plane_req->crtc_h,
+                  plane_req->crtc_x, plane_req->crtc_y);
+        return -ERANGE;
+    }
+
+    /*
+     * First, find the plane, crtc, and fb objects. If not available,
+     * we don't bother to call the driver.
+ */ + obj = drm_mode_object_find(dev, plane_req->plane_id, + DRM_MODE_OBJECT_PLANE); + if (!obj) { + DRM_DEBUG_KMS("Unknown plane ID %d\n", + plane_req->plane_id); + return -ENOENT; + } + plane = obj_to_plane(obj); + + if (plane_req->fb_id) { + fb = drm_framebuffer_lookup(dev, plane_req->fb_id); + if (!fb) { + DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", + plane_req->fb_id); + return -ENOENT; + } + + obj = drm_mode_object_find(dev, plane_req->crtc_id, + DRM_MODE_OBJECT_CRTC); + if (!obj) { + DRM_DEBUG_KMS("Unknown crtc ID %d\n", + plane_req->crtc_id); + return -ENOENT; + } + crtc = obj_to_crtc(obj); + } + + /* + * setplane_internal will take care of deref'ing either the old or new + * framebuffer depending on success. + */ + return setplane_internal(plane, crtc, fb, + plane_req->crtc_x, plane_req->crtc_y, + plane_req->crtc_w, plane_req->crtc_h, + plane_req->src_x, plane_req->src_y, + plane_req->src_w, plane_req->src_h); } /** @@ -2509,6 +2607,102 @@ out: return ret; } +/** + * drm_mode_cursor_universal - translate legacy cursor ioctl call into a + * universal plane handler call + * @crtc: crtc to update cursor for + * @req: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Legacy cursor ioctls work directly with driver buffer handles. To + * translate legacy ioctl calls into universal plane handler calls, we need to + * wrap the native buffer handle in a drm_framebuffer. + * + * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB + * buffer with a pitch of 4*width; the universal plane interface should be used + * directly in cases where the hardware can support other buffer settings and + * userspace wants to make use of these capabilities. + * + * Returns: + * Zero on success, errno on failure. + */ +static int drm_mode_cursor_universal(struct drm_crtc *crtc, + struct drm_mode_cursor2 *req, + struct drm_file *file_priv) +{ + struct drm_device *dev = crtc->dev; + struct drm_framebuffer *fb = NULL; + struct drm_mode_fb_cmd2 fbreq = { + .width = req->width, + .height = req->height, + .pixel_format = DRM_FORMAT_ARGB8888, + .pitches = { req->width * 4 }, + .handles = { req->handle }, + }; + int32_t crtc_x, crtc_y; + uint32_t crtc_w = 0, crtc_h = 0; + uint32_t src_w = 0, src_h = 0; + int ret = 0; + + BUG_ON(!crtc->cursor); + + /* + * Obtain fb we'll be using (either new or existing) and take an extra + * reference to it if fb != NULL. setplane will take care of dropping + * the reference if the plane update fails. + */ + if (req->flags & DRM_MODE_CURSOR_BO) { + if (req->handle) { + fb = add_framebuffer_internal(dev, &fbreq, file_priv); + if (IS_ERR(fb)) { + DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); + return PTR_ERR(fb); + } + + drm_framebuffer_reference(fb); + } else { + fb = NULL; + } + } else { + mutex_lock(&dev->mode_config.mutex); + fb = crtc->cursor->fb; + if (fb) + drm_framebuffer_reference(fb); + mutex_unlock(&dev->mode_config.mutex); + } + + if (req->flags & DRM_MODE_CURSOR_MOVE) { + crtc_x = req->x; + crtc_y = req->y; + } else { + crtc_x = crtc->cursor_x; + crtc_y = crtc->cursor_y; + } + + if (fb) { + crtc_w = fb->width; + crtc_h = fb->height; + src_w = fb->width << 16; + src_h = fb->height << 16; + } + + /* + * setplane_internal will take care of deref'ing either the old or new + * framebuffer depending on success. 
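The fbreq initializer above is the heart of the translation: the legacy cursor handle is re-described as a single-plane ARGB8888 framebuffer with a pitch of 4*width and then pushed through the common framebuffer-creation path. A userspace-flavoured sketch of the equivalent description (the handle value is hypothetical, and the snippet assumes the libdrm uapi headers are on the include path):

#include <stdint.h>
#include <stdio.h>
#include <drm/drm_mode.h>
#include <drm/drm_fourcc.h>

int main(void)
{
        uint32_t handle = 1;    /* hypothetical GEM handle for a 64x64 BO */
        struct drm_mode_fb_cmd2 fbreq = {
                .width = 64,
                .height = 64,
                .pixel_format = DRM_FORMAT_ARGB8888,
                .pitches = { 64 * 4 },  /* 4 bytes per ARGB pixel */
                .handles = { handle },
        };

        printf("pitch=%u bytes, buffer=%u bytes\n",
               fbreq.pitches[0], fbreq.pitches[0] * fbreq.height);
        return 0;
}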
+ */ + ret = setplane_internal(crtc->cursor, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + 0, 0, src_w, src_h); + + /* Update successful; save new cursor position, if necessary */ + if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) { + crtc->cursor_x = req->x; + crtc->cursor_y = req->y; + } + + return ret; +} + static int drm_mode_cursor_common(struct drm_device *dev, struct drm_mode_cursor2 *req, struct drm_file *file_priv) @@ -2528,6 +2722,13 @@ static int drm_mode_cursor_common(struct drm_device *dev, return -ENOENT; } + /* + * If this crtc has a universal cursor plane, call that plane's update + * handler rather than using legacy cursor handlers. + */ + if (crtc->cursor) + return drm_mode_cursor_universal(crtc, req, file_priv); + drm_modeset_lock(&crtc->mutex, NULL); if (req->flags & DRM_MODE_CURSOR_BO) { if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { @@ -2827,56 +3028,38 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) return 0; } -/** - * drm_mode_addfb2 - add an FB to the graphics configuration - * @dev: drm device for the ioctl - * @data: data pointer for the ioctl - * @file_priv: drm file for the ioctl call - * - * Add a new FB to the specified CRTC, given a user request with format. This is - * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers - * and uses fourcc codes as pixel format specifiers. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, errno on failure. - */ -int drm_mode_addfb2(struct drm_device *dev, - void *data, struct drm_file *file_priv) +static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv) { - struct drm_mode_fb_cmd2 *r = data; struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; int ret; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - if (r->flags & ~DRM_MODE_FB_INTERLACED) { DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); - return -EINVAL; + return ERR_PTR(-EINVAL); } if ((config->min_width > r->width) || (r->width > config->max_width)) { DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", r->width, config->min_width, config->max_width); - return -EINVAL; + return ERR_PTR(-EINVAL); } if ((config->min_height > r->height) || (r->height > config->max_height)) { DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", r->height, config->min_height, config->max_height); - return -EINVAL; + return ERR_PTR(-EINVAL); } ret = framebuffer_check(r); if (ret) - return ret; + return ERR_PTR(ret); fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); if (IS_ERR(fb)) { DRM_DEBUG_KMS("could not create framebuffer\n"); - return PTR_ERR(fb); + return fb; } mutex_lock(&file_priv->fbs_lock); @@ -2885,8 +3068,37 @@ int drm_mode_addfb2(struct drm_device *dev, DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); mutex_unlock(&file_priv->fbs_lock); + return fb; +} - return ret; +/** + * drm_mode_addfb2 - add an FB to the graphics configuration + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Add a new FB to the specified CRTC, given a user request with format. This is + * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers + * and uses fourcc codes as pixel format specifiers. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, errno on failure. 
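add_framebuffer_internal() reports failure through the returned pointer itself, so its callers need no separate status out-parameter; the drm_mode_addfb2() wrapper below reduces to a single IS_ERR() test. For readers unfamiliar with the idiom, here is a stand-alone sketch of the kernel's ERR_PTR convention (create_fb() is made up for illustration):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* error codes occupy the top page of the address space */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_fb(int width)
{
        static int fb = 42;     /* stand-in for a framebuffer object */

        if (width <= 0)
                return ERR_PTR(-EINVAL);
        return &fb;
}

int main(void)
{
        void *fb = create_fb(-1);

        if (IS_ERR(fb))
                printf("failed: %ld\n", PTR_ERR(fb));   /* prints -22 */
        return 0;
}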
+ */ +int drm_mode_addfb2(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_framebuffer *fb; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + fb = add_framebuffer_internal(dev, data, file_priv); + if (IS_ERR(fb)) + return PTR_ERR(fb); + + return 0; } /** @@ -3176,7 +3388,7 @@ fail: EXPORT_SYMBOL(drm_property_create); /** - * drm_property_create - create a new enumeration property type + * drm_property_create_enum - create a new enumeration property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property @@ -3222,7 +3434,7 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, EXPORT_SYMBOL(drm_property_create_enum); /** - * drm_property_create - create a new bitmask property type + * drm_property_create_bitmask - create a new bitmask property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property @@ -3284,7 +3496,7 @@ static struct drm_property *property_create_range(struct drm_device *dev, } /** - * drm_property_create - create a new ranged property type + * drm_property_create_range - create a new ranged property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property @@ -3703,6 +3915,25 @@ done: return ret; } +int drm_mode_connector_set_path_property(struct drm_connector *connector, + char *path) +{ + struct drm_device *dev = connector->dev; + int ret, size; + size = strlen(path) + 1; + + connector->path_blob_ptr = drm_property_create_blob(connector->dev, + size, path); + if (!connector->path_blob_ptr) + return -EINVAL; + + ret = drm_object_property_set_value(&connector->base, + dev->mode_config.path_property, + connector->path_blob_ptr->base.id); + return ret; +} +EXPORT_SYMBOL(drm_mode_connector_set_path_property); + /** * drm_mode_connector_update_edid_property - update the edid property of a connector * @connector: drm connector @@ -3720,6 +3951,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, struct drm_device *dev = connector->dev; int ret, size; + /* ignore requests to set edid when overridden */ + if (connector->override_edid) + return 0; + if (connector->edid_blob_ptr) drm_property_destroy_blob(dev, connector->edid_blob_ptr); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 78b37f3febd3..6c65a0a28fbd 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -818,6 +818,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, &fb->bits_per_pixel); fb->pixel_format = mode_cmd->pixel_format; + fb->flags = mode_cmd->flags; } EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index b4b51d46f339..13bd42923dd4 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/export.h> #include <drm/drmP.h> +#include <drm/drm_edid.h> #if defined(CONFIG_DEBUG_FS) @@ -237,5 +238,186 @@ int drm_debugfs_cleanup(struct drm_minor *minor) return 0; } +static int connector_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + const char *status; + + switch (connector->force) { + case DRM_FORCE_ON: + status = "on\n"; + break; + + case DRM_FORCE_ON_DIGITAL: + status = "digital\n"; + break; + + case DRM_FORCE_OFF: + status = "off\n"; + 
break; + + case DRM_FORCE_UNSPECIFIED: + status = "unspecified\n"; + break; + + default: + return 0; + } + + seq_puts(m, status); + + return 0; +} + +static int connector_open(struct inode *inode, struct file *file) +{ + struct drm_connector *dev = inode->i_private; + + return single_open(file, connector_show, dev); +} + +static ssize_t connector_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_connector *connector = m->private; + char buf[12]; + + if (len > sizeof(buf) - 1) + return -EINVAL; + + if (copy_from_user(buf, ubuf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (!strcmp(buf, "on")) + connector->force = DRM_FORCE_ON; + else if (!strcmp(buf, "digital")) + connector->force = DRM_FORCE_ON_DIGITAL; + else if (!strcmp(buf, "off")) + connector->force = DRM_FORCE_OFF; + else if (!strcmp(buf, "unspecified")) + connector->force = DRM_FORCE_UNSPECIFIED; + else + return -EINVAL; + + return len; +} + +static int edid_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct drm_property_blob *edid = connector->edid_blob_ptr; + + if (connector->override_edid && edid) + seq_write(m, edid->data, edid->length); + + return 0; +} + +static int edid_open(struct inode *inode, struct file *file) +{ + struct drm_connector *dev = inode->i_private; + + return single_open(file, edid_show, dev); +} + +static ssize_t edid_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_connector *connector = m->private; + char *buf; + struct edid *edid; + int ret; + + buf = memdup_user(ubuf, len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + edid = (struct edid *) buf; + + if (len == 5 && !strncmp(buf, "reset", 5)) { + connector->override_edid = false; + ret = drm_mode_connector_update_edid_property(connector, NULL); + } else if (len < EDID_LENGTH || + EDID_LENGTH * (1 + edid->extensions) > len) + ret = -EINVAL; + else { + connector->override_edid = false; + ret = drm_mode_connector_update_edid_property(connector, edid); + if (!ret) + connector->override_edid = true; + } + + kfree(buf); + + return (ret) ? 
ret : len; +} + +static const struct file_operations drm_edid_fops = { + .owner = THIS_MODULE, + .open = edid_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = edid_write +}; + + +static const struct file_operations drm_connector_fops = { + .owner = THIS_MODULE, + .open = connector_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = connector_write +}; + +int drm_debugfs_connector_add(struct drm_connector *connector) +{ + struct drm_minor *minor = connector->dev->primary; + struct dentry *root, *ent; + + if (!minor->debugfs_root) + return -1; + + root = debugfs_create_dir(connector->name, minor->debugfs_root); + if (!root) + return -ENOMEM; + + connector->debugfs_entry = root; + + /* force */ + ent = debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector, + &drm_connector_fops); + if (!ent) + goto error; + + /* edid */ + ent = debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root, + connector, &drm_edid_fops); + if (!ent) + goto error; + + return 0; + +error: + debugfs_remove_recursive(connector->debugfs_entry); + connector->debugfs_entry = NULL; + return -ENOMEM; +} + +void drm_debugfs_connector_remove(struct drm_connector *connector) +{ + if (!connector->debugfs_entry) + return; + + debugfs_remove_recursive(connector->debugfs_entry); + + connector->debugfs_entry = NULL; +} + #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c new file mode 100644 index 000000000000..ac3c2738db94 --- /dev/null +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -0,0 +1,2715 @@ +/* + * Copyright © 2014 Red Hat + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/i2c.h> +#include <drm/drm_dp_mst_helper.h> +#include <drm/drmP.h> + +#include <drm/drm_fixed.h> + +/** + * DOC: dp mst helper + * + * These functions contain parts of the DisplayPort 1.2a MultiStream Transport + * protocol. The helpers contain a topology manager and bandwidth manager. + * The helpers encapsulate the sending and receiving of sideband msgs. 
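Everything below implements the sideband wire format: headers are protected by a 4-bit CRC over their nibbles (the 0x13 feedback term in the code corresponds to the polynomial x^4 + x + 1) and body chunks by an 8-bit CRC (the 0xd5 term). A stand-alone copy of the header CRC, exercised on a made-up three-byte header:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* same bit-serial CRC-4 as the header CRC below, MSB first */
static uint8_t crc4(const uint8_t *data, size_t num_nibbles)
{
        uint8_t bitmask = 0x80, bitshift = 7, idx = 0, rem = 0;
        size_t bits = num_nibbles * 4;

        while (bits--) {
                rem <<= 1;
                rem |= (data[idx] & bitmask) >> bitshift;
                bitmask >>= 1;
                bitshift--;
                if (bitmask == 0) {
                        bitmask = 0x80;
                        bitshift = 7;
                        idx++;
                }
                if (rem & 0x10)
                        rem ^= 0x13;
        }
        for (bits = 4; bits--; ) {
                rem <<= 1;
                if (rem & 0x10)
                        rem ^= 0x13;
        }
        return rem;
}

int main(void)
{
        /* 3-byte header (lct=1): the CRC covers the first five nibbles
         * and lands in the low nibble of the last byte */
        uint8_t hdr[3] = { 0x10, 0x01, 0x00 };

        hdr[2] |= crc4(hdr, (sizeof(hdr) * 2) - 1);
        printf("header: %02x %02x %02x\n", hdr[0], hdr[1], hdr[2]);
        return 0;
}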
+ */ +static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, + char *buf); +static int test_calc_pbn_mode(void); + +static void drm_dp_put_port(struct drm_dp_mst_port *port); + +static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, + int id, + struct drm_dp_payload *payload); + +static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes); + +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); +static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port); +static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, + u8 *guid); + +static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux); +static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux); +static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr); +/* sideband msg handling */ +static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles) +{ + u8 bitmask = 0x80; + u8 bitshift = 7; + u8 array_index = 0; + int number_of_bits = num_nibbles * 4; + u8 remainder = 0; + + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + remainder |= (data[array_index] & bitmask) >> bitshift; + bitmask >>= 1; + bitshift--; + if (bitmask == 0) { + bitmask = 0x80; + bitshift = 7; + array_index++; + } + if ((remainder & 0x10) == 0x10) + remainder ^= 0x13; + } + + number_of_bits = 4; + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + if ((remainder & 0x10) != 0) + remainder ^= 0x13; + } + + return remainder; +} + +static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes) +{ + u8 bitmask = 0x80; + u8 bitshift = 7; + u8 array_index = 0; + int number_of_bits = number_of_bytes * 8; + u16 remainder = 0; + + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + remainder |= (data[array_index] & bitmask) >> bitshift; + bitmask >>= 1; + bitshift--; + if (bitmask == 0) { + bitmask = 0x80; + bitshift = 7; + array_index++; + } + if ((remainder & 0x100) == 0x100) + remainder ^= 0xd5; + } + + number_of_bits = 8; + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + if ((remainder & 0x100) != 0) + remainder ^= 0xd5; + } + + return remainder & 0xff; +} +static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr) +{ + u8 size = 3; + size += (hdr->lct / 2); + return size; +} + +static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, + u8 *buf, int *len) +{ + int idx = 0; + int i; + u8 crc4; + buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); + for (i = 0; i < (hdr->lct / 2); i++) + buf[idx++] = hdr->rad[i]; + buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) | + (hdr->msg_len & 0x3f); + buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4); + + crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1); + buf[idx - 1] |= (crc4 & 0xf); + + *len = idx; +} + +static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, + u8 *buf, int buflen, u8 *hdrlen) +{ + u8 crc4; + u8 len; + int i; + u8 idx; + if (buf[0] == 0) + return false; + len = 3; + len += ((buf[0] & 0xf0) >> 4) / 2; + if (len > buflen) + return false; + crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1); + + if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { + DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); + return false; + } + + hdr->lct = 
(buf[0] & 0xf0) >> 4; + hdr->lcr = (buf[0] & 0xf); + idx = 1; + for (i = 0; i < (hdr->lct / 2); i++) + hdr->rad[i] = buf[idx++]; + hdr->broadcast = (buf[idx] >> 7) & 0x1; + hdr->path_msg = (buf[idx] >> 6) & 0x1; + hdr->msg_len = buf[idx] & 0x3f; + idx++; + hdr->somt = (buf[idx] >> 7) & 0x1; + hdr->eomt = (buf[idx] >> 6) & 0x1; + hdr->seqno = (buf[idx] >> 4) & 0x1; + idx++; + *hdrlen = idx; + return true; +} + +static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req, + struct drm_dp_sideband_msg_tx *raw) +{ + int idx = 0; + int i; + u8 *buf = raw->msg; + buf[idx++] = req->req_type & 0x7f; + + switch (req->req_type) { + case DP_ENUM_PATH_RESOURCES: + buf[idx] = (req->u.port_num.port_number & 0xf) << 4; + idx++; + break; + case DP_ALLOCATE_PAYLOAD: + buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 | + (req->u.allocate_payload.number_sdp_streams & 0xf); + idx++; + buf[idx] = (req->u.allocate_payload.vcpi & 0x7f); + idx++; + buf[idx] = (req->u.allocate_payload.pbn >> 8); + idx++; + buf[idx] = (req->u.allocate_payload.pbn & 0xff); + idx++; + for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) { + buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) | + (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf); + idx++; + } + if (req->u.allocate_payload.number_sdp_streams & 1) { + i = req->u.allocate_payload.number_sdp_streams - 1; + buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4; + idx++; + } + break; + case DP_QUERY_PAYLOAD: + buf[idx] = (req->u.query_payload.port_number & 0xf) << 4; + idx++; + buf[idx] = (req->u.query_payload.vcpi & 0x7f); + idx++; + break; + case DP_REMOTE_DPCD_READ: + buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4; + buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf; + idx++; + buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8; + idx++; + buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff); + idx++; + buf[idx] = (req->u.dpcd_read.num_bytes); + idx++; + break; + + case DP_REMOTE_DPCD_WRITE: + buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4; + buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf; + idx++; + buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8; + idx++; + buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff); + idx++; + buf[idx] = (req->u.dpcd_write.num_bytes); + idx++; + memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes); + idx += req->u.dpcd_write.num_bytes; + break; + case DP_REMOTE_I2C_READ: + buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4; + buf[idx] |= (req->u.i2c_read.num_transactions & 0x3); + idx++; + for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) { + buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f; + idx++; + buf[idx] = req->u.i2c_read.transactions[i].num_bytes; + idx++; + memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); + idx += req->u.i2c_read.transactions[i].num_bytes; + + buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; + buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); + idx++; + } + buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f; + idx++; + buf[idx] = (req->u.i2c_read.num_bytes_read); + idx++; + break; + + case DP_REMOTE_I2C_WRITE: + buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4; + idx++; + buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f; + idx++; + buf[idx] = (req->u.i2c_write.num_bytes); + idx++; + memcpy(&buf[idx], 
req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); + idx += req->u.i2c_write.num_bytes; + break; + } + raw->cur_len = idx; +} + +static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len) +{ + u8 crc4; + crc4 = drm_dp_msg_data_crc4(msg, len); + msg[len] = crc4; +} + +static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep, + struct drm_dp_sideband_msg_tx *raw) +{ + int idx = 0; + u8 *buf = raw->msg; + + buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f); + + raw->cur_len = idx; +} + +/* this adds a chunk of msg to the builder to get the final msg */ +static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, + u8 *replybuf, u8 replybuflen, bool hdr) +{ + int ret; + u8 crc4; + + if (hdr) { + u8 hdrlen; + struct drm_dp_sideband_msg_hdr recv_hdr; + ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen); + if (ret == false) { + print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false); + return false; + } + + /* get length contained in this portion */ + msg->curchunk_len = recv_hdr.msg_len; + msg->curchunk_hdrlen = hdrlen; + + /* we have already gotten an somt - don't bother parsing */ + if (recv_hdr.somt && msg->have_somt) + return false; + + if (recv_hdr.somt) { + memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr)); + msg->have_somt = true; + } + if (recv_hdr.eomt) + msg->have_eomt = true; + + /* copy the bytes for the remainder of this header chunk */ + msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen)); + memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx); + } else { + memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); + msg->curchunk_idx += replybuflen; + } + + if (msg->curchunk_idx >= msg->curchunk_len) { + /* do CRC */ + crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); + /* copy chunk into bigger msg */ + memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1); + msg->curlen += msg->curchunk_len - 1; + } + return true; +} + +static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + int i; + memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); + idx += 16; + repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + for (i = 0; i < repmsg->u.link_addr.nports; i++) { + if (raw->msg[idx] & 0x80) + repmsg->u.link_addr.ports[i].input_port = 1; + + repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7; + repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf); + + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1; + repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1; + if (repmsg->u.link_addr.ports[i].input_port == 0) + repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1; + idx++; + if (idx > raw->curlen) + goto fail_len; + if (repmsg->u.link_addr.ports[i].input_port == 0) { + repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]); + idx++; + if (idx > raw->curlen) + goto fail_len; + memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); + idx += 16; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf; + repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf); + idx++; + + } + if (idx > raw->curlen) + goto 
fail_len; + } + + return true; +fail_len: + DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; + if (idx > raw->curlen) + goto fail_len; + + memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes); + return true; +fail_len: + DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + + repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf); + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx]; + idx++; + /* TODO check */ + memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes); + return true; +fail_len: + DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.allocate_payload.vcpi = raw->msg[idx]; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + 
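+ /* jumped to when idx walks past raw->curlen: the reply was shorter
+ * than the fields being parsed, so treat it as malformed rather than
+ * reading past the received data */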
DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *msg) +{ + memset(msg, 0, sizeof(*msg)); + msg->reply_type = (raw->msg[0] & 0x80) >> 7; + msg->req_type = (raw->msg[0] & 0x7f); + + if (msg->reply_type) { + memcpy(msg->u.nak.guid, &raw->msg[1], 16); + msg->u.nak.reason = raw->msg[17]; + msg->u.nak.nak_data = raw->msg[18]; + return false; + } + + switch (msg->req_type) { + case DP_LINK_ADDRESS: + return drm_dp_sideband_parse_link_address(raw, msg); + case DP_QUERY_PAYLOAD: + return drm_dp_sideband_parse_query_payload_ack(raw, msg); + case DP_REMOTE_DPCD_READ: + return drm_dp_sideband_parse_remote_dpcd_read(raw, msg); + case DP_REMOTE_DPCD_WRITE: + return drm_dp_sideband_parse_remote_dpcd_write(raw, msg); + case DP_REMOTE_I2C_READ: + return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg); + case DP_ENUM_PATH_RESOURCES: + return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg); + case DP_ALLOCATE_PAYLOAD: + return drm_dp_sideband_parse_allocate_payload_ack(raw, msg); + default: + DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type); + return false; + } +} + +static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) +{ + int idx = 1; + + msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; + idx++; + if (idx > raw->curlen) + goto fail_len; + + memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); + idx += 16; + if (idx > raw->curlen) + goto fail_len; + + msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1; + msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1; + msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1; + msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1; + msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7); + idx++; + return true; +fail_len: + DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) +{ + int idx = 1; + + msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; + idx++; + if (idx > raw->curlen) + goto fail_len; + + memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); + idx += 16; + if (idx > raw->curlen) + goto fail_len; + + msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); + idx++; + return true; +fail_len: + DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) +{ + memset(msg, 0, sizeof(*msg)); + msg->req_type = (raw->msg[0] & 0x7f); + + switch (msg->req_type) { + case DP_CONNECTION_STATUS_NOTIFY: + return drm_dp_sideband_parse_connection_status_notify(raw, msg); + case DP_RESOURCE_STATUS_NOTIFY: + return drm_dp_sideband_parse_resource_status_notify(raw, msg); + default: + DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type); + return false; + } +} + +static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_REMOTE_DPCD_WRITE; + req.u.dpcd_write.port_number = port_num; + req.u.dpcd_write.dpcd_address = offset; + 
req.u.dpcd_write.num_bytes = num_bytes; + req.u.dpcd_write.bytes = bytes; + drm_dp_encode_sideband_req(&req, msg); + + return 0; +} + +static int build_link_address(struct drm_dp_sideband_msg_tx *msg) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_LINK_ADDRESS; + drm_dp_encode_sideband_req(&req, msg); + return 0; +} + +static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_ENUM_PATH_RESOURCES; + req.u.port_num.port_number = port_num; + drm_dp_encode_sideband_req(&req, msg); + msg->path_msg = true; + return 0; +} + +static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, + u8 vcpi, uint16_t pbn) +{ + struct drm_dp_sideband_msg_req_body req; + memset(&req, 0, sizeof(req)); + req.req_type = DP_ALLOCATE_PAYLOAD; + req.u.allocate_payload.port_number = port_num; + req.u.allocate_payload.vcpi = vcpi; + req.u.allocate_payload.pbn = pbn; + drm_dp_encode_sideband_req(&req, msg); + msg->path_msg = true; + return 0; +} + +static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_vcpi *vcpi) +{ + int ret; + + mutex_lock(&mgr->payload_lock); + ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); + if (ret > mgr->max_payloads) { + ret = -EINVAL; + DRM_DEBUG_KMS("out of payload ids %d\n", ret); + goto out_unlock; + } + + set_bit(ret, &mgr->payload_mask); + vcpi->vcpi = ret; + mgr->proposed_vcpis[ret - 1] = vcpi; +out_unlock: + mutex_unlock(&mgr->payload_lock); + return ret; +} + +static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, + int id) +{ + if (id == 0) + return; + + mutex_lock(&mgr->payload_lock); + DRM_DEBUG_KMS("putting payload %d\n", id); + clear_bit(id, &mgr->payload_mask); + mgr->proposed_vcpis[id - 1] = NULL; + mutex_unlock(&mgr->payload_lock); +} + +static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg) +{ + bool ret; + mutex_lock(&mgr->qlock); + ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || + txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); + mutex_unlock(&mgr->qlock); + return ret; +} + +static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, + struct drm_dp_sideband_msg_tx *txmsg) +{ + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + int ret; + + ret = wait_event_timeout(mgr->tx_waitq, + check_txmsg_state(mgr, txmsg), + (4 * HZ)); + mutex_lock(&mstb->mgr->qlock); + if (ret > 0) { + if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { + ret = -EIO; + goto out; + } + } else { + DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); + + /* dump some state */ + ret = -EIO; + + /* remove from q */ + if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || + txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) { + list_del(&txmsg->next); + } + + if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || + txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { + mstb->tx_slots[txmsg->seqno] = NULL; + } + } +out: + mutex_unlock(&mgr->qlock); + + return ret; +} + +static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) +{ + struct drm_dp_mst_branch *mstb; + + mstb = kzalloc(sizeof(*mstb), GFP_KERNEL); + if (!mstb) + return NULL; + + mstb->lct = lct; + if (lct > 1) + memcpy(mstb->rad, rad, lct / 2); + INIT_LIST_HEAD(&mstb->ports); + kref_init(&mstb->kref); + return mstb; +} + +static void drm_dp_destroy_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = container_of(kref, struct 
drm_dp_mst_branch, kref); + struct drm_dp_mst_port *port, *tmp; + bool wake_tx = false; + + cancel_work_sync(&mstb->mgr->work); + + /* + * destroy all ports - don't need lock + * as there are no more references to the mst branch + * device at this point. + */ + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + list_del(&port->next); + drm_dp_put_port(port); + } + + /* drop any tx slots msg */ + mutex_lock(&mstb->mgr->qlock); + if (mstb->tx_slots[0]) { + mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[0] = NULL; + wake_tx = true; + } + if (mstb->tx_slots[1]) { + mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[1] = NULL; + wake_tx = true; + } + mutex_unlock(&mstb->mgr->qlock); + + if (wake_tx) + wake_up(&mstb->mgr->tx_waitq); + kfree(mstb); +} + +static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) +{ + kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device); +} + + +static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) +{ + switch (old_pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + break; + case DP_PEER_DEVICE_MST_BRANCHING: + drm_dp_put_mst_branch_device(port->mstb); + port->mstb = NULL; + break; + } +} + +static void drm_dp_destroy_port(struct kref *kref) +{ + struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + if (!port->input) { + port->vcpi.num_slots = 0; + if (port->connector) + (*port->mgr->cbs->destroy_connector)(mgr, port->connector); + drm_dp_port_teardown_pdt(port, port->pdt); + + if (!port->input && port->vcpi.vcpi > 0) + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); + } + kfree(port); + + (*mgr->cbs->hotplug)(mgr); +} + +static void drm_dp_put_port(struct drm_dp_mst_port *port) +{ + kref_put(&port->kref, drm_dp_destroy_port); +} + +static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find) +{ + struct drm_dp_mst_port *port; + struct drm_dp_mst_branch *rmstb; + if (to_find == mstb) { + kref_get(&mstb->kref); + return mstb; + } + list_for_each_entry(port, &mstb->ports, next) { + if (port->mstb) { + rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find); + if (rmstb) + return rmstb; + } + } + return NULL; +} + +static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_branch *rmstb = NULL; + mutex_lock(&mgr->lock); + if (mgr->mst_primary) + rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb); + mutex_unlock(&mgr->lock); + return rmstb; +} + +static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find) +{ + struct drm_dp_mst_port *port, *mport; + + list_for_each_entry(port, &mstb->ports, next) { + if (port == to_find) { + kref_get(&port->kref); + return port; + } + if (port->mstb) { + mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find); + if (mport) + return mport; + } + } + return NULL; +} + +static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + struct drm_dp_mst_port *rport = NULL; + mutex_lock(&mgr->lock); + if (mgr->mst_primary) + rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port); + mutex_unlock(&mgr->lock); + 
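+ /* a non-NULL rport carries an extra kref taken while the port was
+ * still reachable from mst_primary; the caller must drop it with
+ * drm_dp_put_port() when done */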
return rport; +} + +static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) +{ + struct drm_dp_mst_port *port; + + list_for_each_entry(port, &mstb->ports, next) { + if (port->port_num == port_num) { + kref_get(&port->kref); + return port; + } + } + + return NULL; +} + +/* + * calculate a new RAD for this MST branch device + * if parent has an LCT of 2 then it has 1 nibble of RAD, + * if parent has an LCT of 3 then it has 2 nibbles of RAD, + */ +static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, + u8 *rad) +{ + int lct = port->parent->lct; + int shift = 4; + int idx = lct / 2; + if (lct > 1) { + memcpy(rad, port->parent->rad, idx); + shift = (lct % 2) ? 4 : 0; + } else + rad[0] = 0; + + rad[idx] |= port->port_num << shift; + return lct + 1; +} + +/* + * return sends link address for new mstb + */ +static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) +{ + int ret; + u8 rad[6], lct; + bool send_link = false; + switch (port->pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* add i2c over sideband */ + ret = drm_dp_mst_register_i2c_bus(&port->aux); + break; + case DP_PEER_DEVICE_MST_BRANCHING: + lct = drm_dp_calculate_rad(port, rad); + + port->mstb = drm_dp_add_mst_branch_device(lct, rad); + port->mstb->mgr = port->mgr; + port->mstb->port_parent = port; + + send_link = true; + break; + } + return send_link; +} + +static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) +{ + int ret; + if (port->dpcd_rev >= 0x12) { + port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); + if (!port->guid_valid) { + ret = drm_dp_send_dpcd_write(mstb->mgr, + port, + DP_GUID, + 16, port->guid); + port->guid_valid = true; + } + } +} + +static void build_mst_prop_path(struct drm_dp_mst_port *port, + struct drm_dp_mst_branch *mstb, + char *proppath) +{ + int i; + char temp[8]; + snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id); + for (i = 0; i < (mstb->lct - 1); i++) { + int shift = (i % 2) ? 
0 : 4; + int port_num = mstb->rad[i / 2] >> shift; + snprintf(temp, 8, "-%d", port_num); + strncat(proppath, temp, 255); + } + snprintf(temp, 8, "-%d", port->port_num); + strncat(proppath, temp, 255); +} + +static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, + struct device *dev, + struct drm_dp_link_addr_reply_port *port_msg) +{ + struct drm_dp_mst_port *port; + bool ret; + bool created = false; + int old_pdt = 0; + int old_ddps = 0; + port = drm_dp_get_port(mstb, port_msg->port_number); + if (!port) { + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) + return; + kref_init(&port->kref); + port->parent = mstb; + port->port_num = port_msg->port_number; + port->mgr = mstb->mgr; + port->aux.name = "DPMST"; + port->aux.dev = dev; + created = true; + } else { + old_pdt = port->pdt; + old_ddps = port->ddps; + } + + port->pdt = port_msg->peer_device_type; + port->input = port_msg->input_port; + port->mcs = port_msg->mcs; + port->ddps = port_msg->ddps; + port->ldps = port_msg->legacy_device_plug_status; + port->dpcd_rev = port_msg->dpcd_revision; + port->num_sdp_streams = port_msg->num_sdp_streams; + port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; + memcpy(port->guid, port_msg->peer_guid, 16); + + /* manage mstb port lists with mgr lock - take a reference + for this list */ + if (created) { + mutex_lock(&mstb->mgr->lock); + kref_get(&port->kref); + list_add(&port->next, &mstb->ports); + mutex_unlock(&mstb->mgr->lock); + } + + if (old_ddps != port->ddps) { + if (port->ddps) { + drm_dp_check_port_guid(mstb, port); + if (!port->input) + drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); + } else { + port->guid_valid = false; + port->available_pbn = 0; + } + } + + if (old_pdt != port->pdt && !port->input) { + drm_dp_port_teardown_pdt(port, old_pdt); + + ret = drm_dp_port_setup_pdt(port); + if (ret == true) { + drm_dp_send_link_address(mstb->mgr, port->mstb); + port->mstb->link_address_sent = true; + } + } + + if (created && !port->input) { + char proppath[255]; + build_mst_prop_path(port, mstb, proppath); + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); + } + + /* put reference to this port */ + drm_dp_put_port(port); +} + +static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, + struct drm_dp_connection_status_notify *conn_stat) +{ + struct drm_dp_mst_port *port; + int old_pdt; + int old_ddps; + bool dowork = false; + port = drm_dp_get_port(mstb, conn_stat->port_number); + if (!port) + return; + + old_ddps = port->ddps; + old_pdt = port->pdt; + port->pdt = conn_stat->peer_device_type; + port->mcs = conn_stat->message_capability_status; + port->ldps = conn_stat->legacy_device_plug_status; + port->ddps = conn_stat->displayport_device_plug_status; + + if (old_ddps != port->ddps) { + if (port->ddps) { + drm_dp_check_port_guid(mstb, port); + dowork = true; + } else { + port->guid_valid = false; + port->available_pbn = 0; + } + } + if (old_pdt != port->pdt && !port->input) { + drm_dp_port_teardown_pdt(port, old_pdt); + + if (drm_dp_port_setup_pdt(port)) + dowork = true; + } + + drm_dp_put_port(port); + if (dowork) + queue_work(system_long_wq, &mstb->mgr->work); + +} + +static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, + u8 lct, u8 *rad) +{ + struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_port *port; + int i; + /* find the port by iterating down */ + mstb = mgr->mst_primary; + + for (i = 0; i < lct - 1; i++) { + int shift = (i % 2) ? 
0 : 4; + int port_num = rad[i / 2] >> shift; + + list_for_each_entry(port, &mstb->ports, next) { + if (port->port_num == port_num) { + if (!port->mstb) { + DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); + return NULL; + } + + mstb = port->mstb; + break; + } + } + } + kref_get(&mstb->kref); + return mstb; +} + +static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_port *port; + + if (!mstb->link_address_sent) { + drm_dp_send_link_address(mgr, mstb); + mstb->link_address_sent = true; + } + list_for_each_entry(port, &mstb->ports, next) { + if (port->input) + continue; + + if (!port->ddps) + continue; + + if (!port->available_pbn) + drm_dp_send_enum_path_resources(mgr, mstb, port); + + if (port->mstb) + drm_dp_check_and_send_link_address(mgr, port->mstb); + } +} + +static void drm_dp_mst_link_probe_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); + + drm_dp_check_and_send_link_address(mgr, mgr->mst_primary); + +} + +static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, + u8 *guid) +{ + static u8 zero_guid[16]; + + if (!memcmp(guid, zero_guid, 16)) { + u64 salt = get_jiffies_64(); + memcpy(&guid[0], &salt, sizeof(u64)); + memcpy(&guid[8], &salt, sizeof(u64)); + return false; + } + return true; +} + +#if 0 +static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_REMOTE_DPCD_READ; + req.u.dpcd_read.port_number = port_num; + req.u.dpcd_read.dpcd_address = offset; + req.u.dpcd_read.num_bytes = num_bytes; + drm_dp_encode_sideband_req(&req, msg); + + return 0; +} +#endif + +static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, + bool up, u8 *msg, int len) +{ + int ret; + int regbase = up ? 
DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE; + int tosend, total, offset; + int retries = 0; + +retry: + total = len; + offset = 0; + do { + tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); + + ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, + &msg[offset], + tosend); + if (ret != tosend) { + if (ret == -EIO && retries < 5) { + retries++; + goto retry; + } + DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); + WARN(1, "fail\n"); + + return -EIO; + } + offset += tosend; + total -= tosend; + } while (total > 0); + return 0; +} + +static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, + struct drm_dp_sideband_msg_tx *txmsg) +{ + struct drm_dp_mst_branch *mstb = txmsg->dst; + + /* both msg slots are full */ + if (txmsg->seqno == -1) { + if (mstb->tx_slots[0] && mstb->tx_slots[1]) { + DRM_DEBUG_KMS("%s: failed to find slot\n", __func__); + return -EAGAIN; + } + if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) { + txmsg->seqno = mstb->last_seqno; + mstb->last_seqno ^= 1; + } else if (mstb->tx_slots[0] == NULL) + txmsg->seqno = 0; + else + txmsg->seqno = 1; + mstb->tx_slots[txmsg->seqno] = txmsg; + } + hdr->broadcast = 0; + hdr->path_msg = txmsg->path_msg; + hdr->lct = mstb->lct; + hdr->lcr = mstb->lct - 1; + if (mstb->lct > 1) + memcpy(hdr->rad, mstb->rad, mstb->lct / 2); + hdr->seqno = txmsg->seqno; + return 0; +} +/* + * process a single block of the next message in the sideband queue + */ +static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg, + bool up) +{ + u8 chunk[48]; + struct drm_dp_sideband_msg_hdr hdr; + int len, space, idx, tosend; + int ret; + + memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); + + if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) { + txmsg->seqno = -1; + txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; + } + + /* make hdr from dst mst - for replies use seqno + otherwise assign one */ + ret = set_hdr_from_dst_qlock(&hdr, txmsg); + if (ret < 0) + return ret; + + /* amount left to send in this message */ + len = txmsg->cur_len - txmsg->cur_offset; + + /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ + space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); + + tosend = min(len, space); + if (len == txmsg->cur_len) + hdr.somt = 1; + if (space >= len) + hdr.eomt = 1; + + + hdr.msg_len = tosend + 1; + drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx); + memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); + /* add crc at end */ + drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend); + idx += tosend + 1; + + ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); + if (ret) { + DRM_DEBUG_KMS("sideband msg failed to send\n"); + return ret; + } + + txmsg->cur_offset += tosend; + if (txmsg->cur_offset == txmsg->cur_len) { + txmsg->state = DRM_DP_SIDEBAND_TX_SENT; + return 1; + } + return 0; +} + +/* must be called holding qlock */ +static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + /* construct a chunk from the first msg in the tx_msg queue */ + if (list_empty(&mgr->tx_msg_downq)) { + mgr->tx_down_in_progress = false; + return; + } + mgr->tx_down_in_progress = true; + + txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); + ret = process_single_tx_qlock(mgr, txmsg, false); + if (ret == 1) { + /* txmsg is sent it should be in the slots now */ + list_del(&txmsg->next); + } else if (ret) { + DRM_DEBUG_KMS("failed to send msg in q 
%d\n", ret); + list_del(&txmsg->next); + if (txmsg->seqno != -1) + txmsg->dst->tx_slots[txmsg->seqno] = NULL; + txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + wake_up(&mgr->tx_waitq); + } + if (list_empty(&mgr->tx_msg_downq)) { + mgr->tx_down_in_progress = false; + return; + } +} + +/* called holding qlock */ +static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + /* construct a chunk from the first msg in the tx_msg queue */ + if (list_empty(&mgr->tx_msg_upq)) { + mgr->tx_up_in_progress = false; + return; + } + + txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next); + ret = process_single_tx_qlock(mgr, txmsg, true); + if (ret == 1) { + /* up txmsgs aren't put in slots - so free after we send it */ + list_del(&txmsg->next); + kfree(txmsg); + } else if (ret) + DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); + mgr->tx_up_in_progress = true; +} + +static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg) +{ + mutex_lock(&mgr->qlock); + list_add_tail(&txmsg->next, &mgr->tx_msg_downq); + if (!mgr->tx_down_in_progress) + process_single_down_tx_qlock(mgr); + mutex_unlock(&mgr->qlock); +} + +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) +{ + int len; + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + txmsg->dst = mstb; + len = build_link_address(txmsg); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + int i; + + if (txmsg->reply.reply_type == 1) + DRM_DEBUG_KMS("link address nak received\n"); + else { + DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { + DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i, + txmsg->reply.u.link_addr.ports[i].input_port, + txmsg->reply.u.link_addr.ports[i].peer_device_type, + txmsg->reply.u.link_addr.ports[i].port_number, + txmsg->reply.u.link_addr.ports[i].dpcd_revision, + txmsg->reply.u.link_addr.ports[i].mcs, + txmsg->reply.u.link_addr.ports[i].ddps, + txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, + txmsg->reply.u.link_addr.ports[i].num_sdp_streams, + txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); + } + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { + drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); + } + (*mgr->cbs->hotplug)(mgr); + } + } else + DRM_DEBUG_KMS("link address failed %d\n", ret); + + kfree(txmsg); + return 0; +} + +static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) +{ + int len; + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + txmsg->dst = mstb; + len = build_enum_path_resources(txmsg, port->port_num); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == 1) + DRM_DEBUG_KMS("enum path resources nak received\n"); + else { + if (port->port_num != txmsg->reply.u.path_resources.port_number) + DRM_ERROR("got incorrect port in response\n"); + DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, 
txmsg->reply.u.path_resources.full_payload_bw_number, + txmsg->reply.u.path_resources.avail_payload_bw_number); + port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; + } + } + + kfree(txmsg); + return 0; +} + +static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int id, + int pbn) +{ + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + int len, ret; + + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); + if (!mstb) + return -EINVAL; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto fail_put; + } + + txmsg->dst = mstb; + len = build_allocate_payload(txmsg, port->port_num, + id, + pbn); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == 1) { + ret = -EINVAL; + } else + ret = 0; + } + kfree(txmsg); +fail_put: + drm_dp_put_mst_branch_device(mstb); + return ret; +} + +static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, + int id, + struct drm_dp_payload *payload) +{ + int ret; + + ret = drm_dp_dpcd_write_payload(mgr, id, payload); + if (ret < 0) { + payload->payload_state = 0; + return ret; + } + payload->payload_state = DP_PAYLOAD_LOCAL; + return 0; +} + +static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int id, + struct drm_dp_payload *payload) +{ + int ret; + ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); + if (ret < 0) + return ret; + payload->payload_state = DP_PAYLOAD_REMOTE; + return ret; +} + +static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int id, + struct drm_dp_payload *payload) +{ + DRM_DEBUG_KMS("\n"); + /* it's okay for these to fail */ + if (port) { + drm_dp_payload_send_msg(mgr, port, id, 0); + } + + drm_dp_dpcd_write_payload(mgr, id, payload); + payload->payload_state = 0; + return 0; +} + +static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, + int id, + struct drm_dp_payload *payload) +{ + payload->payload_state = 0; + return 0; +} + +/** + * drm_dp_update_payload_part1() - Execute payload update part 1 + * @mgr: manager to use. + * + * This iterates over all proposed virtual channels, and tries to + * allocate space in the link for them. For 0->slots transitions, + * this step just writes the VCPI to the MST device. For slots->0 + * transitions, this writes the updated VCPIs and removes the + * remote VC payloads. + * + * After calling this, the driver should generate ACT and payload + * packets. 
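The table walk in the function below computes each payload's start slot as a running sum: time slot 0 of the main link carries the MTP header, so allocation starts at slot 1. A stand-alone illustration with made-up slot counts:

#include <stdio.h>

int main(void)
{
        int num_slots[3] = { 10, 0, 21 };       /* proposed VCPI sizes */
        int cur_slots = 1, i;                   /* slot 0 = MTP header */

        for (i = 0; i < 3; i++) {
                if (num_slots[i])
                        printf("payload %d: start_slot=%d num_slots=%d\n",
                               i + 1, cur_slots, num_slots[i]);
                cur_slots += num_slots[i];
        }
        return 0;
}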
+ */ +int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) +{ + int i; + int cur_slots = 1; + struct drm_dp_payload req_payload; + struct drm_dp_mst_port *port; + + mutex_lock(&mgr->payload_lock); + for (i = 0; i < mgr->max_payloads; i++) { + /* solve the current payloads - compare to the hw ones + - update the hw view */ + req_payload.start_slot = cur_slots; + if (mgr->proposed_vcpis[i]) { + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); + req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; + } else { + port = NULL; + req_payload.num_slots = 0; + } + /* work out what is required to happen with this payload */ + if (mgr->payloads[i].start_slot != req_payload.start_slot || + mgr->payloads[i].num_slots != req_payload.num_slots) { + + /* need to push an update for this payload */ + if (req_payload.num_slots) { + drm_dp_create_payload_step1(mgr, i + 1, &req_payload); + mgr->payloads[i].num_slots = req_payload.num_slots; + } else if (mgr->payloads[i].num_slots) { + mgr->payloads[i].num_slots = 0; + drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]); + req_payload.payload_state = mgr->payloads[i].payload_state; + } else + req_payload.payload_state = 0; + + mgr->payloads[i].start_slot = req_payload.start_slot; + mgr->payloads[i].payload_state = req_payload.payload_state; + } + cur_slots += req_payload.num_slots; + } + mutex_unlock(&mgr->payload_lock); + + return 0; +} +EXPORT_SYMBOL(drm_dp_update_payload_part1); + +/** + * drm_dp_update_payload_part2() - Execute payload update part 2 + * @mgr: manager to use. + * + * This iterates over all proposed virtual channels, and tries to + * allocate space in the link for them. For 0->slots transitions, + * this step writes the remote VC payload commands. For slots->0 + * this just resets some internal state. 
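+ *
+ * This is the second half of the sequence sketched above for
+ * drm_dp_update_payload_part1(). A non-zero return is propagated from the
+ * remote ALLOCATE_PAYLOAD sideband message and should fail the update,
+ * e.g. (sketch):
+ *
+ *	ret = drm_dp_update_payload_part2(mgr);
+ *	if (ret)
+ *		return ret;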
+ */ +int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_mst_port *port; + int i; + int ret = 0; + mutex_lock(&mgr->payload_lock); + for (i = 0; i < mgr->max_payloads; i++) { + + if (!mgr->proposed_vcpis[i]) + continue; + + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); + + DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); + if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { + ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]); + } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { + ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]); + } + if (ret) { + mutex_unlock(&mgr->payload_lock); + return ret; + } + } + mutex_unlock(&mgr->payload_lock); + return 0; +} +EXPORT_SYMBOL(drm_dp_update_payload_part2); + +#if 0 /* unused as of yet */ +static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size) +{ + int len; + struct drm_dp_sideband_msg_tx *txmsg; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + len = build_dpcd_read(txmsg, port->port_num, 0, 8); + txmsg->dst = port->parent; + + drm_dp_queue_down_tx(mgr, txmsg); + + return 0; +} +#endif + +static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes) +{ + int len; + int ret; + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); + if (!mstb) + return -EINVAL; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto fail_put; + } + + len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); + txmsg->dst = mstb; + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == 1) { + ret = -EINVAL; + } else + ret = 0; + } + kfree(txmsg); +fail_put: + drm_dp_put_mst_branch_device(mstb); + return ret; +} + +static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) +{ + struct drm_dp_sideband_msg_reply_body reply; + + reply.reply_type = 1; + reply.req_type = req_type; + drm_dp_encode_sideband_reply(&reply, msg); + return 0; +} + +static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + int req_type, int seqno, bool broadcast) +{ + struct drm_dp_sideband_msg_tx *txmsg; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + txmsg->dst = mstb; + txmsg->seqno = seqno; + drm_dp_encode_up_ack_reply(txmsg, req_type); + + mutex_lock(&mgr->qlock); + list_add_tail(&txmsg->next, &mgr->tx_msg_upq); + if (!mgr->tx_up_in_progress) { + process_single_up_tx_qlock(mgr); + } + mutex_unlock(&mgr->qlock); + return 0; +} + +static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count) +{ + switch (dp_link_bw) { + case DP_LINK_BW_1_62: + return 3 * dp_link_count; + case DP_LINK_BW_2_7: + return 5 * dp_link_count; + case DP_LINK_BW_5_4: + return 10 * dp_link_count; + } + return 0; +} + +/** + * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager + * @mgr: manager to set state for + * @mst_state: true to enable MST on this connector - false to disable. + * + * This is called by the driver when it detects an MST capable device plugged + * into a DP MST capable port, or when a DP MST capable device is unplugged. 
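+ *
+ * An illustrative detect path (the MST capability check itself is driver
+ * specific and my_sink_is_mst_capable() is a hypothetical helper):
+ *
+ *	if (my_sink_is_mst_capable(my_dp))
+ *		drm_dp_mst_topology_mgr_set_mst(mgr, true);
+ *	else
+ *		drm_dp_mst_topology_mgr_set_mst(mgr, false);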
+ */ +int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) +{ + int ret = 0; + struct drm_dp_mst_branch *mstb = NULL; + + mutex_lock(&mgr->lock); + if (mst_state == mgr->mst_state) + goto out_unlock; + + mgr->mst_state = mst_state; + /* set the device into MST mode */ + if (mst_state) { + WARN_ON(mgr->mst_primary); + + /* get dpcd info */ + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); + if (ret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("failed to read DPCD\n"); + goto out_unlock; + } + + mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK); + mgr->total_pbn = 2560; + mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div); + mgr->avail_slots = mgr->total_slots; + + /* add initial branch device at LCT 1 */ + mstb = drm_dp_add_mst_branch_device(1, NULL); + if (mstb == NULL) { + ret = -ENOMEM; + goto out_unlock; + } + mstb->mgr = mgr; + + /* give this the main reference */ + mgr->mst_primary = mstb; + kref_get(&mgr->mst_primary->kref); + + { + struct drm_dp_payload reset_pay; + reset_pay.start_slot = 0; + reset_pay.num_slots = 0x3f; + drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); + } + + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); + if (ret < 0) { + goto out_unlock; + } + + + /* sort out guid */ + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16); + if (ret != 16) { + DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret); + goto out_unlock; + } + + mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid); + if (!mgr->guid_valid) { + ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16); + mgr->guid_valid = true; + } + + queue_work(system_long_wq, &mgr->work); + + ret = 0; + } else { + /* disable MST on the device */ + mstb = mgr->mst_primary; + mgr->mst_primary = NULL; + /* this can fail if the device is gone */ + drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); + ret = 0; + memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); + mgr->payload_mask = 0; + set_bit(0, &mgr->payload_mask); + } + +out_unlock: + mutex_unlock(&mgr->lock); + if (mstb) + drm_dp_put_mst_branch_device(mstb); + return ret; + +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); + +/** + * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager + * @mgr: manager to suspend + * + * This function tells the MST device that we can't handle UP messages + * anymore. This should stop it from sending any since we are suspended. + */ +void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->lock); + drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UPSTREAM_IS_SRC); + mutex_unlock(&mgr->lock); +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); + +/** + * drm_dp_mst_topology_mgr_resume() - resume the MST manager + * @mgr: manager to resume + * + * This will fetch DPCD and see if the device is still there, + * if it is, it will rewrite the MSTM control bits, and return. + * + * if the device fails this returns -1, and the driver should do + * a full MST reprobe, in case we were undocked. 
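+ *
+ * For example (sketch; my_full_mst_reprobe() stands in for whatever
+ * driver-specific reprobe logic is appropriate):
+ *
+ *	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
+ *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
+ *		my_full_mst_reprobe(dev);
+ *	}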
+ */ +int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) +{ + int ret = 0; + + mutex_lock(&mgr->lock); + + if (mgr->mst_primary) { + int sret; + sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); + if (sret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + ret = -1; + goto out_unlock; + } + + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); + if (ret < 0) { + DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); + ret = -1; + goto out_unlock; + } + ret = 0; + } else + ret = -1; + +out_unlock: + mutex_unlock(&mgr->lock); + return ret; +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); + +static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) +{ + int len; + u8 replyblock[32]; + int replylen, origlen, curreply; + int ret; + struct drm_dp_sideband_msg_rx *msg; + int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE; + msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv; + + len = min(mgr->max_dpcd_transaction_bytes, 16); + ret = drm_dp_dpcd_read(mgr->aux, basereg, + replyblock, len); + if (ret != len) { + DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); + return; + } + ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); + if (!ret) { + DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); + return; + } + replylen = msg->curchunk_len + msg->curchunk_hdrlen; + + origlen = replylen; + replylen -= len; + curreply = len; + while (replylen > 0) { + len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); + ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, + replyblock, len); + if (ret != len) { + DRM_DEBUG_KMS("failed to read a chunk\n"); + } + ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); + if (ret == false) + DRM_DEBUG_KMS("failed to build sideband msg\n"); + curreply += len; + replylen -= len; + } +} + +static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) +{ + int ret = 0; + + drm_dp_get_one_sb_msg(mgr, false); + + if (mgr->down_rep_recv.have_eomt) { + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + int slot = -1; + mstb = drm_dp_get_mst_branch_device(mgr, + mgr->down_rep_recv.initial_hdr.lct, + mgr->down_rep_recv.initial_hdr.rad); + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct); + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + /* find the message */ + slot = mgr->down_rep_recv.initial_hdr.seqno; + mutex_lock(&mgr->qlock); + txmsg = mstb->tx_slots[slot]; + /* remove from slots */ + mutex_unlock(&mgr->qlock); + + if (!txmsg) { + DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", + mstb, + mgr->down_rep_recv.initial_hdr.seqno, + mgr->down_rep_recv.initial_hdr.lct, + mgr->down_rep_recv.initial_hdr.rad[0], + mgr->down_rep_recv.msg[0]); + drm_dp_put_mst_branch_device(mstb); + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); + if (txmsg->reply.reply_type == 1) { + DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data); + } + + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + drm_dp_put_mst_branch_device(mstb); + + mutex_lock(&mgr->qlock); + txmsg->state 
= DRM_DP_SIDEBAND_TX_RX; + mstb->tx_slots[slot] = NULL; + mutex_unlock(&mgr->qlock); + + wake_up(&mgr->tx_waitq); + } + return ret; +} + +static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) +{ + int ret = 0; + drm_dp_get_one_sb_msg(mgr, true); + + if (mgr->up_req_recv.have_eomt) { + struct drm_dp_sideband_msg_req_body msg; + struct drm_dp_mst_branch *mstb; + bool seqno; + mstb = drm_dp_get_mst_branch_device(mgr, + mgr->up_req_recv.initial_hdr.lct, + mgr->up_req_recv.initial_hdr.rad); + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + seqno = mgr->up_req_recv.initial_hdr.seqno; + drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); + + if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { + drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); + drm_dp_update_port(mstb, &msg.u.conn_stat); + DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); + (*mgr->cbs->hotplug)(mgr); + + } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { + drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); + DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); + } + + drm_dp_put_mst_branch_device(mstb); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + } + return ret; +} + +/** + * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify + * @mgr: manager to notify irq for. + * @esi: 4 bytes from SINK_COUNT_ESI + * + * This should be called from the driver when it detects a short IRQ, + * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The + * topology manager will process the sideband messages received as a result + * of this. + */ +int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled) +{ + int ret = 0; + int sc; + *handled = false; + sc = esi[0] & 0x3f; + + if (sc != mgr->sink_count) { + mgr->sink_count = sc; + *handled = true; + } + + if (esi[1] & DP_DOWN_REP_MSG_RDY) { + ret = drm_dp_mst_handle_down_rep(mgr); + *handled = true; + } + + if (esi[1] & DP_UP_REQ_MSG_RDY) { + ret |= drm_dp_mst_handle_up_req(mgr); + *handled = true; + } + + drm_dp_mst_kick_tx(mgr); + return ret; +} +EXPORT_SYMBOL(drm_dp_mst_hpd_irq); + +/** + * drm_dp_mst_detect_port() - get connection status for an MST port + * @mgr: manager for this port + * @port: unverified pointer to a port + * + * This returns the current connection state for a port. 
It validates the + * port pointer still exists so the caller doesn't require a reference. + */ +enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + enum drm_connector_status status = connector_status_disconnected; + + /* we need to search for the port in the mgr in case it's gone */ + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return connector_status_disconnected; + + if (!port->ddps) + goto out; + + switch (port->pdt) { + case DP_PEER_DEVICE_NONE: + case DP_PEER_DEVICE_MST_BRANCHING: + break; + + case DP_PEER_DEVICE_SST_SINK: + status = connector_status_connected; + break; + case DP_PEER_DEVICE_DP_LEGACY_CONV: + if (port->ldps) + status = connector_status_connected; + break; + } +out: + drm_dp_put_port(port); + return status; +} +EXPORT_SYMBOL(drm_dp_mst_detect_port); + +/** + * drm_dp_mst_get_edid() - get EDID for an MST port + * @connector: toplevel connector to get EDID for + * @mgr: manager for this port + * @port: unverified pointer to a port. + * + * This returns an EDID for the port connected to a connector. + * It validates the pointer still exists so the caller doesn't require a + * reference. + */ +struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + struct edid *edid = NULL; + + /* we need to search for the port in the mgr in case it's gone */ + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return NULL; + + edid = drm_get_edid(connector, &port->aux.ddc); + drm_dp_put_port(port); + return edid; +} +EXPORT_SYMBOL(drm_dp_mst_get_edid); + +/** + * drm_dp_find_vcpi_slots() - find slots for this PBN value + * @mgr: manager to use + * @pbn: payload bandwidth to convert into slots. + */ +int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, + int pbn) +{ + int num_slots; + + num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); + + if (num_slots > mgr->avail_slots) + return -ENOSPC; + return num_slots; +} +EXPORT_SYMBOL(drm_dp_find_vcpi_slots); + +static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_vcpi *vcpi, int pbn) +{ + int num_slots; + int ret; + + num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); + + if (num_slots > mgr->avail_slots) + return -ENOSPC; + + vcpi->pbn = pbn; + vcpi->aligned_pbn = num_slots * mgr->pbn_div; + vcpi->num_slots = num_slots; + + ret = drm_dp_mst_assign_payload_id(mgr, vcpi); + if (ret < 0) + return ret; + return 0; +} + +/** + * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel + * @mgr: manager for this port + * @port: port to allocate a virtual channel for. + * @pbn: payload bandwidth number to request + * @slots: returned number of slots for this PBN.
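+ *
+ * Returns true on success. An illustrative caller (clock/bpp are whatever
+ * mode the driver is programming):
+ *
+ *	int slots;
+ *	int pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+ *
+ *	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
+ *		return false;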
+ */ +bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots) +{ + int ret; + + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return false; + + if (port->vcpi.vcpi > 0) { + DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); + if (pbn == port->vcpi.pbn) { + *slots = port->vcpi.num_slots; + drm_dp_put_port(port); + return true; + } + } + + ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn); + if (ret) { + DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret); + goto out; + } + DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots); + *slots = port->vcpi.num_slots; + + drm_dp_put_port(port); + return true; +out: + drm_dp_put_port(port); + return false; +} +EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); + +/** + * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI + * @mgr: manager for this port + * @port: unverified pointer to a port. + * + * This just resets the number of slots for the port's VCPI for later programming. + */ +void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return; + port->vcpi.num_slots = 0; + drm_dp_put_port(port); +} +EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); + +/** + * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI + * @mgr: manager for this port + * @port: unverified port to deallocate vcpi for + */ +void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return; + + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); + port->vcpi.num_slots = 0; + port->vcpi.pbn = 0; + port->vcpi.aligned_pbn = 0; + port->vcpi.vcpi = 0; + drm_dp_put_port(port); +} +EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); + +static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, + int id, struct drm_dp_payload *payload) +{ + u8 payload_alloc[3], status; + int ret; + int retries = 0; + + drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, + DP_PAYLOAD_TABLE_UPDATED); + + payload_alloc[0] = id; + payload_alloc[1] = payload->start_slot; + payload_alloc[2] = payload->num_slots; + + ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); + if (ret != 3) { + DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret); + goto fail; + } + +retry: + ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + if (ret < 0) { + DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); + goto fail; + } + + if (!(status & DP_PAYLOAD_TABLE_UPDATED)) { + retries++; + if (retries < 20) { + usleep_range(10000, 20000); + goto retry; + } + DRM_DEBUG_KMS("status not set after read payload table status %d\n", status); + ret = -EINVAL; + goto fail; + } + ret = 0; +fail: + return ret; +} + + +/** + * drm_dp_check_act_status() - Check ACT handled status. + * @mgr: manager to use + * + * Check the payload status bits in the DPCD for ACT handled completion.
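+ *
+ * Returns 0 once the sink sets DP_PAYLOAD_ACT_HANDLED, or a negative
+ * error code if the bit never appears within the polling budget; see the
+ * sequence sketched in drm_dp_update_payload_part1().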
+ */ +int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) +{ + u8 status; + int ret; + int count = 0; + + do { + ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + + if (ret < 0) { + DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); + goto fail; + } + + if (status & DP_PAYLOAD_ACT_HANDLED) + break; + count++; + udelay(100); + + } while (count < 30); + + if (!(status & DP_PAYLOAD_ACT_HANDLED)) { + DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); + ret = -EINVAL; + goto fail; + } + return 0; +fail: + return ret; +} +EXPORT_SYMBOL(drm_dp_check_act_status); + +/** + * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. + * @clock: dot clock for the mode + * @bpp: bpp for the mode. + * + * This uses the formula in the spec to calculate the PBN value for a mode. + */ +int drm_dp_calc_pbn_mode(int clock, int bpp) +{ + fixed20_12 pix_bw; + fixed20_12 fbpp; + fixed20_12 result; + fixed20_12 margin, tmp; + u32 res; + + pix_bw.full = dfixed_const(clock); + fbpp.full = dfixed_const(bpp); + tmp.full = dfixed_const(8); + fbpp.full = dfixed_div(fbpp, tmp); + + result.full = dfixed_mul(pix_bw, fbpp); + margin.full = dfixed_const(54); + tmp.full = dfixed_const(64); + margin.full = dfixed_div(margin, tmp); + result.full = dfixed_div(result, margin); + + margin.full = dfixed_const(1006); + tmp.full = dfixed_const(1000); + margin.full = dfixed_div(margin, tmp); + result.full = dfixed_mul(result, margin); + + result.full = dfixed_div(result, tmp); + result.full = dfixed_ceil(result); + res = dfixed_trunc(result); + return res; +} +EXPORT_SYMBOL(drm_dp_calc_pbn_mode); + +static int test_calc_pbn_mode(void) +{ + int ret; + ret = drm_dp_calc_pbn_mode(154000, 30); + if (ret != 689) + return -EINVAL; + ret = drm_dp_calc_pbn_mode(234000, 30); + if (ret != 1047) + return -EINVAL; + return 0; +} + +/* we want to kick the TX after we've acked the up/down IRQs. */ +static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr) +{ + queue_work(system_long_wq, &mgr->tx_work); +} + +static void drm_dp_mst_dump_mstb(struct seq_file *m, + struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_port *port; + int tabs = mstb->lct; + char prefix[10]; + int i; + + for (i = 0; i < tabs; i++) + prefix[i] = '\t'; + prefix[i] = '\0'; + + seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports); + list_for_each_entry(port, &mstb->ports, next) { + seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector); + if (port->mstb) + drm_dp_mst_dump_mstb(m, port->mstb); + } +} + +static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, + char *buf) +{ + int ret; + int i; + for (i = 0; i < 4; i++) { + ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16); + if (ret != 16) + break; + } + if (i == 4) + return true; + return false; +} + +/** + * drm_dp_mst_dump_topology(): dump topology to seq file. + * @m: seq_file to dump output to + * @mgr: manager to dump current topology for. + * + * helper to dump MST topology to a seq file for debugfs.
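+ *
+ * A minimal debugfs hook might look like this (sketch; how the mgr is
+ * looked up is driver specific):
+ *
+ *	static int my_mst_topology_show(struct seq_file *m, void *data)
+ *	{
+ *		struct drm_dp_mst_topology_mgr *mgr = m->private;
+ *
+ *		drm_dp_mst_dump_topology(m, mgr);
+ *		return 0;
+ *	}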
+ */ +void drm_dp_mst_dump_topology(struct seq_file *m, + struct drm_dp_mst_topology_mgr *mgr) +{ + int i; + struct drm_dp_mst_port *port; + mutex_lock(&mgr->lock); + if (mgr->mst_primary) + drm_dp_mst_dump_mstb(m, mgr->mst_primary); + + /* dump VCPIs */ + mutex_unlock(&mgr->lock); + + mutex_lock(&mgr->payload_lock); + seq_printf(m, "vcpi: %lx\n", mgr->payload_mask); + + for (i = 0; i < mgr->max_payloads; i++) { + if (mgr->proposed_vcpis[i]) { + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); + seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots); + } else + seq_printf(m, "vcpi %d:unused\n", i); + } + for (i = 0; i < mgr->max_payloads; i++) { + seq_printf(m, "payload %d: %d, %d, %d\n", + i, + mgr->payloads[i].payload_state, + mgr->payloads[i].start_slot, + mgr->payloads[i].num_slots); + + + } + mutex_unlock(&mgr->payload_lock); + + mutex_lock(&mgr->lock); + if (mgr->mst_primary) { + u8 buf[64]; + bool bret; + int ret; + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); + seq_printf(m, "dpcd: "); + for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++) + seq_printf(m, "%02x ", buf[i]); + seq_printf(m, "\n"); + ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); + seq_printf(m, "faux/mst: "); + for (i = 0; i < 2; i++) + seq_printf(m, "%02x ", buf[i]); + seq_printf(m, "\n"); + ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); + seq_printf(m, "mst ctrl: "); + for (i = 0; i < 1; i++) + seq_printf(m, "%02x ", buf[i]); + seq_printf(m, "\n"); + + bret = dump_dp_payload_table(mgr, buf); + if (bret == true) { + seq_printf(m, "payload table: "); + for (i = 0; i < 63; i++) + seq_printf(m, "%02x ", buf[i]); + seq_printf(m, "\n"); + } + + } + + mutex_unlock(&mgr->lock); + +} +EXPORT_SYMBOL(drm_dp_mst_dump_topology); + +static void drm_dp_tx_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); + + mutex_lock(&mgr->qlock); + if (mgr->tx_down_in_progress) + process_single_down_tx_qlock(mgr); + mutex_unlock(&mgr->qlock); +} + +/** + * drm_dp_mst_topology_mgr_init - initialise a topology manager + * @mgr: manager struct to initialise + * @dev: device providing this structure - for i2c addition. + * @aux: DP helper aux channel to talk to this device + * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit + * @max_payloads: maximum number of payloads this GPU can source + * @conn_base_id: the connector object ID the MST device is connected to.
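+ *
+ * An illustrative call from driver init (the transaction size and payload
+ * count below are hypothetical, hardware-dependent values):
+ *
+ *	ret = drm_dp_mst_topology_mgr_init(&my_mgr, dev->dev, &my_aux,
+ *					   16, 4, connector->base.id);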
+ * + * Return 0 for success, or negative error code on failure + */ +int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, + struct device *dev, struct drm_dp_aux *aux, + int max_dpcd_transaction_bytes, + int max_payloads, int conn_base_id) +{ + mutex_init(&mgr->lock); + mutex_init(&mgr->qlock); + mutex_init(&mgr->payload_lock); + INIT_LIST_HEAD(&mgr->tx_msg_upq); + INIT_LIST_HEAD(&mgr->tx_msg_downq); + INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); + INIT_WORK(&mgr->tx_work, drm_dp_tx_work); + init_waitqueue_head(&mgr->tx_waitq); + mgr->dev = dev; + mgr->aux = aux; + mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; + mgr->max_payloads = max_payloads; + mgr->conn_base_id = conn_base_id; + mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); + if (!mgr->payloads) + return -ENOMEM; + mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); + if (!mgr->proposed_vcpis) + return -ENOMEM; + set_bit(0, &mgr->payload_mask); + test_calc_pbn_mode(); + return 0; +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); + +/** + * drm_dp_mst_topology_mgr_destroy() - destroy topology manager. + * @mgr: manager to destroy + */ +void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->payload_lock); + kfree(mgr->payloads); + mgr->payloads = NULL; + kfree(mgr->proposed_vcpis); + mgr->proposed_vcpis = NULL; + mutex_unlock(&mgr->payload_lock); + mgr->dev = NULL; + mgr->aux = NULL; +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); + +/* I2C device */ +static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, + int num) +{ + struct drm_dp_aux *aux = adapter->algo_data; + struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux); + struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + unsigned int i; + bool reading = false; + struct drm_dp_sideband_msg_req_body msg; + struct drm_dp_sideband_msg_tx *txmsg = NULL; + int ret; + + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); + if (!mstb) + return -EREMOTEIO; + + /* construct i2c msg */ + /* see if last msg is a read */ + if (msgs[num - 1].flags & I2C_M_RD) + reading = true; + + if (!reading) { + DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); + ret = -EIO; + goto out; + } + + msg.req_type = DP_REMOTE_I2C_READ; + msg.u.i2c_read.num_transactions = num - 1; + msg.u.i2c_read.port_number = port->port_num; + for (i = 0; i < num - 1; i++) { + msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; + msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; + msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; + } + msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; + msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto out; + } + + txmsg->dst = mstb; + drm_dp_encode_sideband_req(&msg, txmsg); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + + if (txmsg->reply.reply_type == 1) { /* got a NAK back */ + ret = -EREMOTEIO; + goto out; + } + if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { + ret = -EIO; + goto out; + } + memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); + ret = num; + } +out: + kfree(txmsg); + drm_dp_put_mst_branch_device(mstb); + return ret; +} + +static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter) +{ + return 
I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL | + I2C_FUNC_10BIT_ADDR; +} + +static const struct i2c_algorithm drm_dp_mst_i2c_algo = { + .functionality = drm_dp_mst_i2c_functionality, + .master_xfer = drm_dp_mst_i2c_xfer, +}; + +/** + * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX + * @aux: DisplayPort AUX channel + * + * Returns 0 on success or a negative error code on failure. + */ +static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux) +{ + aux->ddc.algo = &drm_dp_mst_i2c_algo; + aux->ddc.algo_data = aux; + aux->ddc.retries = 3; + + aux->ddc.class = I2C_CLASS_DDC; + aux->ddc.owner = THIS_MODULE; + aux->ddc.dev.parent = aux->dev; + aux->ddc.dev.of_node = aux->dev->of_node; + + strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), + sizeof(aux->ddc.name)); + + return i2c_add_adapter(&aux->ddc); +} + +/** + * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter + * @aux: DisplayPort AUX channel + */ +static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux) +{ + i2c_del_adapter(&aux->ddc); +} diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8218078b6133..0cc182745e31 100755..100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -233,7 +233,7 @@ module_exit(drm_core_exit); /** * Copy and IOCTL return string to user space */ -static int drm_copy_field(char *buf, size_t *buf_len, const char *value) +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value) { int len; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index dfa9769b26b5..087d6080bc1d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3305,6 +3305,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder == encoder && connector->eld[0]) diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index f27c883be391..cc0ae047ed3b 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -327,7 +327,7 @@ err_drm_gem_cma_free_object: return ret; } -static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = { +static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = { .fb_probe = drm_fbdev_cma_create, }; @@ -354,9 +354,10 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, return ERR_PTR(-ENOMEM); } - fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs; helper = &fbdev_cma->fb_helper; + drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs); + ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count); if (ret < 0) { dev_err(dev->dev, "Failed to initialize drm fb helper.\n"); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d5d8cea1a679..3144db9dc0f1 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -49,10 +49,11 @@ static LIST_HEAD(kernel_fb_helper_list); * helper functions used by many drivers to implement the kernel mode setting * interfaces. * - * Initialization is done as a three-step process with drm_fb_helper_init(), - * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config(). 
- * Drivers with fancier requirements than the default behaviour can override the - * second step with their own code. Teardown is done with drm_fb_helper_fini(). + * Initialization is done as a four-step process with drm_fb_helper_prepare(), + * drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and + * drm_fb_helper_initial_config(). Drivers with fancier requirements than the + * default behaviour can override the third step with their own code. + * Teardown is done with drm_fb_helper_fini(). * * At runtime drivers should restore the fbdev console by calling * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They @@ -63,6 +64,19 @@ static LIST_HEAD(kernel_fb_helper_list); * * All other functions exported by the fb helper library can be used to * implement the fbdev driver interface by the driver. + * + * It is possible, though perhaps somewhat tricky, to implement race-free + * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare() + * helper must be called first to initialize the minimum required to make + * hotplug detection work. Drivers also need to make sure to properly set up + * the dev->mode_config.funcs member. After calling drm_kms_helper_poll_init() + * it is safe to enable interrupts and start processing hotplug events. At the + * same time, drivers should initialize all modeset objects such as CRTCs, + * encoders and connectors. To finish up the fbdev helper initialization, the + * drm_fb_helper_init() function is called. To probe for all attached displays + * and set up an initial configuration using the detected hardware, drivers + * should call drm_fb_helper_single_add_all_connectors() followed by + * drm_fb_helper_initial_config(). */ /** @@ -105,6 +119,58 @@ fail: } EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); +int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector) +{ + struct drm_fb_helper_connector **temp; + struct drm_fb_helper_connector *fb_helper_connector; + + WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex)); + if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) { + temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL); + if (!temp) + return -ENOMEM; + + fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1; + fb_helper->connector_info = temp; + } + + + fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); + if (!fb_helper_connector) + return -ENOMEM; + + fb_helper_connector->connector = connector; + fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_add_one_connector); + +int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + struct drm_fb_helper_connector *fb_helper_connector; + int i, j; + + WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex)); + + for (i = 0; i < fb_helper->connector_count; i++) { + if (fb_helper->connector_info[i]->connector == connector) + break; + } + + if (i == fb_helper->connector_count) + return -EINVAL; + fb_helper_connector = fb_helper->connector_info[i]; + + for (j = i + 1; j < fb_helper->connector_count; j++) { + fb_helper->connector_info[j - 1] = fb_helper->connector_info[j]; + } + fb_helper->connector_count--; + kfree(fb_helper_connector); + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); + static int 
drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) { struct drm_fb_helper_connector *fb_helper_conn; @@ -199,9 +265,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info) struct drm_crtc_helper_funcs *funcs; int i; - if (list_empty(&kernel_fb_helper_list)) - return false; - list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { for (i = 0; i < helper->crtc_count; i++) { struct drm_mode_set *mode_set = @@ -531,6 +594,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) } /** + * drm_fb_helper_prepare - setup a drm_fb_helper structure + * @dev: DRM device + * @helper: driver-allocated fbdev helper structure to set up + * @funcs: pointer to structure of functions associated with this helper + * + * Sets up the bare minimum to make the framebuffer helper usable. This is + * useful to implement race-free initialization of the polling helpers. + */ +void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, + const struct drm_fb_helper_funcs *funcs) +{ + INIT_LIST_HEAD(&helper->kernel_fb_list); + helper->funcs = funcs; + helper->dev = dev; +} +EXPORT_SYMBOL(drm_fb_helper_prepare); + +/** * drm_fb_helper_init - initialize a drm_fb_helper structure * @dev: drm device * @fb_helper: driver-allocated fbdev helper structure to initialize @@ -542,8 +623,7 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) * nor register the fbdev. This is only done in drm_fb_helper_initial_config() * to allow driver writers more control over the exact init sequence. * - * Drivers must set fb_helper->funcs before calling - * drm_fb_helper_initial_config(). + * Drivers must call drm_fb_helper_prepare() before calling this function. * * RETURNS: * Zero if everything went ok, nonzero otherwise. @@ -558,10 +638,6 @@ int drm_fb_helper_init(struct drm_device *dev, if (!max_conn_count) return -EINVAL; - fb_helper->dev = dev; - - INIT_LIST_HEAD(&fb_helper->kernel_fb_list); - fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); if (!fb_helper->crtc_info) return -ENOMEM; @@ -572,6 +648,7 @@ int drm_fb_helper_init(struct drm_device *dev, kfree(fb_helper->crtc_info); return -ENOMEM; } + fb_helper->connector_info_alloc_count = dev->mode_config.num_connector; fb_helper->connector_count = 0; for (i = 0; i < crtc_count; i++) { @@ -1056,7 +1133,6 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, info->fix.ypanstep = 1; /* doing it in hw */ info->fix.ywrapstep = 0; info->fix.accel = FB_ACCEL_NONE; - info->fix.type_aux = 0; info->fix.line_length = pitch; return; @@ -1613,8 +1689,10 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config); * either the output polling work or a work item launched from the driver's * hotplug interrupt). * - * Note that the driver must ensure that this is only called _after_ the fb has - * been fully set up, i.e. after the call to drm_fb_helper_initial_config. + * Note that drivers may call this even before calling + * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows + * for a race-free fbcon setup and will make sure that the fbdev emulation will + * not miss any hotplug events. * * RETURNS: * 0 on success and a non-zero error code otherwise.
@@ -1624,11 +1702,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) struct drm_device *dev = fb_helper->dev; u32 max_width, max_height; - if (!fb_helper->fb) - return 0; - mutex_lock(&fb_helper->dev->mode_config.mutex); - if (!drm_fb_helper_is_bound(fb_helper)) { + if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) { fb_helper->delayed_hotplug = true; mutex_unlock(&fb_helper->dev->mode_config.mutex); return 0; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index f7d71190aad5..6adee4c2afc0 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset); * drm_gem_get_pages - helper to allocate backing pages for a GEM object * from shmem * @obj: obj in question - * @gfpmask: gfp mask of requested pages + * + * This reads the page-array of the shmem-backing storage of the given gem + * object. An array of pages is returned. If a page is not allocated or + * swapped-out, this will allocate/swap-in the required pages. Note that the + * whole object is covered by the page-array and pinned in memory. + * + * Use drm_gem_put_pages() to release the array and unpin all pages. + * + * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()). + * If you require other GFP-masks, you have to do those allocations yourself. + * + * Note that you are not allowed to change gfp-zones during runtime. That is, + * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as + * set during initialization. If you have special zone constraints, set them + * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care + * to keep pages in the required zone during swap-in. */ -struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) +struct page **drm_gem_get_pages(struct drm_gem_object *obj) { - struct inode *inode; struct address_space *mapping; struct page *p, **pages; int i, npages; /* This is the shared memory object that backs the GEM resource */ - inode = file_inode(obj->filp); - mapping = inode->i_mapping; + mapping = file_inode(obj->filp)->i_mapping; /* We already BUG_ON() for non-page-aligned sizes in * drm_gem_object_init(), so we should never hit this unless @@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) if (pages == NULL) return ERR_PTR(-ENOMEM); - gfpmask |= mapping_gfp_mask(mapping); - for (i = 0; i < npages; i++) { - p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); + p = shmem_read_mapping_page(mapping, i); if (IS_ERR(p)) goto fail; pages[i] = p; @@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping) * so shmem can relocate pages during swapin if required. */ - BUG_ON((gfpmask & __GFP_DMA32) && + BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) && (page_to_pfn(p) >= 0x00100000UL)); } diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 05c97c5350a1..e467e67af6e7 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -327,7 +327,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size, /* Create a CMA GEM buffer. 
*/ cma_obj = __drm_gem_cma_create(dev, size); if (IS_ERR(cma_obj)) - return ERR_PTR(PTR_ERR(cma_obj)); + return ERR_CAST(cma_obj); cma_obj->paddr = sg_dma_address(sgt->sgl); cma_obj->sgt = sgt; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 69c61f392e66..ad66f961170e 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -342,8 +342,6 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) file_priv->stereo_allowed = req->value; break; case DRM_CLIENT_CAP_UNIVERSAL_PLANES: - if (!drm_universal_planes) - return -EINVAL; if (req->value > 1) return -EINVAL; file_priv->universal_planes = req->value; diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 6d133149cc74..827ec1a3040b 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -335,9 +335,10 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev, } /* possible_crtc's will be filled in later by crtc_init */ - ret = drm_plane_init(dev, primary, 0, &drm_primary_helper_funcs, - formats, num_formats, - DRM_PLANE_TYPE_PRIMARY); + ret = drm_universal_plane_init(dev, primary, 0, + &drm_primary_helper_funcs, + formats, num_formats, + DRM_PLANE_TYPE_PRIMARY); if (ret) { kfree(primary); primary = NULL; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d22676b89cbb..db7d250f7ac7 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -130,7 +130,14 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect count = drm_load_edid_firmware(connector); if (count == 0) #endif - count = (*connector_funcs->get_modes)(connector); + { + if (connector->override_edid) { + struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; + + count = drm_add_edid_modes(connector, edid); + } else + count = (*connector_funcs->get_modes)(connector); + } if (count == 0 && connector->status == connector_status_connected) count = drm_add_modes_noedid(connector, 1024, 768); diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 14d16464000a..233ea208c9fe 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -37,18 +37,9 @@ unsigned int drm_debug = 0; /* 1 to enable debug output */ EXPORT_SYMBOL(drm_debug); -unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */ -EXPORT_SYMBOL(drm_rnodes); - -/* 1 to allow user space to request universal planes (experimental) */ -unsigned int drm_universal_planes = 0; -EXPORT_SYMBOL(drm_universal_planes); - unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ -EXPORT_SYMBOL(drm_vblank_offdelay); unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ -EXPORT_SYMBOL(drm_timestamp_precision); /* * Default to use monotonic timestamps for wait-for-vblank and page-flip @@ -60,14 +51,11 @@ MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output"); -MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API"); MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); MODULE_PARM_DESC(timestamp_precision_usec, "Max. 
error on timestamps [usecs]"); MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); module_param_named(debug, drm_debug, int, 0600); -module_param_named(rnodes, drm_rnodes, int, 0600); -module_param_named(universal_planes, drm_universal_planes, int, 0600); module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); @@ -588,7 +576,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, goto err_minors; } - if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { + if (drm_core_check_feature(dev, DRIVER_RENDER)) { ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); if (ret) goto err_minors; diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 369b26278e76..7827dad8fcf4 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -438,7 +438,6 @@ err_out_files: out: return ret; } -EXPORT_SYMBOL(drm_sysfs_connector_add); /** * drm_sysfs_connector_remove - remove an connector device from sysfs @@ -468,7 +467,6 @@ void drm_sysfs_connector_remove(struct drm_connector *connector) device_unregister(connector->kdev); connector->kdev = NULL; } -EXPORT_SYMBOL(drm_sysfs_connector_remove); /** * drm_sysfs_hotplug_event - generate a DRM uevent diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index a8ffc8c1477b..86dc69d9eabb 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1018,7 +1018,7 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display, } drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index 9a16dbe121d1..ba9b3d5ed672 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -117,20 +117,7 @@ static struct drm_encoder *exynos_drm_best_encoder( struct drm_device *dev = connector->dev; struct exynos_drm_connector *exynos_connector = to_exynos_connector(connector); - struct drm_mode_object *obj; - struct drm_encoder *encoder; - - obj = drm_mode_object_find(dev, exynos_connector->encoder_id, - DRM_MODE_OBJECT_ENCODER); - if (!obj) { - DRM_DEBUG_KMS("Unknown ENCODER ID %d\n", - exynos_connector->encoder_id); - return NULL; - } - - encoder = obj_to_encoder(obj); - - return encoder; + return drm_encoder_find(dev, exynos_connector->encoder_id); } static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { @@ -185,7 +172,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector) struct exynos_drm_connector *exynos_connector = to_exynos_connector(connector); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(exynos_connector); } @@ -230,7 +217,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, drm_connector_init(dev, connector, &exynos_connector_funcs, type); drm_connector_helper_add(connector, &exynos_connector_helper_funcs); - err = drm_sysfs_connector_add(connector); + err = drm_connector_register(connector); if (err) goto err_connector; @@ -250,7 +237,7 @@ struct drm_connector 
*exynos_drm_connector_create(struct drm_device *dev, return connector; err_sysfs: - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); err_connector: drm_connector_cleanup(connector); kfree(exynos_connector); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 9e530f205ad2..3aa1c7ebbfcc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -48,7 +48,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force) static void exynos_dpi_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -117,7 +117,7 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display, } drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index ab7d182063c3..d82e3cb8a70d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -39,8 +39,6 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -#define VBLANK_OFF_DELAY 50000 - static struct platform_device *exynos_drm_pdev; static DEFINE_MUTEX(drm_component_lock); @@ -103,8 +101,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) /* setup possible_clones. */ exynos_drm_encoder_setup(dev); - drm_vblank_offdelay = VBLANK_OFF_DELAY; - platform_set_drvdata(dev->platformdev, dev); /* Try to bind all sub drivers. */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index 06cde4506278..02f3b3dcb9f8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -40,8 +40,6 @@ struct drm_device; struct exynos_drm_overlay; struct drm_connector; -extern unsigned int drm_vblank_offdelay; - /* This enumerates device type. 
*/ enum exynos_drm_device_type { EXYNOS_DEVICE_TYPE_NONE, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 6302aa64f6c1..2df3592166de 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1246,7 +1246,7 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display, } drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index d771b467cf0c..32e63f60e1d1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -225,7 +225,7 @@ out: return ret; } -static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { +static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { .fb_probe = exynos_drm_fbdev_create, }; @@ -266,7 +266,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev) return -ENOMEM; private->fb_helper = helper = &fbdev->drm_fb_helper; - helper->funcs = &exynos_drm_fb_helper_funcs; + + drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs); num_crtc = dev->mode_config.num_crtc; diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 2fb8705d6461..9528d81d8004 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -562,7 +562,7 @@ static int vidi_create_connector(struct exynos_drm_display *display, } drm_connector_helper_add(connector, &vidi_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index aa259b0a873a..81df11d57673 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1129,7 +1129,7 @@ static int hdmi_create_connector(struct exynos_drm_display *display, } drm_connector_helper_add(connector, &hdmi_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index c18268cd516e..248c33a35ebf 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -192,7 +192,7 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector) struct gma_encoder *gma_encoder = gma_attached_encoder(connector); psb_intel_i2c_destroy(gma_encoder->ddc_bus); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -304,7 +304,7 @@ void cdv_intel_crt_init(struct drm_device *dev, drm_connector_helper_add(connector, &cdv_intel_crt_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; failed_ddc: diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 9ff30c2efadb..a4cc0e60a1be 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -1713,7 +1713,7 @@ cdv_intel_dp_destroy(struct drm_connector *connector) } } i2c_del_adapter(&intel_dp->adapter); - 
drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -1847,7 +1847,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); /* Set up the DDC bus. */ switch (output_reg) { diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index b99084b3f706..4268bf210034 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -248,7 +248,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector) if (gma_encoder->i2c_bus) psb_intel_i2c_destroy(gma_encoder->i2c_bus); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -356,7 +356,7 @@ void cdv_hdmi_init(struct drm_device *dev, hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter); hdmi_priv->dev = dev; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; failed_ddc: diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 8ecc920fc26d..0b770396548c 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -446,7 +446,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector) if (gma_encoder->i2c_bus) psb_intel_i2c_destroy(gma_encoder->i2c_bus); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -774,7 +774,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, out: mutex_unlock(&dev->mode_config.mutex); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; failed_find: diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index e7fcc148f333..d0dd3bea8aa5 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -561,7 +561,7 @@ static int psbfb_probe(struct drm_fb_helper *helper, return psbfb_create(psb_fbdev, sizes); } -static struct drm_fb_helper_funcs psb_fb_helper_funcs = { +static const struct drm_fb_helper_funcs psb_fb_helper_funcs = { .gamma_set = psbfb_gamma_set, .gamma_get = psbfb_gamma_get, .fb_probe = psbfb_probe, @@ -600,7 +600,8 @@ int psb_fbdev_init(struct drm_device *dev) } dev_priv->fbdev = fbdev; - fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs; + + drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs); drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs, INTELFB_CONN_LIMIT); diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index 592d205a0089..ce015db59dc6 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt) WARN_ON(gt->pages); - pages = drm_gem_get_pages(>->gem, 0); + pages = drm_gem_get_pages(>->gem); if (IS_ERR(pages)) return PTR_ERR(pages); diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 6e91b20ce2e5..abf2248da61e 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -318,7 +318,7 @@ static void mdfld_dsi_connector_destroy(struct drm_connector *connector) if (!dsi_connector) return; - 
drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); sender = dsi_connector->pkg_sender; mdfld_dsi_pkg_sender_destroy(sender); @@ -597,7 +597,7 @@ void mdfld_dsi_output_init(struct drm_device *dev, dsi_config->encoder = encoder; encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; /*TODO: add code to destroy outputs on error*/ diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index cf018ddcc5a6..e6f5c620a0a2 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -665,7 +665,7 @@ void oaktrail_hdmi_init(struct drm_device *dev, connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); dev_info(dev->dev, "HDMI initialised.\n"); return; diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 9b099468a5db..0d39da6e8b7a 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -404,7 +404,7 @@ void oaktrail_lvds_init(struct drm_device *dev, out: mutex_unlock(&dev->mode_config.mutex); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; failed_find: diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index d7778d0472c1..88aad95bde09 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -563,7 +563,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector) if (lvds_priv->ddc_bus) psb_intel_i2c_destroy(lvds_priv->ddc_bus); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -829,7 +829,7 @@ void psb_intel_lvds_init(struct drm_device *dev, */ out: mutex_unlock(&dev->mode_config.mutex); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; failed_find: diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index deeb0829b129..0be96fdb5e28 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1682,7 +1682,7 @@ static void psb_intel_sdvo_destroy(struct drm_connector *connector) psb_intel_sdvo_connector->tv_format); psb_intel_sdvo_destroy_enhance_property(connector); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -2071,7 +2071,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector, connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; gma_connector_attach_encoder(&connector->base, &encoder->base); - drm_sysfs_connector_add(&connector->base.base); + drm_connector_register(&connector->base.base); } static void diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 240c331405b9..a8f1e03bd1f5 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -810,6 +810,12 @@ static int tda998x_encoder_mode_valid(struct drm_encoder *encoder, struct drm_display_mode *mode) { + if (mode->clock > 150000) + return MODE_CLOCK_HIGH; + if (mode->htotal >= BIT(13)) + return MODE_BAD_HVALUE; + if (mode->vtotal >= 
BIT(11)) + return MODE_BAD_VVALUE; return MODE_OK; } @@ -1048,8 +1054,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk) return i; } } else { - for (i = 10; i > 0; i--) { - msleep(10); + for (i = 100; i > 0; i--) { + msleep(1); ret = reg_read(priv, REG_INT_FLAGS_2); if (ret < 0) return ret; @@ -1183,7 +1189,6 @@ static void tda998x_encoder_destroy(struct drm_encoder *encoder) { struct tda998x_priv *priv = to_tda998x_priv(encoder); - drm_i2c_encoder_destroy(encoder); /* disable all IRQs and free the IRQ handler */ cec_write(priv, REG_CEC_RXSHPDINTENA, 0); @@ -1191,8 +1196,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder) if (priv->hdmi->irq) free_irq(priv->hdmi->irq, priv); - if (priv->cec) - i2c_unregister_device(priv->cec); + i2c_unregister_device(priv->cec); + drm_i2c_encoder_destroy(encoder); kfree(priv); } diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index cad1683d8bb5..91bd167e1cb7 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -59,6 +59,7 @@ i915-y += dvo_ch7017.o \ intel_crt.o \ intel_ddi.o \ intel_dp.o \ + intel_dp_mst.o \ intel_dsi_cmd.o \ intel_dsi.o \ intel_dsi_pll.o \ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 9d7954366bd2..dea99d92fb4a 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -426,6 +426,9 @@ static const u32 gen7_render_regs[] = { GEN7_SO_WRITE_OFFSET(1), GEN7_SO_WRITE_OFFSET(2), GEN7_SO_WRITE_OFFSET(3), + GEN7_L3SQCREG1, + GEN7_L3CNTLREG2, + GEN7_L3CNTLREG3, }; static const u32 gen7_blt_regs[] = { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b8c689202c40..e6ff0ecf161c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -170,11 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) } if (obj->ring != NULL) seq_printf(m, " (%s)", obj->ring->name); + if (obj->frontbuffer_bits) + seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); } static void describe_ctx(struct seq_file *m, struct intel_context *ctx) { - seq_putc(m, ctx->is_initialized ? 'I' : 'i'); + seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i'); seq_putc(m, ctx->remap_slice ? 
'R' : 'r'); seq_putc(m, ' '); } @@ -515,6 +517,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; unsigned long flags; struct intel_crtc *crtc; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; for_each_intel_crtc(dev, crtc) { const char pipe = pipe_name(crtc->pipe); @@ -556,6 +563,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) spin_unlock_irqrestore(&dev->event_lock, flags); } + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -985,29 +994,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, i915_next_seqno_get, i915_next_seqno_set, "0x%llx\n"); -static int i915_rstdby_delays(struct seq_file *m, void *unused) -{ - struct drm_info_node *node = m->private; - struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u16 crstanddelay; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - intel_runtime_pm_get(dev_priv); - - crstanddelay = I915_READ16(CRSTANDVID); - - intel_runtime_pm_put(dev_priv); - mutex_unlock(&dev->struct_mutex); - - seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); - - return 0; -} - static int i915_frequency_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; @@ -1029,7 +1015,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) MEMSTAT_VID_SHIFT); seq_printf(m, "Current P-state: %d\n", (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); - } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { + } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || + IS_BROADWELL(dev)) { u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); @@ -1048,7 +1035,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) reqf = I915_READ(GEN6_RPNSWREQ); reqf &= ~GEN6_TURBO_DISABLE; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) reqf >>= 24; else reqf >>= 25; @@ -1065,7 +1052,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; else cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; @@ -1148,61 +1135,6 @@ out: return ret; } -static int i915_delayfreq_table(struct seq_file *m, void *unused) -{ - struct drm_info_node *node = m->private; - struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 delayfreq; - int ret, i; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - intel_runtime_pm_get(dev_priv); - - for (i = 0; i < 16; i++) { - delayfreq = I915_READ(PXVFREQ_BASE + i * 4); - seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, - (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); - } - - intel_runtime_pm_put(dev_priv); - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -static inline int MAP_TO_MV(int map) -{ - return 1250 - (map * 25); -} - -static int i915_inttoext_table(struct seq_file *m, void *unused) -{ - struct drm_info_node *node = m->private; - struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 inttoext; - int ret, 
i; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - intel_runtime_pm_get(dev_priv); - - for (i = 1; i <= 32; i++) { - inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); - seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); - } - - intel_runtime_pm_put(dev_priv); - mutex_unlock(&dev->struct_mutex); - - return 0; -} - static int ironlake_drpc_info(struct seq_file *m) { struct drm_info_node *node = m->private; @@ -1513,10 +1445,17 @@ static int i915_ips_status(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE) - seq_puts(m, "enabled\n"); - else - seq_puts(m, "disabled\n"); + seq_printf(m, "Enabled by kernel parameter: %s\n", + yesno(i915.enable_ips)); + + if (INTEL_INFO(dev)->gen >= 8) { + seq_puts(m, "Currently: unknown\n"); + } else { + if (I915_READ(IPS_CTL) & IPS_ENABLE) + seq_puts(m, "Currently: enabled\n"); + else + seq_puts(m, "Currently: disabled\n"); + } intel_runtime_pm_put(dev_priv); @@ -1620,26 +1559,6 @@ out: return ret; } -static int i915_gfxec(struct seq_file *m, void *unused) -{ - struct drm_info_node *node = m->private; - struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - intel_runtime_pm_get(dev_priv); - - seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); - intel_runtime_pm_put(dev_priv); - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - static int i915_opregion(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; @@ -1677,9 +1596,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) #ifdef CONFIG_DRM_I915_FBDEV struct drm_i915_private *dev_priv = dev->dev_private; - int ret = mutex_lock_interruptible(&dev->mode_config.mutex); - if (ret) - return ret; ifbdev = dev_priv->fbdev; fb = to_intel_framebuffer(ifbdev->helper.fb); @@ -1692,7 +1608,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) atomic_read(&fb->base.refcount.refcount)); describe_obj(m, fb->obj); seq_putc(m, '\n'); - mutex_unlock(&dev->mode_config.mutex); #endif mutex_lock(&dev->mode_config.fb_lock); @@ -1723,7 +1638,7 @@ static int i915_context_status(struct seq_file *m, void *unused) struct intel_context *ctx; int ret, i; - ret = mutex_lock_interruptible(&dev->mode_config.mutex); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -1740,7 +1655,7 @@ static int i915_context_status(struct seq_file *m, void *unused) } list_for_each_entry(ctx, &dev_priv->context_list, link) { - if (ctx->obj == NULL) + if (ctx->legacy_hw_ctx.rcs_state == NULL) continue; seq_puts(m, "HW context "); @@ -1749,11 +1664,11 @@ static int i915_context_status(struct seq_file *m, void *unused) if (ring->default_context == ctx) seq_printf(m, "(default context %s) ", ring->name); - describe_obj(m, ctx->obj); + describe_obj(m, ctx->legacy_hw_ctx.rcs_state); seq_putc(m, '\n'); } - mutex_unlock(&dev->mode_config.mutex); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -1863,7 +1778,7 @@ static int per_file_ctx(int id, void *ptr, void *data) if (i915_gem_context_is_default(ctx)) seq_puts(m, " default context:\n"); else - seq_printf(m, " context %d:\n", ctx->id); + seq_printf(m, " context %d:\n", ctx->user_handle); ppgtt->debug_dump(ppgtt, m); return 0; @@ -1978,10 +1893,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) seq_printf(m, "Sink_Support: %s\n", 
yesno(dev_priv->psr.sink_support)); seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); + seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled)); + seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); enabled = HAS_PSR(dev) && I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; - seq_printf(m, "Enabled: %s\n", yesno(enabled)); + seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled)); if (HAS_PSR(dev)) psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & @@ -2126,6 +2043,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain) return "VGA"; case POWER_DOMAIN_AUDIO: return "AUDIO"; + case POWER_DOMAIN_PLLS: + return "PLLS"; case POWER_DOMAIN_INIT: return "INIT"; default: @@ -2223,9 +2142,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) struct drm_crtc *crtc = &intel_crtc->base; struct intel_encoder *intel_encoder; - seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", - crtc->primary->fb->base.id, crtc->x, crtc->y, - crtc->primary->fb->width, crtc->primary->fb->height); + if (crtc->primary->fb) + seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", + crtc->primary->fb->base.id, crtc->x, crtc->y, + crtc->primary->fb->width, crtc->primary->fb->height); + else + seq_puts(m, "\tprimary plane disabled\n"); for_each_encoder_on_crtc(dev, crtc, intel_encoder) intel_encoder_info(m, intel_crtc, intel_encoder); } @@ -2287,13 +2209,15 @@ static void intel_connector_info(struct seq_file *m, seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); } - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) - intel_dp_info(m, intel_connector); - else if (intel_encoder->type == INTEL_OUTPUT_HDMI) - intel_hdmi_info(m, intel_connector); - else if (intel_encoder->type == INTEL_OUTPUT_LVDS) - intel_lvds_info(m, intel_connector); + if (intel_encoder) { + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || + intel_encoder->type == INTEL_OUTPUT_EDP) + intel_dp_info(m, intel_connector); + else if (intel_encoder->type == INTEL_OUTPUT_HDMI) + intel_hdmi_info(m, intel_connector); + else if (intel_encoder->type == INTEL_OUTPUT_LVDS) + intel_lvds_info(m, intel_connector); + } seq_printf(m, "\tmodes:\n"); list_for_each_entry(mode, &connector->modes, head) @@ -2347,17 +2271,17 @@ static int i915_display_info(struct seq_file *m, void *unused) bool active; int x, y; - seq_printf(m, "CRTC %d: pipe: %c, active: %s\n", + seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", crtc->base.base.id, pipe_name(crtc->pipe), - yesno(crtc->active)); + yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h); if (crtc->active) { intel_crtc_info(m, crtc); active = cursor_position(dev, crtc->pipe, &x, &y); - seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n", + seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? 
%s\n", yesno(crtc->cursor_base), - x, y, crtc->cursor_addr, - yesno(active)); + x, y, crtc->cursor_width, crtc->cursor_height, + crtc->cursor_addr, yesno(active)); } seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", @@ -2377,12 +2301,132 @@ static int i915_display_info(struct seq_file *m, void *unused) return 0; } +static int i915_semaphore_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring; + int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + int i, j, ret; + + if (!i915_semaphore_is_enabled(dev)) { + seq_puts(m, "Semaphores are disabled\n"); + return 0; + } + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + intel_runtime_pm_get(dev_priv); + + if (IS_BROADWELL(dev)) { + struct page *page; + uint64_t *seqno; + + page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); + + seqno = (uint64_t *)kmap_atomic(page); + for_each_ring(ring, dev_priv, i) { + uint64_t offset; + + seq_printf(m, "%s\n", ring->name); + + seq_puts(m, " Last signal:"); + for (j = 0; j < num_rings; j++) { + offset = i * I915_NUM_RINGS + j; + seq_printf(m, "0x%08llx (0x%02llx) ", + seqno[offset], offset * 8); + } + seq_putc(m, '\n'); + + seq_puts(m, " Last wait: "); + for (j = 0; j < num_rings; j++) { + offset = i + (j * I915_NUM_RINGS); + seq_printf(m, "0x%08llx (0x%02llx) ", + seqno[offset], offset * 8); + } + seq_putc(m, '\n'); + + } + kunmap_atomic(seqno); + } else { + seq_puts(m, " Last signal:"); + for_each_ring(ring, dev_priv, i) + for (j = 0; j < num_rings; j++) + seq_printf(m, "0x%08x\n", + I915_READ(ring->semaphore.mbox.signal[j])); + seq_putc(m, '\n'); + } + + seq_puts(m, "\nSync seqno:\n"); + for_each_ring(ring, dev_priv, i) { + for (j = 0; j < num_rings; j++) { + seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); + } + seq_putc(m, '\n'); + } + seq_putc(m, '\n'); + + intel_runtime_pm_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + return 0; +} + +static int i915_shared_dplls_info(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + drm_modeset_lock_all(dev); + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; + + seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); + seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount, + pll->active, yesno(pll->on)); + seq_printf(m, " tracked hardware state:\n"); + seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll); + seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md); + seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0); + seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1); + seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll); + } + drm_modeset_unlock_all(dev); + + return 0; +} + struct pipe_crc_info { const char *name; struct drm_device *dev; enum pipe pipe; }; +static int i915_dp_mst_info(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_encoder *encoder; + struct intel_encoder *intel_encoder; + struct intel_digital_port *intel_dig_port; + drm_modeset_lock_all(dev); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + intel_encoder = 
to_intel_encoder(encoder); + if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) + continue; + intel_dig_port = enc_to_dig_port(encoder); + if (!intel_dig_port->dp.can_mst) + continue; + + drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); + } + drm_modeset_unlock_all(dev); + return 0; +} + static int i915_pipe_crc_open(struct inode *inode, struct file *filep) { struct pipe_crc_info *info = inode->i_private; @@ -2849,7 +2893,60 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, return 0; } -static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, +static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); + + drm_modeset_lock_all(dev); + /* + * If we use the eDP transcoder we need to make sure that we don't + * bypass the pfit, since otherwise the pipe CRC source won't work. Only + * relevant on hsw with pipe A when using the always-on power well + * routing. + */ + if (crtc->config.cpu_transcoder == TRANSCODER_EDP && + !crtc->config.pch_pfit.enabled) { + crtc->config.pch_pfit.force_thru = true; + + intel_display_power_get(dev_priv, + POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); + + dev_priv->display.crtc_disable(&crtc->base); + dev_priv->display.crtc_enable(&crtc->base); + } + drm_modeset_unlock_all(dev); +} + +static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); + + drm_modeset_lock_all(dev); + /* + * If we use the eDP transcoder we need to make sure that we don't + * bypass the pfit, since otherwise the pipe CRC source won't work. Only + * relevant on hsw with pipe A when using the always-on power well + * routing. 
+ */ + if (crtc->config.pch_pfit.force_thru) { + crtc->config.pch_pfit.force_thru = false; + + dev_priv->display.crtc_disable(&crtc->base); + dev_priv->display.crtc_enable(&crtc->base); + + intel_display_power_put(dev_priv, + POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); + } + drm_modeset_unlock_all(dev); +} + +static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, + enum pipe pipe, + enum intel_pipe_crc_source *source, uint32_t *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) @@ -2863,6 +2960,9 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; break; case INTEL_PIPE_CRC_SOURCE_PF: + if (IS_HASWELL(dev) && pipe == PIPE_A) + hsw_trans_edp_pipe_A_crc_wa(dev); + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; break; case INTEL_PIPE_CRC_SOURCE_NONE: @@ -2895,11 +2995,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, else if (INTEL_INFO(dev)->gen < 5) ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); else if (IS_VALLEYVIEW(dev)) - ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val); + ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); else if (IS_GEN5(dev) || IS_GEN6(dev)) ret = ilk_pipe_crc_ctl_reg(&source, &val); else - ret = ivb_pipe_crc_ctl_reg(&source, &val); + ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); if (ret != 0) return ret; @@ -2929,11 +3029,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, /* real source -> none transition */ if (source == INTEL_PIPE_CRC_SOURCE_NONE) { struct intel_pipe_crc_entry *entries; + struct intel_crtc *crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", pipe_name(pipe)); - intel_wait_for_vblank(dev, pipe); + drm_modeset_lock(&crtc->base.mutex, NULL); + if (crtc->active) + intel_wait_for_vblank(dev, pipe); + drm_modeset_unlock(&crtc->base.mutex); spin_lock_irq(&pipe_crc->lock); entries = pipe_crc->entries; @@ -2946,6 +3051,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, g4x_undo_pipe_scramble_reset(dev, pipe); else if (IS_VALLEYVIEW(dev)) vlv_undo_pipe_scramble_reset(dev, pipe); + else if (IS_HASWELL(dev) && pipe == PIPE_A) + hsw_undo_trans_edp_pipe_A_crc_wa(dev); } return 0; @@ -3506,7 +3613,7 @@ i915_max_freq_get(void *data, u64 *val) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) + if (INTEL_INFO(dev)->gen < 6) return -ENODEV; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -3532,7 +3639,7 @@ i915_max_freq_set(void *data, u64 val) u32 rp_state_cap, hw_max, hw_min; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) + if (INTEL_INFO(dev)->gen < 6) return -ENODEV; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -3587,7 +3694,7 @@ i915_min_freq_get(void *data, u64 *val) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) + if (INTEL_INFO(dev)->gen < 6) return -ENODEV; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -3613,7 +3720,7 @@ i915_min_freq_set(void *data, u64 val) u32 rp_state_cap, hw_max, hw_min; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) + if (INTEL_INFO(dev)->gen < 6) return -ENODEV; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -3799,14 +3906,10 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, - 
{"i915_rstdby_delays", i915_rstdby_delays, 0}, {"i915_frequency_info", i915_frequency_info, 0}, - {"i915_delayfreq_table", i915_delayfreq_table, 0}, - {"i915_inttoext_table", i915_inttoext_table, 0}, {"i915_drpc_info", i915_drpc_info, 0}, {"i915_emon_status", i915_emon_status, 0}, {"i915_ring_freq_table", i915_ring_freq_table, 0}, - {"i915_gfxec", i915_gfxec, 0}, {"i915_fbc_status", i915_fbc_status, 0}, {"i915_ips_status", i915_ips_status, 0}, {"i915_sr_status", i915_sr_status, 0}, @@ -3823,6 +3926,9 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_pc8_status", i915_pc8_status, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, {"i915_display_info", i915_display_info, 0}, + {"i915_semaphore_status", i915_semaphore_status, 0}, + {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, + {"i915_dp_mst_info", i915_dp_mst_info, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6c656392d67d..d335c46ec6bc 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -138,7 +138,7 @@ static void i915_free_hws(struct drm_device *dev) I915_WRITE(HWS_PGA, 0x1ffff000); } -void i915_kernel_lost_context(struct drm_device * dev) +void i915_kernel_lost_context(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; @@ -166,7 +166,7 @@ void i915_kernel_lost_context(struct drm_device * dev) master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; } -static int i915_dma_cleanup(struct drm_device * dev) +static int i915_dma_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; @@ -190,7 +190,7 @@ static int i915_dma_cleanup(struct drm_device * dev) return 0; } -static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) +static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; @@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) return 0; } -static int i915_dma_resume(struct drm_device * dev) +static int i915_dma_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring = LP_RING(dev_priv); @@ -359,7 +359,7 @@ static int validate_cmd(int cmd) return 0; } -static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) +static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords) { struct drm_i915_private *dev_priv = dev->dev_private; int i, ret; @@ -369,6 +369,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) for (i = 0; i < dwords;) { int sz = validate_cmd(buffer[i]); + if (sz == 0 || i + sz > dwords) return -EINVAL; i += sz; @@ -453,7 +454,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev) } } -static int i915_dispatch_cmdbuffer(struct drm_device * dev, +static int i915_dispatch_cmdbuffer(struct drm_device *dev, drm_i915_cmdbuffer_t *cmd, struct drm_clip_rect *cliprects, void *cmdbuf) @@ -487,8 +488,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, return 0; } -static int i915_dispatch_batchbuffer(struct drm_device * dev, - drm_i915_batchbuffer_t * batch, +static int i915_dispatch_batchbuffer(struct drm_device *dev, + drm_i915_batchbuffer_t *batch, struct drm_clip_rect *cliprects) { struct 
drm_i915_private *dev_priv = dev->dev_private; @@ -549,7 +550,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, return 0; } -static int i915_dispatch_flip(struct drm_device * dev) +static int i915_dispatch_flip(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = @@ -755,7 +756,7 @@ fail_batch_free: return ret; } -static int i915_emit_irq(struct drm_device * dev) +static int i915_emit_irq(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; @@ -781,7 +782,7 @@ static int i915_emit_irq(struct drm_device * dev) return dev_priv->dri1.counter; } -static int i915_wait_irq(struct drm_device * dev, int irq_nr) +static int i915_wait_irq(struct drm_device *dev, int irq_nr) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; @@ -1266,6 +1267,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ { struct drm_device *dev = pci_get_drvdata(pdev); pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (state == VGA_SWITCHEROO_ON) { pr_info("switched on\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; @@ -1375,9 +1377,6 @@ static int i915_load_modeset_init(struct drm_device *dev) */ intel_fbdev_initial_config(dev); - /* Only enable hotplug handling once the fbdev is fully set up. */ - dev_priv->enable_hotplug_processing = true; - drm_kms_helper_poll_init(dev); return 0; @@ -1491,10 +1490,11 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv) #define SEP_EMPTY #define PRINT_FLAG(name) info->name ? #name "," : "" #define SEP_COMMA , - DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" + DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags=" DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), info->gen, dev_priv->dev->pdev->device, + dev_priv->dev->pdev->revision, DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); #undef PRINT_S #undef SEP_EMPTY @@ -1593,7 +1593,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (dev_priv == NULL) return -ENOMEM; - dev->dev_private = (void *)dev_priv; + dev->dev_private = dev_priv; dev_priv->dev = dev; /* copy initial configuration to dev_priv->info */ @@ -1605,6 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->backlight_lock); spin_lock_init(&dev_priv->uncore.lock); spin_lock_init(&dev_priv->mm.object_stat_lock); + spin_lock_init(&dev_priv->mmio_flip_lock); mutex_init(&dev_priv->dpio_lock); mutex_init(&dev_priv->modeset_restore_lock); @@ -1716,6 +1717,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_mtrrfree; } + dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0); + if (dev_priv->dp_wq == NULL) { + DRM_ERROR("Failed to create our dp workqueue.\n"); + ret = -ENOMEM; + goto out_freewq; + } + intel_irq_init(dev); intel_uncore_sanitize(dev); @@ -1791,6 +1799,8 @@ out_gem_unload: intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); pm_qos_remove_request(&dev_priv->pm_qos); + destroy_workqueue(dev_priv->dp_wq); +out_freewq: destroy_workqueue(dev_priv->wq); out_mtrrfree: arch_phys_wc_del(dev_priv->gtt.mtrr); @@ -1891,6 +1901,7 @@ int i915_driver_unload(struct drm_device *dev) intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); + destroy_workqueue(dev_priv->dp_wq); destroy_workqueue(dev_priv->wq); 
pm_qos_remove_request(&dev_priv->pm_qos); @@ -1932,7 +1943,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) * and DMA structures, since the kernel won't be using them, and clean * up any GEM state. */ -void i915_driver_lastclose(struct drm_device * dev) +void i915_driver_lastclose(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1953,11 +1964,11 @@ void i915_driver_lastclose(struct drm_device * dev) i915_dma_cleanup(dev); } -void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) +void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) { mutex_lock(&dev->struct_mutex); - i915_gem_context_close(dev, file_priv); - i915_gem_release(dev, file_priv); + i915_gem_context_close(dev, file); + i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); } @@ -2030,7 +2041,7 @@ int i915_max_ioctl = ARRAY_SIZE(i915_ioctls); * manage the gtt, we need to claim that all intel devices are agp. For * otherwise the drm core refuses to initialize the agp support code. */ -int i915_driver_device_is_agp(struct drm_device * dev) +int i915_driver_device_is_agp(struct drm_device *dev) { return 1; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 651e65e051c0..a361bb9bc243 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -28,6 +28,7 @@ */ #include <linux/device.h> +#include <linux/acpi.h> #include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -46,8 +47,6 @@ static struct drm_driver driver; PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ - .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \ - .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } #define GEN_CHV_PIPEOFFSETS \ @@ -55,10 +54,6 @@ static struct drm_driver driver; CHV_PIPE_C_OFFSET }, \ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ CHV_TRANSCODER_C_OFFSET, }, \ - .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \ - CHV_DPLL_C_OFFSET }, \ - .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \ - CHV_DPLL_C_MD_OFFSET }, \ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ CHV_PALETTE_C_OFFSET } @@ -482,10 +477,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) if (i915.semaphores >= 0) return i915.semaphores; - /* Until we get further testing...
*/ - if (IS_GEN8(dev)) - return false; - #ifdef CONFIG_INTEL_IOMMU /* Enable semaphores on SNB when IO remapping is off */ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) @@ -499,8 +490,7 @@ static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; - - intel_runtime_pm_get(dev_priv); + pci_power_t opregion_target_state; /* ignore lid events during suspend */ mutex_lock(&dev_priv->modeset_restore_lock); @@ -526,10 +516,10 @@ static int i915_drm_freeze(struct drm_device *dev) return error; } - drm_irq_uninstall(dev); - dev_priv->enable_hotplug_processing = false; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + - intel_disable_gt_powersave(dev); + intel_suspend_gt_powersave(dev); /* * Disable CRTCs directly since we want to preserve sw state @@ -541,6 +531,9 @@ static int i915_drm_freeze(struct drm_device *dev) } drm_modeset_unlock_all(dev); + intel_dp_mst_suspend(dev); + intel_runtime_pm_disable_interrupts(dev); + intel_modeset_suspend_hw(dev); } @@ -548,8 +541,15 @@ static int i915_drm_freeze(struct drm_device *dev) i915_save_state(dev); + opregion_target_state = PCI_D3cold; +#if IS_ENABLED(CONFIG_ACPI_SLEEP) + if (acpi_target_system_state() < ACPI_STATE_S3) + opregion_target_state = PCI_D1; +#endif + intel_opregion_notify_adapter(dev, opregion_target_state); + + intel_uncore_forcewake_reset(dev, false); intel_opregion_fini(dev); - intel_uncore_fini(dev); console_lock(); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); @@ -557,6 +557,8 @@ static int i915_drm_freeze(struct drm_device *dev) dev_priv->suspend_count++; + intel_display_set_init_power(dev_priv, false); + return 0; } @@ -606,7 +608,10 @@ static int i915_drm_thaw_early(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - intel_uncore_early_sanitize(dev); + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + hsw_disable_pc8(dev_priv); + + intel_uncore_early_sanitize(dev, true); intel_uncore_sanitize(dev); intel_power_domains_init_hw(dev_priv); @@ -639,11 +644,19 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) } mutex_unlock(&dev->struct_mutex); - /* We need working interrupts for modeset enabling ... */ - drm_irq_install(dev, dev->pdev->irq); + intel_runtime_pm_restore_interrupts(dev); intel_modeset_init_hw(dev); + { + unsigned long irqflags; + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } + + intel_dp_mst_resume(dev); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, true); drm_modeset_unlock_all(dev); @@ -655,7 +668,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) * notifications. 
* */ intel_hpd_init(dev); - dev_priv->enable_hotplug_processing = true; /* Config may have changed between suspend and resume */ drm_helper_hpd_irq_event(dev); } @@ -678,7 +690,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) dev_priv->modeset_restore = MODESET_DONE; mutex_unlock(&dev_priv->modeset_restore_lock); - intel_runtime_pm_put(dev_priv); + intel_opregion_notify_adapter(dev, PCI_D0); + return 0; } @@ -887,6 +900,7 @@ static int i915_pm_suspend_late(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_i915_private *dev_priv = drm_dev->dev_private; /* * We have a suspend ordering issue with the snd-hda driver also * requiring a suspend hook into the driver model startup/teardown @@ -900,6 +914,9 @@ static int i915_pm_suspend_late(struct device *dev) if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; + if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev)) + hsw_enable_pc8(dev_priv); + pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a47fbf60b781..7db16bee2997 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -53,7 +53,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20080730" +#define DRIVER_DATE "20140620" enum pipe { INVALID_PIPE = -1, @@ -129,6 +129,7 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_OTHER, POWER_DOMAIN_VGA, POWER_DOMAIN_AUDIO, + POWER_DOMAIN_PLLS, POWER_DOMAIN_INIT, POWER_DOMAIN_NUM, @@ -184,8 +185,10 @@ struct i915_mmu_object; enum intel_dpll_id { DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ /* real shared dpll ids must be >= 0 */ - DPLL_ID_PCH_PLL_A, - DPLL_ID_PCH_PLL_B, + DPLL_ID_PCH_PLL_A = 0, + DPLL_ID_PCH_PLL_B = 1, + DPLL_ID_WRPLL1 = 0, + DPLL_ID_WRPLL2 = 1, }; #define I915_NUM_PLLS 2 @@ -194,6 +197,7 @@ struct intel_dpll_hw_state { uint32_t dpll_md; uint32_t fp0; uint32_t fp1; + uint32_t wrpll; }; struct intel_shared_dpll { @@ -204,6 +208,8 @@ struct intel_shared_dpll { /* should match the index in the dev_priv->shared_dplls array */ enum intel_dpll_id id; struct intel_dpll_hw_state hw_state; + /* The mode_set hook is optional and should be used together with the + * intel_prepare_shared_dpll function. */ void (*mode_set)(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll); void (*enable)(struct drm_i915_private *dev_priv, @@ -228,12 +234,6 @@ void intel_link_compute_m_n(int bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n); -struct intel_ddi_plls { - int spll_refcount; - int wrpll1_refcount; - int wrpll2_refcount; -}; - /* Interface history: * * 1.1: Original. @@ -324,6 +324,7 @@ struct drm_i915_error_state { u64 fence[I915_MAX_NUM_FENCES]; struct intel_overlay_error_state *overlay; struct intel_display_error_state *display; + struct drm_i915_error_object *semaphore_obj; struct drm_i915_error_ring { bool valid; @@ -552,8 +553,6 @@ struct intel_device_info { /* Register offsets for the various display pipes and transcoders */ int pipe_offsets[I915_MAX_TRANSCODERS]; int trans_offsets[I915_MAX_TRANSCODERS]; - int dpll_offsets[I915_MAX_PIPES]; - int dpll_md_offsets[I915_MAX_PIPES]; int palette_offsets[I915_MAX_PIPES]; int cursor_offsets[I915_MAX_PIPES]; }; @@ -586,28 +585,48 @@ struct i915_ctx_hang_stats { }; /* This must match up with the value previously used for execbuf2.rsvd1.
*/ -#define DEFAULT_CONTEXT_ID 0 +#define DEFAULT_CONTEXT_HANDLE 0 +/** + * struct intel_context - as the name implies, represents a context. + * @ref: reference count. + * @user_handle: userspace tracking identity for this context. + * @remap_slice: l3 row remapping information. + * @file_priv: filp associated with this context (NULL for global default + * context). + * @hang_stats: information about the role of this context in possible GPU + * hangs. + * @vm: virtual memory space used by this context. + * @legacy_hw_ctx: render context backing object and whether it is correctly + * initialized (legacy ring submission mechanism only). + * @link: link in the global list of contexts. + * + * Contexts are memory images used by the hardware to store copies of their + * internal state. + */ struct intel_context { struct kref ref; - int id; - bool is_initialized; + int user_handle; uint8_t remap_slice; struct drm_i915_file_private *file_priv; - struct intel_engine_cs *last_ring; - struct drm_i915_gem_object *obj; struct i915_ctx_hang_stats hang_stats; struct i915_address_space *vm; + struct { + struct drm_i915_gem_object *rcs_state; + bool initialized; + } legacy_hw_ctx; + struct list_head link; }; struct i915_fbc { unsigned long size; + unsigned threshold; unsigned int fb_id; enum plane plane; int y; - struct drm_mm_node *compressed_fb; + struct drm_mm_node compressed_fb; struct drm_mm_node *compressed_llb; struct intel_fbc_work { @@ -638,6 +657,10 @@ struct i915_drrs { struct i915_psr { bool sink_support; bool source_ok; + bool setup_done; + bool enabled; + bool active; + struct delayed_work work; }; enum intel_pch { @@ -879,6 +902,12 @@ struct vlv_s0ix_state { u32 clock_gate_dis2; }; +struct intel_rps_ei { + u32 cz_clock; + u32 render_c0; + u32 media_c0; +}; + struct intel_gen6_power_mgmt { /* work and pm_iir are protected by dev_priv->irq_lock */ struct work_struct work; @@ -903,12 +932,17 @@ struct intel_gen6_power_mgmt { u8 rp1_freq; /* "less than" RP0 power/frequency */ u8 rp0_freq; /* Non-overclocked max frequency. */ + u32 ei_interrupt_count; + int last_adj; enum { LOW_POWER, BETWEEN, HIGH_POWER } power; bool enabled; struct delayed_work delayed_resume_work; + /* manual wa residency calculations */ + struct intel_rps_ei up_ei, down_ei; + /* * Protects RPS/RC6 register access and PCU communication. * Must be taken after struct_mutex if nested. @@ -1331,6 +1365,17 @@ struct intel_pipe_crc { wait_queue_head_t wq; }; +struct i915_frontbuffer_tracking { + struct mutex lock; + + /* + * Tracking bits for delayed frontbuffer flushing due to gpu activity or + * scheduled flips. + */ + unsigned busy_bits; + unsigned flip_bits; +}; + struct drm_i915_private { struct drm_device *dev; struct kmem_cache *slab; @@ -1362,6 +1407,7 @@ struct drm_i915_private { struct pci_dev *bridge_dev; struct intel_engine_cs ring[I915_NUM_RINGS]; + struct drm_i915_gem_object *semaphore_obj; uint32_t last_seqno, next_seqno; drm_dma_handle_t *status_page_dmah; @@ -1370,6 +1416,9 @@ struct drm_i915_private { /* protects the irq masks */ spinlock_t irq_lock; + /* protects the mmio flip data */ + spinlock_t mmio_flip_lock; + bool display_irqs_enabled; /* To control wakeup latency, e.g. for irq-driven dp aux transfers.
*/ @@ -1389,7 +1438,6 @@ u32 pipestat_irq_mask[I915_MAX_PIPES]; struct work_struct hotplug_work; - bool enable_hotplug_processing; struct { unsigned long hpd_last_jiffies; int hpd_cnt; @@ -1466,7 +1514,6 @@ int num_shared_dpll; struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; - struct intel_ddi_plls ddi_plls; int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; /* Reclocking support */ @@ -1474,6 +1521,9 @@ bool lvds_downclock_avail; /* indicates the reduced downclock for LVDS*/ int lvds_downclock; + + struct i915_frontbuffer_tracking fb_tracking; + u16 orig_clock; bool mchbar_need_disable; @@ -1540,6 +1590,20 @@ struct i915_runtime_pm pm; + struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS]; + u32 long_hpd_port_mask; + u32 short_hpd_port_mask; + struct work_struct dig_port_work; + + /* + * If we get a HPD irq from DP and a HPD irq from non-DP, + * the non-DP HPD could block the workqueue on a mode config + * mutex that userspace may have taken. However, + * userspace is waiting on the DP workqueue to run, which is + * blocked behind the non-DP one. + */ + struct workqueue_struct *dp_wq; + /* Old dri1 support infrastructure, beware the dragons ya fools entering * here! */ struct i915_dri1_state dri1; @@ -1591,6 +1655,28 @@ struct drm_i915_gem_object_ops { void (*release)(struct drm_i915_gem_object *); }; +/* + * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is + * considered to be the frontbuffer for the given plane interface-wise. This + * doesn't mean that the hw necessarily already scans it out, but that any + * rendering (by the cpu or gpu) will land in the frontbuffer eventually. + * + * We have one bit per pipe and per scanout plane type. + */ +#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4 +#define INTEL_FRONTBUFFER_BITS \ + (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) +#define INTEL_FRONTBUFFER_PRIMARY(pipe) \ + (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) +#define INTEL_FRONTBUFFER_CURSOR(pipe) \ + (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) +#define INTEL_FRONTBUFFER_SPRITE(pipe) \ + (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) +#define INTEL_FRONTBUFFER_OVERLAY(pipe) \ + (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) +#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ + (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) + struct drm_i915_gem_object { struct drm_gem_object base; @@ -1661,6 +1747,12 @@ struct drm_i915_gem_object { unsigned int pin_display:1; /* + * Is the object to be mapped as read-only to the GPU + * Only honoured if hardware has relevant pte bit + */ + unsigned long gt_ro:1; + + /* * Is the GPU currently using a fence to access this buffer, */ unsigned int pending_fenced_gpu_access:1; @@ -1672,6 +1764,8 @@ struct drm_i915_gem_object { unsigned int has_global_gtt_mapping:1; unsigned int has_dma_mapping:1; + unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; + struct sg_table *pages; int pages_pin_count; @@ -1718,6 +1812,10 @@ struct drm_i915_gem_object { }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) +void i915_gem_track_fb(struct drm_i915_gem_object *old, + struct drm_i915_gem_object *new, + unsigned frontbuffer_bits); + /** * Request queue structure.
* @@ -1939,10 +2037,8 @@ struct drm_i915_cmd_table { #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) -#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \ - (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))) -#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \ - && !IS_GEN8(dev)) +#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6) +#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev)) #define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) #define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) @@ -2039,6 +2135,7 @@ struct i915_params { bool reset; bool disable_display; bool disable_vtd_wa; + int use_mmio_flip; }; extern struct i915_params i915 __read_mostly; @@ -2047,12 +2144,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev); extern void i915_kernel_lost_context(struct drm_device * dev); extern int i915_driver_load(struct drm_device *, unsigned long flags); extern int i915_driver_unload(struct drm_device *); -extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); +extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); extern void i915_driver_lastclose(struct drm_device * dev); extern void i915_driver_preclose(struct drm_device *dev, - struct drm_file *file_priv); + struct drm_file *file); extern void i915_driver_postclose(struct drm_device *dev, - struct drm_file *file_priv); + struct drm_file *file); extern int i915_driver_device_is_agp(struct drm_device * dev); #ifdef CONFIG_COMPAT extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, @@ -2083,10 +2180,12 @@ extern void intel_irq_init(struct drm_device *dev); extern void intel_hpd_init(struct drm_device *dev); extern void intel_uncore_sanitize(struct drm_device *dev); -extern void intel_uncore_early_sanitize(struct drm_device *dev); +extern void intel_uncore_early_sanitize(struct drm_device *dev, + bool restore_forcewake); extern void intel_uncore_init(struct drm_device *dev); extern void intel_uncore_check_errors(struct drm_device *dev); extern void intel_uncore_fini(struct drm_device *dev); +extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); void i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, @@ -2234,6 +2333,8 @@ bool i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, bool interruptible); +int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno); + static inline bool i915_reset_in_progress(struct i915_gpu_error *error) { return unlikely(atomic_read(&error->reset_counter) @@ -2403,7 +2504,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx) static inline bool i915_gem_context_is_default(const struct intel_context *c) { - return c->id == DEFAULT_CONTEXT_ID; + return c->user_handle == DEFAULT_CONTEXT_HANDLE; } int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, @@ -2434,7 +2535,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev) /* i915_gem_stolen.c */ int i915_gem_init_stolen(struct drm_device *dev); -int i915_gem_stolen_setup_compression(struct drm_device *dev, int size); +int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp); void i915_gem_stolen_cleanup_compression(struct drm_device *dev); void i915_gem_cleanup_stolen(struct drm_device *dev); struct 
drm_i915_gem_object * @@ -2444,7 +2545,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, u32 stolen_offset, u32 gtt_offset, u32 size); -void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) @@ -2594,6 +2694,8 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void valleyview_set_rps(struct drm_device *dev, u8 val); extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv); extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv); +extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, + bool enable); extern void intel_detect_pch(struct drm_device *dev); extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); extern int intel_enable_rc6(const struct drm_device *dev); @@ -2604,6 +2706,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +void intel_notify_mmio_flip(struct intel_engine_cs *ring); + /* overlay */ extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f36126383d26..e5d4d73a9844 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error, * Compare seqno against outstanding lazy request. Emit a request if they are * equal. */ -static int +int i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno) { int ret; @@ -1168,7 +1168,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0; - if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) { + if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) { gen6_rps_boost(dev_priv); if (file_priv) mod_delayed_work(dev_priv->wq, @@ -1561,14 +1561,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto unpin; - obj->fault_mappable = true; - + /* Finally, remap it using the new GTT offset */ pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); pfn >>= PAGE_SHIFT; - pfn += page_offset; - /* Finally, remap it using the new GTT offset */ - ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + if (!obj->fault_mappable) { + unsigned long size = min_t(unsigned long, + vma->vm_end - vma->vm_start, + obj->base.size); + int i; + + for (i = 0; i < size >> PAGE_SHIFT; i++) { + ret = vm_insert_pfn(vma, + (unsigned long)vma->vm_start + i * PAGE_SIZE, + pfn + i); + if (ret) + break; + } + + obj->fault_mappable = true; + } else + ret = vm_insert_pfn(vma, + (unsigned long)vmf->virtual_address, + pfn + page_offset); unpin: i915_gem_object_ggtt_unpin(obj); unlock: @@ -1616,22 +1631,6 @@ out: return ret; } -void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) -{ - struct i915_vma *vma; - - /* - * Only the global gtt is relevant for gtt memory mappings, so restrict - * list traversal to objects bound into the global address space. Note - * that the active list should be empty, but better safe than sorry. 
- */ - WARN_ON(!list_empty(&dev_priv->gtt.base.active_list)); - list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list) - i915_gem_release_mmap(vma->obj); - list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list) - i915_gem_release_mmap(vma->obj); -} - /** * i915_gem_release_mmap - remove physical page mappings * @obj: obj in question @@ -1657,6 +1656,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) obj->fault_mappable = false; } +void +i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) +{ + struct drm_i915_gem_object *obj; + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) + i915_gem_release_mmap(obj); +} + uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) { @@ -2059,16 +2067,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) * our own buffer, now let the real VM do its job and * go down in flames if truly OOM. */ - gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD); - gfp |= __GFP_IO | __GFP_WAIT; - i915_gem_shrink_all(dev_priv); - page = shmem_read_mapping_page_gfp(mapping, i, gfp); + page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) goto err_pages; - - gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; - gfp &= ~(__GFP_IO | __GFP_WAIT); } #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { @@ -2217,6 +2219,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) list_move_tail(&vma->mm_list, &vm->inactive_list); } + intel_fb_obj_flush(obj, true); + list_del_init(&obj->ring_list); obj->ring = NULL; @@ -2326,7 +2330,7 @@ int __i915_add_request(struct intel_engine_cs *ring, u32 request_ring_position, request_start; int ret; - request_start = intel_ring_get_tail(ring); + request_start = intel_ring_get_tail(ring->buffer); /* * Emit any outstanding flushes - execbuf can fail to emit the flush * after having emitted the batchbuffer command. Hence we need to fix @@ -2347,7 +2351,7 @@ int __i915_add_request(struct intel_engine_cs *ring, * GPU processing the request, we never over-estimate the * position of the head. 
*/ - request_ring_position = intel_ring_get_tail(ring); + request_ring_position = intel_ring_get_tail(ring->buffer); ret = ring->add_request(ring); if (ret) @@ -2838,6 +2842,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, idx = intel_ring_sync_index(from, to); seqno = obj->last_read_seqno; + /* Optimization: Avoid semaphore sync when we are sure we already + * waited for an object with higher seqno */ if (seqno <= from->semaphore.sync_seqno[idx]) return 0; @@ -3546,6 +3552,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; + intel_fb_obj_flush(obj, false); + trace_i915_gem_object_change_domain(obj, obj->base.read_domains, old_write_domain); @@ -3567,6 +3575,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; + intel_fb_obj_flush(obj, false); + trace_i915_gem_object_change_domain(obj, obj->base.read_domains, old_write_domain); @@ -3620,6 +3630,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) obj->dirty = 1; } + if (write) + intel_fb_obj_invalidate(obj, NULL); + trace_i915_gem_object_change_domain(obj, old_read_domains, old_write_domain); @@ -3956,6 +3969,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) obj->base.write_domain = I915_GEM_DOMAIN_CPU; } + if (write) + intel_fb_obj_invalidate(obj, NULL); + trace_i915_gem_object_change_domain(obj, old_read_domains, old_write_domain); @@ -4444,13 +4460,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) if (obj->stolen) i915_gem_object_unpin_pages(obj); + WARN_ON(obj->frontbuffer_bits); + if (WARN_ON(obj->pages_pin_count)) obj->pages_pin_count = 0; if (discard_backing_storage(obj)) obj->madv = I915_MADV_DONTNEED; i915_gem_object_put_pages(obj); i915_gem_object_free_mmap_offset(obj); - i915_gem_object_release_stolen(obj); BUG_ON(obj->pages); @@ -4928,6 +4945,8 @@ i915_gem_load(struct drm_device *dev) dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; register_oom_notifier(&dev_priv->mm.oom_notifier); + + mutex_init(&dev_priv->fb_tracking.lock); } void i915_gem_release(struct drm_device *dev, struct drm_file *file) @@ -4989,6 +5008,23 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) return ret; } +void i915_gem_track_fb(struct drm_i915_gem_object *old, + struct drm_i915_gem_object *new, + unsigned frontbuffer_bits) +{ + if (old) { + WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex)); + WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits)); + old->frontbuffer_bits &= ~frontbuffer_bits; + } + + if (new) { + WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex)); + WARN_ON(new->frontbuffer_bits & frontbuffer_bits); + new->frontbuffer_bits |= frontbuffer_bits; + } +} + static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) { if (!mutex_is_locked(mutex)) @@ -5071,12 +5107,13 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, vm == &dev_priv->mm.aliasing_ppgtt->base) vm = &dev_priv->gtt.base; - BUG_ON(list_empty(&o->vma_list)); list_for_each_entry(vma, &o->vma_list, vma_link) { if (vma->vm == vm) return vma->node.start; } + WARN(1, "%s vma for this object not found.\n", + i915_is_ggtt(vm) ? 
"global" : "ppgtt"); return -1; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index a5ddf3bce9c3..de72a2859f32 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -182,14 +182,14 @@ void i915_gem_context_free(struct kref *ctx_ref) typeof(*ctx), ref); struct i915_hw_ppgtt *ppgtt = NULL; - if (ctx->obj) { + if (ctx->legacy_hw_ctx.rcs_state) { /* We refcount even the aliasing PPGTT to keep the code symmetric */ - if (USES_PPGTT(ctx->obj->base.dev)) + if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev)) ppgtt = ctx_to_ppgtt(ctx); /* XXX: Free up the object before tearing down the address space, in * case we're bound in the PPGTT */ - drm_gem_object_unreference(&ctx->obj->base); + drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); } if (ppgtt) @@ -198,6 +198,36 @@ void i915_gem_context_free(struct kref *ctx_ref) kfree(ctx); } +static struct drm_i915_gem_object * +i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) +{ + struct drm_i915_gem_object *obj; + int ret; + + obj = i915_gem_alloc_object(dev, size); + if (obj == NULL) + return ERR_PTR(-ENOMEM); + + /* + * Try to make the context utilize L3 as well as LLC. + * + * On VLV we don't have L3 controls in the PTEs so we + * shouldn't touch the cache level, especially as that + * would make the object snooped which might have a + * negative performance impact. + */ + if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) { + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); + /* Failure shouldn't ever happen this early */ + if (WARN_ON(ret)) { + drm_gem_object_unreference(&obj->base); + return ERR_PTR(ret); + } + } + + return obj; +} + static struct i915_hw_ppgtt * create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx) { @@ -234,40 +264,26 @@ __create_hw_context(struct drm_device *dev, list_add_tail(&ctx->link, &dev_priv->context_list); if (dev_priv->hw_context_size) { - ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); - if (ctx->obj == NULL) { - ret = -ENOMEM; + struct drm_i915_gem_object *obj = + i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); goto err_out; } - - /* - * Try to make the context utilize L3 as well as LLC. - * - * On VLV we don't have L3 controls in the PTEs so we - * shouldn't touch the cache level, especially as that - * would make the object snooped which might have a - * negative performance impact. - */ - if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) { - ret = i915_gem_object_set_cache_level(ctx->obj, - I915_CACHE_L3_LLC); - /* Failure shouldn't ever happen this early */ - if (WARN_ON(ret)) - goto err_out; - } + ctx->legacy_hw_ctx.rcs_state = obj; } /* Default context will never have a file_priv */ if (file_priv != NULL) { ret = idr_alloc(&file_priv->context_idr, ctx, - DEFAULT_CONTEXT_ID, 0, GFP_KERNEL); + DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL); if (ret < 0) goto err_out; } else - ret = DEFAULT_CONTEXT_ID; + ret = DEFAULT_CONTEXT_HANDLE; ctx->file_priv = file_priv; - ctx->id = ret; + ctx->user_handle = ret; /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there * is no remap info, it will be a NOP. 
*/ @@ -301,7 +317,7 @@ i915_gem_create_context(struct drm_device *dev, if (IS_ERR(ctx)) return ctx; - if (is_global_default_ctx && ctx->obj) { + if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) { /* We may need to do things with the shrinker which * require us to immediately switch back to the default * context. This can cause a problem as pinning the @@ -309,7 +325,7 @@ i915_gem_create_context(struct drm_device *dev, * be available. To avoid this we always pin the default * context. */ - ret = i915_gem_obj_ggtt_pin(ctx->obj, + ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state, get_context_alignment(dev), 0); if (ret) { DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); @@ -349,8 +365,8 @@ i915_gem_create_context(struct drm_device *dev, return ctx; err_unpin: - if (is_global_default_ctx && ctx->obj) - i915_gem_object_ggtt_unpin(ctx->obj); + if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) + i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); err_destroy: i915_gem_context_unreference(ctx); return ERR_PTR(ret); @@ -366,23 +382,27 @@ void i915_gem_context_reset(struct drm_device *dev) for (i = 0; i < I915_NUM_RINGS; i++) { struct intel_engine_cs *ring = &dev_priv->ring[i]; struct intel_context *dctx = ring->default_context; + struct intel_context *lctx = ring->last_context; /* Do a fake switch to the default context */ - if (ring->last_context == dctx) + if (lctx == dctx) continue; - if (!ring->last_context) + if (!lctx) continue; - if (dctx->obj && i == RCS) { - WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj, + if (dctx->legacy_hw_ctx.rcs_state && i == RCS) { + WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state, get_context_alignment(dev), 0)); /* Fake a finish/inactive */ - dctx->obj->base.write_domain = 0; - dctx->obj->active = 0; + dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0; + dctx->legacy_hw_ctx.rcs_state->active = 0; } - i915_gem_context_unreference(ring->last_context); + if (lctx->legacy_hw_ctx.rcs_state && i == RCS) + i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state); + + i915_gem_context_unreference(lctx); i915_gem_context_reference(dctx); ring->last_context = dctx; } @@ -429,7 +449,7 @@ void i915_gem_context_fini(struct drm_device *dev) struct intel_context *dctx = dev_priv->ring[RCS].default_context; int i; - if (dctx->obj) { + if (dctx->legacy_hw_ctx.rcs_state) { /* The only known way to stop the gpu from accessing the hw context is * to reset it. Do this as the very last operation to avoid confusing * other code, leading to spurious errors. 
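 * (Until that reset the render ring may still be reading the default * context image, which is why its GGTT pin is only dropped below.)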
*/ @@ -444,13 +464,13 @@ void i915_gem_context_fini(struct drm_device *dev) WARN_ON(!dev_priv->ring[RCS].last_context); if (dev_priv->ring[RCS].last_context == dctx) { /* Fake switch to NULL context */ - WARN_ON(dctx->obj->active); - i915_gem_object_ggtt_unpin(dctx->obj); + WARN_ON(dctx->legacy_hw_ctx.rcs_state->active); + i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); i915_gem_context_unreference(dctx); dev_priv->ring[RCS].last_context = NULL; } - i915_gem_object_ggtt_unpin(dctx->obj); + i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); } for (i = 0; i < I915_NUM_RINGS; i++) { @@ -570,7 +590,7 @@ mi_set_context(struct intel_engine_cs *ring, intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) | + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | @@ -602,16 +622,16 @@ static int do_switch(struct intel_engine_cs *ring, int ret, i; if (from != NULL && ring == &dev_priv->ring[RCS]) { - BUG_ON(from->obj == NULL); - BUG_ON(!i915_gem_obj_is_pinned(from->obj)); + BUG_ON(from->legacy_hw_ctx.rcs_state == NULL); + BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state)); } - if (from == to && from->last_ring == ring && !to->remap_slice) + if (from == to && !to->remap_slice) return 0; /* Trying to pin first makes error handling easier. */ if (ring == &dev_priv->ring[RCS]) { - ret = i915_gem_obj_ggtt_pin(to->obj, + ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, get_context_alignment(ring->dev), 0); if (ret) return ret; @@ -644,17 +664,17 @@ static int do_switch(struct intel_engine_cs *ring, * * XXX: We need a real interface to do this instead of trickery. */ - ret = i915_gem_object_set_to_gtt_domain(to->obj, false); + ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false); if (ret) goto unpin_out; - if (!to->obj->has_global_gtt_mapping) { - struct i915_vma *vma = i915_gem_obj_to_vma(to->obj, + if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) { + struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state, &dev_priv->gtt.base); - vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND); + vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND); } - if (!to->is_initialized || i915_gem_context_is_default(to)) + if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) hw_flags |= MI_RESTORE_INHIBIT; ret = mi_set_context(ring, to, hw_flags); @@ -680,8 +700,8 @@ static int do_switch(struct intel_engine_cs *ring, * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from != NULL) { - from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; - i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring); + from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; + i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring); /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the * whole damn pipeline, we don't need to explicitly mark the * object dirty. The only exception is that the context must be * correct in case the object gets swapped out. Ideally we'd be * able to defer doing this until we know the object would be * swapped, but there is no way to do that yet.
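 * (MI_SET_CONTEXT makes the GPU write the outgoing context image back * into from's backing object, hence the manual dirty marking just below.)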
*/ - from->obj->dirty = 1; - BUG_ON(from->obj->ring != ring); + from->legacy_hw_ctx.rcs_state->dirty = 1; + BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring); /* obj is kept alive until the next request by its active ref */ - i915_gem_object_ggtt_unpin(from->obj); + i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); i915_gem_context_unreference(from); } - uninitialized = !to->is_initialized && from == NULL; - to->is_initialized = true; + uninitialized = !to->legacy_hw_ctx.initialized && from == NULL; + to->legacy_hw_ctx.initialized = true; done: i915_gem_context_reference(to); ring->last_context = to; - to->last_ring = ring; if (uninitialized) { ret = i915_gem_render_state_init(ring); @@ -715,7 +734,7 @@ done: unpin_out: if (ring->id == RCS) - i915_gem_object_ggtt_unpin(to->obj); + i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); return ret; } @@ -736,7 +755,7 @@ int i915_switch_context(struct intel_engine_cs *ring, WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); - if (to->obj == NULL) { /* We have the fake context */ + if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ if (to != ring->last_context) { i915_gem_context_reference(to); if (ring->last_context) @@ -774,7 +793,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (IS_ERR(ctx)) return PTR_ERR(ctx); - args->ctx_id = ctx->id; + args->ctx_id = ctx->user_handle; DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); return 0; @@ -788,7 +807,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct intel_context *ctx; int ret; - if (args->ctx_id == DEFAULT_CONTEXT_ID) + if (args->ctx_id == DEFAULT_CONTEXT_HANDLE) return -ENOENT; ret = i915_mutex_lock_interruptible(dev); @@ -801,7 +820,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, return PTR_ERR(ctx); } - idr_remove(&ctx->file_priv->context_idr, ctx->id); + idr_remove(&ctx->file_priv->context_idr, ctx->user_handle); i915_gem_context_unreference(ctx); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3a30133f93e8..60998fc4e5b2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, struct intel_context *ctx = NULL; struct i915_ctx_hang_stats *hs; - if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID) + if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) return ERR_PTR(-EINVAL); ctx = i915_gem_context_get(file->driver_priv, ctx_id); @@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, if (obj->base.write_domain) { obj->dirty = 1; obj->last_write_seqno = intel_ring_get_seqno(ring); - /* check for potential scanout */ - if (i915_gem_obj_ggtt_bound(obj) && - i915_gem_obj_to_ggtt(obj)->pin_count) - intel_mark_fb_busy(obj, ring); + + intel_fb_obj_invalidate(obj, ring); /* update for the implicit flush after a batch */ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; @@ -1028,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, return 0; } +static int +legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, + struct intel_engine_cs *ring, + struct intel_context *ctx, + struct drm_i915_gem_execbuffer2 *args, + struct list_head *vmas, + struct drm_i915_gem_object *batch_obj, + u64 exec_start, u32 flags) +{ + struct drm_clip_rect *cliprects = NULL; + struct 
drm_i915_private *dev_priv = dev->dev_private; + u64 exec_len; + int instp_mode; + u32 instp_mask; + int i, ret = 0; + + if (args->num_cliprects != 0) { + if (ring != &dev_priv->ring[RCS]) { + DRM_DEBUG("clip rectangles are only valid with the render ring\n"); + return -EINVAL; + } + + if (INTEL_INFO(dev)->gen >= 5) { + DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); + return -EINVAL; + } + + if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { + DRM_DEBUG("execbuf with %u cliprects\n", + args->num_cliprects); + return -EINVAL; + } + + cliprects = kcalloc(args->num_cliprects, + sizeof(*cliprects), + GFP_KERNEL); + if (cliprects == NULL) { + ret = -ENOMEM; + goto error; + } + + if (copy_from_user(cliprects, + to_user_ptr(args->cliprects_ptr), + sizeof(*cliprects)*args->num_cliprects)) { + ret = -EFAULT; + goto error; + } + } else { + if (args->DR4 == 0xffffffff) { + DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); + args->DR4 = 0; + } + + if (args->DR1 || args->DR4 || args->cliprects_ptr) { + DRM_DEBUG("0 cliprects but dirt in cliprects fields\n"); + return -EINVAL; + } + } + + ret = i915_gem_execbuffer_move_to_gpu(ring, vmas); + if (ret) + goto error; + + ret = i915_switch_context(ring, ctx); + if (ret) + goto error; + + instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; + instp_mask = I915_EXEC_CONSTANTS_MASK; + switch (instp_mode) { + case I915_EXEC_CONSTANTS_REL_GENERAL: + case I915_EXEC_CONSTANTS_ABSOLUTE: + case I915_EXEC_CONSTANTS_REL_SURFACE: + if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { + DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); + ret = -EINVAL; + goto error; + } + + if (instp_mode != dev_priv->relative_constants_mode) { + if (INTEL_INFO(dev)->gen < 4) { + DRM_DEBUG("no rel constants on pre-gen4\n"); + ret = -EINVAL; + goto error; + } + + if (INTEL_INFO(dev)->gen > 5 && + instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { + DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); + ret = -EINVAL; + goto error; + } + + /* The HW changed the meaning on this bit on gen6 */ + if (INTEL_INFO(dev)->gen >= 6) + instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; + } + break; + default: + DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); + ret = -EINVAL; + goto error; + } + + if (ring == &dev_priv->ring[RCS] && + instp_mode != dev_priv->relative_constants_mode) { + ret = intel_ring_begin(ring, 4); + if (ret) + goto error; + + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, INSTPM); + intel_ring_emit(ring, instp_mask << 16 | instp_mode); + intel_ring_advance(ring); + + dev_priv->relative_constants_mode = instp_mode; + } + + if (args->flags & I915_EXEC_GEN7_SOL_RESET) { + ret = i915_reset_gen7_sol_offsets(dev, ring); + if (ret) + goto error; + } + + exec_len = args->batch_len; + if (cliprects) { + for (i = 0; i < args->num_cliprects; i++) { + ret = i915_emit_box(dev, &cliprects[i], + args->DR1, args->DR4); + if (ret) + goto error; + + ret = ring->dispatch_execbuffer(ring, + exec_start, exec_len, + flags); + if (ret) + goto error; + } + } else { + ret = ring->dispatch_execbuffer(ring, + exec_start, exec_len, + flags); + if (ret) + return ret; + } + + trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); + + i915_gem_execbuffer_move_to_active(vmas, ring); + i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); + +error: + kfree(cliprects); + return ret; +} + /** * Find one BSD ring to dispatch the corresponding BSD command. * The Ring ID is returned. 
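 * (The choice is sticky per file: a client's first submission picks VCS * or VCS2 in ping-pong fashion and later submissions from the same fd * reuse that ring, spreading clients across both BSD engines.)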
@@ -1087,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_private *dev_priv = dev->dev_private; struct eb_vmas *eb; struct drm_i915_gem_object *batch_obj; - struct drm_clip_rect *cliprects = NULL; struct intel_engine_cs *ring; struct intel_context *ctx; struct i915_address_space *vm; const u32 ctx_id = i915_execbuffer2_get_context_id(*args); - u64 exec_start = args->batch_start_offset, exec_len; - u32 mask, flags; - int ret, mode, i; + u64 exec_start = args->batch_start_offset; + u32 flags; + int ret; bool need_relocs; if (!i915_gem_check_execbuffer(args)) @@ -1138,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, return -EINVAL; } - mode = args->flags & I915_EXEC_CONSTANTS_MASK; - mask = I915_EXEC_CONSTANTS_MASK; - switch (mode) { - case I915_EXEC_CONSTANTS_REL_GENERAL: - case I915_EXEC_CONSTANTS_ABSOLUTE: - case I915_EXEC_CONSTANTS_REL_SURFACE: - if (mode != 0 && ring != &dev_priv->ring[RCS]) { - DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); - return -EINVAL; - } - - if (mode != dev_priv->relative_constants_mode) { - if (INTEL_INFO(dev)->gen < 4) { - DRM_DEBUG("no rel constants on pre-gen4\n"); - return -EINVAL; - } - - if (INTEL_INFO(dev)->gen > 5 && - mode == I915_EXEC_CONSTANTS_REL_SURFACE) { - DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); - return -EINVAL; - } - - /* The HW changed the meaning on this bit on gen6 */ - if (INTEL_INFO(dev)->gen >= 6) - mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; - } - break; - default: - DRM_DEBUG("execbuf with unknown constants: %d\n", mode); - return -EINVAL; - } - if (args->buffer_count < 1) { DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); return -EINVAL; } - if (args->num_cliprects != 0) { - if (ring != &dev_priv->ring[RCS]) { - DRM_DEBUG("clip rectangles are only valid with the render ring\n"); - return -EINVAL; - } - - if (INTEL_INFO(dev)->gen >= 5) { - DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); - return -EINVAL; - } - - if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { - DRM_DEBUG("execbuf with %u cliprects\n", - args->num_cliprects); - return -EINVAL; - } - - cliprects = kcalloc(args->num_cliprects, - sizeof(*cliprects), - GFP_KERNEL); - if (cliprects == NULL) { - ret = -ENOMEM; - goto pre_mutex_err; - } - - if (copy_from_user(cliprects, - to_user_ptr(args->cliprects_ptr), - sizeof(*cliprects)*args->num_cliprects)) { - ret = -EFAULT; - goto pre_mutex_err; - } - } else { - if (args->DR4 == 0xffffffff) { - DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); - args->DR4 = 0; - } - - if (args->DR1 || args->DR4 || args->cliprects_ptr) { - DRM_DEBUG("0 cliprects but dirt in cliprects fields\n"); - return -EINVAL; - } - } - intel_runtime_pm_get(dev_priv); ret = i915_mutex_lock_interruptible(dev); @@ -1322,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, else exec_start += i915_gem_obj_offset(batch_obj, vm); - ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas); + ret = legacy_ringbuffer_submission(dev, file, ring, ctx, + args, &eb->vmas, batch_obj, exec_start, flags); if (ret) goto err; - ret = i915_switch_context(ring, ctx); - if (ret) - goto err; - - if (ring == &dev_priv->ring[RCS] && - mode != dev_priv->relative_constants_mode) { - ret = intel_ring_begin(ring, 4); - if (ret) - goto err; - - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, INSTPM); - intel_ring_emit(ring, mask << 16 | mode); - intel_ring_advance(ring); - - 
dev_priv->relative_constants_mode = mode; - } - - if (args->flags & I915_EXEC_GEN7_SOL_RESET) { - ret = i915_reset_gen7_sol_offsets(dev, ring); - if (ret) - goto err; - } - - - exec_len = args->batch_len; - if (cliprects) { - for (i = 0; i < args->num_cliprects; i++) { - ret = i915_emit_box(dev, &cliprects[i], - args->DR1, args->DR4); - if (ret) - goto err; - - ret = ring->dispatch_execbuffer(ring, - exec_start, exec_len, - flags); - if (ret) - goto err; - } - } else { - ret = ring->dispatch_execbuffer(ring, - exec_start, exec_len, - flags); - if (ret) - goto err; - } - - trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); - - i915_gem_execbuffer_move_to_active(&eb->vmas, ring); - i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); - err: /* the request owns the ref now */ i915_gem_context_unreference(ctx); @@ -1387,8 +1413,6 @@ err: mutex_unlock(&dev->struct_mutex); pre_mutex_err: - kfree(cliprects); - /* intel_gpu_busy should also get a ref, so it will free when the device * is really idle. */ intel_runtime_pm_put(dev_priv); @@ -1525,7 +1549,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ - struct drm_i915_gem_exec_object2 *user_exec_list = + struct drm_i915_gem_exec_object2 __user *user_exec_list = to_user_ptr(args->buffers_ptr); int i; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 8b3cde703364..a4153eef48c2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -63,6 +63,12 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) } #endif + /* Early VLV doesn't have this */ + if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { + DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); + return 0; + } + return HAS_ALIASING_PPGTT(dev) ? 1 : 0; } @@ -110,7 +116,7 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 unused) { gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); @@ -132,7 +138,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 unused) { gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); @@ -156,7 +162,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 flags) { gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); @@ -164,7 +170,8 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, /* Mark the page as writeable. Other platforms don't have a * setting for read-only/writable, so this matches that behavior. */ - pte |= BYT_PTE_WRITEABLE; + if (!(flags & PTE_READ_ONLY)) + pte |= BYT_PTE_WRITEABLE; if (level != I915_CACHE_NONE) pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; @@ -174,7 +181,7 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 unused) { gen6_gtt_pte_t pte = valid ? 
GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); @@ -187,7 +194,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 unused) { gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); @@ -301,7 +308,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, uint64_t start, - enum i915_cache_level cache_level) + enum i915_cache_level cache_level, u32 unused) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); @@ -639,7 +646,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) uint32_t pd_entry; int pte, pde; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); @@ -941,7 +948,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); while (num_entries) { last_pte = first_pte + num_entries; @@ -964,7 +971,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, uint64_t start, - enum i915_cache_level cache_level) + enum i915_cache_level cache_level, u32 flags) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); @@ -981,7 +988,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, pt_vaddr[act_pte] = vm->pte_encode(sg_page_iter_dma_address(&sg_iter), - cache_level, true); + cache_level, true, flags); + if (++act_pte == I915_PPGTT_PT_ENTRIES) { kunmap_atomic(pt_vaddr); pt_vaddr = NULL; @@ -1218,8 +1226,12 @@ ppgtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { + /* Currently applicable only to VLV */ + if (vma->obj->gt_ro) + flags |= PTE_READ_ONLY; + vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, - cache_level); + cache_level, flags); } static void ppgtt_unbind_vma(struct i915_vma *vma) @@ -1394,7 +1406,7 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte) static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, uint64_t start, - enum i915_cache_level level) + enum i915_cache_level level, u32 unused) { struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; @@ -1440,7 +1452,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, static void gen6_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, uint64_t start, - enum i915_cache_level level) + enum i915_cache_level level, u32 flags) { struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; @@ -1452,7 +1464,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_page_iter_dma_address(&sg_iter); - iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]); + iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); i++; } @@ -1464,7 +1476,7 @@
static void gen6_ggtt_insert_entries(struct i915_address_space *vm, */ if (i != 0) WARN_ON(readl(&gtt_entries[i-1]) != - vm->pte_encode(addr, level, true)); + vm->pte_encode(addr, level, true, flags)); /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -1518,7 +1530,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0); for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, &gtt_base[i]); @@ -1567,6 +1579,10 @@ static void ggtt_bind_vma(struct i915_vma *vma, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj = vma->obj; + /* Currently applicable only to VLV */ + if (obj->gt_ro) + flags |= PTE_READ_ONLY; + /* If there is no aliasing PPGTT, or the caller needs a global mapping, * or we have a global mapping already but the cacheability flags have * changed, set the global PTEs. @@ -1583,7 +1599,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, (cache_level != obj->cache_level)) { vma->vm->insert_entries(vma->vm, obj->pages, vma->node.start, - cache_level); + cache_level, flags); obj->has_global_gtt_mapping = 1; } } @@ -1595,7 +1611,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, appgtt->base.insert_entries(&appgtt->base, vma->obj->pages, vma->node.start, - cache_level); + cache_level, flags); vma->obj->has_aliasing_ppgtt_mapping = 1; } } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 1b96a06be3cb..8d6f7c18c404 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -154,6 +154,7 @@ struct i915_vma { void (*unbind_vma)(struct i915_vma *vma); /* Map an object into an address space with the given cache flags.
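 * (GLOBAL_BIND requests PTEs in the global GTT even when a PPGTT is in * use; PTE_READ_ONLY, added below, tells the PTE encoder to omit the * writeable bit for gt_ro objects, currently honoured only on VLV.)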
*/ #define GLOBAL_BIND (1<<0) +#define PTE_READ_ONLY (1<<1) void (*bind_vma)(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); @@ -197,7 +198,7 @@ struct i915_address_space { /* FIXME: Need a more generic return type */ gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, enum i915_cache_level level, - bool valid); /* Create a valid PTE */ + bool valid, u32 flags); /* Create a valid PTE */ void (*clear_range)(struct i915_address_space *vm, uint64_t start, uint64_t length, @@ -205,7 +206,7 @@ struct i915_address_space { void (*insert_entries)(struct i915_address_space *vm, struct sg_table *st, uint64_t start, - enum i915_cache_level cache_level); + enum i915_cache_level cache_level, u32 flags); void (*cleanup)(struct i915_address_space *vm); }; diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 3521f998a178..e60be3f552a6 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -28,64 +28,13 @@ #include "i915_drv.h" #include "intel_renderstate.h" -struct i915_render_state { +struct render_state { + const struct intel_renderstate_rodata *rodata; struct drm_i915_gem_object *obj; - unsigned long ggtt_offset; - void *batch; - u32 size; - u32 len; + u64 ggtt_offset; + int gen; }; -static struct i915_render_state *render_state_alloc(struct drm_device *dev) -{ - struct i915_render_state *so; - struct page *page; - int ret; - - so = kzalloc(sizeof(*so), GFP_KERNEL); - if (!so) - return ERR_PTR(-ENOMEM); - - so->obj = i915_gem_alloc_object(dev, 4096); - if (so->obj == NULL) { - ret = -ENOMEM; - goto free; - } - so->size = 4096; - - ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); - if (ret) - goto free_gem; - - BUG_ON(so->obj->pages->nents != 1); - page = sg_page(so->obj->pages->sgl); - - so->batch = kmap(page); - if (!so->batch) { - ret = -ENOMEM; - goto unpin; - } - - so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj); - - return so; -unpin: - i915_gem_object_ggtt_unpin(so->obj); -free_gem: - drm_gem_object_unreference(&so->obj->base); -free: - kfree(so); - return ERR_PTR(ret); -} - -static void render_state_free(struct i915_render_state *so) -{ - kunmap(so->batch); - i915_gem_object_ggtt_unpin(so->obj); - drm_gem_object_unreference(&so->obj->base); - kfree(so); -} - static const struct intel_renderstate_rodata * render_state_get_rodata(struct drm_device *dev, const int gen) { @@ -101,98 +50,120 @@ render_state_get_rodata(struct drm_device *dev, const int gen) return NULL; } -static int render_state_setup(const int gen, - const struct intel_renderstate_rodata *rodata, - struct i915_render_state *so) +static int render_state_init(struct render_state *so, struct drm_device *dev) { - const u64 goffset = i915_gem_obj_ggtt_offset(so->obj); - u32 reloc_index = 0; - u32 * const d = so->batch; - unsigned int i = 0; int ret; - if (!rodata || rodata->batch_items * 4 > so->size) + so->gen = INTEL_INFO(dev)->gen; + so->rodata = render_state_get_rodata(dev, so->gen); + if (so->rodata == NULL) + return 0; + + if (so->rodata->batch_items * 4 > 4096) return -EINVAL; + so->obj = i915_gem_alloc_object(dev, 4096); + if (so->obj == NULL) + return -ENOMEM; + + ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); + if (ret) + goto free_gem; + + so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj); + return 0; + +free_gem: + drm_gem_object_unreference(&so->obj->base); + return ret; +} + +static int render_state_setup(struct render_state *so) +{ + const struct intel_renderstate_rodata *rodata = 
so->rodata; + unsigned int i = 0, reloc_index = 0; + struct page *page; + u32 *d; + int ret; + ret = i915_gem_object_set_to_cpu_domain(so->obj, true); if (ret) return ret; + page = sg_page(so->obj->pages->sgl); + d = kmap(page); + while (i < rodata->batch_items) { u32 s = rodata->batch[i]; - if (reloc_index < rodata->reloc_items && - i * 4 == rodata->reloc[reloc_index]) { - - s += goffset & 0xffffffff; - - /* We keep batch offsets max 32bit */ - if (gen >= 8) { + if (i * 4 == rodata->reloc[reloc_index]) { + u64 r = s + so->ggtt_offset; + s = lower_32_bits(r); + if (so->gen >= 8) { if (i + 1 >= rodata->batch_items || rodata->batch[i + 1] != 0) return -EINVAL; - d[i] = s; - i++; - s = (goffset & 0xffffffff00000000ull) >> 32; + d[i++] = s; + s = upper_32_bits(r); } reloc_index++; } - d[i] = s; - i++; + d[i++] = s; } + kunmap(page); ret = i915_gem_object_set_to_gtt_domain(so->obj, false); if (ret) return ret; - if (rodata->reloc_items != reloc_index) { - DRM_ERROR("not all relocs resolved, %d out of %d\n", - reloc_index, rodata->reloc_items); + if (rodata->reloc[reloc_index] != -1) { + DRM_ERROR("only %d relocs resolved\n", reloc_index); return -EINVAL; } - so->len = rodata->batch_items * 4; - return 0; } +static void render_state_fini(struct render_state *so) +{ + i915_gem_object_ggtt_unpin(so->obj); + drm_gem_object_unreference(&so->obj->base); +} + int i915_gem_render_state_init(struct intel_engine_cs *ring) { - const int gen = INTEL_INFO(ring->dev)->gen; - struct i915_render_state *so; - const struct intel_renderstate_rodata *rodata; + struct render_state so; int ret; if (WARN_ON(ring->id != RCS)) return -ENOENT; - rodata = render_state_get_rodata(ring->dev, gen); - if (rodata == NULL) - return 0; + ret = render_state_init(&so, ring->dev); + if (ret) + return ret; - so = render_state_alloc(ring->dev); - if (IS_ERR(so)) - return PTR_ERR(so); + if (so.rodata == NULL) + return 0; - ret = render_state_setup(gen, rodata, so); + ret = render_state_setup(&so); if (ret) goto out; ret = ring->dispatch_execbuffer(ring, - i915_gem_obj_ggtt_offset(so->obj), - so->len, + so.ggtt_offset, + so.rodata->batch_items * 4, I915_DISPATCH_SECURE); if (ret) goto out; - i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring); + i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); - ret = __i915_add_request(ring, NULL, so->obj, NULL); + ret = __i915_add_request(ring, NULL, so.obj, NULL); /* __i915_add_request moves object to inactive if it fails */ out: - render_state_free(so); + render_state_fini(&so); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 62ef55ba061c..b695d184c487 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -103,30 +103,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) return base; } -static int i915_setup_compression(struct drm_device *dev, int size) +static int find_compression_threshold(struct drm_device *dev, + struct drm_mm_node *node, + int size, + int fb_cpp) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); + int compression_threshold = 1; int ret; - compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL); - if (!compressed_fb) - goto err_llb; + /* HACK: This code depends on what we will do in *_enable_fbc. If that + * code changes, this code needs to change as well. 
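+ * + * The search below tries 2*size at a 1:1 threshold, then size at 1:1, + * then (on gen5+) size/2 at 1:2 and size/4 at 1:4 before giving up; with + * fb_cpp == 2 (16bpp) only the 1:1 attempts are permitted.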
+ * + * The enable_fbc code will attempt to use one of our 2 compression + * thresholds, therefore, in that case, we only have one resort left. + */ - /* Try to over-allocate to reduce reallocations and fragmentation */ - ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, + /* Try to over-allocate to reduce reallocations and fragmentation. */ + ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT); - if (ret) - ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, - size >>= 1, 4096, - DRM_MM_SEARCH_DEFAULT); - if (ret) + if (ret == 0) + return compression_threshold; + +again: + /* HW's ability to limit the CFB is 1:4 */ + if (compression_threshold > 4 || + (fb_cpp == 2 && compression_threshold == 2)) + return 0; + + ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, + size >>= 1, 4096, + DRM_MM_SEARCH_DEFAULT); + if (ret && INTEL_INFO(dev)->gen <= 4) { + return 0; + } else if (ret) { + compression_threshold <<= 1; + goto again; + } else { + return compression_threshold; + } +} + +static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_mm_node *uninitialized_var(compressed_llb); + int ret; + + ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb, + size, fb_cpp); + if (!ret) + goto err_llb; + else if (ret > 1) { + DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size buffer. Try to increase stolen memory size if available in BIOS.\n"); + + } + + dev_priv->fbc.threshold = ret; if (HAS_PCH_SPLIT(dev)) - I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); + I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); else if (IS_GM45(dev)) { - I915_WRITE(DPFC_CB_BASE, compressed_fb->start); + I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); } else { compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); if (!compressed_llb) @@ -140,13 +178,12 @@ static int i915_setup_compression(struct drm_device *dev, int size) dev_priv->fbc.compressed_llb = compressed_llb; I915_WRITE(FBC_CFB_BASE, - dev_priv->mm.stolen_base + compressed_fb->start); + dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start); I915_WRITE(FBC_LL_BASE, dev_priv->mm.stolen_base + compressed_llb->start); } - dev_priv->fbc.compressed_fb = compressed_fb; - dev_priv->fbc.size = size; + dev_priv->fbc.size = size / dev_priv->fbc.threshold; DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", size); @@ -155,14 +192,13 @@ static int i915_setup_compression(struct drm_device *dev, int size) err_fb: kfree(compressed_llb); - drm_mm_remove_node(compressed_fb); + drm_mm_remove_node(&dev_priv->fbc.compressed_fb); err_llb: - kfree(compressed_fb); pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling.
Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; } -int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) +int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -175,7 +211,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) /* Release any current block */ i915_gem_stolen_cleanup_compression(dev); - return i915_setup_compression(dev, size); + return i915_setup_compression(dev, size, fb_cpp); } void i915_gem_stolen_cleanup_compression(struct drm_device *dev) @@ -185,10 +221,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev) if (dev_priv->fbc.size == 0) return; - if (dev_priv->fbc.compressed_fb) { - drm_mm_remove_node(dev_priv->fbc.compressed_fb); - kfree(dev_priv->fbc.compressed_fb); - } + drm_mm_remove_node(&dev_priv->fbc.compressed_fb); if (dev_priv->fbc.compressed_llb) { drm_mm_remove_node(dev_priv->fbc.compressed_llb); @@ -292,9 +325,20 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) kfree(obj->pages); } + +static void +i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) +{ + if (obj->stolen) { + drm_mm_remove_node(obj->stolen); + kfree(obj->stolen); + obj->stolen = NULL; + } +} static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { .get_pages = i915_gem_object_get_pages_stolen, .put_pages = i915_gem_object_put_pages_stolen, + .release = i915_gem_object_release_stolen, }; static struct drm_i915_gem_object * @@ -452,13 +496,3 @@ err_out: drm_gem_object_unreference(&obj->base); return NULL; } - -void -i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) -{ - if (obj->stolen) { - drm_mm_remove_node(obj->stolen); - kfree(obj->stolen); - obj->stolen = NULL; - } -} diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 66cf41765bf9..45b6191efb58 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, struct drm_device *dev = error_priv->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error = error_priv->error; + struct drm_i915_error_object *obj; int i, j, offset, elt; int max_hangcheck_score; @@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, error->pinned_bo_count[0]); for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - struct drm_i915_error_object *obj; - obj = error->ring[i].batchbuffer; if (obj) { err_puts(m, dev_priv->ring[i].name); @@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } } + if ((obj = error->semaphore_obj)) { + err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset); + for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { + err_printf(m, "[%04x] %08x %08x %08x %08x\n", + elt * 4, + obj->pages[0][elt], + obj->pages[0][elt+1], + obj->pages[0][elt+2], + obj->pages[0][elt+3]); + } + } + if (error->overlay) intel_overlay_print_error_state(m, error->overlay); @@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref) kfree(error->ring[i].requests); } + i915_error_object_free(error->semaphore_obj); kfree(error->active_bo); kfree(error->overlay); kfree(error->display); @@ -746,7 +758,52 @@ static void i915_gem_record_fences(struct drm_device *dev, } } + +static void gen8_record_semaphore_state(struct drm_i915_private 
*dev_priv, + struct drm_i915_error_state *error, + struct intel_engine_cs *ring, + struct drm_i915_error_ring *ering) +{ + struct intel_engine_cs *useless; + int i; + + if (!i915_semaphore_is_enabled(dev_priv->dev)) + return; + + if (!error->semaphore_obj) + error->semaphore_obj = + i915_error_object_create(dev_priv, + dev_priv->semaphore_obj, + &dev_priv->gtt.base); + + for_each_ring(useless, dev_priv, i) { + u16 signal_offset = + (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4; + u32 *tmp = error->semaphore_obj->pages[0]; + + ering->semaphore_mboxes[i] = tmp[signal_offset]; + ering->semaphore_seqno[i] = ring->semaphore.sync_seqno[i]; + } +} + +static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, + struct intel_engine_cs *ring, + struct drm_i915_error_ring *ering) +{ + ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base)); + ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base)); + ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0]; + ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1]; + + if (HAS_VEBOX(dev_priv->dev)) { + ering->semaphore_mboxes[2] = + I915_READ(RING_SYNC_2(ring->mmio_base)); + ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2]; + } +} + static void i915_record_ring_state(struct drm_device *dev, + struct drm_i915_error_state *error, struct intel_engine_cs *ring, struct drm_i915_error_ring *ering) { @@ -755,18 +812,10 @@ static void i915_record_ring_state(struct drm_device *dev, if (INTEL_INFO(dev)->gen >= 6) { ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); - ering->semaphore_mboxes[0] - = I915_READ(RING_SYNC_0(ring->mmio_base)); - ering->semaphore_mboxes[1] - = I915_READ(RING_SYNC_1(ring->mmio_base)); - ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0]; - ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1]; - } - - if (HAS_VEBOX(dev)) { - ering->semaphore_mboxes[2] = - I915_READ(RING_SYNC_2(ring->mmio_base)); - ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2]; + if (INTEL_INFO(dev)->gen >= 8) + gen8_record_semaphore_state(dev_priv, error, ring, ering); + else + gen6_record_semaphore_state(dev_priv, ring, ering); } if (INTEL_INFO(dev)->gen >= 4) { @@ -895,7 +944,7 @@ static void i915_gem_record_rings(struct drm_device *dev, error->ring[i].valid = true; - i915_record_ring_state(dev, ring, &error->ring[i]); + i915_record_ring_state(dev, error, ring, &error->ring[i]); request = i915_gem_find_active_request(ring); if (request) { diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 267f069765ad..07e1a409e488 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev, return true; } +static void i915_digport_work_func(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, dig_port_work); + unsigned long irqflags; + u32 long_port_mask, short_port_mask; + struct intel_digital_port *intel_dig_port; + int i, ret; + u32 old_bits = 0; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + long_port_mask = dev_priv->long_hpd_port_mask; + dev_priv->long_hpd_port_mask = 0; + short_port_mask = dev_priv->short_hpd_port_mask; + dev_priv->short_hpd_port_mask = 0; + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + for (i = 0; i < I915_MAX_PORTS; i++) { + bool valid = false; + bool long_hpd = false; + intel_dig_port = dev_priv->hpd_irq_port[i]; + 
if (!intel_dig_port || !intel_dig_port->hpd_pulse) + continue; + + if (long_port_mask & (1 << i)) { + valid = true; + long_hpd = true; + } else if (short_port_mask & (1 << i)) + valid = true; + + if (valid) { + ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); + if (ret == true) { + /* if we get true fallback to old school hpd */ + old_bits |= (1 << intel_dig_port->base.hpd_pin); + } + } + } + + if (old_bits) { + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + dev_priv->hpd_event_bits |= old_bits; + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + schedule_work(&dev_priv->hotplug_work); + } +} + /* * Handle hotplug events outside the interrupt handler proper. */ @@ -1109,10 +1156,6 @@ static void i915_hotplug_work_func(struct work_struct *work) bool changed = false; u32 hpd_event_bits; - /* HPD irq before everything is fully set up. */ - if (!dev_priv->enable_hotplug_processing) - return; - mutex_lock(&mode_config->mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); @@ -1122,6 +1165,8 @@ static void i915_hotplug_work_func(struct work_struct *work) dev_priv->hpd_event_bits = 0; list_for_each_entry(connector, &mode_config->connector_list, head) { intel_connector = to_intel_connector(connector); + if (!intel_connector->encoder) + continue; intel_encoder = intel_connector->encoder; if (intel_encoder->hpd_pin > HPD_NONE && dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && @@ -1152,6 +1197,8 @@ static void i915_hotplug_work_func(struct work_struct *work) list_for_each_entry(connector, &mode_config->connector_list, head) { intel_connector = to_intel_connector(connector); + if (!intel_connector->encoder) + continue; intel_encoder = intel_connector->encoder; if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { if (intel_encoder->hot_plug) @@ -1218,10 +1265,138 @@ static void notify_ring(struct drm_device *dev, trace_i915_gem_request_complete(ring); + if (drm_core_check_feature(dev, DRIVER_MODESET)) + intel_notify_mmio_flip(ring); + wake_up_all(&ring->irq_queue); i915_queue_hangcheck(dev); } +static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, + struct intel_rps_ei *rps_ei) +{ + u32 cz_ts, cz_freq_khz; + u32 render_count, media_count; + u32 elapsed_render, elapsed_media, elapsed_time; + u32 residency = 0; + + cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); + cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4); + + render_count = I915_READ(VLV_RENDER_C0_COUNT_REG); + media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG); + + if (rps_ei->cz_clock == 0) { + rps_ei->cz_clock = cz_ts; + rps_ei->render_c0 = render_count; + rps_ei->media_c0 = media_count; + + return dev_priv->rps.cur_freq; + } + + elapsed_time = cz_ts - rps_ei->cz_clock; + rps_ei->cz_clock = cz_ts; + + elapsed_render = render_count - rps_ei->render_c0; + rps_ei->render_c0 = render_count; + + elapsed_media = media_count - rps_ei->media_c0; + rps_ei->media_c0 = media_count; + + /* Convert all the counters into common unit of milli sec */ + elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC; + elapsed_render /= cz_freq_khz; + elapsed_media /= cz_freq_khz; + + /* + * Calculate overall C0 residency percentage + * only if elapsed time is non zero + */ + if (elapsed_time) { + residency = + ((max(elapsed_render, elapsed_media) * 100) + / elapsed_time); + } + + return residency; +} + +/** + * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU + * busy-ness calculated from C0 counters of render & media power wells + * @dev_priv: DRM device private + * + */ 
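+/* + * In short: C0 residency above the up threshold doubles the last positive + * adjustment (starting from +1), never picking a frequency below RPe; + * residency below the down threshold, evaluated once every + * VLV_INT_COUNT_FOR_DOWN_EI interrupts, doubles the negative adjustment. + */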
+static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) +{ + u32 residency_C0_up = 0, residency_C0_down = 0; + u8 new_delay, adj; + + dev_priv->rps.ei_interrupt_count++; + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + + if (dev_priv->rps.up_ei.cz_clock == 0) { + vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei); + vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei); + return dev_priv->rps.cur_freq; + } + + + /* + * To down throttle, C0 residency should be less than down threshold + * for continuous EI intervals. So calculate the down EI counters + * only once every VLV_INT_COUNT_FOR_DOWN_EI interrupts + */ + if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) { + + dev_priv->rps.ei_interrupt_count = 0; + + residency_C0_down = vlv_c0_residency(dev_priv, + &dev_priv->rps.down_ei); + } else { + residency_C0_up = vlv_c0_residency(dev_priv, + &dev_priv->rps.up_ei); + } + + new_delay = dev_priv->rps.cur_freq; + + adj = dev_priv->rps.last_adj; + /* C0 residency is greater than UP threshold. Increase Frequency */ + if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) { + if (adj > 0) + adj *= 2; + else + adj = 1; + + if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit) + new_delay = dev_priv->rps.cur_freq + adj; + + /* + * For better performance, jump directly + * to RPe if we're below it. + */ + if (new_delay < dev_priv->rps.efficient_freq) + new_delay = dev_priv->rps.efficient_freq; + + } else if (!dev_priv->rps.ei_interrupt_count && + (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) { + if (adj < 0) + adj *= 2; + else + adj = -1; + /* + * This means C0 residency stayed below the down threshold over + * a period of VLV_INT_COUNT_FOR_DOWN_EI, so reduce the freq + */ + if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit) + new_delay = dev_priv->rps.cur_freq + adj; + } + + return new_delay; +} + static void gen6_pm_rps_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -1252,8 +1427,10 @@ static void gen6_pm_rps_work(struct work_struct *work) if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { if (adj > 0) adj *= 2; - else - adj = 1; + else { + /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1; + } new_delay = dev_priv->rps.cur_freq + adj; /* @@ -1268,11 +1445,15 @@ static void gen6_pm_rps_work(struct work_struct *work) else new_delay = dev_priv->rps.min_freq_softlimit; adj = 0; + } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { + new_delay = vlv_calc_delay_from_C0_counters(dev_priv); } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { if (adj < 0) adj *= 2; - else - adj = -1; + else { + /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(dev_priv->dev) ?
-2 : -1; + } new_delay = dev_priv->rps.cur_freq + adj; } else { /* unknown event */ new_delay = dev_priv->rps.cur_freq; @@ -1458,6 +1639,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { tmp = I915_READ(GEN8_GT_IIR(0)); if (tmp) { + I915_WRITE(GEN8_GT_IIR(0), tmp); ret = IRQ_HANDLED; rcs = tmp >> GEN8_RCS_IRQ_SHIFT; bcs = tmp >> GEN8_BCS_IRQ_SHIFT; @@ -1465,7 +1647,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, notify_ring(dev, &dev_priv->ring[RCS]); if (bcs & GT_RENDER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[BCS]); - I915_WRITE(GEN8_GT_IIR(0), tmp); } else DRM_ERROR("The master control interrupt lied (GT0)!\n"); } @@ -1473,6 +1654,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { tmp = I915_READ(GEN8_GT_IIR(1)); if (tmp) { + I915_WRITE(GEN8_GT_IIR(1), tmp); ret = IRQ_HANDLED; vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; if (vcs & GT_RENDER_USER_INTERRUPT) @@ -1480,7 +1662,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; if (vcs & GT_RENDER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VCS2]); - I915_WRITE(GEN8_GT_IIR(1), tmp); } else DRM_ERROR("The master control interrupt lied (GT1)!\n"); } @@ -1488,10 +1669,10 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & GEN8_GT_PM_IRQ) { tmp = I915_READ(GEN8_GT_IIR(2)); if (tmp & dev_priv->pm_rps_events) { - ret = IRQ_HANDLED; - gen8_rps_irq_handler(dev_priv, tmp); I915_WRITE(GEN8_GT_IIR(2), tmp & dev_priv->pm_rps_events); + ret = IRQ_HANDLED; + gen8_rps_irq_handler(dev_priv, tmp); } else DRM_ERROR("The master control interrupt lied (PM)!\n"); } @@ -1499,11 +1680,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & GEN8_GT_VECS_IRQ) { tmp = I915_READ(GEN8_GT_IIR(3)); if (tmp) { + I915_WRITE(GEN8_GT_IIR(3), tmp); ret = IRQ_HANDLED; vcs = tmp >> GEN8_VECS_IRQ_SHIFT; if (vcs & GT_RENDER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VECS]); - I915_WRITE(GEN8_GT_IIR(3), tmp); } else DRM_ERROR("The master control interrupt lied (GT3)!\n"); } @@ -1514,23 +1695,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, #define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_THRESHOLD 5 +static int ilk_port_to_hotplug_shift(enum port port) +{ + switch (port) { + case PORT_A: + case PORT_E: + default: + return -1; + case PORT_B: + return 0; + case PORT_C: + return 8; + case PORT_D: + return 16; + } +} + +static int g4x_port_to_hotplug_shift(enum port port) +{ + switch (port) { + case PORT_A: + case PORT_E: + default: + return -1; + case PORT_B: + return 17; + case PORT_C: + return 19; + case PORT_D: + return 21; + } +} + +static inline enum port get_port_from_pin(enum hpd_pin pin) +{ + switch (pin) { + case HPD_PORT_B: + return PORT_B; + case HPD_PORT_C: + return PORT_C; + case HPD_PORT_D: + return PORT_D; + default: + return PORT_A; /* no hpd */ + } +} + static inline void intel_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, + u32 dig_hotplug_reg, const u32 *hpd) { struct drm_i915_private *dev_priv = dev->dev_private; int i; + enum port port; bool storm_detected = false; + bool queue_dig = false, queue_hp = false; + u32 dig_shift; + u32 dig_port_mask = 0; if (!hotplug_trigger) return; - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", - hotplug_trigger); + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", + 
hotplug_trigger, dig_hotplug_reg); spin_lock(&dev_priv->irq_lock); for (i = 1; i < HPD_NUM_PINS; i++) { + if (!(hpd[i] & hotplug_trigger)) + continue; + + port = get_port_from_pin(i); + if (port && dev_priv->hpd_irq_port[port]) { + bool long_hpd; + if (IS_G4X(dev)) { + dig_shift = g4x_port_to_hotplug_shift(port); + long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; + } else { + dig_shift = ilk_port_to_hotplug_shift(port); + long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; + } + + DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd); + /* for long HPD pulses we want to have the digital queue happen, + but we still want HPD storm detection to function. */ + if (long_hpd) { + dev_priv->long_hpd_port_mask |= (1 << port); + dig_port_mask |= hpd[i]; + } else { + /* for short HPD just trigger the digital queue */ + dev_priv->short_hpd_port_mask |= (1 << port); + hotplug_trigger &= ~hpd[i]; + } + queue_dig = true; + } + } + + for (i = 1; i < HPD_NUM_PINS; i++) { if (hpd[i] & hotplug_trigger && dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { /* @@ -1550,7 +1812,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) continue; - dev_priv->hpd_event_bits |= (1 << i); + if (!(dig_port_mask & hpd[i])) { + dev_priv->hpd_event_bits |= (1 << i); + queue_hp = true; + } + if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { @@ -1579,7 +1845,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, * queue for otherwise the flush_work in the pageflip code will * deadlock. */ - schedule_work(&dev_priv->hotplug_work); + if (queue_dig) + queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); + if (queue_hp) + schedule_work(&dev_priv->hotplug_work); } static void gmbus_irq_handler(struct drm_device *dev) @@ -1809,26 +2078,28 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); - if (IS_G4X(dev)) { - u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; + if (hotplug_status) { + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + /* + * Make sure hotplug status is cleared before we clear IIR, or else we + * may miss hotplug events. + */ + POSTING_READ(PORT_HOTPLUG_STAT); - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x); - } else { - u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; + if (IS_G4X(dev)) { + u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); - } + intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); + } else { + u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; - if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && - hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) - dp_aux_irq_handler(dev); + intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); + } - I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); - /* - * Make sure hotplug status is cleared before we clear IIR, or else we - * may miss hotplug events. 
- */ - POSTING_READ(PORT_HOTPLUG_STAT); + if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && + hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) + dp_aux_irq_handler(dev); + } } static irqreturn_t valleyview_irq_handler(int irq, void *arg) @@ -1839,29 +2110,36 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) irqreturn_t ret = IRQ_NONE; while (true) { - iir = I915_READ(VLV_IIR); + /* Find, clear, then process each source of interrupt */ + gt_iir = I915_READ(GTIIR); + if (gt_iir) + I915_WRITE(GTIIR, gt_iir); + pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) + I915_WRITE(GEN6_PMIIR, pm_iir); + + iir = I915_READ(VLV_IIR); + if (iir) { + /* Consume port before clearing IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) + i9xx_hpd_irq_handler(dev); + I915_WRITE(VLV_IIR, iir); + } if (gt_iir == 0 && pm_iir == 0 && iir == 0) goto out; ret = IRQ_HANDLED; - snb_gt_irq_handler(dev, dev_priv, gt_iir); - - valleyview_pipestat_irq_handler(dev, iir); - - /* Consume port. Then clear IIR or we'll miss events */ - if (iir & I915_DISPLAY_PORT_INTERRUPT) - i9xx_hpd_irq_handler(dev); - + if (gt_iir) + snb_gt_irq_handler(dev, dev_priv, gt_iir); if (pm_iir) gen6_rps_irq_handler(dev_priv, pm_iir); - - I915_WRITE(GTIIR, gt_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); - I915_WRITE(VLV_IIR, iir); + /* Call regardless, as some status bits might not be + * signalled in iir */ + valleyview_pipestat_irq_handler(dev, iir); } out: @@ -1882,21 +2160,27 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) if (master_ctl == 0 && iir == 0) break; + ret = IRQ_HANDLED; + I915_WRITE(GEN8_MASTER_IRQ, 0); - gen8_gt_irq_handler(dev, dev_priv, master_ctl); + /* Find, clear, then process each source of interrupt */ - valleyview_pipestat_irq_handler(dev, iir); + if (iir) { + /* Consume port before clearing IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) + i9xx_hpd_irq_handler(dev); + I915_WRITE(VLV_IIR, iir); + } - /* Consume port. 
Then clear IIR or we'll miss events */ - i9xx_hpd_irq_handler(dev); + gen8_gt_irq_handler(dev, dev_priv, master_ctl); - I915_WRITE(VLV_IIR, iir); + /* Call regardless, as some status bits might not be + * signalled in iir */ + valleyview_pipestat_irq_handler(dev, iir); I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); POSTING_READ(GEN8_MASTER_IRQ); - - ret = IRQ_HANDLED; } return ret; @@ -1907,8 +2191,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; + u32 dig_hotplug_reg; - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); + + intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -2014,8 +2302,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); + intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> @@ -2132,6 +2424,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) } } +/* + * To handle irqs with the minimum potential races with fresh interrupts, we: + * 1 - Disable Master Interrupt Control. + * 2 - Find the source(s) of the interrupt. + * 3 - Clear the Interrupt Identity bits (IIR). + * 4 - Process the interrupt(s) that had bits set in the IIRs. + * 5 - Re-enable Master Interrupt Control. 
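/*
 * Editor's sketch of the five steps listed above, reduced to a single
 * interrupt source; the regs[] array stands in for MMIO and is not the
 * driver's API.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];	/* [0]: master control, [1]: IIR (fake MMIO) */

static void process(uint32_t bits)
{
	printf("handling IIR bits 0x%08x\n", bits);
}

static int handle_irq(void)
{
	uint32_t iir;
	int handled = 0;

	regs[0] = 0;			/* 1: disable master interrupt control */
	iir = regs[1];			/* 2: find the source(s) */
	if (iir) {
		regs[1] = 0;		/* 3: clear (ack) the IIR first... */
		process(iir);		/* 4: ...then process what was latched */
		handled = 1;
	}
	regs[0] = 1u << 31;		/* 5: re-enable master interrupt control */
	return handled;
}

int main(void)
{
	regs[1] = 0x1;			/* hypothetical pending bit */
	return !handle_irq();
}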
+ */ static irqreturn_t ironlake_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; @@ -2159,32 +2459,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) POSTING_READ(SDEIER); } + /* Find, clear, then process each source of interrupt */ + gt_iir = I915_READ(GTIIR); if (gt_iir) { + I915_WRITE(GTIIR, gt_iir); + ret = IRQ_HANDLED; if (INTEL_INFO(dev)->gen >= 6) snb_gt_irq_handler(dev, dev_priv, gt_iir); else ilk_gt_irq_handler(dev, dev_priv, gt_iir); - I915_WRITE(GTIIR, gt_iir); - ret = IRQ_HANDLED; } de_iir = I915_READ(DEIIR); if (de_iir) { + I915_WRITE(DEIIR, de_iir); + ret = IRQ_HANDLED; if (INTEL_INFO(dev)->gen >= 7) ivb_display_irq_handler(dev, de_iir); else ilk_display_irq_handler(dev, de_iir); - I915_WRITE(DEIIR, de_iir); - ret = IRQ_HANDLED; } if (INTEL_INFO(dev)->gen >= 6) { u32 pm_iir = I915_READ(GEN6_PMIIR); if (pm_iir) { - gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GEN6_PMIIR, pm_iir); ret = IRQ_HANDLED; + gen6_rps_irq_handler(dev_priv, pm_iir); } } @@ -2215,36 +2517,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) I915_WRITE(GEN8_MASTER_IRQ, 0); POSTING_READ(GEN8_MASTER_IRQ); + /* Find, clear, then process each source of interrupt */ + ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); if (master_ctl & GEN8_DE_MISC_IRQ) { tmp = I915_READ(GEN8_DE_MISC_IIR); - if (tmp & GEN8_DE_MISC_GSE) - intel_opregion_asle_intr(dev); - else if (tmp) - DRM_ERROR("Unexpected DE Misc interrupt\n"); - else - DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); - if (tmp) { I915_WRITE(GEN8_DE_MISC_IIR, tmp); ret = IRQ_HANDLED; + if (tmp & GEN8_DE_MISC_GSE) + intel_opregion_asle_intr(dev); + else + DRM_ERROR("Unexpected DE Misc interrupt\n"); } + else + DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); } if (master_ctl & GEN8_DE_PORT_IRQ) { tmp = I915_READ(GEN8_DE_PORT_IIR); - if (tmp & GEN8_AUX_CHANNEL_A) - dp_aux_irq_handler(dev); - else if (tmp) - DRM_ERROR("Unexpected DE Port interrupt\n"); - else - DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); - if (tmp) { I915_WRITE(GEN8_DE_PORT_IIR, tmp); ret = IRQ_HANDLED; + if (tmp & GEN8_AUX_CHANNEL_A) + dp_aux_irq_handler(dev); + else + DRM_ERROR("Unexpected DE Port interrupt\n"); } + else + DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); } for_each_pipe(pipe) { @@ -2254,33 +2556,32 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) continue; pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); - if (pipe_iir & GEN8_PIPE_VBLANK) - intel_pipe_handle_vblank(dev, pipe); - - if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (pipe_iir) { + ret = IRQ_HANDLED; + I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + if (pipe_iir & GEN8_PIPE_VBLANK) + intel_pipe_handle_vblank(dev, pipe); - if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev, pipe); + if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); + } - if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, - false)) - DRM_ERROR("Pipe %c FIFO underrun\n", - pipe_name(pipe)); - } + if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) + hsw_pipe_crc_irq_handler(dev, pipe); - if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { - DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", - pipe_name(pipe), - pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); - } + if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { + if 
(intel_set_cpu_fifo_underrun_reporting(dev, pipe, + false)) + DRM_ERROR("Pipe %c FIFO underrun\n", + pipe_name(pipe)); + } - if (pipe_iir) { - ret = IRQ_HANDLED; - I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { + DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", + pipe_name(pipe), + pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); + } } else DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); } @@ -2292,13 +2593,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) * on older pch-split platforms. But this needs testing. */ u32 pch_iir = I915_READ(SDEIIR); - - cpt_irq_handler(dev, pch_iir); - if (pch_iir) { I915_WRITE(SDEIIR, pch_iir); ret = IRQ_HANDLED; - } + cpt_irq_handler(dev, pch_iir); + } else + DRM_ERROR("The master control interrupt lied (SDE)!\n"); + } I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); @@ -2753,12 +3054,7 @@ static bool ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) { if (INTEL_INFO(dev)->gen >= 8) { - /* - * FIXME: gen8 semaphore support - currently we don't emit - * semaphores on bdw anyway, but this needs to be addressed when - * we merge that code. - */ - return false; + return (ipehr >> 23) == 0x1c; } else { ipehr &= ~MI_SEMAPHORE_SYNC_MASK; return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | @@ -2767,19 +3063,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) } static struct intel_engine_cs * -semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) +semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) { struct drm_i915_private *dev_priv = ring->dev->dev_private; struct intel_engine_cs *signaller; int i; if (INTEL_INFO(dev_priv->dev)->gen >= 8) { - /* - * FIXME: gen8 semaphore support - currently we don't emit - * semaphores on bdw anyway, but this needs to be addressed when - * we merge that code. - */ - return NULL; + for_each_ring(signaller, dev_priv, i) { + if (ring == signaller) + continue; + + if (offset == signaller->semaphore.signal_ggtt[ring->id]) + return signaller; + } } else { u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; @@ -2792,8 +3089,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) } } - DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n", - ring->id, ipehr); + DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", + ring->id, ipehr, offset); return NULL; } @@ -2803,7 +3100,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) { struct drm_i915_private *dev_priv = ring->dev->dev_private; u32 cmd, ipehr, head; - int i; + u64 offset = 0; + int i, backwards; ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) @@ -2812,13 +3110,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) /* * HEAD is likely pointing to the dword after the actual command, * so scan backwards until we find the MBOX. But limit it to just 3 - * dwords. Note that we don't care about ACTHD here since that might + * or 4 dwords depending on the semaphore wait command size. + * Note that we don't care about ACTHD here since that might * point at at batch, and semaphores are always emitted into the * ringbuffer itself. */ head = I915_READ_HEAD(ring) & HEAD_ADDR; + backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 
5 : 4; - for (i = 4; i; --i) { + for (i = backwards; i; --i) { /* * Be paranoid and presume the hw has gone off into the wild - * our ring is smaller than what the hardware (and hence @@ -2838,7 +3138,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) return NULL; *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; - return semaphore_wait_to_signaller_ring(ring, ipehr); + if (INTEL_INFO(ring->dev)->gen >= 8) { + offset = ioread32(ring->buffer->virtual_start + head + 12); + offset <<= 32; + offset = ioread32(ring->buffer->virtual_start + head + 8); + } + return semaphore_wait_to_signaller_ring(ring, ipehr, offset); } static int semaphore_passed(struct intel_engine_cs *ring) @@ -4327,12 +4632,17 @@ void intel_irq_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); /* Let's track the enabled rps events */ - dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; + if (IS_VALLEYVIEW(dev)) + /* WaGsvRC0ResidenncyMethod:VLV */ + dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; + else + dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; setup_timer(&dev_priv->gpu_error.hangcheck_timer, i915_hangcheck_elapsed, @@ -4429,7 +4739,9 @@ void intel_hpd_init(struct drm_device *dev) list_for_each_entry(connector, &mode_config->connector_list, head) { struct intel_connector *intel_connector = to_intel_connector(connector); connector->polled = intel_connector->polled; - if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) + if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) + connector->polled = DRM_CONNECTOR_POLL_HPD; + if (intel_connector->mst_port) connector->polled = DRM_CONNECTOR_POLL_HPD; } diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index d05a2afa17dc..81457293cd3e 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -48,6 +48,7 @@ struct i915_params i915 __read_mostly = { .disable_display = 0, .enable_cmd_parser = 1, .disable_vtd_wa = 0, + .use_mmio_flip = 0, }; module_param_named(modeset, i915.modeset, int, 0400); @@ -156,3 +157,7 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)" module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); MODULE_PARM_DESC(enable_cmd_parser, "Enable command parsing (1=enabled [default], 0=disabled)"); + +module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600); +MODULE_PARM_DESC(use_mmio_flip, + "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e691b30b2817..b0036cd3fe19 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -29,8 +29,8 @@ #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) -#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c) -#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c) +#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ + (pipe) == PIPE_B ? 
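/*
 * Editor's sketch of the 64-bit semaphore address readback added above
 * (low dword read at head + 8, high dword at head + 12). Note the |= when
 * merging the low half: the hunk above assigns with a plain =, which
 * discards the high dword that was just shifted into place, so |= appears
 * to be what was intended. The dword values below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t semaphore_offset(uint32_t lo, uint32_t hi)
{
	uint64_t offset = hi;

	offset <<= 32;
	offset |= lo;		/* merge, don't overwrite */
	return offset;
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)semaphore_offset(0x0000a000u, 0x1u));
	return 0;
}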
(b) : (c)) #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) #define _MASKED_BIT_DISABLE(a) ((a) << 16) @@ -240,7 +240,7 @@ #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) -#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ +#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */ #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) #define MI_SEMAPHORE_UPDATE (1<<21) #define MI_SEMAPHORE_COMPARE (1<<20) @@ -266,6 +266,11 @@ #define MI_RESTORE_EXT_STATE_EN (1<<2) #define MI_FORCE_RESTORE (1<<1) #define MI_RESTORE_INHIBIT (1<<0) +#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ +#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) +#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ +#define MI_SEMAPHORE_POLL (1<<15) +#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) @@ -360,6 +365,7 @@ #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) #define PIPE_CONTROL_NOTIFY (1<<8) +#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */ #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) @@ -525,10 +531,21 @@ enum punit_power_well { #define PUNIT_REG_GPU_FREQ_STS 0xd8 #define GENFREQSTATUS (1<<0) #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc +#define PUNIT_REG_CZ_TIMESTAMP 0xce #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ #define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ +#define PUNIT_GPU_STATUS_REG 0xdb +#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 +#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff +#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT 8 +#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff + +#define PUNIT_GPU_DUTYCYCLE_REG 0xdf +#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8 +#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff + #define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c #define FB_GFX_MAX_FREQ_FUSE_SHIFT 3 #define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8 @@ -540,6 +557,11 @@ enum punit_power_well { #define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 #define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 +#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000 +#define VLV_RP_UP_EI_THRESHOLD 90 +#define VLV_RP_DOWN_EI_THRESHOLD 70 +#define VLV_INT_COUNT_FOR_DOWN_EI 5 + /* vlv2 north clock has */ #define CCK_FUSE_REG 0x8 #define CCK_FUSE_HPLL_FREQ_MASK 0x3 @@ -574,6 +596,11 @@ enum punit_power_well { #define DSI_PLL_M1_DIV_SHIFT 0 #define DSI_PLL_M1_DIV_MASK (0x1ff << 0) #define CCK_DISPLAY_CLOCK_CONTROL 0x6b +#define DISPLAY_TRUNK_FORCE_ON (1 << 17) +#define DISPLAY_TRUNK_FORCE_OFF (1 << 16) +#define DISPLAY_FREQUENCY_STATUS (0x1f << 8) +#define DISPLAY_FREQUENCY_STATUS_SHIFT 8 +#define DISPLAY_FREQUENCY_VALUES (0x1f << 0) /** * DOC: DPIO @@ -761,6 +788,8 @@ enum punit_power_well { #define _VLV_PCS_DW8_CH0 0x8220 #define _VLV_PCS_DW8_CH1 0x8420 +#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20) +#define CHV_PCS_USEDCLKCHANNEL (1 << 21) #define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1) #define _VLV_PCS01_DW8_CH0 0x0220 @@ -869,6 +898,16 @@ enum punit_power_well { #define DPIO_CHV_PROP_COEFF_SHIFT 0 #define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1) +#define _CHV_CMN_DW5_CH0 0x8114 +#define CHV_BUFRIGHTENA1_DISABLE (0 << 20) +#define CHV_BUFRIGHTENA1_NORMAL (1 << 20) +#define CHV_BUFRIGHTENA1_FORCE (3 << 20) +#define 
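/*
 * Editor's sketch of the reworked _PIPE3() selector above: a chained
 * conditional mapping an enum to one of three per-instance addresses,
 * with every macro argument parenthesized so arbitrary expressions can
 * be passed. The addresses mirror the DPLL A/B/C offsets used later in
 * the header; the surrounding scaffolding is hypothetical.
 */
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

#define REG3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
			     (pipe) == PIPE_B ? (b) : (c))

int main(void)
{
	printf("0x%x\n", (unsigned)REG3(PIPE_C, 0x6014, 0x6018, 0x6030));
	return 0;
}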
CHV_BUFRIGHTENA1_MASK (3 << 20) +#define CHV_BUFLEFTENA1_DISABLE (0 << 22) +#define CHV_BUFLEFTENA1_NORMAL (1 << 22) +#define CHV_BUFLEFTENA1_FORCE (3 << 22) +#define CHV_BUFLEFTENA1_MASK (3 << 22) + #define _CHV_CMN_DW13_CH0 0x8134 #define _CHV_CMN_DW0_CH1 0x8080 #define DPIO_CHV_S1_DIV_SHIFT 21 @@ -883,8 +922,21 @@ enum punit_power_well { #define _CHV_CMN_DW1_CH1 0x8084 #define DPIO_AFC_RECAL (1 << 14) #define DPIO_DCLKP_EN (1 << 13) +#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */ +#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */ +#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */ +#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */ +#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */ +#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */ +#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */ +#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */ #define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1) +#define _CHV_CMN_DW19_CH0 0x814c +#define _CHV_CMN_DW6_CH1 0x8098 +#define CHV_CMN_USEDCLKCHANNEL (1 << 13) +#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1) + #define CHV_CMN_DW30 0x8178 #define DPIO_LRC_BYPASS (1 << 3) @@ -933,6 +985,7 @@ enum punit_power_well { #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 #define GEN7_FENCE_MAX_PITCH_VAL 0x0800 + /* control register for cpu gtt access */ #define TILECTL 0x101000 #define TILECTL_SWZCTL (1 << 0) @@ -1167,6 +1220,8 @@ enum punit_power_well { #define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) #define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) #define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120) +#define VLV_PCBR_ADDR_SHIFT 12 + #define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ #define EIR 0x020b0 #define EMR 0x020b4 @@ -1567,11 +1622,10 @@ enum punit_power_well { /* * Clock control & power management */ -#define DPLL_A_OFFSET 0x6014 -#define DPLL_B_OFFSET 0x6018 -#define CHV_DPLL_C_OFFSET 0x6030 -#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \ - dev_priv->info.display_mmio_offset) +#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) +#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) +#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030) +#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C) #define VGA0 0x6000 #define VGA1 0x6004 @@ -1659,11 +1713,10 @@ enum punit_power_well { #define SDVO_MULTIPLIER_SHIFT_HIRES 4 #define SDVO_MULTIPLIER_SHIFT_VGA 0 -#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */ -#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */ -#define CHV_DPLL_C_MD_OFFSET 0x603c -#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \ - dev_priv->info.display_mmio_offset) +#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) +#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) +#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c) +#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD) /* * UDI pixel divider, controlling how many pixels are stuffed into a packet. @@ -2373,6 +2426,7 @@ enum punit_power_well { #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 
0x64800 : 0x6f800) #define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) #define EDP_PSR_ENABLE (1<<31) +#define BDW_PSR_SINGLE_FRAME (1<<30) #define EDP_PSR_LINK_DISABLE (0<<27) #define EDP_PSR_LINK_STANDBY (1<<27) #define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25) @@ -2530,8 +2584,14 @@ enum punit_power_well { #define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28) #define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29) #define PORTD_HOTPLUG_INT_STATUS (3 << 21) +#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21) +#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21) #define PORTC_HOTPLUG_INT_STATUS (3 << 19) +#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19) +#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19) #define PORTB_HOTPLUG_INT_STATUS (3 << 17) +#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17) +#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17) /* CRT/TV common between gen3+ */ #define CRT_HOTPLUG_INT_STATUS (1 << 11) #define TV_HOTPLUG_INT_STATUS (1 << 10) @@ -2585,7 +2645,7 @@ enum punit_power_well { #define PORT_DFT_I9XX 0x61150 #define DC_BALANCE_RESET (1 << 25) -#define PORT_DFT2_G4X 0x61154 +#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154) #define DC_BALANCE_RESET_VLV (1 << 31) #define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0) #define PIPE_B_SCRAMBLE_RESET (1 << 1) @@ -4627,6 +4687,8 @@ enum punit_power_well { #define GEN7_L3CNTLREG1 0xB01C #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C #define GEN7_L3AGDIS (1<<19) +#define GEN7_L3CNTLREG2 0xB020 +#define GEN7_L3CNTLREG3 0xB024 #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 @@ -4873,8 +4935,7 @@ enum punit_power_well { #define _PCH_TRANSA_LINK_M2 0xe0048 #define _PCH_TRANSA_LINK_N2 0xe004c -/* Per-transcoder DIP controls */ - +/* Per-transcoder DIP controls (PCH) */ #define _VIDEO_DIP_CTL_A 0xe0200 #define _VIDEO_DIP_DATA_A 0xe0208 #define _VIDEO_DIP_GCP_A 0xe0210 @@ -4887,6 +4948,7 @@ enum punit_power_well { #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) +/* Per-transcoder DIP controls (VLV) */ #define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) #define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) @@ -4895,12 +4957,19 @@ enum punit_power_well { #define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) +#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0) +#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4) +#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8) + #define VLV_TVIDEO_DIP_CTL(pipe) \ - _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B) + _PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \ + VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C) #define VLV_TVIDEO_DIP_DATA(pipe) \ - _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B) + _PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \ + VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C) #define VLV_TVIDEO_DIP_GCP(pipe) \ - _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B) + _PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \ + VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C) /* Haswell DIP controls */ #define HSW_VIDEO_DIP_CTL_A 0x60200 @@ -5331,6 +5400,7 @@ enum punit_power_well { #define VLV_GTLC_ALLOWWAKEERR (1 << 1) #define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) #define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) +#define VLV_GTLC_SURVIVABILITY_REG 0x130098 #define FORCEWAKE_MT 0xa188 /* 
multi-threaded */ #define FORCEWAKE_KERNEL 0x1 #define FORCEWAKE_USER 0x2 @@ -5478,6 +5548,8 @@ enum punit_power_well { #define GEN6_GT_GFX_RC6_LOCKED 0x138104 #define VLV_COUNTER_CONTROL 0x138104 #define VLV_COUNT_RANGE_HIGH (1<<15) +#define VLV_MEDIA_RC0_COUNT_EN (1<<5) +#define VLV_RENDER_RC0_COUNT_EN (1<<4) #define VLV_MEDIA_RC6_COUNT_EN (1<<1) #define VLV_RENDER_RC6_COUNT_EN (1<<0) #define GEN6_GT_GFX_RC6 0x138108 @@ -5486,6 +5558,8 @@ enum punit_power_well { #define GEN6_GT_GFX_RC6p 0x13810C #define GEN6_GT_GFX_RC6pp 0x138110 +#define VLV_RENDER_C0_COUNT_REG 0x138118 +#define VLV_MEDIA_C0_COUNT_REG 0x13811C #define GEN6_PCODE_MAILBOX 0x138124 #define GEN6_PCODE_READY (1<<31) @@ -5720,6 +5794,7 @@ enum punit_power_well { #define TRANS_DDI_FUNC_ENABLE (1<<31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ #define TRANS_DDI_PORT_MASK (7<<28) +#define TRANS_DDI_PORT_SHIFT 28 #define TRANS_DDI_SELECT_PORT(x) ((x)<<28) #define TRANS_DDI_PORT_NONE (0<<28) #define TRANS_DDI_MODE_SELECT_MASK (7<<24) @@ -5740,6 +5815,7 @@ enum punit_power_well { #define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) #define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) #define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) +#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8) #define TRANS_DDI_BFI_ENABLE (1<<4) /* DisplayPort Transport Control */ @@ -5749,6 +5825,7 @@ enum punit_power_well { #define DP_TP_CTL_ENABLE (1<<31) #define DP_TP_CTL_MODE_SST (0<<27) #define DP_TP_CTL_MODE_MST (1<<27) +#define DP_TP_CTL_FORCE_ACT (1<<25) #define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) #define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) @@ -5763,15 +5840,19 @@ enum punit_power_well { #define DP_TP_STATUS_A 0x64044 #define DP_TP_STATUS_B 0x64144 #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) -#define DP_TP_STATUS_IDLE_DONE (1<<25) -#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) +#define DP_TP_STATUS_IDLE_DONE (1<<25) +#define DP_TP_STATUS_ACT_SENT (1<<24) +#define DP_TP_STATUS_MODE_STATUS_MST (1<<23) +#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) +#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8) +#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4) +#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0) /* DDI Buffer Control */ #define DDI_BUF_CTL_A 0x64000 #define DDI_BUF_CTL_B 0x64100 #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) #define DDI_BUF_CTL_ENABLE (1<<31) -/* Haswell */ #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ @@ -5781,16 +5862,6 @@ enum punit_power_well { #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ -/* Broadwell */ -#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */ -#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */ -#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */ -#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */ -#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */ -#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */ -#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */ -#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */ -#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */ #define DDI_BUF_EMP_MASK (0xf<<24) #define DDI_BUF_PORT_REVERSAL (1<<16) #define DDI_BUF_IS_IDLE (1<<7) @@ -5858,10 +5929,12 @@ enum punit_power_well { /* WRPLL */ #define WRPLL_CTL1 0x46040 #define WRPLL_CTL2 0x46060 +#define 
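/*
 * Editor's sketch (not the driver code) of the residency-based RPS scheme
 * implied by the new VLV C0 counters here and the thresholds defined
 * earlier: compare the render C0 counter delta against the elapsed
 * evaluation interval as a percentage, then step the frequency up above
 * VLV_RP_UP_EI_THRESHOLD (90) or down below VLV_RP_DOWN_EI_THRESHOLD (70).
 * Unit conversion via VLV_CZ_CLOCK_TO_MILLI_SEC is elided; the deltas are
 * assumed directly comparable.
 */
#include <stdint.h>
#include <stdio.h>

enum rps_action { RPS_HOLD, RPS_UP, RPS_DOWN };

static enum rps_action rc0_residency_decision(uint64_t c0_delta,
					      uint64_t ei_delta)
{
	unsigned int busy_pct;

	if (ei_delta == 0)
		return RPS_HOLD;
	busy_pct = (unsigned int)((c0_delta * 100) / ei_delta);
	if (busy_pct > 90)
		return RPS_UP;
	if (busy_pct < 70)
		return RPS_DOWN;
	return RPS_HOLD;
}

int main(void)
{
	/* hypothetical counter deltas for one evaluation interval */
	printf("action: %d\n", rc0_residency_decision(95, 100));
	return 0;
}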
WRPLL_CTL(pll) (pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2) #define WRPLL_PLL_ENABLE (1<<31) -#define WRPLL_PLL_SELECT_SSC (0x01<<28) -#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) -#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) +#define WRPLL_PLL_SSC (1<<28) +#define WRPLL_PLL_NON_SSC (2<<28) +#define WRPLL_PLL_LCPLL (3<<28) +#define WRPLL_PLL_REF_MASK (3<<28) /* WRPLL divider programming */ #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) #define WRPLL_DIVIDER_REF_MASK (0xff) @@ -5880,6 +5953,7 @@ enum punit_power_well { #define PORT_CLK_SEL_LCPLL_1350 (1<<29) #define PORT_CLK_SEL_LCPLL_810 (2<<29) #define PORT_CLK_SEL_SPLL (3<<29) +#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29) #define PORT_CLK_SEL_WRPLL1 (4<<29) #define PORT_CLK_SEL_WRPLL2 (5<<29) #define PORT_CLK_SEL_NONE (7<<29) @@ -5921,7 +5995,10 @@ enum punit_power_well { #define LCPLL_CD_SOURCE_FCLK (1<<21) #define LCPLL_CD_SOURCE_FCLK_DONE (1<<19) -#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) +/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, + * since on HSW we can't write to it using I915_WRITE. */ +#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) +#define D_COMP_BDW 0x138144 #define D_COMP_RCOMP_IN_PROGRESS (1<<9) #define D_COMP_COMP_FORCE (1<<8) #define D_COMP_COMP_DISABLE (1<<0) @@ -6002,7 +6079,8 @@ enum punit_power_well { #define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) #define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) -#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL) +#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \ + _MIPIB_PORT_CTRL) #define DPI_ENABLE (1 << 31) /* A + B */ #define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 #define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27) @@ -6044,18 +6122,20 @@ enum punit_power_well { #define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) #define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) -#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL) +#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \ + _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL) #define TEARING_EFFECT_DELAY_SHIFT 0 #define TEARING_EFFECT_DELAY_MASK (0xffff << 0) /* XXX: all bits reserved */ -#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0) +#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0) /* MIPI DSI Controller and D-PHY registers */ -#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000) -#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800) -#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY) +#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) +#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) +#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \ + _MIPIB_DEVICE_READY) #define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ #define ULPS_STATE_MASK (3 << 1) #define ULPS_STATE_ENTER (2 << 1) @@ -6063,12 +6143,14 @@ enum punit_power_well { #define ULPS_STATE_NORMAL_OPERATION (0 << 1) #define DEVICE_READY (1 << 0) -#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004) -#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804) -#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT) -#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008) -#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808) -#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN) +#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) +#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) +#define MIPI_INTR_STAT(tc) 
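/*
 * Editor's note on the parameterized selectors added above: WRPLLs 0 and 1
 * map to port clock select codes 4 and 5 in bits 31:29, so
 * PORT_CLK_SEL_WRPLL(pll) is just ((pll) + 4) << 29. A minimal sanity
 * check of that identity:
 */
#include <assert.h>
#include <stdint.h>

#define PORT_CLK_SEL_WRPLL(pll)	((uint32_t)((pll) + 4) << 29)
#define PORT_CLK_SEL_WRPLL1	(4u << 29)
#define PORT_CLK_SEL_WRPLL2	(5u << 29)

int main(void)
{
	assert(PORT_CLK_SEL_WRPLL(0) == PORT_CLK_SEL_WRPLL1);
	assert(PORT_CLK_SEL_WRPLL(1) == PORT_CLK_SEL_WRPLL2);
	return 0;
}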
_TRANSCODER(tc, _MIPIA_INTR_STAT, \ + _MIPIB_INTR_STAT) +#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) +#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) +#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \ + _MIPIB_INTR_EN) #define TEARING_EFFECT (1 << 31) #define SPL_PKT_SENT_INTERRUPT (1 << 30) #define GEN_READ_DATA_AVAIL (1 << 29) @@ -6102,9 +6184,10 @@ enum punit_power_well { #define RXSOT_SYNC_ERROR (1 << 1) #define RXSOT_ERROR (1 << 0) -#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c) -#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c) -#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG) +#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) +#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) +#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \ + _MIPIB_DSI_FUNC_PRG) #define CMD_MODE_DATA_WIDTH_MASK (7 << 13) #define CMD_MODE_NOT_SUPPORTED (0 << 13) #define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) @@ -6125,78 +6208,94 @@ enum punit_power_well { #define DATA_LANES_PRG_REG_SHIFT 0 #define DATA_LANES_PRG_REG_MASK (7 << 0) -#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010) -#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810) -#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT) +#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) +#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) +#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \ + _MIPIB_HS_TX_TIMEOUT) #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff -#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014) -#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814) -#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT) +#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) +#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) +#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \ + _MIPIB_LP_RX_TIMEOUT) #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff -#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018) -#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818) -#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT) +#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) +#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) +#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \ + _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT) #define TURN_AROUND_TIMEOUT_MASK 0x3f -#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c) -#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c) -#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER) +#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) +#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) +#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \ + _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER) #define DEVICE_RESET_TIMER_MASK 0xffff -#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020) -#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820) -#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION) +#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) +#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) +#define 
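/*
 * Editor's sketch of the _TRANSCODER()/_PIPE() address arithmetic these
 * MIPI conversions rely on: linear interpolation between the instance-A
 * and instance-B addresses, a + index * (b - a). With the MIPI blocks at
 * base + 0xb0xx and base + 0xb8xx, index 0 selects the A instance and
 * index 1 the B instance. The base value below is a hypothetical stand-in
 * for dev_priv->mipi_mmio_base.
 */
#include <stdint.h>
#include <stdio.h>

#define REG2(idx, a, b) ((a) + (idx) * ((b) - (a)))

int main(void)
{
	uint32_t mipi_mmio_base = 0x60000;	/* hypothetical */

	printf("0x%x 0x%x\n",
	       (unsigned)REG2(0, mipi_mmio_base + 0xb004, mipi_mmio_base + 0xb804),
	       (unsigned)REG2(1, mipi_mmio_base + 0xb004, mipi_mmio_base + 0xb804));
	return 0;
}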
MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \ + _MIPIB_DPI_RESOLUTION) #define VERTICAL_ADDRESS_SHIFT 16 #define VERTICAL_ADDRESS_MASK (0xffff << 16) #define HORIZONTAL_ADDRESS_SHIFT 0 #define HORIZONTAL_ADDRESS_MASK 0xffff -#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024) -#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824) -#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE) +#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) +#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) +#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \ + _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE) #define DBI_FIFO_EMPTY_HALF (0 << 0) #define DBI_FIFO_EMPTY_QUARTER (1 << 0) #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) /* regs below are bits 15:0 */ -#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028) -#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828) -#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT) - -#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c) -#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c) -#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT) - -#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030) -#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830) -#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT) - -#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034) -#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834) -#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT) - -#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038) -#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838) -#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT) - -#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c) -#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c) -#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT) +#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) +#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) +#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \ + _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT) + +#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) +#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) +#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \ + _MIPIB_HBP_COUNT) + +#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) +#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) +#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \ + _MIPIB_HFP_COUNT) + +#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) +#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) +#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \ + _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT) + +#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) +#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) +#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \ + _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT) + +#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) +#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) +#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \ + _MIPIB_VBP_COUNT) + +#define 
_MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) +#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840) +#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \ + _MIPIB_VFP_COUNT) + +#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) +#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) +#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \ + _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT) -#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040) -#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840) -#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT) - -#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044) -#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844) -#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT) /* regs above are bits 15:0 */ -#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048) -#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848) -#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL) +#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) +#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) +#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \ + _MIPIB_DPI_CONTROL) #define DPI_LP_MODE (1 << 6) #define BACKLIGHT_OFF (1 << 5) #define BACKLIGHT_ON (1 << 4) @@ -6205,27 +6304,31 @@ enum punit_power_well { #define TURN_ON (1 << 1) #define SHUTDOWN (1 << 0) -#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c) -#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c) -#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA) +#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) +#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) +#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \ + _MIPIB_DPI_DATA) #define COMMAND_BYTE_SHIFT 0 #define COMMAND_BYTE_MASK (0x3f << 0) -#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050) -#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850) -#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT) +#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) +#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) +#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \ + _MIPIB_INIT_COUNT) #define MASTER_INIT_TIMER_SHIFT 0 #define MASTER_INIT_TIMER_MASK (0xffff << 0) -#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054) -#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854) -#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE) +#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) +#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) +#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \ + _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE) #define MAX_RETURN_PKT_SIZE_SHIFT 0 #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) -#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058) -#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858) -#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT) +#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) +#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) +#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \ + _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT) #define 
RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) #define DISABLE_VIDEO_BTA (1 << 3) #define IP_TG_CONFIG (1 << 2) @@ -6233,9 +6336,10 @@ enum punit_power_well { #define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) #define VIDEO_MODE_BURST (3 << 0) -#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c) -#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c) -#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE) +#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) +#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) +#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \ + _MIPIB_EOT_DISABLE) #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) @@ -6245,28 +6349,33 @@ enum punit_power_well { #define CLOCKSTOP (1 << 1) #define EOT_DISABLE (1 << 0) -#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060) -#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860) -#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK) +#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060) +#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) +#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \ + _MIPIB_LP_BYTECLK) #define LP_BYTECLK_SHIFT 0 #define LP_BYTECLK_MASK (0xffff << 0) /* bits 31:0 */ -#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064) -#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864) -#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA) +#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) +#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) +#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \ + _MIPIB_LP_GEN_DATA) /* bits 31:0 */ -#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068) -#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868) -#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA) - -#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c) -#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c) -#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL) -#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070) -#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870) -#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL) +#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) +#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) +#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \ + _MIPIB_HS_GEN_DATA) + +#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) +#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) +#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \ + _MIPIB_LP_GEN_CTRL) +#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) +#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) +#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \ + _MIPIB_HS_GEN_CTRL) #define LONG_PACKET_WORD_COUNT_SHIFT 8 #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) #define SHORT_PACKET_PARAM_SHIFT 8 @@ -6277,9 +6386,10 @@ enum punit_power_well { #define DATA_TYPE_MASK (3f << 0) /* data type values, see include/video/mipi_display.h */ -#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074) -#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874) -#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT) +#define 
_MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) +#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) +#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \ + _MIPIB_GEN_FIFO_STAT) #define DPI_FIFO_EMPTY (1 << 28) #define DBI_FIFO_EMPTY (1 << 27) #define LP_CTRL_FIFO_EMPTY (1 << 26) @@ -6295,16 +6405,18 @@ enum punit_power_well { #define HS_DATA_FIFO_HALF_EMPTY (1 << 1) #define HS_DATA_FIFO_FULL (1 << 0) -#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078) -#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878) -#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE) +#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) +#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) +#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \ + _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE) #define DBI_HS_LP_MODE_MASK (1 << 0) #define DBI_LP_MODE (1 << 0) #define DBI_HS_MODE (0 << 0) -#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080) -#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880) -#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM) +#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) +#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) +#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \ + _MIPIB_DPHY_PARAM) #define EXIT_ZERO_COUNT_SHIFT 24 #define EXIT_ZERO_COUNT_MASK (0x3f << 24) #define TRAIL_COUNT_SHIFT 16 @@ -6315,34 +6427,41 @@ enum punit_power_well { #define PREPARE_COUNT_MASK (0x3f << 0) /* bits 31:0 */ -#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084) -#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884) -#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL) - -#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088) -#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888) -#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT) +#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) +#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) +#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \ + _MIPIB_DBI_BW_CTRL) + +#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ + + 0xb088) +#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ + + 0xb888) +#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \ + _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT) #define LP_HS_SSW_CNT_SHIFT 16 #define LP_HS_SSW_CNT_MASK (0xffff << 16) #define HS_LP_PWR_SW_CNT_SHIFT 0 #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) -#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c) -#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c) -#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL) +#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) +#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) +#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \ + _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL) #define STOP_STATE_STALL_COUNTER_SHIFT 0 #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) -#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090) -#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890) -#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1) -#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE 
+ 0xb094) -#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894) -#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1) +#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) +#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) +#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \ + _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1) +#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) +#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) +#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \ + _MIPIB_INTR_EN_REG_1) #define RX_CONTENTION_DETECTED (1 << 0) /* XXX: only pipe A ?!? */ -#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100) +#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100) #define DBI_TYPEC_ENABLE (1 << 31) #define DBI_TYPEC_WIP (1 << 30) #define DBI_TYPEC_OPTION_SHIFT 28 @@ -6356,9 +6475,10 @@ enum punit_power_well { /* MIPI adapter registers */ -#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104) -#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904) -#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL) +#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) +#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904) +#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \ + _MIPIB_CTRL) #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) @@ -6370,50 +6490,52 @@ enum punit_power_well { #define READ_REQUEST_PRIORITY_HIGH (3 << 3) #define RGB_FLIP_TO_BGR (1 << 2) -#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108) -#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908) -#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS) +#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) +#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) +#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \ + _MIPIB_DATA_ADDRESS) #define DATA_MEM_ADDRESS_SHIFT 5 #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) #define DATA_VALID (1 << 0) -#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c) -#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c) -#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH) +#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) +#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) +#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \ + _MIPIB_DATA_LENGTH) #define DATA_LENGTH_SHIFT 0 #define DATA_LENGTH_MASK (0xfffff << 0) -#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110) -#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910) -#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS) +#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) +#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) +#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \ + _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS) #define COMMAND_MEM_ADDRESS_SHIFT 5 #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) #define AUTO_PWG_ENABLE (1 << 2) #define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) #define COMMAND_VALID (1 << 0) -#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114) -#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914) -#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH) +#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) +#define 
_MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) +#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \ + _MIPIB_COMMAND_LENGTH) #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) -#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118) -#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918) -#define MIPI_READ_DATA_RETURN(pipe, n) \ - (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ +#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) +#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) +#define MIPI_READ_DATA_RETURN(tc, n) \ + (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \ + + 4 * (n)) /* n: 0...7 */ -#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138) -#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938) -#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) +#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) +#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) +#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \ + _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) #define READ_DATA_VALID(n) (1 << (n)) /* For UMS only (deprecated): */ #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) -#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) -#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) -#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) -#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 5a045d3bd77e..88db4b6b6884 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); } +static void hsw_crt_pre_enable(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n"); + I915_WRITE(SPLL_CTL, + SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC); + POSTING_READ(SPLL_CTL); + udelay(20); +} + /* Note: The caller is required to filter out dpms modes not supported by the * platform. 
*/ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) @@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder) intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); } + +static void hsw_crt_post_disable(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t val; + + DRM_DEBUG_KMS("Disabling SPLL\n"); + val = I915_READ(SPLL_CTL); + WARN_ON(!(val & SPLL_PLL_ENABLE)); + I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); + POSTING_READ(SPLL_CTL); +} + static void intel_enable_crt(struct intel_encoder *encoder) { struct intel_crt *crt = intel_encoder_to_crt(encoder); @@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, pipe_config->pipe_bpp = 24; /* FDI must always be 2.7 GHz */ - if (HAS_DDI(dev)) + if (HAS_DDI(dev)) { + pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL; pipe_config->port_clock = 135000 * 2; + } return true; } @@ -860,6 +888,8 @@ void intel_crt_init(struct drm_device *dev) if (HAS_DDI(dev)) { crt->base.get_config = hsw_crt_get_config; crt->base.get_hw_state = intel_ddi_get_hw_state; + crt->base.pre_enable = hsw_crt_pre_enable; + crt->base.post_disable = hsw_crt_post_disable; } else { crt->base.get_config = intel_crt_get_config; crt->base.get_hw_state = intel_crt_get_hw_state; @@ -869,7 +899,7 @@ void intel_crt_init(struct drm_device *dev) drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); if (!I915_HAS_HOTPLUG(dev)) intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b17b9c7c769f..9b1542f1cf01 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -76,12 +76,12 @@ static const u32 bdw_ddi_translations_edp[] = { 0x00FFFFFF, 0x00000012, /* eDP parameters */ 0x00EBAFFF, 0x00020011, 0x00C71FFF, 0x0006000F, + 0x00AAAFFF, 0x000E000A, 0x00FFFFFF, 0x00020011, 0x00DB6FFF, 0x0005000F, 0x00BEEFFF, 0x000A000C, 0x00FFFFFF, 0x0005000F, 0x00DB6FFF, 0x000A000C, - 0x00FFFFFF, 0x000A000C, 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ }; @@ -89,12 +89,12 @@ static const u32 bdw_ddi_translations_dp[] = { 0x00FFFFFF, 0x0007000E, /* DP parameters */ 0x00D75FFF, 0x000E000A, 0x00BEFFFF, 0x00140006, + 0x80B2CFFF, 0x001B0002, 0x00FFFFFF, 0x000E000A, 0x00D75FFF, 0x00180004, 0x80CB2FFF, 0x001B0002, 0x00F7DFFF, 0x00180004, 0x80D75FFF, 0x001B0002, - 0x80FFFFFF, 0x001B0002, 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ }; @@ -116,7 +116,10 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) struct drm_encoder *encoder = &intel_encoder->base; int type = intel_encoder->type; - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || + if (type == INTEL_OUTPUT_DP_MST) { + struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary; + return intel_dig_port->port; + } else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); @@ -277,7 +280,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); /* Configure Port Clock Select */ - I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel); + I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel); + WARN_ON(intel_crtc->config.ddi_pll_sel != 
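/*
 * Editor's sketch (not the driver code) of the SPLL handling that the
 * hunks above move into the CRT encoder's pre_enable/post_disable hooks:
 * warn if the PLL is already up, set the enable bit together with the
 * chosen reference, flush the write and give the PLL time to lock; on
 * disable, clear only the enable bit. Register I/O and the bit encodings
 * are stubbed/hypothetical here.
 */
#include <assert.h>
#include <stdint.h>

#define PLL_ENABLE	(1u << 31)
#define PLL_FREQ_1350	(1u << 26)	/* hypothetical encoding */
#define PLL_SSC		(1u << 28)	/* hypothetical encoding */

static uint32_t spll_ctl;		/* stands in for the SPLL_CTL register */

static void spll_enable(void)
{
	assert(!(spll_ctl & PLL_ENABLE));	/* "SPLL already enabled" */
	spll_ctl = PLL_ENABLE | PLL_FREQ_1350 | PLL_SSC;
	/* the driver follows this with a posting read and ~20us delay */
}

static void spll_disable(void)
{
	assert(spll_ctl & PLL_ENABLE);
	spll_ctl &= ~PLL_ENABLE;
}

int main(void)
{
	spll_enable();
	spll_disable();
	return 0;
}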
PORT_CLK_SEL_SPLL); /* Start the training iterating through available voltages and emphasis, * testing each value twice. */ @@ -364,6 +368,18 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) DRM_ERROR("FDI link training failed!\n"); } +void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *intel_dig_port = + enc_to_dig_port(&encoder->base); + + intel_dp->DP = intel_dig_port->saved_port_bits | + DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; + intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); + +} + static struct intel_encoder * intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) { @@ -385,53 +401,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) return ret; } -void intel_ddi_put_crtc_pll(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - uint32_t val; - - switch (intel_crtc->ddi_pll_sel) { - case PORT_CLK_SEL_SPLL: - plls->spll_refcount--; - if (plls->spll_refcount == 0) { - DRM_DEBUG_KMS("Disabling SPLL\n"); - val = I915_READ(SPLL_CTL); - WARN_ON(!(val & SPLL_PLL_ENABLE)); - I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); - POSTING_READ(SPLL_CTL); - } - break; - case PORT_CLK_SEL_WRPLL1: - plls->wrpll1_refcount--; - if (plls->wrpll1_refcount == 0) { - DRM_DEBUG_KMS("Disabling WRPLL 1\n"); - val = I915_READ(WRPLL_CTL1); - WARN_ON(!(val & WRPLL_PLL_ENABLE)); - I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL1); - } - break; - case PORT_CLK_SEL_WRPLL2: - plls->wrpll2_refcount--; - if (plls->wrpll2_refcount == 0) { - DRM_DEBUG_KMS("Disabling WRPLL 2\n"); - val = I915_READ(WRPLL_CTL2); - WARN_ON(!(val & WRPLL_PLL_ENABLE)); - I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL2); - } - break; - } - - WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n"); - WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n"); - WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n"); - - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; -} - #define LC_FREQ 2700 #define LC_FREQ_2K (LC_FREQ * 2000) @@ -592,9 +561,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, u32 wrpll; wrpll = I915_READ(reg); - switch (wrpll & SPLL_PLL_REF_MASK) { - case SPLL_PLL_SSC: - case SPLL_PLL_NON_SSC: + switch (wrpll & WRPLL_PLL_REF_MASK) { + case WRPLL_PLL_SSC: + case WRPLL_PLL_NON_SSC: /* * We could calculate spread here, but our checking * code only cares about 5% accuracy, and spread is a max of @@ -602,7 +571,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, */ refclk = 135; break; - case SPLL_PLL_LCPLL: + case WRPLL_PLL_LCPLL: refclk = LC_FREQ; break; default: @@ -618,15 +587,14 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, return (refclk * n * 100) / (p * r); } -static void intel_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_config *pipe_config) +void intel_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - enum port port = intel_ddi_get_encoder_port(encoder); int link_clock = 0; u32 val, pll; - val = I915_READ(PORT_CLK_SEL(port)); + val = pipe_config->ddi_pll_sel; switch (val & PORT_CLK_SEL_MASK) { case PORT_CLK_SEL_LCPLL_810: link_clock = 81000; @@ -750,173 +718,37 @@ bool intel_ddi_pll_select(struct intel_crtc 
*intel_crtc) { struct drm_crtc *crtc = &intel_crtc->base; struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; int type = intel_encoder->type; - enum pipe pipe = intel_crtc->pipe; int clock = intel_crtc->config.port_clock; - intel_ddi_put_crtc_pll(crtc); - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - switch (intel_dp->link_bw) { - case DP_LINK_BW_1_62: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; - break; - case DP_LINK_BW_2_7: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; - break; - case DP_LINK_BW_5_4: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; - break; - default: - DRM_ERROR("Link bandwidth %d unsupported\n", - intel_dp->link_bw); - return false; - } + intel_put_shared_dpll(intel_crtc); - } else if (type == INTEL_OUTPUT_HDMI) { - uint32_t reg, val; + if (type == INTEL_OUTPUT_HDMI) { + struct intel_shared_dpll *pll; + uint32_t val; unsigned p, n2, r2; intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); - val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | + val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p); - if (val == I915_READ(WRPLL_CTL1)) { - DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n", - pipe_name(pipe)); - reg = WRPLL_CTL1; - } else if (val == I915_READ(WRPLL_CTL2)) { - DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n", - pipe_name(pipe)); - reg = WRPLL_CTL2; - } else if (plls->wrpll1_refcount == 0) { - DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n", - pipe_name(pipe)); - reg = WRPLL_CTL1; - } else if (plls->wrpll2_refcount == 0) { - DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n", - pipe_name(pipe)); - reg = WRPLL_CTL2; - } else { - DRM_ERROR("No WRPLLs available!\n"); - return false; - } - - DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", - clock, p, n2, r2); - - if (reg == WRPLL_CTL1) { - plls->wrpll1_refcount++; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1; - } else { - plls->wrpll2_refcount++; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2; - } + intel_crtc->config.dpll_hw_state.wrpll = val; - } else if (type == INTEL_OUTPUT_ANALOG) { - if (plls->spll_refcount == 0) { - DRM_DEBUG_KMS("Using SPLL on pipe %c\n", - pipe_name(pipe)); - plls->spll_refcount++; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL; - } else { - DRM_ERROR("SPLL already in use\n"); + pll = intel_get_shared_dpll(intel_crtc); + if (pll == NULL) { + DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", + pipe_name(intel_crtc->pipe)); return false; } - } else { - WARN(1, "Invalid DDI encoder type %d\n", type); - return false; + intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id); } return true; } -/* - * To be called after intel_ddi_pll_select(). That one selects the PLL to be - * used, this one actually enables the PLL. 
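The rewritten intel_ddi_pll_select() above no longer hand-rolls WRPLL1/WRPLL2 refcounts; it records the desired register value in config.dpll_hw_state.wrpll and defers to intel_get_shared_dpll(). Conceptually that helper has to do something like the following (a sketch only; the real implementation lives in intel_display.c, is made non-static by this patch, and also handles dropping the crtc's old PLL and logging):

static struct intel_shared_dpll *
get_matching_dpll_sketch(struct drm_i915_private *dev_priv,
			 const struct intel_dpll_hw_state *state)
{
	int i;

	/* First try to share a PLL already programmed to the same state. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (pll->refcount &&
		    memcmp(&pll->hw_state, state, sizeof(*state)) == 0)
			return pll;
	}

	/* Otherwise grab a completely unused one. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (pll->refcount == 0)
			return pll;
	}

	return NULL;	/* caller fails the modeset, as in the hunk above */
}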
- */ -void intel_ddi_pll_enable(struct intel_crtc *crtc) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; - int clock = crtc->config.port_clock; - uint32_t reg, cur_val, new_val; - int refcount; - const char *pll_name; - uint32_t enable_bit = (1 << 31); - unsigned int p, n2, r2; - - BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE); - BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE); - - switch (crtc->ddi_pll_sel) { - case PORT_CLK_SEL_LCPLL_2700: - case PORT_CLK_SEL_LCPLL_1350: - case PORT_CLK_SEL_LCPLL_810: - /* - * LCPLL should always be enabled at this point of the mode set - * sequence, so nothing to do. - */ - return; - - case PORT_CLK_SEL_SPLL: - pll_name = "SPLL"; - reg = SPLL_CTL; - refcount = plls->spll_refcount; - new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | - SPLL_PLL_SSC; - break; - - case PORT_CLK_SEL_WRPLL1: - case PORT_CLK_SEL_WRPLL2: - if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) { - pll_name = "WRPLL1"; - reg = WRPLL_CTL1; - refcount = plls->wrpll1_refcount; - } else { - pll_name = "WRPLL2"; - reg = WRPLL_CTL2; - refcount = plls->wrpll2_refcount; - } - - intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); - - new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | - WRPLL_DIVIDER_REFERENCE(r2) | - WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p); - - break; - - case PORT_CLK_SEL_NONE: - WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n"); - return; - default: - WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel); - return; - } - - cur_val = I915_READ(reg); - - WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount); - if (refcount == 1) { - WARN(cur_val & enable_bit, "%s already enabled\n", pll_name); - I915_WRITE(reg, new_val); - POSTING_READ(reg); - udelay(20); - } else { - WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name); - } -} - void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->dev->dev_private; @@ -926,8 +758,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) int type = intel_encoder->type; uint32_t temp; - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - + if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) { temp = TRANS_MSA_SYNC_CLK; switch (intel_crtc->config.pipe_bpp) { case 18: @@ -949,6 +780,21 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) } } +void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + uint32_t temp; + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (state == true) + temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + else + temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); +} + void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -995,7 +841,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) * eDP when not using the panel fitter, and when not * using motion blur mitigation (which we don't * support). 
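intel_ddi_set_vc_payload_alloc() above is the transcoder-side knob for DP MST: it toggles TRANS_DDI_DP_VC_PAYLOAD_ALLOC in TRANS_DDI_FUNC_CTL. Its call sites appear in the intel_display.c hunks further down, bracketed by the MST check in both directions:

/*
 * From the haswell_crtc_enable()/ironlake_crtc_disable() hunks below:
 *
 *   if (intel_crtc->config.dp_encoder_is_mst)
 *           intel_ddi_set_vc_payload_alloc(crtc, true);   enable path
 *   ...
 *   if (intel_crtc->config.dp_encoder_is_mst)
 *           intel_ddi_set_vc_payload_alloc(crtc, false);  disable path
 */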
*/ - if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled) + if (IS_HASWELL(dev) && + (intel_crtc->config.pch_pfit.enabled || + intel_crtc->config.pch_pfit.force_thru)) temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; else temp |= TRANS_DDI_EDP_INPUT_A_ON; @@ -1026,7 +874,19 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - temp |= TRANS_DDI_MODE_SELECT_DP_SST; + if (intel_dp->is_mst) { + temp |= TRANS_DDI_MODE_SELECT_DP_MST; + } else + temp |= TRANS_DDI_MODE_SELECT_DP_SST; + + temp |= DDI_PORT_WIDTH(intel_dp->lane_count); + } else if (type == INTEL_OUTPUT_DP_MST) { + struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp; + + if (intel_dp->is_mst) { + temp |= TRANS_DDI_MODE_SELECT_DP_MST; + } else + temp |= TRANS_DDI_MODE_SELECT_DP_SST; temp |= DDI_PORT_WIDTH(intel_dp->lane_count); } else { @@ -1043,7 +903,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); uint32_t val = I915_READ(reg); - val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK); + val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); val |= TRANS_DDI_PORT_NONE; I915_WRITE(reg, val); } @@ -1082,8 +942,11 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) case TRANS_DDI_MODE_SELECT_DP_SST: if (type == DRM_MODE_CONNECTOR_eDP) return true; - case TRANS_DDI_MODE_SELECT_DP_MST: return (type == DRM_MODE_CONNECTOR_DisplayPort); + case TRANS_DDI_MODE_SELECT_DP_MST: + /* if the transcoder is in MST state then + * connector isn't connected */ + return false; case TRANS_DDI_MODE_SELECT_FDI: return (type == DRM_MODE_CONNECTOR_VGA); @@ -1135,6 +998,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) { + if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST) + return false; + *pipe = i; return true; } @@ -1146,76 +1012,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, return false; } -static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - uint32_t temp, ret; - enum port port = I915_MAX_PORTS; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); - int i; - - if (cpu_transcoder == TRANSCODER_EDP) { - port = PORT_A; - } else { - temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); - temp &= TRANS_DDI_PORT_MASK; - - for (i = PORT_B; i <= PORT_E; i++) - if (temp == TRANS_DDI_SELECT_PORT(i)) - port = i; - } - - if (port == I915_MAX_PORTS) { - WARN(1, "Pipe %c enabled on an unknown port\n", - pipe_name(pipe)); - ret = PORT_CLK_SEL_NONE; - } else { - ret = I915_READ(PORT_CLK_SEL(port)); - DRM_DEBUG_KMS("Pipe %c connected to port %c using clock " - "0x%08x\n", pipe_name(pipe), port_name(port), - ret); - } - - return ret; -} - -void intel_ddi_setup_hw_pll_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe pipe; - struct intel_crtc *intel_crtc; - - dev_priv->ddi_plls.spll_refcount = 0; - dev_priv->ddi_plls.wrpll1_refcount = 0; - dev_priv->ddi_plls.wrpll2_refcount = 0; - - for_each_pipe(pipe) { - intel_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - - if (!intel_crtc->active) { - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; - continue; - } - - intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, - pipe); - - switch (intel_crtc->ddi_pll_sel) { - case 
PORT_CLK_SEL_SPLL: - dev_priv->ddi_plls.spll_refcount++; - break; - case PORT_CLK_SEL_WRPLL1: - dev_priv->ddi_plls.wrpll1_refcount++; - break; - case PORT_CLK_SEL_WRPLL2: - dev_priv->ddi_plls.wrpll2_refcount++; - break; - } - } -} - void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) { struct drm_crtc *crtc = &intel_crtc->base; @@ -1261,17 +1057,13 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_edp_panel_on(intel_dp); } - WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); - I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel); + WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE); + I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel); if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_digital_port *intel_dig_port = - enc_to_dig_port(encoder); - intel_dp->DP = intel_dig_port->saved_port_bits | - DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; - intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); + intel_ddi_init_dp_buf_reg(intel_encoder); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); @@ -1418,10 +1210,60 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) } } +static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll); + POSTING_READ(WRPLL_CTL(pll->id)); + udelay(20); +} + +static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + uint32_t val; + + val = I915_READ(WRPLL_CTL(pll->id)); + I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE); + POSTING_READ(WRPLL_CTL(pll->id)); +} + +static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + uint32_t val; + + if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + + val = I915_READ(WRPLL_CTL(pll->id)); + hw_state->wrpll = val; + + return val & WRPLL_PLL_ENABLE; +} + +static char *hsw_ddi_pll_names[] = { + "WRPLL 1", + "WRPLL 2", +}; + void intel_ddi_pll_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t val = I915_READ(LCPLL_CTL); + int i; + + dev_priv->num_shared_dpll = 2; + + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + dev_priv->shared_dplls[i].id = i; + dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; + dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable; + dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable; + dev_priv->shared_dplls[i].get_hw_state = + hsw_ddi_pll_get_hw_state; + } /* The LCPLL register should be turned on by the BIOS. For now let's * just check its state and print errors in case something is wrong. 
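With intel_ddi_pll_init() now registering WRPLL 1/2 as generic shared DPLLs, enable/disable ordering is owned by the shared-DPLL framework instead of open-coded refcounts. Roughly, condensed from the intel_enable_shared_dpll()/intel_disable_shared_dpll() hunks in intel_display.c below (WARNs and logging trimmed, and the active-counting is paraphrased):

/* first crtc to use the PLL */
if (pll->active++ == 0) {
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	pll->enable(dev_priv, pll);	/* -> hsw_ddi_pll_enable() */
	pll->on = true;
}

/* last crtc done with it */
if (--pll->active == 0) {
	pll->disable(dev_priv, pll);	/* -> hsw_ddi_pll_disable() */
	pll->on = false;
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}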
@@ -1465,10 +1307,15 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) intel_wait_ddi_buf_idle(dev_priv, port); } - val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | + val = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; - if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) - val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + if (intel_dp->is_mst) + val |= DP_TP_CTL_MODE_MST; + else { + val |= DP_TP_CTL_MODE_SST; + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + } I915_WRITE(DP_TP_CTL(port), val); POSTING_READ(DP_TP_CTL(port)); @@ -1507,11 +1354,16 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc) static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); - int type = intel_encoder->type; + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base); + int type = intel_dig_port->base.type; - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) - intel_dp_check_link_status(intel_dp); + if (type != INTEL_OUTPUT_DISPLAYPORT && + type != INTEL_OUTPUT_EDP && + type != INTEL_OUTPUT_UNKNOWN) { + return; + } + + intel_dp_hot_plug(intel_encoder); } void intel_ddi_get_config(struct intel_encoder *encoder, @@ -1705,6 +1557,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->cloneable = 0; intel_encoder->hot_plug = intel_ddi_hot_plug; + intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; + dev_priv->hpd_irq_port[port] = intel_dig_port; + if (init_dp) dp_connector = intel_ddi_init_dp_connector(intel_dig_port); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5f285fba4e41..7b542b477a4e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -39,12 +39,45 @@ #include "i915_trace.h" #include <drm/drm_dp_helper.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_rect.h> #include <linux/dma_remapping.h> +/* Primary plane formats supported by all gen */ +#define COMMON_PRIMARY_FORMATS \ + DRM_FORMAT_C8, \ + DRM_FORMAT_RGB565, \ + DRM_FORMAT_XRGB8888, \ + DRM_FORMAT_ARGB8888 + +/* Primary plane formats for gen <= 3 */ +static const uint32_t intel_primary_formats_gen2[] = { + COMMON_PRIMARY_FORMATS, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_ARGB1555, +}; + +/* Primary plane formats for gen >= 4 */ +static const uint32_t intel_primary_formats_gen4[] = { + COMMON_PRIMARY_FORMATS, \ + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ABGR2101010, +}; + +/* Cursor formats */ +static const uint32_t intel_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + #define DIV_ROUND_CLOSEST_ULL(ll, d) \ - ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) +({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) -static void intel_increase_pllclock(struct drm_crtc *crtc); +static void intel_increase_pllclock(struct drm_device *dev, + enum pipe pipe); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); static void i9xx_crtc_clock_get(struct intel_crtc *crtc, @@ -68,6 +101,14 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc); static void intel_set_pipe_csc(struct drm_crtc *crtc); static void vlv_prepare_pll(struct intel_crtc *crtc); +static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe) +{ + if (!connector->mst_port) + return 
connector->encoder; + else + return &connector->mst_port->mst_encoders[pipe]->base; +} + typedef struct { int min, max; } intel_range_t; @@ -1061,11 +1102,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool cur_state; struct intel_dpll_hw_state hw_state; - if (HAS_PCH_LPT(dev_priv->dev)) { - DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n"); - return; - } - if (WARN (!pll, "asserting DPLL %s with no DPLL\n", state_string(state))) return; @@ -1481,9 +1517,6 @@ static void intel_reset_dpio(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (!IS_VALLEYVIEW(dev)) - return; - if (IS_CHERRYVIEW(dev)) { enum dpio_phy phy; u32 val; @@ -1505,26 +1538,6 @@ static void intel_reset_dpio(struct drm_device *dev) I915_WRITE(DISPLAY_PHY_CONTROL, PHY_COM_LANE_RESET_DEASSERT(phy, val)); } - - } else { - /* - * If DPIO has already been reset, e.g. by BIOS, just skip all - * this. - */ - if (I915_READ(DPIO_CTL) & DPIO_CMNRST) - return; - - /* - * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: - * Need to assert and de-assert PHY SB reset by gating the - * common lane power, then un-gating it. - * Simply ungating isn't enough to reset the PHY enough to get - * ports and lanes running. - */ - __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC, - false); - __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC, - true); } } @@ -1712,6 +1725,17 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) val &= ~DPIO_DCLKP_EN; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); + /* disable left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + mutex_unlock(&dev_priv->dpio_lock); } @@ -1749,6 +1773,9 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); + if (WARN_ON(pll == NULL)) + return; + WARN_ON(!pll->refcount); if (pll->active == 0) { DRM_DEBUG_DRIVER("setting up %s\n", pll->name); @@ -1790,12 +1817,14 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc) } WARN_ON(pll->on); + intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); + DRM_DEBUG_KMS("enabling %s\n", pll->name); pll->enable(dev_priv, pll); pll->on = true; } -static void intel_disable_shared_dpll(struct intel_crtc *crtc) +void intel_disable_shared_dpll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1826,6 +1855,8 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc) DRM_DEBUG_KMS("disabling %s\n", pll->name); pll->disable(dev_priv, pll); pll->on = false; + + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); } static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, @@ -2087,6 +2118,7 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv, static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, enum plane plane, enum pipe pipe) { + struct drm_device *dev = dev_priv->dev; struct intel_crtc *intel_crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); int reg; @@ -2106,6 +2138,14 @@ static void 
intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); intel_flush_primary_plane(dev_priv, plane); + + /* + * BDW signals flip done immediately if the plane + * is disabled, even if the plane enable is already + * armed to occur at the next vblank :( + */ + if (IS_BROADWELL(dev)) + intel_wait_for_vblank(dev, intel_crtc->pipe); } /** @@ -2163,6 +2203,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, u32 alignment; int ret; + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + switch (obj->tiling_mode) { case I915_TILING_NONE: if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) @@ -2219,6 +2261,8 @@ err_interruptible: void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) { + WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); + i915_gem_object_unpin_fence(obj); i915_gem_object_unpin_from_display_plane(obj); } @@ -2305,6 +2349,7 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc, goto out_unref_obj; } + obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe); mutex_unlock(&dev->struct_mutex); DRM_DEBUG_KMS("plane fb obj %p\n", obj); @@ -2322,7 +2367,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc, struct drm_device *dev = intel_crtc->base.dev; struct drm_crtc *c; struct intel_crtc *i; - struct intel_framebuffer *fb; + struct drm_i915_gem_object *obj; if (!intel_crtc->base.primary->fb) return; @@ -2343,13 +2388,17 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc, if (c == &intel_crtc->base) continue; - if (!i->active || !c->primary->fb) + if (!i->active) + continue; + + obj = intel_fb_obj(c->primary->fb); + if (obj == NULL) continue; - fb = to_intel_framebuffer(c->primary->fb); - if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) { + if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { drm_framebuffer_reference(c->primary->fb); intel_crtc->base.primary->fb = c->primary->fb; + obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); break; } } @@ -2362,16 +2411,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; unsigned long linear_offset; u32 dspcntr; u32 reg; - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - reg = DSPCNTR(plane); dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ @@ -2452,16 +2497,12 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; unsigned long linear_offset; u32 dspcntr; u32 reg; - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - reg = DSPCNTR(plane); dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ @@ -2537,7 +2578,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, if (dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); - intel_increase_pllclock(crtc); + intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe); dev_priv->display.update_primary_plane(crtc, fb, x, y); @@ -2592,7 
+2633,7 @@ void intel_display_handle_reset(struct drm_device *dev) static int intel_finish_fb(struct drm_framebuffer *old_fb) { - struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(old_fb); struct drm_i915_private *dev_priv = obj->base.dev->dev_private; bool was_interruptible = dev_priv->mm.interruptible; int ret; @@ -2638,7 +2679,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_framebuffer *old_fb; + enum pipe pipe = intel_crtc->pipe; + struct drm_framebuffer *old_fb = crtc->primary->fb; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb); int ret; if (intel_crtc_has_pending_flip(crtc)) { @@ -2660,9 +2704,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, } mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, - to_intel_framebuffer(fb)->obj, - NULL); + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + if (ret == 0) + i915_gem_track_fb(old_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(pipe)); mutex_unlock(&dev->struct_mutex); if (ret != 0) { DRM_ERROR("pin & fence failed\n"); @@ -2702,7 +2747,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, dev_priv->display.update_primary_plane(crtc, fb, x, y); - old_fb = crtc->primary->fb; + if (intel_crtc->active) + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); + crtc->primary->fb = fb; crtc->x = x; crtc->y = y; @@ -2711,13 +2758,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (intel_crtc->active && old_fb != fb) intel_wait_for_vblank(dev, intel_crtc->pipe); mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); + intel_unpin_fb_obj(old_obj); mutex_unlock(&dev->struct_mutex); } mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); - intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); return 0; @@ -3578,7 +3624,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc) lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); } -static void intel_put_shared_dpll(struct intel_crtc *crtc) +void intel_put_shared_dpll(struct intel_crtc *crtc) { struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); @@ -3598,7 +3644,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc) crtc->config.shared_dpll = DPLL_ID_PRIVATE; } -static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) +struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); @@ -3851,30 +3897,6 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) */ } -/** - * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware - * cursor plane briefly if not already running after enabling the display - * plane. - * This workaround avoids occasional blank screens when self refresh is - * enabled. 
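The set_base path above now pairs every pin of a new scanout object with an i915_gem_track_fb() call. The invariant being maintained: at any time the INTEL_FRONTBUFFER_PRIMARY(pipe) bit is set on exactly the object currently being scanned out. Conceptually (the real helper also tolerates NULL old/new objects and asserts struct_mutex):

/* i915_gem_track_fb(old_obj, new_obj, bits), in essence: */
old_obj->frontbuffer_bits &= ~bits;	/* old scanout loses the planes */
new_obj->frontbuffer_bits |= bits;	/* new scanout owns them now */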
- */ -static void -g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - u32 cntl = I915_READ(CURCNTR(pipe)); - - if ((cntl & CURSOR_MODE) == 0) { - u32 fw_bcl_self = I915_READ(FW_BLC_SELF); - - I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN); - I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX); - intel_wait_for_vblank(dev_priv->dev, pipe); - I915_WRITE(CURCNTR(pipe), cntl); - I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe))); - I915_WRITE(FW_BLC_SELF, fw_bcl_self); - } -} - static void intel_crtc_enable_planes(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -3883,11 +3905,10 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc) int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; + drm_vblank_on(dev, pipe); + intel_enable_primary_hw_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); - /* The fixup needs to happen before cursor is enabled */ - if (IS_G4X(dev)) - g4x_fixup_plane(dev_priv, pipe); intel_crtc_update_cursor(crtc, true); intel_crtc_dpms_overlay(intel_crtc, true); @@ -3895,8 +3916,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc) mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); - intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); + + /* + * FIXME: Once we grow proper nuclear flip support out of this we need + * to compute the mask of flip planes precisely. For the time being + * consider this a flip from a NULL plane. + */ + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); } static void intel_crtc_disable_planes(struct drm_crtc *crtc) @@ -3908,7 +3935,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) int plane = intel_crtc->plane; intel_crtc_wait_for_pending_flips(crtc); - drm_crtc_vblank_off(crtc); if (dev_priv->fbc.plane == plane) intel_disable_fbc(dev); @@ -3919,6 +3945,15 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) intel_crtc_update_cursor(crtc, false); intel_disable_planes(crtc); intel_disable_primary_hw_plane(dev_priv, plane, pipe); + + /* + * FIXME: Once we grow proper nuclear flip support out of this we need + * to compute the mask of flip planes precisely. For the time being + * consider this a flip to a NULL plane. + */ + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); + + drm_vblank_off(dev, pipe); } static void ironlake_crtc_enable(struct drm_crtc *crtc) @@ -3997,8 +4032,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) cpt_verify_modeset(dev, intel_crtc->pipe); intel_crtc_enable_planes(crtc); - - drm_crtc_vblank_on(crtc); } /* IPS only exists on ULT machines and is tied to pipe A. 
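Two related ordering changes land in the plane enable/disable helpers above: vblank bookkeeping moves from the crtc hooks into the plane helpers, and a full plane enable/disable is reported to the new frontbuffer tracking as a flip from/to a NULL plane (the FIXME comments note that precise per-plane masks are left for later). The resulting bracket:

/*
 * enable:   drm_vblank_on(dev, pipe);
 *           ...enable primary/sprites/cursor...
 *           intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
 *
 * disable:  ...disable cursor/sprites/primary...
 *           intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
 *           drm_vblank_off(dev, pipe);
 */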
*/ @@ -4050,6 +4083,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->active) return; + if (intel_crtc_to_shared_dpll(intel_crtc)) + intel_enable_shared_dpll(intel_crtc); + if (intel_crtc->config.has_dp_encoder) intel_dp_set_m_n(intel_crtc); @@ -4074,16 +4110,15 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); - if (intel_crtc->config.has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); - - if (intel_crtc->config.has_pch_encoder) - dev_priv->display.fdi_link_train(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) encoder->pre_enable(encoder); + if (intel_crtc->config.has_pch_encoder) { + intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); + dev_priv->display.fdi_link_train(crtc); + } + intel_ddi_enable_pipe_clock(intel_crtc); ironlake_pfit_enable(intel_crtc); @@ -4103,6 +4138,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config.has_pch_encoder) lpt_pch_enable(crtc); + if (intel_crtc->config.dp_encoder_is_mst) + intel_ddi_set_vc_payload_alloc(crtc, true); + for_each_encoder_on_crtc(dev, crtc, encoder) { encoder->enable(encoder); intel_opregion_notify_encoder(encoder, true); @@ -4112,8 +4150,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) * to change the workaround. */ haswell_mode_set_planes_workaround(intel_crtc); intel_crtc_enable_planes(crtc); - - drm_crtc_vblank_on(crtc); } static void ironlake_pfit_disable(struct intel_crtc *crtc) @@ -4153,6 +4189,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_disable_pipe(dev_priv, pipe); + if (intel_crtc->config.dp_encoder_is_mst) + intel_ddi_set_vc_payload_alloc(crtc, false); + ironlake_pfit_disable(intel_crtc); for_each_encoder_on_crtc(dev, crtc, encoder) @@ -4191,7 +4230,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); - intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); } @@ -4224,23 +4262,25 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) intel_ddi_disable_pipe_clock(intel_crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->post_disable) - encoder->post_disable(encoder); - if (intel_crtc->config.has_pch_encoder) { lpt_disable_pch_transcoder(dev_priv); intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); intel_ddi_fdi_disable(crtc); } + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->post_disable) + encoder->post_disable(encoder); + intel_crtc->active = false; intel_update_watermarks(crtc); mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); - intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); + + if (intel_crtc_to_shared_dpll(intel_crtc)) + intel_disable_shared_dpll(intel_crtc); } static void ironlake_crtc_off(struct drm_crtc *crtc) @@ -4249,10 +4289,6 @@ static void ironlake_crtc_off(struct drm_crtc *crtc) intel_put_shared_dpll(intel_crtc); } -static void haswell_crtc_off(struct drm_crtc *crtc) -{ - intel_ddi_put_crtc_pll(crtc); -} static void i9xx_pfit_enable(struct intel_crtc *crtc) { @@ -4278,6 +4314,23 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc) I915_WRITE(BCLRPAT(crtc->pipe), 0); } +static enum intel_display_power_domain port_to_power_domain(enum port port) +{ + switch (port) { + case PORT_A: + return POWER_DOMAIN_PORT_DDI_A_4_LANES; + case PORT_B: + return POWER_DOMAIN_PORT_DDI_B_4_LANES; + case PORT_C: + return 
POWER_DOMAIN_PORT_DDI_C_4_LANES; + case PORT_D: + return POWER_DOMAIN_PORT_DDI_D_4_LANES; + default: + WARN_ON_ONCE(1); + return POWER_DOMAIN_PORT_OTHER; + } +} + #define for_each_power_domain(domain, mask) \ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ if ((1 << (domain)) & (mask)) @@ -4296,19 +4349,10 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder) case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: intel_dig_port = enc_to_dig_port(&intel_encoder->base); - switch (intel_dig_port->port) { - case PORT_A: - return POWER_DOMAIN_PORT_DDI_A_4_LANES; - case PORT_B: - return POWER_DOMAIN_PORT_DDI_B_4_LANES; - case PORT_C: - return POWER_DOMAIN_PORT_DDI_C_4_LANES; - case PORT_D: - return POWER_DOMAIN_PORT_DDI_D_4_LANES; - default: - WARN_ON_ONCE(1); - return POWER_DOMAIN_PORT_OTHER; - } + return port_to_power_domain(intel_dig_port->port); + case INTEL_OUTPUT_DP_MST: + intel_dig_port = enc_to_mst(&intel_encoder->base)->primary; + return port_to_power_domain(intel_dig_port->port); case INTEL_OUTPUT_ANALOG: return POWER_DOMAIN_PORT_CRT; case INTEL_OUTPUT_DSI: @@ -4324,7 +4368,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) struct intel_encoder *intel_encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; - bool pfit_enabled = intel_crtc->config.pch_pfit.enabled; unsigned long mask; enum transcoder transcoder; @@ -4332,7 +4375,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) mask = BIT(POWER_DOMAIN_PIPE(pipe)); mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); - if (pfit_enabled) + if (intel_crtc->config.pch_pfit.enabled || + intel_crtc->config.pch_pfit.force_thru) mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); for_each_encoder_on_crtc(dev, crtc, intel_encoder) @@ -4389,7 +4433,8 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev) intel_display_set_init_power(dev_priv, false); } -int valleyview_get_vco(struct drm_i915_private *dev_priv) +/* returns HPLL frequency in kHz */ +static int valleyview_get_vco(struct drm_i915_private *dev_priv) { int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; @@ -4399,7 +4444,23 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv) CCK_FUSE_HPLL_FREQ_MASK; mutex_unlock(&dev_priv->dpio_lock); - return vco_freq[hpll_freq]; + return vco_freq[hpll_freq] * 1000; +} + +static void vlv_update_cdclk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev); + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz", + dev_priv->vlv_cdclk_freq); + + /* + * Program the gmbus_freq based on the cdclk frequency. + * BSpec erroneously claims we should aim for 4MHz, but + * in fact 1MHz is the correct frequency. 
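port_to_power_domain(), factored out above, lets the new INTEL_OUTPUT_DP_MST case share the port-to-power-well mapping with the SST encoder types. A usage sketch for code that needs a port's power well held while poking its registers (hypothetical caller; the get/put helpers are the same ones used throughout this diff):

enum intel_display_power_domain domain =
	port_to_power_domain(intel_dig_port->port);

intel_display_power_get(dev_priv, domain);
/* ... DDI registers for this port are safe to access here ... */
intel_display_power_put(dev_priv, domain);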
+ */ + I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq); } /* Adjust CDclk dividers to allow high res or save power if possible */ @@ -4408,12 +4469,11 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) struct drm_i915_private *dev_priv = dev->dev_private; u32 val, cmd; - WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq); - dev_priv->vlv_cdclk_freq = cdclk; + WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq); - if (cdclk >= 320) /* jump to highest voltage for 400MHz too */ + if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ cmd = 2; - else if (cdclk == 266) + else if (cdclk == 266667) cmd = 1; else cmd = 0; @@ -4430,18 +4490,23 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) } mutex_unlock(&dev_priv->rps.hw_lock); - if (cdclk == 400) { + if (cdclk == 400000) { u32 divider, vco; vco = valleyview_get_vco(dev_priv); - divider = ((vco << 1) / cdclk) - 1; + divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1; mutex_lock(&dev_priv->dpio_lock); /* adjust cdclk divider */ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); - val &= ~0xf; + val &= ~DISPLAY_FREQUENCY_VALUES; val |= divider; vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val); + + if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & + DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), + 50)) + DRM_ERROR("timed out waiting for CDclk change\n"); mutex_unlock(&dev_priv->dpio_lock); } @@ -4454,54 +4519,43 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) * For high bandwidth configs, we set a higher latency in the bunit * so that the core display fetch happens in time to avoid underruns. */ - if (cdclk == 400) + if (cdclk == 400000) val |= 4500 / 250; /* 4.5 usec */ else val |= 3000 / 250; /* 3.0 usec */ vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val); mutex_unlock(&dev_priv->dpio_lock); - /* Since we changed the CDclk, we need to update the GMBUSFREQ too */ - intel_i2c_reset(dev); -} - -int valleyview_cur_cdclk(struct drm_i915_private *dev_priv) -{ - int cur_cdclk, vco; - int divider; - - vco = valleyview_get_vco(dev_priv); - - mutex_lock(&dev_priv->dpio_lock); - divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); - mutex_unlock(&dev_priv->dpio_lock); - - divider &= 0xf; - - cur_cdclk = (vco << 1) / (divider + 1); - - return cur_cdclk; + vlv_update_cdclk(dev); } static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, int max_pixclk) { + int vco = valleyview_get_vco(dev_priv); + int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000; + /* * Really only a few cases to deal with, as only 4 CDclks are supported: * 200MHz * 267MHz - * 320MHz + * 320/333MHz (depends on HPLL freq) * 400MHz * So we check to see whether we're above 90% of the lower bin and * adjust if needed. + * + * We seem to get an unstable or solid color picture at 200MHz. + * Not sure what's wrong. For now use 200MHz only when all pipes + * are off. 
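The cdclk selection thresholds in the hunk just below are easiest to check with numbers. A worked example, assuming a 1600 MHz HPLL so the 320 MHz bin comes out exact:

/*
 * vco = 1600000 kHz  ->  (vco << 1) % 320000 == 0  ->  freq_320 = 320000
 *
 *   max_pixclk = 300000:  300000 > 320000*9/10 (288000)  ->  400000
 *   max_pixclk = 250000:  250000 > 266667*9/10 (240000)  ->  320000 (freq_320)
 *   max_pixclk = 100000:  100000 > 0                     ->  266667
 *   max_pixclk = 0 (all pipes off)                       ->  200000
 */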
*/ - if (max_pixclk > 288000) { - return 400; - } else if (max_pixclk > 240000) { - return 320; - } else - return 266; - /* Looks like the 200MHz CDclk freq doesn't work on some configs */ + if (max_pixclk > freq_320*9/10) + return 400000; + else if (max_pixclk > 266667*9/10) + return freq_320; + else if (max_pixclk > 0) + return 266667; + else + return 200000; } /* compute the max pixel clock for new configuration */ @@ -4624,8 +4678,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_crtc_enable_planes(crtc); - drm_crtc_vblank_on(crtc); - /* Underruns don't raise interrupts, so check manually. */ i9xx_check_fifo_underruns(dev); } @@ -4718,8 +4770,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) if (IS_GEN2(dev)) intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); - drm_crtc_vblank_on(crtc); - /* Underruns don't raise interrupts, so check manually. */ i9xx_check_fifo_underruns(dev); } @@ -4759,6 +4809,16 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (IS_GEN2(dev)) intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); + /* + * Vblank time updates from the shadow to live plane control register + * are blocked if the memory self-refresh mode is active at that + * moment. So to make sure the plane gets truly disabled, disable + * first the self-refresh mode. The self-refresh enable bit in turn + * will be checked/applied by the HW only at the next frame start + * event which is after the vblank start event, so we need to have a + * wait-for-vblank between disabling the plane and the pipe. + */ + intel_set_memory_cxsr(dev_priv, false); intel_crtc_disable_planes(crtc); for_each_encoder_on_crtc(dev, crtc, encoder) @@ -4767,9 +4827,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) /* * On gen2 planes are double buffered but the pipe isn't, so we must * wait for planes to fully turn off before disabling the pipe. + * We also need to wait on all gmch platforms because of the + * self-refresh mode constraint explained above. */ - if (IS_GEN2(dev)) - intel_wait_for_vblank(dev, pipe); + intel_wait_for_vblank(dev, pipe); intel_disable_pipe(dev_priv, pipe); @@ -4796,7 +4857,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); - intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); } @@ -4841,16 +4901,43 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *intel_encoder; + enum intel_display_power_domain domain; + unsigned long domains; bool enable = false; for_each_encoder_on_crtc(dev, crtc, intel_encoder) enable |= intel_encoder->connectors_active; - if (enable) - dev_priv->display.crtc_enable(crtc); - else - dev_priv->display.crtc_disable(crtc); + if (enable) { + if (!intel_crtc->active) { + /* + * FIXME: DDI plls and relevant code isn't converted + * yet, so do runtime PM for DPMS only for all other + * platforms for now. 
+ */ + if (!HAS_DDI(dev)) { + domains = get_crtc_power_domains(crtc); + for_each_power_domain(domain, domains) + intel_display_power_get(dev_priv, domain); + intel_crtc->enabled_power_domains = domains; + } + + dev_priv->display.crtc_enable(crtc); + } + } else { + if (intel_crtc->active) { + dev_priv->display.crtc_disable(crtc); + + if (!HAS_DDI(dev)) { + domains = intel_crtc->enabled_power_domains; + for_each_power_domain(domain, domains) + intel_display_power_put(dev_priv, domain); + intel_crtc->enabled_power_domains = 0; + } + } + } intel_crtc_update_sarea(crtc, enable); } @@ -4860,6 +4947,8 @@ static void intel_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_connector *connector; struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb); + enum pipe pipe = to_intel_crtc(crtc)->pipe; /* crtc should still be enabled when we disable it. */ WARN_ON(!crtc->enabled); @@ -4869,12 +4958,14 @@ static void intel_crtc_disable(struct drm_crtc *crtc) dev_priv->display.off(crtc); assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); - assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe); - assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); + assert_cursor_disabled(dev_priv, pipe); + assert_pipe_disabled(dev->dev_private, pipe); if (crtc->primary->fb) { mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj); + intel_unpin_fb_obj(old_obj); + i915_gem_track_fb(old_obj, NULL, + INTEL_FRONTBUFFER_PRIMARY(pipe)); mutex_unlock(&dev->struct_mutex); crtc->primary->fb = NULL; } @@ -4930,24 +5021,31 @@ static void intel_connector_check_state(struct intel_connector *connector) connector->base.base.id, connector->base.name); + /* there is no real hw state for MST connectors */ + if (connector->mst_port) + return; + WARN(connector->base.dpms == DRM_MODE_DPMS_OFF, "wrong connector dpms state\n"); WARN(connector->base.encoder != &encoder->base, "active connector not linked to encoder\n"); - WARN(!encoder->connectors_active, - "encoder->connectors_active not set\n"); - encoder_enabled = encoder->get_hw_state(encoder, &pipe); - WARN(!encoder_enabled, "encoder not enabled\n"); - if (WARN_ON(!encoder->base.crtc)) - return; + if (encoder) { + WARN(!encoder->connectors_active, + "encoder->connectors_active not set\n"); + + encoder_enabled = encoder->get_hw_state(encoder, &pipe); + WARN(!encoder_enabled, "encoder not enabled\n"); + if (WARN_ON(!encoder->base.crtc)) + return; - crtc = encoder->base.crtc; + crtc = encoder->base.crtc; - WARN(!crtc->enabled, "crtc not enabled\n"); - WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); - WARN(pipe != to_intel_crtc(crtc)->pipe, - "encoder active on the wrong pipe\n"); + WARN(!crtc->enabled, "crtc not enabled\n"); + WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); + WARN(pipe != to_intel_crtc(crtc)->pipe, + "encoder active on the wrong pipe\n"); + } } } @@ -5152,9 +5250,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, if (HAS_IPS(dev)) hsw_compute_ips_config(crtc, pipe_config); - /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old - * clock survives for now. */ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + /* + * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the + * old clock survives for now. 
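The DPMS hunk above gives dpms on/off the same power-domain discipline that a full modeset has on non-DDI platforms. The mask is cached in enabled_power_domains so that disable releases exactly the set that enable took, even if the configuration changed in between. Condensed:

/* enable: */
domains = get_crtc_power_domains(crtc);
for_each_power_domain(domain, domains)
	intel_display_power_get(dev_priv, domain);
intel_crtc->enabled_power_domains = domains;

/* disable: release precisely what was taken. */
for_each_power_domain(domain, intel_crtc->enabled_power_domains)
	intel_display_power_put(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;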
+ */ + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev)) pipe_config->shared_dpll = crtc->config.shared_dpll; if (pipe_config->has_pch_encoder) @@ -5165,7 +5265,22 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, static int valleyview_get_display_clock_speed(struct drm_device *dev) { - return 400000; /* FIXME */ + struct drm_i915_private *dev_priv = dev->dev_private; + int vco = valleyview_get_vco(dev_priv); + u32 val; + int divider; + + mutex_lock(&dev_priv->dpio_lock); + val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); + mutex_unlock(&dev_priv->dpio_lock); + + divider = val & DISPLAY_FREQUENCY_VALUES; + + WARN((val & DISPLAY_FREQUENCY_STATUS) != + (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), + "cdclk change in progress\n"); + + return DIV_ROUND_CLOSEST(vco << 1, divider + 1); } static int i945_get_display_clock_speed(struct drm_device *dev) @@ -6116,8 +6231,8 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc, aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, plane_config->tiled); - plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * - aligned_height, PAGE_SIZE); + plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] * + aligned_height); DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe, plane, crtc->base.primary->fb->width, @@ -7136,8 +7251,8 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc, aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, plane_config->tiled); - plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * - aligned_height, PAGE_SIZE); + plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] * + aligned_height); DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe, plane, crtc->base.primary->fb->width, @@ -7154,6 +7269,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; + if (!intel_display_power_enabled(dev_priv, + POWER_DOMAIN_PIPE(crtc->pipe))) + return false; + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; @@ -7228,7 +7347,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; struct intel_crtc *crtc; for_each_intel_crtc(dev, crtc) @@ -7236,9 +7354,9 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) pipe_name(crtc->pipe)); WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); - WARN(plls->spll_refcount, "SPLL enabled\n"); - WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n"); - WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n"); + WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); + WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); + WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, "CPU PWM1 enabled\n"); @@ -7259,6 +7377,16 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n"); } +static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + + if (IS_HASWELL(dev)) + return I915_READ(D_COMP_HSW); + else + return 
I915_READ(D_COMP_BDW); +} + static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) { struct drm_device *dev = dev_priv->dev; @@ -7267,12 +7395,12 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) mutex_lock(&dev_priv->rps.hw_lock); if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) - DRM_ERROR("Failed to disable D_COMP\n"); + DRM_ERROR("Failed to write to D_COMP\n"); mutex_unlock(&dev_priv->rps.hw_lock); } else { - I915_WRITE(D_COMP, val); + I915_WRITE(D_COMP_BDW, val); + POSTING_READ(D_COMP_BDW); } - POSTING_READ(D_COMP); } /* @@ -7310,12 +7438,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) DRM_ERROR("LCPLL still locked\n"); - val = I915_READ(D_COMP); + val = hsw_read_dcomp(dev_priv); val |= D_COMP_COMP_DISABLE; hsw_write_dcomp(dev_priv, val); ndelay(100); - if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) + if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, + 1)) DRM_ERROR("D_COMP RCOMP still in progress\n"); if (allow_power_down) { @@ -7364,7 +7493,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) POSTING_READ(LCPLL_CTL); } - val = I915_READ(D_COMP); + val = hsw_read_dcomp(dev_priv); val |= D_COMP_COMP_FORCE; val &= ~D_COMP_COMP_DISABLE; hsw_write_dcomp(dev_priv, val); @@ -7470,13 +7599,59 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, if (!intel_ddi_pll_select(intel_crtc)) return -EINVAL; - intel_ddi_pll_enable(intel_crtc); intel_crtc->lowfreq_avail = false; return 0; } +static void haswell_get_ddi_port_state(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_shared_dpll *pll; + enum port port; + uint32_t tmp; + + tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); + + port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; + + pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); + + switch (pipe_config->ddi_pll_sel) { + case PORT_CLK_SEL_WRPLL1: + pipe_config->shared_dpll = DPLL_ID_WRPLL1; + break; + case PORT_CLK_SEL_WRPLL2: + pipe_config->shared_dpll = DPLL_ID_WRPLL2; + break; + } + + if (pipe_config->shared_dpll >= 0) { + pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; + + WARN_ON(!pll->get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state)); + } + + /* + * Haswell has only FDI/PCH transcoder A. It is connected to + * DDI E. So just check whether this pipe is wired to DDI E and whether + * the PCH transcoder is on. + */ + if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { + pipe_config->has_pch_encoder = true; + + tmp = I915_READ(FDI_RX_CTL(PIPE_A)); + pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> + FDI_DP_PORT_WIDTH_SHIFT) + 1; + + ironlake_get_fdi_m_n_config(crtc, pipe_config); + } +} + static bool haswell_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) { @@ -7522,22 +7697,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, if (!(tmp & PIPECONF_ENABLE)) return false; - /* - * Haswell has only FDI/PCH transcoder A. It is which is connected to - * DDI E. So just check whether this pipe is wired to DDI E and whether - * the PCH transcoder is on. 
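haswell_get_ddi_port_state() above closes the readout loop for the shared WRPLLs: the PORT_CLK_SEL value is cached in the pipe config and, for WRPLL selections, mapped to a shared-DPLL id whose state is read back through the get_hw_state hook registered in intel_ddi_pll_init(). The round trip, with hypothetical register contents:

/*
 * PORT_CLK_SEL(port) reads PORT_CLK_SEL_WRPLL2
 *   -> pipe_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL2
 *   -> pipe_config->shared_dpll = DPLL_ID_WRPLL2
 *   -> pll->get_hw_state() fills dpll_hw_state.wrpll from WRPLL_CTL(pll->id)
 *
 * The LCPLL selections deliberately fall through the switch: LCPLL is
 * always running at this point of the modeset sequence, so no shared
 * DPLL needs to be claimed for them.
 */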
- */ - tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); - if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) && - I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { - pipe_config->has_pch_encoder = true; - - tmp = I915_READ(FDI_RX_CTL(PIPE_A)); - pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> - FDI_DP_PORT_WIDTH_SHIFT) + 1; - - ironlake_get_fdi_m_n_config(crtc, pipe_config); - } + haswell_get_ddi_port_state(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config); @@ -7982,8 +8142,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int x = intel_crtc->cursor_x; - int y = intel_crtc->cursor_y; + int x = crtc->cursor_x; + int y = crtc->cursor_y; u32 base = 0, pos = 0; if (on) @@ -8027,21 +8187,27 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, intel_crtc->cursor_base = base; } -static int intel_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file, - uint32_t handle, - uint32_t width, uint32_t height) +/* + * intel_crtc_cursor_set_obj - Set cursor to specified GEM object + * + * Note that the object's reference will be consumed if the update fails. If + * the update succeeds, the reference of the old object (if any) will be + * consumed. + */ +static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, + struct drm_i915_gem_object *obj, + uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_gem_object *obj; + enum pipe pipe = intel_crtc->pipe; unsigned old_width; uint32_t addr; int ret; /* if we want to turn off the cursor ignore width and height */ - if (!handle) { + if (!obj) { DRM_DEBUG_KMS("cursor off\n"); addr = 0; obj = NULL; @@ -8057,12 +8223,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; } - obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); - if (&obj->base == NULL) - return -ENOENT; - if (obj->base.size < width * height * 4) { - DRM_DEBUG_KMS("buffer is to small\n"); + DRM_DEBUG_KMS("buffer is too small\n"); ret = -ENOMEM; goto fail; } @@ -8117,9 +8279,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (intel_crtc->cursor_bo) { if (!INTEL_INFO(dev)->cursor_needs_physical) i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); - drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } + i915_gem_track_fb(intel_crtc->cursor_bo, obj, + INTEL_FRONTBUFFER_CURSOR(pipe)); mutex_unlock(&dev->struct_mutex); old_width = intel_crtc->cursor_width; @@ -8135,6 +8298,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); } + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe)); + return 0; fail_unpin: i915_gem_object_unpin_from_display_plane(obj); @@ -8145,19 +8310,6 @@ fail: return ret; } -static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX); - intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX); - - if (intel_crtc->active) - intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); - - return 0; -} - static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size) { @@ -8233,7 +8385,7 @@ static u32 
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) { u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); - return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); + return PAGE_ALIGN(pitch * mode->vdisplay); } static struct drm_framebuffer * @@ -8658,12 +8810,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, return mode; } -static void intel_increase_pllclock(struct drm_crtc *crtc) +static void intel_increase_pllclock(struct drm_device *dev, + enum pipe pipe) { - struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; int dpll_reg = DPLL(pipe); int dpll; @@ -8764,28 +8914,179 @@ out: intel_runtime_pm_put(dev_priv); } -void intel_mark_fb_busy(struct drm_i915_gem_object *obj, - struct intel_engine_cs *ring) + +/** + * intel_mark_fb_busy - mark given planes as busy + * @dev: DRM device + * @frontbuffer_bits: bits for the affected planes + * @ring: optional ring for asynchronous commands + * + * This function gets called every time the screen contents change. It can be + * used to keep e.g. the update rate at the nominal refresh rate with DRRS. + */ +static void intel_mark_fb_busy(struct drm_device *dev, + unsigned frontbuffer_bits, + struct intel_engine_cs *ring) { - struct drm_device *dev = obj->base.dev; - struct drm_crtc *crtc; + enum pipe pipe; if (!i915.powersave) return; - for_each_crtc(dev, crtc) { - if (!crtc->primary->fb) - continue; - - if (to_intel_framebuffer(crtc->primary->fb)->obj != obj) + for_each_pipe(pipe) { + if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe))) continue; - intel_increase_pllclock(crtc); + intel_increase_pllclock(dev, pipe); if (ring && intel_fbc_enabled(dev)) ring->fbc_dirty = true; } } +/** + * intel_fb_obj_invalidate - invalidate frontbuffer object + * @obj: GEM object to invalidate + * @ring: set for asynchronous rendering + * + * This function gets called every time rendering on the given object starts and + * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must + * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed + * until the rendering completes or a flip on this frontbuffer plane is + * scheduled. + */ +void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, + struct intel_engine_cs *ring) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + if (!obj->frontbuffer_bits) + return; + + if (ring) { + mutex_lock(&dev_priv->fb_tracking.lock); + dev_priv->fb_tracking.busy_bits + |= obj->frontbuffer_bits; + dev_priv->fb_tracking.flip_bits + &= ~obj->frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + } + + intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); + + intel_edp_psr_exit(dev); +} + +/** + * intel_frontbuffer_flush - flush frontbuffer + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called every time rendering on the given planes has + * completed and frontbuffer caching can be started again. Flushes will get + * delayed if they're blocked by some outstanding asynchronous rendering. + * + * Can be called without any locks held.
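+ * + * A minimal usage sketch (hypothetical caller, not taken from this patch, + * with dev->struct_mutex held around the object calls as the WARN_ONs above + * and below require): rendering to a frontbuffer object is bracketed by an + * invalidate when the rendering is queued and a flush when it retires: + * + *	intel_fb_obj_invalidate(obj, ring); + *	... rendering on ring runs and retires ... + *	intel_fb_obj_flush(obj, true);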
+ */ +void intel_frontbuffer_flush(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Delay flushing when rings are still busy.*/ + mutex_lock(&dev_priv->fb_tracking.lock); + frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + + intel_mark_fb_busy(dev, frontbuffer_bits, NULL); + + intel_edp_psr_exit(dev); +} + +/** + * intel_fb_obj_flush - flush frontbuffer object + * @obj: GEM object to flush + * @retire: set when retiring asynchronous rendering + * + * This function gets called every time rendering on the given object has + * completed and frontbuffer caching can be started again. If @retire is true + * then any delayed flushes will be unblocked. + */ +void intel_fb_obj_flush(struct drm_i915_gem_object *obj, + bool retire) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned frontbuffer_bits; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + if (!obj->frontbuffer_bits) + return; + + frontbuffer_bits = obj->frontbuffer_bits; + + if (retire) { + mutex_lock(&dev_priv->fb_tracking.lock); + /* Filter out new bits since rendering started. */ + frontbuffer_bits &= dev_priv->fb_tracking.busy_bits; + + dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + } + + intel_frontbuffer_flush(dev, frontbuffer_bits); +} + +/** + * intel_frontbuffer_flip_prepare - prepare asnychronous frontbuffer flip + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called after scheduling a flip on @obj. The actual + * frontbuffer flushing will be delayed until completion is signalled with + * intel_frontbuffer_flip_complete. If an invalidate happens in between this + * flush will be cancelled. + * + * Can be called without any locks held. + */ +void intel_frontbuffer_flip_prepare(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev_priv->fb_tracking.lock); + dev_priv->fb_tracking.flip_bits + |= frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); +} + +/** + * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called after the flip has been latched and will complete + * on the next vblank. It will execute the fush if it hasn't been cancalled yet. + * + * Can be called without any locks held. + */ +void intel_frontbuffer_flip_complete(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev_priv->fb_tracking.lock); + /* Mask any cancelled flips. 
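+ * Only bits still set in flip_bits belong to flips that were prepared and + * not invalidated afterwards; anything an intervening invalidate cleared + * has already had its flush cancelled.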
*/ + frontbuffer_bits &= dev_priv->fb_tracking.flip_bits; + dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + + intel_frontbuffer_flush(dev, frontbuffer_bits); +} + static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -8803,8 +9104,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) kfree(work); } - intel_crtc_cursor_set(crtc, NULL, 0, 0, 0); - drm_crtc_cleanup(crtc); kfree(intel_crtc); @@ -8815,6 +9114,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) struct intel_unpin_work *work = container_of(__work, struct intel_unpin_work, work); struct drm_device *dev = work->crtc->dev; + enum pipe pipe = to_intel_crtc(work->crtc)->pipe; mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(work->old_fb_obj); @@ -8824,6 +9124,8 @@ static void intel_unpin_work_fn(struct work_struct *__work) intel_update_fbc(dev); mutex_unlock(&dev->struct_mutex); + intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); + BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); @@ -9193,6 +9495,150 @@ static int intel_gen7_queue_flip(struct drm_device *dev, return 0; } +static bool use_mmio_flip(struct intel_engine_cs *ring, + struct drm_i915_gem_object *obj) +{ + /* + * This is not being used for older platforms, because + * non-availability of flip done interrupt forces us to use + * CS flips. Older platforms derive flip done using some clever + * tricks involving the flip_pending status bits and vblank irqs. + * So using MMIO flips there would disrupt this mechanism. + */ + + if (ring == NULL) + return true; + + if (INTEL_INFO(ring->dev)->gen < 5) + return false; + + if (i915.use_mmio_flip < 0) + return false; + else if (i915.use_mmio_flip > 0) + return true; + else + return ring != obj->ring; +} + +static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_framebuffer *intel_fb = + to_intel_framebuffer(intel_crtc->base.primary->fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + u32 dspcntr; + u32 reg; + + intel_mark_page_flip_active(intel_crtc); + + reg = DSPCNTR(intel_crtc->plane); + dspcntr = I915_READ(reg); + + if (INTEL_INFO(dev)->gen >= 4) { + if (obj->tiling_mode != I915_TILING_NONE) + dspcntr |= DISPPLANE_TILED; + else + dspcntr &= ~DISPPLANE_TILED; + } + I915_WRITE(reg, dspcntr); + + I915_WRITE(DSPSURF(intel_crtc->plane), + intel_crtc->unpin_work->gtt_offset); + POSTING_READ(DSPSURF(intel_crtc->plane)); +} + +static int intel_postpone_flip(struct drm_i915_gem_object *obj) +{ + struct intel_engine_cs *ring; + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + if (!obj->last_write_seqno) + return 0; + + ring = obj->ring; + + if (i915_seqno_passed(ring->get_seqno(ring, true), + obj->last_write_seqno)) + return 0; + + ret = i915_gem_check_olr(ring, obj->last_write_seqno); + if (ret) + return ret; + + if (WARN_ON(!ring->irq_get(ring))) + return 0; + + return 1; +} + +void intel_notify_mmio_flip(struct intel_engine_cs *ring) +{ + struct drm_i915_private *dev_priv = to_i915(ring->dev); + struct intel_crtc *intel_crtc; + unsigned long irq_flags; + u32 seqno; + + seqno = ring->get_seqno(ring, false); + + spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); + for_each_intel_crtc(ring->dev, intel_crtc) { + struct intel_mmio_flip *mmio_flip; + + 
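+		/* + * Complete every armed MMIO flip on this ring whose target + * seqno has passed, then drop the irq reference that was + * taken when the flip was queued. + */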
mmio_flip = &intel_crtc->mmio_flip; + if (mmio_flip->seqno == 0) + continue; + + if (ring->id != mmio_flip->ring_id) + continue; + + if (i915_seqno_passed(seqno, mmio_flip->seqno)) { + intel_do_mmio_flip(intel_crtc); + mmio_flip->seqno = 0; + ring->irq_put(ring); + } + } + spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); +} + +static int intel_queue_mmio_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj, + struct intel_engine_cs *ring, + uint32_t flags) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + unsigned long irq_flags; + int ret; + + if (WARN_ON(intel_crtc->mmio_flip.seqno)) + return -EBUSY; + + ret = intel_postpone_flip(obj); + if (ret < 0) + return ret; + if (ret == 0) { + intel_do_mmio_flip(intel_crtc); + return 0; + } + + spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); + intel_crtc->mmio_flip.seqno = obj->last_write_seqno; + intel_crtc->mmio_flip.ring_id = obj->ring->id; + spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); + + /* + * Double check to catch cases where irq fired before + * mmio flip data was ready + */ + intel_notify_mmio_flip(obj->ring); + return 0; +} + static int intel_default_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -9211,13 +9657,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *old_fb = crtc->primary->fb; - struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe pipe = intel_crtc->pipe; struct intel_unpin_work *work; struct intel_engine_cs *ring; unsigned long flags; int ret; + /* + * drm_mode_page_flip_ioctl() should already catch this, but double + * check to be safe. In the future we may enable pageflipping from + * a disabled primary plane. + */ + if (WARN_ON(intel_fb_obj(old_fb) == NULL)) + return -EBUSY; + /* Can't change pixel format via MI display flips. 
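+ * The MI display flip command essentially only re-points the scanout + * base address, so a request for a different pixel_format is rejected + * with -EINVAL below.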
*/ if (fb->pixel_format != crtc->primary->fb->pixel_format) return -EINVAL; @@ -9240,7 +9695,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->crtc = crtc; - work->old_fb_obj = to_intel_framebuffer(old_fb)->obj; + work->old_fb_obj = intel_fb_obj(old_fb); INIT_WORK(&work->work, intel_unpin_work_fn); ret = drm_crtc_vblank_get(crtc); @@ -9281,10 +9736,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) - work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1; + work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1; if (IS_VALLEYVIEW(dev)) { ring = &dev_priv->ring[BCS]; + if (obj->tiling_mode != work->old_fb_obj->tiling_mode) + /* vlv: DISPLAY_FLIP fails to change tiling */ + ring = NULL; + } else if (IS_IVYBRIDGE(dev)) { + ring = &dev_priv->ring[BCS]; } else if (INTEL_INFO(dev)->gen >= 7) { ring = obj->ring; if (ring == NULL || ring->id != RCS) @@ -9300,12 +9760,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->gtt_offset = i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset; - ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags); + if (use_mmio_flip(ring, obj)) + ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, + page_flip_flags); + else + ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, + page_flip_flags); if (ret) goto cleanup_unpin; + i915_gem_track_fb(work->old_fb_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(pipe)); + intel_disable_fbc(dev); - intel_mark_fb_busy(obj, NULL); + intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); mutex_unlock(&dev->struct_mutex); trace_i915_flip_request(intel_crtc->plane, obj); @@ -9335,7 +9803,7 @@ out_hang: intel_crtc_wait_for_pending_flips(crtc); ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); if (ret == 0 && event) - drm_send_vblank_event(dev, intel_crtc->pipe, event); + drm_send_vblank_event(dev, pipe, event); } return ret; } @@ -10008,11 +10476,14 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(double_wide); + PIPE_CONF_CHECK_X(ddi_pll_sel); + PIPE_CONF_CHECK_I(shared_dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); PIPE_CONF_CHECK_X(dpll_hw_state.fp0); PIPE_CONF_CHECK_X(dpll_hw_state.fp1); + PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) PIPE_CONF_CHECK_I(pipe_bpp); @@ -10074,6 +10545,14 @@ check_encoder_state(struct drm_device *dev) if (connector->base.dpms != DRM_MODE_DPMS_OFF) active = true; } + /* + * for MST connectors, if we unplug, the connector goes + * away but the encoder is still connected to a crtc + * until a modeset happens in response to the hotplug. + */ + if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST) + continue; + WARN(!!encoder->base.crtc != enabled, "encoder's enabled state mismatch " "(expected %i, found %i)\n", @@ -10369,20 +10848,23 @@ static int __intel_set_mode(struct drm_crtc *crtc, * on the DPLL.
*/ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { - struct drm_framebuffer *old_fb; + struct drm_framebuffer *old_fb = crtc->primary->fb; + struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb); + struct drm_i915_gem_object *obj = intel_fb_obj(fb); mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, - to_intel_framebuffer(fb)->obj, + obj, NULL); if (ret != 0) { DRM_ERROR("pin & fence failed\n"); mutex_unlock(&dev->struct_mutex); goto done; } - old_fb = crtc->primary->fb; if (old_fb) - intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); + intel_unpin_fb_obj(old_obj); + i915_gem_track_fb(old_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); mutex_unlock(&dev->struct_mutex); crtc->primary->fb = fb; @@ -10554,12 +11036,17 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, if (is_crtc_connector_off(set)) { config->mode_changed = true; } else if (set->crtc->primary->fb != set->fb) { - /* If we have no fb then treat it as a full mode set */ + /* + * If we have no fb, we can only flip as long as the crtc is + * active, otherwise we need a full mode set. The crtc may + * be active if we've only disabled the primary plane, or + * in fastboot situations. + */ if (set->crtc->primary->fb == NULL) { struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); - if (intel_crtc->active && i915.fastboot) { + if (intel_crtc->active) { DRM_DEBUG_KMS("crtc has no fb, will flip\n"); config->fb_changed = true; } else { @@ -10611,7 +11098,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, * for them. */ for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro] == &connector->base) { - connector->new_encoder = connector->encoder; + connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe); break; } } @@ -10657,7 +11144,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, new_crtc)) { return -EINVAL; } - connector->encoder->new_crtc = to_intel_crtc(new_crtc); + connector->new_encoder->new_crtc = to_intel_crtc(new_crtc); DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", connector->base.base.id, @@ -10691,7 +11178,12 @@ intel_modeset_stage_output_state(struct drm_device *dev, } } /* Now we've also updated encoder->new_crtc for all encoders. */ - + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (connector->new_encoder) + if (connector->new_encoder != connector->encoder) + connector->encoder = connector->new_encoder; + } for_each_intel_crtc(dev, crtc) { crtc->new_enabled = false; @@ -10797,10 +11289,24 @@ static int intel_crtc_set_config(struct drm_mode_set *set) ret = intel_set_mode(set->crtc, set->mode, set->x, set->y, set->fb); } else if (config->fb_changed) { + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); + intel_crtc_wait_for_pending_flips(set->crtc); ret = intel_pipe_set_base(set->crtc, set->x, set->y, set->fb); + + /* + * We need to make sure the primary plane is re-enabled if it + * has previously been turned off. + */ + if (!intel_crtc->primary_enabled && ret == 0) { + WARN_ON(!intel_crtc->active); + intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, + intel_crtc->pipe); + } + /* * In the fastboot case this may be our only check of the * state after boot. 
It would be better to only do it on @@ -10841,26 +11347,21 @@ out_config: } static const struct drm_crtc_funcs intel_crtc_funcs = { - .cursor_set = intel_crtc_cursor_set, - .cursor_move = intel_crtc_cursor_move, .gamma_set = intel_crtc_gamma_set, .set_config = intel_crtc_set_config, .destroy = intel_crtc_destroy, .page_flip = intel_crtc_page_flip, }; -static void intel_cpu_pll_init(struct drm_device *dev) -{ - if (HAS_DDI(dev)) - intel_ddi_pll_init(dev); -} - static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { uint32_t val; + if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + val = I915_READ(PCH_DPLL(pll->id)); hw_state->dpll = val; hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); @@ -10942,7 +11443,9 @@ static void intel_shared_dpll_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + if (HAS_DDI(dev)) + intel_ddi_pll_init(dev); + else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) ibx_pch_dpll_init(dev); else dev_priv->num_shared_dpll = 0; @@ -10950,17 +11453,328 @@ static void intel_shared_dpll_init(struct drm_device *dev) BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); } +static int +intel_primary_plane_disable(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(plane); + struct intel_crtc *intel_crtc; + + if (!plane->fb) + return 0; + + BUG_ON(!plane->crtc); + + intel_crtc = to_intel_crtc(plane->crtc); + + /* + * Even though we checked plane->fb above, it's still possible that + * the primary plane has been implicitly disabled because the crtc + * coordinates given weren't visible, or because we detected + * that it was 100% covered by a sprite plane. Or, the CRTC may be + * off and we've set a fb, but haven't actually turned on the CRTC yet. + * In either case, we need to unpin the FB and let the fb pointer get + * updated, but otherwise we don't need to touch the hardware. + */ + if (!intel_crtc->primary_enabled) + goto disable_unpin; + + intel_crtc_wait_for_pending_flips(plane->crtc); + intel_disable_primary_hw_plane(dev_priv, intel_plane->plane, + intel_plane->pipe); +disable_unpin: + mutex_lock(&dev->struct_mutex); + i915_gem_track_fb(intel_fb_obj(plane->fb), NULL, + INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); + intel_unpin_fb_obj(intel_fb_obj(plane->fb)); + mutex_unlock(&dev->struct_mutex); + plane->fb = NULL; + + return 0; +} + +static int +intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); + struct drm_rect dest = { + /* integer pixels */ + .x1 = crtc_x, + .y1 = crtc_y, + .x2 = crtc_x + crtc_w, + .y2 = crtc_y + crtc_h, + }; + struct drm_rect src = { + /* 16.16 fixed point */ + .x1 = src_x, + .y1 = src_y, + .x2 = src_x + src_w, + .y2 = src_y + src_h, + }; + const struct drm_rect clip = { + /* integer pixels */ + .x2 = intel_crtc->active ? 
intel_crtc->config.pipe_src_w : 0, + .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, + }; + bool visible; + int ret; + + ret = drm_plane_helper_check_update(plane, crtc, fb, + &src, &dest, &clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true, &visible); + + if (ret) + return ret; + + /* + * If the CRTC isn't enabled, we're just pinning the framebuffer, + * updating the fb pointer, and returning without touching the + * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to + * turn on the display with all planes setup as desired. + */ + if (!crtc->enabled) { + mutex_lock(&dev->struct_mutex); + + /* + * If we already called setplane while the crtc was disabled, + * we may have an fb pinned; unpin it. + */ + if (plane->fb) + intel_unpin_fb_obj(old_obj); + + i915_gem_track_fb(old_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); + + /* Pin and return without programming hardware */ + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + mutex_unlock(&dev->struct_mutex); + + return ret; + } + + intel_crtc_wait_for_pending_flips(crtc); + + /* + * If clipping results in a non-visible primary plane, we'll disable + * the primary plane. Note that this is a bit different than what + * happens if userspace explicitly disables the plane by passing fb=0 + * because plane->fb still gets set and pinned. + */ + if (!visible) { + mutex_lock(&dev->struct_mutex); + + /* + * Try to pin the new fb first so that we can bail out if we + * fail. + */ + if (plane->fb != fb) { + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return ret; + } + } + + i915_gem_track_fb(old_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); + + if (intel_crtc->primary_enabled) + intel_disable_primary_hw_plane(dev_priv, + intel_plane->plane, + intel_plane->pipe); + + + if (plane->fb != fb) + if (plane->fb) + intel_unpin_fb_obj(old_obj); + + mutex_unlock(&dev->struct_mutex); + + return 0; + } + + ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb); + if (ret) + return ret; + + if (!intel_crtc->primary_enabled) + intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, + intel_crtc->pipe); + + return 0; +} + +/* Common destruction function for both primary and cursor planes */ +static void intel_plane_destroy(struct drm_plane *plane) +{ + struct intel_plane *intel_plane = to_intel_plane(plane); + drm_plane_cleanup(plane); + kfree(intel_plane); +} + +static const struct drm_plane_funcs intel_primary_plane_funcs = { + .update_plane = intel_primary_plane_setplane, + .disable_plane = intel_primary_plane_disable, + .destroy = intel_plane_destroy, +}; + +static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, + int pipe) +{ + struct intel_plane *primary; + const uint32_t *intel_primary_formats; + int num_formats; + + primary = kzalloc(sizeof(*primary), GFP_KERNEL); + if (primary == NULL) + return NULL; + + primary->can_scale = false; + primary->max_downscale = 1; + primary->pipe = pipe; + primary->plane = pipe; + if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) + primary->plane = !pipe; + + if (INTEL_INFO(dev)->gen <= 3) { + intel_primary_formats = intel_primary_formats_gen2; + num_formats = ARRAY_SIZE(intel_primary_formats_gen2); + } else { + intel_primary_formats = intel_primary_formats_gen4; + num_formats = ARRAY_SIZE(intel_primary_formats_gen4); + } + + drm_universal_plane_init(dev, &primary->base, 0, + &intel_primary_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY); + 
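+	/* + * The plane was registered through drm_universal_plane_init() above, + * so the primary plane is a first-class plane object (and visible to + * userspace once it opts in to universal planes). + */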
return &primary->base; +} + +static int +intel_cursor_plane_disable(struct drm_plane *plane) +{ + if (!plane->fb) + return 0; + + BUG_ON(!plane->crtc); + + return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0); +} + +static int +intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_rect dest = { + /* integer pixels */ + .x1 = crtc_x, + .y1 = crtc_y, + .x2 = crtc_x + crtc_w, + .y2 = crtc_y + crtc_h, + }; + struct drm_rect src = { + /* 16.16 fixed point */ + .x1 = src_x, + .y1 = src_y, + .x2 = src_x + src_w, + .y2 = src_y + src_h, + }; + const struct drm_rect clip = { + /* integer pixels */ + .x2 = intel_crtc->config.pipe_src_w, + .y2 = intel_crtc->config.pipe_src_h, + }; + bool visible; + int ret; + + ret = drm_plane_helper_check_update(plane, crtc, fb, + &src, &dest, &clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true, &visible); + if (ret) + return ret; + + crtc->cursor_x = crtc_x; + crtc->cursor_y = crtc_y; + if (fb != crtc->cursor->fb) { + return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h); + } else { + intel_crtc_update_cursor(crtc, visible); + return 0; + } +} +static const struct drm_plane_funcs intel_cursor_plane_funcs = { + .update_plane = intel_cursor_plane_update, + .disable_plane = intel_cursor_plane_disable, + .destroy = intel_plane_destroy, +}; + +static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, + int pipe) +{ + struct intel_plane *cursor; + + cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); + if (cursor == NULL) + return NULL; + + cursor->can_scale = false; + cursor->max_downscale = 1; + cursor->pipe = pipe; + cursor->plane = pipe; + + drm_universal_plane_init(dev, &cursor->base, 0, + &intel_cursor_plane_funcs, + intel_cursor_formats, + ARRAY_SIZE(intel_cursor_formats), + DRM_PLANE_TYPE_CURSOR); + return &cursor->base; +} + static void intel_crtc_init(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc; - int i; + struct drm_plane *primary = NULL; + struct drm_plane *cursor = NULL; + int i, ret; intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); if (intel_crtc == NULL) return; - drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); + primary = intel_primary_plane_create(dev, pipe); + if (!primary) + goto fail; + + cursor = intel_cursor_plane_create(dev, pipe); + if (!cursor) + goto fail; + + ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, + cursor, &intel_crtc_funcs); + if (ret) + goto fail; drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); for (i = 0; i < 256; i++) { @@ -10971,7 +11785,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) /* * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port - * is hooked to plane B. Hence we want plane A feeding pipe B. + * is hooked to pipe B. Hence we want plane A feeding pipe B. 
*/ intel_crtc->pipe = pipe; intel_crtc->plane = pipe; @@ -10993,6 +11807,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); + return; + +fail: + if (primary) + drm_plane_cleanup(primary); + if (cursor) + drm_plane_cleanup(cursor); + kfree(intel_crtc); } enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) @@ -11012,21 +11834,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; - struct drm_mode_object *drmmode_obj; + struct drm_crtc *drmmode_crtc; struct intel_crtc *crtc; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -ENODEV; - drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, - DRM_MODE_OBJECT_CRTC); + drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); - if (!drmmode_obj) { + if (!drmmode_crtc) { DRM_ERROR("no such CRTC id\n"); return -ENOENT; } - crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); + crtc = to_intel_crtc(drmmode_crtc); pipe_from_crtc_id->pipe = crtc->pipe; return 0; @@ -11088,6 +11909,22 @@ const char *intel_output_name(int output) return names[output]; } +static bool intel_crt_present(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_ULT(dev)) + return false; + + if (IS_CHERRYVIEW(dev)) + return false; + + if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support) + return false; + + return true; +} + static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -11096,7 +11933,7 @@ static void intel_setup_outputs(struct drm_device *dev) intel_lvds_init(dev); - if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev) && dev_priv->vbt.int_crt_support) + if (intel_crt_present(dev)) intel_crt_init(dev); if (HAS_DDI(dev)) { @@ -11211,6 +12048,8 @@ static void intel_setup_outputs(struct drm_device *dev) if (SUPPORTS_TV(dev)) intel_tv_init(dev); + intel_edp_psr_init(dev); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { encoder->base.possible_crtcs = encoder->crtc_mask; encoder->base.possible_clones = @@ -11224,11 +12063,14 @@ static void intel_setup_outputs(struct drm_device *dev) static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { + struct drm_device *dev = fb->dev; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); drm_framebuffer_cleanup(fb); + mutex_lock(&dev->struct_mutex); WARN_ON(!intel_fb->obj->framebuffer_references--); - drm_gem_object_unreference_unlocked(&intel_fb->obj->base); + drm_gem_object_unreference(&intel_fb->obj->base); + mutex_unlock(&dev->struct_mutex); kfree(intel_fb); } @@ -11413,7 +12255,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; dev_priv->display.crtc_enable = haswell_crtc_enable; dev_priv->display.crtc_disable = haswell_crtc_disable; - dev_priv->display.off = haswell_crtc_off; + dev_priv->display.off = ironlake_crtc_off; dev_priv->display.update_primary_plane = ironlake_update_primary_plane; } else if (HAS_PCH_SPLIT(dev)) { @@ -11680,6 +12522,9 @@ void intel_modeset_init_hw(struct drm_device *dev) { intel_prepare_ddi(dev); + if (IS_VALLEYVIEW(dev)) + vlv_update_cdclk(dev); + intel_init_clock_gating(dev); intel_reset_dpio(dev); @@ -11756,7 +12601,6 @@ void intel_modeset_init(struct drm_device *dev) intel_init_dpio(dev); 
intel_reset_dpio(dev); - intel_cpu_pll_init(dev); intel_shared_dpll_init(dev); /* Just disable it once at startup */ @@ -12065,10 +12909,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->active ? "enabled" : "disabled"); } - /* FIXME: Smash this into the new shared dpll infrastructure. */ - if (HAS_DDI(dev)) - intel_ddi_setup_hw_pll_state(dev); - for (i = 0; i < dev_priv->num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; @@ -12082,6 +12922,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", pll->name, pll->refcount, pll->on); + + if (pll->refcount) + intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); } list_for_each_entry(encoder, &dev->mode_config.encoder_list, @@ -12199,7 +13042,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, void intel_modeset_gem_init(struct drm_device *dev) { struct drm_crtc *c; - struct intel_framebuffer *fb; + struct drm_i915_gem_object *obj; mutex_lock(&dev->struct_mutex); intel_init_gt_powersave(dev); @@ -12216,11 +13059,11 @@ void intel_modeset_gem_init(struct drm_device *dev) */ mutex_lock(&dev->struct_mutex); for_each_crtc(dev, c) { - if (!c->primary->fb) + obj = intel_fb_obj(c->primary->fb); + if (obj == NULL) continue; - fb = to_intel_framebuffer(c->primary->fb); - if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) { + if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) { DRM_ERROR("failed to pin boot fb on pipe %d\n", to_intel_crtc(c)->pipe); drm_framebuffer_unreference(c->primary->fb); @@ -12235,13 +13078,12 @@ void intel_connector_unregister(struct intel_connector *intel_connector) struct drm_connector *connector = &intel_connector->base; intel_panel_destroy_backlight(connector); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); } void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; struct drm_connector *connector; /* @@ -12261,14 +13103,6 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_unregister_dsm_handler(); - for_each_crtc(dev, crtc) { - /* Skip inactive CRTCs */ - if (!crtc->primary->fb) - continue; - - intel_increase_pllclock(crtc); - } - intel_disable_fbc(dev); intel_disable_gt_powersave(dev); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 52fda950fd2a..e7a7953da6d1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -112,7 +112,7 @@ static void intel_dp_link_down(struct intel_dp *intel_dp); static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); -static int +int intel_dp_max_link_bw(struct intel_dp *intel_dp) { int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; @@ -740,12 +740,29 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) { struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); - sysfs_remove_link(&intel_connector->base.kdev->kobj, - intel_dp->aux.ddc.dev.kobj.name); + if (!intel_connector->mst_port) + sysfs_remove_link(&intel_connector->base.kdev->kobj, + intel_dp->aux.ddc.dev.kobj.name); intel_connector_unregister(intel_connector); } static void +hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw) +{ + switch (link_bw) { + case DP_LINK_BW_1_62: + pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; + break; + case DP_LINK_BW_2_7: + pipe_config->ddi_pll_sel = 
PORT_CLK_SEL_LCPLL_1350; + break; + case DP_LINK_BW_5_4: + pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; + break; + } +} + +static void intel_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config, int link_bw) { @@ -756,8 +773,6 @@ intel_dp_set_clock(struct intel_encoder *encoder, if (IS_G4X(dev)) { divisor = gen4_dpll; count = ARRAY_SIZE(gen4_dpll); - } else if (IS_HASWELL(dev)) { - /* Haswell has special-purpose DP DDI clocks. */ } else if (HAS_PCH_SPLIT(dev)) { divisor = pch_dpll; count = ARRAY_SIZE(pch_dpll); @@ -928,7 +943,10 @@ found: &pipe_config->dp_m2_n2); } - intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); + if (HAS_DDI(dev)) + hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); + else + intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); return true; } @@ -1316,8 +1334,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp) DRM_DEBUG_KMS("Turn eDP power off\n"); - edp_wait_backlight_off(intel_dp); - WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); pp = ironlake_get_pp_control(intel_dp); @@ -1353,6 +1369,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp) return; DRM_DEBUG_KMS("\n"); + + intel_panel_enable_backlight(intel_dp->attached_connector); + /* * If we enable the backlight right away following a panel power * on, we may see slight flicker as the panel syncs with the eDP @@ -1367,8 +1386,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp) I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); - - intel_panel_enable_backlight(intel_dp->attached_connector); } void intel_edp_backlight_off(struct intel_dp *intel_dp) @@ -1381,8 +1398,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp) if (!is_edp(intel_dp)) return; - intel_panel_disable_backlight(intel_dp->attached_connector); - DRM_DEBUG_KMS("\n"); pp = ironlake_get_pp_control(intel_dp); pp &= ~EDP_BLC_ENABLE; @@ -1392,6 +1407,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp) I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); intel_dp->last_backlight_off = jiffies; + + edp_wait_backlight_off(intel_dp); + + intel_panel_disable_backlight(intel_dp->attached_connector); } static void ironlake_edp_pll_on(struct intel_dp *intel_dp) @@ -1613,11 +1632,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder, } } -static bool is_edp_psr(struct drm_device *dev) +static bool is_edp_psr(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dev->dev_private; - - return dev_priv->psr.sink_support; + return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; } static bool intel_edp_is_psr_enabled(struct drm_device *dev) @@ -1665,7 +1682,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; struct edp_vsc_psr psr_vsc; - if (intel_dp->psr_setup_done) + if (dev_priv->psr.setup_done) return; /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ @@ -1680,21 +1697,26 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp) I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); - intel_dp->psr_setup_done = true; + dev_priv->psr.setup_done = true; } static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t aux_clock_divider; int precharge = 0x3; int 
msg_size = 5; /* Header(4) + Message(1) */ + bool only_standby = false; aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); + if (IS_BROADWELL(dev) && dig_port->port != PORT_A) + only_standby = true; + /* Enable PSR in sink */ - if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) + if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); else @@ -1713,18 +1735,24 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t max_sleep_time = 0x1f; uint32_t idle_frames = 1; uint32_t val = 0x0; const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; + bool only_standby = false; - if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { + if (IS_BROADWELL(dev) && dig_port->port != PORT_A) + only_standby = true; + + if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) { val |= EDP_PSR_LINK_STANDBY; val |= EDP_PSR_TP2_TP3_TIME_0us; val |= EDP_PSR_TP1_TIME_0us; val |= EDP_PSR_SKIP_AUX_EXIT; + val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0; } else val |= EDP_PSR_LINK_DISABLE; @@ -1740,9 +1768,9 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dig_port->base.base.crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj; + struct drm_crtc *crtc; + struct intel_crtc *intel_crtc; + struct drm_i915_gem_object *obj; struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; dev_priv->psr.source_ok = false; @@ -1752,8 +1780,8 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return false; } - if ((intel_encoder->type != INTEL_OUTPUT_EDP) || - (dig_port->port != PORT_A)) { + if (IS_HASWELL(dev) && (intel_encoder->type != INTEL_OUTPUT_EDP || + dig_port->port != PORT_A)) { DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); return false; } @@ -1775,13 +1803,17 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return false; } - obj = to_intel_framebuffer(crtc->primary->fb)->obj; + obj = intel_fb_obj(crtc->primary->fb); if (obj->tiling_mode != I915_TILING_X || obj->fence_reg == I915_FENCE_REG_NONE) { DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); return false; } + /* Below limitations aren't valid for Broadwell */ + if (IS_BROADWELL(dev)) + goto out; + if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); return false; @@ -1798,34 +1830,48 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return false; } + out: dev_priv->psr.source_ok = true; return true; } static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; - if 
(!intel_edp_psr_match_conditions(intel_dp) || - intel_edp_is_psr_enabled(dev)) + if (intel_edp_is_psr_enabled(dev)) return; - /* Setup PSR once */ - intel_edp_psr_setup(intel_dp); - /* Enable PSR on the panel */ intel_edp_psr_enable_sink(intel_dp); /* Enable PSR on the host */ intel_edp_psr_enable_source(intel_dp); + + dev_priv->psr.enabled = true; + dev_priv->psr.active = true; } void intel_edp_psr_enable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - if (intel_edp_psr_match_conditions(intel_dp) && - !intel_edp_is_psr_enabled(dev)) + if (!HAS_PSR(dev)) { + DRM_DEBUG_KMS("PSR not supported on this platform\n"); + return; + } + + if (!is_edp_psr(intel_dp)) { + DRM_DEBUG_KMS("PSR not supported by this panel\n"); + return; + } + + /* Setup PSR once */ + intel_edp_psr_setup(intel_dp); + + if (intel_edp_psr_match_conditions(intel_dp)) intel_edp_psr_do_enable(intel_dp); } @@ -1834,7 +1880,7 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp) struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - if (!intel_edp_is_psr_enabled(dev)) + if (!dev_priv->psr.enabled) return; I915_WRITE(EDP_PSR_CTL(dev), @@ -1844,10 +1890,15 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp) if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) DRM_ERROR("Timed out waiting for PSR Idle State\n"); + + dev_priv->psr.enabled = false; } -void intel_edp_psr_update(struct drm_device *dev) +static void intel_edp_psr_work(struct work_struct *work) { + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), psr.work.work); + struct drm_device *dev = dev_priv->dev; struct intel_encoder *encoder; struct intel_dp *intel_dp = NULL; @@ -1855,17 +1906,52 @@ void intel_edp_psr_update(struct drm_device *dev) if (encoder->type == INTEL_OUTPUT_EDP) { intel_dp = enc_to_intel_dp(&encoder->base); - if (!is_edp_psr(dev)) - return; - if (!intel_edp_psr_match_conditions(intel_dp)) intel_edp_psr_disable(intel_dp); else - if (!intel_edp_is_psr_enabled(dev)) - intel_edp_psr_do_enable(intel_dp); + intel_edp_psr_do_enable(intel_dp); } } +static void intel_edp_psr_inactivate(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->psr.active = false; + + I915_WRITE(EDP_PSR_CTL(dev), I915_READ(EDP_PSR_CTL(dev)) + & ~EDP_PSR_ENABLE); +} + +void intel_edp_psr_exit(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!HAS_PSR(dev)) + return; + + if (!dev_priv->psr.setup_done) + return; + + cancel_delayed_work_sync(&dev_priv->psr.work); + + if (dev_priv->psr.active) + intel_edp_psr_inactivate(dev); + + schedule_delayed_work(&dev_priv->psr.work, + msecs_to_jiffies(100)); +} + +void intel_edp_psr_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!HAS_PSR(dev)) + return; + + INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work); +} + static void intel_disable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); @@ -2119,6 +2205,70 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder) vlv_wait_port_ready(dev_priv, dport); } +static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + 
to_intel_crtc(encoder->base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + enum pipe pipe = intel_crtc->pipe; + u32 val; + + mutex_lock(&dev_priv->dpio_lock); + + /* program left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA1_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA1_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA2_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA2_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + + /* program clock channel usage */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); + + /* + * This is a bit weird since generally the CL + * matches the pipe, but here we need to + * pick the CL based on the port. + */ + val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); + if (pipe != PIPE_B) + val &= ~CHV_CMN_USEDCLKCHANNEL; + else + val |= CHV_CMN_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); + + mutex_unlock(&dev_priv->dpio_lock); +} + /* * Native read with retry for link status and receiver capability reads for * cases where the sink may still be asleep. @@ -2156,18 +2306,14 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; } -/* - * These are source-specific values; current Intel hardware supports - * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB - */ - +/* These are source-specific values.
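+ * The maximum voltage swing and pre-emphasis the source can drive differ + * per platform and port; intel_dp_voltage_max() and + * intel_dp_pre_emphasis_max() below report those limits.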
*/ static uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); enum port port = dp_to_dig_port(intel_dp)->port; - if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev)) + if (IS_VALLEYVIEW(dev)) return DP_TRAIN_VOLTAGE_SWING_1200; else if (IS_GEN7(dev) && port == PORT_A) return DP_TRAIN_VOLTAGE_SWING_800; @@ -2183,18 +2329,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) struct drm_device *dev = intel_dp_to_dev(intel_dp); enum port port = dp_to_dig_port(intel_dp)->port; - if (IS_BROADWELL(dev)) { - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_400: - case DP_TRAIN_VOLTAGE_SWING_600: - return DP_TRAIN_PRE_EMPHASIS_6; - case DP_TRAIN_VOLTAGE_SWING_800: - return DP_TRAIN_PRE_EMPHASIS_3_5; - case DP_TRAIN_VOLTAGE_SWING_1200: - default: - return DP_TRAIN_PRE_EMPHASIS_0; - } - } else if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: return DP_TRAIN_PRE_EMPHASIS_9_5; @@ -2666,41 +2801,6 @@ intel_hsw_signal_levels(uint8_t train_set) } } -static uint32_t -intel_bdw_signal_levels(uint8_t train_set) -{ - int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | - DP_TRAIN_PRE_EMPHASIS_MASK); - switch (signal_levels) { - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */ - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: - return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */ - - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */ - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */ - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: - return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */ - - case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */ - case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */ - - case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */ - - default: - DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" - "0x%x\n", signal_levels); - return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ - } -} - /* Properly updates "DP" with the correct signal levels. 
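+ * Each platform branch below picks a signal_levels encoding for the + * requested training values together with a mask of the register bits + * that encoding occupies.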
*/ static void intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) @@ -2711,10 +2811,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) uint32_t signal_levels, mask; uint8_t train_set = intel_dp->train_set[0]; - if (IS_BROADWELL(dev)) { - signal_levels = intel_bdw_signal_levels(train_set); - mask = DDI_BUF_EMP_MASK; - } else if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { signal_levels = intel_hsw_signal_levels(train_set); mask = DDI_BUF_EMP_MASK; } else if (IS_CHERRYVIEW(dev)) { @@ -3213,6 +3310,33 @@ intel_dp_probe_oui(struct intel_dp *intel_dp) edp_panel_vdd_off(intel_dp, false); } +static bool +intel_dp_probe_mst(struct intel_dp *intel_dp) +{ + u8 buf[1]; + + if (!intel_dp->can_mst) + return false; + + if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) + return false; + + _edp_panel_vdd_on(intel_dp); + if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) { + if (buf[0] & DP_MST_CAP) { + DRM_DEBUG_KMS("Sink is MST capable\n"); + intel_dp->is_mst = true; + } else { + DRM_DEBUG_KMS("Sink is not MST capable\n"); + intel_dp->is_mst = false; + } + } + edp_panel_vdd_off(intel_dp, false); + + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); + return intel_dp->is_mst; +} + int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); @@ -3250,6 +3374,20 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) sink_irq_vector, 1) == 1; } +static bool +intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) +{ + int ret; + + ret = intel_dp_dpcd_read_wake(&intel_dp->aux, + DP_SINK_COUNT_ESI, + sink_irq_vector, 14); + if (ret != 14) + return false; + + return true; +} + static void intel_dp_handle_test_request(struct intel_dp *intel_dp) { @@ -3257,6 +3395,63 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp) drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK); } +static int +intel_dp_check_mst_status(struct intel_dp *intel_dp) +{ + bool bret; + + if (intel_dp->is_mst) { + u8 esi[16] = { 0 }; + int ret = 0; + int retry; + bool handled; + bret = intel_dp_get_sink_irq_esi(intel_dp, esi); +go_again: + if (bret == true) { + + /* check link status - esi[10] = 0x200c */ + if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { + DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + } + + DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]); + ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); + + if (handled) { + for (retry = 0; retry < 3; retry++) { + int wret; + wret = drm_dp_dpcd_write(&intel_dp->aux, + DP_SINK_COUNT_ESI+1, + &esi[1], 3); + if (wret == 3) { + break; + } + } + + bret = intel_dp_get_sink_irq_esi(intel_dp, esi); + if (bret == true) { + DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]); + goto go_again; + } + } else + ret = 0; + + return ret; + } else { + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + DRM_DEBUG_KMS("failed to get ESI - device may have failed\n"); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); + /* send a hotplug event */ + drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev); + } + } + return -EINVAL; +} + /* * According to DP spec * 5.1.2: @@ -3265,7 +3460,6 @@ intel_dp_handle_test_request(struct 
intel_dp *intel_dp) * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 * 4. Check link status on receipt of hot-plug interrupt */ - void intel_dp_check_link_status(struct intel_dp *intel_dp) { @@ -3485,6 +3679,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) enum drm_connector_status status; enum intel_display_power_domain power_domain; struct edid *edid = NULL; + bool ret; intel_runtime_pm_get(dev_priv); @@ -3494,6 +3689,14 @@ intel_dp_detect(struct drm_connector *connector, bool force) DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + if (intel_dp->is_mst) { + /* MST devices are disconnected from a monitor POV */ + if (intel_encoder->type != INTEL_OUTPUT_EDP) + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + status = connector_status_disconnected; + goto out; + } + intel_dp->has_audio = false; if (HAS_PCH_SPLIT(dev)) @@ -3506,6 +3709,16 @@ intel_dp_detect(struct drm_connector *connector, bool force) intel_dp_probe_oui(intel_dp); + ret = intel_dp_probe_mst(intel_dp); + if (ret) { + /* if we are in MST mode then this connector + won't appear connected or have anything with EDID on it */ + if (intel_encoder->type != INTEL_OUTPUT_EDP) + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + status = connector_status_disconnected; + goto out; + } + if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); } else { @@ -3701,6 +3914,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) struct drm_device *dev = intel_dp_to_dev(intel_dp); drm_dp_aux_unregister(&intel_dp->aux); + intel_dp_mst_encoder_cleanup(intel_dig_port); drm_encoder_cleanup(encoder); if (is_edp(intel_dp)) { cancel_delayed_work_sync(&intel_dp->panel_vdd_work); @@ -3729,12 +3943,62 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { .destroy = intel_dp_encoder_destroy, }; -static void +void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + return; +} + +bool +intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) + intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; + + DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, + long_hpd ? 
"long" : "short"); + + if (long_hpd) { + if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) + goto mst_fail; + + if (!intel_dp_get_dpcd(intel_dp)) { + goto mst_fail; + } + + intel_dp_probe_oui(intel_dp); + + if (!intel_dp_probe_mst(intel_dp)) + goto mst_fail; + + } else { + if (intel_dp->is_mst) { + ret = intel_dp_check_mst_status(intel_dp); + if (ret == -EINVAL) + goto mst_fail; + } - intel_dp_check_link_status(intel_dp); + if (!intel_dp->is_mst) { + /* + * we'll check the link status via the normal hot plug path later - + * but for short hpds we should check it now + */ + intel_dp_check_link_status(intel_dp); + } + } + return false; +mst_fail: + /* if we were in MST mode, and device is not there get out of MST mode */ + if (intel_dp->is_mst) { + DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); + } + return true; } /* Return which DP Port should be selected for Transcoder DP control */ @@ -3785,7 +4049,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port) return false; } -static void +void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); @@ -4246,7 +4510,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, edp_panel_vdd_work); intel_connector_attach_encoder(intel_connector, intel_encoder); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); if (HAS_DDI(dev)) intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; @@ -4279,7 +4543,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp_aux_init(intel_dp, intel_connector); - intel_dp->psr_setup_done = false; + /* init MST on ports that can support it */ + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + if (port == PORT_B || port == PORT_C || port == PORT_D) { + intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id); + } + } if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { drm_dp_aux_unregister(&intel_dp->aux); @@ -4289,7 +4558,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, edp_panel_vdd_off_sync(intel_dp); drm_modeset_unlock(&dev->mode_config.connection_mutex); } - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); return false; } @@ -4311,6 +4580,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, void intel_dp_init(struct drm_device *dev, int output_reg, enum port port) { + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; @@ -4337,6 +4607,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; if (IS_CHERRYVIEW(dev)) { + intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; intel_encoder->pre_enable = chv_pre_enable_dp; intel_encoder->enable = vlv_enable_dp; intel_encoder->post_disable = chv_post_disable_dp; @@ -4366,9 +4637,55 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_encoder->cloneable = 0; intel_encoder->hot_plug = intel_dp_hot_plug; + intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; + dev_priv->hpd_irq_port[port] = intel_dig_port; + if 
(!intel_dp_init_connector(intel_dig_port, intel_connector)) { drm_encoder_cleanup(encoder); kfree(intel_dig_port); kfree(intel_connector); } } + +void intel_dp_mst_suspend(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* disable MST */ + for (i = 0; i < I915_MAX_PORTS; i++) { + struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i]; + if (!intel_dig_port) + continue; + + if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { + if (!intel_dig_port->dp.can_mst) + continue; + if (intel_dig_port->dp.is_mst) + drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); + } + } +} + +void intel_dp_mst_resume(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < I915_MAX_PORTS; i++) { + struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i]; + if (!intel_dig_port) + continue; + if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { + int ret; + + if (!intel_dig_port->dp.can_mst) + continue; + + ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); + if (ret != 0) { + intel_dp_check_mst_status(&intel_dig_port->dp); + } + } + } +} diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c new file mode 100644 index 000000000000..d9a7a7865f66 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -0,0 +1,548 @@ +/* + * Copyright © 2008 Intel Corporation + * 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <drm/drmP.h> +#include "i915_drv.h" +#include "intel_drv.h" +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> + +static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = encoder->base.dev; + int bpp; + int lane_count, slots; + struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; + struct intel_connector *found = NULL, *intel_connector; + int mst_pbn; + + pipe_config->dp_encoder_is_mst = true; + pipe_config->has_pch_encoder = false; + pipe_config->has_dp_encoder = true; + bpp = 24; + /* + * for MST we always configure max link bw - the spec doesn't + * seem to suggest we should do otherwise. 
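
As a rough illustration of the link-bandwidth arithmetic behind the comment above: a minimal userspace C sketch, not the driver's code; mode_fits_link and the sample numbers are invented for illustration. After 8b/10b encoding each lane carries one data byte per link clock, so a link offers link_rate_khz * lanes kilobytes of payload per second:

#include <stdbool.h>
#include <stdio.h>

static bool mode_fits_link(int pixel_clock_khz, int bpp,
			   int link_rate_khz, int lanes)
{
	/* payload the mode needs, in kB/s */
	long long mode_rate = (long long)pixel_clock_khz * bpp / 8;
	/* payload the link offers after 8b/10b, in kB/s */
	long long link_avail = (long long)link_rate_khz * lanes;

	return mode_rate <= link_avail;
}

int main(void)
{
	/* 2560x1440@60 (~241.5 MHz) at 24bpp over HBR2 x4 */
	printf("fits: %d\n", mode_fits_link(241500, 24, 540000, 4));
	return 0;
}
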
+ */ + lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + intel_dp->link_bw = intel_dp_max_link_bw(intel_dp); + intel_dp->lane_count = lane_count; + + pipe_config->pipe_bpp = 24; + pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + + list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) { + if (intel_connector->new_encoder == encoder) { + found = intel_connector; + break; + } + } + + if (!found) { + DRM_ERROR("can't find connector\n"); + return false; + } + + mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp); + + pipe_config->pbn = mst_pbn; + slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn); + + intel_link_compute_m_n(bpp, lane_count, + adjusted_mode->crtc_clock, + pipe_config->port_clock, + &pipe_config->dp_m_n); + + pipe_config->dp_m_n.tu = slots; + return true; + +} + +static void intel_mst_disable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + int ret; + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + + drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port); + + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + if (ret) { + DRM_ERROR("failed to update payload %d\n", ret); + } +} + +static void intel_mst_post_disable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + + /* this can fail */ + drm_dp_check_act_status(&intel_dp->mst_mgr); + /* and this can also fail */ + drm_dp_update_payload_part2(&intel_dp->mst_mgr); + + drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port); + + intel_dp->active_mst_links--; + intel_mst->port = NULL; + if (intel_dp->active_mst_links == 0) { + intel_dig_port->base.post_disable(&intel_dig_port->base); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + } +} + +static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + int ret; + uint32_t temp; + struct intel_connector *found = NULL, *intel_connector; + int slots; + struct drm_crtc *crtc = encoder->base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) { + if (intel_connector->new_encoder == encoder) { + found = intel_connector; + break; + } + } + + if (!found) { + DRM_ERROR("can't find connector\n"); + return; + } + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + intel_mst->port = found->port; + + if (intel_dp->active_mst_links == 0) { + enum port port = intel_ddi_get_encoder_port(encoder); + + I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel); + + intel_ddi_init_dp_buf_reg(&intel_dig_port->base); + + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + + + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + } + + ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr, + 
intel_mst->port, intel_crtc->config.pbn, &slots); + if (ret == false) { + DRM_ERROR("failed to allocate vcpi\n"); + return; + } + + + intel_dp->active_mst_links++; + temp = I915_READ(DP_TP_STATUS(port)); + I915_WRITE(DP_TP_STATUS(port), temp); + + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); +} + +static void intel_mst_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + int ret; + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + + if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT), + 1)) + DRM_ERROR("Timed out waiting for ACT sent\n"); + + ret = drm_dp_check_act_status(&intel_dp->mst_mgr); + + ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); +} + +static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + *pipe = intel_mst->pipe; + if (intel_mst->port) + return true; + return false; +} + +static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum transcoder cpu_transcoder = crtc->config.cpu_transcoder; + u32 temp, flags = 0; + + pipe_config->has_dp_encoder = true; + + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (temp & TRANS_DDI_PHSYNC) + flags |= DRM_MODE_FLAG_PHSYNC; + else + flags |= DRM_MODE_FLAG_NHSYNC; + if (temp & TRANS_DDI_PVSYNC) + flags |= DRM_MODE_FLAG_PVSYNC; + else + flags |= DRM_MODE_FLAG_NVSYNC; + + switch (temp & TRANS_DDI_BPC_MASK) { + case TRANS_DDI_BPC_6: + pipe_config->pipe_bpp = 18; + break; + case TRANS_DDI_BPC_8: + pipe_config->pipe_bpp = 24; + break; + case TRANS_DDI_BPC_10: + pipe_config->pipe_bpp = 30; + break; + case TRANS_DDI_BPC_12: + pipe_config->pipe_bpp = 36; + break; + default: + break; + } + pipe_config->adjusted_mode.flags |= flags; + intel_dp_get_m_n(crtc, pipe_config); + + intel_ddi_clock_get(&intel_dig_port->base, pipe_config); +} + +static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + struct edid *edid; + int ret; + + edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port); + if (!edid) + return 0; + + ret = intel_connector_update_modes(connector, edid); + kfree(edid); + + return ret; +} + +static enum drm_connector_status +intel_mst_port_dp_detect(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + + return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port); +} + +static enum drm_connector_status +intel_dp_mst_detect(struct drm_connector *connector, bool force) +{ + enum drm_connector_status status; + status = intel_mst_port_dp_detect(connector); + return status; +} + +static int 
+intel_dp_mst_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + return 0; +} + +static void +intel_dp_mst_connector_destroy(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + + if (!IS_ERR_OR_NULL(intel_connector->edid)) + kfree(intel_connector->edid); + + drm_connector_cleanup(connector); + kfree(connector); +} + +static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { + .dpms = intel_connector_dpms, + .detect = intel_dp_mst_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_dp_mst_set_property, + .destroy = intel_dp_mst_connector_destroy, +}; + +static int intel_dp_mst_get_modes(struct drm_connector *connector) +{ + return intel_dp_mst_get_ddc_modes(connector); +} + +static enum drm_mode_status +intel_dp_mst_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* TODO - validate mode against available PBN for link */ + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + return MODE_H_ILLEGAL; + + return MODE_OK; +} + +static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + return &intel_dp->mst_encoders[0]->base.base; +} + +static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { + .get_modes = intel_dp_mst_get_modes, + .mode_valid = intel_dp_mst_mode_valid, + .best_encoder = intel_mst_best_encoder, +}; + +static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + + drm_encoder_cleanup(encoder); + kfree(intel_mst); +} + +static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = { + .destroy = intel_dp_mst_encoder_destroy, +}; + +static bool intel_dp_mst_get_hw_state(struct intel_connector *connector) +{ + if (connector->encoder) { + enum pipe pipe; + if (!connector->encoder->get_hw_state(connector->encoder, &pipe)) + return false; + return true; + } + return false; +} + +static void intel_connector_add_to_fbdev(struct intel_connector *connector) +{ +#ifdef CONFIG_DRM_I915_FBDEV + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base); +#endif +} + +static void intel_connector_remove_from_fbdev(struct intel_connector *connector) +{ +#ifdef CONFIG_DRM_I915_FBDEV + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base); +#endif +} + +static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop) +{ + struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct intel_connector *intel_connector; + struct drm_connector *connector; + int i; + + intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); + if (!intel_connector) + return NULL; + + connector = &intel_connector->base; + drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); + + 
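
The TODO in intel_dp_mst_mode_valid() above is about exactly this kind of bookkeeping. A hedged sketch of what such a check could look like, mirroring the arithmetic of drm_dp_calc_pbn_mode() (1 PBN is 54/64 MB/s of payload, padded by the spec's 0.6% margin); calc_pbn and mode_fits_pbn are illustrative names, not kernel API:

#include <stdbool.h>
#include <stdio.h>

static int calc_pbn(int clock_khz, int bpp)
{
	/* PBN = ceil(clock * bpp/8 / (54/64 MB/s) * 1006/1000) */
	long long num = (long long)clock_khz * bpp * 64 * 1006;
	long long den = 54LL * 8 * 1000 * 1000;

	return (int)((num + den - 1) / den);	/* round up */
}

static bool mode_fits_pbn(int clock_khz, int bpp, int available_pbn)
{
	return calc_pbn(clock_khz, bpp) <= available_pbn;
}

int main(void)
{
	/* 1080p60 (148.5 MHz) at 24bpp comes out at 532 PBN */
	printf("needs %d PBN, fits in 2560: %d\n",
	       calc_pbn(148500, 24),
	       mode_fits_pbn(148500, 24, 2560));
	return 0;
}
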
intel_connector->unregister = intel_connector_unregister; + intel_connector->get_hw_state = intel_dp_mst_get_hw_state; + intel_connector->mst_port = intel_dp; + intel_connector->port = port; + + for (i = PIPE_A; i <= PIPE_C; i++) { + drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_dp->mst_encoders[i]->base.base); + } + intel_dp_add_properties(intel_dp, connector); + + drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); + drm_mode_connector_set_path_property(connector, pathprop); + drm_reinit_primary_mode_group(dev); + mutex_lock(&dev->mode_config.mutex); + intel_connector_add_to_fbdev(intel_connector); + mutex_unlock(&dev->mode_config.mutex); + drm_connector_register(&intel_connector->base); + return connector; +} + +static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; + /* need to nuke the connector */ + mutex_lock(&dev->mode_config.mutex); + intel_connector_dpms(connector, DRM_MODE_DPMS_OFF); + mutex_unlock(&dev->mode_config.mutex); + + intel_connector->unregister(intel_connector); + + mutex_lock(&dev->mode_config.mutex); + intel_connector_remove_from_fbdev(intel_connector); + drm_connector_cleanup(connector); + mutex_unlock(&dev->mode_config.mutex); + + drm_reinit_primary_mode_group(dev); + + kfree(intel_connector); + DRM_DEBUG_KMS("\n"); +} + +static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) +{ + struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + + drm_kms_helper_hotplug_event(dev); +} + +static struct drm_dp_mst_topology_cbs mst_cbs = { + .add_connector = intel_dp_add_mst_connector, + .destroy_connector = intel_dp_destroy_mst_connector, + .hotplug = intel_dp_mst_hotplug, +}; + +static struct intel_dp_mst_encoder * +intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe) +{ + struct intel_dp_mst_encoder *intel_mst; + struct intel_encoder *intel_encoder; + struct drm_device *dev = intel_dig_port->base.base.dev; + + intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL); + + if (!intel_mst) + return NULL; + + intel_mst->pipe = pipe; + intel_encoder = &intel_mst->base; + intel_mst->primary = intel_dig_port; + + drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, + DRM_MODE_ENCODER_DPMST); + + intel_encoder->type = INTEL_OUTPUT_DP_MST; + intel_encoder->crtc_mask = 0x7; + intel_encoder->cloneable = 0; + + intel_encoder->compute_config = intel_dp_mst_compute_config; + intel_encoder->disable = intel_mst_disable_dp; + intel_encoder->post_disable = intel_mst_post_disable_dp; + intel_encoder->pre_enable = intel_mst_pre_enable_dp; + intel_encoder->enable = intel_mst_enable_dp; + intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; + intel_encoder->get_config = intel_dp_mst_enc_get_config; + + return intel_mst; + +} + +static bool +intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port) +{ + int i; + struct intel_dp *intel_dp = &intel_dig_port->dp; + + for (i = PIPE_A; i <= PIPE_C; i++) + intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i); + return true; +} + +int +intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id) +{ + struct intel_dp *intel_dp = 
&intel_dig_port->dp; + struct drm_device *dev = intel_dig_port->base.base.dev; + int ret; + + intel_dp->can_mst = true; + intel_dp->mst_mgr.cbs = &mst_cbs; + + /* create encoders */ + intel_dp_create_fake_mst_encoders(intel_dig_port); + ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id); + if (ret) { + intel_dp->can_mst = false; + return ret; + } + return 0; +} + +void +intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + + if (!intel_dp->can_mst) + return; + + drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr); + /* encoders will get killed by normal cleanup */ +} diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index eaa27ee9e367..1dfd1e518551 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -32,7 +32,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> -#include <drm/drm_dp_helper.h> +#include <drm/drm_dp_mst_helper.h> /** * _wait_for - magic (register) wait macro @@ -100,6 +100,7 @@ #define INTEL_OUTPUT_EDP 8 #define INTEL_OUTPUT_DSI 9 #define INTEL_OUTPUT_UNKNOWN 10 +#define INTEL_OUTPUT_DP_MST 11 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 @@ -207,6 +208,10 @@ struct intel_connector { /* since POLL and HPD connectors may use the same HPD line keep the native state of connector->polled in case hotplug storm detection changes it */ u8 polled; + + void *port; /* store this opaque as it's illegal to dereference it */ + + struct intel_dp *mst_port; }; typedef struct dpll { @@ -307,6 +312,9 @@ struct intel_crtc_config { /* Selected dpll when shared or DPLL_ID_PRIVATE. */ enum intel_dpll_id shared_dpll; + /* PORT_CLK_SEL for DDI ports. */ + uint32_t ddi_pll_sel; + /* Actual register state of the dpll, for shared dpll cross-checking. */ struct intel_dpll_hw_state dpll_hw_state; @@ -338,6 +346,7 @@ struct intel_crtc_config { u32 pos; u32 size; bool enabled; + bool force_thru; } pch_pfit; /* FDI configuration, only valid if has_pch_encoder is set. 
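
enc_to_mst(), enc_to_dig_port() and the other inline helpers nearby all lean on container_of() to get from an embedded drm_encoder back to its wrapping structure. A self-contained userspace model of the pattern; fake_encoder and fake_mst_encoder are invented stand-ins, and the member is deliberately not first so the pointer arithmetic is visible:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_encoder { int id; };

struct fake_mst_encoder {
	int pipe;
	struct fake_encoder base;	/* embedded, not a pointer */
};

int main(void)
{
	struct fake_mst_encoder mst = { .pipe = 2, .base = { .id = 7 } };
	struct fake_encoder *enc = &mst.base;

	/* walk back from the embedded member to its container */
	struct fake_mst_encoder *back =
		container_of(enc, struct fake_mst_encoder, base);

	printf("pipe %d, id %d\n", back->pipe, back->base.id);
	return 0;
}
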
*/ @@ -347,6 +356,9 @@ struct intel_crtc_config { bool ips_enabled; bool double_wide; + + bool dp_encoder_is_mst; + int pbn; }; struct intel_pipe_wm { @@ -358,6 +370,11 @@ struct intel_pipe_wm { bool sprites_scaled; }; +struct intel_mmio_flip { + u32 seqno; + u32 ring_id; +}; + struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -384,7 +401,6 @@ struct intel_crtc { struct drm_i915_gem_object *cursor_bo; uint32_t cursor_addr; - int16_t cursor_x, cursor_y; int16_t cursor_width, cursor_height; uint32_t cursor_cntl; uint32_t cursor_base; @@ -394,8 +410,6 @@ struct intel_crtc { struct intel_crtc_config *new_config; bool new_enabled; - uint32_t ddi_pll_sel; - /* reset counter value when the last flip was submitted */ unsigned int reset_counter; @@ -412,6 +426,7 @@ struct intel_crtc { wait_queue_head_t vbl_wait; int scanline_offset; + struct intel_mmio_flip mmio_flip; }; struct intel_plane_wm_parameters { @@ -428,7 +443,6 @@ struct intel_plane { struct drm_i915_gem_object *obj; bool can_scale; int max_downscale; - u32 lut_r[1024], lut_g[1024], lut_b[1024]; int crtc_x, crtc_y; unsigned int crtc_w, crtc_h; uint32_t src_x, src_y; @@ -481,6 +495,7 @@ struct cxsr_latency { #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) #define to_intel_plane(x) container_of(x, struct intel_plane, base) +#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL) struct intel_hdmi { u32 hdmi_reg; @@ -499,6 +514,7 @@ struct intel_hdmi { struct drm_display_mode *adjusted_mode); }; +struct intel_dp_mst_encoder; #define DP_MAX_DOWNSTREAM_PORTS 0x10 /** @@ -537,10 +553,17 @@ struct intel_dp { unsigned long last_power_cycle; unsigned long last_power_on; unsigned long last_backlight_off; - bool psr_setup_done; bool use_tps3; + bool can_mst; /* this port supports mst */ + bool is_mst; + int active_mst_links; + /* connector directly attached - won't be used for modeset in mst world */ struct intel_connector *attached_connector; + /* mst connector list */ + struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES]; + struct drm_dp_mst_topology_mgr mst_mgr; + uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index); /* * This function returns the value we have to program the AUX_CTL @@ -564,6 +587,14 @@ struct intel_digital_port { u32 saved_port_bits; struct intel_dp dp; struct intel_hdmi hdmi; + bool (*hpd_pulse)(struct intel_digital_port *, bool); }; + +struct intel_dp_mst_encoder { + struct intel_encoder base; + enum pipe pipe; + struct intel_digital_port *primary; + void *port; /* store this opaque as it's illegal to dereference it */ }; static inline int @@ -650,6 +681,12 @@ enc_to_dig_port(struct drm_encoder *encoder) return container_of(encoder, struct intel_digital_port, base.base); } +static inline struct intel_dp_mst_encoder * +enc_to_mst(struct drm_encoder *encoder) +{ + return container_of(encoder, struct intel_dp_mst_encoder, base.base); +} + static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { return &enc_to_dig_port(encoder)->dp; @@ -703,10 +740,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder); void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); -void intel_ddi_setup_hw_pll_state(struct drm_device *dev); bool intel_ddi_pll_select(struct intel_crtc *crtc); -void intel_ddi_pll_enable(struct intel_crtc *crtc); -void 
intel_ddi_put_crtc_pll(struct drm_crtc *crtc); void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); @@ -714,15 +748,43 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc); void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); +void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder); +void intel_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config); +void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state); /* intel_display.c */ const char *intel_output_name(int output); bool intel_has_pending_fb_unpin(struct drm_device *dev); int intel_pch_rawclk(struct drm_device *dev); -int valleyview_cur_cdclk(struct drm_i915_private *dev_priv); void intel_mark_busy(struct drm_device *dev); -void intel_mark_fb_busy(struct drm_i915_gem_object *obj, - struct intel_engine_cs *ring); +void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, + struct intel_engine_cs *ring); +void intel_frontbuffer_flip_prepare(struct drm_device *dev, + unsigned frontbuffer_bits); +void intel_frontbuffer_flip_complete(struct drm_device *dev, + unsigned frontbuffer_bits); +void intel_frontbuffer_flush(struct drm_device *dev, + unsigned frontbuffer_bits); +/** + * intel_frontbuffer_flip - prepare frontbuffer flip + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called after scheduling a flip on @obj. This is for + * synchronous plane updates which will happen on the next vblank and which will + * not get delayed by pending gpu rendering. + * + * Can be called without any locks held. + */ +static inline +void intel_frontbuffer_flip(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + intel_frontbuffer_flush(dev, frontbuffer_bits); +} + +void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire); void intel_mark_idle(struct drm_device *dev); void intel_crtc_restore_mode(struct drm_crtc *crtc); void intel_crtc_update_dpms(struct drm_crtc *crtc); @@ -765,12 +827,18 @@ __intel_framebuffer_create(struct drm_device *dev, void intel_prepare_page_flip(struct drm_device *dev, int plane); void intel_finish_page_flip(struct drm_device *dev, int pipe); void intel_finish_page_flip_plane(struct drm_device *dev, int plane); + +/* shared dpll functions */ struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); void assert_shared_dpll(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, bool state); #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) +struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc); +void intel_put_shared_dpll(struct intel_crtc *crtc); + +/* modesetting asserts */ void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pll_enabled(d, p) assert_pll(d, p, true) @@ -803,7 +871,6 @@ void hsw_disable_ips(struct intel_crtc *crtc); void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); enum intel_display_power_domain intel_display_port_power_domain(struct intel_encoder *intel_encoder); -int valleyview_get_vco(struct drm_i915_private *dev_priv); void intel_mode_from_pipe_config(struct drm_display_mode *mode, struct intel_crtc_config *pipe_config); int intel_format_to_fourcc(int format); @@ -824,6 +891,8 @@ int 
intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); bool intel_dp_is_edp(struct drm_device *dev, enum port port); +bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, + bool long_hpd); void intel_edp_backlight_on(struct intel_dp *intel_dp); void intel_edp_backlight_off(struct intel_dp *intel_dp); void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); @@ -831,11 +900,21 @@ void intel_edp_panel_on(struct intel_dp *intel_dp); void intel_edp_panel_off(struct intel_dp *intel_dp); void intel_edp_psr_enable(struct intel_dp *intel_dp); void intel_edp_psr_disable(struct intel_dp *intel_dp); -void intel_edp_psr_update(struct drm_device *dev); void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); - +void intel_edp_psr_exit(struct drm_device *dev); +void intel_edp_psr_init(struct drm_device *dev); + +int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd); +void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector); +void intel_dp_mst_suspend(struct drm_device *dev); +void intel_dp_mst_resume(struct drm_device *dev); +int intel_dp_max_link_bw(struct intel_dp *intel_dp); +void intel_dp_hot_plug(struct intel_encoder *intel_encoder); +/* intel_dp_mst.c */ +int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); +void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); /* intel_dsi.c */ -bool intel_dsi_init(struct drm_device *dev); +void intel_dsi_init(struct drm_device *dev); /* intel_dvo.c */ @@ -961,6 +1040,7 @@ void intel_init_gt_powersave(struct drm_device *dev); void intel_cleanup_gt_powersave(struct drm_device *dev); void intel_enable_gt_powersave(struct drm_device *dev); void intel_disable_gt_powersave(struct drm_device *dev); +void intel_suspend_gt_powersave(struct drm_device *dev); void intel_reset_gt_powersave(struct drm_device *dev); void ironlake_teardown_rc6(struct drm_device *dev); void gen6_update_ring_freq(struct drm_device *dev); @@ -974,8 +1054,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv); void intel_init_runtime_pm(struct drm_i915_private *dev_priv); void intel_fini_runtime_pm(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_device *dev); -void __vlv_set_power_well(struct drm_i915_private *dev_priv, - enum punit_power_well power_well_id, bool enable); + /* intel_sdvo.c */ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 02f99d768d49..2ee1722c0af4 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -657,7 +657,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, }; -bool intel_dsi_init(struct drm_device *dev) +void intel_dsi_init(struct drm_device *dev) { struct intel_dsi *intel_dsi; struct intel_encoder *intel_encoder; @@ -673,29 +673,29 @@ bool intel_dsi_init(struct drm_device *dev) /* There is no detection method for MIPI so rely on VBT */ if (!dev_priv->vbt.has_mipi) - return false; + return; + + if (IS_VALLEYVIEW(dev)) { + dev_priv->mipi_mmio_base = VLV_MIPI_BASE; + } else { + DRM_ERROR("Unsupported Mipi device to reg base"); + return; + } intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); if (!intel_dsi) - return false; + return; intel_connector = 
kzalloc(sizeof(*intel_connector), GFP_KERNEL); if (!intel_connector) { kfree(intel_dsi); - return false; + return; } intel_encoder = &intel_dsi->base; encoder = &intel_encoder->base; intel_dsi->attached_connector = intel_connector; - if (IS_VALLEYVIEW(dev)) { - dev_priv->mipi_mmio_base = VLV_MIPI_BASE; - } else { - DRM_ERROR("Unsupported Mipi device to reg base"); - return false; - } - connector = &intel_connector->base; drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); @@ -742,7 +742,7 @@ bool intel_dsi_init(struct drm_device *dev) intel_connector_attach_encoder(intel_connector, intel_encoder); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev); if (!fixed_mode) { @@ -753,12 +753,10 @@ bool intel_dsi_init(struct drm_device *dev) fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; intel_panel_init(&intel_connector->panel, fixed_mode, NULL); - return true; + return; err: drm_encoder_cleanup(&intel_encoder->base); kfree(intel_dsi); kfree(intel_connector); - - return false; } diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 21a0d348cedc..47c7584a4aa0 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -143,7 +143,7 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data) case MIPI_DSI_DCS_LONG_WRITE: dsi_vc_dcs_write(intel_dsi, vc, data, len); break; - }; + } data += len; @@ -294,7 +294,8 @@ static bool generic_init(struct intel_dsi_device *dsi) intel_dsi->rst_timer_val = mipi_config->device_reset_timer; intel_dsi->init_count = mipi_config->master_init_timer; intel_dsi->bw_timer = mipi_config->dbi_bw_timer; - intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; + intel_dsi->video_frmt_cfg_bits = + mipi_config->bta_enabled ? 
DISABLE_VIDEO_BTA : 0; switch (intel_dsi->escape_clk_div) { case 0: @@ -351,7 +352,8 @@ static bool generic_init(struct intel_dsi_device *dsi) * * prepare count */ - ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare); + ths_prepare_ns = max(mipi_config->ths_prepare, + mipi_config->tclk_prepare); prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2); /* exit zero count */ diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index a3631c0a5c28..56b47d2ffaf7 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -112,7 +112,15 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) { + struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); + u32 tmp; + + tmp = I915_READ(intel_dvo->dev.dvo_reg); + + if (!(tmp & DVO_ENABLE)) + return false; return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev); } @@ -558,7 +566,7 @@ void intel_dvo_init(struct drm_device *dev) intel_dvo->panel_wants_dither = true; } - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; } diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 088fe9378a4c..7cdb8861bb7c 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -43,10 +43,36 @@ #include <drm/i915_drm.h> #include "i915_drv.h" +static int intel_fbdev_set_par(struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct intel_fbdev *ifbdev = + container_of(fb_helper, struct intel_fbdev, helper); + int ret; + + ret = drm_fb_helper_set_par(info); + + if (ret == 0) { + /* + * FIXME: fbdev presumes that all callbacks also work from + * atomic contexts and relies on that for emergency oops + * printing. KMS totally doesn't do that and the locking here is + * by far not the only place this goes wrong. Ignore this for + * now until we solve this for real. 
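
The DSI prepare-count conversion a little further up leans on DIV_ROUND_UP() so the programmed count never falls below the panel's minimum THS-PREPARE time. A standalone sketch of that formula with made-up timing numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* illustrative numbers only: 65 ns at a 1.5 ns unit interval */
	unsigned int ths_prepare_ns = 65;
	unsigned int ui_num = 3, ui_den = 2;	/* UI = 3/2 ns */

	/* count of 2-UI steps, rounded up, as in the driver's formula */
	unsigned int prepare_cnt =
		DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);

	printf("prepare_cnt = %u\n", prepare_cnt);
	return 0;
}
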
+ */ + mutex_lock(&fb_helper->dev->struct_mutex); + ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj, + true); + mutex_unlock(&fb_helper->dev->struct_mutex); + } + + return ret; +} + static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, + .fb_set_par = intel_fbdev_set_par, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, @@ -81,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, sizes->surface_depth); size = mode_cmd.pitches[0] * mode_cmd.height; - size = ALIGN(size, PAGE_SIZE); + size = PAGE_ALIGN(size); obj = i915_gem_object_create_stolen(dev, size); if (obj == NULL) obj = i915_gem_alloc_object(dev, size); @@ -349,6 +375,11 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, } encoder = connector->encoder; + if (!encoder) { + struct drm_connector_helper_funcs *connector_funcs; + connector_funcs = connector->helper_private; + encoder = connector_funcs->best_encoder(connector); + } if (!encoder || WARN_ON(!encoder->crtc)) { DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", connector->name); @@ -417,7 +448,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, } crtcs[i] = new_crtc; - DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n", + DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", connector->name, pipe_name(to_intel_crtc(encoder->crtc)->pipe), encoder->crtc->base.id, @@ -452,7 +483,7 @@ out: return true; } -static struct drm_fb_helper_funcs intel_fb_helper_funcs = { +static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { .initial_config = intel_fb_initial_config, .gamma_set = intel_crtc_fb_gamma_set, .gamma_get = intel_crtc_fb_gamma_get, @@ -623,7 +654,8 @@ int intel_fbdev_init(struct drm_device *dev) if (ifbdev == NULL) return -ENOMEM; - ifbdev->helper.funcs = &intel_fb_helper_funcs; + drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); + if (!intel_fbdev_init_bios(dev, ifbdev)) ifbdev->preferred_bpp = 32; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index eee2bbec2958..24224131ebf1 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1229,6 +1229,70 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpio_lock); } +static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + enum pipe pipe = intel_crtc->pipe; + u32 val; + + mutex_lock(&dev_priv->dpio_lock); + + /* program left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA1_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA1_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA2_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA2_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + + /* program clock 
channel usage */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); + + /* + * This is a bit weird since generally CL + * matches the pipe, but here we need to + * pick the CL based on the port. + */ + val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); + if (pipe != PIPE_B) + val &= ~CHV_CMN_USEDCLKCHANNEL; + else + val |= CHV_CMN_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); + + mutex_unlock(&dev_priv->dpio_lock); +} + static void vlv_hdmi_post_disable(struct intel_encoder *encoder) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); @@ -1490,7 +1554,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi_add_properties(intel_hdmi, connector); intel_connector_attach_encoder(intel_connector, intel_encoder); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being @@ -1528,6 +1592,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) intel_encoder->get_hw_state = intel_hdmi_get_hw_state; intel_encoder->get_config = intel_hdmi_get_config; if (IS_CHERRYVIEW(dev)) { + intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable; intel_encoder->pre_enable = chv_hdmi_pre_enable; intel_encoder->enable = vlv_enable_hdmi; intel_encoder->post_disable = chv_hdmi_post_disable; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index d33b61d0dd33..b31088a551f2 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -34,11 +34,6 @@ #include <drm/i915_drm.h> #include "i915_drv.h" -enum disp_clk { - CDCLK, - CZCLK -}; - struct gmbus_port { const char *name; int reg; @@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c) return container_of(i2c, struct intel_gmbus, adapter); } -static int get_disp_clk_div(struct drm_i915_private *dev_priv, - enum disp_clk clk) -{ - u32 reg_val; - int clk_ratio; - - reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO); - - if (clk == CDCLK) - clk_ratio = - ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1; - else - clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1; - - return clk_ratio; -} - -static void gmbus_set_freq(struct drm_i915_private *dev_priv) -{ - int vco, gmbus_freq = 0, cdclk_div; - - BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); - - vco = valleyview_get_vco(dev_priv); - - /* Get the CDCLK divide ratio */ - cdclk_div = get_disp_clk_div(dev_priv, CDCLK); - - /* - * Program the gmbus_freq based on the cdclk frequency. - * BSpec erroneously claims we should aim for 4MHz, but - * in fact 1MHz is the correct frequency. - */ - if (cdclk_div) - gmbus_freq = (vco << 1) / cdclk_div; - - if (WARN_ON(gmbus_freq == 0)) - return; - - I915_WRITE(GMBUSFREQ_VLV, gmbus_freq); -} - void intel_i2c_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - /* - * In BIOS-less system, program the correct gmbus frequency - * before reading edid. 
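
The CHV DPIO programming above is a run of read-modify-write updates: fetch the register, clear the field, pick one of two values, write back. A minimal model of the pattern against a stubbed register; the bit position and helper names are invented:

#include <stdint.h>
#include <stdio.h>

#define USEDCLKCHANNEL	(1u << 20)	/* illustrative bit, not the real layout */

static uint32_t fake_reg;

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

static void select_clock_channel(int pipe_is_b)
{
	uint32_t val = reg_read();

	/* clear, then set one of the two alternatives */
	if (!pipe_is_b)
		val &= ~USEDCLKCHANNEL;
	else
		val |= USEDCLKCHANNEL;

	reg_write(val);
}

int main(void)
{
	select_clock_channel(1);
	printf("reg = 0x%08x\n", fake_reg);
	return 0;
}
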
- */ - if (IS_VALLEYVIEW(dev)) - gmbus_set_freq(dev_priv); - I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 23126023aeba..c511287bbb86 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -51,6 +51,7 @@ struct intel_lvds_encoder { bool is_dual_link; u32 reg; + u32 a3_power; struct intel_lvds_connector *attached_connector; }; @@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + enum intel_display_power_domain power_domain; u32 tmp; + power_domain = intel_display_port_power_domain(encoder); + if (!intel_display_power_enabled(dev_priv, power_domain)) + return false; + tmp = I915_READ(lvds_encoder->reg); if (!(tmp & LVDS_PORT_EN)) @@ -165,8 +171,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) * appropriately here, but we need to look more thoroughly into how - * panels behave in the two modes. + * panels behave in the two modes. For now, let's just maintain the + * value we got from the BIOS. */ + temp &= ~LVDS_A3_POWER_MASK; + temp |= lvds_encoder->a3_power; /* Set the dithering flag on LVDS as needed, note that there is no * special lvds dither control bit on pch-split platforms, dithering is @@ -264,7 +273,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, struct intel_crtc_config *pipe_config) { struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&intel_encoder->base); struct intel_connector *intel_connector = @@ -279,8 +287,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, return false; } - if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) == - LVDS_A3_POWER_UP) + if (lvds_encoder->a3_power == LVDS_A3_POWER_UP) lvds_bpp = 8*3; else lvds_bpp = 6*3; @@ -1081,6 +1088,9 @@ out: DRM_DEBUG_KMS("detected %s-link lvds configuration\n", lvds_encoder->is_dual_link ? 
"dual" : "single"); + lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) & + LVDS_A3_POWER_MASK; + /* * Unlock registers and just * leave them unlocked @@ -1097,7 +1107,7 @@ out: DRM_DEBUG_KMS("lid notifier registration failed\n"); lvds_connector->lid_notifier.notifier_call = NULL; } - drm_sysfs_connector_add(connector); + drm_connector_register(connector); intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); intel_panel_setup_backlight(connector); diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 2e2c71fcc9ed..d37934b6338e 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -352,6 +352,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, case INTEL_OUTPUT_UNKNOWN: case INTEL_OUTPUT_DISPLAYPORT: case INTEL_OUTPUT_HDMI: + case INTEL_OUTPUT_DP_MST: type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL; break; case INTEL_OUTPUT_EDP: diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index daa118978eec..dc2f4f26c961 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -415,6 +415,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) } intel_overlay_release_old_vid_tail(overlay); + + + i915_gem_track_fb(overlay->old_vid_bo, NULL, + INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); return 0; } @@ -686,6 +690,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, bool scale_changed = false; struct drm_device *dev = overlay->dev; u32 swidth, swidthsw, sheight, ostride; + enum pipe pipe = overlay->crtc->pipe; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); @@ -713,7 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, oconfig = OCONF_CC_OUT_8BIT; if (IS_GEN4(overlay->dev)) oconfig |= OCONF_CSC_MODE_BT709; - oconfig |= overlay->crtc->pipe == 0 ? + oconfig |= pipe == 0 ? 
OCONF_PIPE_A : OCONF_PIPE_B; iowrite32(oconfig, ®s->OCONFIG); intel_overlay_unmap_regs(overlay, regs); @@ -776,9 +781,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret) goto out_unpin; + i915_gem_track_fb(overlay->vid_bo, new_bo, + INTEL_FRONTBUFFER_OVERLAY(pipe)); + overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = new_bo; + intel_frontbuffer_flip(dev, + INTEL_FRONTBUFFER_OVERLAY(pipe)); + return 0; out_unpin: @@ -1028,7 +1039,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_intel_overlay_put_image *put_image_rec = data; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay; - struct drm_mode_object *drmmode_obj; + struct drm_crtc *drmmode_crtc; struct intel_crtc *crtc; struct drm_i915_gem_object *new_bo; struct put_image_params *params; @@ -1057,13 +1068,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, if (!params) return -ENOMEM; - drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, - DRM_MODE_OBJECT_CRTC); - if (!drmmode_obj) { + drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id); + if (!drmmode_crtc) { ret = -ENOENT; goto out_free; } - crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); + crtc = to_intel_crtc(drmmode_crtc); new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, put_image_rec->bo_handle)); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9ad0c6afc487..780c3ab26f4f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->primary->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int cfb_pitch; int i; @@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->primary->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 dpfc_ctl; @@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->primary->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 dpfc_ctl; dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dev_priv->fbc.threshold++; + + switch (dev_priv->fbc.threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: dpfc_ctl |= DPFC_CTL_LIMIT_2X; - else + break; + case 1: dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } dpfc_ctl |= DPFC_CTL_FENCE_EN; if (IS_GEN5(dev)) dpfc_ctl |= obj->fence_reg; @@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->primary->fb; - struct 
intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 dpfc_ctl; dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dev_priv->fbc.threshold++; + + switch (dev_priv->fbc.threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: dpfc_ctl |= DPFC_CTL_LIMIT_2X; - else + break; + case 1: dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev) struct drm_crtc *crtc = NULL, *tmp_crtc; struct intel_crtc *intel_crtc; struct drm_framebuffer *fb; - struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; const struct drm_display_mode *adjusted_mode; unsigned int max_width, max_height; @@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev) intel_crtc = to_intel_crtc(crtc); fb = crtc->primary->fb; - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; + obj = intel_fb_obj(fb); adjusted_mode = &intel_crtc->config.adjusted_mode; if (i915.enable_fbc < 0) { @@ -529,7 +546,10 @@ void intel_update_fbc(struct drm_device *dev) goto out_disable; } - if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) { + max_width = 4096; + max_height = 4096; + } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { max_width = 4096; max_height = 2048; } else { @@ -563,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev) if (in_dbg_master()) goto out_disable; - if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { + if (i915_gem_stolen_setup_compression(dev, obj->base.size, + drm_format_plane_cpp(fb->pixel_format, 0))) { if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL)) DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); goto out_disable; @@ -789,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, return NULL; } -static void pineview_disable_cxsr(struct drm_device *dev) +void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; + u32 val; + + if (IS_VALLEYVIEW(dev)) { + I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); + } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) { + I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); + } else if (IS_PINEVIEW(dev)) { + val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN; + val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0; + I915_WRITE(DSPFW3, val); + } else if (IS_I945G(dev) || IS_I945GM(dev)) { + val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : + _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); + I915_WRITE(FW_BLC_SELF, val); + } else if (IS_I915GM(dev)) { + val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : + _MASKED_BIT_DISABLE(INSTPM_SELF_EN); + I915_WRITE(INSTPM, val); + } else { + return; + } - /* deactivate cxsr */ - I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); + DRM_DEBUG_KMS("memory self-refresh is %s\n", + enable ? 
"enabled" : "disabled"); } /* @@ -864,95 +906,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane) /* Pineview has different values for various configs */ static const struct intel_watermark_params pineview_display_wm = { - PINEVIEW_DISPLAY_FIFO, - PINEVIEW_MAX_WM, - PINEVIEW_DFT_WM, - PINEVIEW_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE + .fifo_size = PINEVIEW_DISPLAY_FIFO, + .max_wm = PINEVIEW_MAX_WM, + .default_wm = PINEVIEW_DFT_WM, + .guard_size = PINEVIEW_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params pineview_display_hplloff_wm = { - PINEVIEW_DISPLAY_FIFO, - PINEVIEW_MAX_WM, - PINEVIEW_DFT_HPLLOFF_WM, - PINEVIEW_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE + .fifo_size = PINEVIEW_DISPLAY_FIFO, + .max_wm = PINEVIEW_MAX_WM, + .default_wm = PINEVIEW_DFT_HPLLOFF_WM, + .guard_size = PINEVIEW_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params pineview_cursor_wm = { - PINEVIEW_CURSOR_FIFO, - PINEVIEW_CURSOR_MAX_WM, - PINEVIEW_CURSOR_DFT_WM, - PINEVIEW_CURSOR_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE, + .fifo_size = PINEVIEW_CURSOR_FIFO, + .max_wm = PINEVIEW_CURSOR_MAX_WM, + .default_wm = PINEVIEW_CURSOR_DFT_WM, + .guard_size = PINEVIEW_CURSOR_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params pineview_cursor_hplloff_wm = { - PINEVIEW_CURSOR_FIFO, - PINEVIEW_CURSOR_MAX_WM, - PINEVIEW_CURSOR_DFT_WM, - PINEVIEW_CURSOR_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE + .fifo_size = PINEVIEW_CURSOR_FIFO, + .max_wm = PINEVIEW_CURSOR_MAX_WM, + .default_wm = PINEVIEW_CURSOR_DFT_WM, + .guard_size = PINEVIEW_CURSOR_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params g4x_wm_info = { - G4X_FIFO_SIZE, - G4X_MAX_WM, - G4X_MAX_WM, - 2, - G4X_FIFO_LINE_SIZE, + .fifo_size = G4X_FIFO_SIZE, + .max_wm = G4X_MAX_WM, + .default_wm = G4X_MAX_WM, + .guard_size = 2, + .cacheline_size = G4X_FIFO_LINE_SIZE, }; static const struct intel_watermark_params g4x_cursor_wm_info = { - I965_CURSOR_FIFO, - I965_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - G4X_FIFO_LINE_SIZE, + .fifo_size = I965_CURSOR_FIFO, + .max_wm = I965_CURSOR_MAX_WM, + .default_wm = I965_CURSOR_DFT_WM, + .guard_size = 2, + .cacheline_size = G4X_FIFO_LINE_SIZE, }; static const struct intel_watermark_params valleyview_wm_info = { - VALLEYVIEW_FIFO_SIZE, - VALLEYVIEW_MAX_WM, - VALLEYVIEW_MAX_WM, - 2, - G4X_FIFO_LINE_SIZE, + .fifo_size = VALLEYVIEW_FIFO_SIZE, + .max_wm = VALLEYVIEW_MAX_WM, + .default_wm = VALLEYVIEW_MAX_WM, + .guard_size = 2, + .cacheline_size = G4X_FIFO_LINE_SIZE, }; static const struct intel_watermark_params valleyview_cursor_wm_info = { - I965_CURSOR_FIFO, - VALLEYVIEW_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - G4X_FIFO_LINE_SIZE, + .fifo_size = I965_CURSOR_FIFO, + .max_wm = VALLEYVIEW_CURSOR_MAX_WM, + .default_wm = I965_CURSOR_DFT_WM, + .guard_size = 2, + .cacheline_size = G4X_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i965_cursor_wm_info = { - I965_CURSOR_FIFO, - I965_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - I915_FIFO_LINE_SIZE, + .fifo_size = I965_CURSOR_FIFO, + .max_wm = I965_CURSOR_MAX_WM, + .default_wm = I965_CURSOR_DFT_WM, + .guard_size = 2, + .cacheline_size = I915_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i945_wm_info = { - I945_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I915_FIFO_LINE_SIZE + .fifo_size = I945_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = 
I915_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i915_wm_info = { - I915_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I915_FIFO_LINE_SIZE + .fifo_size = I915_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I915_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i830_wm_info = { - I855GM_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I830_FIFO_LINE_SIZE + .fifo_size = I855GM_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I830_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i845_wm_info = { - I830_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I830_FIFO_LINE_SIZE + .fifo_size = I830_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I830_FIFO_LINE_SIZE, }; /** @@ -1033,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) dev_priv->fsb_freq, dev_priv->mem_freq); if (!latency) { DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); - pineview_disable_cxsr(dev); + intel_set_memory_cxsr(dev_priv, false); return; } @@ -1084,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) I915_WRITE(DSPFW3, reg); DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); - /* activate cxsr */ - I915_WRITE(DSPFW3, - I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); - DRM_DEBUG_KMS("Self-refresh is enabled\n"); + intel_set_memory_cxsr(dev_priv, true); } else { - pineview_disable_cxsr(dev); - DRM_DEBUG_KMS("Self-refresh is disabled\n"); + intel_set_memory_cxsr(dev_priv, false); } } @@ -1316,6 +1354,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc) int plane_sr, cursor_sr; int ignore_plane_sr, ignore_cursor_sr; unsigned int enabled = 0; + bool cxsr_enabled; vlv_update_drain_latency(dev); @@ -1342,10 +1381,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc) &valleyview_wm_info, &valleyview_cursor_wm_info, &ignore_plane_sr, &cursor_sr)) { - I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); + cxsr_enabled = true; } else { - I915_WRITE(FW_BLC_SELF_VLV, - I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); + cxsr_enabled = false; + intel_set_memory_cxsr(dev_priv, false); plane_sr = cursor_sr = 0; } @@ -1365,6 +1404,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc) I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + + if (cxsr_enabled) + intel_set_memory_cxsr(dev_priv, true); } static void g4x_update_wm(struct drm_crtc *crtc) @@ -1375,6 +1417,7 @@ static void g4x_update_wm(struct drm_crtc *crtc) int planea_wm, planeb_wm, cursora_wm, cursorb_wm; int plane_sr, cursor_sr; unsigned int enabled = 0; + bool cxsr_enabled; if (g4x_compute_wm0(dev, PIPE_A, &g4x_wm_info, latency_ns, @@ -1394,10 +1437,10 @@ static void g4x_update_wm(struct drm_crtc *crtc) &g4x_wm_info, &g4x_cursor_wm_info, &plane_sr, &cursor_sr)) { - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + cxsr_enabled = true; } else { - I915_WRITE(FW_BLC_SELF, - I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); + cxsr_enabled = false; + intel_set_memory_cxsr(dev_priv, false); plane_sr = cursor_sr = 0; } @@ -1418,6 +1461,9 @@ static void g4x_update_wm(struct drm_crtc *crtc) I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + + if (cxsr_enabled) + intel_set_memory_cxsr(dev_priv, true); } static void i965_update_wm(struct drm_crtc *unused_crtc) @@ -1427,6 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) struct drm_crtc *crtc; int srwm = 1; 
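The watermark tables above are being converted from positional initializers to C99 designated initializers, so every constant is tied to the field it fills and a reordering of struct members can no longer silently shuffle values. A minimal standalone sketch of the difference, using the field names from the patch (the numeric values are invented for illustration):

#include <stdio.h>

struct intel_watermark_params {
        unsigned long fifo_size;
        unsigned long max_wm;
        unsigned long default_wm;
        unsigned long guard_size;
        unsigned long cacheline_size;
};

/* Positional form: the meaning of each value depends on member order. */
static const struct intel_watermark_params positional = {
        96, 512, 8, 2, 64,
};

/* Designated form: each value is bound to a field by name. */
static const struct intel_watermark_params designated = {
        .fifo_size      = 96,
        .max_wm         = 512,
        .default_wm     = 8,
        .guard_size     = 2,
        .cacheline_size = 64,
};

int main(void)
{
        printf("%lu == %lu\n", positional.cacheline_size,
               designated.cacheline_size);
        return 0;
}

Both objects compile to identical data; only the designated form stays correct if the structure ever grows or reorders its members.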
int cursor_sr = 16; + bool cxsr_enabled; /* Calc sr entries for one plane configs */ crtc = single_enabled_crtc(dev); @@ -1468,13 +1515,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) DRM_DEBUG_KMS("self-refresh watermark: display plane %d " "cursor %d\n", srwm, cursor_sr); - if (IS_CRESTLINE(dev)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + cxsr_enabled = true; } else { + cxsr_enabled = false; /* Turn off self refresh if both pipes are enabled */ - if (IS_CRESTLINE(dev)) - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); + intel_set_memory_cxsr(dev_priv, false); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", @@ -1486,6 +1531,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); /* update cursor SR watermark */ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + + if (cxsr_enabled) + intel_set_memory_cxsr(dev_priv, true); } static void i9xx_update_wm(struct drm_crtc *unused_crtc) @@ -1545,12 +1593,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); if (IS_I915GM(dev) && enabled) { - struct intel_framebuffer *fb; + struct drm_i915_gem_object *obj; - fb = to_intel_framebuffer(enabled->primary->fb); + obj = intel_fb_obj(enabled->primary->fb); /* self-refresh seems busted with untiled */ - if (fb->obj->tiling_mode == I915_TILING_NONE) + if (obj->tiling_mode == I915_TILING_NONE) enabled = NULL; } @@ -1560,10 +1608,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) cwm = 2; /* Play safe and disable self-refresh before adjusting watermarks. */ - if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); - else if (IS_I915GM(dev)) - I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN)); + intel_set_memory_cxsr(dev_priv, false); /* Calc sr entries for one plane configs */ if (HAS_FW_BLC(dev) && enabled) { @@ -1609,17 +1654,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) I915_WRITE(FW_BLC, fwater_lo); I915_WRITE(FW_BLC2, fwater_hi); - if (HAS_FW_BLC(dev)) { - if (enabled) { - if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, - FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); - else if (IS_I915GM(dev)) - I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN)); - DRM_DEBUG_KMS("memory self refresh enabled\n"); - } else - DRM_DEBUG_KMS("memory self refresh disabled\n"); - } + if (enabled) + intel_set_memory_cxsr(dev_priv, true); } static void i845_update_wm(struct drm_crtc *unused_crtc) @@ -3147,6 +3183,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) if (val < dev_priv->rps.max_freq_softlimit) mask |= GEN6_PM_RP_UP_THRESHOLD; + mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED); + mask &= dev_priv->pm_rps_events; + /* IVB and SNB hard hangs on looping batchbuffer * if GEN6_PM_UP_EI_EXPIRED is masked. */ @@ -3209,6 +3248,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val) */ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; + + /* Latest VLV doesn't need to force the gfx clock */ + if (dev->pdev->revision >= 0xd) { + valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); + return; + } + /* * When we are idle. Drop to min voltage state. 
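The gen6_rps_pm_mask() hunk above first ORs in the two EI-expired interrupt bits and then ANDs the whole mask with pm_rps_events, so only events the platform actually exposes stay enabled. A small userspace model of that bit discipline (the bit positions here are invented for illustration and are not the real register layout):

#include <stdint.h>
#include <stdio.h>

#define RP_UP_THRESHOLD         (1u << 0)
#define RP_DOWN_THRESHOLD       (1u << 1)
#define RP_UP_EI_EXPIRED        (1u << 2)
#define RP_DOWN_EI_EXPIRED      (1u << 3)

static uint32_t rps_pm_mask(uint32_t supported, unsigned int val,
                            unsigned int min_soft, unsigned int max_soft)
{
        uint32_t mask = 0;

        if (val > min_soft)
                mask |= RP_DOWN_THRESHOLD;
        if (val < max_soft)
                mask |= RP_UP_THRESHOLD;

        /* Request the EI-expired events as well... */
        mask |= supported & (RP_DOWN_EI_EXPIRED | RP_UP_EI_EXPIRED);
        /* ...then clamp everything to what the platform supports. */
        mask &= supported;

        return mask;
}

int main(void)
{
        uint32_t supported = RP_UP_THRESHOLD | RP_UP_EI_EXPIRED;

        /* Prints 0x5: the down-threshold bit is dropped by the clamp. */
        printf("mask = 0x%x\n", rps_pm_mask(supported, 5, 1, 9));
        return 0;
}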
*/ @@ -3340,6 +3387,13 @@ static void gen6_disable_rps(struct drm_device *dev) gen6_disable_rps_interrupts(dev); } +static void cherryview_disable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_RC_CONTROL, 0); +} + static void valleyview_disable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3475,15 +3529,23 @@ static void gen8_enable_rps(struct drm_device *dev) for_each_ring(ring, dev_priv, unused) I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ + if (IS_BROADWELL(dev)) + I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ + else + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ /* 3: Enable RC6 */ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; intel_print_rc6_info(dev, rc6_mask); - I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | - GEN6_RC_CTL_EI_MODE(1) | - rc6_mask); + if (IS_BROADWELL(dev)) + I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | + GEN7_RC_CTL_TO_MODE | + rc6_mask); + else + I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_EI_MODE(1) | + rc6_mask); /* 4 Program defaults and thresholds for RPS*/ I915_WRITE(GEN6_RPNSWREQ, @@ -3719,6 +3781,35 @@ void gen6_update_ring_freq(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } +int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) +{ + u32 val, rp0; + + val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); + rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK; + + return rp0; +} + +static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv) +{ + u32 val, rpe; + + val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG); + rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; + + return rpe; +} + +int cherryview_rps_min_freq(struct drm_i915_private *dev_priv) +{ + u32 val, rpn; + + val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); + rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK; + return rpn; +} + int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) { u32 val, rp0; @@ -3758,6 +3849,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv) dev_priv->vlv_pctx->stolen->start); } + +/* Check that the pcbr address is not empty. 
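The cherryview_check_pctx() function that follows masks off the low 12 bits of PCBR (the register carries a 4 KiB-aligned physical address plus control bits) and warns when the address field is zero, i.e. when the BIOS never allocated a power-context buffer. A sketch of that masking; the shift value is an assumption for illustration:

#include <assert.h>
#include <stdio.h>

#define PCBR_ALIGN      4096UL  /* the buffer must be 4 KiB aligned */
#define PCBR_ADDR_SHIFT 12      /* assumed shift, for illustration only */

static int pcbr_is_populated(unsigned long pcbr_reg)
{
        unsigned long addr = pcbr_reg & ~(PCBR_ALIGN - 1);

        return (addr >> PCBR_ADDR_SHIFT) != 0;
}

int main(void)
{
        assert(!pcbr_is_populated(0x0));        /* BIOS did nothing */
        assert(!pcbr_is_populated(0xfff));      /* only control bits set */
        assert(pcbr_is_populated(0x7dff8000));  /* plausible stolen address */
        printf("ok\n");
        return 0;
}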
*/ +static void cherryview_check_pctx(struct drm_i915_private *dev_priv) +{ + unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; + + WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); +} + +static void cherryview_setup_pctx(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long pctx_paddr, paddr; + struct i915_gtt *gtt = &dev_priv->gtt; + u32 pcbr; + int pctx_size = 32*1024; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + pcbr = I915_READ(VLV_PCBR); + if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { + paddr = (dev_priv->mm.stolen_base + + (gtt->stolen_size - pctx_size)); + + pctx_paddr = (paddr & (~4095)); + I915_WRITE(VLV_PCBR, pctx_paddr); + } +} + static void valleyview_setup_pctx(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3847,11 +3967,135 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } +static void cherryview_init_gt_powersave(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + cherryview_setup_pctx(dev); + + mutex_lock(&dev_priv->rps.hw_lock); + + dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); + dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; + DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq), + dev_priv->rps.max_freq); + + dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); + DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), + dev_priv->rps.efficient_freq); + + dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv); + DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), + dev_priv->rps.min_freq); + + /* Preserve min/max settings in case of re-init */ + if (dev_priv->rps.max_freq_softlimit == 0) + dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; + + if (dev_priv->rps.min_freq_softlimit == 0) + dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; + + mutex_unlock(&dev_priv->rps.hw_lock); +} + static void valleyview_cleanup_gt_powersave(struct drm_device *dev) { valleyview_cleanup_pctx(dev); } +static void cherryview_enable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring; + u32 gtfifodbg, val, rc6_mode = 0, pcbr; + int i; + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + gtfifodbg = I915_READ(GTFIFODBG); + if (gtfifodbg) { + DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", + gtfifodbg); + I915_WRITE(GTFIFODBG, gtfifodbg); + } + + cherryview_check_pctx(dev_priv); + + /* 1a & 1b: Get forcewake during program sequence. 
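cherryview_setup_pctx() above carves a 32 KiB power context out of the very top of stolen memory and rounds the address down to 4 KiB before writing it to PCBR. The arithmetic, pulled out into a standalone helper with example inputs:

#include <stdio.h>

/* Compute the PCBR value for a pctx carved from the top of stolen memory. */
static unsigned long pctx_paddr(unsigned long stolen_base,
                                unsigned long stolen_size,
                                unsigned long pctx_size)
{
        unsigned long paddr = stolen_base + (stolen_size - pctx_size);

        return paddr & ~4095UL; /* PCBR expects a 4 KiB aligned address */
}

int main(void)
{
        /* e.g. 64 MiB of stolen memory at 0x7a000000 and a 32 KiB pctx */
        printf("PCBR = 0x%lx\n",
               pctx_paddr(0x7a000000UL, 64UL << 20, 32UL * 1024));
        return 0;
}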
Although the driver + * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ + gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); + + /* 2a: Program RC6 thresholds.*/ + I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); + I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + + for_each_ring(ring, dev_priv, i) + I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); + I915_WRITE(GEN6_RC_SLEEP, 0); + + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ + + /* allows RC6 residency counter to work */ + I915_WRITE(VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + VLV_MEDIA_RC6_COUNT_EN | + VLV_RENDER_RC6_COUNT_EN)); + + /* For now we assume BIOS is allocating and populating the PCBR */ + pcbr = I915_READ(VLV_PCBR); + + DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr); + + /* 3: Enable RC6 */ + if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && + (pcbr >> VLV_PCBR_ADDR_SHIFT)) + rc6_mode = GEN6_RC_CTL_EI_MODE(1); + + I915_WRITE(GEN6_RC_CONTROL, rc6_mode); + + /* 4 Program defaults and thresholds for RPS*/ + I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); + I915_WRITE(GEN6_RP_UP_EI, 66000); + I915_WRITE(GEN6_RP_DOWN_EI, 350000); + + I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + + /* WaDisablePwrmtrEvent:chv (pre-production hw) */ + I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff); + I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00); + + /* 5: Enable RPS */ + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */ + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + + val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); + + DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? 
"yes" : "no"); + DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); + + dev_priv->rps.cur_freq = (val >> 8) & 0xff; + DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), + dev_priv->rps.cur_freq); + + DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), + dev_priv->rps.efficient_freq); + + valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); + + gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); +} + static void valleyview_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3878,6 +4122,7 @@ static void valleyview_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RP_DOWN_EI, 350000); I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240); I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO | @@ -3898,9 +4143,11 @@ static void valleyview_enable_rps(struct drm_device *dev) /* allows RC6 residency counter to work */ I915_WRITE(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN | + VLV_RENDER_RC0_COUNT_EN | VLV_MEDIA_RC6_COUNT_EN | VLV_RENDER_RC6_COUNT_EN)); + if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; @@ -4660,33 +4907,57 @@ void intel_init_gt_powersave(struct drm_device *dev) { i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); - if (IS_VALLEYVIEW(dev)) + if (IS_CHERRYVIEW(dev)) + cherryview_init_gt_powersave(dev); + else if (IS_VALLEYVIEW(dev)) valleyview_init_gt_powersave(dev); } void intel_cleanup_gt_powersave(struct drm_device *dev) { - if (IS_VALLEYVIEW(dev)) + if (IS_CHERRYVIEW(dev)) + return; + else if (IS_VALLEYVIEW(dev)) valleyview_cleanup_gt_powersave(dev); } +/** + * intel_suspend_gt_powersave - suspend PM work and helper threads + * @dev: drm device + * + * We don't want to disable RC6 or other features here, we just want + * to make sure any work we've queued has finished and won't bother + * us while we're suspended. + */ +void intel_suspend_gt_powersave(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Interrupts should be disabled already to avoid re-arming. */ + WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled); + + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + + cancel_work_sync(&dev_priv->rps.work); +} + void intel_disable_gt_powersave(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Interrupts should be disabled already to avoid re-arming. 
*/ - WARN_ON(dev->irq_enabled); + WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled); if (IS_IRONLAKE_M(dev)) { ironlake_disable_drps(dev); ironlake_disable_rc6(dev); - } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) { - if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work)) - intel_runtime_pm_put(dev_priv); + } else if (INTEL_INFO(dev)->gen >= 6) { + intel_suspend_gt_powersave(dev); - cancel_work_sync(&dev_priv->rps.work); mutex_lock(&dev_priv->rps.hw_lock); - if (IS_VALLEYVIEW(dev)) + if (IS_CHERRYVIEW(dev)) + cherryview_disable_rps(dev); + else if (IS_VALLEYVIEW(dev)) valleyview_disable_rps(dev); else gen6_disable_rps(dev); @@ -4704,7 +4975,9 @@ static void intel_gen6_powersave_work(struct work_struct *work) mutex_lock(&dev_priv->rps.hw_lock); - if (IS_VALLEYVIEW(dev)) { + if (IS_CHERRYVIEW(dev)) { + cherryview_enable_rps(dev); + } else if (IS_VALLEYVIEW(dev)) { valleyview_enable_rps(dev); } else if (IS_BROADWELL(dev)) { gen8_enable_rps(dev); @@ -4729,7 +5002,7 @@ void intel_enable_gt_powersave(struct drm_device *dev) ironlake_enable_rc6(dev); intel_init_emon(dev); mutex_unlock(&dev->struct_mutex); - } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) { + } else if (INTEL_INFO(dev)->gen >= 6) { /* * PCU communication is slow and this doesn't need to be * done at any specific time, so do this out of our fast path @@ -5102,7 +5375,7 @@ static void gen8_init_clock_gating(struct drm_device *dev) I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE)); I915_WRITE(_3D_CHICKEN3, - _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)); + _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2))); I915_WRITE(COMMON_SLICE_CHICKEN2, _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE)); @@ -5337,10 +5610,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev) } DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); - dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv); - DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz", - dev_priv->vlv_cdclk_freq); - I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); /* WaDisableEarlyCull:vlv */ @@ -5756,34 +6025,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, return true; } -void __vlv_set_power_well(struct drm_i915_private *dev_priv, - enum punit_power_well power_well_id, bool enable) +static void vlv_set_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, bool enable) { - struct drm_device *dev = dev_priv->dev; + enum punit_power_well power_well_id = power_well->data; u32 mask; u32 state; u32 ctrl; - enum pipe pipe; - - if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) { - if (enable) { - /* - * Enable the CRI clock source so we can get at the - * display and the reference clock for VGA - * hotplug / manual detection. - */ - I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | - DPLL_REFA_CLK_ENABLE_VLV | - DPLL_INTEGRATED_CRI_CLK_VLV); - udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ - } else { - for_each_pipe(pipe) - assert_pll_disabled(dev_priv, pipe); - /* Assert common reset */ - I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & - ~DPIO_CMNRST); - } - } mask = PUNIT_PWRGT_MASK(power_well_id); state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : @@ -5811,28 +6059,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv, out: mutex_unlock(&dev_priv->rps.hw_lock); - - /* - * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - - * 6. De-assert cmn_reset/side_reset. Same as VLV X0. - * a. 
GUnit 0x2110 bit[0] set to 1 (def 0) - * b. The other bits such as sfr settings / modesel may all - * be set to 0. - * - * This should only be done on init and resume from S3 with - * both PLLs disabled, or we risk losing DPIO and PLL - * synchronization. - */ - if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable) - I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); -} - -static void vlv_set_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, bool enable) -{ - enum punit_power_well power_well_id = power_well->data; - - __vlv_set_power_well(dev_priv, power_well_id, enable); } static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv, @@ -5924,6 +6150,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, vlv_set_power_well(dev_priv, power_well, false); } +static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); + + /* + * Enable the CRI clock source so we can get at the + * display and the reference clock for VGA + * hotplug / manual detection. + */ + I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | + DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + + vlv_set_power_well(dev_priv, power_well, true); + + /* + * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - + * 6. De-assert cmn_reset/side_reset. Same as VLV X0. + * a. GUnit 0x2110 bit[0] set to 1 (def 0) + * b. The other bits such as sfr settings / modesel may all + * be set to 0. + * + * This should only be done on init and resume from S3 with + * both PLLs disabled, or we risk losing DPIO and PLL + * synchronization. + */ + I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); +} + +static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + struct drm_device *dev = dev_priv->dev; + enum pipe pipe; + + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); + + for_each_pipe(pipe) + assert_pll_disabled(dev_priv, pipe); + + /* Assert common reset */ + I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); + + vlv_set_power_well(dev_priv, power_well, false); +} + static void check_power_well_state(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -6038,6 +6311,27 @@ int i915_release_power_well(void) } EXPORT_SYMBOL_GPL(i915_release_power_well); +/* + * Private interface for the audio driver to get CDCLK in kHz. + * + * Caller must request power well using i915_request_power_well() prior to + * making the call. 
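The i915_get_cdclk_freq() export below recovers the drm_i915_private from the stored power-domains pointer with container_of(), the usual kernel idiom for walking from an embedded member back to its enclosing structure. A self-contained model of the idiom (the structure names and fields here are illustrative, not the driver's real layout):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct power_domains { int lock; };

struct i915_private {
        int cdclk_khz;
        struct power_domains power_domains;     /* embedded member */
};

int main(void)
{
        struct i915_private dev = { .cdclk_khz = 450000 };
        struct power_domains *pd = &dev.power_domains;

        /* Recover the enclosing structure from the member pointer. */
        struct i915_private *back =
                container_of(pd, struct i915_private, power_domains);

        printf("%d kHz\n", back->cdclk_khz);
        return 0;
}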
+ */ +int i915_get_cdclk_freq(void) +{ + struct drm_i915_private *dev_priv; + + if (!hsw_pwr) + return -ENODEV; + + dev_priv = container_of(hsw_pwr, struct drm_i915_private, + power_domains); + + return intel_ddi_get_cdclk_freq(dev_priv); +} +EXPORT_SYMBOL_GPL(i915_get_cdclk_freq); + + #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) #define HSW_ALWAYS_ON_POWER_DOMAINS ( \ @@ -6052,6 +6346,7 @@ EXPORT_SYMBOL_GPL(i915_release_power_well); BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ BIT(POWER_DOMAIN_PORT_CRT) | \ + BIT(POWER_DOMAIN_PLLS) | \ BIT(POWER_DOMAIN_INIT)) #define HSW_DISPLAY_POWER_DOMAINS ( \ (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ @@ -6151,6 +6446,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = { .is_enabled = vlv_power_well_enabled, }; +static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { + .sync_hw = vlv_power_well_sync_hw, + .enable = vlv_dpio_cmn_power_well_enable, + .disable = vlv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + static const struct i915_power_well_ops vlv_dpio_power_well_ops = { .sync_hw = vlv_power_well_sync_hw, .enable = vlv_power_well_enable, @@ -6211,10 +6513,25 @@ static struct i915_power_well vlv_power_wells[] = { .name = "dpio-common", .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, .data = PUNIT_POWER_WELL_DPIO_CMN_BC, - .ops = &vlv_dpio_power_well_ops, + .ops = &vlv_dpio_cmn_power_well_ops, }, }; +static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, + enum punit_power_well power_well_id) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + int i; + + for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) { + if (power_well->data == power_well_id) + return power_well; + } + + return NULL; +} + #define set_power_wells(power_domains, __power_wells) ({ \ (power_domains)->power_wells = (__power_wells); \ (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ @@ -6265,11 +6582,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv) mutex_unlock(&power_domains->lock); } +static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *cmn = + lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); + struct i915_power_well *disp2d = + lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D); + + /* nothing to do if common lane is already off */ + if (!cmn->ops->is_enabled(dev_priv, cmn)) + return; + + /* If the display might be already active skip this */ + if (disp2d->ops->is_enabled(dev_priv, disp2d) && + I915_READ(DPIO_CTL) & DPIO_CMNRST) + return; + + DRM_DEBUG_KMS("toggling display PHY side reset\n"); + + /* cmnlane needs DPLL registers */ + disp2d->ops->enable(dev_priv, disp2d); + + /* + * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: + * Need to assert and de-assert PHY SB reset by gating the + * common lane power, then un-gating it. + * Simply ungating isn't enough to reset the PHY enough to get + * ports and lanes running. 
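The vlv_cmnlane_wa() logic above reduces to a small decision table: skip if the common-lane well is already off, skip if the display is up and the PHY is already out of reset, otherwise toggle the well to force a PHY side reset. The control flow, modelled with plain booleans (the helper name is ours, for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Returns true when the workaround must toggle the common-lane well. */
static bool cmnlane_wa_needed(bool cmn_enabled, bool disp2d_enabled,
                              bool cmnrst_deasserted)
{
        if (!cmn_enabled)
                return false;   /* nothing to do, lane is already off */
        if (disp2d_enabled && cmnrst_deasserted)
                return false;   /* display already active, leave it be */
        return true;            /* toggle the well to reset the PHY */
}

int main(void)
{
        printf("%d %d %d\n",
               cmnlane_wa_needed(false, false, false),  /* 0 */
               cmnlane_wa_needed(true, true, true),     /* 0 */
               cmnlane_wa_needed(true, false, false));  /* 1 */
        return 0;
}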
+ */ + cmn->ops->disable(dev_priv, cmn); +} + void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; struct i915_power_domains *power_domains = &dev_priv->power_domains; power_domains->initializing = true; + + if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { + mutex_lock(&power_domains->lock); + vlv_cmnlane_wa(dev_priv); + mutex_unlock(&power_domains->lock); + } + /* For now, we need the power well to be always enabled. */ intel_display_set_init_power(dev_priv, true); intel_power_domains_resume(dev_priv); @@ -6442,7 +6798,7 @@ void intel_init_pm(struct drm_device *dev) (dev_priv->is_ddr3 == 1) ? "3" : "2", dev_priv->fsb_freq, dev_priv->mem_freq); /* Disable CxSR and never update its watermark again */ - pineview_disable_cxsr(dev); + intel_set_memory_cxsr(dev_priv, false); dev_priv->display.update_wm = NULL; } else dev_priv->display.update_wm = pineview_update_wm; diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h index a5e783a9928a..fd4f66231d30 100644 --- a/drivers/gpu/drm/i915/intel_renderstate.h +++ b/drivers/gpu/drm/i915/intel_renderstate.h @@ -28,7 +28,6 @@ struct intel_renderstate_rodata { const u32 *reloc; - const u32 reloc_items; const u32 *batch; const u32 batch_items; }; @@ -40,7 +39,6 @@ extern const struct intel_renderstate_rodata gen8_null_state; #define RO_RENDERSTATE(_g) \ const struct intel_renderstate_rodata gen ## _g ## _null_state = { \ .reloc = gen ## _g ## _null_state_relocs, \ - .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \ .batch = gen ## _g ## _null_state_batch, \ .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \ } diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c index 740538ad0977..56c1429d8a60 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c @@ -6,6 +6,7 @@ static const u32 gen6_null_state_relocs[] = { 0x0000002c, 0x000001e0, 0x000001e4, + -1, }; static const u32 gen6_null_state_batch[] = { diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c index 6fa7ff2a1298..419e35a7b0ff 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c @@ -5,6 +5,7 @@ static const u32 gen7_null_state_relocs[] = { 0x00000010, 0x00000018, 0x000001ec, + -1, }; static const u32 gen7_null_state_batch[] = { diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c index 5c875615d42a..75ef1b5de45c 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c @@ -5,6 +5,7 @@ static const u32 gen8_null_state_relocs[] = { 0x00000050, 0x00000060, 0x000003ec, + -1, }; static const u32 gen8_null_state_batch[] = { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 279488addf3f..599709e80a16 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -48,9 +48,8 @@ static inline int __ring_space(int head, int tail, int size) return space; } -static inline int ring_space(struct intel_engine_cs *ring) +static inline int ring_space(struct intel_ringbuffer *ringbuf) { - struct intel_ringbuffer *ringbuf = ring->buffer; return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size); } @@ -545,7 +544,7 @@ static int init_ring_common(struct intel_engine_cs 
*ring) else { ringbuf->head = I915_READ_HEAD(ring); ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; - ringbuf->space = ring_space(ring); + ringbuf->space = ring_space(ringbuf); ringbuf->last_retired_head = -1; } @@ -604,6 +603,8 @@ static int init_render_ring(struct intel_engine_cs *ring) struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret = init_ring_common(ring); + if (ret) + return ret; /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) @@ -658,6 +659,13 @@ static int init_render_ring(struct intel_engine_cs *ring) static void render_ring_cleanup(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->semaphore_obj) { + i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); + drm_gem_object_unreference(&dev_priv->semaphore_obj->base); + dev_priv->semaphore_obj = NULL; + } if (ring->scratch.obj == NULL) return; @@ -671,29 +679,96 @@ static void render_ring_cleanup(struct intel_engine_cs *ring) ring->scratch.obj = NULL; } +static int gen8_rcs_signal(struct intel_engine_cs *signaller, + unsigned int num_dwords) +{ +#define MBOX_UPDATE_DWORDS 8 + struct drm_device *dev = signaller->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *waiter; + int i, ret, num_rings; + + num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; +#undef MBOX_UPDATE_DWORDS + + ret = intel_ring_begin(signaller, num_dwords); + if (ret) + return ret; + + for_each_ring(waiter, dev_priv, i) { + u64 gtt_offset = signaller->semaphore.signal_ggtt[i]; + if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) + continue; + + intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_FLUSH_ENABLE); + intel_ring_emit(signaller, lower_32_bits(gtt_offset)); + intel_ring_emit(signaller, upper_32_bits(gtt_offset)); + intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); + intel_ring_emit(signaller, 0); + intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | + MI_SEMAPHORE_TARGET(waiter->id)); + intel_ring_emit(signaller, 0); + } + + return 0; +} + +static int gen8_xcs_signal(struct intel_engine_cs *signaller, + unsigned int num_dwords) +{ +#define MBOX_UPDATE_DWORDS 6 + struct drm_device *dev = signaller->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *waiter; + int i, ret, num_rings; + + num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; +#undef MBOX_UPDATE_DWORDS + + ret = intel_ring_begin(signaller, num_dwords); + if (ret) + return ret; + + for_each_ring(waiter, dev_priv, i) { + u64 gtt_offset = signaller->semaphore.signal_ggtt[i]; + if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) + continue; + + intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | + MI_FLUSH_DW_OP_STOREDW); + intel_ring_emit(signaller, lower_32_bits(gtt_offset) | + MI_FLUSH_DW_USE_GTT); + intel_ring_emit(signaller, upper_32_bits(gtt_offset)); + intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); + intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | + MI_SEMAPHORE_TARGET(waiter->id)); + intel_ring_emit(signaller, 0); + } + + return 0; +} + static int gen6_signal(struct intel_engine_cs *signaller, unsigned int num_dwords) { struct drm_device *dev = signaller->dev; struct drm_i915_private *dev_priv = 
dev->dev_private; struct intel_engine_cs *useless; - int i, ret; + int i, ret, num_rings; - /* NB: In order to be able to do semaphore MBOX updates for varying - * number of rings, it's easiest if we round up each individual update - * to a multiple of 2 (since ring updates must always be a multiple of - * 2) even though the actual update only requires 3 dwords. - */ -#define MBOX_UPDATE_DWORDS 4 - if (i915_semaphore_is_enabled(dev)) - num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS); - else - return intel_ring_begin(signaller, num_dwords); +#define MBOX_UPDATE_DWORDS 3 + num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); +#undef MBOX_UPDATE_DWORDS ret = intel_ring_begin(signaller, num_dwords); if (ret) return ret; -#undef MBOX_UPDATE_DWORDS for_each_ring(useless, dev_priv, i) { u32 mbox_reg = signaller->semaphore.mbox.signal[i]; @@ -701,15 +776,13 @@ static int gen6_signal(struct intel_engine_cs *signaller, intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(signaller, mbox_reg); intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); - intel_ring_emit(signaller, MI_NOOP); - } else { - intel_ring_emit(signaller, MI_NOOP); - intel_ring_emit(signaller, MI_NOOP); - intel_ring_emit(signaller, MI_NOOP); - intel_ring_emit(signaller, MI_NOOP); } } + /* If num_dwords was rounded, make sure the tail pointer is correct */ + if (num_rings % 2 == 0) + intel_ring_emit(signaller, MI_NOOP); + return 0; } @@ -727,7 +800,11 @@ gen6_add_request(struct intel_engine_cs *ring) { int ret; - ret = ring->semaphore.signal(ring, 4); + if (ring->semaphore.signal) + ret = ring->semaphore.signal(ring, 4); + else + ret = intel_ring_begin(ring, 4); + if (ret) return ret; @@ -754,6 +831,32 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, * @signaller - ring which has, or will signal * @seqno - seqno which the waiter will block on */ + +static int +gen8_ring_sync(struct intel_engine_cs *waiter, + struct intel_engine_cs *signaller, + u32 seqno) +{ + struct drm_i915_private *dev_priv = waiter->dev->dev_private; + int ret; + + ret = intel_ring_begin(waiter, 4); + if (ret) + return ret; + + intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD); + intel_ring_emit(waiter, seqno); + intel_ring_emit(waiter, + lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); + intel_ring_emit(waiter, + upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); + intel_ring_advance(waiter); + return 0; +} + static int gen6_ring_sync(struct intel_engine_cs *waiter, struct intel_engine_cs *signaller, @@ -1329,6 +1432,7 @@ static int init_status_page(struct intel_engine_cs *ring) struct drm_i915_gem_object *obj; if ((obj = ring->status_page.obj) == NULL) { + unsigned flags; int ret; obj = i915_gem_alloc_object(ring->dev, 4096); @@ -1341,7 +1445,20 @@ static int init_status_page(struct intel_engine_cs *ring) if (ret) goto err_unref; - ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); + flags = 0; + if (!HAS_LLC(ring->dev)) + /* On g33, we cannot place HWS above 256MiB, so + * restrict its pinning to the low mappable arena. + * Though this restriction is not documented for + * gen4, gen5, or byt, they also behave similarly + * and hang if the HWS is placed at the top of the + * GTT. To generalise, it appears that all !llc + * platforms have issues with us placing the HWS + * above the mappable region (even though we never + * actually map it). 
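Earlier in this hunk, gen6_signal() drops the per-ring mailbox update from a padded 4 dwords to 3 and instead rounds the total up to an even count, because ring emission must advance in pairs of dwords. The accounting, as a standalone calculation:

#include <stdio.h>

#define MBOX_UPDATE_DWORDS 3    /* LRI + mbox register + seqno */

static unsigned int round_up2(unsigned int x)
{
        return (x + 1) & ~1u;   /* ring writes must come in pairs */
}

int main(void)
{
        unsigned int num_rings;

        for (num_rings = 2; num_rings <= 5; num_rings++) {
                unsigned int payload = (num_rings - 1) * MBOX_UPDATE_DWORDS;
                unsigned int total = round_up2(payload);

                /* Padding is needed exactly when the ring count is even,
                 * since the payload is then odd. */
                printf("rings=%u payload=%u total=%u pad=%u\n",
                       num_rings, payload, total, total - payload);
        }
        return 0;
}

The single trailing MI_NOOP in the patch corresponds to the pad column here, which is why it is emitted only when num_rings is even.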
+ */ + flags |= PIN_MAPPABLE; + ret = i915_gem_obj_ggtt_pin(obj, 4096, flags); if (ret) { err_unref: drm_gem_object_unreference(&obj->base); @@ -1378,15 +1495,25 @@ static int init_phys_status_page(struct intel_engine_cs *ring) return 0; } -static int allocate_ring_buffer(struct intel_engine_cs *ring) +static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) +{ + if (!ringbuf->obj) + return; + + iounmap(ringbuf->virtual_start); + i915_gem_object_ggtt_unpin(ringbuf->obj); + drm_gem_object_unreference(&ringbuf->obj->base); + ringbuf->obj = NULL; +} + +static int intel_alloc_ringbuffer_obj(struct drm_device *dev, + struct intel_ringbuffer *ringbuf) { - struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_ringbuffer *ringbuf = ring->buffer; struct drm_i915_gem_object *obj; int ret; - if (intel_ring_initialized(ring)) + if (ringbuf->obj) return 0; obj = NULL; @@ -1397,6 +1524,9 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring) if (obj == NULL) return -ENOMEM; + /* mark ring buffers as read-only from GPU side by default */ + obj->gt_ro = 1; + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); if (ret) goto err_unref; @@ -1455,7 +1585,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto error; } - ret = allocate_ring_buffer(ring); + ret = intel_alloc_ringbuffer_obj(dev, ringbuf); if (ret) { DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); goto error; @@ -1496,11 +1626,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) intel_stop_ring_buffer(ring); WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); - iounmap(ringbuf->virtual_start); - - i915_gem_object_ggtt_unpin(ringbuf->obj); - drm_gem_object_unreference(&ringbuf->obj->base); - ringbuf->obj = NULL; + intel_destroy_ringbuffer_obj(ringbuf); ring->preallocated_lazy_request = NULL; ring->outstanding_lazy_seqno = 0; @@ -1526,7 +1652,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n) ringbuf->head = ringbuf->last_retired_head; ringbuf->last_retired_head = -1; - ringbuf->space = ring_space(ring); + ringbuf->space = ring_space(ringbuf); if (ringbuf->space >= n) return 0; } @@ -1549,7 +1675,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n) ringbuf->head = ringbuf->last_retired_head; ringbuf->last_retired_head = -1; - ringbuf->space = ring_space(ring); + ringbuf->space = ring_space(ringbuf); return 0; } @@ -1578,7 +1704,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n) trace_i915_ring_wait_begin(ring); do { ringbuf->head = I915_READ_HEAD(ring); - ringbuf->space = ring_space(ring); + ringbuf->space = ring_space(ringbuf); if (ringbuf->space >= n) { ret = 0; break; @@ -1630,7 +1756,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring) iowrite32(MI_NOOP, virt++); ringbuf->tail = 0; - ringbuf->space = ring_space(ring); + ringbuf->space = ring_space(ringbuf); return 0; } @@ -1746,14 +1872,15 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring) void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; BUG_ON(ring->outstanding_lazy_seqno); - if (INTEL_INFO(ring->dev)->gen >= 6) { + if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) { I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); - if 
(HAS_VEBOX(ring->dev)) + if (HAS_VEBOX(dev)) I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); } @@ -1941,45 +2068,74 @@ int intel_init_render_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring = &dev_priv->ring[RCS]; + struct drm_i915_gem_object *obj; + int ret; ring->name = "render ring"; ring->id = RCS; ring->mmio_base = RENDER_RING_BASE; - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_INFO(dev)->gen >= 8) { + if (i915_semaphore_is_enabled(dev)) { + obj = i915_gem_alloc_object(dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); + i915.semaphores = 0; + } else { + i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); + if (ret != 0) { + drm_gem_object_unreference(&obj->base); + DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); + i915.semaphores = 0; + } else + dev_priv->semaphore_obj = obj; + } + } + ring->add_request = gen6_add_request; + ring->flush = gen8_render_ring_flush; + ring->irq_get = gen8_ring_get_irq; + ring->irq_put = gen8_ring_put_irq; + ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; + ring->get_seqno = gen6_ring_get_seqno; + ring->set_seqno = ring_set_seqno; + if (i915_semaphore_is_enabled(dev)) { + WARN_ON(!dev_priv->semaphore_obj); + ring->semaphore.sync_to = gen8_ring_sync; + ring->semaphore.signal = gen8_rcs_signal; + GEN8_RING_SEMAPHORE_INIT; + } + } else if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; ring->flush = gen7_render_ring_flush; if (INTEL_INFO(dev)->gen == 6) ring->flush = gen6_render_ring_flush; - if (INTEL_INFO(dev)->gen >= 8) { - ring->flush = gen8_render_ring_flush; - ring->irq_get = gen8_ring_get_irq; - ring->irq_put = gen8_ring_put_irq; - } else { - ring->irq_get = gen6_ring_get_irq; - ring->irq_put = gen6_ring_put_irq; - } + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; ring->get_seqno = gen6_ring_get_seqno; ring->set_seqno = ring_set_seqno; - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on pre-gen8 platform. - * And there is no VCS2 ring on the pre-gen8 platform. So the - * semaphore between RCS and VCS2 is initialized as INVALID. - * Gen8 will initialize the sema between VCS2 and RCS later. - */ - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen6_ring_sync; + ring->semaphore.signal = gen6_signal; + /* + * The current semaphore is only applied on pre-gen8 + * platform. And there is no VCS2 ring on the pre-gen8 + * platform. So the semaphore between RCS and VCS2 is + * initialized as INVALID. Gen8 will initialize the + * sema between VCS2 and RCS later. 
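The GEN8 paths above keep every semaphore slot in one GGTT object: each (signaller, waiter) pair owns its own qword, addressed as signaller * NUM_RINGS * 8 + waiter * 8, and the wait-side lookup is the transpose. A quick standalone check of that addressing against the table documented further down in intel_ringbuffer.h (the enum and sizes here are restated for illustration):

#include <stdio.h>

enum ring_id { RCS, VCS, BCS, VECS, VCS2, NUM_RINGS };

#define SEQNO_SIZE 8    /* one qword per slot, for MI_FLUSH_DW alignment */

/* Slot a signaller writes for a given waiter. */
static unsigned int signal_offset(int signaller, int waiter)
{
        return signaller * NUM_RINGS * SEQNO_SIZE + waiter * SEQNO_SIZE;
}

/* Slot a waiter polls for a given signaller: the transpose. */
static unsigned int wait_offset(int waiter, int signaller)
{
        return signal_offset(signaller, waiter);
}

int main(void)
{
        /* Matches the header's tables: RCS signals VCS at 0x08,
         * VCS signals RCS at 0x28, VCS waits on RCS at 0x08. */
        printf("RCS->VCS signal: 0x%02x\n", signal_offset(RCS, VCS));
        printf("VCS->RCS signal: 0x%02x\n", signal_offset(VCS, RCS));
        printf("VCS waits on RCS at: 0x%02x\n", wait_offset(VCS, RCS));
        return 0;
}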
+ */ + ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; + ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; + ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; + ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; + ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; + ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; + ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; + ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + } } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; ring->flush = gen4_render_ring_flush; @@ -2007,6 +2163,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->irq_enable_mask = I915_USER_INTERRUPT; } ring->write_tail = ring_write_tail; + if (IS_HASWELL(dev)) ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; else if (IS_GEN8(dev)) @@ -2024,9 +2181,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev) /* Workaround batchbuffer to combat CS tlb bug. */ if (HAS_BROKEN_CS_TLB(dev)) { - struct drm_i915_gem_object *obj; - int ret; - obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); if (obj == NULL) { DRM_ERROR("Failed to allocate batch bo\n"); @@ -2157,31 +2311,32 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->irq_put = gen8_ring_put_irq; ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen8_ring_sync; + ring->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT; + } } else { ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen6_ring_sync; + ring->semaphore.signal = gen6_signal; + ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; + ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; + ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; + ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; + ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; + ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; + ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; + ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + } } - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on pre-gen8 platform. - * And there is no VCS2 ring on the pre-gen8 platform. So the - * semaphore between VCS and VCS2 is initialized as INVALID. - * Gen8 will initialize the sema between VCS2 and VCS later. 
- */ - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; } else { ring->mmio_base = BSD_RING_BASE; ring->flush = bsd_ring_flush; @@ -2218,7 +2373,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) return -EINVAL; } - ring->name = "bds2_ring"; + ring->name = "bsd2 ring"; ring->id = VCS2; ring->write_tail = ring_write_tail; @@ -2233,25 +2388,11 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) ring->irq_put = gen8_ring_put_irq; ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on the pre-gen8. And there - * is no bsd2 ring on the pre-gen8. So now the semaphore_register - * between VCS2 and other ring is initialized as invalid. - * Gen8 will initialize the sema between VCS2 and other ring later. - */ - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen8_ring_sync; + ring->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT; + } ring->init = init_ring_common; return intel_init_ring_buffer(dev, ring); @@ -2277,30 +2418,38 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) ring->irq_get = gen8_ring_get_irq; ring->irq_put = gen8_ring_put_irq; ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen8_ring_sync; + ring->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT; + } } else { ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.signal = gen6_signal; + ring->semaphore.sync_to = gen6_ring_sync; + /* + * The current semaphore is only applied on pre-gen8 + * platform. And there is no VCS2 ring on the pre-gen8 + * platform. So the semaphore between BCS and VCS2 is + * initialized as INVALID. Gen8 will initialize the + * sema between BCS and VCS2 later. 
+ */ + ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; + ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; + ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; + ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; + ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; + ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; + ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; + ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + } } - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on pre-gen8 platform. And - * there is no VCS2 ring on the pre-gen8 platform. So the semaphore - * between BCS and VCS2 is initialized as INVALID. - * Gen8 will initialize the sema between BCS and VCS2 later. - */ - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; ring->init = init_ring_common; return intel_init_ring_buffer(dev, ring); @@ -2327,24 +2476,31 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) ring->irq_get = gen8_ring_get_irq; ring->irq_put = gen8_ring_put_irq; ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen8_ring_sync; + ring->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT; + } } else { ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; ring->irq_get = hsw_vebox_get_irq; ring->irq_put = hsw_vebox_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen6_ring_sync; + ring->semaphore.signal = gen6_signal; + ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; + ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; + ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; + ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; + ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; + ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; + ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; + ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + } } - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; ring->init = init_ring_common; return intel_init_ring_buffer(dev, ring); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
b/drivers/gpu/drm/i915/intel_ringbuffer.h index e72017bdcd7f..ed5941078f92 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -40,6 +40,32 @@ struct intel_hw_status_page { #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) #define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) +/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to + * do the writes, and that must have qw aligned offsets, simply pretend it's 8b. + */ +#define i915_semaphore_seqno_size sizeof(uint64_t) +#define GEN8_SIGNAL_OFFSET(__ring, to) \ + (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ + ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \ + (i915_semaphore_seqno_size * (to))) + +#define GEN8_WAIT_OFFSET(__ring, from) \ + (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ + ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \ + (i915_semaphore_seqno_size * (__ring)->id)) + +#define GEN8_RING_SEMAPHORE_INIT do { \ + if (!dev_priv->semaphore_obj) { \ + break; \ + } \ + ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \ + ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \ + ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \ + ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \ + ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \ + ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \ + } while(0) + enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT, @@ -127,15 +153,55 @@ struct intel_engine_cs { #define I915_DISPATCH_PINNED 0x2 void (*cleanup)(struct intel_engine_cs *ring); + /* GEN8 signal/wait table - never trust comments! + * signal to signal to signal to signal to signal to + * RCS VCS BCS VECS VCS2 + * -------------------------------------------------------------------- + * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) | + * |------------------------------------------------------------------- + * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) | + * |------------------------------------------------------------------- + * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) | + * |------------------------------------------------------------------- + * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) | + * |------------------------------------------------------------------- + * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) | + * |------------------------------------------------------------------- + * + * Generalization: + * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id) + * ie. 
transpose of g(x, y) + * + * sync from sync from sync from sync from sync from + * RCS VCS BCS VECS VCS2 + * -------------------------------------------------------------------- + * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) | + * |------------------------------------------------------------------- + * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) | + * |------------------------------------------------------------------- + * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) | + * |------------------------------------------------------------------- + * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) | + * |------------------------------------------------------------------- + * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) | + * |------------------------------------------------------------------- + * + * Generalization: + * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id) + * ie. transpose of f(x, y) + */ struct { u32 sync_seqno[I915_NUM_RINGS-1]; - struct { - /* our mbox written by others */ - u32 wait[I915_NUM_RINGS]; - /* mboxes this ring signals to */ - u32 signal[I915_NUM_RINGS]; - } mbox; + union { + struct { + /* our mbox written by others */ + u32 wait[I915_NUM_RINGS]; + /* mboxes this ring signals to */ + u32 signal[I915_NUM_RINGS]; + } mbox; + u64 signal_ggtt[I915_NUM_RINGS]; + }; /* AKA wait() */ int (*sync_to)(struct intel_engine_cs *ring, @@ -238,9 +304,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring, int idx; /* - * cs -> 0 = vcs, 1 = bcs - * vcs -> 0 = bcs, 1 = cs, - * bcs -> 0 = cs, 1 = vcs. + * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2; + * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs; + * bcs -> 0 = vecs, 1 = vcs2, 
2 = rcs, 3 = vcs; + * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs; + * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs; */ idx = (other - ring) - 1; @@ -318,9 +386,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev); u64 intel_ring_get_active_head(struct intel_engine_cs *ring); void intel_ring_setup_status_page(struct intel_engine_cs *ring); -static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring) +static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) { - return ring->buffer->tail; + return ringbuf->tail; } static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 20375cc7f82d..9350edd6728d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2433,7 +2433,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, connector->base.unregister = intel_sdvo_connector_unregister; intel_connector_attach_encoder(&connector->base, &encoder->base); - ret = drm_sysfs_connector_add(drm_connector); + ret = drm_connector_register(drm_connector); if (ret < 0) goto err1; @@ -2446,7 +2446,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, return 0; err2: - drm_sysfs_connector_remove(drm_connector); + drm_connector_unregister(drm_connector); err1: drm_connector_cleanup(drm_connector); @@ -2559,7 +2559,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) return true; err: - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); intel_sdvo_destroy(connector); return false; } @@ -2638,7 +2638,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) return true; err: - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); intel_sdvo_destroy(connector); return false; } @@ -2711,7 +2711,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { if (intel_attached_encoder(connector) == &intel_sdvo->base) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); intel_sdvo_destroy(connector); } } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 1b66ddcdfb33..6afd1cfe7c44 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -691,6 +691,14 @@ intel_post_enable_primary(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); /* + * BDW signals flip done immediately if the plane + * is disabled, even if the plane enable is already + * armed to occur at the next vblank :( + */ + if (IS_BROADWELL(dev)) + intel_wait_for_vblank(dev, intel_crtc->pipe); + + /* * FIXME IPS should be fine as long as one plane is * enabled, but in practice it seems to have problems * when going from primary only to sprite only and vice @@ -811,6 +819,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_device *dev = plane->dev; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_plane *intel_plane = to_intel_plane(plane); + enum pipe pipe = intel_crtc->pipe; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; struct drm_i915_gem_object *old_obj = intel_plane->obj; @@ -998,6 +1007,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, */ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + i915_gem_track_fb(old_obj, obj, + 
INTEL_FRONTBUFFER_SPRITE(pipe)); mutex_unlock(&dev->struct_mutex); if (ret) @@ -1031,6 +1042,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, else intel_plane->disable_plane(plane, crtc); + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe)); + if (!primary_was_enabled && primary_enabled) intel_post_enable_primary(crtc); } @@ -1060,6 +1073,7 @@ intel_disable_plane(struct drm_plane *plane) struct drm_device *dev = plane->dev; struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_crtc *intel_crtc; + enum pipe pipe; if (!plane->fb) return 0; @@ -1068,6 +1082,7 @@ intel_disable_plane(struct drm_plane *plane) return -EINVAL; intel_crtc = to_intel_crtc(plane->crtc); + pipe = intel_crtc->pipe; if (intel_crtc->active) { bool primary_was_enabled = intel_crtc->primary_enabled; @@ -1086,6 +1101,8 @@ intel_disable_plane(struct drm_plane *plane) mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(intel_plane->obj); + i915_gem_track_fb(intel_plane->obj, NULL, + INTEL_FRONTBUFFER_SPRITE(pipe)); mutex_unlock(&dev->struct_mutex); intel_plane->obj = NULL; @@ -1106,7 +1123,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_sprite_colorkey *set = data; - struct drm_mode_object *obj; struct drm_plane *plane; struct intel_plane *intel_plane; int ret = 0; @@ -1120,13 +1136,12 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); - obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE); - if (!obj) { + plane = drm_plane_find(dev, set->plane_id); + if (!plane) { ret = -ENOENT; goto out_unlock; } - plane = obj_to_plane(obj); intel_plane = to_intel_plane(plane); ret = intel_plane->update_colorkey(plane, set); @@ -1139,7 +1154,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_sprite_colorkey *get = data; - struct drm_mode_object *obj; struct drm_plane *plane; struct intel_plane *intel_plane; int ret = 0; @@ -1149,13 +1163,12 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); - obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE); - if (!obj) { + plane = drm_plane_find(dev, get->plane_id); + if (!plane) { ret = -ENOENT; goto out_unlock; } - plane = obj_to_plane(obj); intel_plane = to_intel_plane(plane); intel_plane->get_colorkey(plane, get); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 67c6c9a2eb1c..e211eef4b7e4 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1680,5 +1680,5 @@ intel_tv_init(struct drm_device *dev) drm_object_attach_property(&connector->base, dev->mode_config.tv_bottom_margin_property, intel_tv->margin[TV_MARGIN_BOTTOM]); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4f6fef7ac069..e0f0843569a6 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -231,8 +231,8 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv, } /* WaRsForcewakeWaitTC0:vlv */ - __gen6_gt_wait_for_thread_c0(dev_priv); - + if (!IS_CHERRYVIEW(dev_priv->dev)) + __gen6_gt_wait_for_thread_c0(dev_priv); } static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, @@ -250,9 +250,10 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, __raw_i915_write32(dev_priv, 
FORCEWAKE_MEDIA_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - /* The below doubles as a POSTING_READ */ - gen6_gt_check_fifodbg(dev_priv); - + /* something from same cacheline, but !FORCEWAKE_VLV */ + __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); + if (!IS_CHERRYVIEW(dev_priv->dev)) + gen6_gt_check_fifodbg(dev_priv); } static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) @@ -315,7 +316,7 @@ static void gen6_force_wake_timer(unsigned long arg) intel_runtime_pm_put(dev_priv); } -static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) +void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; @@ -357,16 +358,12 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; - } else { - dev_priv->uncore.forcewake_count = 0; - dev_priv->uncore.fw_rendercount = 0; - dev_priv->uncore.fw_mediacount = 0; } spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -void intel_uncore_early_sanitize(struct drm_device *dev) +void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -389,7 +386,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev) __raw_i915_write32(dev_priv, GTFIFODBG, __raw_i915_read32(dev_priv, GTFIFODBG)); - intel_uncore_forcewake_reset(dev, false); + intel_uncore_forcewake_reset(dev, restore_forcewake); } void intel_uncore_sanitize(struct drm_device *dev) @@ -469,16 +466,43 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv) #define NEEDS_FORCE_WAKE(dev_priv, reg) \ ((reg) < 0x40000 && (reg) != FORCEWAKE) -#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ - (((reg) >= 0x2000 && (reg) < 0x4000) ||\ - ((reg) >= 0x5000 && (reg) < 0x8000) ||\ - ((reg) >= 0xB000 && (reg) < 0x12000) ||\ - ((reg) >= 0x2E000 && (reg) < 0x30000)) +#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) -#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\ - (((reg) >= 0x12000 && (reg) < 0x14000) ||\ - ((reg) >= 0x22000 && (reg) < 0x24000) ||\ - ((reg) >= 0x30000 && (reg) < 0x40000)) +#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x2000, 0x4000) || \ + REG_RANGE((reg), 0x5000, 0x8000) || \ + REG_RANGE((reg), 0xB000, 0x12000) || \ + REG_RANGE((reg), 0x2E000, 0x30000)) + +#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x12000, 0x14000) || \ + REG_RANGE((reg), 0x22000, 0x24000) || \ + REG_RANGE((reg), 0x30000, 0x40000)) + +#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x2000, 0x4000) || \ + REG_RANGE((reg), 0x5000, 0x8000) || \ + REG_RANGE((reg), 0x8300, 0x8500) || \ + REG_RANGE((reg), 0xB000, 0xC000) || \ + REG_RANGE((reg), 0xE000, 0xE800)) + +#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x8800, 0x8900) || \ + REG_RANGE((reg), 0xD000, 0xD800) || \ + REG_RANGE((reg), 0x12000, 0x14000) || \ + REG_RANGE((reg), 0x1A000, 0x1C000) || \ + REG_RANGE((reg), 0x1E800, 0x1EA00) || \ + REG_RANGE((reg), 0x30000, 0x40000)) + +#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x4000, 0x5000) || \ + REG_RANGE((reg), 0x8000, 0x8300) || \ + REG_RANGE((reg), 0x8500, 0x8600) || \ + REG_RANGE((reg), 0x9000, 0xB000) || \ + REG_RANGE((reg), 0xC000, 0xC800) || \ + REG_RANGE((reg), 0xF000, 0x10000) || \ + REG_RANGE((reg), 0x14000, 0x14400) || \ + 
REG_RANGE((reg), 0x22000, 0x24000)) static void ilk_dummy_write(struct drm_i915_private *dev_priv) @@ -573,7 +597,35 @@ vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ REG_READ_FOOTER; \ } +#define __chv_read(x) \ +static u##x \ +chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ + unsigned fwengine = 0; \ + REG_READ_HEADER(x); \ + if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine = FORCEWAKE_RENDER; \ + } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine = FORCEWAKE_MEDIA; \ + } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine |= FORCEWAKE_RENDER; \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine |= FORCEWAKE_MEDIA; \ + } \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ + val = __raw_i915_read##x(dev_priv, reg); \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ + REG_READ_FOOTER; \ +} +__chv_read(8) +__chv_read(16) +__chv_read(32) +__chv_read(64) __vlv_read(8) __vlv_read(16) __vlv_read(32) @@ -591,6 +643,7 @@ __gen4_read(16) __gen4_read(32) __gen4_read(64) +#undef __chv_read #undef __vlv_read #undef __gen6_read #undef __gen5_read @@ -695,6 +748,38 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace REG_WRITE_FOOTER; \ } +#define __chv_write(x) \ +static void \ +chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ + unsigned fwengine = 0; \ + bool shadowed = is_gen8_shadowed(dev_priv, reg); \ + REG_WRITE_HEADER; \ + if (!shadowed) { \ + if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine = FORCEWAKE_RENDER; \ + } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine = FORCEWAKE_MEDIA; \ + } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine |= FORCEWAKE_RENDER; \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine |= FORCEWAKE_MEDIA; \ + } \ + } \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ + __raw_i915_write##x(dev_priv, reg, val); \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ + REG_WRITE_FOOTER; \ +} + +__chv_write(8) +__chv_write(16) +__chv_write(32) +__chv_write(64) __gen8_write(8) __gen8_write(16) __gen8_write(32) @@ -716,6 +801,7 @@ __gen4_write(16) __gen4_write(32) __gen4_write(64) +#undef __chv_write #undef __gen8_write #undef __hsw_write #undef __gen6_write @@ -731,7 +817,7 @@ void intel_uncore_init(struct drm_device *dev) setup_timer(&dev_priv->uncore.force_wake_timer, gen6_force_wake_timer, (unsigned long)dev_priv); - intel_uncore_early_sanitize(dev); + intel_uncore_early_sanitize(dev, false); if (IS_VALLEYVIEW(dev)) { dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; @@ -779,14 +865,26 @@ void intel_uncore_init(struct drm_device *dev) switch (INTEL_INFO(dev)->gen) { default: - dev_priv->uncore.funcs.mmio_writeb = gen8_write8; - dev_priv->uncore.funcs.mmio_writew = gen8_write16; - dev_priv->uncore.funcs.mmio_writel = gen8_write32; - dev_priv->uncore.funcs.mmio_writeq = gen8_write64; - dev_priv->uncore.funcs.mmio_readb = gen6_read8; - dev_priv->uncore.funcs.mmio_readw = gen6_read16; - dev_priv->uncore.funcs.mmio_readl = gen6_read32; - dev_priv->uncore.funcs.mmio_readq = 
gen6_read64; + if (IS_CHERRYVIEW(dev)) { + dev_priv->uncore.funcs.mmio_writeb = chv_write8; + dev_priv->uncore.funcs.mmio_writew = chv_write16; + dev_priv->uncore.funcs.mmio_writel = chv_write32; + dev_priv->uncore.funcs.mmio_writeq = chv_write64; + dev_priv->uncore.funcs.mmio_readb = chv_read8; + dev_priv->uncore.funcs.mmio_readw = chv_read16; + dev_priv->uncore.funcs.mmio_readl = chv_read32; + dev_priv->uncore.funcs.mmio_readq = chv_read64; + + } else { + dev_priv->uncore.funcs.mmio_writeb = gen8_write8; + dev_priv->uncore.funcs.mmio_writew = gen8_write16; + dev_priv->uncore.funcs.mmio_writel = gen8_write32; + dev_priv->uncore.funcs.mmio_writeq = gen8_write64; + dev_priv->uncore.funcs.mmio_readb = gen6_read8; + dev_priv->uncore.funcs.mmio_readw = gen6_read16; + dev_priv->uncore.funcs.mmio_readl = gen6_read32; + dev_priv->uncore.funcs.mmio_readq = gen6_read64; + } break; case 7: case 6: @@ -912,7 +1010,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev, if (args->flags || args->pad) return -EINVAL; - if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN)) + if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) return -EPERM; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1053,18 +1151,16 @@ static int gen6_do_reset(struct drm_device *dev) int intel_gpu_reset(struct drm_device *dev) { - switch (INTEL_INFO(dev)->gen) { - case 8: - case 7: - case 6: return gen6_do_reset(dev); - case 5: return ironlake_do_reset(dev); - case 4: - if (IS_G4X(dev)) - return g4x_do_reset(dev); - else - return i965_do_reset(dev); - default: return -ENODEV; - } + if (INTEL_INFO(dev)->gen >= 6) + return gen6_do_reset(dev); + else if (IS_GEN5(dev)) + return ironlake_do_reset(dev); + else if (IS_G4X(dev)) + return g4x_do_reset(dev); + else if (IS_GEN4(dev)) + return i965_do_reset(dev); + else + return -ENODEV; } void intel_uncore_check_errors(struct drm_device *dev) diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index cf11ee68a6d9..80de23d9b9c9 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -280,7 +280,7 @@ static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait) { int ret; - ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); + ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); if (ret) { if (ret != -ERESTARTSYS && ret != -EBUSY) DRM_ERROR("reserve failed %p\n", bo); diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 13b7dd83faa9..5451dc58eff1 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -272,7 +272,7 @@ static int mga_fbdev_destroy(struct drm_device *dev, return 0; } -static struct drm_fb_helper_funcs mga_fb_helper_funcs = { +static const struct drm_fb_helper_funcs mga_fb_helper_funcs = { .gamma_set = mga_crtc_fb_gamma_set, .gamma_get = mga_crtc_fb_gamma_get, .fb_probe = mgag200fb_create, @@ -293,9 +293,10 @@ int mgag200_fbdev_init(struct mga_device *mdev) return -ENOMEM; mdev->mfbdev = mfbdev; - mfbdev->helper.funcs = &mga_fb_helper_funcs; spin_lock_init(&mfbdev->dirty_lock); + drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs); + ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, mdev->num_crtc, MGAG200FB_CONN_LIMIT); if (ret) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index a034ed408252..45f04dea0ac2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ 
b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1562,19 +1562,9 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; - struct drm_mode_object *obj; - struct drm_encoder *encoder; - /* pick the encoder ids */ - if (enc_id) { - obj = - drm_mode_object_find(connector->dev, enc_id, - DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = obj_to_encoder(obj); - return encoder; - } + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); return NULL; } @@ -1621,7 +1611,7 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev) drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); mga_connector->i2c = mgag200_i2c_create(dev); if (!mga_connector->i2c) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 28f7e3ec6c28..76960faae38f 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -306,7 +306,7 @@ static void hdmi_connector_destroy(struct drm_connector *connector) hdp_disable(hdmi_connector); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); hdmi_unreference(hdmi_connector->hdmi); @@ -416,7 +416,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); ret = hpd_enable(hdmi_connector); if (ret) { diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 5107fc4826bc..c437065933e3 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -177,7 +177,7 @@ static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc, DBG("fbdev: get gamma"); } -static struct drm_fb_helper_funcs msm_fb_helper_funcs = { +static const struct drm_fb_helper_funcs msm_fb_helper_funcs = { .gamma_set = msm_crtc_fb_gamma_set, .gamma_get = msm_crtc_fb_gamma_get, .fb_probe = msm_fbdev_create, @@ -197,7 +197,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev) helper = &fbdev->base; - helper->funcs = &msm_fb_helper_funcs; + drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs); ret = drm_fb_helper_init(dev, helper, priv->num_crtcs, priv->num_connectors); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 690d7e7b6d1e..713722b0ba78 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj) int npages = obj->size >> PAGE_SHIFT; if (iommu_present(&platform_bus_type)) - p = drm_gem_get_pages(obj, 0); + p = drm_gem_get_pages(obj); else p = get_pages_vram(obj, npages); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index b6dc85c614be..ba29a701ca1d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -309,7 +309,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) struct ttm_buffer_object *bo = &nvbo->bo; int ret; - ret = ttm_bo_reserve(bo, false, false, false, 0); + ret = ttm_bo_reserve(bo, false, false, false, NULL); if (ret) goto out; @@ -350,7 +350,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) struct ttm_buffer_object *bo = &nvbo->bo; int ret, ref; - ret = ttm_bo_reserve(bo, false, false, false, 0); + ret = ttm_bo_reserve(bo, false, false, 
false, NULL); if (ret) return ret; @@ -385,7 +385,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo) { int ret; - ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 1fa222e8f007..dbdc9ad59546 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -63,7 +63,7 @@ find_encoder(struct drm_connector *connector, int type) { struct drm_device *dev = connector->dev; struct nouveau_encoder *nv_encoder; - struct drm_mode_object *obj; + struct drm_encoder *enc; int i, id; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { @@ -71,10 +71,10 @@ find_encoder(struct drm_connector *connector, int type) if (!id) break; - obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); - if (!obj) + enc = drm_encoder_find(dev, id); + if (!enc) continue; - nv_encoder = nouveau_encoder(obj_to_encoder(obj)); + nv_encoder = nouveau_encoder(enc); if (type == DCB_OUTPUT_ANY || (nv_encoder->dcb && nv_encoder->dcb->type == type)) @@ -104,7 +104,7 @@ nouveau_connector_destroy(struct drm_connector *connector) struct nouveau_connector *nv_connector = nouveau_connector(connector); nouveau_event_ref(NULL, &nv_connector->hpd); kfree(nv_connector->edid); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); if (nv_connector->aux.transfer) drm_dp_aux_unregister(&nv_connector->aux); @@ -119,7 +119,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_gpio *gpio = nouveau_gpio(drm->device); struct nouveau_encoder *nv_encoder; - struct drm_mode_object *obj; + struct drm_encoder *encoder; int i, panel = -ENODEV; /* eDP panels need powering on by us (if the VBIOS doesn't default it @@ -139,10 +139,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) if (id == 0) break; - obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(dev, id); + if (!encoder) continue; - nv_encoder = nouveau_encoder(obj_to_encoder(obj)); + nv_encoder = nouveau_encoder(encoder); if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { int ret = nouveau_dp_detect(nv_encoder); @@ -1236,6 +1236,6 @@ nouveau_connector_create(struct drm_device *dev, int index) INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return connector; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 64a42cfd3717..afe706a20f97 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -438,7 +438,7 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info) info->flags |= FBINFO_HWACCEL_DISABLED; } -static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { +static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { .gamma_set = nouveau_fbcon_gamma_set, .gamma_get = nouveau_fbcon_gamma_get, .fb_probe = nouveau_fbcon_create, @@ -464,7 +464,8 @@ nouveau_fbcon_init(struct drm_device *dev) fbcon->dev = dev; drm->fbcon = fbcon; - fbcon->helper.funcs = &nouveau_fbcon_helper_funcs; + + drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); ret = drm_fb_helper_init(dev, &fbcon->helper, dev->mode_config.num_crtc, 4); diff --git 
a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index c90c0dc0afe8..df9d451afdcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -61,7 +61,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) if (!cli->base.vm) return 0; - ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); if (ret) return ret; @@ -132,7 +132,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) if (!cli->base.vm) return; - ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); if (ret) return; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index ab0228f640a5..7e185c122750 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -76,6 +76,7 @@ static int nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem) { struct nouveau_drm *drm = nouveau_bdev(man->bdev); @@ -162,6 +163,7 @@ static int nouveau_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); @@ -242,6 +244,7 @@ static int nv04_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem) { struct nouveau_mem *node; diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 86f4ead0441d..36bc5cc80816 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -130,7 +130,7 @@ static void omap_connector_destroy(struct drm_connector *connector) struct omap_dss_device *dssdev = omap_connector->dssdev; DBG("%s", omap_connector->dssdev->name); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(omap_connector); @@ -307,7 +307,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return connector; diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index f926b4caf449..56c60552abba 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm) static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, struct page **pages, uint32_t npages, uint32_t roll) { - dma_addr_t pat_pa = 0; + dma_addr_t pat_pa = 0, data_pa = 0; uint32_t *data; struct pat *pat; struct refill_engine *engine = txn->engine_handle; @@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, .lut_id = engine->tcm->lut_id, }; - data = alloc_dma(txn, 4*i, &pat->data_pa); + data = alloc_dma(txn, 4*i, &data_pa); + /* FIXME: what if data_pa is more than 32-bit ? 
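
The FIXME points at a real mismatch: dma_addr_t can be wider than 32 bits (LPAE, for instance) while the PAT descriptor field being filled is 32-bit. A hedged sketch of making the truncation loud rather than silent, assuming the descriptor field genuinely cannot grow:

    data = alloc_dma(txn, 4 * i, &data_pa);
    /* the PAT descriptor carries only a 32-bit address; warn if the
     * DMA address would be silently truncated (sketch, not in the patch) */
    WARN_ON(upper_32_bits(data_pa));
    pat->data_pa = lower_32_bits(data_pa);
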
*/ + pat->data_pa = data_pa; while (i--) { int n = i + roll; diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 1388ca7f87e8..8436c6857cda 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -281,7 +281,7 @@ fail: return ret; } -static struct drm_fb_helper_funcs omap_fb_helper_funcs = { +static const struct drm_fb_helper_funcs omap_fb_helper_funcs = { .fb_probe = omap_fbdev_create, }; @@ -325,7 +325,7 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev) helper = &fbdev->base; - helper->funcs = &omap_fb_helper_funcs; + drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs); ret = drm_fb_helper_init(dev, helper, priv->num_crtcs, priv->num_connectors); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 95dbce286a41..e4849413ee80 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -233,11 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) WARN_ON(omap_obj->pages); - /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the - * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably - * we actually want CMA memory for it all anyways.. - */ - pages = drm_gem_get_pages(obj, GFP_KERNEL); + pages = drm_gem_get_pages(obj); if (IS_ERR(pages)) { dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); return PTR_ERR(pages); @@ -791,7 +787,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj, omap_obj->paddr = tiler_ssptr(block); omap_obj->block = block; - DBG("got paddr: %08x", omap_obj->paddr); + DBG("got paddr: %pad", &omap_obj->paddr); } omap_obj->paddr_cnt++; @@ -985,9 +981,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) off = drm_vma_node_start(&obj->vma_node); - seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d", + seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", omap_obj->flags, obj->name, obj->refcount.refcount.counter, - off, omap_obj->paddr, omap_obj->paddr_cnt, + off, &omap_obj->paddr, omap_obj->paddr_cnt, omap_obj->vaddr, omap_obj->roll); if (omap_obj->flags & OMAP_BO_TILED) { @@ -1183,9 +1179,7 @@ int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op) } } spin_unlock(&sync_lock); - - if (waiter) - kfree(waiter); + kfree(waiter); } return ret; } @@ -1347,6 +1341,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, struct omap_drm_private *priv = dev->dev_private; struct omap_gem_object *omap_obj; struct drm_gem_object *obj = NULL; + struct address_space *mapping; size_t size; int ret; @@ -1404,14 +1399,16 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, omap_obj->height = gsize.tiled.height; } - ret = 0; - if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) + if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) { drm_gem_private_object_init(dev, obj, size); - else + } else { ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto fail; - if (ret) - goto fail; + mapping = file_inode(obj->filp)->i_mapping; + mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32); + } return obj; @@ -1467,8 +1464,8 @@ void omap_gem_init(struct drm_device *dev) entry->paddr = tiler_ssptr(block); entry->block = block; - DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h, - entry->paddr, + DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h, + &entry->paddr, usergart[i].stride_pfn << PAGE_SHIFT); } } diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 
3cf31ee59aac..6af3398b5278 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply) DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width, info->out_height, info->screen_width); - DBG("%d,%d %08x %08x", info->pos_x, info->pos_y, - info->paddr, info->p_uv_addr); + DBG("%d,%d %pad %pad", info->pos_x, info->pos_y, + &info->paddr, &info->p_uv_addr); /* TODO: */ ilace = false; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 5d7ea2461852..b8ced08b6291 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -835,7 +835,7 @@ static void qxl_conn_destroy(struct drm_connector *connector) struct qxl_output *qxl_output = drm_connector_to_qxl_output(connector); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(qxl_output); } @@ -902,7 +902,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output) drm_object_attach_property(&connector->base, qdev->hotplug_mode_update_property, 0); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index f437b30ce689..df567888bb1e 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -660,7 +660,7 @@ static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev) return 0; } -static struct drm_fb_helper_funcs qxl_fb_helper_funcs = { +static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = { .fb_probe = qxl_fb_find_or_create_single, }; @@ -676,9 +676,12 @@ int qxl_fbdev_init(struct qxl_device *qdev) qfbdev->qdev = qdev; qdev->mode_info.qfbdev = qfbdev; - qfbdev->helper.funcs = &qxl_fb_helper_funcs; spin_lock_init(&qfbdev->delayed_ops_lock); INIT_LIST_HEAD(&qfbdev->delayed_ops); + + drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper, + &qxl_fb_helper_funcs); + ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, qxl_num_crtc /* num_crtc - QXL supports just 1 */, QXLFB_CONN_LIMIT); diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index d458a140c024..83a423293afd 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -31,7 +31,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait) { int r; - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) { struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; @@ -67,7 +67,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, { int r; - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) { struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index c5b1f2da3954..35f4182c63b6 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -403,16 +403,18 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret, i; + int ret; + + char dpcd_hex_dump[DP_DPCD_SIZE * 3]; ret = 
drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, DP_DPCD_SIZE); if (ret > 0) { memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: "); - for (i = 0; i < DP_DPCD_SIZE; i++) - DRM_DEBUG_KMS("%02x ", msg[i]); - DRM_DEBUG_KMS("\n"); + + hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd), + 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); + DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); radeon_dp_probe_oui(radeon_connector); diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index ae88660f34ea..0c6e1b55d968 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -1752,12 +1752,12 @@ #define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ #define EOP_TCL1_ACTION_EN (1 << 16) #define EOP_TC_ACTION_EN (1 << 17) /* L2 */ +#define EOP_TCL2_VOLATILE (1 << 24) #define EOP_CACHE_POLICY(x) ((x) << 25) /* 0 - LRU * 1 - Stream * 2 - Bypass */ -#define EOP_TCL2_VOLATILE (1 << 27) #define DATA_SEL(x) ((x) << 29) /* 0 - discard * 1 - send low 32bit data diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 5a9a5f4d7888..47d31e915758 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -1551,7 +1551,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev, table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0; table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); } return 0; diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 3f6e817d97ee..9ef8c38f2d66 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2726,7 +2726,7 @@ int kv_dpm_init(struct radeon_device *rdev) pi->caps_sclk_ds = true; pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; - pi->bapm_enable = false; + pi->bapm_enable = true; pi->voltage_drop_t = 0; pi->caps_sclk_throttle_low_notification = false; pi->caps_fps = false; /* true? 
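
On the atombios_dp.c hunk above: the old loop issued a separate DRM_DEBUG_KMS() call per DPCD byte, each with its own log prefix, so the dump was scattered over fifteen log lines. The replacement buffer is sized for exactly one dump; DP_DPCD_SIZE resolves to 15 (DP_RECEIVER_CAP_SIZE) in this driver:

    /* 15 bytes x "xx " = 45 chars; the last byte carries no trailing
     * space, which leaves room for the terminating NUL */
    char dpcd_hex_dump[DP_DPCD_SIZE * 3];

    hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
                       32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
    DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
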
*/ diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 004c931606c4..01fc4888e6fe 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -1315,7 +1315,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev, table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0; table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); } } diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 1544efcf1c3a..ebdce08cfefc 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1401,7 +1401,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p, */ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) { - struct drm_mode_object *obj; struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; struct radeon_cs_packet p3reloc, waitreloc; @@ -1441,12 +1440,11 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 5); reg = R100_CP_PACKET0_GET_REG(header); - obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); - if (!obj) { + crtc = drm_crtc_find(p->rdev->ddev, crtc_id); + if (!crtc) { DRM_ERROR("cannot find crtc %d\n", crtc_id); return -ENOENT; } - crtc = obj_to_crtc(obj); radeon_crtc = to_radeon_crtc(crtc); crtc_id = radeon_crtc->crtc_id; diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 12511bb5fd6f..c47537a1ddba 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -825,7 +825,6 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p, uint32_t *vline_start_end, uint32_t *vline_status) { - struct drm_mode_object *obj; struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; struct radeon_cs_packet p3reloc, wait_reg_mem; @@ -887,12 +886,11 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p, crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); reg = R600_CP_PACKET0_GET_REG(header); - obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); - if (!obj) { + crtc = drm_crtc_find(p->rdev->ddev, crtc_id); + if (!crtc) { DRM_ERROR("cannot find crtc %d\n", crtc_id); return -ENOENT; } - crtc = obj_to_crtc(obj); radeon_crtc = to_radeon_crtc(crtc); crtc_id = radeon_crtc->crtc_id; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 4b0bbf88d5c0..29d9cc04c04e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -102,6 +102,7 @@ extern int radeon_runtime_pm; extern int radeon_hard_reset; extern int radeon_vm_size; extern int radeon_vm_block_size; +extern int radeon_deep_color; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting @@ -749,10 +750,6 @@ union radeon_irq_stat_regs { struct cik_irq_stat_regs cik; }; -#define RADEON_MAX_HPD_PINS 7 -#define RADEON_MAX_CRTCS 6 -#define RADEON_MAX_AFMT_BLOCKS 7 - struct radeon_irq { bool installed; spinlock_t lock; diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 30844814c25a..173f378428a9 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1227,11 +1227,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) rdev->clock.default_dispclk = le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); if 
(rdev->clock.default_dispclk == 0) { - if (ASIC_IS_DCE5(rdev)) + if (ASIC_IS_DCE6(rdev)) + rdev->clock.default_dispclk = 60000; /* 600 Mhz */ + else if (ASIC_IS_DCE5(rdev)) rdev->clock.default_dispclk = 54000; /* 540 Mhz */ else rdev->clock.default_dispclk = 60000; /* 600 Mhz */ } + /* set a reasonable default for DP */ + if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) { + DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", + rdev->clock.default_dispclk / 100); + rdev->clock.default_dispclk = 60000; + } rdev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); rdev->clock.current_dispclk = rdev->clock.default_dispclk; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 1b9177ed181f..c667c43f1c1f 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -199,6 +199,9 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) } } + if ((radeon_deep_color == 0) && (bpc > 8)) + bpc = 8; + DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n", connector->name, connector->display_info.bpc, bpc); @@ -213,7 +216,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c struct drm_encoder *best_encoder = NULL; struct drm_encoder *encoder = NULL; struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; - struct drm_mode_object *obj; bool connected; int i; @@ -223,14 +225,11 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, - connector->encoder_ids[i], - DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, + connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); - if ((encoder == best_encoder) && (status == connector_status_connected)) connected = true; else @@ -246,7 +245,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type) { - struct drm_mode_object *obj; struct drm_encoder *encoder; int i; @@ -254,11 +252,10 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); if (encoder->encoder_type == encoder_type) return encoder; } @@ -268,17 +265,9 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; - struct drm_mode_object *obj; - struct drm_encoder *encoder; - /* pick the encoder ids */ - if (enc_id) { - obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = obj_to_encoder(obj); - return encoder; - } + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); return NULL; } @@ -737,7 +726,7 @@ static void radeon_connector_destroy(struct drm_connector *connector) if (radeon_connector->edid) kfree(radeon_connector->edid); kfree(radeon_connector->con_priv); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); 
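
The radeon_connectors.c hunks in this stretch are one mechanical conversion repeated at every call site, the same one applied to mgag200 and nouveau earlier in the diff: the untyped drm_mode_object_find()/obj_to_encoder() pair becomes drm_encoder_find(), and drm_sysfs_connector_add()/_remove() become drm_connector_register()/_unregister(). The shape of the conversion, condensed from the hunks themselves:

    /* before: untyped lookup plus downcast, repeated at every call site */
    obj = drm_mode_object_find(connector->dev, id, DRM_MODE_OBJECT_ENCODER);
    if (!obj)
            return NULL;
    encoder = obj_to_encoder(obj);

    /* after: the typed helper folds both steps into one call */
    encoder = drm_encoder_find(connector->dev, id);
    if (!encoder)
            return NULL;
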
drm_connector_cleanup(connector); kfree(connector); } @@ -1045,7 +1034,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = NULL; struct drm_encoder_helper_funcs *encoder_funcs; - struct drm_mode_object *obj; int i, r; enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; @@ -1150,14 +1138,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, - connector->encoder_ids[i], - DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, + connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); - if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) continue; @@ -1222,19 +1207,16 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct drm_mode_object *obj; struct drm_encoder *encoder; int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); - if (radeon_connector->use_digital == true) { if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) return encoder; @@ -1249,13 +1231,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) /* then check use digitial */ /* pick the first one */ - if (enc_id) { - obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = obj_to_encoder(obj); - return encoder; - } + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); return NULL; } @@ -1388,7 +1365,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector) u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector) { - struct drm_mode_object *obj; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; int i; @@ -1397,11 +1373,10 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); radeon_encoder = to_radeon_encoder(encoder); switch (radeon_encoder->encoder_id) { @@ -1418,7 +1393,6 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) { - struct drm_mode_object *obj; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; int i; @@ -1428,11 +1402,10 @@ bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); radeon_encoder = to_radeon_encoder(encoder); if 
(radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2) found = true; @@ -2047,7 +2020,7 @@ radeon_add_atom_connector(struct drm_device *dev, connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); if (has_aux) radeon_dp_aux_init(radeon_connector); @@ -2208,5 +2181,5 @@ radeon_add_legacy_connector(struct drm_device *dev, } else connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8fc362aa6a1a..13896edcf0b6 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -285,7 +285,6 @@ static void radeon_unpin_work_func(struct work_struct *__work) void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; - struct radeon_flip_work *work; unsigned long flags; u32 update_pending; int vpos, hpos; @@ -295,8 +294,11 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) return; spin_lock_irqsave(&rdev->ddev->event_lock, flags); - work = radeon_crtc->flip_work; - if (work == NULL) { + if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " + "RADEON_FLIP_SUBMITTED(%d)\n", + radeon_crtc->flip_status, + RADEON_FLIP_SUBMITTED); spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); return; } @@ -344,12 +346,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) spin_lock_irqsave(&rdev->ddev->event_lock, flags); work = radeon_crtc->flip_work; - if (work == NULL) { + if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " + "RADEON_FLIP_SUBMITTED(%d)\n", + radeon_crtc->flip_status, + RADEON_FLIP_SUBMITTED); spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); return; } /* Pageflip completed. Clean up. 
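
The radeon_display.c hunks here turn the implicit "flip_work != NULL" test into an explicit three-state machine, declared in the radeon_mode.h hunk below. The lifecycle, reconstructed from the touched functions (a summary of this patch, not extra code):

    /*
     * radeon_crtc_page_flip():   NONE      -> PENDING    work queued
     * radeon_flip_work_func():   PENDING   -> SUBMITTED  base address written
     * radeon_crtc_handle_flip(): SUBMITTED -> NONE       completion at vblank
     *
     * Both interrupt-side handlers now bail out unless the state is
     * SUBMITTED, closing the window where a vblank fires after the work
     * is queued but before the flip is actually programmed to the hw.
     */
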
*/ + radeon_crtc->flip_status = RADEON_FLIP_NONE; radeon_crtc->flip_work = NULL; /* wakeup userspace */ @@ -476,6 +483,7 @@ static void radeon_flip_work_func(struct work_struct *__work) /* do the flip (mmio) */ radeon_page_flip(rdev, radeon_crtc->crtc_id, base); + radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); up_read(&rdev->exclusive_lock); @@ -544,7 +552,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, /* We borrow the event spin lock for protecting flip_work */ spin_lock_irqsave(&crtc->dev->event_lock, flags); - if (radeon_crtc->flip_work) { + if (radeon_crtc->flip_status != RADEON_FLIP_NONE) { DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); @@ -552,6 +560,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, kfree(work); return -EBUSY; } + radeon_crtc->flip_status = RADEON_FLIP_PENDING; radeon_crtc->flip_work = work; /* update crtc fb */ diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 6e3017413386..cb1421369e3a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -175,6 +175,7 @@ int radeon_runtime_pm = -1; int radeon_hard_reset = 0; int radeon_vm_size = 4096; int radeon_vm_block_size = 9; +int radeon_deep_color = 0; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -248,6 +249,9 @@ module_param_named(vm_size, radeon_vm_size, int, 0444); MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)"); module_param_named(vm_block_size, radeon_vm_block_size, int, 0444); +MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))"); +module_param_named(deep_color, radeon_deep_color, int, 0444); + static struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 665ced3b7313..db598d712901 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -331,7 +331,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb return 0; } -static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { +static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = { .gamma_set = radeon_crtc_fb_gamma_set, .gamma_get = radeon_crtc_fb_gamma_get, .fb_probe = radeonfb_create, @@ -353,7 +353,9 @@ int radeon_fbdev_init(struct radeon_device *rdev) rfbdev->rdev = rdev; rdev->mode_info.rfbdev = rfbdev; - rfbdev->helper.funcs = &radeon_fb_helper_funcs; + + drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper, + &radeon_fb_helper_funcs); ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, rdev->num_crtc, diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index ad0e4b8cc7e3..0592ddb0904b 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -46,6 +46,10 @@ struct radeon_device; #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) #define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base) +#define RADEON_MAX_HPD_PINS 7 +#define RADEON_MAX_CRTCS 6 +#define RADEON_MAX_AFMT_BLOCKS 7 + enum radeon_rmx_type { RMX_OFF, RMX_FULL, @@ -233,8 +237,8 @@ struct radeon_mode_info { struct card_info *atom_card_info; enum radeon_connector_table connector_table; bool 
mode_config_initialized; - struct radeon_crtc *crtcs[6]; - struct radeon_afmt *afmt[7]; + struct radeon_crtc *crtcs[RADEON_MAX_CRTCS]; + struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS]; /* DVI-I properties */ struct drm_property *coherent_mode_property; /* DAC enable load detect */ @@ -302,6 +306,12 @@ struct radeon_atom_ss { uint16_t amount; }; +enum radeon_flip_status { + RADEON_FLIP_NONE, + RADEON_FLIP_PENDING, + RADEON_FLIP_SUBMITTED +}; + struct radeon_crtc { struct drm_crtc base; int crtc_id; @@ -327,6 +337,7 @@ struct radeon_crtc { /* page flipping */ struct workqueue_struct *flip_queue; struct radeon_flip_work *flip_work; + enum radeon_flip_status flip_status; /* pll sharing */ struct radeon_atom_ss ss; bool ss_enabled; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 12c663e86ca1..e447e390d09a 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -73,8 +73,10 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev) rdev->pm.dpm.ac_power = true; else rdev->pm.dpm.ac_power = false; - if (rdev->asic->dpm.enable_bapm) - radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); + if (rdev->family == CHIP_ARUBA) { + if (rdev->asic->dpm.enable_bapm) + radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); + } mutex_unlock(&rdev->pm.mutex); } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (rdev->pm.profile == PM_PROFILE_AUTO) { diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 899d9126cad6..eecff6bbd341 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -495,7 +495,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, mutex_unlock(&vm->mutex); r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, - RADEON_GPU_PAGE_SIZE, false, + RADEON_GPU_PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &pt); if (r) return r; @@ -992,7 +992,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) return -ENOMEM; } - r = radeon_bo_create(rdev, pd_size, align, false, + r = radeon_bo_create(rdev, pd_size, align, true, RADEON_GEM_DOMAIN_VRAM, NULL, &vm->page_directory); if (r) diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 2a2822c03329..20da6ff183df 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1874,7 +1874,15 @@ int trinity_dpm_init(struct radeon_device *rdev) for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) pi->at[i] = TRINITY_AT_DFLT; - pi->enable_bapm = false; + /* There are stability issues reported on laptops with + * bapm installed when switching between AC and battery + * power. At the same time, some desktop boards hang + * if it's not enabled and dpm is enabled.
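
The branch added to trinity_dpm_init() reduces to a single assignment; a compact equivalent of what the patch does (sketch):

    /* BAPM stays off on mobility parts, where AC/battery switches were
     * reported to hang, and defaults on for desktop boards, some of
     * which hang with dpm enabled but bapm off */
    pi->enable_bapm = !(rdev->flags & RADEON_IS_MOBILITY);
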
+ */ + if (rdev->flags & RADEON_IS_MOBILITY) + pi->enable_bapm = false; + else + pi->enable_bapm = true; pi->enable_nbps_policy = true; pi->enable_sclk_ds = true; pi->enable_gfx_power_gating = true; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 792fd1d20e86..fda64b7b73e8 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -187,7 +187,7 @@ static struct drm_driver rcar_du_driver = { * Power management */ -#if CONFIG_PM_SLEEP +#ifdef CONFIG_PM_SLEEP static int rcar_du_pm_suspend(struct device *dev) { struct rcar_du_device *rcdu = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c index 289048d1c7b2..21426bd234eb 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -64,7 +64,7 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { static void rcar_du_lvds_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -105,7 +105,7 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_sysfs_connector_add(connector); + ret = drm_connector_register(connector); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c index ccfe64c7188f..8af3944d31b9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -32,7 +32,7 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { static void rcar_du_vga_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -70,7 +70,7 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_sysfs_connector_add(connector); + ret = drm_connector_register(connector); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index faf176b2daf9..47875de89010 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -692,7 +692,7 @@ static void shmob_drm_connector_destroy(struct drm_connector *connector) struct shmob_drm_connector *scon = to_shmob_connector(connector); shmob_drm_backlight_exit(scon); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -726,7 +726,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_sysfs_connector_add(connector); + ret = drm_connector_register(connector); if (ret < 0) goto err_cleanup; @@ -749,7 +749,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev, err_backlight: shmob_drm_backlight_exit(&sdev->connector); err_sysfs: - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); err_cleanup: drm_connector_cleanup(connector); return ret; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 82c84c7fd4f6..ff4ba483b602 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ 
b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -297,7 +297,7 @@ static struct drm_driver shmob_drm_driver = { * Power management */ -#if CONFIG_PM_SLEEP +#ifdef CONFIG_PM_SLEEP static int shmob_drm_pm_suspend(struct device *dev) { struct shmob_drm_device *sdev = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 3396f9f6a9f7..fd736efd14bd 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -40,6 +40,12 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) drm_mode_config_init(drm); + err = tegra_drm_fb_prepare(drm); + if (err < 0) + return err; + + drm_kms_helper_poll_init(drm); + err = host1x_device_init(device); if (err < 0) return err; @@ -59,8 +65,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) if (err < 0) return err; - drm_kms_helper_poll_init(drm); - return 0; } diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 6b8fe9d86ed4..0d30689dff01 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -280,6 +280,7 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, unsigned int index); bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer); bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer); +int tegra_drm_fb_prepare(struct drm_device *drm); int tegra_drm_fb_init(struct drm_device *drm); void tegra_drm_fb_exit(struct drm_device *drm); #ifdef CONFIG_DRM_TEGRA_FBDEV diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 9798a7080322..fc1528e0bda1 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -267,18 +267,13 @@ release: return err; } -static struct drm_fb_helper_funcs tegra_fb_helper_funcs = { +static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = { .fb_probe = tegra_fbdev_probe, }; -static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm, - unsigned int preferred_bpp, - unsigned int num_crtc, - unsigned int max_connectors) +static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm) { - struct drm_fb_helper *helper; struct tegra_fbdev *fbdev; - int err; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); if (!fbdev) { @@ -286,13 +281,23 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm, return ERR_PTR(-ENOMEM); } - fbdev->base.funcs = &tegra_fb_helper_funcs; - helper = &fbdev->base; + drm_fb_helper_prepare(drm, &fbdev->base, &tegra_fb_helper_funcs); + + return fbdev; +} + +static int tegra_fbdev_init(struct tegra_fbdev *fbdev, + unsigned int preferred_bpp, + unsigned int num_crtc, + unsigned int max_connectors) +{ + struct drm_device *drm = fbdev->base.dev; + int err; err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors); if (err < 0) { dev_err(drm->dev, "failed to initialize DRM FB helper\n"); - goto free; + return err; } err = drm_fb_helper_single_add_all_connectors(&fbdev->base); @@ -301,21 +306,17 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm, goto fini; } - drm_helper_disable_unused_functions(drm); - err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp); if (err < 0) { dev_err(drm->dev, "failed to set initial configuration\n"); goto fini; } - return fbdev; + return 0; fini: drm_fb_helper_fini(&fbdev->base); -free: - kfree(fbdev); - return ERR_PTR(err); + return err; } static void tegra_fbdev_free(struct tegra_fbdev *fbdev) @@ -366,7 +367,7 @@ static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { #endif }; -int 
tegra_drm_fb_init(struct drm_device *drm) +int tegra_drm_fb_prepare(struct drm_device *drm) { #ifdef CONFIG_DRM_TEGRA_FBDEV struct tegra_drm *tegra = drm->dev_private; @@ -381,8 +382,7 @@ int tegra_drm_fb_init(struct drm_device *drm) drm->mode_config.funcs = &tegra_drm_mode_funcs; #ifdef CONFIG_DRM_TEGRA_FBDEV - tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc, - drm->mode_config.num_connector); + tegra->fbdev = tegra_fbdev_create(drm); if (IS_ERR(tegra->fbdev)) return PTR_ERR(tegra->fbdev); #endif @@ -390,6 +390,21 @@ int tegra_drm_fb_init(struct drm_device *drm) return 0; } +int tegra_drm_fb_init(struct drm_device *drm) +{ +#ifdef CONFIG_DRM_TEGRA_FBDEV + struct tegra_drm *tegra = drm->dev_private; + int err; + + err = tegra_fbdev_init(tegra->fbdev, 32, drm->mode_config.num_crtc, + drm->mode_config.num_connector); + if (err < 0) + return err; +#endif + + return 0; +} + void tegra_drm_fb_exit(struct drm_device *drm) { #ifdef CONFIG_DRM_TEGRA_FBDEV diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index a3e4f1eca6f7..446837e955b6 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -105,7 +105,7 @@ static void drm_connector_clear(struct drm_connector *connector) static void tegra_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); drm_connector_clear(connector); } @@ -318,7 +318,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output) drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); drm_mode_connector_attach_encoder(&output->connector, &output->encoder); - drm_sysfs_connector_add(&output->connector); + drm_connector_register(&output->connector); output->encoder.possible_crtcs = 0x3; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index b20b69488dc9..6be623b4a86f 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -120,8 +120,8 @@ static int cpufreq_transition(struct notifier_block *nb, static int tilcdc_unload(struct drm_device *dev) { struct tilcdc_drm_private *priv = dev->dev_private; - struct tilcdc_module *mod, *cur; + drm_fbdev_cma_fini(priv->fbdev); drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); @@ -148,11 +148,6 @@ static int tilcdc_unload(struct drm_device *dev) pm_runtime_disable(dev->dev); - list_for_each_entry_safe(mod, cur, &module_list, list) { - DBG("destroying module: %s", mod->name); - mod->funcs->destroy(mod); - } - kfree(priv); return 0; @@ -628,13 +623,13 @@ static int __init tilcdc_drm_init(void) static void __exit tilcdc_drm_fini(void) { DBG("fini"); - tilcdc_tfp410_fini(); - tilcdc_slave_fini(); - tilcdc_panel_fini(); platform_driver_unregister(&tilcdc_platform_driver); + tilcdc_panel_fini(); + tilcdc_slave_fini(); + tilcdc_tfp410_fini(); } -late_initcall(tilcdc_drm_init); +module_init(tilcdc_drm_init); module_exit(tilcdc_drm_fini); MODULE_AUTHOR("Rob Clark <robdclark@gmail.com"); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h index 093803683b25..7596c144a9fb 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h @@ -98,7 +98,6 @@ struct tilcdc_module; struct tilcdc_module_ops { /* create appropriate encoders/connectors: */ int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev); - void (*destroy)(struct tilcdc_module *mod); #ifdef 
CONFIG_DEBUG_FS /* create debugfs nodes (can be NULL): */ int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index 86c67329b605..4c7aa1d8134f 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -151,6 +151,7 @@ struct panel_connector { static void panel_connector_destroy(struct drm_connector *connector) { struct panel_connector *panel_connector = to_panel_connector(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(panel_connector); } @@ -247,7 +248,7 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev, if (ret) goto fail; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return connector; @@ -281,23 +282,8 @@ static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev) return 0; } -static void panel_destroy(struct tilcdc_module *mod) -{ - struct panel_module *panel_mod = to_panel_module(mod); - - if (panel_mod->timings) { - display_timings_release(panel_mod->timings); - kfree(panel_mod->timings); - } - - tilcdc_module_cleanup(mod); - kfree(panel_mod->info); - kfree(panel_mod); -} - static const struct tilcdc_module_ops panel_module_ops = { .modeset_init = panel_modeset_init, - .destroy = panel_destroy, }; /* @@ -373,6 +359,7 @@ static int panel_probe(struct platform_device *pdev) return -ENOMEM; mod = &panel_mod->base; + pdev->dev.platform_data = mod; tilcdc_module_init(mod, "panel", &panel_module_ops); @@ -380,17 +367,16 @@ static int panel_probe(struct platform_device *pdev) if (IS_ERR(pinctrl)) dev_warn(&pdev->dev, "pins are not configured\n"); - panel_mod->timings = of_get_display_timings(node); if (!panel_mod->timings) { dev_err(&pdev->dev, "could not get panel timings\n"); - goto fail; + goto fail_free; } panel_mod->info = of_get_panel_info(node); if (!panel_mod->info) { dev_err(&pdev->dev, "could not get panel info\n"); - goto fail; + goto fail_timings; } mod->preferred_bpp = panel_mod->info->bpp; @@ -401,13 +387,26 @@ static int panel_probe(struct platform_device *pdev) return 0; -fail: - panel_destroy(mod); +fail_timings: + display_timings_release(panel_mod->timings); + +fail_free: + kfree(panel_mod); + tilcdc_module_cleanup(mod); return ret; } static int panel_remove(struct platform_device *pdev) { + struct tilcdc_module *mod = dev_get_platdata(&pdev->dev); + struct panel_module *panel_mod = to_panel_module(mod); + + display_timings_release(panel_mod->timings); + + tilcdc_module_cleanup(mod); + kfree(panel_mod->info); + kfree(panel_mod); + return 0; } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c index 595068ba2d5e..3775fd49dac4 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c @@ -166,6 +166,7 @@ struct slave_connector { static void slave_connector_destroy(struct drm_connector *connector) { struct slave_connector *slave_connector = to_slave_connector(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(slave_connector); } @@ -261,7 +262,7 @@ static struct drm_connector *slave_connector_create(struct drm_device *dev, if (ret) goto fail; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return connector; @@ -295,17 +296,8 @@ static int slave_modeset_init(struct tilcdc_module *mod, struct drm_device *dev) return 0; } -static void slave_destroy(struct tilcdc_module 
*mod) -{ - struct slave_module *slave_mod = to_slave_module(mod); - - tilcdc_module_cleanup(mod); - kfree(slave_mod); -} - static const struct tilcdc_module_ops slave_module_ops = { .modeset_init = slave_modeset_init, - .destroy = slave_destroy, }; /* @@ -355,10 +347,13 @@ static int slave_probe(struct platform_device *pdev) } slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL); - if (!slave_mod) - return -ENOMEM; + if (!slave_mod) { + ret = -ENOMEM; + goto fail_adapter; + } mod = &slave_mod->base; + pdev->dev.platform_data = mod; mod->preferred_bpp = slave_info.bpp; @@ -373,10 +368,20 @@ static int slave_probe(struct platform_device *pdev) tilcdc_slave_probedefer(false); return 0; + +fail_adapter: + i2c_put_adapter(slavei2c); + return ret; } static int slave_remove(struct platform_device *pdev) { + struct tilcdc_module *mod = dev_get_platdata(&pdev->dev); + struct slave_module *slave_mod = to_slave_module(mod); + + tilcdc_module_cleanup(mod); + kfree(slave_mod); + return 0; } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index c38b56b268ac..354c47ca6374 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c @@ -167,6 +167,7 @@ struct tfp410_connector { static void tfp410_connector_destroy(struct drm_connector *connector) { struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(tfp410_connector); } @@ -261,7 +262,7 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev, if (ret) goto fail; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return connector; @@ -295,23 +296,8 @@ static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev return 0; } -static void tfp410_destroy(struct tilcdc_module *mod) -{ - struct tfp410_module *tfp410_mod = to_tfp410_module(mod); - - if (tfp410_mod->i2c) - i2c_put_adapter(tfp410_mod->i2c); - - if (!IS_ERR_VALUE(tfp410_mod->gpio)) - gpio_free(tfp410_mod->gpio); - - tilcdc_module_cleanup(mod); - kfree(tfp410_mod); -} - static const struct tilcdc_module_ops tfp410_module_ops = { .modeset_init = tfp410_modeset_init, - .destroy = tfp410_destroy, }; /* @@ -341,6 +327,7 @@ static int tfp410_probe(struct platform_device *pdev) return -ENOMEM; mod = &tfp410_mod->base; + pdev->dev.platform_data = mod; tilcdc_module_init(mod, "tfp410", &tfp410_module_ops); @@ -364,6 +351,7 @@ static int tfp410_probe(struct platform_device *pdev) tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node); if (!tfp410_mod->i2c) { dev_err(&pdev->dev, "could not get i2c\n"); + of_node_put(i2c_node); goto fail; } @@ -377,19 +365,32 @@ static int tfp410_probe(struct platform_device *pdev) ret = gpio_request(tfp410_mod->gpio, "DVI_PDn"); if (ret) { dev_err(&pdev->dev, "could not get DVI_PDn gpio\n"); - goto fail; + goto fail_adapter; } } return 0; +fail_adapter: + i2c_put_adapter(tfp410_mod->i2c); + fail: - tfp410_destroy(mod); + kfree(tfp410_mod); + tilcdc_module_cleanup(mod); return ret; } static int tfp410_remove(struct platform_device *pdev) { + struct tilcdc_module *mod = dev_get_platdata(&pdev->dev); + struct tfp410_module *tfp410_mod = to_tfp410_module(mod); + + i2c_put_adapter(tfp410_mod->i2c); + gpio_free(tfp410_mod->gpio); + + tilcdc_module_cleanup(mod); + kfree(tfp410_mod); + return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4ab9f7171c4f..3da89d5dab60 100644 --- 
a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) int ret; spin_lock(&glob->lru_lock); - ret = __ttm_bo_reserve(bo, false, true, false, 0); + ret = __ttm_bo_reserve(bo, false, true, false, NULL); spin_lock(&bdev->fence_lock); (void) ttm_bo_wait(bo, false, false, true); @@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, return ret; spin_lock(&glob->lru_lock); - ret = __ttm_bo_reserve(bo, false, true, false, 0); + ret = __ttm_bo_reserve(bo, false, true, false, NULL); /* * We raced, and lost, someone else holds the reservation now, @@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) kref_get(&nentry->list_kref); } - ret = __ttm_bo_reserve(entry, false, true, false, 0); + ret = __ttm_bo_reserve(entry, false, true, false, NULL); if (remove_all && ret) { spin_unlock(&glob->lru_lock); ret = __ttm_bo_reserve(entry, false, false, - false, 0); + false, NULL); spin_lock(&glob->lru_lock); } @@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, spin_lock(&glob->lru_lock); list_for_each_entry(bo, &man->lru, lru) { - ret = __ttm_bo_reserve(bo, false, true, false, 0); + ret = __ttm_bo_reserve(bo, false, true, false, NULL); if (!ret) break; } @@ -784,7 +784,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, int ret; do { - ret = (*man->func->get_node)(man, bo, placement, mem); + ret = (*man->func->get_node)(man, bo, placement, 0, mem); if (unlikely(ret != 0)) return ret; if (mem->mm_node) @@ -897,7 +897,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (man->has_type && man->use_type) { type_found = true; - ret = (*man->func->get_node)(man, bo, placement, mem); + ret = (*man->func->get_node)(man, bo, placement, + cur_flags, mem); if (unlikely(ret)) return ret; } @@ -937,7 +938,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ttm_flag_masked(&cur_flags, placement->busy_placement[i], ~TTM_PL_MASK_MEMTYPE); - if (mem_type == TTM_PL_SYSTEM) { mem->mem_type = mem_type; mem->placement = cur_flags; @@ -1595,7 +1595,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) * Using ttm_bo_reserve makes sure the lru lists are updated. 
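
/*
 * Aside on the 0 -> NULL conversions throughout ttm_bo.c: they are type
 * corrections rather than behavioural changes. The final parameter of
 * ttm_bo_reserve()/__ttm_bo_reserve() is a pointer, so a literal 0 only
 * worked through implicit conversion and drew warnings from static
 * checkers such as sparse. The signature in question (a static inline in
 * ttm_bo_driver.h as of this series):
 */
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible, bool no_wait, bool use_ticket,
		   struct ww_acquire_ctx *ticket);
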
*/ - ret = ttm_bo_reserve(bo, true, no_wait, false, 0); + ret = ttm_bo_reserve(bo, true, no_wait, false, NULL); if (unlikely(ret != 0)) return ret; spin_lock(&bdev->fence_lock); @@ -1630,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) spin_lock(&glob->lru_lock); list_for_each_entry(bo, &glob->swap_lru, swap) { - ret = __ttm_bo_reserve(bo, false, true, false, 0); + ret = __ttm_bo_reserve(bo, false, true, false, NULL); if (!ret) break; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index bd850c9f4bca..9e103a4875c8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -50,6 +50,7 @@ struct ttm_range_manager { static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; @@ -67,7 +68,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, if (!node) return -ENOMEM; - if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN) + if (flags & TTM_PL_FLAG_TOPDOWN) aflags = DRM_MM_CREATE_TOP; spin_lock(&rman->lock); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 1df856f78568..30e5d90cb7bc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -500,7 +500,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) pgprot_val(tmp) |= _PAGE_GUARDED; } #endif -#if defined(__ia64__) +#if defined(__ia64__) || defined(__arm__) if (caching_flags & TTM_PL_FLAG_WC) tmp = pgprot_writecombine(tmp); else diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c index d7f92fe9d904..66fc6395eb54 100644 --- a/drivers/gpu/drm/ttm/ttm_module.c +++ b/drivers/gpu/drm/ttm/ttm_module.c @@ -35,7 +35,7 @@ #include <drm/drm_sysfs.h> static DECLARE_WAIT_QUEUE_HEAD(exit_q); -atomic_t device_released; +static atomic_t device_released; static struct device_type ttm_drm_class_type = { .name = "ttm", diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 863bef9f9234..beb8e75a3f00 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -790,7 +790,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, return 0; } -static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, +static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, char *name) { spin_lock_init(&pool->lock); diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index b44d548c56f8..e026a9e2942a 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -105,14 +105,7 @@ static struct drm_encoder* udl_best_single_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; - struct drm_mode_object *obj; - struct drm_encoder *encoder; - - obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = obj_to_encoder(obj); - return encoder; + return drm_encoder_find(connector->dev, enc_id); } static int udl_connector_set_property(struct drm_connector *connector, @@ -124,7 +117,7 @@ static int udl_connector_set_property(struct drm_connector *connector, static void udl_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); 
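
/*
 * Aside on the ttm_bo_manager.c hunk above: get_node now receives the
 * flags of the placement being allocated instead of reading
 * bo->mem.placement, which describes the buffer's *current* placement and
 * so cannot yet carry TTM_PL_FLAG_TOPDOWN on a first move into a new
 * memory type. A driver opts in per placement entry; hypothetical example:
 */
static const uint32_t example_vram_topdown_placement =
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_TOPDOWN;
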
drm_connector_cleanup(connector); kfree(connector); } @@ -154,7 +147,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder) drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII); drm_connector_helper_add(connector, &udl_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); drm_object_attach_property(&connector->base, diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 377176372da8..d1da339843ca 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -550,7 +550,7 @@ out: return ret; } -static struct drm_fb_helper_funcs udl_fb_helper_funcs = { +static const struct drm_fb_helper_funcs udl_fb_helper_funcs = { .fb_probe = udlfb_create, }; @@ -583,7 +583,8 @@ int udl_fbdev_init(struct drm_device *dev) return -ENOMEM; udl->fbdev = ufbdev; - ufbdev->helper.funcs = &udl_fb_helper_funcs; + + drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs); ret = drm_fb_helper_init(dev, &ufbdev->helper, 1, 1); diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index c041cd73f399..8044f5fb7c49 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } } -static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) +static int udl_gem_get_pages(struct udl_gem_object *obj) { struct page **pages; if (obj->pages) return 0; - pages = drm_gem_get_pages(&obj->base, gfpmask); + pages = drm_gem_get_pages(&obj->base); if (IS_ERR(pages)) return PTR_ERR(pages); @@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj) return 0; } - ret = udl_gem_get_pages(obj, GFP_KERNEL); + ret = udl_gem_get_pages(obj); if (ret) return ret; @@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, } gobj = to_udl_bo(obj); - ret = udl_gem_get_pages(gobj, GFP_KERNEL); + ret = udl_gem_get_pages(gobj); if (ret) goto out; ret = drm_gem_create_mmap_offset(obj); diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 7094b92d1ec7..42795674bc07 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -306,10 +306,23 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags) DRM_DEBUG("\n"); ret = udl_modeset_init(dev); + if (ret) + goto err; ret = udl_fbdev_init(dev); + if (ret) + goto err; + + ret = drm_vblank_init(dev, 1); + if (ret) + goto err_fb; + return 0; +err_fb: + udl_fbdev_cleanup(dev); err: + if (udl->urbs.count) + udl_free_urb_list(dev); kfree(udl); DRM_ERROR("%d\n", ret); return ret; @@ -325,6 +338,8 @@ int udl_driver_unload(struct drm_device *dev) { struct udl_device *udl = dev->dev_private; + drm_vblank_cleanup(dev); + if (udl->urbs.count) udl_free_urb_list(dev); diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index cddc4fcf35cf..dc145d320b25 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -363,6 +363,26 @@ static void udl_crtc_destroy(struct drm_crtc *crtc) kfree(crtc); } +static int udl_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) +{ + struct udl_framebuffer *ufb = to_udl_fb(fb); + struct drm_device *dev = crtc->dev; + unsigned long flags; + + udl_handle_damage(ufb, 0, 0, fb->width, fb->height); + + 
spin_lock_irqsave(&dev->event_lock, flags);
+	if (event)
+		drm_send_vblank_event(dev, 0, event);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	crtc->primary->fb = fb;
+
+	return 0;
+}
+
 static void udl_crtc_prepare(struct drm_crtc *crtc)
 {
 }
@@ -384,6 +404,7 @@ static struct drm_crtc_helper_funcs udl_helper_funcs = {
 static const struct drm_crtc_funcs udl_crtc_funcs = {
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = udl_crtc_destroy,
+	.page_flip = udl_crtc_page_flip,
 };
 
 static int udl_crtc_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 458cdf6d81e8..ce0ab951f507 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
-	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
+	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
+	    vmwgfx_cmdbuf_res.o \
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
new file mode 100644
index 000000000000..bfeb4b1f2acf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -0,0 +1,341 @@
+/**************************************************************************
+ *
+ * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
+
+enum vmw_cmdbuf_res_state {
+	VMW_CMDBUF_RES_COMMITED,
+	VMW_CMDBUF_RES_ADD,
+	VMW_CMDBUF_RES_DEL
+};
+
+/**
+ * struct vmw_cmdbuf_res - Command buffer managed resource entry.
+ *
+ * @res: Refcounted pointer to a struct vmw_resource.
+ * @hash: Hash entry for the manager hash table.
+ * @head: List head used either by the staging list or the manager list
+ * of committed resources.
+ * @state: Staging state of this resource entry.
+ * @man: Pointer to a resource manager for this entry.
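
/*
 * Illustrative aside (not part of the new file): the entry life cycle,
 * reconstructed from the functions that follow:
 *
 *   add:     (new)    -> ADD       entry on the caller's staging list
 *   commit:  ADD      -> COMMITED  entry moved to the manager's list
 *   remove:  COMMITED -> DEL       entry back on a staging list
 *   commit:  DEL      -> freed
 *   revert:  ADD -> freed,  DEL -> COMMITED  (staging undone)
 *
 * A hypothetical predicate for "visible to vmw_cmdbuf_res_lookup()":
 */
static bool vmw_cmdbuf_res_is_visible(const struct vmw_cmdbuf_res *entry)
{
	/* ADD and COMMITED entries are in the hash table; DEL entries were
	 * unhashed by vmw_cmdbuf_res_remove() and await commit or revert. */
	return entry->state != VMW_CMDBUF_RES_DEL;
}
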
+ */
+struct vmw_cmdbuf_res {
+	struct vmw_resource *res;
+	struct drm_hash_item hash;
+	struct list_head head;
+	enum vmw_cmdbuf_res_state state;
+	struct vmw_cmdbuf_res_manager *man;
+};
+
+/**
+ * struct vmw_cmdbuf_res_manager - Command buffer resource manager.
+ *
+ * @resources: Hash table containing staged and committed command buffer
+ * resources
+ * @list: List of committed command buffer resources.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @resources and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_cmdbuf_res_manager {
+	struct drm_open_hash resources;
+	struct list_head list;
+	struct vmw_private *dev_priv;
+};
+
+
+/**
+ * vmw_cmdbuf_res_lookup - Look up a command buffer resource
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @res_type: The resource type that, combined with the user key,
+ * identifies the resource.
+ * @user_key: The user key.
+ *
+ * Returns a valid refcounted struct vmw_resource pointer on success,
+ * an error pointer on failure.
+ */
+struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+		      enum vmw_cmdbuf_res_type res_type,
+		      u32 user_key)
+{
+	struct drm_hash_item *hash;
+	int ret;
+	unsigned long key = user_key | (res_type << 24);
+
+	ret = drm_ht_find_item(&man->resources, key, &hash);
+	if (unlikely(ret != 0))
+		return ERR_PTR(ret);
+
+	return vmw_resource_reference
+		(drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
+}
+
+/**
+ * vmw_cmdbuf_res_free - Free a command buffer resource.
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @entry: Pointer to a struct vmw_cmdbuf_res.
+ *
+ * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
+ * struct vmw_resource.
+ */
+static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
+				struct vmw_cmdbuf_res *entry)
+{
+	list_del(&entry->head);
+	WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
+	vmw_resource_unreference(&entry->res);
+	kfree(entry);
+}
+
+/**
+ * vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
+ *
+ * @list: Caller's list of command buffer resource actions.
+ *
+ * This function commits a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_cmdbuf_res_commit(struct list_head *list)
+{
+	struct vmw_cmdbuf_res *entry, *next;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		list_del(&entry->head);
+		switch (entry->state) {
+		case VMW_CMDBUF_RES_ADD:
+			entry->state = VMW_CMDBUF_RES_COMMITED;
+			list_add_tail(&entry->head, &entry->man->list);
+			break;
+		case VMW_CMDBUF_RES_DEL:
+			vmw_resource_unreference(&entry->res);
+			kfree(entry);
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+/**
+ * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
+ *
+ * @list: Caller's list of command buffer resource actions.
+ *
+ * This function reverts a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
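
/*
 * Illustrative aside: vmw_cmdbuf_res_lookup() above packs the resource
 * type into the top byte of the hash key, so user keys are assumed to
 * stay below 1 << 24. Spelled out as a hypothetical helper:
 */
static inline unsigned long vmw_cmdbuf_res_key(enum vmw_cmdbuf_res_type res_type,
					       u32 user_key)
{
	return (unsigned long)user_key | ((unsigned long)res_type << 24);
}
/*
 * Note that vmw_cmdbuf_res_remove() further down searches with the bare
 * user_key; the two agree only because the single type defined so far,
 * vmw_cmdbuf_res_compat_shader, has the value 0.
 */
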
+ */ +void vmw_cmdbuf_res_revert(struct list_head *list) +{ + struct vmw_cmdbuf_res *entry, *next; + int ret; + + list_for_each_entry_safe(entry, next, list, head) { + switch (entry->state) { + case VMW_CMDBUF_RES_ADD: + vmw_cmdbuf_res_free(entry->man, entry); + break; + case VMW_CMDBUF_RES_DEL: + ret = drm_ht_insert_item(&entry->man->resources, + &entry->hash); + list_del(&entry->head); + list_add_tail(&entry->head, &entry->man->list); + entry->state = VMW_CMDBUF_RES_COMMITED; + break; + default: + BUG(); + break; + } + } +} + +/** + * vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition. + * + * @man: Pointer to the command buffer resource manager. + * @res_type: The resource type. + * @user_key: The user-space id of the resource. + * @res: Valid (refcount != 0) pointer to a struct vmw_resource. + * @list: The staging list. + * + * This function allocates a struct vmw_cmdbuf_res entry and adds the + * resource to the hash table of the manager identified by @man. The + * entry is then put on the staging list identified by @list. + */ +int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, + enum vmw_cmdbuf_res_type res_type, + u32 user_key, + struct vmw_resource *res, + struct list_head *list) +{ + struct vmw_cmdbuf_res *cres; + int ret; + + cres = kzalloc(sizeof(*cres), GFP_KERNEL); + if (unlikely(cres == NULL)) + return -ENOMEM; + + cres->hash.key = user_key | (res_type << 24); + ret = drm_ht_insert_item(&man->resources, &cres->hash); + if (unlikely(ret != 0)) + goto out_invalid_key; + + cres->state = VMW_CMDBUF_RES_ADD; + cres->res = vmw_resource_reference(res); + cres->man = man; + list_add_tail(&cres->head, list); + +out_invalid_key: + return ret; +} + +/** + * vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal. + * + * @man: Pointer to the command buffer resource manager. + * @res_type: The resource type. + * @user_key: The user-space id of the resource. + * @list: The staging list. + * + * This function looks up the struct vmw_cmdbuf_res entry from the manager + * hash table and, if it exists, removes it. Depending on its current staging + * state it then either removes the entry from the staging list or adds it + * to it with a staging state of removal. + */ +int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, + enum vmw_cmdbuf_res_type res_type, + u32 user_key, + struct list_head *list) +{ + struct vmw_cmdbuf_res *entry; + struct drm_hash_item *hash; + int ret; + + ret = drm_ht_find_item(&man->resources, user_key, &hash); + if (likely(ret != 0)) + return -EINVAL; + + entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash); + + switch (entry->state) { + case VMW_CMDBUF_RES_ADD: + vmw_cmdbuf_res_free(man, entry); + break; + case VMW_CMDBUF_RES_COMMITED: + (void) drm_ht_remove_item(&man->resources, &entry->hash); + list_del(&entry->head); + entry->state = VMW_CMDBUF_RES_DEL; + list_add_tail(&entry->head, list); + break; + default: + BUG(); + break; + } + + return 0; +} + +/** + * vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource + * manager. + * + * @dev_priv: Pointer to a struct vmw_private + * + * Allocates and initializes a command buffer managed resource manager. Returns + * an error pointer on failure. 
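
/*
 * Illustrative aside: a condensed usage sketch of the API defined in this
 * file, loosely mirroring what vmwgfx_context.c below does for
 * guest-backed contexts. The function is hypothetical and error paths are
 * trimmed:
 */
static int vmw_cmdbuf_res_demo(struct vmw_private *dev_priv,
			       struct vmw_resource *res, u32 user_key)
{
	struct vmw_cmdbuf_res_manager *man;
	struct list_head staged;
	int ret;

	man = vmw_cmdbuf_res_man_create(dev_priv);
	if (IS_ERR(man))
		return PTR_ERR(man);

	INIT_LIST_HEAD(&staged);
	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
				 user_key, res, &staged);
	if (ret == 0)
		vmw_cmdbuf_res_commit(&staged);	/* device took the stream */
	else
		vmw_cmdbuf_res_revert(&staged);	/* list still empty; harmless */

	vmw_cmdbuf_res_man_destroy(man);
	return ret;
}
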
+ */ +struct vmw_cmdbuf_res_manager * +vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv) +{ + struct vmw_cmdbuf_res_manager *man; + int ret; + + man = kzalloc(sizeof(*man), GFP_KERNEL); + if (man == NULL) + return ERR_PTR(-ENOMEM); + + man->dev_priv = dev_priv; + INIT_LIST_HEAD(&man->list); + ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER); + if (ret == 0) + return man; + + kfree(man); + return ERR_PTR(ret); +} + +/** + * vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource + * manager. + * + * @man: Pointer to the manager to destroy. + * + * This function destroys a command buffer managed resource manager and + * unreferences / frees all command buffer managed resources and -entries + * associated with it. + */ +void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) +{ + struct vmw_cmdbuf_res *entry, *next; + + list_for_each_entry_safe(entry, next, &man->list, head) + vmw_cmdbuf_res_free(man, entry); + + kfree(man); +} + +/** + * + * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed + * resource manager + * + * Returns the approximate allocation size of a command buffer managed + * resource manager. + */ +size_t vmw_cmdbuf_res_man_size(void) +{ + static size_t res_man_size; + + if (unlikely(res_man_size == 0)) + res_man_size = + ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) + + ttm_round_pot(sizeof(struct hlist_head) << + VMW_CMDBUF_RES_MAN_HT_ORDER); + + return res_man_size; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 8bb26dcd9eae..5ac92874404d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -33,6 +33,7 @@ struct vmw_user_context { struct ttm_base_object base; struct vmw_resource res; struct vmw_ctx_binding_state cbs; + struct vmw_cmdbuf_res_manager *man; }; @@ -103,7 +104,8 @@ static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { static void vmw_hw_context_destroy(struct vmw_resource *res) { - + struct vmw_user_context *uctx = + container_of(res, struct vmw_user_context, res); struct vmw_private *dev_priv = res->dev_priv; struct { SVGA3dCmdHeader header; @@ -113,9 +115,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) if (res->func->destroy == vmw_gb_context_destroy) { mutex_lock(&dev_priv->cmdbuf_mutex); + vmw_cmdbuf_res_man_destroy(uctx->man); mutex_lock(&dev_priv->binding_mutex); - (void) vmw_context_binding_state_kill - (&container_of(res, struct vmw_user_context, res)->cbs); + (void) vmw_context_binding_state_kill(&uctx->cbs); (void) vmw_gb_context_destroy(res); mutex_unlock(&dev_priv->binding_mutex); if (dev_priv->pinned_bo != NULL && @@ -152,13 +154,16 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, ret = vmw_resource_init(dev_priv, res, true, res_free, &vmw_gb_context_func); res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; + if (unlikely(ret != 0)) + goto out_err; - if (unlikely(ret != 0)) { - if (res_free) - res_free(res); - else - kfree(res); - return ret; + if (dev_priv->has_mob) { + uctx->man = vmw_cmdbuf_res_man_create(dev_priv); + if (unlikely(IS_ERR(uctx->man))) { + ret = PTR_ERR(uctx->man); + uctx->man = NULL; + goto out_err; + } } memset(&uctx->cbs, 0, sizeof(uctx->cbs)); @@ -166,6 +171,13 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, vmw_resource_activate(res, vmw_hw_context_destroy); return 0; + +out_err: + if (res_free) + res_free(res); + else + kfree(res); + return ret; } static int vmw_context_init(struct 
vmw_private *dev_priv, @@ -471,7 +483,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, */ if (unlikely(vmw_user_context_size == 0)) - vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; + vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 + + ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0); ret = ttm_read_lock(&dev_priv->reservation_sem, true); if (unlikely(ret != 0)) @@ -901,3 +914,8 @@ struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) { return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); } + +struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx) +{ + return container_of(ctx, struct vmw_user_context, res)->man; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index 70ddce8358b0..ed1d51006ab1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c @@ -61,7 +61,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, vmw_execbuf_release_pinned_bo(dev_priv); - ret = ttm_bo_reserve(bo, interruptible, false, false, 0); + ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); if (unlikely(ret != 0)) goto err; @@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, if (pin) vmw_execbuf_release_pinned_bo(dev_priv); - ret = ttm_bo_reserve(bo, interruptible, false, false, 0); + ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); if (unlikely(ret != 0)) goto err; @@ -212,7 +212,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, if (pin) vmw_execbuf_release_pinned_bo(dev_priv); - ret = ttm_bo_reserve(bo, interruptible, false, false, 0); + ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); if (unlikely(ret != 0)) goto err_unlock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 246a62bab378..18b54acacfbb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -316,7 +316,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) if (unlikely(ret != 0)) return ret; - ret = ttm_bo_reserve(bo, false, true, false, 0); + ret = ttm_bo_reserve(bo, false, true, false, NULL); BUG_ON(ret != 0); ret = ttm_bo_kmap(bo, 0, 1, &map); @@ -946,7 +946,6 @@ static void vmw_postclose(struct drm_device *dev, drm_master_put(&vmw_fp->locked_master); } - vmw_compat_shader_man_destroy(vmw_fp->shman); ttm_object_file_release(&vmw_fp->tfile); kfree(vmw_fp); } @@ -966,16 +965,10 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) if (unlikely(vmw_fp->tfile == NULL)) goto out_no_tfile; - vmw_fp->shman = vmw_compat_shader_man_create(dev_priv); - if (IS_ERR(vmw_fp->shman)) - goto out_no_shman; - file_priv->driver_priv = vmw_fp; return 0; -out_no_shman: - ttm_object_file_release(&vmw_fp->tfile); out_no_tfile: kfree(vmw_fp); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 6b252a887ae2..c1811750cc8d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -40,10 +40,10 @@ #include <drm/ttm/ttm_module.h> #include "vmwgfx_fence.h" -#define VMWGFX_DRIVER_DATE "20140325" +#define VMWGFX_DRIVER_DATE "20140704" #define VMWGFX_DRIVER_MAJOR 2 #define VMWGFX_DRIVER_MINOR 6 -#define VMWGFX_DRIVER_PATCHLEVEL 0 +#define VMWGFX_DRIVER_PATCHLEVEL 1 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 @@ -75,14 
+75,11 @@ #define VMW_RES_FENCE ttm_driver_type3 #define VMW_RES_SHADER ttm_driver_type4 -struct vmw_compat_shader_manager; - struct vmw_fpriv { struct drm_master *locked_master; struct ttm_object_file *tfile; struct list_head fence_events; bool gb_aware; - struct vmw_compat_shader_manager *shman; }; struct vmw_dma_buffer { @@ -124,6 +121,10 @@ struct vmw_resource { void (*hw_destroy) (struct vmw_resource *res); }; + +/* + * Resources that are managed using ioctls. + */ enum vmw_res_type { vmw_res_context, vmw_res_surface, @@ -132,6 +133,15 @@ enum vmw_res_type { vmw_res_max }; +/* + * Resources that are managed using command streams. + */ +enum vmw_cmdbuf_res_type { + vmw_cmdbuf_res_compat_shader +}; + +struct vmw_cmdbuf_res_manager; + struct vmw_cursor_snooper { struct drm_crtc *crtc; size_t age; @@ -341,7 +351,7 @@ struct vmw_sw_context{ bool needs_post_query_barrier; struct vmw_resource *error_resource; struct vmw_ctx_binding_state staged_bindings; - struct list_head staged_shaders; + struct list_head staged_cmd_res; }; struct vmw_legacy_display; @@ -974,7 +984,8 @@ extern void vmw_context_binding_res_list_kill(struct list_head *head); extern void vmw_context_binding_res_list_scrub(struct list_head *head); extern int vmw_context_rebind_all(struct vmw_resource *ctx); extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); - +extern struct vmw_cmdbuf_res_manager * +vmw_context_res_man(struct vmw_resource *ctx); /* * Surface management - vmwgfx_surface.c */ @@ -1008,27 +1019,42 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, - SVGA3dShaderType shader_type, - u32 *user_key); -extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, - struct list_head *list); -extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, - struct list_head *list); -extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, - u32 user_key, - SVGA3dShaderType shader_type, - struct list_head *list); -extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, +extern int vmw_compat_shader_add(struct vmw_private *dev_priv, + struct vmw_cmdbuf_res_manager *man, u32 user_key, const void *bytecode, SVGA3dShaderType shader_type, size_t size, - struct ttm_object_file *tfile, struct list_head *list); -extern struct vmw_compat_shader_manager * -vmw_compat_shader_man_create(struct vmw_private *dev_priv); -extern void -vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man); +extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, + u32 user_key, SVGA3dShaderType shader_type, + struct list_head *list); +extern struct vmw_resource * +vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, + u32 user_key, SVGA3dShaderType shader_type); + +/* + * Command buffer managed resources - vmwgfx_cmdbuf_res.c + */ + +extern struct vmw_cmdbuf_res_manager * +vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv); +extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man); +extern size_t vmw_cmdbuf_res_man_size(void); +extern struct vmw_resource * +vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, + enum vmw_cmdbuf_res_type res_type, + u32 user_key); +extern void vmw_cmdbuf_res_revert(struct list_head *list); +extern void vmw_cmdbuf_res_commit(struct list_head 
*list);
+extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+			      enum vmw_cmdbuf_res_type res_type,
+			      u32 user_key,
+			      struct vmw_resource *res,
+			      struct list_head *list);
+extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+				 enum vmw_cmdbuf_res_type res_type,
+				 u32 user_key,
+				 struct list_head *list);
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 87df0b3674fd..7bfdaa163a33 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -422,28 +422,91 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 	return 0;
 }
 
+
+/**
+ * vmw_cmd_res_reloc_add - Add a resource to a software context's
+ * relocation and validation lists.
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @id_loc: Pointer to where the id that needs translation is located.
+ * @res: Valid pointer to a struct vmw_resource.
+ * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
+ * used for this resource is returned here.
+ */
+static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 enum vmw_res_type res_type,
+				 uint32_t *id_loc,
+				 struct vmw_resource *res,
+				 struct vmw_resource_val_node **p_val)
+{
+	int ret;
+	struct vmw_resource_val_node *node;
+
+	*p_val = NULL;
+	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+					  res,
+					  id_loc - sw_context->buf_start);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	ret = vmw_resource_val_add(sw_context, res, &node);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (res_type == vmw_res_context && dev_priv->has_mob &&
+	    node->first_usage) {
+
+		/*
+		 * Put contexts first on the list to be able to exit
+		 * list traversal for contexts early.
+		 */
+		list_del(&node->head);
+		list_add(&node->head, &sw_context->resource_list);
+
+		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+		if (unlikely(ret != 0))
+			goto out_err;
+		node->staged_bindings =
+			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+		if (node->staged_bindings == NULL) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			goto out_err;
+		}
+		INIT_LIST_HEAD(&node->staged_bindings->list);
+	}
+
+	if (p_val)
+		*p_val = node;
+
+out_err:
+	return ret;
+}
+
+
 /**
- * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
  *
  * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
- * @id: user-space resource id handle.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
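
/*
 * Illustrative aside: vmw_cmd_res_reloc_add() above both records the
 * command-stream relocation and places the resource on the validation
 * list, moving contexts to the head of sw_context->resource_list so later
 * walks can stop early. The staged command buffer resources are resolved
 * at the end of vmw_execbuf_process(); condensed into a hypothetical
 * helper, this is the pattern used further down:
 */
static void vmw_execbuf_finish_staged(struct vmw_sw_context *sw_context,
				      bool submitted)
{
	if (submitted)
		vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	else
		vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
}
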
*/ static int -vmw_cmd_compat_res_check(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - enum vmw_res_type res_type, - const struct vmw_user_resource_conv *converter, - uint32_t id, - uint32_t *id_loc, - struct vmw_resource_val_node **p_val) +vmw_cmd_res_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + enum vmw_res_type res_type, + const struct vmw_user_resource_conv *converter, + uint32_t *id_loc, + struct vmw_resource_val_node **p_val) { struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type]; @@ -451,7 +514,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv, struct vmw_resource_val_node *node; int ret; - if (id == SVGA3D_INVALID_ID) { + if (*id_loc == SVGA3D_INVALID_ID) { if (p_val) *p_val = NULL; if (res_type == vmw_res_context) { @@ -466,7 +529,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv, * resource */ - if (likely(rcache->valid && id == rcache->handle)) { + if (likely(rcache->valid && *id_loc == rcache->handle)) { const struct vmw_resource *res = rcache->res; rcache->node->first_usage = false; @@ -480,49 +543,28 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv, ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile, - id, + *id_loc, converter, &res); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use resource 0x%08x.\n", - (unsigned) id); + (unsigned) *id_loc); dump_stack(); return ret; } rcache->valid = true; rcache->res = res; - rcache->handle = id; - - ret = vmw_resource_relocation_add(&sw_context->res_relocations, - res, - id_loc - sw_context->buf_start); - if (unlikely(ret != 0)) - goto out_no_reloc; + rcache->handle = *id_loc; - ret = vmw_resource_val_add(sw_context, res, &node); + ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc, + res, &node); if (unlikely(ret != 0)) goto out_no_reloc; rcache->node = node; if (p_val) *p_val = node; - - if (dev_priv->has_mob && node->first_usage && - res_type == vmw_res_context) { - ret = vmw_resource_context_res_add(dev_priv, sw_context, res); - if (unlikely(ret != 0)) - goto out_no_reloc; - node->staged_bindings = - kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); - if (node->staged_bindings == NULL) { - DRM_ERROR("Failed to allocate context binding " - "information.\n"); - goto out_no_reloc; - } - INIT_LIST_HEAD(&node->staged_bindings->list); - } - vmw_resource_unreference(&res); return 0; @@ -534,31 +576,6 @@ out_no_reloc: } /** - * vmw_cmd_res_check - Check that a resource is present and if so, put it - * on the resource validate list unless it's already there. - * - * @dev_priv: Pointer to a device private structure. - * @sw_context: Pointer to the software context. - * @res_type: Resource type. - * @converter: User-space visisble type specific information. - * @id_loc: Pointer to the location in the command buffer currently being - * parsed from where the user-space resource id handle is located. - * @p_val: Pointer to pointer to resource validalidation node. Populated - * on exit. - */ -static int -vmw_cmd_res_check(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - enum vmw_res_type res_type, - const struct vmw_user_resource_conv *converter, - uint32_t *id_loc, - struct vmw_resource_val_node **p_val) -{ - return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type, - converter, *id_loc, id_loc, p_val); -} - -/** * vmw_rebind_contexts - Rebind all resources previously bound to * referenced contexts. 
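
/*
 * Illustrative aside on the continue -> break change in the hunk below:
 * it is only safe because vmw_cmd_res_reloc_add() re-adds context nodes
 * at the head of resource_list, so once the walk meets a node without
 * staged_bindings no context can follow. The open-coded list_del()/
 * list_add() pair there is equivalent to this hypothetical helper:
 */
static void vmw_resource_list_put_first(struct vmw_resource_val_node *node,
					struct vmw_sw_context *sw_context)
{
	list_move(&node->head, &sw_context->resource_list);
}
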
* @@ -572,8 +589,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) int ret; list_for_each_entry(val, &sw_context->resource_list, head) { - if (likely(!val->staged_bindings)) - continue; + if (unlikely(!val->staged_bindings)) + break; ret = vmw_context_rebind_all(val->res); if (unlikely(ret != 0)) { @@ -1626,13 +1643,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, } *cmd; int ret; size_t size; + struct vmw_resource_val_node *val; cmd = container_of(header, struct vmw_shader_define_cmd, header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, user_context_converter, &cmd->body.cid, - NULL); + &val); if (unlikely(ret != 0)) return ret; @@ -1640,11 +1658,11 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, return 0; size = cmd->header.size - sizeof(cmd->body); - ret = vmw_compat_shader_add(sw_context->fp->shman, + ret = vmw_compat_shader_add(dev_priv, + vmw_context_res_man(val->res), cmd->body.shid, cmd + 1, cmd->body.type, size, - sw_context->fp->tfile, - &sw_context->staged_shaders); + &sw_context->staged_cmd_res); if (unlikely(ret != 0)) return ret; @@ -1672,23 +1690,24 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, SVGA3dCmdDestroyShader body; } *cmd; int ret; + struct vmw_resource_val_node *val; cmd = container_of(header, struct vmw_shader_destroy_cmd, header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, user_context_converter, &cmd->body.cid, - NULL); + &val); if (unlikely(ret != 0)) return ret; if (unlikely(!dev_priv->has_mob)) return 0; - ret = vmw_compat_shader_remove(sw_context->fp->shman, + ret = vmw_compat_shader_remove(vmw_context_res_man(val->res), cmd->body.shid, cmd->body.type, - &sw_context->staged_shaders); + &sw_context->staged_cmd_res); if (unlikely(ret != 0)) return ret; @@ -1715,7 +1734,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, SVGA3dCmdHeader header; SVGA3dCmdSetShader body; } *cmd; - struct vmw_resource_val_node *ctx_node; + struct vmw_resource_val_node *ctx_node, *res_node = NULL; + struct vmw_ctx_bindinfo bi; + struct vmw_resource *res = NULL; int ret; cmd = container_of(header, struct vmw_set_shader_cmd, @@ -1727,32 +1748,40 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - if (dev_priv->has_mob) { - struct vmw_ctx_bindinfo bi; - struct vmw_resource_val_node *res_node; - u32 shid = cmd->body.shid; - - if (shid != SVGA3D_INVALID_ID) - (void) vmw_compat_shader_lookup(sw_context->fp->shman, - cmd->body.type, - &shid); - - ret = vmw_cmd_compat_res_check(dev_priv, sw_context, - vmw_res_shader, - user_shader_converter, - shid, - &cmd->body.shid, &res_node); + if (!dev_priv->has_mob) + return 0; + + if (cmd->body.shid != SVGA3D_INVALID_ID) { + res = vmw_compat_shader_lookup + (vmw_context_res_man(ctx_node->res), + cmd->body.shid, + cmd->body.type); + + if (!IS_ERR(res)) { + ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, + vmw_res_shader, + &cmd->body.shid, res, + &res_node); + vmw_resource_unreference(&res); + if (unlikely(ret != 0)) + return ret; + } + } + + if (!res_node) { + ret = vmw_cmd_res_check(dev_priv, sw_context, + vmw_res_shader, + user_shader_converter, + &cmd->body.shid, &res_node); if (unlikely(ret != 0)) return ret; - - bi.ctx = ctx_node->res; - bi.res = res_node ? 
res_node->res : NULL; - bi.bt = vmw_ctx_binding_shader; - bi.i1.shader_type = cmd->body.type; - return vmw_context_binding_add(ctx_node->staged_bindings, &bi); } - return 0; + bi.ctx = ctx_node->res; + bi.res = res_node ? res_node->res : NULL; + bi.bt = vmw_ctx_binding_shader; + bi.i1.shader_type = cmd->body.type; + return vmw_context_binding_add(ctx_node->staged_bindings, &bi); } /** @@ -2394,6 +2423,8 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, } } + + int vmw_execbuf_process(struct drm_file *file_priv, struct vmw_private *dev_priv, void __user *user_commands, @@ -2453,7 +2484,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, goto out_unlock; sw_context->res_ht_initialized = true; } - INIT_LIST_HEAD(&sw_context->staged_shaders); + INIT_LIST_HEAD(&sw_context->staged_cmd_res); INIT_LIST_HEAD(&resource_list); ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, @@ -2548,8 +2579,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, } list_splice_init(&sw_context->resource_list, &resource_list); - vmw_compat_shaders_commit(sw_context->fp->shman, - &sw_context->staged_shaders); + vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res); mutex_unlock(&dev_priv->cmdbuf_mutex); /* @@ -2576,8 +2606,7 @@ out_unlock: list_splice_init(&sw_context->resource_list, &resource_list); error_resource = sw_context->error_resource; sw_context->error_resource = NULL; - vmw_compat_shaders_revert(sw_context->fp->shman, - &sw_context->staged_shaders); + vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res); mutex_unlock(&dev_priv->cmdbuf_mutex); /* diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index a89ad938eacf..b031b48dbb3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info) vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset); vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); - vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length); vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index b1273e8e9a69..26f8bdde3529 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -47,6 +47,7 @@ struct vmwgfx_gmrid_man { static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem) { struct vmwgfx_gmrid_man *gman = diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8f3edc4710f2..d2bc2b03d4c6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -75,7 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du) vmw_surface_unreference(&du->cursor_surface); if (du->cursor_dmabuf) vmw_dmabuf_unreference(&du->cursor_dmabuf); - drm_sysfs_connector_remove(&du->connector); + drm_connector_unregister(&du->connector); drm_crtc_cleanup(&du->crtc); drm_encoder_cleanup(&du->encoder); drm_connector_cleanup(&du->connector); @@ -136,7 +136,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, kmap_offset = 0; kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; - ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0); + ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 
NULL); if (unlikely(ret != 0)) { DRM_ERROR("reserve failed\n"); return -EINVAL; @@ -343,7 +343,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; kmap_num = (64*64*4) >> PAGE_SHIFT; - ret = ttm_bo_reserve(bo, true, false, false, 0); + ret = ttm_bo_reserve(bo, true, false, false, NULL); if (unlikely(ret != 0)) { DRM_ERROR("reserve failed\n"); return; @@ -1501,7 +1501,6 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, { struct drm_vmw_cursor_bypass_arg *arg = data; struct vmw_display_unit *du; - struct drm_mode_object *obj; struct drm_crtc *crtc; int ret = 0; @@ -1519,13 +1518,12 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, return 0; } - obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC); - if (!obj) { + crtc = drm_crtc_find(dev, arg->crtc_id); + if (!crtc) { ret = -ENOENT; goto out; } - crtc = obj_to_crtc(obj); du = vmw_crtc_to_du(crtc); du->hotspot_x = arg->xhot; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index b2b9bd23aeee..15e185ae4c99 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -371,7 +371,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; - (void) drm_sysfs_connector_add(connector); + (void) drm_connector_register(connector); drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 01d68f0a69dc..a432c0db257c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -127,7 +127,7 @@ static void vmw_resource_release(struct kref *kref) if (res->backup) { struct ttm_buffer_object *bo = &res->backup->base; - ttm_bo_reserve(bo, false, false, false, 0); + ttm_bo_reserve(bo, false, false, false, NULL); if (!list_empty(&res->mob_head) && res->func->unbind != NULL) { struct ttm_validate_buffer val_buf; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index a95d3a0cabe4..b295463a60b3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -467,7 +467,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; - (void) drm_sysfs_connector_add(connector); + (void) drm_connector_register(connector); drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index c1559eeaffe9..8719fb3cccc9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -29,8 +29,6 @@ #include "vmwgfx_resource_priv.h" #include "ttm/ttm_placement.h" -#define VMW_COMPAT_SHADER_HT_ORDER 12 - struct vmw_shader { struct vmw_resource res; SVGA3dShaderType type; @@ -42,49 +40,8 @@ struct vmw_user_shader { struct vmw_shader shader; }; -/** - * enum vmw_compat_shader_state - Staging state for compat shaders - */ -enum vmw_compat_shader_state { - VMW_COMPAT_COMMITED, - VMW_COMPAT_ADD, - VMW_COMPAT_DEL -}; - -/** - * struct vmw_compat_shader - Metadata for compat shaders. - * - * @handle: The TTM handle of the guest backed shader. - * @tfile: The struct ttm_object_file the guest backed shader is registered - * with. - * @hash: Hash item for lookup. 
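
The cursor-bypass ioctl now uses drm_crtc_find() instead of open-coding drm_mode_object_find() plus obj_to_crtc(). In this era's DRM core the helper is just the typed form of that pair, roughly:

```c
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
					     uint32_t id)
{
	struct drm_mode_object *mo;

	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
	return mo ? obj_to_crtc(mo) : NULL;
}
```
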
- * @head: List head for staging lists or the compat shader manager list. - * @state: Staging state. - * - * The structure is protected by the cmdbuf lock. - */ -struct vmw_compat_shader { - u32 handle; - struct ttm_object_file *tfile; - struct drm_hash_item hash; - struct list_head head; - enum vmw_compat_shader_state state; -}; - -/** - * struct vmw_compat_shader_manager - Compat shader manager. - * - * @shaders: Hash table containing staged and commited compat shaders - * @list: List of commited shaders. - * @dev_priv: Pointer to a device private structure. - * - * @shaders and @list are protected by the cmdbuf mutex for now. - */ -struct vmw_compat_shader_manager { - struct drm_open_hash shaders; - struct list_head list; - struct vmw_private *dev_priv; -}; +static uint64_t vmw_user_shader_size; +static uint64_t vmw_shader_size; static void vmw_user_shader_free(struct vmw_resource *res); static struct vmw_resource * @@ -98,8 +55,6 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf); static int vmw_gb_shader_destroy(struct vmw_resource *res); -static uint64_t vmw_user_shader_size; - static const struct vmw_user_resource_conv user_shader_conv = { .object_type = VMW_RES_SHADER, .base_obj_to_res = vmw_user_shader_base_to_res, @@ -347,6 +302,16 @@ static void vmw_user_shader_free(struct vmw_resource *res) vmw_user_shader_size); } +static void vmw_shader_free(struct vmw_resource *res) +{ + struct vmw_shader *shader = vmw_res_to_shader(res); + struct vmw_private *dev_priv = res->dev_priv; + + kfree(shader); + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_shader_size); +} + /** * This function is called when user space has no more references on the * base object. It releases the base-object's reference on the resource object. @@ -371,13 +336,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, TTM_REF_USAGE); } -static int vmw_shader_alloc(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buffer, - size_t shader_size, - size_t offset, - SVGA3dShaderType shader_type, - struct ttm_object_file *tfile, - u32 *handle) +static int vmw_user_shader_alloc(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buffer, + size_t shader_size, + size_t offset, + SVGA3dShaderType shader_type, + struct ttm_object_file *tfile, + u32 *handle) { struct vmw_user_shader *ushader; struct vmw_resource *res, *tmp; @@ -442,6 +407,56 @@ out: } +struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buffer, + size_t shader_size, + size_t offset, + SVGA3dShaderType shader_type) +{ + struct vmw_shader *shader; + struct vmw_resource *res; + int ret; + + /* + * Approximate idr memory usage with 128 bytes. It will be limited + * by maximum number_of shaders anyway. + */ + if (unlikely(vmw_shader_size == 0)) + vmw_shader_size = + ttm_round_pot(sizeof(struct vmw_shader)) + 128; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + vmw_shader_size, + false, true); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for shader " + "creation.\n"); + goto out_err; + } + + shader = kzalloc(sizeof(*shader), GFP_KERNEL); + if (unlikely(shader == NULL)) { + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_shader_size); + ret = -ENOMEM; + goto out_err; + } + + res = &shader->res; + + /* + * From here on, the destructor takes over resource freeing. + */ + ret = vmw_gb_shader_init(dev_priv, res, shader_size, + offset, shader_type, buffer, + vmw_shader_free); + +out_err: + return ret ? 
ERR_PTR(ret) : res; +} + + int vmw_shader_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -490,8 +505,8 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) goto out_bad_arg; - ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, - shader_type, tfile, &arg->shader_handle); + ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset, + shader_type, tfile, &arg->shader_handle); ttm_read_unlock(&dev_priv->reservation_sem); out_bad_arg: @@ -500,202 +515,83 @@ out_bad_arg: } /** - * vmw_compat_shader_lookup - Look up a compat shader - * - * @man: Pointer to the compat shader manager. - * @shader_type: The shader type, that combined with the user_key identifies - * the shader. - * @user_key: On entry, this should be a pointer to the user_key. - * On successful exit, it will contain the guest-backed shader's TTM handle. + * vmw_compat_shader_id_ok - Check whether a compat shader user key and + * shader type are within valid bounds. * - * Returns 0 on success. Non-zero on failure, in which case the value pointed - * to by @user_key is unmodified. - */ -int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, - SVGA3dShaderType shader_type, - u32 *user_key) -{ - struct drm_hash_item *hash; - int ret; - unsigned long key = *user_key | (shader_type << 24); - - ret = drm_ht_find_item(&man->shaders, key, &hash); - if (unlikely(ret != 0)) - return ret; - - *user_key = drm_hash_entry(hash, struct vmw_compat_shader, - hash)->handle; - - return 0; -} - -/** - * vmw_compat_shader_free - Free a compat shader. - * - * @man: Pointer to the compat shader manager. - * @entry: Pointer to a struct vmw_compat_shader. - * - * Frees a struct vmw_compat_shder entry and drops its reference to the - * guest backed shader. - */ -static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man, - struct vmw_compat_shader *entry) -{ - list_del(&entry->head); - WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash)); - WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle, - TTM_REF_USAGE)); - kfree(entry); -} - -/** - * vmw_compat_shaders_commit - Commit a list of compat shader actions. - * - * @man: Pointer to the compat shader manager. - * @list: Caller's list of compat shader actions. + * @user_key: User space id of the shader. + * @shader_type: Shader type. * - * This function commits a list of compat shader additions or removals. - * It is typically called when the execbuf ioctl call triggering these - * actions has commited the fifo contents to the device. + * Returns true if valid false if not. */ -void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, - struct list_head *list) +static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) { - struct vmw_compat_shader *entry, *next; - - list_for_each_entry_safe(entry, next, list, head) { - list_del(&entry->head); - switch (entry->state) { - case VMW_COMPAT_ADD: - entry->state = VMW_COMPAT_COMMITED; - list_add_tail(&entry->head, &man->list); - break; - case VMW_COMPAT_DEL: - ttm_ref_object_base_unref(entry->tfile, entry->handle, - TTM_REF_USAGE); - kfree(entry); - break; - default: - BUG(); - break; - } - } + return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16; } /** - * vmw_compat_shaders_revert - Revert a list of compat shader actions + * vmw_compat_shader_key - Compute a hash key suitable for a compat shader. * - * @man: Pointer to the compat shader manager. 
- * @list: Caller's list of compat shader actions. + * @user_key: User space id of the shader. + * @shader_type: Shader type. * - * This function reverts a list of compat shader additions or removals. - * It is typically called when the execbuf ioctl call triggering these - * actions failed for some reason, and the command stream was never - * submitted. + * Returns a hash key suitable for a command buffer managed resource + * manager hash table. */ -void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, - struct list_head *list) +static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) { - struct vmw_compat_shader *entry, *next; - int ret; - - list_for_each_entry_safe(entry, next, list, head) { - switch (entry->state) { - case VMW_COMPAT_ADD: - vmw_compat_shader_free(man, entry); - break; - case VMW_COMPAT_DEL: - ret = drm_ht_insert_item(&man->shaders, &entry->hash); - list_del(&entry->head); - list_add_tail(&entry->head, &man->list); - entry->state = VMW_COMPAT_COMMITED; - break; - default: - BUG(); - break; - } - } + return user_key | (shader_type << 20); } /** * vmw_compat_shader_remove - Stage a compat shader for removal. * - * @man: Pointer to the compat shader manager + * @man: Pointer to the compat shader manager identifying the shader namespace. * @user_key: The key that is used to identify the shader. The key is * unique to the shader type. * @shader_type: Shader type. - * @list: Caller's list of staged shader actions. - * - * This function stages a compat shader for removal and removes the key from - * the shader manager's hash table. If the shader was previously only staged - * for addition it is completely removed (But the execbuf code may keep a - * reference if it was bound to a context between addition and removal). If - * it was previously commited to the manager, it is staged for removal. + * @list: Caller's list of staged command buffer resource actions. */ -int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, +int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, u32 user_key, SVGA3dShaderType shader_type, struct list_head *list) { - struct vmw_compat_shader *entry; - struct drm_hash_item *hash; - int ret; - - ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24), - &hash); - if (likely(ret != 0)) + if (!vmw_compat_shader_id_ok(user_key, shader_type)) return -EINVAL; - entry = drm_hash_entry(hash, struct vmw_compat_shader, hash); - - switch (entry->state) { - case VMW_COMPAT_ADD: - vmw_compat_shader_free(man, entry); - break; - case VMW_COMPAT_COMMITED: - (void) drm_ht_remove_item(&man->shaders, &entry->hash); - list_del(&entry->head); - entry->state = VMW_COMPAT_DEL; - list_add_tail(&entry->head, list); - break; - default: - BUG(); - break; - } - - return 0; + return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader, + vmw_compat_shader_key(user_key, + shader_type), + list); } /** - * vmw_compat_shader_add - Create a compat shader and add the - * key to the manager + * vmw_compat_shader_add - Create a compat shader and stage it for addition + * as a command buffer managed resource. * - * @man: Pointer to the compat shader manager + * @man: Pointer to the compat shader manager identifying the shader namespace. * @user_key: The key that is used to identify the shader. The key is * unique to the shader type. * @bytecode: Pointer to the bytecode of the shader. * @shader_type: Shader type. * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is * to be created with. 
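
With user keys capped at 20 bits and shader types at 4 bits, the packed key is unique per (user_key, shader_type) pair and fits in the low 24 bits of a u32. A small standalone illustration of the packing:

```c
#include <stdbool.h>
#include <stdint.h>

static bool compat_shader_id_ok(uint32_t user_key, unsigned int type)
{
	return user_key <= ((1u << 20) - 1) && type < 16;
}

static uint32_t compat_shader_key(uint32_t user_key, unsigned int type)
{
	return user_key | (type << 20);	/* type lives in bits [23:20] */
}

/* e.g. compat_shader_key(0x123, 2) == 0x200123; keys can never collide
 * across shader types because the two bit fields do not overlap. */
```
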
- * @list: Caller's list of staged shader actions. + * @list: Caller's list of staged command buffer resource actions. * - * Note that only the key is added to the shader manager's hash table. - * The shader is not yet added to the shader manager's list of shaders. */ -int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, +int vmw_compat_shader_add(struct vmw_private *dev_priv, + struct vmw_cmdbuf_res_manager *man, u32 user_key, const void *bytecode, SVGA3dShaderType shader_type, size_t size, - struct ttm_object_file *tfile, struct list_head *list) { struct vmw_dma_buffer *buf; struct ttm_bo_kmap_obj map; bool is_iomem; - struct vmw_compat_shader *compat; - u32 handle; int ret; + struct vmw_resource *res; - if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16) + if (!vmw_compat_shader_id_ok(user_key, shader_type)) return -EINVAL; /* Allocate and pin a DMA buffer */ @@ -703,7 +599,7 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, if (unlikely(buf == NULL)) return -ENOMEM; - ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement, + ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, true, vmw_dmabuf_bo_free); if (unlikely(ret != 0)) goto out; @@ -728,84 +624,40 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, WARN_ON(ret != 0); ttm_bo_unreserve(&buf->base); - /* Create a guest-backed shader container backed by the dma buffer */ - ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type, - tfile, &handle); - vmw_dmabuf_unreference(&buf); + res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type); if (unlikely(ret != 0)) goto no_reserve; - /* - * Create a compat shader structure and stage it for insertion - * in the manager - */ - compat = kzalloc(sizeof(*compat), GFP_KERNEL); - if (compat == NULL) - goto no_compat; - - compat->hash.key = user_key | (shader_type << 24); - ret = drm_ht_insert_item(&man->shaders, &compat->hash); - if (unlikely(ret != 0)) - goto out_invalid_key; - - compat->state = VMW_COMPAT_ADD; - compat->handle = handle; - compat->tfile = tfile; - list_add_tail(&compat->head, list); - - return 0; -out_invalid_key: - kfree(compat); -no_compat: - ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); + ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader, + vmw_compat_shader_key(user_key, shader_type), + res, list); + vmw_resource_unreference(&res); no_reserve: + vmw_dmabuf_unreference(&buf); out: return ret; } /** - * vmw_compat_shader_man_create - Create a compat shader manager - * - * @dev_priv: Pointer to a device private structure. - * - * Typically done at file open time. If successful returns a pointer to a - * compat shader manager. Otherwise returns an error pointer. - */ -struct vmw_compat_shader_manager * -vmw_compat_shader_man_create(struct vmw_private *dev_priv) -{ - struct vmw_compat_shader_manager *man; - int ret; - - man = kzalloc(sizeof(*man), GFP_KERNEL); - if (man == NULL) - return ERR_PTR(-ENOMEM); - - man->dev_priv = dev_priv; - INIT_LIST_HEAD(&man->list); - ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER); - if (ret == 0) - return man; - - kfree(man); - return ERR_PTR(ret); -} - -/** - * vmw_compat_shader_man_destroy - Destroy a compat shader manager + * vmw_compat_shader_lookup - Look up a compat shader * - * @man: Pointer to the shader manager to destroy. + * @man: Pointer to the command buffer managed resource manager identifying + * the shader namespace. + * @user_key: The user space id of the shader. + * @shader_type: The shader type. 
* - * Typically done at file close time. + * Returns a refcounted pointer to a struct vmw_resource if the shader was + * found. An error pointer otherwise. */ -void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man) +struct vmw_resource * +vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, + u32 user_key, + SVGA3dShaderType shader_type) { - struct vmw_compat_shader *entry, *next; - - mutex_lock(&man->dev_priv->cmdbuf_mutex); - list_for_each_entry_safe(entry, next, &man->list, head) - vmw_compat_shader_free(man, entry); + if (!vmw_compat_shader_id_ok(user_key, shader_type)) + return ERR_PTR(-EINVAL); - mutex_unlock(&man->dev_priv->cmdbuf_mutex); - kfree(man); + return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader, + vmw_compat_shader_key(user_key, + shader_type)); } diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index af0259708358..d2077f040f3e 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -237,12 +237,10 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, if (conflict->locks & lwants) return conflict; - /* Ok, now check if he owns the resource we want. We don't need - * to check "decodes" since it should be impossible to own - * own legacy resources you don't decode unless I have a bug - * in this code... + /* Ok, now check if it owns the resource we want. We can + * lock resources that are not decoded, therefore a device + * can own resources it doesn't decode. */ - WARN_ON(conflict->owns & ~conflict->decodes); match = lwants & conflict->owns; if (!match) continue; @@ -254,13 +252,19 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, flags = 0; pci_bits = 0; + /* If we can't control legacy resources via the bridge, we + * also need to disable normal decoding. + */ if (!conflict->bridge_has_one_vga) { - vga_irq_set_state(conflict, false); - flags |= PCI_VGA_STATE_CHANGE_DECODES; - if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) + if ((match & conflict->decodes) & VGA_RSRC_LEGACY_MEM) pci_bits |= PCI_COMMAND_MEMORY; - if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) + if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO) pci_bits |= PCI_COMMAND_IO; + + if (pci_bits) { + vga_irq_set_state(conflict, false); + flags |= PCI_VGA_STATE_CHANGE_DECODES; + } } if (change_bridge) @@ -268,18 +272,19 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, pci_set_vga_state(conflict->pdev, false, pci_bits, flags); conflict->owns &= ~match; - /* If he also owned non-legacy, that is no longer the case */ - if (match & VGA_RSRC_LEGACY_MEM) + + /* If we disabled normal decoding, reflect it in owns */ + if (pci_bits & PCI_COMMAND_MEMORY) conflict->owns &= ~VGA_RSRC_NORMAL_MEM; - if (match & VGA_RSRC_LEGACY_IO) + if (pci_bits & PCI_COMMAND_IO) conflict->owns &= ~VGA_RSRC_NORMAL_IO; } enable_them: /* ok dude, we got it, everybody conflicting has been disabled, let's - * enable us. Make sure we don't mark a bit in "owns" that we don't - * also have in "decodes". We can lock resources we don't decode but - * not own them. + * enable us. Mark any bits in "owns" regardless of whether we + * decoded them. We can lock resources we don't decode, therefore + * we must track them via "owns". 
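
The arbiter change keys the PCI command bits to clear off (match & conflict->decodes) rather than off match alone, so a device that merely locks a range it does not decode is no longer knocked out. The decision, in sketch form (resource and flag names as in the file above):

```c
/* Sketch: disable only what the conflicting device actually decodes. */
pci_bits = 0;

if ((match & conflict->decodes) & VGA_RSRC_LEGACY_MEM)
	pci_bits |= PCI_COMMAND_MEMORY;
if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO)
	pci_bits |= PCI_COMMAND_IO;

if (pci_bits) {
	vga_irq_set_state(conflict, false);
	flags |= PCI_VGA_STATE_CHANGE_DECODES;
}
```
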
*/ flags = 0; pci_bits = 0; @@ -291,7 +296,7 @@ enable_them: if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) pci_bits |= PCI_COMMAND_IO; } - if (!!(wants & VGA_RSRC_LEGACY_MASK)) + if (wants & VGA_RSRC_LEGACY_MASK) flags |= PCI_VGA_STATE_CHANGE_BRIDGE; pci_set_vga_state(vgadev->pdev, true, pci_bits, flags); @@ -299,7 +304,7 @@ enable_them: if (!vgadev->bridge_has_one_vga) { vga_irq_set_state(vgadev, true); } - vgadev->owns |= (wants & vgadev->decodes); + vgadev->owns |= wants; lock_them: vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); if (rsrc & VGA_RSRC_LEGACY_IO) @@ -649,7 +654,6 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, old_decodes = vgadev->decodes; decodes_removed = ~new_decodes & old_decodes; decodes_unlocked = vgadev->locks & decodes_removed; - vgadev->owns &= ~decodes_removed; vgadev->decodes = new_decodes; pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c index 39b4cb48d738..6eba301ee03d 100644 --- a/drivers/iio/adc/ad799x.c +++ b/drivers/iio/adc/ad799x.c @@ -427,9 +427,12 @@ static int ad799x_write_event_value(struct iio_dev *indio_dev, int ret; struct ad799x_state *st = iio_priv(indio_dev); + if (val < 0 || val > RES_MASK(chan->scan_type.realbits)) + return -EINVAL; + mutex_lock(&indio_dev->mlock); ret = ad799x_i2c_write16(st, ad799x_threshold_reg(chan, dir, info), - val); + val << chan->scan_type.shift); mutex_unlock(&indio_dev->mlock); return ret; @@ -452,7 +455,8 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev, mutex_unlock(&indio_dev->mlock); if (ret < 0) return ret; - *val = valin; + *val = (valin >> chan->scan_type.shift) & + RES_MASK(chan->scan_type.realbits); return IIO_VAL_INT; } diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index d833d55052ea..c7497009d60a 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, else if (name && index >= 0) { pr_err("ERROR: could not get IIO channel %s:%s(%i)\n", np->full_name, name ? 
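
The ad799x threshold fix is the standard register-field pattern: clamp the user value to the converter's resolution, shift it into position on write, and mask it back out on read. Distilled, with realbits and shift standing in for chan->scan_type.realbits and chan->scan_type.shift:

```c
#define RES_MASK(bits)	((1 << (bits)) - 1)

/* Write path: reject out-of-range values, then place them in the field. */
if (val < 0 || val > RES_MASK(realbits))
	return -EINVAL;
regval = val << shift;

/* Read path: undo the shift and strip bits outside the field. */
val = (regval >> shift) & RES_MASK(realbits);
```
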
name : "", index); - return chan; + return NULL; } /* @@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, */ np = np->parent; if (np && !of_get_property(np, "io-channel-ranges", NULL)) - break; + return NULL; } + return chan; } @@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev, if (channel != NULL) return channel; } + return iio_channel_get_sys(name, channel_name); } EXPORT_SYMBOL_GPL(iio_channel_get); diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index c887e6eebc41..574aba0eba4e 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -334,6 +334,15 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask, static void armada_xp_mpic_smp_cpu_init(void) { + u32 control; + int nr_irqs, i; + + control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); + nr_irqs = (control >> 2) & 0x3ff; + + for (i = 0; i < nr_irqs; i++) + writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); + /* Clear pending IPIs */ writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); @@ -474,7 +483,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, struct device_node *parent) { struct resource main_int_res, per_cpu_int_res; - int parent_irq; + int parent_irq, nr_irqs, i; u32 control; BUG_ON(of_address_to_resource(node, 0, &main_int_res)); @@ -496,9 +505,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, BUG_ON(!per_cpu_int_base); control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); + nr_irqs = (control >> 2) & 0x3ff; + + for (i = 0; i < nr_irqs; i++) + writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); armada_370_xp_mpic_domain = - irq_domain_add_linear(node, (control >> 2) & 0x3ff, + irq_domain_add_linear(node, nr_irqs, &armada_370_xp_mpic_irq_ops, NULL); BUG_ON(!armada_370_xp_mpic_domain); diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 8ee2a36d5840..c15c840987d2 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -150,7 +150,7 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np, /* Allocate a single Generic IRQ chip for this node */ ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, - np->full_name, handle_level_irq, clr, 0, 0); + np->full_name, handle_edge_irq, clr, 0, 0); if (ret) { pr_err("failed to allocate generic irq chip\n"); goto out_free_domain; diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 3fdda3a40269..6ce6bd3441bf 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = { }; static struct spear_shirq spear320_shirq_ras3 = { - .irq_nr = 3, + .irq_nr = 7, .irq_bit_off = 0, .invalid_irq = 1, .regs = { diff --git a/drivers/md/md.c b/drivers/md/md.c index 34846856dbc6..32fc19c540d4 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5599,7 +5599,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg) if (mddev->in_sync) info.state = (1<<MD_SB_CLEAN); if (mddev->bitmap && mddev->bitmap_info.offset) - info.state = (1<<MD_SB_BITMAP_PRESENT); + info.state |= (1<<MD_SB_BITMAP_PRESENT); info.active_disks = insync; info.working_disks = working; info.failed_disks = failed; @@ -7501,6 +7501,19 @@ void md_do_sync(struct md_thread *thread) rdev->recovery_offset < j) j = rdev->recovery_offset; rcu_read_unlock(); + + /* If there is a bitmap, we 
need to make sure all + * writes that started before we added a spare + * complete before we start doing a recovery. + * Otherwise the write might complete and (via + * bitmap_endwrite) set a bit in the bitmap after the + * recovery has checked that bit and skipped that + * region. + */ + if (mddev->bitmap) { + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + } } printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index c4cddf0cd96d..b777d8f46bd5 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -880,6 +880,21 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) const u64 phys_offset = __pa(PAGE_OFFSET); base &= PAGE_MASK; size &= PAGE_MASK; + + if (sizeof(phys_addr_t) < sizeof(u64)) { + if (base > ULONG_MAX) { + pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base + size > ULONG_MAX) { + pr_warning("Ignoring memory range 0x%lx - 0x%llx\n", + ULONG_MAX, base + size); + size = ULONG_MAX - base; + } + } + if (base + size < phys_offset) { pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", base, base + size); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 554349029628..56467df3d6de 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -4198,6 +4198,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) kfree(phba->ep_array); phba->ep_array = NULL; ret = -ENOMEM; + + goto free_memory; } for (i = 0; i < phba->params.cxns_per_ctrl; i++) { diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 6045aa78986a..07934b0b9ee1 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1008,10 +1008,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba, BE2_IPV6 : BE2_IPV4 ; rc = mgmt_get_if_info(phba, ip_type, &if_info); - if (rc) { - kfree(if_info); + if (rc) return rc; - } if (boot_proto == ISCSI_BOOTPROTO_DHCP) { if (if_info->dhcp_state) { diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index f54843023466..785d0d71781e 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -516,23 +516,17 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); - stats = per_cpu_ptr(lport->stats, get_cpu()); - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; - fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = hp->fcoe_sof; if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { - put_cpu(); kfree_skb(skb); return; } fr_eof(fp) = crc_eof.fcoe_eof; fr_crc(fp) = crc_eof.fcoe_crc32; if (pskb_trim(skb, fr_len)) { - put_cpu(); kfree_skb(skb); return; } @@ -544,7 +538,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) port = lport_priv(vn_port); if (!ether_addr_equal(port->data_src_addr, dest_mac)) { BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); - put_cpu(); kfree_skb(skb); return; } @@ -552,7 +545,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { /* Drop FCP data. 
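
The early_init_dt_add_memory_arch() hunk copes with a 64-bit memory node on a kernel whose phys_addr_t is only 32 bits: blocks entirely above the addressable range are dropped, and a straddling block keeps only its reachable prefix. A worked case, assuming 32-bit ULONG_MAX and with memblock_add() as an illustrative downstream consumer:

```c
/* Assumes a 32-bit kernel, i.e. ULONG_MAX == 0xffffffff. */
static void add_memory_clamped(u64 base, u64 size)
{
	if (base > ULONG_MAX)
		return;				/* wholly unreachable: ignore */
	if (base + size > ULONG_MAX)
		size = ULONG_MAX - base;	/* keep the reachable prefix */

	/* e.g. base 0xf0000000, size 0x20000000 -> size becomes 0x0fffffff */
	memblock_add(base, size);
}
```
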
We dont this in L2 path */ - put_cpu(); kfree_skb(skb); return; } @@ -562,7 +554,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) case ELS_LOGO: if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { /* drop non-FIP LOGO */ - put_cpu(); kfree_skb(skb); return; } @@ -572,22 +563,23 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { /* Drop incoming ABTS */ - put_cpu(); kfree_skb(skb); return; } + stats = per_cpu_ptr(lport->stats, smp_processor_id()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + if (le32_to_cpu(fr_crc(fp)) != ~crc32(~0, skb->data, fr_len)) { if (stats->InvalidCRCCount < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); stats->InvalidCRCCount++; - put_cpu(); kfree_skb(skb); return; } - put_cpu(); fc_exch_recv(lport, fp); } diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 32a5e0a2a669..7bc47fc7c686 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -282,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) arr_sz, GFP_KERNEL); if (!cmgr->free_list_lock) { printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); + kfree(cmgr->free_list); + cmgr->free_list = NULL; goto mem_err; } diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 2ebfb2bb0f42..7b23f21f22f1 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) if (crq->valid & 0x80) { if (++queue->cur == queue->size) queue->cur = 0; + + /* Ensure the read of the valid bit occurs before reading any + * other bits of the CRQ entry + */ + rmb(); } else crq = NULL; spin_unlock_irqrestore(&queue->lock, flags); @@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, { struct vio_dev *vdev = to_vio_dev(hostdata->dev); + /* + * Ensure the command buffer is flushed to memory before handing it + * over to the VIOS to prevent it from fetching any stale data. + */ + mb(); return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); } @@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) evt->hostdata->dev); if (evt->cmnd_done) evt->cmnd_done(evt->cmnd); - } else if (evt->done) + } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT && + evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ) evt->done(evt); free_event_struct(&evt->hostdata->pool, evt); spin_lock_irqsave(hostdata->host->host_lock, flags); diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index c4f31b21feb8..e90c89f1d480 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -677,7 +677,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) * pm8001_get_phy_settings_info : Read phy setting values. * @pm8001_ha : our hba. 
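
The two ibmvscsi barriers pair up across the hypervisor boundary: the consumer must not read a CRQ entry's payload until it has observed the valid bit, and the producer must make the command buffer globally visible before H_SEND_CRQ hands it over. Schematically (handle_crq() is a hypothetical payload consumer):

```c
/* Consumer: trust the payload only after the valid bit has been seen. */
if (crq->valid & 0x80) {
	rmb();		/* order the valid-bit read before payload reads */
	handle_crq(crq);
}

/* Producer: flush the buffer before notifying the VIOS. */
mb();
plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
```
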
*/ -void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) +static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) { #ifdef PM8001_READ_VPD @@ -691,11 +691,15 @@ void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) payload.offset = 0; payload.length = 4096; payload.func_specific = kzalloc(4096, GFP_KERNEL); + if (!payload.func_specific) + return -ENOMEM; /* Read phy setting values from flash */ PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); wait_for_completion(&completion); pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); + kfree(payload.func_specific); #endif + return 0; } #ifdef PM8001_USE_MSIX @@ -879,8 +883,11 @@ static int pm8001_pci_probe(struct pci_dev *pdev, pm8001_init_sas_add(pm8001_ha); /* phy setting support for motherboard controller */ if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 && - pdev->subsystem_vendor != 0) - pm8001_get_phy_settings_info(pm8001_ha); + pdev->subsystem_vendor != 0) { + rc = pm8001_get_phy_settings_info(pm8001_ha); + if (rc) + goto err_out_shost; + } pm8001_post_sas_ha_init(shost, chip); rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); if (rc) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 4b188b0164e9..e632e14180cf 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1128,7 +1128,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, ctio->u.status1.flags = __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); - ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; + ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id); qla2x00_start_iocbs(vha, vha->req); @@ -1262,6 +1262,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, { struct atio_from_isp *atio = &mcmd->orig_iocb.atio; struct ctio7_to_24xx *ctio; + uint16_t temp; ql_dbg(ql_dbg_tgt, ha, 0xe008, "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", @@ -1292,7 +1293,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, ctio->u.status1.flags = (atio->u.isp24.attr << 9) | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); - ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio->u.status1.ox_id = cpu_to_le16(temp); ctio->u.status1.scsi_status = __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); ctio->u.status1.response_len = __constant_cpu_to_le16(8); @@ -1513,6 +1515,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, struct ctio7_to_24xx *pkt; struct qla_hw_data *ha = vha->hw; struct atio_from_isp *atio = &prm->cmd->atio; + uint16_t temp; pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; prm->pkt = pkt; @@ -1541,13 +1544,13 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; pkt->exchange_addr = atio->u.isp24.exchange_addr; pkt->u.status0.flags |= (atio->u.isp24.attr << 9); - pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + pkt->u.status0.ox_id = cpu_to_le16(temp); pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); ql_dbg(ql_dbg_tgt, vha, 0xe00c, "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n", - vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, - le16_to_cpu(pkt->u.status0.ox_id)); + vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp); return 0; } @@ -2619,6 +2622,7 @@ static int __qlt_send_term_exchange(struct 
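
The ox_id conversions in qla_target replace a bare swab16() with an explicit two-step conversion: the FC header field is big-endian on the wire and the CTIO field is little-endian for the firmware. Both forms swap the bytes, but the explicit pair documents the intent and lets sparse verify the new __le16 typing of ox_id:

```c
uint16_t temp;

temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);	/* wire (BE) -> CPU */
ctio->u.status1.ox_id = cpu_to_le16(temp);		/* CPU -> firmware (LE) */
```
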
scsi_qla_host *vha, struct qla_hw_data *ha = vha->hw; request_t *pkt; int ret = 0; + uint16_t temp; ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); @@ -2655,7 +2659,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); - ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio24->u.status1.ox_id = cpu_to_le16(temp); /* Most likely, it isn't needed */ ctio24->u.status1.residual = get_unaligned((uint32_t *) diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index e0a58fd13f66..d1d24fb0160a 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -443,7 +443,7 @@ struct ctio7_to_24xx { uint16_t reserved1; __le16 flags; uint32_t residual; - uint16_t ox_id; + __le16 ox_id; uint16_t scsi_status; uint32_t relative_offset; uint32_t reserved2; @@ -458,7 +458,7 @@ struct ctio7_to_24xx { uint16_t sense_length; uint16_t flags; uint32_t residual; - uint16_t ox_id; + __le16 ox_id; uint16_t scsi_status; uint16_t response_len; uint16_t reserved; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index cbe38e5e7955..7e957918f33f 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -131,7 +131,7 @@ scmd_eh_abort_handler(struct work_struct *work) "aborting command %p\n", scmd)); rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); if (rtn == SUCCESS) { - scmd->result |= DID_TIME_OUT << 16; + set_host_byte(scmd, DID_TIME_OUT); if (scsi_host_eh_past_deadline(sdev->host)) { SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, @@ -167,7 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work) scmd_printk(KERN_WARNING, scmd, "scmd %p terminate " "aborted command\n", scmd)); - scmd->result |= DID_TIME_OUT << 16; + set_host_byte(scmd, DID_TIME_OUT); scsi_finish_command(scmd); } } @@ -287,15 +287,15 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) else if (host->hostt->eh_timed_out) rtn = host->hostt->eh_timed_out(scmd); - if (rtn == BLK_EH_NOT_HANDLED && !host->hostt->no_async_abort) - if (scsi_abort_command(scmd) == SUCCESS) + if (rtn == BLK_EH_NOT_HANDLED) { + if (!host->hostt->no_async_abort && + scsi_abort_command(scmd) == SUCCESS) return BLK_EH_NOT_HANDLED; - scmd->result |= DID_TIME_OUT << 16; - - if (unlikely(rtn == BLK_EH_NOT_HANDLED && - !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) - rtn = BLK_EH_HANDLED; + set_host_byte(scmd, DID_TIME_OUT); + if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) + rtn = BLK_EH_HANDLED; + } return rtn; } @@ -1777,7 +1777,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) break; case DID_ABORT: if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) { - scmd->result |= DID_TIME_OUT << 16; + set_host_byte(scmd, DID_TIME_OUT); return SUCCESS; } case DID_NO_CONNECT: diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index f80908f74ca9..521f5838594b 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2549,6 +2549,7 @@ fc_rport_final_delete(struct work_struct *work) fc_flush_devloss(shost); if (!cancel_delayed_work(&rport->dev_loss_work)) fc_flush_devloss(shost); + cancel_work_sync(&rport->scan_work); spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e9689d57ccb6..6825eda1114a 
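
set_host_byte() replaces the host byte where the old code OR-ed into it, which matters once the field is already non-zero: OR-ing DID_TIME_OUT (0x03) onto an existing DID_ABORT (0x05) yields 0x07, which decodes as DID_ERROR. The helper, as defined in include/scsi/scsi_cmnd.h:

```c
static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
{
	cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
}

/* Contrast: scmd->result |= DID_TIME_OUT << 16 can merge two codes,
 * e.g. (DID_ABORT | DID_TIME_OUT) << 16 == DID_ERROR << 16. */
```
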
100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2441,7 +2441,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) } sdkp->DPOFUA = (data.device_specific & 0x10) != 0; - if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + if (sdp->broken_fua) { + sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); + sdkp->DPOFUA = 0; + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 89ee5929eb6d..308256b5e4cb 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -237,6 +237,16 @@ static void virtscsi_req_done(struct virtqueue *vq) virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); }; +static void virtscsi_poll_requests(struct virtio_scsi *vscsi) +{ + int i, num_vqs; + + num_vqs = vscsi->num_queues; + for (i = 0; i < num_vqs; i++) + virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], + virtscsi_complete_cmd); +} + static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; @@ -253,6 +263,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq) virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); }; +static void virtscsi_handle_event(struct work_struct *work); + static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct virtio_scsi_event_node *event_node) { @@ -260,6 +272,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct scatterlist sg; unsigned long flags; + INIT_WORK(&event_node->work, virtscsi_handle_event); sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); @@ -377,7 +390,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_event_node *event_node = buf; - INIT_WORK(&event_node->work, virtscsi_handle_event); schedule_work(&event_node->work); } @@ -589,6 +601,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) ret = SUCCESS; + /* + * The spec guarantees that all requests related to the TMF have + * been completed, but the callback might not have run yet if + * we're using independent interrupts (e.g. MSI). Poll the + * virtqueues once. + * + * In the abort case, sc->scsi_done will do nothing, because + * the block layer must have detected a timeout and as a result + * REQ_ATOM_COMPLETE has been set. 
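
The INIT_WORK() move in virtio_scsi follows the general workqueue rule: initialize a work item once, before the owning object can reach the completion path, and afterwards only schedule it; re-running INIT_WORK() on a work struct that may already be queued corrupts its pending state. In outline (post_event_buffer() is a hypothetical helper for queueing the buffer on the event virtqueue):

```c
/* Setup path: initialize before the buffer is handed to the device. */
INIT_WORK(&event_node->work, virtscsi_handle_event);
post_event_buffer(vscsi, event_node);

/* Completion path: schedule only, never re-initialize. */
schedule_work(&event_node->work);
```
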
+ */ + virtscsi_poll_requests(vscsi); + out: mempool_free(cmd, virtscsi_cmd_pool); return ret; diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c index 357cef2a6f4c..7194bd138762 100644 --- a/drivers/staging/iio/adc/ad7291.c +++ b/drivers/staging/iio/adc/ad7291.c @@ -465,7 +465,7 @@ static int ad7291_probe(struct i2c_client *client, struct ad7291_platform_data *pdata = client->dev.platform_data; struct ad7291_chip_info *chip; struct iio_dev *indio_dev; - int ret = 0; + int ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip)); if (!indio_dev) @@ -475,7 +475,7 @@ static int ad7291_probe(struct i2c_client *client, if (pdata && pdata->use_external_ref) { chip->reg = devm_regulator_get(&client->dev, "vref"); if (IS_ERR(chip->reg)) - return ret; + return PTR_ERR(chip->reg); ret = regulator_enable(chip->reg); if (ret) diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index def8280d7ee6..6f54ff4f9372 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -202,7 +202,7 @@ static const struct file_operations imx_drm_driver_fops = { void imx_drm_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } EXPORT_SYMBOL_GPL(imx_drm_connector_destroy); @@ -293,10 +293,10 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) * userspace will expect to be able to access DRM at this point. */ list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - ret = drm_sysfs_connector_add(connector); + ret = drm_connector_register(connector); if (ret) { dev_err(drm->dev, - "[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n", + "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n", connector->base.id, connector->name, ret); goto err_unbind; diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c index 8945b4e3a2a6..cb50120ed7b5 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430.c +++ b/drivers/staging/tidspbridge/core/tiomap3430.c @@ -280,8 +280,10 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt) OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); /* Wait until the state has moved to ON */ - while (*pdata->dsp_prm_read(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST)& - OMAP_INTRANSITION_MASK); + while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, + OMAP2_PM_PWSTST) & + OMAP_INTRANSITION_MASK) + ; /* Disable Automatic transition */ (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 69425b3cb6b7..9d2b673f90e3 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1321,6 +1321,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); unsigned long flags; + struct td_node *node, *tmpnode; if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY || hwep->ep.desc == NULL || list_empty(&hwreq->queue) || @@ -1331,6 +1332,12 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) hw_ep_flush(hwep->ci, hwep->num, hwep->dir); + list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { + dma_pool_free(hwep->td_pool, node->ptr, node->dma); + list_del(&node->td); + kfree(node); + } + /* pop 
request */ list_del_init(&hwreq->queue); diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index 8eb996e4f058..261c3b428220 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -45,6 +45,7 @@ comment "Platform Glue Driver Support" config USB_DWC3_OMAP tristate "Texas Instruments OMAP5 and similar Platforms" depends on EXTCON && (ARCH_OMAP2PLUS || COMPILE_TEST) + depends on OF default USB_DWC3 help Some platforms from Texas Instruments like OMAP5, DRA7xxx and diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 4af4c3567656..07a736acd0f2 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -322,7 +322,7 @@ static int dwc3_omap_remove_core(struct device *dev, void *c) { struct platform_device *pdev = to_platform_device(dev); - platform_device_unregister(pdev); + of_device_unregister(pdev); return 0; } @@ -599,7 +599,7 @@ static int dwc3_omap_prepare(struct device *dev) { struct dwc3_omap *omap = dev_get_drvdata(dev); - dwc3_omap_disable_irqs(omap); + dwc3_omap_write_irqmisc_set(omap, 0x00); return 0; } @@ -607,8 +607,19 @@ static int dwc3_omap_prepare(struct device *dev) static void dwc3_omap_complete(struct device *dev) { struct dwc3_omap *omap = dev_get_drvdata(dev); + u32 reg; - dwc3_omap_enable_irqs(omap); + reg = (USBOTGSS_IRQMISC_OEVT | + USBOTGSS_IRQMISC_DRVVBUS_RISE | + USBOTGSS_IRQMISC_CHRGVBUS_RISE | + USBOTGSS_IRQMISC_DISCHRGVBUS_RISE | + USBOTGSS_IRQMISC_IDPULLUP_RISE | + USBOTGSS_IRQMISC_DRVVBUS_FALL | + USBOTGSS_IRQMISC_CHRGVBUS_FALL | + USBOTGSS_IRQMISC_DISCHRGVBUS_FALL | + USBOTGSS_IRQMISC_IDPULLUP_FALL); + + dwc3_omap_write_irqmisc_set(omap, reg); } static int dwc3_omap_suspend(struct device *dev) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 9d64dd02c57e..dab7927d1009 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -828,10 +828,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, length, last ? " last" : "", chain ? 
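
The ep_dequeue() fix frees every td node still attached to the request; note the _safe iterator, which caches the next pointer so that list_del() plus kfree() of the current node cannot derail the walk. The shape of the loop:

```c
struct td_node *node, *tmpnode;

list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
	dma_pool_free(hwep->td_pool, node->ptr, node->dma); /* hw descriptor */
	list_del(&node->td);	/* unlink first... */
	kfree(node);		/* ...then free the node itself */
}
```
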
" chain" : ""); - /* Skip the LINK-TRB on ISOC */ - if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && - usb_endpoint_xfer_isoc(dep->endpoint.desc)) - dep->free_slot++; trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; @@ -843,6 +839,10 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, } dep->free_slot++; + /* Skip the LINK-TRB on ISOC */ + if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && + usb_endpoint_xfer_isoc(dep->endpoint.desc)) + dep->free_slot++; trb->size = DWC3_TRB_SIZE_LENGTH(length); trb->bpl = lower_32_bits(dma); diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 2ddcd635ca2a..97142146eead 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1145,15 +1145,15 @@ static struct configfs_item_operations interf_item_ops = { .store_attribute = usb_os_desc_attr_store, }; -static ssize_t rndis_grp_compatible_id_show(struct usb_os_desc *desc, - char *page) +static ssize_t interf_grp_compatible_id_show(struct usb_os_desc *desc, + char *page) { memcpy(page, desc->ext_compat_id, 8); return 8; } -static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc, - const char *page, size_t len) +static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc, + const char *page, size_t len) { int l; @@ -1171,20 +1171,20 @@ static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc, return len; } -static struct usb_os_desc_attribute rndis_grp_attr_compatible_id = +static struct usb_os_desc_attribute interf_grp_attr_compatible_id = __CONFIGFS_ATTR(compatible_id, S_IRUGO | S_IWUSR, - rndis_grp_compatible_id_show, - rndis_grp_compatible_id_store); + interf_grp_compatible_id_show, + interf_grp_compatible_id_store); -static ssize_t rndis_grp_sub_compatible_id_show(struct usb_os_desc *desc, - char *page) +static ssize_t interf_grp_sub_compatible_id_show(struct usb_os_desc *desc, + char *page) { memcpy(page, desc->ext_compat_id + 8, 8); return 8; } -static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc, - const char *page, size_t len) +static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc, + const char *page, size_t len) { int l; @@ -1202,20 +1202,21 @@ static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc, return len; } -static struct usb_os_desc_attribute rndis_grp_attr_sub_compatible_id = +static struct usb_os_desc_attribute interf_grp_attr_sub_compatible_id = __CONFIGFS_ATTR(sub_compatible_id, S_IRUGO | S_IWUSR, - rndis_grp_sub_compatible_id_show, - rndis_grp_sub_compatible_id_store); + interf_grp_sub_compatible_id_show, + interf_grp_sub_compatible_id_store); static struct configfs_attribute *interf_grp_attrs[] = { - &rndis_grp_attr_compatible_id.attr, - &rndis_grp_attr_sub_compatible_id.attr, + &interf_grp_attr_compatible_id.attr, + &interf_grp_attr_sub_compatible_id.attr, NULL }; int usb_os_desc_prepare_interf_dir(struct config_group *parent, int n_interf, struct usb_os_desc **desc, + char **names, struct module *owner) { struct config_group **f_default_groups, *os_desc_group, @@ -1257,8 +1258,8 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent, d = desc[n_interf]; d->owner = owner; config_group_init_type_name(&d->group, "", interface_type); - config_item_set_name(&d->group.cg_item, "interface.%d", - n_interf); + config_item_set_name(&d->group.cg_item, "interface.%s", + names[n_interf]); interface_groups[n_interf] = &d->group; } diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h 
index a14ac792c698..36c468c4f5e9 100644 --- a/drivers/usb/gadget/configfs.h +++ b/drivers/usb/gadget/configfs.h @@ -8,6 +8,7 @@ void unregister_gadget_item(struct config_item *item); int usb_os_desc_prepare_interf_dir(struct config_group *parent, int n_interf, struct usb_os_desc **desc, + char **names, struct module *owner); static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 74202d67f911..8598c27c7d43 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -1483,11 +1483,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev) ffs->ep0req->context = ffs; lang = ffs->stringtabs; - for (lang = ffs->stringtabs; *lang; ++lang) { - struct usb_string *str = (*lang)->strings; - int id = first_id; - for (; str->s; ++id, ++str) - str->id = id; + if (lang) { + for (; *lang; ++lang) { + struct usb_string *str = (*lang)->strings; + int id = first_id; + for (; str->s; ++id, ++str) + str->id = id; + } } ffs->gadget = cdev->gadget; diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index eed3ad878047..9c41e9515b8e 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -687,7 +687,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) f->os_desc_table = kzalloc(sizeof(*f->os_desc_table), GFP_KERNEL); if (!f->os_desc_table) - return PTR_ERR(f->os_desc_table); + return -ENOMEM; f->os_desc_n = 1; f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc; } @@ -905,6 +905,7 @@ static struct usb_function_instance *rndis_alloc_inst(void) { struct f_rndis_opts *opts; struct usb_os_desc *descs[1]; + char *names[1]; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) @@ -922,8 +923,9 @@ static struct usb_function_instance *rndis_alloc_inst(void) INIT_LIST_HEAD(&opts->rndis_os_desc.ext_prop); descs[0] = &opts->rndis_os_desc; + names[0] = "rndis"; usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, - THIS_MODULE); + names, THIS_MODULE); config_group_init_type_name(&opts->func_inst.group, "", &rndis_func_type); diff --git a/drivers/usb/gadget/gr_udc.c b/drivers/usb/gadget/gr_udc.c index 99a37ed03e27..c7004ee89c90 100644 --- a/drivers/usb/gadget/gr_udc.c +++ b/drivers/usb/gadget/gr_udc.c @@ -1532,8 +1532,9 @@ static int gr_ep_enable(struct usb_ep *_ep, "%s mode: multiple trans./microframe not valid\n", (mode == 2 ? 
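
The f_rndis return-value fix is worth dwelling on: kzalloc() signals failure with NULL, not an ERR_PTR, so `return PTR_ERR(f->os_desc_table)` after a failed allocation returns PTR_ERR(NULL) == 0 and the caller sees success. Compare the ad7291 fix earlier, where devm_regulator_get() really does return ERR_PTRs and PTR_ERR() is exactly right. The correct pattern for kmalloc-family calls:

```c
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
	return -ENOMEM;	/* not PTR_ERR(p): PTR_ERR(NULL) is 0, i.e. "success" */
```
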
"Bulk" : "Control")); return -EINVAL; - } else if (nt == 0x11) { - dev_err(dev->dev, "Invalid value for trans./microframe\n"); + } else if (nt == 0x3) { + dev_err(dev->dev, + "Invalid value 0x3 for additional trans./microframe\n"); return -EINVAL; } else if ((nt + 1) * max > buffer_size) { dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n", diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index ee6c16416c30..2e4ce7704908 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -1264,8 +1264,13 @@ dev_release (struct inode *inode, struct file *fd) kfree (dev->buf); dev->buf = NULL; - put_dev (dev); + /* other endpoints were all decoupled from this device */ + spin_lock_irq(&dev->lock); + dev->state = STATE_DEV_DISABLED; + spin_unlock_irq(&dev->lock); + + put_dev (dev); return 0; } diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index 3d78a8844e43..97b027724ee7 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -1120,7 +1120,10 @@ void gether_disconnect(struct gether *link) DBG(dev, "%s\n", __func__); + netif_tx_lock(dev->net); netif_stop_queue(dev->net); + netif_tx_unlock(dev->net); + netif_carrier_off(dev->net); /* disable endpoints, forcing (synchronous) completion diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 61b7817bd66b..03314f861bee 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -176,7 +176,7 @@ config USB_EHCI_HCD_AT91 config USB_EHCI_MSM tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller" - depends on ARCH_MSM + depends on ARCH_MSM || ARCH_QCOM select USB_EHCI_ROOT_HUB_TT ---help--- Enables support for the USB Host controller present on the diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 2b998c60faf2..aa79e8749040 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -22,6 +22,7 @@ #include <linux/slab.h> +#include <linux/device.h> #include <asm/unaligned.h> #include "xhci.h" @@ -1139,7 +1140,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd) * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME * is enabled, so also enable remote wake here. */ - if (hcd->self.root_hub->do_remote_wakeup) { + if (hcd->self.root_hub->do_remote_wakeup + && device_may_wakeup(hcd->self.controller)) { + if (t1 & PORT_CONNECT) { t2 |= PORT_WKOC_E | PORT_WKDISC_E; t2 &= ~PORT_WKCONN_E; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index d67ff71209f5..749fc68eb5c1 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1433,8 +1433,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); break; case TRB_RESET_DEV: - WARN_ON(slot_id != TRB_TO_SLOT_ID( - le32_to_cpu(cmd_trb->generic.field[3]))); + /* SLOT_ID field in reset device cmd completion event TRB is 0. 
+ * Use the SLOT_ID from the command TRB instead (xhci 4.6.11) + */ + slot_id = TRB_TO_SLOT_ID( + le32_to_cpu(cmd_trb->generic.field[3])); xhci_handle_cmd_reset_dev(xhci, slot_id, event); break; case TRB_NEC_GET_FW: @@ -3534,7 +3537,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, return 0; max_burst = urb->ep->ss_ep_comp.bMaxBurst; - return roundup(total_packet_count, max_burst + 1) - 1; + return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; } /* diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2b8d9a24af09..7436d5f5e67a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -936,7 +936,7 @@ int xhci_suspend(struct xhci_hcd *xhci) */ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { - u32 command, temp = 0; + u32 command, temp = 0, status; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; int retval = 0; @@ -1054,8 +1054,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { - usb_hcd_resume_root_hub(hcd); - usb_hcd_resume_root_hub(xhci->shared_hcd); + /* Resume root hubs only when there are pending events. */ + status = readl(&xhci->op_regs->status); + if (status & STS_EINT) { + usb_hcd_resume_root_hub(hcd); + usb_hcd_resume_root_hub(xhci->shared_hcd); + } } /* diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c index d2353781bd2d..1e58ed2361cc 100644 --- a/drivers/usb/musb/musb_am335x.c +++ b/drivers/usb/musb/musb_am335x.c @@ -19,21 +19,6 @@ err: return ret; } -static int of_remove_populated_child(struct device *dev, void *d) -{ - struct platform_device *pdev = to_platform_device(dev); - - of_device_unregister(pdev); - return 0; -} - -static int am335x_child_remove(struct platform_device *pdev) -{ - device_for_each_child(&pdev->dev, NULL, of_remove_populated_child); - pm_runtime_disable(&pdev->dev); - return 0; -} - static const struct of_device_id am335x_child_of_match[] = { { .compatible = "ti,am33xx-usb" }, { }, @@ -42,13 +27,17 @@ MODULE_DEVICE_TABLE(of, am335x_child_of_match); static struct platform_driver am335x_child_driver = { .probe = am335x_child_probe, - .remove = am335x_child_remove, .driver = { .name = "am335x-usb-childs", .of_match_table = am335x_child_of_match, }, }; -module_platform_driver(am335x_child_driver); +static int __init am335x_child_init(void) +{ + return platform_driver_register(&am335x_child_driver); +} +module_init(am335x_child_init); + MODULE_DESCRIPTION("AM33xx child devices"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 61da471b7aed..eff3c5cf84f4 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -849,7 +849,7 @@ b_host: } /* handle babble condition */ - if (int_usb & MUSB_INTR_BABBLE) + if (int_usb & MUSB_INTR_BABBLE && is_host_active(musb)) schedule_work(&musb->recover_work); #if 0 diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index 7b8bbf53127e..5341bb223b7c 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c @@ -318,7 +318,7 @@ static void cppi41_dma_callback(void *private_data) } list_add_tail(&cppi41_channel->tx_check, &controller->early_tx_list); - if (!hrtimer_active(&controller->early_tx)) { + if (!hrtimer_is_queued(&controller->early_tx)) { hrtimer_start_range_ns(&controller->early_tx, ktime_set(0, 140 * NSEC_PER_USEC), 40 * NSEC_PER_USEC, diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 51beb13c7e1a..09529f94e72d 100644 ---
a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -494,10 +494,9 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode) struct dsps_glue *glue = dev_get_drvdata(dev->parent); const struct dsps_musb_wrapper *wrp = glue->wrp; void __iomem *ctrl_base = musb->ctrl_base; - void __iomem *base = musb->mregs; u32 reg; - reg = dsps_readl(base, wrp->mode); + reg = dsps_readl(ctrl_base, wrp->mode); switch (mode) { case MUSB_HOST: @@ -510,7 +509,7 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode) */ reg |= (1 << wrp->iddig_mux); - dsps_writel(base, wrp->mode, reg); + dsps_writel(ctrl_base, wrp->mode, reg); dsps_writel(ctrl_base, wrp->phy_utmi, 0x02); break; case MUSB_PERIPHERAL: @@ -523,10 +522,10 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode) */ reg |= (1 << wrp->iddig_mux); - dsps_writel(base, wrp->mode, reg); + dsps_writel(ctrl_base, wrp->mode, reg); break; case MUSB_OTG: - dsps_writel(base, wrp->phy_utmi, 0x02); + dsps_writel(ctrl_base, wrp->phy_utmi, 0x02); break; default: dev_err(glue->dev, "unsupported mode %d\n", mode); diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index c2e45e632723..f202e5088461 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -274,7 +274,6 @@ static int ux500_probe(struct platform_device *pdev) musb->dev.parent = &pdev->dev; musb->dev.dma_mask = &pdev->dev.coherent_dma_mask; musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; - musb->dev.of_node = pdev->dev.of_node; glue->dev = &pdev->dev; glue->musb = musb; diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index ced34f39bdd4..c929370cdaa6 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -1229,7 +1229,9 @@ static void msm_otg_sm_work(struct work_struct *w) motg->chg_state = USB_CHG_STATE_UNDEFINED; motg->chg_type = USB_INVALID_CHARGER; } - pm_runtime_put_sync(otg->phy->dev); + + if (otg->phy->state == OTG_STATE_B_IDLE) + pm_runtime_put_sync(otg->phy->dev); break; case OTG_STATE_B_PERIPHERAL: dev_dbg(otg->phy->dev, "OTG_STATE_B_PERIPHERAL state\n"); diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index d49f9c326035..4fd36530bfa3 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -681,6 +681,14 @@ usbhs_fifo_read_end: usbhs_pipe_number(pipe), pkt->length, pkt->actual, *is_done, pkt->zero); + /* + * Transmission end + */ + if (*is_done) { + if (usbhs_pipe_is_dcp(pipe)) + usbhs_dcp_control_transfer_done(pipe); + } + usbhs_fifo_read_busy: usbhsf_fifo_unselect(pipe, fifo); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index edf3b124583c..115662c16dcc 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1566,14 +1566,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port) struct usb_device *udev = serial->dev; struct usb_interface *interface = serial->interface; - struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc; + struct usb_endpoint_descriptor *ep_desc; unsigned num_endpoints; - int i; + unsigned i; num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); + if (!num_endpoints) + return; + /* NOTE: some customers have programmed FT232R/FT245R devices * with an endpoint size of 0 - not good. 
In this case, we * want to override the endpoint descriptor setting and use a diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 59c3108cc136..ac73f49cd9f0 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb); /* Zoom */ #define ZOOM_PRODUCT_4597 0x9607 +/* SpeedUp SU9800 usb 3g modem */ +#define SPEEDUP_PRODUCT_SU9800 0x9800 + /* Haier products */ #define HAIER_VENDOR_ID 0x201e #define HAIER_PRODUCT_CE100 0x2009 @@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb); /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 +#define OLIVETTI_PRODUCT_OLICARD120 0xc001 +#define OLIVETTI_PRODUCT_OLICARD140 0xc002 #define OLIVETTI_PRODUCT_OLICARD145 0xc003 +#define OLIVETTI_PRODUCT_OLICARD155 0xc004 #define OLIVETTI_PRODUCT_OLICARD200 0xc005 +#define OLIVETTI_PRODUCT_OLICARD160 0xc00a #define OLIVETTI_PRODUCT_OLICARD500 0xc00b /* Celot products */ @@ -1577,6 +1584,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, @@ -1611,15 +1619,21 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, - - { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist - }, + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist - }, + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 9d38ddc8da49..866b5df36ed1 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -256,6 +256,10 @@ static int slave_configure(struct scsi_device *sdev) if (us->fflags & US_FL_WRITE_CACHE) sdev->wce_default_on = 1; + /* 
A few buggy USB-ATA bridges don't understand FUA */ + if (us->fflags & US_FL_BROKEN_FUA) + sdev->broken_fua = 1; + } else { /* Non-disk-type devices don't need to blacklist any pages diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 174a447868cd..80a5b366255f 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1936,6 +1936,13 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Reported by Michael Büsch <m@bues.ch> */ +UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br> * JMicron responds to USN and several other SCSI ioctls with a * residue that causes subsequent I/O requests to fail. */ diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index e683b6ef9594..d36e830d6fc6 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -1057,6 +1057,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) goto put_display_node; } + INIT_LIST_HEAD(&pdata->pwr_gpios); ret = -ENOMEM; for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) { gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio", @@ -1082,6 +1083,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) dev_err(dev, "set direction output gpio %d failed\n", gpio); goto put_display_node; } + list_add(&og->list, &pdata->pwr_gpios); } if (is_gpio_power) diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c index a54f7f7d763b..8fe41caac38e 100644 --- a/drivers/video/fbdev/bfin_adv7393fb.c +++ b/drivers/video/fbdev/bfin_adv7393fb.c @@ -408,7 +408,7 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client, /* Workaround "PPI Does Not Start Properly In Specific Mode" */ if (ANOMALY_05000400) { ret = gpio_request_one(P_IDENT(P_PPI0_FS3), GPIOF_OUT_INIT_LOW, - "PPI0_FS3") + "PPI0_FS3"); if (ret) { dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n"); ret = -EBUSY; diff --git a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c index 99af9e88b2d8..2f0822ee3ff9 100644 --- a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c +++ b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c @@ -121,9 +121,11 @@ static void __init omapdss_add_to_list(struct device_node *node, bool root) { struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node), GFP_KERNEL); - n->node = node; - n->root = root; - list_add(&n->list, &dss_conv_list); + if (n) { + n->node = node; + n->root = root; + list_add(&n->list, &dss_conv_list); + } } static bool __init omapdss_list_contains(const struct device_node *node) diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c index a8f2b280f796..a1134c3f6c11 100644 --- a/drivers/video/fbdev/vt8500lcdfb.c +++ b/drivers/video/fbdev/vt8500lcdfb.c @@ -474,8 +474,6 @@ static int vt8500lcd_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); - kfree(fbi); - return 0; } diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index d7bd395ab586..1c55388ae633 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -210,7 +210,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) int pipefd; struct 
autofs_sb_info *sbi; struct autofs_info *ino; - int pgrp; + int pgrp = 0; bool pgrp_set = false; int ret = -EINVAL; diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 92371c414228..1daea0b47187 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -821,7 +821,7 @@ static void free_workspace(int type, struct list_head *workspace) spin_lock(workspace_lock); if (*num_workspace < num_online_cpus()) { - list_add_tail(workspace, idle_workspace); + list_add(workspace, idle_workspace); (*num_workspace)++; spin_unlock(workspace_lock); goto wake; diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 2af6e66fe788..eea26e1b2fda 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -36,6 +36,7 @@ #include "check-integrity.h" #include "rcu-string.h" #include "dev-replace.h" +#include "sysfs.h" static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, int scrub_ret); @@ -562,6 +563,10 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, fs_info->fs_devices->latest_bdev = tgt_device->bdev; list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list); + /* replace the sysfs entry */ + btrfs_kobj_rm_device(fs_info, src_device); + btrfs_kobj_add_device(fs_info, tgt_device); + btrfs_rm_dev_replace_blocked(fs_info); btrfs_rm_dev_replace_srcdev(fs_info, src_device); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8bb4aa19898f..08e65e9cf2aa 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -369,7 +369,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, out: unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, &cached_state, GFP_NOFS); - btrfs_tree_read_unlock_blocking(eb); + if (need_lock) + btrfs_tree_read_unlock_blocking(eb); return ret; } @@ -2904,7 +2905,9 @@ retry_root_backup: if (ret) goto fail_qgroup; + mutex_lock(&fs_info->cleaner_mutex); ret = btrfs_recover_relocation(tree_root); + mutex_unlock(&fs_info->cleaner_mutex); if (ret < 0) { printk(KERN_WARNING "BTRFS: failed to recover relocation\n"); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 99c253918208..813537f362f9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5678,7 +5678,6 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, struct btrfs_caching_control *next; struct btrfs_caching_control *caching_ctl; struct btrfs_block_group_cache *cache; - struct btrfs_space_info *space_info; down_write(&fs_info->commit_root_sem); @@ -5701,9 +5700,6 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, up_write(&fs_info->commit_root_sem); - list_for_each_entry_rcu(space_info, &fs_info->space_info, list) - percpu_counter_set(&space_info->total_bytes_pinned, 0); - update_global_block_rsv(fs_info); } @@ -5741,6 +5737,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) spin_lock(&cache->lock); cache->pinned -= len; space_info->bytes_pinned -= len; + percpu_counter_add(&space_info->total_bytes_pinned, -len); if (cache->ro) { space_info->bytes_readonly += len; readonly = true; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 0d321c23069a..47aceb494d1d 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -136,19 +136,22 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags) void btrfs_update_iflags(struct inode *inode) { struct btrfs_inode *ip = BTRFS_I(inode); - - inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + unsigned int new_fl = 0; if (ip->flags & BTRFS_INODE_SYNC) - 
inode->i_flags |= S_SYNC; + new_fl |= S_SYNC; if (ip->flags & BTRFS_INODE_IMMUTABLE) - inode->i_flags |= S_IMMUTABLE; + new_fl |= S_IMMUTABLE; if (ip->flags & BTRFS_INODE_APPEND) - inode->i_flags |= S_APPEND; + new_fl |= S_APPEND; if (ip->flags & BTRFS_INODE_NOATIME) - inode->i_flags |= S_NOATIME; + new_fl |= S_NOATIME; if (ip->flags & BTRFS_INODE_DIRSYNC) - inode->i_flags |= S_DIRSYNC; + new_fl |= S_DIRSYNC; + + set_mask_bits(&inode->i_flags, + S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC, + new_fl); } /* @@ -3139,7 +3142,6 @@ out: static void clone_update_extent_map(struct inode *inode, const struct btrfs_trans_handle *trans, const struct btrfs_path *path, - struct btrfs_file_extent_item *fi, const u64 hole_offset, const u64 hole_len) { @@ -3154,7 +3156,11 @@ static void clone_update_extent_map(struct inode *inode, return; } - if (fi) { + if (path) { + struct btrfs_file_extent_item *fi; + + fi = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_file_extent_item); btrfs_extent_item_to_extent_map(inode, path, fi, false, em); em->generation = -1; if (btrfs_file_extent_type(path->nodes[0], fi) == @@ -3508,18 +3514,15 @@ process_slot: btrfs_item_ptr_offset(leaf, slot), size); inode_add_bytes(inode, datal); - extent = btrfs_item_ptr(leaf, slot, - struct btrfs_file_extent_item); } /* If we have an implicit hole (NO_HOLES feature). */ if (drop_start < new_key.offset) clone_update_extent_map(inode, trans, - path, NULL, drop_start, + NULL, drop_start, new_key.offset - drop_start); - clone_update_extent_map(inode, trans, path, - extent, 0, 0); + clone_update_extent_map(inode, trans, path, 0, 0); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); @@ -3562,12 +3565,10 @@ process_slot: btrfs_end_transaction(trans, root); goto out; } + clone_update_extent_map(inode, trans, NULL, last_dest_end, + destoff + len - last_dest_end); ret = clone_finish_inode_update(trans, inode, destoff + len, destoff, olen); - if (ret) - goto out; - clone_update_extent_map(inode, trans, path, NULL, last_dest_end, - destoff + len - last_dest_end); } out: diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 6efd70d3b64f..9626b4ad3b9a 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -54,7 +54,7 @@ static void print_extent_data_ref(struct extent_buffer *eb, btrfs_extent_data_ref_count(eb, ref)); } -static void print_extent_item(struct extent_buffer *eb, int slot) +static void print_extent_item(struct extent_buffer *eb, int slot, int type) { struct btrfs_extent_item *ei; struct btrfs_extent_inline_ref *iref; @@ -63,7 +63,6 @@ static void print_extent_item(struct extent_buffer *eb, int slot) struct btrfs_disk_key key; unsigned long end; unsigned long ptr; - int type; u32 item_size = btrfs_item_size_nr(eb, slot); u64 flags; u64 offset; @@ -88,7 +87,8 @@ static void print_extent_item(struct extent_buffer *eb, int slot) btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei), flags); - if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { + if ((type == BTRFS_EXTENT_ITEM_KEY) && + flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { struct btrfs_tree_block_info *info; info = (struct btrfs_tree_block_info *)(ei + 1); btrfs_tree_block_key(eb, info, &key); @@ -223,7 +223,8 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) btrfs_disk_root_refs(l, ri)); break; case BTRFS_EXTENT_ITEM_KEY: - print_extent_item(l, i); + case BTRFS_METADATA_ITEM_KEY: + print_extent_item(l, i, type); break; case BTRFS_TREE_BLOCK_REF_KEY: printk(KERN_INFO "\t\ttree block backref\n"); diff --git 
a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 4055291a523e..4a88f073fdd7 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -1956,9 +1956,10 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) * pages are going to be uptodate. */ for (stripe = 0; stripe < bbio->num_stripes; stripe++) { - if (rbio->faila == stripe || - rbio->failb == stripe) + if (rbio->faila == stripe || rbio->failb == stripe) { + atomic_inc(&rbio->bbio->error); continue; + } for (pagenr = 0; pagenr < nr_pages; pagenr++) { struct page *p; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 4662d92a4b73..8e16bca69c56 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -522,9 +522,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_ssd_spread: btrfs_set_and_info(root, SSD_SPREAD, "use spread ssd allocation scheme"); + btrfs_set_opt(info->mount_opt, SSD); break; case Opt_nossd: - btrfs_clear_and_info(root, NOSSD, + btrfs_set_and_info(root, NOSSD, "not using ssd allocation scheme"); btrfs_clear_opt(info->mount_opt, SSD); break; @@ -1467,7 +1468,9 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) goto restore; /* recover relocation */ + mutex_lock(&fs_info->cleaner_mutex); ret = btrfs_recover_relocation(root); + mutex_unlock(&fs_info->cleaner_mutex); if (ret) goto restore; @@ -1808,6 +1811,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root) list_for_each_entry(dev, head, dev_list) { if (dev->missing) continue; + if (!dev->name) + continue; if (!first_dev || dev->devid < first_dev->devid) first_dev = dev; } diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index df39458f1487..78699364f537 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -605,14 +605,37 @@ static void init_feature_attrs(void) } } -static int add_device_membership(struct btrfs_fs_info *fs_info) +int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info, + struct btrfs_device *one_device) +{ + struct hd_struct *disk; + struct kobject *disk_kobj; + + if (!fs_info->device_dir_kobj) + return -EINVAL; + + if (one_device) { + disk = one_device->bdev->bd_part; + disk_kobj = &part_to_dev(disk)->kobj; + + sysfs_remove_link(fs_info->device_dir_kobj, + disk_kobj->name); + } + + return 0; +} + +int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info, + struct btrfs_device *one_device) { int error = 0; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *dev; - fs_info->device_dir_kobj = kobject_create_and_add("devices", + if (!fs_info->device_dir_kobj) + fs_info->device_dir_kobj = kobject_create_and_add("devices", &fs_info->super_kobj); + if (!fs_info->device_dir_kobj) return -ENOMEM; @@ -623,6 +646,9 @@ static int add_device_membership(struct btrfs_fs_info *fs_info) if (!dev->bdev) continue; + if (one_device && one_device != dev) + continue; + disk = dev->bdev->bd_part; disk_kobj = &part_to_dev(disk)->kobj; @@ -666,7 +692,7 @@ int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info) if (error) goto failure; - error = add_device_membership(fs_info); + error = btrfs_kobj_add_device(fs_info, NULL); if (error) goto failure; diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 9ab576318a84..ac46df37504c 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -66,4 +66,8 @@ char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags); extern const char * const btrfs_feature_set_names[3]; extern struct kobj_type space_info_ktype; extern struct kobj_type btrfs_raid_ktype; +int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info, + struct 
btrfs_device *one_device); +int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info, + struct btrfs_device *one_device); #endif /* _BTRFS_SYSFS_H_ */ diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 511839c04f11..5f379affdf23 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -386,11 +386,13 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type, bool reloc_reserved = false; int ret; + /* Send isn't supposed to start transactions. */ + ASSERT(current->journal_info != (void *)BTRFS_SEND_TRANS_STUB); + if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) return ERR_PTR(-EROFS); - if (current->journal_info && - current->journal_info != (void *)BTRFS_SEND_TRANS_STUB) { + if (current->journal_info) { WARN_ON(type & TRANS_EXTWRITERS); h = current->journal_info; h->use_count++; @@ -491,6 +493,7 @@ again: smp_mb(); if (cur_trans->state >= TRANS_STATE_BLOCKED && may_wait_transaction(root, type)) { + current->journal_info = h; btrfs_commit_transaction(h, root); goto again; } @@ -1615,11 +1618,6 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans, int ret; ret = btrfs_run_delayed_items(trans, root); - /* - * running the delayed items may have added new refs. account - * them now so that they hinder processing of more delayed refs - * as little as possible. - */ if (ret) return ret; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c83b24251e53..6104676857f5 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -40,6 +40,7 @@ #include "rcu-string.h" #include "math.h" #include "dev-replace.h" +#include "sysfs.h" static int init_first_rw_device(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -554,12 +555,14 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) * This is ok to do without rcu read locked because we hold the * uuid mutex so nothing we touch in here is going to disappear. 
*/ - name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS); - if (!name) { - kfree(device); - goto error; + if (orig_dev->name) { + name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS); + if (!name) { + kfree(device); + goto error; + } + rcu_assign_pointer(device->name, name); } - rcu_assign_pointer(device->name, name); list_add(&device->dev_list, &fs_devices->devices); device->fs_devices = fs_devices; @@ -1680,6 +1683,9 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) if (device->bdev) device->fs_devices->open_devices--; + /* remove sysfs entry */ + btrfs_kobj_rm_device(root->fs_info, device); + call_rcu(&device->rcu, free_device); num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1; @@ -2143,9 +2149,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) total_bytes = btrfs_super_num_devices(root->fs_info->super_copy); btrfs_set_super_num_devices(root->fs_info->super_copy, total_bytes + 1); + + /* add sysfs device entry */ + btrfs_kobj_add_device(root->fs_info, device); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); if (seeding_dev) { + char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; ret = init_first_rw_device(trans, root, device); if (ret) { btrfs_abort_transaction(trans, root, ret); @@ -2156,6 +2167,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) btrfs_abort_transaction(trans, root, ret); goto error_trans; } + + /* Sprouting would change fsid of the mounted root, + * so rename the fsid on the sysfs + */ + snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", + root->fs_info->fsid); + if (kobject_rename(&root->fs_info->super_kobj, fsid_buf)) + goto error_trans; } else { ret = btrfs_add_device(trans, root, device); if (ret) { @@ -2205,6 +2224,7 @@ error_trans: unlock_chunks(root); btrfs_end_transaction(trans, root); rcu_string_free(device->name); + btrfs_kobj_rm_device(root->fs_info, device); kfree(device); error: blkdev_put(bdev, FMODE_EXCL); diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 4f196314c0c1..b67d8fc81277 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -136,7 +136,7 @@ static int zlib_compress_pages(struct list_head *ws, if (workspace->def_strm.total_in > 8192 && workspace->def_strm.total_in < workspace->def_strm.total_out) { - ret = -EIO; + ret = -E2BIG; goto out; } /* we need another page for writing out. 
Test this diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 0762d143e252..fca382037ddd 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -194,7 +194,16 @@ static void ext4_init_block_bitmap(struct super_block *sb, if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { ext4_error(sb, "Checksum bad for group %u", block_group); grp = ext4_get_group_info(sb, block_group); + if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) + percpu_counter_sub(&sbi->s_freeclusters_counter, + grp->bb_free); set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); + if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) { + int count; + count = ext4_free_inodes_count(sb, gdp); + percpu_counter_sub(&sbi->s_freeinodes_counter, + count); + } set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state); return; } @@ -359,6 +368,7 @@ static void ext4_validate_block_bitmap(struct super_block *sb, { ext4_fsblk_t blk; struct ext4_group_info *grp = ext4_get_group_info(sb, block_group); + struct ext4_sb_info *sbi = EXT4_SB(sb); if (buffer_verified(bh)) return; @@ -369,6 +379,9 @@ static void ext4_validate_block_bitmap(struct super_block *sb, ext4_unlock_group(sb, block_group); ext4_error(sb, "bg %u: block %llu: invalid block bitmap", block_group, blk); + if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) + percpu_counter_sub(&sbi->s_freeclusters_counter, + grp->bb_free); set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); return; } @@ -376,6 +389,9 @@ static void ext4_validate_block_bitmap(struct super_block *sb, desc, bh))) { ext4_unlock_group(sb, block_group); ext4_error(sb, "bg %u: bad block bitmap checksum", block_group); + if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) + percpu_counter_sub(&sbi->s_freeclusters_counter, + grp->bb_free); set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); return; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 0ee59a6644e2..a87455df38bc 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -71,6 +71,7 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb, struct ext4_group_desc *gdp) { struct ext4_group_info *grp; + struct ext4_sb_info *sbi = EXT4_SB(sb); J_ASSERT_BH(bh, buffer_locked(bh)); /* If checksum is bad mark all blocks and inodes use to prevent @@ -78,7 +79,16 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb, if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { ext4_error(sb, "Checksum bad for group %u", block_group); grp = ext4_get_group_info(sb, block_group); + if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) + percpu_counter_sub(&sbi->s_freeclusters_counter, + grp->bb_free); set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); + if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) { + int count; + count = ext4_free_inodes_count(sb, gdp); + percpu_counter_sub(&sbi->s_freeinodes_counter, + count); + } set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state); return 0; } @@ -116,6 +126,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) struct buffer_head *bh = NULL; ext4_fsblk_t bitmap_blk; struct ext4_group_info *grp; + struct ext4_sb_info *sbi = EXT4_SB(sb); desc = ext4_get_group_desc(sb, block_group, NULL); if (!desc) @@ -185,6 +196,12 @@ verify: ext4_error(sb, "Corrupt inode bitmap - block_group = %u, " "inode_bitmap = %llu", block_group, bitmap_blk); grp = ext4_get_group_info(sb, block_group); + if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) { + int count; + count = ext4_free_inodes_count(sb, desc); + percpu_counter_sub(&sbi->s_freeinodes_counter, + count); + } set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state); return 
NULL; } @@ -321,6 +338,12 @@ out: fatal = err; } else { ext4_error(sb, "bit already cleared for inode %lu", ino); + if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) { + int count; + count = ext4_free_inodes_count(sb, gdp); + percpu_counter_sub(&sbi->s_freeinodes_counter, + count); + } set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state); } diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 8a57e9fcd1b9..fd69da194826 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -389,7 +389,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, return 0; failed: for (; i >= 0; i--) { - if (i != indirect_blks && branch[i].bh) + /* + * We want to ext4_forget() only freshly allocated indirect + * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and + * buffer at branch[0].bh is indirect block / inode already + * existing before ext4_alloc_branch() was called. + */ + if (i > 0 && i != indirect_blks && branch[i].bh) ext4_forget(handle, 1, inode, branch[i].bh, branch[i].bh->b_blocknr); ext4_free_blocks(handle, inode, NULL, new_blocks[i], @@ -1310,16 +1316,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode, blk = *i_data; if (level > 0) { ext4_lblk_t first2; + ext4_lblk_t count2; + bh = sb_bread(inode->i_sb, le32_to_cpu(blk)); if (!bh) { EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk), "Read failure"); return -EIO; } - first2 = (first > offset) ? first - offset : 0; + if (first > offset) { + first2 = first - offset; + count2 = count; + } else { + first2 = 0; + count2 = count - (offset - first); + } ret = free_hole_blocks(handle, inode, bh, (__le32 *)bh->b_data, level - 1, - first2, count - offset, + first2, count2, inode->i_sb->s_blocksize >> 2); if (ret) { brelse(bh); @@ -1329,8 +1343,8 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode, if (level == 0 || (bh && all_zeroes((__le32 *)bh->b_data, (__le32 *)bh->b_data + addr_per_block))) { - ext4_free_data(handle, inode, parent_bh, &blk, &blk+1); - *i_data = 0; + ext4_free_data(handle, inode, parent_bh, + i_data, i_data + 1); } brelse(bh); bh = NULL; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 59e31622cc6e..7f72f50a8fa7 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -722,6 +722,7 @@ void ext4_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap, ext4_group_t group) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); + struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); ext4_grpblk_t i = 0; ext4_grpblk_t first; @@ -759,6 +760,9 @@ void ext4_mb_generate_buddy(struct super_block *sb, * corrupt and update bb_free using bitmap value */ grp->bb_free = free; + if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) + percpu_counter_sub(&sbi->s_freeclusters_counter, + grp->bb_free); set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); } mb_set_largest_free_order(sb, grp); @@ -1431,6 +1435,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); if (unlikely(block != -1)) { + struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t blocknr; blocknr = ext4_group_first_block_no(sb, e4b->bd_group); @@ -1441,6 +1446,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, "freeing already freed block " "(bit %u); block bitmap corrupt.", block); + if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)) + percpu_counter_sub(&sbi->s_freeclusters_counter, + e4b->bd_info->bb_free); /* Mark the block group as corrupt. 
*/ set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &e4b->bd_info->bb_state); diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index e3d37f607f97..d895b4b7b661 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -39,6 +39,19 @@ struct kernfs_open_node { struct list_head files; /* goes through kernfs_open_file.list */ }; +/* + * kernfs_notify() may be called from any context and bounces notifications + * through a work item. To minimize space overhead in kernfs_node, the + * pending queue is implemented as a singly linked list of kernfs_nodes. + * The list is terminated with the self pointer so that whether a + * kernfs_node is on the list or not can be determined by testing the next + * pointer for NULL. + */ +#define KERNFS_NOTIFY_EOL ((void *)&kernfs_notify_list) + +static DEFINE_SPINLOCK(kernfs_notify_lock); +static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL; + static struct kernfs_open_file *kernfs_of(struct file *file) { return ((struct seq_file *)file->private_data)->private; @@ -783,24 +796,25 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait) return DEFAULT_POLLMASK|POLLERR|POLLPRI; } -/** - * kernfs_notify - notify a kernfs file - * @kn: file to notify - * - * Notify @kn such that poll(2) on @kn wakes up. - */ -void kernfs_notify(struct kernfs_node *kn) +static void kernfs_notify_workfn(struct work_struct *work) { - struct kernfs_root *root = kernfs_root(kn); + struct kernfs_node *kn; struct kernfs_open_node *on; struct kernfs_super_info *info; - unsigned long flags; - - if (WARN_ON(kernfs_type(kn) != KERNFS_FILE)) +repeat: + /* pop one off the notify_list */ + spin_lock_irq(&kernfs_notify_lock); + kn = kernfs_notify_list; + if (kn == KERNFS_NOTIFY_EOL) { + spin_unlock_irq(&kernfs_notify_lock); return; + } + kernfs_notify_list = kn->attr.notify_next; + kn->attr.notify_next = NULL; + spin_unlock_irq(&kernfs_notify_lock); /* kick poll */ - spin_lock_irqsave(&kernfs_open_node_lock, flags); + spin_lock_irq(&kernfs_open_node_lock); on = kn->attr.open; if (on) { @@ -808,12 +822,12 @@ void kernfs_notify(struct kernfs_node *kn) wake_up_interruptible(&on->poll); } - spin_unlock_irqrestore(&kernfs_open_node_lock, flags); + spin_unlock_irq(&kernfs_open_node_lock); /* kick fsnotify */ mutex_lock(&kernfs_mutex); - list_for_each_entry(info, &root->supers, node) { + list_for_each_entry(info, &kernfs_root(kn)->supers, node) { struct inode *inode; struct dentry *dentry; @@ -833,6 +847,33 @@ void kernfs_notify(struct kernfs_node *kn) } mutex_unlock(&kernfs_mutex); + kernfs_put(kn); + goto repeat; +} + +/** + * kernfs_notify - notify a kernfs file + * @kn: file to notify + * + * Notify @kn such that poll(2) on @kn wakes up. May be called from any + * context. + */ +void kernfs_notify(struct kernfs_node *kn) +{ + static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn); + unsigned long flags; + + if (WARN_ON(kernfs_type(kn) != KERNFS_FILE)) + return; + + spin_lock_irqsave(&kernfs_notify_lock, flags); + if (!kn->attr.notify_next) { + kernfs_get(kn); + kn->attr.notify_next = kernfs_notify_list; + kernfs_notify_list = kn; + schedule_work(&kernfs_notify_work); + } + spin_unlock_irqrestore(&kernfs_notify_lock, flags); } EXPORT_SYMBOL_GPL(kernfs_notify); diff --git a/fs/mbcache.c b/fs/mbcache.c index bf166e388f0d..187477ded6b3 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -73,6 +73,7 @@ #include <linux/mbcache.h> #include <linux/init.h> #include <linux/blockgroup_lock.h> +#include <linux/log2.h> #ifdef MB_CACHE_DEBUG # define mb_debug(f...)
do { \ @@ -93,7 +94,7 @@ #define MB_CACHE_WRITER ((unsigned short)~0U >> 1) -#define MB_CACHE_ENTRY_LOCK_BITS __builtin_log2(NR_BG_LOCKS) +#define MB_CACHE_ENTRY_LOCK_BITS ilog2(NR_BG_LOCKS) #define MB_CACHE_ENTRY_LOCK_INDEX(ce) \ (hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS)) diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 6851b003f2a4..8f029db5d271 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -617,15 +617,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, switch (create->cr_type) { case NF4LNK: - /* ugh! we have to null-terminate the linktext, or - * vfs_symlink() will choke. it is always safe to - * null-terminate by brute force, since at worst we - * will overwrite the first byte of the create namelen - * in the XDR buffer, which has already been extracted - * during XDR decode. - */ - create->cr_linkname[create->cr_linklen] = 0; - status = nfsd_symlink(rqstp, &cstate->current_fh, create->cr_name, create->cr_namelen, create->cr_linkname, create->cr_linklen, diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 83baf2bfe9e9..2fc7abebeb9b 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -600,7 +600,18 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create READ_BUF(4); create->cr_linklen = be32_to_cpup(p++); READ_BUF(create->cr_linklen); - SAVEMEM(create->cr_linkname, create->cr_linklen); + /* + * The VFS will want a null-terminated string, and + * null-terminating in place isn't safe since this might + * end on a page boundary: + */ + create->cr_linkname = + kmalloc(create->cr_linklen + 1, GFP_KERNEL); + if (!create->cr_linkname) + return nfserr_jukebox; + memcpy(create->cr_linkname, p, create->cr_linklen); + create->cr_linkname[create->cr_linklen] = '\0'; + defer_free(argp, kfree, create->cr_linkname); break; case NF4BLK: case NF4CHR: @@ -3267,7 +3278,7 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd wire_count = htonl(maxcount); write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4); - xdr_truncate_encode(xdr, length_offset + 4 + maxcount); + xdr_truncate_encode(xdr, length_offset + 4 + ALIGN(maxcount, 4)); if (maxcount & 3) write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount, &zero, 4 - (maxcount&3)); diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 9d231e9e5f0e..bf2d03f8fd3e 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -184,29 +184,11 @@ static int show_stat(struct seq_file *p, void *v) static int stat_open(struct inode *inode, struct file *file) { - size_t size = 1024 + 128 * num_possible_cpus(); - char *buf; - struct seq_file *m; - int res; + size_t size = 1024 + 128 * num_online_cpus(); /* minimum size to display an interrupt count : 2 bytes */ size += 2 * nr_irqs; - - /* don't ask for more than the kmalloc() max size */ - if (size > KMALLOC_MAX_SIZE) - size = KMALLOC_MAX_SIZE; - buf = kmalloc(size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - res = single_open(file, show_stat, NULL); - if (!res) { - m = file->private_data; - m->buf = buf; - m->size = ksize(buf); - } else - kfree(buf); - return res; + return single_open_size(file, show_stat, NULL, size); } static const struct file_operations proc_stat_operations = { diff --git a/fs/seq_file.c b/fs/seq_file.c index 1d641bb108d2..3857b720cb1b 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -8,8 +8,10 @@ #include <linux/fs.h> #include <linux/export.h> #include <linux/seq_file.h> +#include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/cred.h> +#include 
<linux/mm.h> #include <asm/uaccess.h> #include <asm/page.h> @@ -30,6 +32,16 @@ static void seq_set_overflow(struct seq_file *m) m->count = m->size; } +static void *seq_buf_alloc(unsigned long size) +{ + void *buf; + + buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!buf && size > PAGE_SIZE) + buf = vmalloc(size); + return buf; +} + /** * seq_open - initialize sequential file * @file: file we initialize @@ -96,7 +108,7 @@ static int traverse(struct seq_file *m, loff_t offset) return 0; } if (!m->buf) { - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); + m->buf = seq_buf_alloc(m->size = PAGE_SIZE); if (!m->buf) return -ENOMEM; } @@ -135,9 +147,9 @@ static int traverse(struct seq_file *m, loff_t offset) Eoverflow: m->op->stop(m, p); - kfree(m->buf); + kvfree(m->buf); m->count = 0; - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); + m->buf = seq_buf_alloc(m->size <<= 1); return !m->buf ? -ENOMEM : -EAGAIN; } @@ -192,7 +204,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) /* grab buffer if we didn't have one */ if (!m->buf) { - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); + m->buf = seq_buf_alloc(m->size = PAGE_SIZE); if (!m->buf) goto Enomem; } @@ -232,9 +244,9 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) if (m->count < m->size) goto Fill; m->op->stop(m, p); - kfree(m->buf); + kvfree(m->buf); m->count = 0; - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); + m->buf = seq_buf_alloc(m->size <<= 1); if (!m->buf) goto Enomem; m->version = 0; @@ -350,7 +362,7 @@ EXPORT_SYMBOL(seq_lseek); int seq_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; - kfree(m->buf); + kvfree(m->buf); kfree(m); return 0; } @@ -605,13 +617,13 @@ EXPORT_SYMBOL(single_open); int single_open_size(struct file *file, int (*show)(struct seq_file *, void *), void *data, size_t size) { - char *buf = kmalloc(size, GFP_KERNEL); + char *buf = seq_buf_alloc(size); int ret; if (!buf) return -ENOMEM; ret = single_open(file, show, data); if (ret) { - kfree(buf); + kvfree(buf); return ret; } ((struct seq_file *)file->private_data)->buf = buf; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 8af71a8e2c00..9b6a445f8602 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1395,8 +1395,6 @@ extern void drm_master_put(struct drm_master **master); extern void drm_put_dev(struct drm_device *dev); extern void drm_unplug_dev(struct drm_device *dev); extern unsigned int drm_debug; -extern unsigned int drm_rnodes; -extern unsigned int drm_universal_planes; extern unsigned int drm_vblank_offdelay; extern unsigned int drm_timestamp_precision; @@ -1419,6 +1417,8 @@ extern int drm_debugfs_create_files(const struct drm_info_list *files, extern int drm_debugfs_remove_files(const struct drm_info_list *files, int count, struct drm_minor *minor); extern int drm_debugfs_cleanup(struct drm_minor *minor); +extern int drm_debugfs_connector_add(struct drm_connector *connector); +extern void drm_debugfs_connector_remove(struct drm_connector *connector); #else static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id, struct dentry *root) @@ -1443,6 +1443,15 @@ static inline int drm_debugfs_cleanup(struct drm_minor *minor) { return 0; } + +static inline int drm_debugfs_connector_add(struct drm_connector *connector) +{ + return 0; +} +static inline void drm_debugfs_connector_remove(struct drm_connector *connector) +{ +} + #endif /* Info file support */ @@ -1574,7 +1583,7 @@ void drm_gem_free_mmap_offset(struct 
drm_gem_object *obj); int drm_gem_create_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); -struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); +struct page **drm_gem_get_pages(struct drm_gem_object *obj); void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty, bool accessed); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 6a94909f1ca9..7f1bc7e4848b 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -333,6 +333,10 @@ struct drm_crtc { struct drm_plane *primary; struct drm_plane *cursor; + /* position of cursor plane on crtc */ + int cursor_x; + int cursor_y; + /* Temporary tracking of the old fb while a modeset is ongoing. Used * by drm_mode_set_config_internal to implement correct refcounting. */ struct drm_framebuffer *old_fb; @@ -526,6 +530,8 @@ struct drm_connector { struct drm_property_blob *edid_blob_ptr; struct drm_object_properties properties; + struct drm_property_blob *path_blob_ptr; + uint8_t polled; /* DRM_CONNECTOR_POLL_* */ /* requested DPMS state */ @@ -535,6 +541,7 @@ struct drm_connector { /* forced on connector */ enum drm_connector_force force; + bool override_edid; uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; struct drm_encoder *encoder; /* currently active encoder */ @@ -547,6 +554,8 @@ struct drm_connector { int audio_latency[2]; int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */ unsigned bad_edid_counter; + + struct dentry *debugfs_entry; }; /** @@ -802,6 +811,7 @@ struct drm_mode_config { struct list_head property_blob_list; struct drm_property *edid_property; struct drm_property *dpms_property; + struct drm_property *path_property; struct drm_property *plane_type_property; /* DVI-I properties */ @@ -854,7 +864,7 @@ struct drm_prop_enum_list { extern int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, - void *cursor, + struct drm_plane *cursor, const struct drm_crtc_funcs *funcs); extern int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, @@ -880,6 +890,8 @@ extern int drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type); +int drm_connector_register(struct drm_connector *connector); +void drm_connector_unregister(struct drm_connector *connector); extern void drm_connector_cleanup(struct drm_connector *connector); /* helper to unplug all connectors from sysfs for device */ @@ -939,6 +951,7 @@ extern const char *drm_get_tv_select_name(int val); extern void drm_fb_release(struct drm_file *file_priv); extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); extern void drm_mode_group_destroy(struct drm_mode_group *group); +extern void drm_reinit_primary_mode_group(struct drm_device *dev); extern bool drm_probe_ddc(struct i2c_adapter *adapter); extern struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter); @@ -948,6 +961,8 @@ extern void drm_mode_config_init(struct drm_device *dev); extern void drm_mode_config_reset(struct drm_device *dev); extern void drm_mode_config_cleanup(struct drm_device *dev); +extern int drm_mode_connector_set_path_property(struct drm_connector *connector, + char *path); extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, struct edid *edid); diff --git a/include/drm/drm_dp_mst_helper.h 
b/include/drm/drm_dp_mst_helper.h new file mode 100644 index 000000000000..9b446ada2532 --- /dev/null +++ b/include/drm/drm_dp_mst_helper.h @@ -0,0 +1,509 @@ +/* + * Copyright © 2014 Red Hat. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ +#ifndef _DRM_DP_MST_HELPER_H_ +#define _DRM_DP_MST_HELPER_H_ + +#include <linux/types.h> +#include <drm/drm_dp_helper.h> + +struct drm_dp_mst_branch; + +/** + * struct drm_dp_vcpi - Virtual Channel Payload Identifier + * @vcpi: Virtual channel ID. + * @pbn: Payload Bandwidth Number for this channel + * @aligned_pbn: PBN aligned with slot size + * @num_slots: number of slots for this PBN + */ +struct drm_dp_vcpi { + int vcpi; + int pbn; + int aligned_pbn; + int num_slots; +}; + +/** + * struct drm_dp_mst_port - MST port + * @kref: reference count for this port. + * @guid_valid: for DP 1.2 devices if we have validated the GUID. + * @guid: guid for DP 1.2 device on this port. + * @port_num: port number + * @input: if this port is an input port. + * @mcs: message capability status - DP 1.2 spec. + * @ddps: DisplayPort Device Plug Status - DP 1.2 + * @pdt: Peer Device Type + * @ldps: Legacy Device Plug Status + * @dpcd_rev: DPCD revision of device on this port + * @num_sdp_streams: Number of simultaneous streams + * @num_sdp_stream_sinks: Number of stream sinks + * @available_pbn: Available bandwidth for this port. + * @next: link to next port on this branch device + * @mstb: branch device attached below this port + * @aux: i2c aux transport to talk to device connected to this port. + * @parent: branch device parent of this port + * @vcpi: Virtual Channel Payload info for this port. + * @connector: DRM connector this port is connected to. + * @mgr: topology manager this port lives under. + * + * This structure represents an MST port endpoint on a device somewhere + * in the MST topology. + */ +struct drm_dp_mst_port { + struct kref kref; + + /* if dpcd 1.2 device is on this port - its GUID info */ + bool guid_valid; + u8 guid[16]; + + u8 port_num; + bool input; + bool mcs; + bool ddps; + u8 pdt; + bool ldps; + u8 dpcd_rev; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + uint16_t available_pbn; + struct list_head next; + struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */ + struct drm_dp_aux aux; /* i2c bus for this port?
*/ + struct drm_dp_mst_branch *parent; + + struct drm_dp_vcpi vcpi; + struct drm_connector *connector; + struct drm_dp_mst_topology_mgr *mgr; +}; + +/** + * struct drm_dp_mst_branch - MST branch device. + * @kref: reference count for this branch device. + * @rad: Relative Address to talk to this branch device. + * @lct: Link count total to talk to this branch device. + * @num_ports: number of ports on the branch. + * @msg_slots: one bit per transmitted msg slot. + * @ports: linked list of ports on this branch. + * @port_parent: pointer to the port parent, NULL if toplevel. + * @mgr: topology manager for this branch device. + * @tx_slots: transmission slots for this device. + * @last_seqno: last sequence number used to talk to this. + * @link_address_sent: if a link address message has been sent to this device yet. + * + * This structure represents an MST branch device; there is one + * primary branch device at the root, along with any others connected + * to downstream ports. + */ +struct drm_dp_mst_branch { + struct kref kref; + u8 rad[8]; + u8 lct; + int num_ports; + + int msg_slots; + struct list_head ports; + + /* list of tx ops queue for this port */ + struct drm_dp_mst_port *port_parent; + struct drm_dp_mst_topology_mgr *mgr; + + /* slots are protected by mstb->mgr->qlock */ + struct drm_dp_sideband_msg_tx *tx_slots[2]; + int last_seqno; + bool link_address_sent; +}; + + +/* sideband msg header - not bit struct */ +struct drm_dp_sideband_msg_hdr { + u8 lct; + u8 lcr; + u8 rad[8]; + bool broadcast; + bool path_msg; + u8 msg_len; + bool somt; + bool eomt; + bool seqno; +}; + +struct drm_dp_nak_reply { + u8 guid[16]; + u8 reason; + u8 nak_data; +}; + +struct drm_dp_link_address_ack_reply { + u8 guid[16]; + u8 nports; + struct drm_dp_link_addr_reply_port { + bool input_port; + u8 peer_device_type; + u8 port_number; + bool mcs; + bool ddps; + bool legacy_device_plug_status; + u8 dpcd_revision; + u8 peer_guid[16]; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + } ports[16]; +}; + +struct drm_dp_remote_dpcd_read_ack_reply { + u8 port_number; + u8 num_bytes; + u8 bytes[255]; +}; + +struct drm_dp_remote_dpcd_write_ack_reply { + u8 port_number; +}; + +struct drm_dp_remote_dpcd_write_nak_reply { + u8 port_number; + u8 reason; + u8 bytes_written_before_failure; +}; + +struct drm_dp_remote_i2c_read_ack_reply { + u8 port_number; + u8 num_bytes; + u8 bytes[255]; +}; + +struct drm_dp_remote_i2c_read_nak_reply { + u8 port_number; + u8 nak_reason; + u8 i2c_nak_transaction; +}; + +struct drm_dp_remote_i2c_write_ack_reply { + u8 port_number; +}; + + +struct drm_dp_sideband_msg_rx { + u8 chunk[48]; + u8 msg[256]; + u8 curchunk_len; + u8 curchunk_idx; /* chunk we are parsing now */ + u8 curchunk_hdrlen; + u8 curlen; /* total length of the msg */ + bool have_somt; + bool have_eomt; + struct drm_dp_sideband_msg_hdr initial_hdr; +}; + + +struct drm_dp_allocate_payload { + u8 port_number; + u8 number_sdp_streams; + u8 vcpi; + u16 pbn; + u8 sdp_stream_sink[8]; +}; + +struct drm_dp_allocate_payload_ack_reply { + u8 port_number; + u8 vcpi; + u16 allocated_pbn; +}; + +struct drm_dp_connection_status_notify { + u8 guid[16]; + u8 port_number; + bool legacy_device_plug_status; + bool displayport_device_plug_status; + bool message_capability_status; + bool input_port; + u8 peer_device_type; +}; + +struct drm_dp_remote_dpcd_read { + u8 port_number; + u32 dpcd_address; + u8 num_bytes; +}; + +struct drm_dp_remote_dpcd_write { + u8 port_number; + u32 dpcd_address; + u8 num_bytes; + u8 *bytes; +}; + +struct
drm_dp_remote_i2c_read { + u8 num_transactions; + u8 port_number; + struct { + u8 i2c_dev_id; + u8 num_bytes; + u8 *bytes; + u8 no_stop_bit; + u8 i2c_transaction_delay; + } transactions[4]; + u8 read_i2c_device_id; + u8 num_bytes_read; +}; + +struct drm_dp_remote_i2c_write { + u8 port_number; + u8 write_i2c_device_id; + u8 num_bytes; + u8 *bytes; +}; + +/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */ +struct drm_dp_port_number_req { + u8 port_number; +}; + +struct drm_dp_enum_path_resources_ack_reply { + u8 port_number; + u16 full_payload_bw_number; + u16 avail_payload_bw_number; +}; + +/* covers POWER_DOWN_PHY, POWER_UP_PHY */ +struct drm_dp_port_number_rep { + u8 port_number; +}; + +struct drm_dp_query_payload { + u8 port_number; + u8 vcpi; +}; + +struct drm_dp_resource_status_notify { + u8 port_number; + u8 guid[16]; + u16 available_pbn; +}; + +struct drm_dp_query_payload_ack_reply { + u8 port_number; + u8 allocated_pbn; +}; + +struct drm_dp_sideband_msg_req_body { + u8 req_type; + union ack_req { + struct drm_dp_connection_status_notify conn_stat; + struct drm_dp_port_number_req port_num; + struct drm_dp_resource_status_notify resource_stat; + + struct drm_dp_query_payload query_payload; + struct drm_dp_allocate_payload allocate_payload; + + struct drm_dp_remote_dpcd_read dpcd_read; + struct drm_dp_remote_dpcd_write dpcd_write; + + struct drm_dp_remote_i2c_read i2c_read; + struct drm_dp_remote_i2c_write i2c_write; + } u; +}; + +struct drm_dp_sideband_msg_reply_body { + u8 reply_type; + u8 req_type; + union ack_replies { + struct drm_dp_nak_reply nak; + struct drm_dp_link_address_ack_reply link_addr; + struct drm_dp_port_number_rep port_number; + + struct drm_dp_enum_path_resources_ack_reply path_resources; + struct drm_dp_allocate_payload_ack_reply allocate_payload; + struct drm_dp_query_payload_ack_reply query_payload; + + struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack; + struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack; + struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack; + + struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack; + struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack; + struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack; + } u; +}; + +/* msg is queued to be put into a slot */ +#define DRM_DP_SIDEBAND_TX_QUEUED 0 +/* msg has started transmitting on a slot - still on msgq */ +#define DRM_DP_SIDEBAND_TX_START_SEND 1 +/* msg has finished transmitting on a slot - removed from msgq only in slot */ +#define DRM_DP_SIDEBAND_TX_SENT 2 +/* msg has received a response - removed from slot */ +#define DRM_DP_SIDEBAND_TX_RX 3 +#define DRM_DP_SIDEBAND_TX_TIMEOUT 4 + +struct drm_dp_sideband_msg_tx { + u8 msg[256]; + u8 chunk[48]; + u8 cur_offset; + u8 cur_len; + struct drm_dp_mst_branch *dst; + struct list_head next; + int seqno; + int state; + bool path_msg; + struct drm_dp_sideband_msg_reply_body reply; +}; + +/* sideband msg handler */ +struct drm_dp_mst_topology_mgr; +struct drm_dp_mst_topology_cbs { + /* create a connector for a port */ + struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path); + void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector); + void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); + +}; + +#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8) + +#define DP_PAYLOAD_LOCAL 1 +#define DP_PAYLOAD_REMOTE 2 +#define DP_PAYLOAD_DELETE_LOCAL 3 + +struct drm_dp_payload { + int 
payload_state; + int start_slot; + int num_slots; +}; + +/** + * struct drm_dp_mst_topology_mgr - DisplayPort MST manager + * @dev: device pointer for adding i2c devices etc. + * @cbs: callbacks for connector addition and destruction. + * @max_dpcd_transaction_bytes: maximum number of bytes to read/write in one go. + * @aux: aux channel for the DP connector. + * @max_payloads: maximum number of payloads the GPU can generate. + * @conn_base_id: DRM connector ID this mgr is connected to. + * @down_rep_recv: msg receiver state for down replies. + * @up_req_recv: msg receiver state for up requests. + * @lock: protects mst state, primary, guid, dpcd. + * @mst_state: if this manager is enabled for an MST-capable port. + * @mst_primary: pointer to the primary branch device. + * @guid_valid: GUID valid for the primary branch device. + * @guid: GUID for primary port. + * @dpcd: cache of DPCD for primary port. + * @pbn_div: PBN to slots divisor. + * + * This struct represents the top-level DisplayPort MST topology manager. + * There should be one instance of this for every MST-capable DP connector + * on the GPU. + */ +struct drm_dp_mst_topology_mgr { + + struct device *dev; + struct drm_dp_mst_topology_cbs *cbs; + int max_dpcd_transaction_bytes; + struct drm_dp_aux *aux; /* auxch for this topology mgr to use */ + int max_payloads; + int conn_base_id; + + /* only ever accessed from the workqueue - which should be serialised */ + struct drm_dp_sideband_msg_rx down_rep_recv; + struct drm_dp_sideband_msg_rx up_req_recv; + + /* pointer to info about the initial MST device */ + struct mutex lock; /* protects mst_state + primary + guid + dpcd */ + + bool mst_state; + struct drm_dp_mst_branch *mst_primary; + /* primary MST device GUID */ + bool guid_valid; + u8 guid[16]; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 sink_count; + int pbn_div; + int total_slots; + int avail_slots; + int total_pbn; + + /* messages to be transmitted */ + /* qlock protects the upq/downq and in_progress, + the mstb tx_slots and txmsg->state once they are queued */ + struct mutex qlock; + struct list_head tx_msg_downq; + struct list_head tx_msg_upq; + bool tx_down_in_progress; + bool tx_up_in_progress; + + /* payload info + lock for it */ + struct mutex payload_lock; + struct drm_dp_vcpi **proposed_vcpis; + struct drm_dp_payload *payloads; + unsigned long payload_mask; + + wait_queue_head_t tx_waitq; + struct work_struct work; + + struct work_struct tx_work; +}; +
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id); + +void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); + + +int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); + + +int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); + + +enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + +struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + + +int drm_dp_calc_pbn_mode(int clock, int bpp); + + +bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots); + + +void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + + +void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); 
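Taken together, the structures and entry points above (the remaining prototypes follow below) define the driver-facing lifecycle: wire up the callback table, initialise one topology manager per MST-capable DP connector, enable the manager when an MST sink is detected, and feed ESI vectors to drm_dp_mst_hpd_irq() from the hotplug handler. A minimal sketch of that flow, not part of the patch: my_connector, my_mst_cbs, my_sink_is_mst and my_read_esi are assumed driver-side names, and the numeric limits are illustrative rather than values mandated by the API.

        static int my_mst_init(struct my_connector *conn)
        {
                /* Callbacks for creating/destroying DRM connectors per MST port. */
                conn->mst_mgr.cbs = &my_mst_cbs;

                /* One topology manager per MST-capable DP connector on the GPU. */
                return drm_dp_mst_topology_mgr_init(&conn->mst_mgr, conn->dev,
                                                    &conn->dp_aux,
                                                    16, /* max DPCD bytes per sideband transaction */
                                                    4,  /* payload streams the HW can drive */
                                                    conn->base.base.id);
        }

        static void my_hotplug(struct my_connector *conn, bool long_pulse)
        {
                bool handled;
                u8 esi[16];

                if (long_pulse) {
                        /* Sink (dis)appeared: reprobe and flip MST mode accordingly. */
                        drm_dp_mst_topology_mgr_set_mst(&conn->mst_mgr,
                                                        my_sink_is_mst(conn));
                        return;
                }

                /* Short pulse while MST is active: service the sideband IRQ. */
                my_read_esi(conn, esi);
                drm_dp_mst_hpd_irq(&conn->mst_mgr, esi, &handled);
        }

At modeset time the same manager arbitrates link bandwidth: drm_dp_calc_pbn_mode(mode->clock, bpp) converts a pixel clock and colour depth into a PBN value, and drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots) maps that PBN onto a VCPI plus a timeslot count consumed by the payload-table helpers declared just below.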
+ + +int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, + int pbn); + + +int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr); + + +int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr); + +int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_dump_topology(struct seq_file *m, + struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); +int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); +#endif diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 7997246d4039..bfd329d613c4 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -86,8 +86,9 @@ struct drm_fb_helper { int crtc_count; struct drm_fb_helper_crtc *crtc_info; int connector_count; + int connector_info_alloc_count; struct drm_fb_helper_connector **connector_info; - struct drm_fb_helper_funcs *funcs; + const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; u32 pseudo_palette[17]; struct list_head kernel_fb_list; @@ -97,6 +98,8 @@ struct drm_fb_helper { bool delayed_hotplug; }; +void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, + const struct drm_fb_helper_funcs *funcs); int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper, int crtc_count, int max_conn); @@ -128,4 +131,7 @@ struct drm_display_mode * drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, int width, int height); +int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); +int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector); #endif diff --git a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h index 2baba9996094..baa6f11b1837 100644 --- a/include/drm/i915_powerwell.h +++ b/include/drm/i915_powerwell.h @@ -32,5 +32,6 @@ /* For use by hda_i915 driver */ extern int i915_request_power_well(void); extern int i915_release_power_well(void); +extern int i915_get_cdclk_freq(void); #endif /* _I915_POWERWELL_H_ */ diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index a5183da3ef92..202f0a7171e8 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -182,6 +182,7 @@ struct ttm_mem_type_manager_func { * @man: Pointer to a memory type manager. * @bo: Pointer to the buffer object we're allocating space for. * @placement: Placement details. + * @flags: Additional placement flags. * @mem: Pointer to a struct ttm_mem_reg to be filled in. * * This function should allocate space in the memory type managed @@ -206,6 +207,7 @@ struct ttm_mem_type_manager_func { int (*get_node)(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem); /** @@ -653,18 +655,6 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm); extern int ttm_tt_swapin(struct ttm_tt *ttm); /** - * ttm_tt_cache_flush: - * - * @pages: An array of pointers to struct page:s to flush. - * @num_pages: Number of pages to flush. - * - * Flush the data of the indicated pages from the cpu caches. - * This is used when changing caching attributes of the pages from - * cache-coherent. - */ -extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages); - -/** * ttm_tt_set_placement_caching: * * @ttm A struct ttm_tt the backing pages of which will change caching policy. 
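Returning to the drm_fb_helper.h hunk above: the new prepare/add-one/remove-one entry points exist so the fbdev helper can track connectors that appear and disappear at runtime, which is exactly what an MST dock produces. A rough sketch of the intended pairing, assuming driver-local names my_fbdev and my_fb_helper_funcs:

        /* driver init: bind the (now const) funcs table, then size the helper */
        drm_fb_helper_prepare(dev, &my_fbdev->helper, &my_fb_helper_funcs);
        ret = drm_fb_helper_init(dev, &my_fbdev->helper, num_crtcs, max_conn);

        /* an MST connector materialises at hotplug time */
        drm_fb_helper_add_one_connector(&my_fbdev->helper, connector);
        drm_connector_register(connector);

        /* mirror image on unplug, before the connector is destroyed */
        drm_connector_unregister(connector);
        drm_fb_helper_remove_one_connector(&my_fbdev->helper, connector);

The connector_info_alloc_count field added alongside lets the helper grow its connector_info array on demand instead of sizing it once at init time.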
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 17aa1cce6f8e..145375ea0bd9 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -91,6 +91,7 @@ struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; + struct kernfs_node *notify_next; /* for kernfs_notify() */ }; /* diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 077904c8b70d..cc79eff4a1ad 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk, * calling arch_ptrace_stop() when it would be superfluous. For example, * if the thread has not been back to user mode since the last stop, the * thread state might indicate that nothing needs to be done. + * + * This is guaranteed to be invoked once before a task stops for ptrace and + * may include arch-specific operations necessary prior to a ptrace stop. */ #define arch_ptrace_stop_needed(code, info) (0) #endif diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index 1a64b26046ed..9b7de1b46437 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -70,7 +70,9 @@ US_FLAG(NEEDS_CAP16, 0x00400000) \ /* cannot handle READ_CAPACITY_10 */ \ US_FLAG(IGNORE_UAS, 0x00800000) \ - /* Device advertises UAS but it is broken */ + /* Device advertises UAS but it is broken */ \ + US_FLAG(BROKEN_FUA, 0x01000000) \ + /* Cannot handle FUA in WRITE or READ CDBs */ \ #define US_FLAG(name, value) US_FL_##name = value , enum { US_DO_ALL_FLAGS }; diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 42ed789ebafc..e0ae71098144 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -318,7 +318,7 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status) static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd) { - unsigned int xfer_len = blk_rq_bytes(scmd->request); + unsigned int xfer_len = scsi_out(scmd)->length; unsigned int prot_op = scsi_get_prot_op(scmd); unsigned int sector_size = scmd->device->sector_size; diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 5853c913d2b0..27ab31017f09 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -173,6 +173,7 @@ struct scsi_device { unsigned is_visible:1; /* is the device visible in sysfs */ unsigned wce_default_on:1; /* Cache is ON by default */ unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ + unsigned broken_fua:1; /* Don't set FUA bit */ atomic_t disk_events_disable_depth; /* disable depth for disk events */ diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 9abbeb924cbb..b0b855613641 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -780,7 +780,7 @@ struct drm_prime_handle { /** * Device specific ioctls should only be in their respective headers - * The device specific ioctl range is from 0x40 to 0x99. + * The device specific ioctl range is from 0x40 to 0x9f. * Generic IOCTLS restart at 0xA0. 
* * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 6f9c38ce45c7..2f47824e7a36 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -38,6 +38,7 @@ struct btrfs_ioctl_vol_args { #define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2) #define BTRFS_FSID_SIZE 16 #define BTRFS_UUID_SIZE 16 +#define BTRFS_UUID_UNPARSED_SIZE 37 #define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0) diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h index 2a4b4a72a4f9..24b68c59dcf8 100644 --- a/include/uapi/linux/usb/functionfs.h +++ b/include/uapi/linux/usb/functionfs.h @@ -33,6 +33,13 @@ struct usb_endpoint_descriptor_no_audio { __u8 bInterval; } __attribute__((packed)); +/* Legacy format, deprecated as of 3.14. */ +struct usb_functionfs_descs_head { + __le32 magic; + __le32 length; + __le32 fs_count; + __le32 hs_count; +} __attribute__((packed, deprecated)); /* * Descriptors format: diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c445e392e93f..6f3254e8c137 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -846,7 +846,7 @@ static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *u { int err; - if (!consumer_del(uprobe, uc)) /* WARN? */ + if (WARN_ON(!consumer_del(uprobe, uc))) return; err = register_for_each_vma(uprobe, NULL); @@ -927,7 +927,7 @@ int uprobe_apply(struct inode *inode, loff_t offset, int ret = -ENOENT; uprobe = find_uprobe(inode, offset); - if (!uprobe) + if (WARN_ON(!uprobe)) return ret; down_write(&uprobe->register_rwsem); @@ -952,7 +952,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume struct uprobe *uprobe; uprobe = find_uprobe(inode, offset); - if (!uprobe) + if (WARN_ON(!uprobe)) return; down_write(&uprobe->register_rwsem); diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 7339e42a85ab..1487a123db5c 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -455,9 +455,9 @@ EXPORT_SYMBOL_GPL(irq_alloc_hwirqs); */ void irq_free_hwirqs(unsigned int from, int cnt) { - int i; + int i, j; - for (i = from; cnt > 0; i++, cnt--) { + for (i = from, j = cnt; j > 0; i++, j--) { irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); arch_teardown_hwirq(i); } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index ea2d5f6962ed..13e839dbca07 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1416,9 +1416,10 @@ static int have_callable_console(void) /* * Can we actually use the console at this time on this cpu? * - * Console drivers may assume that per-cpu resources have been allocated. So - * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't - * call them until this CPU is officially up. + * Console drivers may assume that per-cpu resources have + * been allocated. So unless they're explicitly marked as + * being able to cope (CON_ANYTIME) don't call them until + * this CPU is officially up. */ static inline int can_use_console(unsigned int cpu) { @@ -1431,10 +1432,8 @@ static inline int can_use_console(unsigned int cpu) * console_lock held, and 'console_locked' set) if it * is successful, false otherwise. 
*/ -static int console_trylock_for_printk(void) +static int console_trylock_for_printk(unsigned int cpu) { - unsigned int cpu = smp_processor_id(); - if (!console_trylock()) return 0; /* @@ -1609,8 +1608,7 @@ asmlinkage int vprintk_emit(int facility, int level, */ if (!oops_in_progress && !lockdep_recursing(current)) { recursion_bug = 1; - local_irq_restore(flags); - return 0; + goto out_restore_irqs; } zap_locks(); } @@ -1718,27 +1716,21 @@ asmlinkage int vprintk_emit(int facility, int level, logbuf_cpu = UINT_MAX; raw_spin_unlock(&logbuf_lock); - lockdep_on(); - local_irq_restore(flags); /* If called from the scheduler, we can not call up(). */ - if (in_sched) - return printed_len; - - /* - * Disable preemption to avoid being preempted while holding - * console_sem which would prevent anyone from printing to console - */ - preempt_disable(); - /* - * Try to acquire and then immediately release the console semaphore. - * The release will print out buffers and wake up /dev/kmsg and syslog() - * users. - */ - if (console_trylock_for_printk()) - console_unlock(); - preempt_enable(); + if (!in_sched) { + /* + * Try to acquire and then immediately release the console + * semaphore. The release will print out buffers and wake up + * /dev/kmsg and syslog() users. + */ + if (console_trylock_for_printk(this_cpu)) + console_unlock(); + } + lockdep_on(); +out_restore_irqs: + local_irq_restore(flags); return printed_len; } EXPORT_SYMBOL(vprintk_emit); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 384ede311717..f243444a3772 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1396,7 +1396,6 @@ void tracing_start(void) arch_spin_unlock(&global_trace.max_lock); - ftrace_start(); out: raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); } @@ -1443,7 +1442,6 @@ void tracing_stop(void) struct ring_buffer *buffer; unsigned long flags; - ftrace_stop(); raw_spin_lock_irqsave(&global_trace.start_lock, flags); if (global_trace.stop_count++) goto out; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 04fdb5de823c..3c9b97e6b1f4 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -893,6 +893,9 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file, int ret; if (file) { + if (tu->tp.flags & TP_FLAG_PROFILE) + return -EINTR; + link = kmalloc(sizeof(*link), GFP_KERNEL); if (!link) return -ENOMEM; @@ -901,29 +904,40 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file, list_add_tail_rcu(&link->list, &tu->tp.files); tu->tp.flags |= TP_FLAG_TRACE; - } else - tu->tp.flags |= TP_FLAG_PROFILE; + } else { + if (tu->tp.flags & TP_FLAG_TRACE) + return -EINTR; - ret = uprobe_buffer_enable(); - if (ret < 0) - return ret; + tu->tp.flags |= TP_FLAG_PROFILE; + } WARN_ON(!uprobe_filter_is_empty(&tu->filter)); if (enabled) return 0; + ret = uprobe_buffer_enable(); + if (ret) + goto err_flags; + tu->consumer.filter = filter; ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); - if (ret) { - if (file) { - list_del(&link->list); - kfree(link); - tu->tp.flags &= ~TP_FLAG_TRACE; - } else - tu->tp.flags &= ~TP_FLAG_PROFILE; - } + if (ret) + goto err_buffer; + return 0; + + err_buffer: + uprobe_buffer_disable(); + + err_flags: + if (file) { + list_del(&link->list); + kfree(link); + tu->tp.flags &= ~TP_FLAG_TRACE; + } else { + tu->tp.flags &= ~TP_FLAG_PROFILE; + } return ret; } @@ -1201,12 +1215,6 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) 
current->utask->vaddr = (unsigned long) &udd; -#ifdef CONFIG_PERF_EVENTS - if ((tu->tp.flags & TP_FLAG_TRACE) == 0 && - !uprobe_perf_filter(&tu->consumer, 0, current->mm)) - return UPROBE_HANDLER_REMOVE; -#endif - if (WARN_ON_ONCE(!uprobe_cpu_buffer)) return 0; diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index b74da447e81e..7a85967060a5 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c @@ -192,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, int s = 255; while ((ip < iend) && (s == 255)) { s = *ip++; + if (unlikely(length > (size_t)(length + s))) + goto _output_error; length += s; } } @@ -232,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, if (length == ML_MASK) { while (ip < iend) { int s = *ip++; + if (unlikely(length > (size_t)(length + s))) + goto _output_error; length += s; if (s == 255) continue; @@ -284,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, /* write overflow error detected */ _output_error: - return (int) (-(((char *) ip) - source)); + return -1; } int lz4_decompress(const unsigned char *src, size_t *src_len, diff --git a/mm/memory-failure.c b/mm/memory-failure.c index cd8989c1027e..c6399e328931 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -895,7 +895,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, struct page *hpage = *hpagep; struct page *ppage; - if (PageReserved(p) || PageSlab(p)) + if (PageReserved(p) || PageSlab(p) || !PageLRU(p)) return SWAP_SUCCESS; /* @@ -1159,9 +1159,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags) action_result(pfn, "free buddy, 2nd try", DELAYED); return 0; } - action_result(pfn, "non LRU", IGNORED); - put_page(p); - return -EBUSY; } } @@ -1194,6 +1191,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags) return 0; } + if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p)) + goto identify_page_state; + /* * For error on the tail page, we should set PG_hwpoison * on the head page to show that the hugepage is hwpoisoned @@ -1243,6 +1243,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) goto out; } +identify_page_state: res = -EBUSY; /* * The first check uses the current page flags which may not have any diff --git a/mm/msync.c b/mm/msync.c index a5c673669ca6..992a1673d488 100644 --- a/mm/msync.c +++ b/mm/msync.c @@ -78,7 +78,8 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags) goto out_unlock; } file = vma->vm_file; - fstart = start + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); + fstart = (start - vma->vm_start) + + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); fend = fstart + (min(end, vma->vm_end) - start) - 1; start = vma->vm_end; if ((flags & MS_SYNC) && file && diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 20d17f8266fe..0ea758b898fd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -816,9 +816,21 @@ void __init init_cma_reserved_pageblock(struct page *page) set_page_count(p, 0); } while (++p, --i); - set_page_refcounted(page); set_pageblock_migratetype(page, MIGRATE_CMA); - __free_pages(page, pageblock_order); + + if (pageblock_order >= MAX_ORDER) { + i = pageblock_nr_pages; + p = page; + do { + set_page_refcounted(p); + __free_pages(p, MAX_ORDER - 1); + p += MAX_ORDER_NR_PAGES; + } while (i -= MAX_ORDER_NR_PAGES); + } else { + set_page_refcounted(page); + __free_pages(page, pageblock_order); + } + adjust_managed_page_count(page, pageblock_nr_pages); } #endif diff --git 
a/mm/shmem.c b/mm/shmem.c index 8f419cff9e34..1140f49b6ded 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1029,6 +1029,9 @@ repeat: goto failed; } + if (page && sgp == SGP_WRITE) + mark_page_accessed(page); + /* fallocated page? */ if (page && !PageUptodate(page)) { if (sgp != SGP_READ) @@ -1110,6 +1113,9 @@ repeat: shmem_recalc_inode(inode); spin_unlock(&info->lock); + if (sgp == SGP_WRITE) + mark_page_accessed(page); + delete_from_swap_cache(page); set_page_dirty(page); swap_free(swap); @@ -1136,6 +1142,9 @@ repeat: __SetPageSwapBacked(page); __set_page_locked(page); + if (sgp == SGP_WRITE) + init_page_accessed(page); + error = mem_cgroup_charge_file(page, current->mm, gfp & GFP_RECLAIM_MASK); if (error) @@ -1412,13 +1421,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - int ret; struct inode *inode = mapping->host; pgoff_t index = pos >> PAGE_CACHE_SHIFT; - ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); - if (ret == 0 && *pagep) - init_page_accessed(*pagep); - return ret; + return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); } static int diff --git a/mm/slub.c b/mm/slub.c index b2b047327d76..73004808537e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1881,7 +1881,7 @@ redo: new.frozen = 0; - if (!new.inuse && n->nr_partial > s->min_partial) + if (!new.inuse && n->nr_partial >= s->min_partial) m = M_FREE; else if (new.freelist) { m = M_PARTIAL; @@ -1992,7 +1992,7 @@ static void unfreeze_partials(struct kmem_cache *s, new.freelist, new.counters, "unfreezing slab")); - if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) { + if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { page->next = discard_page; discard_page = page; } else { @@ -2620,7 +2620,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, return; } - if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) + if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) goto slab_empty; /* diff --git a/sound/pci/hda/hda_i915.c b/sound/pci/hda/hda_i915.c index e9e8a4a4a9a1..8b4940ba33d6 100644 --- a/sound/pci/hda/hda_i915.c +++ b/sound/pci/hda/hda_i915.c @@ -20,10 +20,20 @@ #include <linux/module.h> #include <sound/core.h> #include <drm/i915_powerwell.h> +#include "hda_priv.h" #include "hda_i915.h" +/* Intel HSW/BDW display HDA controller Extended Mode registers. + * EM4 (M value) and EM5 (N Value) are used to convert CDClk (Core Display + * Clock) to 24MHz BCLK: BCLK = CDCLK * M / N + * The values will be lost when the display power well is disabled. 
+ */ +#define ICH6_REG_EM4 0x100c +#define ICH6_REG_EM5 0x1010 + static int (*get_power)(void); static int (*put_power)(void); +static int (*get_cdclk)(void); int hda_display_power(bool enable) { @@ -38,6 +48,43 @@ int hda_display_power(bool enable) return put_power(); } +void haswell_set_bclk(struct azx *chip) +{ + int cdclk_freq; + unsigned int bclk_m, bclk_n; + + if (!get_cdclk) + return; + + cdclk_freq = get_cdclk(); + switch (cdclk_freq) { + case 337500: + bclk_m = 16; + bclk_n = 225; + break; + + case 450000: + default: /* default CDCLK 450MHz */ + bclk_m = 4; + bclk_n = 75; + break; + + case 540000: + bclk_m = 4; + bclk_n = 90; + break; + + case 675000: + bclk_m = 8; + bclk_n = 225; + break; + } + + azx_writew(chip, EM4, bclk_m); + azx_writew(chip, EM5, bclk_n); +} + + int hda_i915_init(void) { int err = 0; @@ -55,6 +102,10 @@ int hda_i915_init(void) return -ENODEV; } + get_cdclk = symbol_request(i915_get_cdclk_freq); + if (!get_cdclk) /* may have abnormal BCLK and audio playback rate */ + pr_warn("hda-i915: get_cdclk symbol get fail\n"); + pr_debug("HDA driver get symbol successfully from i915 module\n"); return err; @@ -70,6 +121,10 @@ int hda_i915_exit(void) symbol_put(i915_release_power_well); put_power = NULL; } + if (get_cdclk) { + symbol_put(i915_get_cdclk_freq); + get_cdclk = NULL; + } return 0; } diff --git a/sound/pci/hda/hda_i915.h b/sound/pci/hda/hda_i915.h index bfd835f8f1aa..e6072c627583 100644 --- a/sound/pci/hda/hda_i915.h +++ b/sound/pci/hda/hda_i915.h @@ -18,10 +18,12 @@ #ifdef CONFIG_SND_HDA_I915 int hda_display_power(bool enable); +void haswell_set_bclk(struct azx *chip); int hda_i915_init(void); int hda_i915_exit(void); #else static inline int hda_display_power(bool enable) { return 0; } +static inline void haswell_set_bclk(struct azx *chip) { return; } static inline int hda_i915_init(void) { return -ENODEV; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 25753db97071..b6b4e71a0b0b 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -62,9 +62,9 @@ #include <linux/vga_switcheroo.h> #include <linux/firmware.h> #include "hda_codec.h" -#include "hda_i915.h" #include "hda_controller.h" #include "hda_priv.h" +#include "hda_i915.h" static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; @@ -288,21 +288,8 @@ static char *driver_short_names[] = { [AZX_DRIVER_GENERIC] = "HD-Audio Generic", }; - -/* Intel HSW/BDW display HDA controller Extended Mode registers. - * EM4 (M value) and EM5 (N Value) are used to convert CDClk (Core Display - * Clock) to 24MHz BCLK: BCLK = CDCLK * M / N - * The values will be lost when the display power well is disabled. 
- */ -#define ICH6_REG_EM4 0x100c -#define ICH6_REG_EM5 0x1010 - struct hda_intel { struct azx chip; - - /* HSW/BDW display HDA controller to restore BCLK from CDCLK */ - unsigned int bclk_m; - unsigned int bclk_n; }; @@ -598,22 +585,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp) #define azx_del_card_list(chip) /* NOP */ #endif /* CONFIG_PM */ -static void haswell_save_bclk(struct azx *chip) -{ - struct hda_intel *hda = container_of(chip, struct hda_intel, chip); - - hda->bclk_m = azx_readw(chip, EM4); - hda->bclk_n = azx_readw(chip, EM5); -} - -static void haswell_restore_bclk(struct azx *chip) -{ - struct hda_intel *hda = container_of(chip, struct hda_intel, chip); - - azx_writew(chip, EM4, hda->bclk_m); - azx_writew(chip, EM5, hda->bclk_n); -} - #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO) /* * power management @@ -641,12 +612,6 @@ static int azx_suspend(struct device *dev) chip->irq = -1; } - /* Save BCLK M/N values before they become invalid in D3. - * Will test if display power well can be released now. - */ - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) - haswell_save_bclk(chip); - if (chip->msi) pci_disable_msi(chip->pci); pci_disable_device(pci); @@ -668,7 +633,7 @@ static int azx_resume(struct device *dev) if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { hda_display_power(true); - haswell_restore_bclk(chip); + haswell_set_bclk(chip); } pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); @@ -713,10 +678,9 @@ static int azx_runtime_suspend(struct device *dev) azx_stop_chip(chip); azx_enter_link_reset(chip); azx_clear_irq_pending(chip); - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { - haswell_save_bclk(chip); + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) hda_display_power(false); - } + return 0; } @@ -736,7 +700,7 @@ static int azx_runtime_resume(struct device *dev) if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { hda_display_power(true); - haswell_restore_bclk(chip); + haswell_set_bclk(chip); } /* Read STATESTS before controller reset */ @@ -1426,6 +1390,10 @@ static int azx_first_init(struct azx *chip) /* initialize chip */ azx_init_pci(chip); + + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) + haswell_set_bclk(chip); + azx_init_chip(chip, (probe_only[dev] & 2) == 0); /* codec detection */ diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 1c654effcd1a..b60824e90408 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4880,6 +4880,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -5085,6 +5086,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x1b, 0x411111f0}, {0x1d, 0x40700001}, {0x1e, 0x411111f0}), + SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x40000000}, + {0x13, 0x90a60140}, + {0x14, 0x90170110}, + {0x15, 0x0221401f}, + {0x16, 0x411111f0}, + {0x18, 0x411111f0}, + {0x19, 0x411111f0}, + {0x1a, 0x411111f0}, + {0x1b, 0x411111f0}, + 
{0x1d, 0x40700001}, + {0x1e, 0x411111f0}), {} }; diff --git a/tools/testing/selftests/cpu-hotplug/Makefile b/tools/testing/selftests/cpu-hotplug/Makefile index ae5faf9aade2..790c23a9db44 100644 --- a/tools/testing/selftests/cpu-hotplug/Makefile +++ b/tools/testing/selftests/cpu-hotplug/Makefile @@ -1,6 +1,6 @@ all: run_tests: - @/bin/sh ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]" + @/bin/bash ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]" clean: diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c index aa290c0de6f5..552f0810bffb 100644 --- a/tools/testing/selftests/ipc/msgque.c +++ b/tools/testing/selftests/ipc/msgque.c @@ -193,6 +193,11 @@ int main(int argc, char **argv) int msg, pid, err; struct msgque_data msgque; + if (getuid() != 0) { + printf("Please run the test as root - Exiting.\n"); + exit(1); + } + msgque.key = ftok(argv[0], 822155650); if (msgque.key == -1) { printf("Can't make key\n"); diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile index 350bfeda3aa8..058c76f5d102 100644 --- a/tools/testing/selftests/memory-hotplug/Makefile +++ b/tools/testing/selftests/memory-hotplug/Makefile @@ -1,6 +1,6 @@ all: run_tests: - @/bin/sh ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]" + @/bin/bash ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]" clean: diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c index fe1e66b6ef40..a87e99f37c52 100644 --- a/tools/usb/ffs-test.c +++ b/tools/usb/ffs-test.c @@ -116,8 +116,8 @@ static const struct { .header = { .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC), .length = cpu_to_le32(sizeof descriptors), - .fs_count = 3, - .hs_count = 3, + .fs_count = cpu_to_le32(3), + .hs_count = cpu_to_le32(3), }, .fs_descs = { .intf = { |
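One last note on the ffs-test.c hunk above: it is an endianness fix. struct usb_functionfs_descs_head declares its fields as __le32, and while magic and length were already converted, the bare fs_count/hs_count assignments only worked by accident on little-endian hosts. A small self-contained illustration of the pattern, using glibc's htole32() as the userspace analogue of cpu_to_le32(); the struct here is a stand-in for the kernel one, not its actual definition:

        #include <endian.h>     /* htole32(), glibc */
        #include <stdint.h>
        #include <stdio.h>

        /* Same shape as usb_functionfs_descs_head: little-endian on the wire. */
        struct descs_head {
                uint32_t magic;
                uint32_t length;
                uint32_t fs_count;
                uint32_t hs_count;
        };

        int main(void)
        {
                struct descs_head h = {
                        .magic    = htole32(1),
                        .length   = htole32(sizeof(h)),
                        .fs_count = htole32(3), /* the fix: never a raw host-endian 3 */
                        .hs_count = htole32(3),
                };
                const uint8_t *b = (const uint8_t *)&h.fs_count;

                /* Prints 03 00 00 00 on little- and big-endian hosts alike. */
                printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
                return 0;
        }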