From d8f2ebaac650dc35db3bf5cf10e8ee1115b455f8 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 13 Apr 2017 11:41:38 +1000 Subject: sync_file: get rid of internal reference count. sync_file uses the reference count of the file, the internal kref was never getting moved past 1. We can reintroduce this if we decide we need it later. [airlied: fix buildbot warnings] Reviewed-by: Chris Wilson Signed-off-by: Dave Airlie Acked-by: Sumit Semwal Signed-off-by: Gustavo Padovan Link: http://patchwork.freedesktop.org/patch/msgid/20170413014144.637-2-airlied@gmail.com --- include/linux/sync_file.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index 3e3ab84fc4cd..d37beefdfbd5 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h @@ -14,7 +14,6 @@ #define _LINUX_SYNC_FILE_H #include -#include #include #include #include @@ -24,7 +23,6 @@ /** * struct sync_file - sync file to export to the userspace * @file: file representing this fence - * @kref: reference count on fence. * @name: name of sync_file. Useful for debugging * @sync_file_list: membership in global file list * @wq: wait queue for fence signaling @@ -33,7 +31,6 @@ */ struct sync_file { struct file *file; - struct kref kref; char name[32]; #ifdef CONFIG_DEBUG_FS struct list_head sync_file_list; -- cgit v1.2.3 From 73c73463189974ace90a05397197339071c6ecc7 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 12 Apr 2017 20:17:45 -0700 Subject: video: ARM CLCD: Move registers to a separate header. We'd like to reuse these register definitions for the DRM CLCD driver, but there's a bunch of fbdev-specific code in the current header. v2: Add #ifndef guard. Signed-off-by: Eric Anholt Link: http://patchwork.freedesktop.org/patch/msgid/20170413031746.12921-1-eric@anholt.net --- include/linux/amba/clcd-regs.h | 81 ++++++++++++++++++++++++++++++++++++++++++ include/linux/amba/clcd.h | 68 +---------------------------------- 2 files changed, 82 insertions(+), 67 deletions(-) create mode 100644 include/linux/amba/clcd-regs.h (limited to 'include/linux') diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h new file mode 100644 index 000000000000..69c0e2143003 --- /dev/null +++ b/include/linux/amba/clcd-regs.h @@ -0,0 +1,81 @@ +/* + * David A Rusling + * + * Copyright (C) 2001 ARM Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
+ */ + +#ifndef AMBA_CLCD_REGS_H +#define AMBA_CLCD_REGS_H + +/* + * CLCD Controller Internal Register addresses + */ +#define CLCD_TIM0 0x00000000 +#define CLCD_TIM1 0x00000004 +#define CLCD_TIM2 0x00000008 +#define CLCD_TIM3 0x0000000c +#define CLCD_UBAS 0x00000010 +#define CLCD_LBAS 0x00000014 + +#define CLCD_PL110_IENB 0x00000018 +#define CLCD_PL110_CNTL 0x0000001c +#define CLCD_PL110_STAT 0x00000020 +#define CLCD_PL110_INTR 0x00000024 +#define CLCD_PL110_UCUR 0x00000028 +#define CLCD_PL110_LCUR 0x0000002C + +#define CLCD_PL111_CNTL 0x00000018 +#define CLCD_PL111_IENB 0x0000001c +#define CLCD_PL111_RIS 0x00000020 +#define CLCD_PL111_MIS 0x00000024 +#define CLCD_PL111_ICR 0x00000028 +#define CLCD_PL111_UCUR 0x0000002c +#define CLCD_PL111_LCUR 0x00000030 + +#define CLCD_PALL 0x00000200 +#define CLCD_PALETTE 0x00000200 + +#define TIM2_CLKSEL (1 << 5) +#define TIM2_IVS (1 << 11) +#define TIM2_IHS (1 << 12) +#define TIM2_IPC (1 << 13) +#define TIM2_IOE (1 << 14) +#define TIM2_BCD (1 << 26) + +#define CNTL_LCDEN (1 << 0) +#define CNTL_LCDBPP1 (0 << 1) +#define CNTL_LCDBPP2 (1 << 1) +#define CNTL_LCDBPP4 (2 << 1) +#define CNTL_LCDBPP8 (3 << 1) +#define CNTL_LCDBPP16 (4 << 1) +#define CNTL_LCDBPP16_565 (6 << 1) +#define CNTL_LCDBPP16_444 (7 << 1) +#define CNTL_LCDBPP24 (5 << 1) +#define CNTL_LCDBW (1 << 4) +#define CNTL_LCDTFT (1 << 5) +#define CNTL_LCDMONO8 (1 << 6) +#define CNTL_LCDDUAL (1 << 7) +#define CNTL_BGR (1 << 8) +#define CNTL_BEBO (1 << 9) +#define CNTL_BEPO (1 << 10) +#define CNTL_LCDPWR (1 << 11) +#define CNTL_LCDVCOMP(x) ((x) << 12) +#define CNTL_LDMAFIFOTIME (1 << 15) +#define CNTL_WATERMARK (1 << 16) + +/* ST Microelectronics variant bits */ +#define CNTL_ST_1XBPP_444 0x0 +#define CNTL_ST_1XBPP_5551 (1 << 17) +#define CNTL_ST_1XBPP_565 (1 << 18) +#define CNTL_ST_CDWID_12 0x0 +#define CNTL_ST_CDWID_16 (1 << 19) +#define CNTL_ST_CDWID_18 (1 << 20) +#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20)) +#define CNTL_ST_CEAEN (1 << 21) +#define CNTL_ST_LCDBPP24_PACKED (6 << 1) + +#endif /* AMBA_CLCD_REGS_H */ diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h index 1035879b322c..d0c3be77c18e 100644 --- a/include/linux/amba/clcd.h +++ b/include/linux/amba/clcd.h @@ -10,73 +10,7 @@ * for more details. 
*/ #include - -/* - * CLCD Controller Internal Register addresses - */ -#define CLCD_TIM0 0x00000000 -#define CLCD_TIM1 0x00000004 -#define CLCD_TIM2 0x00000008 -#define CLCD_TIM3 0x0000000c -#define CLCD_UBAS 0x00000010 -#define CLCD_LBAS 0x00000014 - -#define CLCD_PL110_IENB 0x00000018 -#define CLCD_PL110_CNTL 0x0000001c -#define CLCD_PL110_STAT 0x00000020 -#define CLCD_PL110_INTR 0x00000024 -#define CLCD_PL110_UCUR 0x00000028 -#define CLCD_PL110_LCUR 0x0000002C - -#define CLCD_PL111_CNTL 0x00000018 -#define CLCD_PL111_IENB 0x0000001c -#define CLCD_PL111_RIS 0x00000020 -#define CLCD_PL111_MIS 0x00000024 -#define CLCD_PL111_ICR 0x00000028 -#define CLCD_PL111_UCUR 0x0000002c -#define CLCD_PL111_LCUR 0x00000030 - -#define CLCD_PALL 0x00000200 -#define CLCD_PALETTE 0x00000200 - -#define TIM2_CLKSEL (1 << 5) -#define TIM2_IVS (1 << 11) -#define TIM2_IHS (1 << 12) -#define TIM2_IPC (1 << 13) -#define TIM2_IOE (1 << 14) -#define TIM2_BCD (1 << 26) - -#define CNTL_LCDEN (1 << 0) -#define CNTL_LCDBPP1 (0 << 1) -#define CNTL_LCDBPP2 (1 << 1) -#define CNTL_LCDBPP4 (2 << 1) -#define CNTL_LCDBPP8 (3 << 1) -#define CNTL_LCDBPP16 (4 << 1) -#define CNTL_LCDBPP16_565 (6 << 1) -#define CNTL_LCDBPP16_444 (7 << 1) -#define CNTL_LCDBPP24 (5 << 1) -#define CNTL_LCDBW (1 << 4) -#define CNTL_LCDTFT (1 << 5) -#define CNTL_LCDMONO8 (1 << 6) -#define CNTL_LCDDUAL (1 << 7) -#define CNTL_BGR (1 << 8) -#define CNTL_BEBO (1 << 9) -#define CNTL_BEPO (1 << 10) -#define CNTL_LCDPWR (1 << 11) -#define CNTL_LCDVCOMP(x) ((x) << 12) -#define CNTL_LDMAFIFOTIME (1 << 15) -#define CNTL_WATERMARK (1 << 16) - -/* ST Microelectronics variant bits */ -#define CNTL_ST_1XBPP_444 0x0 -#define CNTL_ST_1XBPP_5551 (1 << 17) -#define CNTL_ST_1XBPP_565 (1 << 18) -#define CNTL_ST_CDWID_12 0x0 -#define CNTL_ST_CDWID_16 (1 << 19) -#define CNTL_ST_CDWID_18 (1 << 20) -#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20)) -#define CNTL_ST_CEAEN (1 << 21) -#define CNTL_ST_LCDBPP24_PACKED (6 << 1) +#include enum { /* individual formats */ -- cgit v1.2.3 From 33c35aa4817864e056fd772230b0c6b552e36ea2 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Mon, 15 May 2017 09:34:06 -0400 Subject: cgroup: Prevent kill_css() from being called more than once The kill_css() function may be called more than once under the condition that the css was killed but not physically removed yet followed by the removal of the cgroup that is hosting the css. This patch prevents any harmm from being done when that happens. 
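For illustration, the guard in miniature (the names obj, obj_mutex and OBJ_DYING are hypothetical; the hunk below applies the same idea to kill_css() with a CSS_DYING flag checked and set under cgroup_mutex):

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct obj {
        unsigned long flags;
};
#define OBJ_DYING       (1 << 0)

static DEFINE_MUTEX(obj_mutex);

/* Teardown that tolerates being invoked twice: every caller holds
 * obj_mutex, so a flag tested and set under that lock is enough. */
static void obj_kill(struct obj *o)
{
        lockdep_assert_held(&obj_mutex);

        if (o->flags & OBJ_DYING)       /* second call: already dying */
                return;
        o->flags |= OBJ_DYING;

        /* ... one-time teardown work ... */
}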
Signed-off-by: Waiman Long Signed-off-by: Tejun Heo Cc: stable@vger.kernel.org # v4.5+ --- include/linux/cgroup-defs.h | 1 + kernel/cgroup/cgroup.c | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 21745946cae1..ec47101cb1bf 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -48,6 +48,7 @@ enum { CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ CSS_VISIBLE = (1 << 3), /* css is visible to userland */ + CSS_DYING = (1 << 4), /* css is dying */ }; /* bits in struct cgroup flags field */ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index c3c9a0e1b3c9..8d4e85eae42c 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css) { lockdep_assert_held(&cgroup_mutex); + if (css->flags & CSS_DYING) + return; + + css->flags |= CSS_DYING; + /* * This must happen before css is disassociated with its cgroup. * See seq_css() for details. -- cgit v1.2.3 From 032838f9cb4014af8a974374db9e2ce6f3aa8d3b Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 8 May 2017 12:33:48 -0700 Subject: drm/pl111: Register the clock divider and use it. This is required for the panel to work on bcm911360, where CLCDCLK is the fixed 200Mhz AXI41 clock. The rate set is still passed up to the CLCDCLK, for platforms that have a settable rate on that one. v2: Set SET_RATE_PARENT (caught by Linus Walleij), depend on COMMON_CLK. v3: Mark the clk_ops static (caught by Stephen). Signed-off-by: Eric Anholt Link: http://patchwork.freedesktop.org/patch/msgid/20170508193348.30236-1-eric@anholt.net Reviewed-by: Linus Walleij Reviewed-by: Stephen Boyd --- drivers/gpu/drm/pl111/Kconfig | 1 + drivers/gpu/drm/pl111/pl111_display.c | 162 ++++++++++++++++++++++++++++++---- drivers/gpu/drm/pl111/pl111_drm.h | 8 ++ drivers/gpu/drm/pl111/pl111_drv.c | 11 +-- include/linux/amba/clcd-regs.h | 5 ++ 5 files changed, 163 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig index ede49efd531f..309f4fd52de7 100644 --- a/drivers/gpu/drm/pl111/Kconfig +++ b/drivers/gpu/drm/pl111/Kconfig @@ -2,6 +2,7 @@ config DRM_PL111 tristate "DRM Support for PL111 CLCD Controller" depends on DRM depends on ARM || ARM64 || COMPILE_TEST + depends on COMMON_CLK select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 39a5c33bce7d..fbf8fbec6c16 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -108,7 +108,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe, u32 cntl; u32 ppl, hsw, hfp, hbp; u32 lpp, vsw, vfp, vbp; - u32 cpl; + u32 cpl, tim2; int ret; ret = clk_set_rate(priv->clk, mode->clock * 1000); @@ -142,20 +142,28 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe, (vfp << 16) | (vbp << 24), priv->regs + CLCD_TIM1); - /* XXX: We currently always use CLCDCLK with no divisor. We - * could probably reduce power consumption by using HCLK - * (apb_pclk) with a divisor when it gets us near our target - * pixel clock. - */ - writel(((mode->flags & DRM_MODE_FLAG_NHSYNC) ? TIM2_IHS : 0) | - ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? 
TIM2_IVS : 0) | - ((connector->display_info.bus_flags & - DRM_BUS_FLAG_DE_LOW) ? TIM2_IOE : 0) | - ((connector->display_info.bus_flags & - DRM_BUS_FLAG_PIXDATA_NEGEDGE) ? TIM2_IPC : 0) | - TIM2_BCD | - (cpl << 16), - priv->regs + CLCD_TIM2); + + spin_lock(&priv->tim2_lock); + + tim2 = readl(priv->regs + CLCD_TIM2); + tim2 &= (TIM2_BCD | TIM2_PCD_LO_MASK | TIM2_PCD_HI_MASK); + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + tim2 |= TIM2_IHS; + + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + tim2 |= TIM2_IVS; + + if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW) + tim2 |= TIM2_IOE; + + if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) + tim2 |= TIM2_IPC; + + tim2 |= cpl << 16; + writel(tim2, priv->regs + CLCD_TIM2); + spin_unlock(&priv->tim2_lock); + writel(0, priv->regs + CLCD_TIM3); drm_panel_prepare(priv->connector.panel); @@ -288,6 +296,126 @@ const struct drm_simple_display_pipe_funcs pl111_display_funcs = { .prepare_fb = pl111_display_prepare_fb, }; +static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate, + unsigned long *prate, bool set_parent) +{ + int best_div = 1, div; + struct clk_hw *parent = clk_hw_get_parent(hw); + unsigned long best_prate = 0; + unsigned long best_diff = ~0ul; + int max_div = (1 << (TIM2_PCD_LO_BITS + TIM2_PCD_HI_BITS)) - 1; + + for (div = 1; div < max_div; div++) { + unsigned long this_prate, div_rate, diff; + + if (set_parent) + this_prate = clk_hw_round_rate(parent, rate * div); + else + this_prate = *prate; + div_rate = DIV_ROUND_UP_ULL(this_prate, div); + diff = abs(rate - div_rate); + + if (diff < best_diff) { + best_div = div; + best_diff = diff; + best_prate = this_prate; + } + } + + *prate = best_prate; + return best_div; +} + +static long pl111_clk_div_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + int div = pl111_clk_div_choose_div(hw, rate, prate, true); + + return DIV_ROUND_UP_ULL(*prate, div); +} + +static unsigned long pl111_clk_div_recalc_rate(struct clk_hw *hw, + unsigned long prate) +{ + struct pl111_drm_dev_private *priv = + container_of(hw, struct pl111_drm_dev_private, clk_div); + u32 tim2 = readl(priv->regs + CLCD_TIM2); + int div; + + if (tim2 & TIM2_BCD) + return prate; + + div = tim2 & TIM2_PCD_LO_MASK; + div |= (tim2 & TIM2_PCD_HI_MASK) >> + (TIM2_PCD_HI_SHIFT - TIM2_PCD_LO_BITS); + div += 2; + + return DIV_ROUND_UP_ULL(prate, div); +} + +static int pl111_clk_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long prate) +{ + struct pl111_drm_dev_private *priv = + container_of(hw, struct pl111_drm_dev_private, clk_div); + int div = pl111_clk_div_choose_div(hw, rate, &prate, false); + u32 tim2; + + spin_lock(&priv->tim2_lock); + tim2 = readl(priv->regs + CLCD_TIM2); + tim2 &= ~(TIM2_BCD | TIM2_PCD_LO_MASK | TIM2_PCD_HI_MASK); + + if (div == 1) { + tim2 |= TIM2_BCD; + } else { + div -= 2; + tim2 |= div & TIM2_PCD_LO_MASK; + tim2 |= (div >> TIM2_PCD_LO_BITS) << TIM2_PCD_HI_SHIFT; + } + + writel(tim2, priv->regs + CLCD_TIM2); + spin_unlock(&priv->tim2_lock); + + return 0; +} + +static const struct clk_ops pl111_clk_div_ops = { + .recalc_rate = pl111_clk_div_recalc_rate, + .round_rate = pl111_clk_div_round_rate, + .set_rate = pl111_clk_div_set_rate, +}; + +static int +pl111_init_clock_divider(struct drm_device *drm) +{ + struct pl111_drm_dev_private *priv = drm->dev_private; + struct clk *parent = devm_clk_get(drm->dev, "clcdclk"); + struct clk_hw *div = &priv->clk_div; + const char *parent_name; + struct clk_init_data init = { + .name = "pl111_div", + 
.ops = &pl111_clk_div_ops, + .parent_names = &parent_name, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }; + int ret; + + if (IS_ERR(parent)) { + dev_err(drm->dev, "CLCD: unable to get clcdclk.\n"); + return PTR_ERR(parent); + } + parent_name = __clk_get_name(parent); + + spin_lock_init(&priv->tim2_lock); + div->init = &init; + + ret = devm_clk_hw_register(drm->dev, div); + + priv->clk = div->clk; + return ret; +} + int pl111_display_init(struct drm_device *drm) { struct pl111_drm_dev_private *priv = drm->dev_private; @@ -333,6 +461,10 @@ int pl111_display_init(struct drm_device *drm) return -EINVAL; } + ret = pl111_init_clock_divider(drm); + if (ret) + return ret; + ret = drm_simple_display_pipe_init(drm, &priv->pipe, &pl111_display_funcs, formats, ARRAY_SIZE(formats), diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h index f381593921b7..4162c6aa5dbb 100644 --- a/drivers/gpu/drm/pl111/pl111_drm.h +++ b/drivers/gpu/drm/pl111/pl111_drm.h @@ -21,6 +21,7 @@ #include #include +#include #define CLCD_IRQ_NEXTBASE_UPDATE BIT(2) @@ -37,7 +38,14 @@ struct pl111_drm_dev_private { struct drm_fbdev_cma *fbdev; void *regs; + /* The pixel clock (a reference to our clock divider off of CLCDCLK). */ struct clk *clk; + /* pl111's internal clock divider. */ + struct clk_hw clk_div; + /* Lock to sync access to CLCD_TIM2 between the common clock + * subsystem and pl111_display_enable(). + */ + spinlock_t tim2_lock; }; #define to_pl111_connector(x) \ diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 936403f65508..9d1467492cb9 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -50,8 +50,8 @@ * - Read back hardware state at boot to skip reprogramming the * hardware when doing a no-op modeset. * - * - Use the internal clock divisor to reduce power consumption by - * using HCLK (apb_pclk) when appropriate. + * - Use the CLKSEL bit to support switching between the two external + * clock parents. 
*/ #include @@ -195,13 +195,6 @@ static int pl111_amba_probe(struct amba_device *amba_dev, priv->drm = drm; drm->dev_private = priv; - priv->clk = devm_clk_get(dev, "clcdclk"); - if (IS_ERR(priv->clk)) { - dev_err(dev, "CLCD: unable to get clk.\n"); - ret = PTR_ERR(priv->clk); - goto dev_unref; - } - priv->regs = devm_ioremap_resource(dev, &amba_dev->res); if (!priv->regs) { dev_err(dev, "%s failed mmio\n", __func__); diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h index 69c0e2143003..516a6fda83c5 100644 --- a/include/linux/amba/clcd-regs.h +++ b/include/linux/amba/clcd-regs.h @@ -39,12 +39,17 @@ #define CLCD_PALL 0x00000200 #define CLCD_PALETTE 0x00000200 +#define TIM2_PCD_LO_MASK GENMASK(4, 0) +#define TIM2_PCD_LO_BITS 5 #define TIM2_CLKSEL (1 << 5) #define TIM2_IVS (1 << 11) #define TIM2_IHS (1 << 12) #define TIM2_IPC (1 << 13) #define TIM2_IOE (1 << 14) #define TIM2_BCD (1 << 26) +#define TIM2_PCD_HI_MASK GENMASK(31, 27) +#define TIM2_PCD_HI_BITS 5 +#define TIM2_PCD_HI_SHIFT 27 #define CNTL_LCDEN (1 << 0) #define CNTL_LCDBPP1 (0 << 1) -- cgit v1.2.3 From 28232a4317be7ad615f0f1b69dc8583fd580a8e3 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sat, 20 May 2017 14:12:34 +0200 Subject: KVM: arm/arm64: Fix isues with GICv2 on GICv3 migration We have been a little loose with our intermediate VMCR representation where we had a 'ctlr' field, but we failed to differentiate between the GICv2 GICC_CTLR and ICC_CTLR_EL1 layouts, and therefore ended up mapping the wrong bits into the individual fields of the ICH_VMCR_EL2 when emulating a GICv2 on a GICv3 system. Fix this by using explicit fields for the VMCR bits instead. Cc: Eric Auger Reported-by: wanghaibin Signed-off-by: Christoffer Dall Reviewed-by: Marc Zyngier Tested-by: Marc Zyngier --- arch/arm64/kvm/vgic-sys-reg-v3.c | 10 ++++---- include/linux/irqchip/arm-gic-v3.h | 4 ++++ include/linux/irqchip/arm-gic.h | 28 ++++++++++++++++++++--- virt/kvm/arm/vgic/vgic-mmio-v2.c | 16 +++++++++++-- virt/kvm/arm/vgic/vgic-v2.c | 28 ++++++++++++++++++++--- virt/kvm/arm/vgic/vgic-v3.c | 47 ++++++++++++++++++++++++++------------ virt/kvm/arm/vgic/vgic.h | 12 ++++++---- 7 files changed, 114 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c index 79f37e37d367..6260b69e5622 100644 --- a/arch/arm64/kvm/vgic-sys-reg-v3.c +++ b/arch/arm64/kvm/vgic-sys-reg-v3.c @@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, * Here set VMCR.CTLR in ICC_CTLR_EL1 layout. * The vgic_set_vmcr() will convert to ICH_VMCR layout. */ - vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK; - vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK; + vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT; + vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT; vgic_set_vmcr(vcpu, &vmcr); } else { val = 0; @@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, * The VMCR.CTLR value is in ICC_CTLR_EL1 layout. * Extract it directly using ICC_CTLR_EL1 reg definitions. 
*/ - val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK; - val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK; + val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK; + val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK; p->regval = val; } @@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, p->regval = 0; vgic_get_vmcr(vcpu, &vmcr); - if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) { + if (!vmcr.cbpr) { if (p->is_write) { vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >> ICC_BPR1_EL1_SHIFT; diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index fffb91202bc9..1fa293a37f4a 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -417,6 +417,10 @@ #define ICH_HCR_EN (1 << 0) #define ICH_HCR_UIE (1 << 1) +#define ICH_VMCR_ACK_CTL_SHIFT 2 +#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT) +#define ICH_VMCR_FIQ_EN_SHIFT 3 +#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) #define ICH_VMCR_CBPR_SHIFT 4 #define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) #define ICH_VMCR_EOIM_SHIFT 9 diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index dc30f3d057eb..d3453ee072fc 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -25,7 +25,18 @@ #define GICC_ENABLE 0x1 #define GICC_INT_PRI_THRESHOLD 0xf0 -#define GIC_CPU_CTRL_EOImodeNS (1 << 9) +#define GIC_CPU_CTRL_EnableGrp0_SHIFT 0 +#define GIC_CPU_CTRL_EnableGrp0 (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT) +#define GIC_CPU_CTRL_EnableGrp1_SHIFT 1 +#define GIC_CPU_CTRL_EnableGrp1 (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT) +#define GIC_CPU_CTRL_AckCtl_SHIFT 2 +#define GIC_CPU_CTRL_AckCtl (1 << GIC_CPU_CTRL_AckCtl_SHIFT) +#define GIC_CPU_CTRL_FIQEn_SHIFT 3 +#define GIC_CPU_CTRL_FIQEn (1 << GIC_CPU_CTRL_FIQEn_SHIFT) +#define GIC_CPU_CTRL_CBPR_SHIFT 4 +#define GIC_CPU_CTRL_CBPR (1 << GIC_CPU_CTRL_CBPR_SHIFT) +#define GIC_CPU_CTRL_EOImodeNS_SHIFT 9 +#define GIC_CPU_CTRL_EOImodeNS (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT) #define GICC_IAR_INT_ID_MASK 0x3ff #define GICC_INT_SPURIOUS 1023 @@ -84,8 +95,19 @@ #define GICH_LR_EOI (1 << 19) #define GICH_LR_HW (1 << 31) -#define GICH_VMCR_CTRL_SHIFT 0 -#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT) +#define GICH_VMCR_ENABLE_GRP0_SHIFT 0 +#define GICH_VMCR_ENABLE_GRP0_MASK (1 << GICH_VMCR_ENABLE_GRP0_SHIFT) +#define GICH_VMCR_ENABLE_GRP1_SHIFT 1 +#define GICH_VMCR_ENABLE_GRP1_MASK (1 << GICH_VMCR_ENABLE_GRP1_SHIFT) +#define GICH_VMCR_ACK_CTL_SHIFT 2 +#define GICH_VMCR_ACK_CTL_MASK (1 << GICH_VMCR_ACK_CTL_SHIFT) +#define GICH_VMCR_FIQ_EN_SHIFT 3 +#define GICH_VMCR_FIQ_EN_MASK (1 << GICH_VMCR_FIQ_EN_SHIFT) +#define GICH_VMCR_CBPR_SHIFT 4 +#define GICH_VMCR_CBPR_MASK (1 << GICH_VMCR_CBPR_SHIFT) +#define GICH_VMCR_EOI_MODE_SHIFT 9 +#define GICH_VMCR_EOI_MODE_MASK (1 << GICH_VMCR_EOI_MODE_SHIFT) + #define GICH_VMCR_PRIMASK_SHIFT 27 #define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT) #define GICH_VMCR_BINPOINT_SHIFT 21 diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index 0a4283ed9aa7..63e0bbdcddcc 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu, switch (addr & 0xff) { case GIC_CPU_CTRL: - val = vmcr.ctlr; + val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT; + val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT; + val |= 
vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT; + val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT; + val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT; + val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT; + break; case GIC_CPU_PRIMASK: /* @@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu, switch (addr & 0xff) { case GIC_CPU_CTRL: - vmcr.ctlr = val; + vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0); + vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1); + vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl); + vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn); + vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR); + vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS); + break; case GIC_CPU_PRIMASK: /* diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 504b4bd0d651..e4187e52bb26 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -177,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; u32 vmcr; - vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK; + vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) & + GICH_VMCR_ENABLE_GRP0_MASK; + vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) & + GICH_VMCR_ENABLE_GRP1_MASK; + vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) & + GICH_VMCR_ACK_CTL_MASK; + vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) & + GICH_VMCR_FIQ_EN_MASK; + vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) & + GICH_VMCR_CBPR_MASK; + vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) & + GICH_VMCR_EOI_MODE_MASK; vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK; vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & @@ -195,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) vmcr = cpu_if->vgic_vmcr; - vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> - GICH_VMCR_CTRL_SHIFT; + vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >> + GICH_VMCR_ENABLE_GRP0_SHIFT; + vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >> + GICH_VMCR_ENABLE_GRP1_SHIFT; + vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >> + GICH_VMCR_ACK_CTL_SHIFT; + vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >> + GICH_VMCR_FIQ_EN_SHIFT; + vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >> + GICH_VMCR_CBPR_SHIFT; + vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >> + GICH_VMCR_EOI_MODE_SHIFT; + vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT; vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 6fe3f003636a..030248e669f6 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -159,15 +159,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr) void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + u32 model = vcpu->kvm->arch.vgic.vgic_model; u32 vmcr; - /* - * Ignore the FIQen bit, because GIC emulation always implies - * SRE=1 which means the vFIQEn bit is also RES1. 
- */ - vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) << - ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK; - vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK; + if (model == KVM_DEV_TYPE_ARM_VGIC_V2) { + vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) & + ICH_VMCR_ACK_CTL_MASK; + vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) & + ICH_VMCR_FIQ_EN_MASK; + } else { + /* + * When emulating GICv3 on GICv3 with SRE=1 on the + * VFIQEn bit is RES1 and the VAckCtl bit is RES0. + */ + vmcr = ICH_VMCR_FIQ_EN_MASK; + } + + vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK; + vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK; vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; @@ -180,17 +189,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + u32 model = vcpu->kvm->arch.vgic.vgic_model; u32 vmcr; vmcr = cpu_if->vgic_vmcr; - /* - * Ignore the FIQen bit, because GIC emulation always implies - * SRE=1 which means the vFIQEn bit is also RES1. - */ - vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) << - ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK; - vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT; + if (model == KVM_DEV_TYPE_ARM_VGIC_V2) { + vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >> + ICH_VMCR_ACK_CTL_SHIFT; + vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >> + ICH_VMCR_FIQ_EN_SHIFT; + } else { + /* + * When emulating GICv3 on GICv3 with SRE=1 on the + * VFIQEn bit is RES1 and the VAckCtl bit is RES0. + */ + vmcrp->fiqen = 1; + vmcrp->ackctl = 0; + } + + vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT; + vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT; vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index da83e4caa272..bba7fa22a7f7 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq) * registers regardless of the hardware backed GIC used. */ struct vgic_vmcr { - u32 ctlr; + u32 grpen0; + u32 grpen1; + + u32 ackctl; + u32 fiqen; + u32 cbpr; + u32 eoim; + u32 abpr; u32 bpr; u32 pmr; /* Priority mask field in the GICC_PMR and * ICC_PMR_EL1 priority field format */ - /* Below member variable are valid only for GICv3 */ - u32 grpen0; - u32 grpen1; }; struct vgic_reg_attr { -- cgit v1.2.3 From 71ebc9a3795818eab52e81bbcbdfae130ee35d9e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 16 May 2017 12:10:42 +0100 Subject: dma-buf/sync-file: Defer creation of sync_file->name Constructing the name takes the majority of the time for allocating a sync_file to wrap a fence, and the name is very rarely used (only via the sync_file status user interface). To reduce the impact on the common path (that of creating sync_file to pass around), defer the construction of the name until it is first used. 
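As a usage sketch (hypothetical my_print_sync_file(), mirroring the sync_debug change below): consumers that still want the name now pass scratch space to sync_file_get_name() instead of reading a precomputed sync_file->name:

#include <linux/seq_file.h>
#include <linux/sync_file.h>

/* Hypothetical debug printer: the name is built on demand into buf. */
static void my_print_sync_file(struct seq_file *s, struct sync_file *sync_file)
{
        char buf[128];

        seq_printf(s, "[%p] %s\n", sync_file,
                   sync_file_get_name(sync_file, buf, sizeof(buf)));
}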
v2: Update kerneldoc (kbuild test robot) v3: sync_debug.c was peeking at the name v4: Comment upon the potential race between two users of sync_file_get_name() and claim that such a race is below the level of notice. However, to prevent any future nuisance, use a global spinlock to serialize the assignment of the name. v5: Completely avoid the read/write race by only storing the name passed in from the user inside sync_file->user_name and passing in a buffer to dynamically construct the name otherwise. Signed-off-by: Chris Wilson Cc: Sumit Semwal Cc: Gustavo Padovan Cc: Daniel Vetter Cc: David Herrmann Reviewed-by: Daniel Vetter Signed-off-by: Gustavo Padovan Link: http://patchwork.freedesktop.org/patch/msgid/20170516111042.24719-1-chris@chris-wilson.co.uk --- drivers/dma-buf/sync_debug.c | 4 +++- drivers/dma-buf/sync_file.c | 39 ++++++++++++++++++++++++++++++++------- include/linux/sync_file.h | 11 +++++++++-- 3 files changed, 44 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index a0d780ab68c3..82a6e7f6d37f 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -132,9 +132,11 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) static void sync_print_sync_file(struct seq_file *s, struct sync_file *sync_file) { + char buf[128]; int i; - seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name, + seq_printf(s, "[%p] %s: %s\n", sync_file, + sync_file_get_name(sync_file, buf, sizeof(buf)), sync_status_str(dma_fence_get_status(sync_file->fence))); if (dma_fence_is_array(sync_file->fence)) { diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index dc89b1d484e8..545e2c5c4815 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -80,11 +80,6 @@ struct sync_file *sync_file_create(struct dma_fence *fence) sync_file->fence = dma_fence_get(fence); - snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), fence->context, - fence->seqno); - return sync_file; } EXPORT_SYMBOL(sync_file_create); @@ -129,6 +124,36 @@ struct dma_fence *sync_file_get_fence(int fd) } EXPORT_SYMBOL(sync_file_get_fence); +/** + * sync_file_get_name - get the name of the sync_file + * @sync_file: sync_file to get the fence from + * @buf: destination buffer to copy sync_file name into + * @len: available size of destination buffer. + * + * Each sync_file may have a name assigned either by the user (when merging + * sync_files together) or created from the fence it contains. In the latter + * case construction of the name is deferred until use, and so requires + * sync_file_get_name(). + * + * Returns: a string representing the name. 
+ */ +char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len) +{ + if (sync_file->user_name[0]) { + strlcpy(buf, sync_file->user_name, len); + } else { + struct dma_fence *fence = sync_file->fence; + + snprintf(buf, len, "%s-%s%llu-%d", + fence->ops->get_driver_name(fence), + fence->ops->get_timeline_name(fence), + fence->context, + fence->seqno); + } + + return buf; +} + static int sync_file_set_fence(struct sync_file *sync_file, struct dma_fence **fences, int num_fences) { @@ -266,7 +291,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, goto err; } - strlcpy(sync_file->name, name, sizeof(sync_file->name)); + strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name)); return sync_file; err: @@ -413,7 +438,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, } no_fences: - strlcpy(info.name, sync_file->name, sizeof(info.name)); + sync_file_get_name(sync_file, info.name, sizeof(info.name)); info.status = dma_fence_is_signaled(sync_file->fence); info.num_fences = num_fences; diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index d37beefdfbd5..5726107963b2 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h @@ -23,7 +23,6 @@ /** * struct sync_file - sync file to export to the userspace * @file: file representing this fence - * @name: name of sync_file. Useful for debugging * @sync_file_list: membership in global file list * @wq: wait queue for fence signaling * @fence: fence with the fences in the sync_file @@ -31,7 +30,14 @@ */ struct sync_file { struct file *file; - char name[32]; + /** + * @user_name: + * + * Name of the sync file provided by userspace, for merged fences. + * Otherwise generated through driver callbacks (in which case the + * entire array is 0). + */ + char user_name[32]; #ifdef CONFIG_DEBUG_FS struct list_head sync_file_list; #endif @@ -46,5 +52,6 @@ struct sync_file { struct sync_file *sync_file_create(struct dma_fence *fence); struct dma_fence *sync_file_get_fence(int fd); +char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len); #endif /* _LINUX_SYNC_H */ -- cgit v1.2.3 From 41c25707d21716826e3c1f60967f5550610ec1c9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 May 2017 12:03:48 -0400 Subject: cpuset: consider dying css as offline In most cases, a cgroup controller don't care about the liftimes of cgroups. For the controller, a css becomes online when ->css_online() is called on it and offline when ->css_offline() is called. However, cpuset is special in that the user interface it exposes cares whether certain cgroups exist or not. Combined with the RCU delay between cgroup removal and css offlining, this can lead to user visible behavior oddities where operations which should succeed after cgroup removals fail for some time period. The effects of cgroup removals are delayed when seen from userland. This patch adds css_is_dying() which tests whether offline is pending and updates is_cpuset_online() so that the function returns false also while offline is pending. This gets rid of the userland visible delays. 
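A minimal sketch of the intended use (hypothetical my_state/MY_ONLINE names; the cpuset hunk below does the same thing in is_cpuset_online()):

#include <linux/bitops.h>
#include <linux/cgroup.h>

/* Hypothetical controller state wrapping a css. */
struct my_state {
        struct cgroup_subsys_state css;
        unsigned long flags;
};
#define MY_ONLINE       0

/* Treat a css that is only scheduled for offlining as already offline, so
 * user-visible operations against a removed cgroup fail immediately
 * instead of succeeding until the RCU-delayed ->css_offline() runs. */
static bool my_state_online(struct my_state *s)
{
        return test_bit(MY_ONLINE, &s->flags) && !css_is_dying(&s->css);
}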
Signed-off-by: Tejun Heo Reported-by: Daniel Jordan Link: http://lkml.kernel.org/r/327ca1f5-7957-fbb9-9e5f-9ba149d40ba2@oracle.com Cc: stable@vger.kernel.org Signed-off-by: Tejun Heo --- include/linux/cgroup.h | 20 ++++++++++++++++++++ kernel/cgroup/cpuset.c | 4 ++-- 2 files changed, 22 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ed2573e149fa..710a005c6b7a 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -343,6 +343,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css) return true; } +/** + * css_is_dying - test whether the specified css is dying + * @css: target css + * + * Test whether @css is in the process of offlining or already offline. In + * most cases, ->css_online() and ->css_offline() callbacks should be + * enough; however, the actual offline operations are RCU delayed and this + * test returns %true also when @css is scheduled to be offlined. + * + * This is useful, for example, when the use case requires synchronous + * behavior with respect to cgroup removal. cgroup removal schedules css + * offlining but the css can seem alive while the operation is being + * delayed. If the delay affects user visible semantics, this test can be + * used to resolve the situation. + */ +static inline bool css_is_dying(struct cgroup_subsys_state *css) +{ + return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); +} + /** * css_put - put a css reference * @css: target css diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index f6501f4f6040..ae643412948a 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -176,9 +176,9 @@ typedef enum { } cpuset_flagbits_t; /* convenient tests for these bits */ -static inline bool is_cpuset_online(const struct cpuset *cs) +static inline bool is_cpuset_online(struct cpuset *cs) { - return test_bit(CS_ONLINE, &cs->flags); + return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); } static inline int is_cpu_exclusive(const struct cpuset *cs) -- cgit v1.2.3 From b8cb5a545c3dd8b975aad19ea020eabe0a888e8d Mon Sep 17 00:00:00 2001 From: Tahsin Erdogan Date: Wed, 24 May 2017 18:24:07 -0400 Subject: ext4: fix quota charging for shared xattr blocks ext4_xattr_block_set() calls dquot_alloc_block() to charge for an xattr block when new references are made. However if dquot_initialize() hasn't been called on an inode, request for charging is effectively ignored because ext4_inode_info->i_dquot is not initialized yet. Add dquot_initialize() to call paths that lead to ext4_xattr_block_set(). 
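A sketch of the calling convention this establishes (my_xattr_path() is a hypothetical stand-in for the real entry points ext4_set_acl(), ext4_set_context() and ext4_xattr_set() patched below; the new dquot_initialize_needed() helper only backs a WARN_ON_ONCE() tripwire inside ext4_xattr_block_set()):

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Hypothetical entry point: attach the inode's dquots before the
 * transaction that may reach ext4_xattr_block_set(), so its
 * dquot_alloc_block() charge is not silently dropped. */
static int my_xattr_path(struct inode *inode)
{
        int error;

        error = dquot_initialize(inode);
        if (error)
                return error;

        /* ... start the journal handle and modify the xattr as before ... */
        return 0;
}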
Signed-off-by: Tahsin Erdogan Signed-off-by: Theodore Ts'o Reviewed-by: Jan Kara --- fs/ext4/acl.c | 4 ++++ fs/ext4/super.c | 3 +++ fs/ext4/xattr.c | 8 ++++++++ fs/quota/dquot.c | 16 ++++++++++++++++ include/linux/quotaops.h | 6 ++++++ 5 files changed, 37 insertions(+) (limited to 'include/linux') diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index fd389935ecd1..3ec0e46de95f 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -4,6 +4,7 @@ * Copyright (C) 2001-2003 Andreas Gruenbacher, */ +#include #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" @@ -232,6 +233,9 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type) handle_t *handle; int error, retries = 0; + error = dquot_initialize(inode); + if (error) + return error; retry: handle = ext4_journal_start(inode, EXT4_HT_XATTR, ext4_jbd2_credits_xattr(inode)); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 8f05c72bc4ec..d37c81f327e7 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1174,6 +1174,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, return res; } + res = dquot_initialize(inode); + if (res) + return res; retry: handle = ext4_journal_start(inode, EXT4_HT_MISC, ext4_jbd2_credits_xattr(inode)); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 8fb7ce14e6eb..5d3c2536641c 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -888,6 +888,8 @@ inserted: else { u32 ref; + WARN_ON_ONCE(dquot_initialize_needed(inode)); + /* The old block is released after updating the inode. */ error = dquot_alloc_block(inode, @@ -954,6 +956,8 @@ inserted: /* We need to allocate a new block */ ext4_fsblk_t goal, block; + WARN_ON_ONCE(dquot_initialize_needed(inode)); + goal = ext4_group_first_block_no(sb, EXT4_I(inode)->i_block_group); @@ -1166,6 +1170,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, return -EINVAL; if (strlen(name) > 255) return -ERANGE; + ext4_write_lock_xattr(inode, &no_expand); error = ext4_reserve_inode_write(handle, inode, &is.iloc); @@ -1267,6 +1272,9 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name, int error, retries = 0; int credits = ext4_jbd2_credits_xattr(inode); + error = dquot_initialize(inode); + if (error) + return error; retry: handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits); if (IS_ERR(handle)) { diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index ebf80c7739e1..48813aeaab80 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -1512,6 +1512,22 @@ int dquot_initialize(struct inode *inode) } EXPORT_SYMBOL(dquot_initialize); +bool dquot_initialize_needed(struct inode *inode) +{ + struct dquot **dquots; + int i; + + if (!dquot_active(inode)) + return false; + + dquots = i_dquot(inode); + for (i = 0; i < MAXQUOTAS; i++) + if (!dquots[i] && sb_has_quota_active(inode->i_sb, i)) + return true; + return false; +} +EXPORT_SYMBOL(dquot_initialize_needed); + /* * Release all quotas referenced by inode. 
* diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 9c6f768b7d32..dda22f45fc1b 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -44,6 +44,7 @@ void inode_sub_rsv_space(struct inode *inode, qsize_t number); void inode_reclaim_rsv_space(struct inode *inode, qsize_t number); int dquot_initialize(struct inode *inode); +bool dquot_initialize_needed(struct inode *inode); void dquot_drop(struct inode *inode); struct dquot *dqget(struct super_block *sb, struct kqid qid); static inline struct dquot *dqgrab(struct dquot *dquot) @@ -207,6 +208,11 @@ static inline int dquot_initialize(struct inode *inode) return 0; } +static inline bool dquot_initialize_needed(struct inode *inode) +{ + return false; +} + static inline void dquot_drop(struct inode *inode) { } -- cgit v1.2.3 From 4b1c88984c8ac894c2c411570757bed7fa5f3226 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 18 May 2017 15:13:57 +0200 Subject: iommu/dma: Fix function declaration Newly added code in the ipmmu-vmsa driver showed a small mistake in a header file that can't be included by itself without CONFIG_IOMMU_DMA enabled: In file included from drivers/iommu/ipmmu-vmsa.c:13:0: include/linux/dma-iommu.h:105:94: error: 'struct device' declared inside parameter list will not be visible outside of this definition or declaration [-Werror] This adds a forward declaration for 'struct device', similar to how we treat the other struct types in this case. Fixes: 3ae47292024f ("iommu/ipmmu-vmsa: Add new IOMMU_DOMAIN_DMA ops") Fixes: 273df9635385 ("iommu/dma: Make PCI window reservation generic") Signed-off-by: Arnd Bergmann Acked-by: Robin Murphy Signed-off-by: Joerg Roedel --- include/linux/dma-iommu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 4eac2670bfa1..92f20832fd28 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -78,6 +78,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); struct iommu_domain; struct msi_msg; +struct device; static inline int iommu_dma_init(void) { -- cgit v1.2.3 From 278cba7eaf5422510fc4a6b5a4d447f17b00506e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 14 Dec 2016 03:35:03 +0200 Subject: drm: omapdrm: Remove unused default display name support The default display name is both unused and never set by platform data. Remove default display name module parameter, platform data field and runtime infrastructure. 
Signed-off-by: Laurent Pinchart Acked-by: Bartlomiej Zolnierkiewicz Reviewed-by: Tomi Valkeinen Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/core.c | 18 ------------------ drivers/gpu/drm/omapdrm/dss/omapdss.h | 1 - drivers/video/fbdev/omap2/omapfb/dss/core.c | 2 -- include/linux/platform_data/omapdss.h | 1 - 4 files changed, 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c index 3351ce23f413..bc4fa4ec8557 100644 --- a/drivers/gpu/drm/omapdrm/dss/core.c +++ b/drivers/gpu/drm/omapdrm/dss/core.c @@ -41,20 +41,8 @@ static struct { struct platform_device *pdev; - - const char *default_display_name; } core; -static char *def_disp_name; -module_param_named(def_disp, def_disp_name, charp, 0); -MODULE_PARM_DESC(def_disp, "default display name"); - -const char *omapdss_get_default_display_name(void) -{ - return core.default_display_name; -} -EXPORT_SYMBOL(omapdss_get_default_display_name); - enum omapdss_version omapdss_get_version(void) { struct omap_dss_board_info *pdata = core.pdev->dev.platform_data; @@ -175,7 +163,6 @@ static void dss_disable_all_devices(void) static int __init omap_dss_probe(struct platform_device *pdev) { - struct omap_dss_board_info *pdata = pdev->dev.platform_data; int r; core.pdev = pdev; @@ -186,11 +173,6 @@ static int __init omap_dss_probe(struct platform_device *pdev) if (r) goto err_debugfs; - if (def_disp_name) - core.default_display_name = def_disp_name; - else if (pdata->default_display_name) - core.default_display_name = pdata->default_display_name; - return 0; err_debugfs: diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index b19dae1fd6c5..3bee528380cd 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -781,7 +781,6 @@ void omap_dss_put_device(struct omap_dss_device *dssdev); struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from); struct omap_dss_device *omap_dss_find_device(void *data, int (*match)(struct omap_dss_device *dssdev, void *data)); -const char *omapdss_get_default_display_name(void); int dss_feat_get_num_mgrs(void); int dss_feat_get_num_ovls(void); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c index 29de4827589d..eecf695c16f4 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c @@ -206,8 +206,6 @@ static int __init omap_dss_probe(struct platform_device *pdev) if (def_disp_name) core.default_display_name = def_disp_name; - else if (pdata->default_display_name) - core.default_display_name = pdata->default_display_name; register_pm_notifier(&omap_dss_pm_notif_block); diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h index 679177929045..7feb011ed500 100644 --- a/include/linux/platform_data/omapdss.h +++ b/include/linux/platform_data/omapdss.h @@ -27,7 +27,6 @@ enum omapdss_version { /* Board specific data */ struct omap_dss_board_info { - const char *default_display_name; int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask); void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask); int (*set_min_bus_tput)(struct device *dev, unsigned long r); -- cgit v1.2.3 From 6dc06c08bef1c746ff8da33dab677cfbacdcad32 Mon Sep 17 00:00:00 2001 From: Talat Batheesh Date: Sun, 4 Jun 2017 14:30:07 +0300 Subject: net/mlx4: Fix the check in attaching steering rules Our previous patch (cited below) introduced 
a regression for RAW Eth QPs. Fix it by checking if the QP number provided by user-space exists, hence allowing steering rules to be added for valid QPs only. Fixes: 89c557687a32 ("net/mlx4_en: Avoid adding steering rules with invalid ring") Reported-by: Or Gerlitz Signed-off-by: Talat Batheesh Signed-off-by: Tariq Toukan Acked-by: Or Gerlitz Reviewed-by: Leon Romanovsky Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 5 ----- drivers/net/ethernet/mellanox/mlx4/mcg.c | 15 +++++++++++---- drivers/net/ethernet/mellanox/mlx4/qp.c | 13 +++++++++++++ include/linux/mlx4/qp.h | 1 + 4 files changed, 25 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ae5fdc2df654..ffbcb27c05e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev, qpn = priv->drop_qp.qpn; else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); - if (qpn < priv->rss_map.base_qpn || - qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) { - en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn); - return -EINVAL; - } } else { if (cmd->fs.ring_cookie >= priv->rx_ring_num) { en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 1a670b681555..0710b3677464 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -35,6 +35,7 @@ #include #include +#include #include #include "mlx4.h" @@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); + if (!mlx4_qp_lookup(dev, rule->qpn)) { + mlx4_err_rule(dev, "QP doesn't exist\n", rule); + ret = -EINVAL; + goto out; + } + trans_rule_ctrl_to_hw(rule, mailbox->buf); size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); list_for_each_entry(cur, &rule->list, list) { ret = parse_trans_rule(dev, cur, mailbox->buf + size); - if (ret < 0) { - mlx4_free_cmd_mailbox(dev, mailbox); - return ret; - } + if (ret < 0) + goto out; + size += ret; } @@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev, } } +out: mlx4_free_cmd_mailbox(dev, mailbox); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 2d6abd4662b1..ad92d2311478 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) __mlx4_qp_free_icm(dev, qpn); } +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + struct mlx4_qp *qp; + + spin_lock(&qp_table->lock); + + qp = __mlx4_qp_lookup(dev, qpn); + + spin_unlock(&qp_table->lock); + return qp; +} + int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index b4ee8f62ce8d..8e2828d48d7f 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -470,6 +470,7 @@ struct mlx4_update_qp_params { u16 rate_val; }; +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn); int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct 
mlx4_update_qp_params *params); -- cgit v1.2.3 From 9bd2bbc01d17ddd567cc0f81f77fe1163e497462 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 2 Jun 2017 20:35:51 -0700 Subject: elevator: fix truncation of icq_cache_name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc 7.1 reports the following warning: block/elevator.c: In function ‘elv_register’: block/elevator.c:898:5: warning: ‘snprintf’ output may be truncated before the last format character [-Wformat-truncation=] "%s_io_cq", e->elevator_name); ^~~~~~~~~~ block/elevator.c:897:3: note: ‘snprintf’ output between 7 and 22 bytes into a destination of size 21 snprintf(e->icq_cache_name, sizeof(e->icq_cache_name), ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ "%s_io_cq", e->elevator_name); ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The bug is that the name of the icq_cache is 6 characters longer than the elevator name, but only ELV_NAME_MAX + 5 characters were reserved for it --- so in the case of a maximum-length elevator name, the 'q' character in "_io_cq" would be truncated by snprintf(). Fix it by reserving ELV_NAME_MAX + 6 characters instead. Signed-off-by: Eric Biggers Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/elevator.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 9ec5e22846e0..0e306c5a86d6 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -153,7 +153,7 @@ struct elevator_type #endif /* managed by elevator core */ - char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ + char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */ struct list_head list; }; -- cgit v1.2.3 From abb2ea7dfd82451d85ce669b811310c05ab5ca46 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 6 Jun 2017 13:36:24 -0700 Subject: compiler, clang: suppress warning for unused static inline functions GCC explicitly does not warn for unused static inline functions for -Wunused-function. The manual states: Warn whenever a static function is declared but not defined or a non-inline static function is unused. Clang does warn for static inline functions that are unused. It turns out that suppressing the warnings avoids potentially complex #ifdef directives, which also reduces LOC. Suppress the warning for clang. Signed-off-by: David Rientjes Signed-off-by: Linus Torvalds --- include/linux/compiler-clang.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index de179993e039..ea9126006a69 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -15,3 +15,10 @@ * with any version that can compile the kernel */ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +/* + * GCC does not warn about unused static inline functions for + * -Wunused-function. This turns out to avoid the need for complex #ifdef + * directives. Suppress the warning in clang as well. + */ +#define inline inline __attribute__((unused)) -- cgit v1.2.3 From f3b7eaae1b35eb8077610eb7c7db042c9b0645e1 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Wed, 7 Jun 2017 00:57:37 +0200 Subject: Revert "ACPI / sleep: Ignore spurious SCI wakeups from suspend-to-idle" Revert commit eed4d47efe95 (ACPI / sleep: Ignore spurious SCI wakeups from suspend-to-idle) as it turned out to be premature and triggered a number of different issues on various systems. That includes, but is not limited to, premature suspend-to-RAM aborts on Dell XPS 13 (9343) reported by Dominik. The issue the commit in question attempted to address is real and will need to be taken care of going forward, but evidently more work is needed for this purpose. Reported-by: Dominik Brodowski Signed-off-by: Rafael J. Wysocki --- drivers/acpi/battery.c | 2 +- drivers/acpi/button.c | 5 ++--- drivers/acpi/device_pm.c | 3 +-- drivers/acpi/sleep.c | 28 ---------------------------- drivers/base/power/main.c | 5 +++++ drivers/base/power/wakeup.c | 18 ++++++------------ include/linux/suspend.h | 7 ++----- kernel/power/process.c | 2 +- kernel/power/suspend.c | 29 ++++------------------------- 9 files changed, 22 insertions(+), 77 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 83ab17e4a795..4ef1e4624b2b 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -776,7 +776,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume) if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) || (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && (battery->capacity_now <= battery->alarm))) - pm_wakeup_hard_event(&battery->device->dev); + pm_wakeup_event(&battery->device->dev, 0); return result; } diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index b7c2a06963d6..668137e4a069 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -216,7 +216,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state) } if (state) - pm_wakeup_hard_event(&device->dev); + pm_wakeup_event(&device->dev, 0); ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); if (ret == NOTIFY_DONE) @@ -398,7 +398,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) } else { int keycode; - pm_wakeup_hard_event(&device->dev); + pm_wakeup_event(&device->dev, 0); if (button->suspended) break; @@ -530,7 +530,6 @@ static int acpi_button_add(struct acpi_device *device) lid_device = device; } - device_init_wakeup(&device->dev, true); printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); return 0; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 798d5003a039..993fd31394c8 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -24,7 +24,6 @@ #include #include #include -#include #include "internal.h" @@ -400,7 +399,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) mutex_lock(&acpi_pm_notifier_lock); if (adev->wakeup.flags.notifier_present) { - pm_wakeup_ws_event(adev->wakeup.ws, 0, true); + __pm_wakeup_event(adev->wakeup.ws, 0); if (adev->wakeup.context.work.func) queue_pm_work(&adev->wakeup.context.work); } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index e84005d642e6..a4327af676fe 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -662,40 +662,14 @@ static int acpi_freeze_prepare(void) acpi_os_wait_events_complete(); if (acpi_sci_irq_valid()) enable_irq_wake(acpi_sci_irq); - return 0; } -static void acpi_freeze_wake(void) -{ - /* - * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means - * that the SCI has triggered while suspended, 
so cancel the wakeup in - * case it has not been a wakeup event (the GPEs will be checked later). - */ - if (acpi_sci_irq_valid() && - !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) - pm_system_cancel_wakeup(); -} - -static void acpi_freeze_sync(void) -{ - /* - * Process all pending events in case there are any wakeup ones. - * - * The EC driver uses the system workqueue, so that one needs to be - * flushed too. - */ - acpi_os_wait_events_complete(); - flush_scheduled_work(); -} - static void acpi_freeze_restore(void) { acpi_disable_wakeup_devices(ACPI_STATE_S0); if (acpi_sci_irq_valid()) disable_irq_wake(acpi_sci_irq); - acpi_enable_all_runtime_gpes(); } @@ -707,8 +681,6 @@ static void acpi_freeze_end(void) static const struct platform_freeze_ops acpi_freeze_ops = { .begin = acpi_freeze_begin, .prepare = acpi_freeze_prepare, - .wake = acpi_freeze_wake, - .sync = acpi_freeze_sync, .restore = acpi_freeze_restore, .end = acpi_freeze_end, }; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index e987a6f55d36..9faee1c893e5 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1091,6 +1091,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (async_error) goto Complete; + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } + if (dev->power.syscore || dev->power.direct_complete) goto Complete; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 9c36b27996fc..c313b600d356 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly; /* First wakeup IRQ seen by the kernel in the last cycle. */ unsigned int pm_wakeup_irq __read_mostly; -/* If greater than 0 and the system is suspending, terminate the suspend. */ -static atomic_t pm_abort_suspend __read_mostly; +/* If set and the system is suspending, terminate the suspend. */ +static bool pm_abort_suspend __read_mostly; /* * Combined counters of registered wakeup events and wakeup events in progress. 
@@ -855,26 +855,20 @@ bool pm_wakeup_pending(void) pm_print_active_wakeup_sources(); } - return ret || atomic_read(&pm_abort_suspend) > 0; + return ret || pm_abort_suspend; } void pm_system_wakeup(void) { - atomic_inc(&pm_abort_suspend); + pm_abort_suspend = true; freeze_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); -void pm_system_cancel_wakeup(void) -{ - atomic_dec(&pm_abort_suspend); -} - -void pm_wakeup_clear(bool reset) +void pm_wakeup_clear(void) { + pm_abort_suspend = false; pm_wakeup_irq = 0; - if (reset) - atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 0b1cf32edfd7..d9718378a8be 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -189,8 +189,6 @@ struct platform_suspend_ops { struct platform_freeze_ops { int (*begin)(void); int (*prepare)(void); - void (*wake)(void); - void (*sync)(void); void (*restore)(void); void (*end)(void); }; @@ -430,8 +428,7 @@ extern unsigned int pm_wakeup_irq; extern bool pm_wakeup_pending(void); extern void pm_system_wakeup(void); -extern void pm_system_cancel_wakeup(void); -extern void pm_wakeup_clear(bool reset); +extern void pm_wakeup_clear(void); extern void pm_system_irq_wakeup(unsigned int irq_number); extern bool pm_get_wakeup_count(unsigned int *count, bool block); extern bool pm_save_wakeup_count(unsigned int count); @@ -481,7 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) static inline bool pm_wakeup_pending(void) { return false; } static inline void pm_system_wakeup(void) {} -static inline void pm_wakeup_clear(bool reset) {} +static inline void pm_wakeup_clear(void) {} static inline void pm_system_irq_wakeup(unsigned int irq_number) {} static inline void lock_system_sleep(void) {} diff --git a/kernel/power/process.c b/kernel/power/process.c index 78672d324a6e..c7209f060eeb 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -132,7 +132,7 @@ int freeze_processes(void) if (!pm_freezing) atomic_inc(&system_freezing_cnt); - pm_wakeup_clear(true); + pm_wakeup_clear(); pr_info("Freezing user space processes ... "); pm_freezing = true; error = try_to_freeze_tasks(true); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index c0248c74d6d4..15e6baef5c73 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -72,8 +72,6 @@ static void freeze_begin(void) static void freeze_enter(void) { - trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true); - spin_lock_irq(&suspend_freeze_lock); if (pm_wakeup_pending()) goto out; @@ -100,27 +98,6 @@ static void freeze_enter(void) out: suspend_freeze_state = FREEZE_STATE_NONE; spin_unlock_irq(&suspend_freeze_lock); - - trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false); -} - -static void s2idle_loop(void) -{ - do { - freeze_enter(); - - if (freeze_ops && freeze_ops->wake) - freeze_ops->wake(); - - dpm_resume_noirq(PMSG_RESUME); - if (freeze_ops && freeze_ops->sync) - freeze_ops->sync(); - - if (pm_wakeup_pending()) - break; - - pm_wakeup_clear(false); - } while (!dpm_suspend_noirq(PMSG_SUSPEND)); } void freeze_wake(void) @@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) * all the devices are suspended. 
*/ if (state == PM_SUSPEND_FREEZE) { - s2idle_loop(); - goto Platform_early_resume; + trace_suspend_resume(TPS("machine_suspend"), state, true); + freeze_enter(); + trace_suspend_resume(TPS("machine_suspend"), state, false); + goto Platform_wake; } error = disable_nonboot_cpus(); -- cgit v1.2.3 From cf124db566e6b036b8bcbe8decbed740bdfac8c6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 8 May 2017 12:52:56 -0400 Subject: net: Fix inconsistent teardown and release of private netdev state. Network devices can allocate resources and private memory using netdev_ops->ndo_init(). However, the release of these resources can occur in one of two different places. Either netdev_ops->ndo_uninit() or netdev->destructor(). The decision of which operation frees the resources depends upon whether it is necessary for all netdev refs to be released before it is safe to perform the freeing. netdev_ops->ndo_uninit() presumably can occur right after the NETDEV_UNREGISTER notifier completes and the unicast and multicast address lists are flushed. netdev->destructor(), on the other hand, does not run until the netdev references all go away. Further complicating the situation is that netdev->destructor() almost universally also does a free_netdev(). This creates a problem for the logic in register_netdevice(), because all callers of register_netdevice() manage the freeing of the netdev and invoke free_netdev(dev) if register_netdevice() fails. If netdev_ops->ndo_init() succeeds, but something else fails inside of register_netdevice(), it does call netdev_ops->ndo_uninit(). But it is not able to invoke netdev->destructor(). This is because netdev->destructor() will do a free_netdev() and then the caller of register_netdevice() will do the same. However, this means that the resources that would normally be released by netdev->destructor() will not be. Over the years drivers have added local hacks to deal with this, by invoking their destructor parts by hand when register_netdevice() fails. Many drivers do not try to deal with this, and instead we have leaks. Let's close this hole by formalizing the distinction between what private things need to be freed up by netdev->destructor() and whether the driver needs unregister_netdevice() to perform the free_netdev(). netdev->priv_destructor() performs all actions to free up the private resources that used to be freed by netdev->destructor(), except for free_netdev(). netdev->needs_free_netdev is a boolean that indicates whether free_netdev() should be done at the end of unregister_netdevice(). Now, register_netdevice() can sanely release all resources after netdev_ops->ndo_init() succeeds, by invoking both netdev_ops->ndo_uninit() and netdev->priv_destructor(). And at the end of unregister_netdevice(), we invoke netdev->priv_destructor() and optionally call free_netdev(). Signed-off-by: David S.
Miller --- drivers/net/bonding/bond_main.c | 6 +++--- drivers/net/caif/caif_hsi.c | 2 +- drivers/net/caif/caif_serial.c | 2 +- drivers/net/caif/caif_spi.c | 2 +- drivers/net/caif/caif_virtio.c | 2 +- drivers/net/can/slcan.c | 7 +++---- drivers/net/can/vcan.c | 2 +- drivers/net/can/vxcan.c | 2 +- drivers/net/dummy.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- drivers/net/geneve.c | 2 +- drivers/net/gtp.c | 2 +- drivers/net/hamradio/6pack.c | 2 +- drivers/net/hamradio/bpqether.c | 2 +- drivers/net/ifb.c | 4 ++-- drivers/net/ipvlan/ipvlan_main.c | 2 +- drivers/net/loopback.c | 4 ++-- drivers/net/macsec.c | 4 ++-- drivers/net/macvlan.c | 2 +- drivers/net/nlmon.c | 2 +- drivers/net/slip/slip.c | 7 +++---- drivers/net/team/team.c | 4 ++-- drivers/net/tun.c | 4 ++-- drivers/net/usb/cdc-phonet.c | 2 +- drivers/net/usb/qmi_wwan.c | 2 +- drivers/net/veth.c | 4 ++-- drivers/net/vrf.c | 2 +- drivers/net/vsockmon.c | 2 +- drivers/net/vxlan.c | 2 +- drivers/net/wan/dlci.c | 2 +- drivers/net/wan/hdlc_fr.c | 2 +- drivers/net/wan/lapbether.c | 2 +- drivers/net/wireless/ath/ath6kl/main.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 1 - drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 3 ++- drivers/net/wireless/intersil/hostap/hostap_main.c | 2 +- drivers/net/wireless/mac80211_hwsim.c | 2 +- drivers/net/wireless/marvell/mwifiex/main.c | 2 +- drivers/staging/rtl8188eu/os_dep/mon.c | 2 +- drivers/usb/gadget/function/f_phonet.c | 2 +- include/linux/netdevice.h | 7 ++++--- net/8021q/vlan_dev.c | 4 ++-- net/batman-adv/soft-interface.c | 5 ++--- net/bluetooth/6lowpan.c | 2 +- net/bridge/br_device.c | 2 +- net/caif/chnl_net.c | 4 ++-- net/core/dev.c | 8 ++++++-- net/hsr/hsr_device.c | 4 ++-- net/ieee802154/6lowpan/core.c | 2 +- net/ipv4/ip_tunnel.c | 4 ++-- net/ipv4/ipmr.c | 2 +- net/ipv6/ip6_gre.c | 9 +++++---- net/ipv6/ip6_tunnel.c | 8 ++++---- net/ipv6/ip6_vti.c | 8 ++++---- net/ipv6/ip6mr.c | 2 +- net/ipv6/sit.c | 6 +++--- net/irda/irlan/irlan_eth.c | 2 +- net/l2tp/l2tp_eth.c | 2 +- net/mac80211/iface.c | 6 +++--- net/mac802154/iface.c | 7 +++---- net/openvswitch/vport-internal_dev.c | 4 ++-- net/phonet/pep-gprs.c | 2 +- 62 files changed, 105 insertions(+), 103 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2359478b977f..8ab6bdbe1682 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4192,7 +4192,6 @@ static void bond_destructor(struct net_device *bond_dev) struct bonding *bond = netdev_priv(bond_dev); if (bond->wq) destroy_workqueue(bond->wq); - free_netdev(bond_dev); } void bond_setup(struct net_device *bond_dev) @@ -4212,7 +4211,8 @@ void bond_setup(struct net_device *bond_dev) bond_dev->netdev_ops = &bond_netdev_ops; bond_dev->ethtool_ops = &bond_ethtool_ops; - bond_dev->destructor = bond_destructor; + bond_dev->needs_free_netdev = true; + bond_dev->priv_destructor = bond_destructor; SET_NETDEV_DEVTYPE(bond_dev, &bond_type); @@ -4736,7 +4736,7 @@ int bond_create(struct net *net, const char *name) rtnl_unlock(); if (res < 0) - bond_destructor(bond_dev); + free_netdev(bond_dev); return res; } diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index ddabce759456..71a7c3b44fdd 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1121,7 +1121,7 @@ static void cfhsi_setup(struct net_device *dev) dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; dev->priv_flags |= 
IFF_NO_QUEUE; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->netdev_ops = &cfhsi_netdevops; for (i = 0; i < CFHSI_PRIO_LAST; ++i) skb_queue_head_init(&cfhsi->qhead[i]); diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index c2dea4916e5d..76e1d3545105 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -428,7 +428,7 @@ static void caifdev_setup(struct net_device *dev) dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = CAIF_MAX_MTU; dev->priv_flags |= IFF_NO_QUEUE; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; skb_queue_head_init(&serdev->head); serdev->common.link_select = CAIF_LINK_LOW_LATENCY; serdev->common.use_frag = true; diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 3a529fbe539f..fc21afe852b9 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -712,7 +712,7 @@ static void cfspi_setup(struct net_device *dev) dev->flags = IFF_NOARP | IFF_POINTOPOINT; dev->priv_flags |= IFF_NO_QUEUE; dev->mtu = SPI_MAX_PAYLOAD_SIZE; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; skb_queue_head_init(&cfspi->qhead); skb_queue_head_init(&cfspi->chead); cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 6122768c8644..1794ea0420b7 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -617,7 +617,7 @@ static void cfv_netdev_setup(struct net_device *netdev) netdev->tx_queue_len = 100; netdev->flags = IFF_POINTOPOINT | IFF_NOARP; netdev->mtu = CFV_DEF_MTU_SIZE; - netdev->destructor = free_netdev; + netdev->needs_free_netdev = true; } /* Create debugfs counters for the device */ diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index eb7173713bbc..6a6e896e52fa 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -417,7 +417,7 @@ static int slc_open(struct net_device *dev) static void slc_free_netdev(struct net_device *dev) { int i = dev->base_addr; - free_netdev(dev); + slcan_devs[i] = NULL; } @@ -436,7 +436,8 @@ static const struct net_device_ops slc_netdev_ops = { static void slc_setup(struct net_device *dev) { dev->netdev_ops = &slc_netdev_ops; - dev->destructor = slc_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = slc_free_netdev; dev->hard_header_len = 0; dev->addr_len = 0; @@ -761,8 +762,6 @@ static void __exit slcan_exit(void) if (sl->tty) { printk(KERN_ERR "%s: tty discipline still running\n", dev->name); - /* Intentionally leak the control block. 
*/ - dev->destructor = NULL; } unregister_netdev(dev); diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index facca33d53e9..0eda1b308583 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev) dev->flags |= IFF_ECHO; dev->netdev_ops = &vcan_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static struct rtnl_link_ops vcan_link_ops __read_mostly = { diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 7fbb24795681..30cf2368becf 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -156,7 +156,7 @@ static void vxcan_setup(struct net_device *dev) dev->tx_queue_len = 0; dev->flags = (IFF_NOARP|IFF_ECHO); dev->netdev_ops = &vxcan_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } /* forward declaration for rtnl_create_link() */ diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 149244aac20a..9905b52fe293 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -328,7 +328,6 @@ static void dummy_free_netdev(struct net_device *dev) struct dummy_priv *priv = netdev_priv(dev); kfree(priv->vfinfo); - free_netdev(dev); } static void dummy_setup(struct net_device *dev) @@ -338,7 +337,8 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &dummy_netdev_ops; dev->ethtool_ops = &dummy_ethtool_ops; - dev->destructor = dummy_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = dummy_free_netdev; /* Fill in device structure with ethernet-generic values. */ dev->flags |= IFF_NOARP; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 77ed2f628f9c..ea1bfcf1870a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4525,7 +4525,7 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &cxgb4_mgmt_netdev_ops; dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static int config_mgmt_dev(struct pci_dev *pdev) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 6ebb0f559a42..199459bd6961 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1007,7 +1007,7 @@ static void geneve_setup(struct net_device *dev) dev->netdev_ops = &geneve_netdev_ops; dev->ethtool_ops = &geneve_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &geneve_type); diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 7b652bb7ebe4..ca110cd2a4e4 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -611,7 +611,7 @@ static const struct net_device_ops gtp_netdev_ops = { static void gtp_link_setup(struct net_device *dev) { dev->netdev_ops = >p_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->hard_header_len = 0; dev->addr_len = 0; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 922bf440e9f1..021a8ec411ab 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -311,7 +311,7 @@ static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. 
*/ dev->netdev_ops = &sp_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index f62e7f325cf9..78a6414c5fd9 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -476,7 +476,7 @@ static const struct net_device_ops bpq_netdev_ops = { static void bpq_setup(struct net_device *dev) { dev->netdev_ops = &bpq_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 312fce7302d3..144ea5ae8ab4 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -207,7 +207,6 @@ static void ifb_dev_free(struct net_device *dev) __skb_queue_purge(&txp->tq); } kfree(dp->tx_private); - free_netdev(dev); } static void ifb_setup(struct net_device *dev) @@ -230,7 +229,8 @@ static void ifb_setup(struct net_device *dev) dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); eth_hw_addr_random(dev); - dev->destructor = ifb_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ifb_dev_free; } static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 618ed88fad0f..7c7680c8f0e3 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -632,7 +632,7 @@ void ipvlan_link_setup(struct net_device *dev) dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; dev->netdev_ops = &ipvlan_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->header_ops = &ipvlan_header_ops; dev->ethtool_ops = &ipvlan_ethtool_ops; } diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 224f65cb576b..30612497643c 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -159,7 +159,6 @@ static void loopback_dev_free(struct net_device *dev) { dev_net(dev)->loopback_dev = NULL; free_percpu(dev->lstats); - free_netdev(dev); } static const struct net_device_ops loopback_ops = { @@ -196,7 +195,8 @@ static void loopback_setup(struct net_device *dev) dev->ethtool_ops = &loopback_ethtool_ops; dev->header_ops = ð_header_ops; dev->netdev_ops = &loopback_ops; - dev->destructor = loopback_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = loopback_dev_free; } /* Setup and register the loopback device. 
*/ diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index cdc347be68f2..79411675f0e6 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2996,7 +2996,6 @@ static void macsec_free_netdev(struct net_device *dev) free_percpu(macsec->secy.tx_sc.stats); dev_put(real_dev); - free_netdev(dev); } static void macsec_setup(struct net_device *dev) @@ -3006,7 +3005,8 @@ static void macsec_setup(struct net_device *dev) dev->max_mtu = ETH_MAX_MTU; dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &macsec_netdev_ops; - dev->destructor = macsec_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = macsec_free_netdev; SET_NETDEV_DEVTYPE(dev, &macsec_type); eth_zero_addr(dev->broadcast); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 346ad2ff3998..67bf7ebae5c6 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1092,7 +1092,7 @@ void macvlan_common_setup(struct net_device *dev) netif_keep_dst(dev); dev->priv_flags |= IFF_UNICAST_FLT; dev->netdev_ops = &macvlan_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->header_ops = &macvlan_hard_header_ops; dev->ethtool_ops = &macvlan_ethtool_ops; } diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index b91603835d26..c4b3362da4a2 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -113,7 +113,7 @@ static void nlmon_setup(struct net_device *dev) dev->netdev_ops = &nlmon_ops; dev->ethtool_ops = &nlmon_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_LLTX; diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 1da31dc47f86..74b907206aa7 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -629,7 +629,7 @@ static void sl_uninit(struct net_device *dev) static void sl_free_netdev(struct net_device *dev) { int i = dev->base_addr; - free_netdev(dev); + slip_devs[i] = NULL; } @@ -651,7 +651,8 @@ static const struct net_device_ops sl_netdev_ops = { static void sl_setup(struct net_device *dev) { dev->netdev_ops = &sl_netdev_ops; - dev->destructor = sl_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = sl_free_netdev; dev->hard_header_len = 0; dev->addr_len = 0; @@ -1369,8 +1370,6 @@ static void __exit slip_exit(void) if (sl->tty) { printk(KERN_ERR "%s: tty discipline still running\n", dev->name); - /* Intentionally leak the control block. 
*/ - dev->destructor = NULL; } unregister_netdev(dev); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6c5d5ef46f75..fba8c136aa7c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1643,7 +1643,6 @@ static void team_destructor(struct net_device *dev) struct team *team = netdev_priv(dev); free_percpu(team->pcpu_stats); - free_netdev(dev); } static int team_open(struct net_device *dev) @@ -2079,7 +2078,8 @@ static void team_setup(struct net_device *dev) dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; - dev->destructor = team_destructor; + dev->needs_free_netdev = true; + dev->priv_destructor = team_destructor; dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags |= IFF_NO_QUEUE; dev->priv_flags |= IFF_TEAM; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index bbd707b9ef7a..9ee7d4275640 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1560,7 +1560,6 @@ static void tun_free_netdev(struct net_device *dev) free_percpu(tun->pcpu_stats); tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); - free_netdev(dev); } static void tun_setup(struct net_device *dev) @@ -1571,7 +1570,8 @@ static void tun_setup(struct net_device *dev) tun->group = INVALID_GID; dev->ethtool_ops = &tun_ethtool_ops; - dev->destructor = tun_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = tun_free_netdev; /* We prefer our own queue length */ dev->tx_queue_len = TUN_READQ_SIZE; } diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index eb52de8205f0..c7a350bbaaa7 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -298,7 +298,7 @@ static void usbpn_setup(struct net_device *dev) dev->addr_len = 1; dev->tx_queue_len = 3; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } /* diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8f923a147fa9..949671ce4039 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -123,7 +123,7 @@ static void qmimux_setup(struct net_device *dev) dev->addr_len = 0; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->netdev_ops = &qmimux_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 38f0f03a29c8..0156fe8cac17 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -222,7 +222,6 @@ static int veth_dev_init(struct net_device *dev) static void veth_dev_free(struct net_device *dev) { free_percpu(dev->vstats); - free_netdev(dev); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -317,7 +316,8 @@ static void veth_setup(struct net_device *dev) NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX); - dev->destructor = veth_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = veth_dev_free; dev->max_mtu = ETH_MAX_MTU; dev->hw_features = VETH_FEATURES; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index db882493875c..d38f11d833fe 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1348,7 +1348,7 @@ static void vrf_setup(struct net_device *dev) dev->netdev_ops = &vrf_netdev_ops; dev->l3mdev_ops = &vrf_l3mdev_ops; dev->ethtool_ops = &vrf_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; /* Fill in device structure with ethernet-generic values. 
*/ eth_hw_addr_random(dev); diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c index 7f0136f2dd9d..c28bdce14fd5 100644 --- a/drivers/net/vsockmon.c +++ b/drivers/net/vsockmon.c @@ -135,7 +135,7 @@ static void vsockmon_setup(struct net_device *dev) dev->netdev_ops = &vsockmon_ops; dev->ethtool_ops = &vsockmon_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_LLTX; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a6b5052c1d36..5fa798a5c9a6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2611,7 +2611,7 @@ static void vxlan_setup(struct net_device *dev) eth_hw_addr_random(dev); ether_setup(dev); - dev->destructor = free_netdev; + dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &vxlan_type); dev->features |= NETIF_F_LLTX; diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 65ee2a6f248c..a0d76f70c428 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -475,7 +475,7 @@ static void dlci_setup(struct net_device *dev) dev->flags = 0; dev->header_ops = &dlci_header_ops; dev->netdev_ops = &dlci_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dlp->receive = dlci_receive; diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index eb915281197e..78596e42a3f3 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1106,7 +1106,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) return -EIO; } - dev->destructor = free_netdev; + dev->needs_free_netdev = true; *get_dev_p(pvc, type) = dev; if (!used) { state(hdlc)->dce_changed = 1; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 9df9ed62beff..63f749078a1f 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -306,7 +306,7 @@ static const struct net_device_ops lapbeth_netdev_ops = { static void lapbeth_setup(struct net_device *dev) { dev->netdev_ops = &lapbeth_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->type = ARPHRD_X25; dev->hard_header_len = 3; dev->mtu = 1000; diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 91ee542de3d7..b90c77ef792e 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -1287,7 +1287,7 @@ void init_netdev(struct net_device *dev) struct ath6kl *ar = ath6kl_priv(dev); dev->netdev_ops = &ath6kl_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; dev->needed_headroom = ETH_HLEN; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index cd1d6730eab7..617199c0e5a0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5225,7 +5225,6 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev) if (vif) brcmf_free_vif(vif); - free_netdev(ndev); } static bool brcmf_is_linkup(const struct brcmf_event_msg *e) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index a3d82368f1a9..511d190c6cca 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -624,7 +624,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, 
s32 bsscfgidx, s32 ifidx, if (!ndev) return ERR_PTR(-ENOMEM); - ndev->destructor = brcmf_cfg80211_free_netdev; + ndev->needs_free_netdev = true; + ndev->priv_destructor = brcmf_cfg80211_free_netdev; ifp = netdev_priv(ndev); ifp->ndev = ndev; /* store mapping ifidx to bsscfgidx */ diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index 544fc09dcb62..1372b20f931e 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c @@ -73,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local, dev->mem_end = mdev->mem_end; hostap_setup_dev(dev, local, type); - dev->destructor = free_netdev; + dev->needs_free_netdev = true; sprintf(dev->name, "%s%s", prefix, name); if (!rtnl_locked) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 002b25cff5b6..c854a557998b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2861,7 +2861,7 @@ static const struct net_device_ops hwsim_netdev_ops = { static void hwsim_mon_setup(struct net_device *dev) { dev->netdev_ops = &hwsim_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; ether_setup(dev); dev->priv_flags |= IFF_NO_QUEUE; dev->type = ARPHRD_IEEE80211_RADIOTAP; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index dd87b9ff64c3..39b6b5e3f6e0 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1280,7 +1280,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev) { dev->netdev_ops = &mwifiex_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; /* Initialize private structure */ priv->current_key_index = 0; priv->media_connected = false; diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c index cfe37eb026d6..859d0d6051cd 100644 --- a/drivers/staging/rtl8188eu/os_dep/mon.c +++ b/drivers/staging/rtl8188eu/os_dep/mon.c @@ -152,7 +152,7 @@ static const struct net_device_ops mon_netdev_ops = { static void mon_setup(struct net_device *dev) { dev->netdev_ops = &mon_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; ether_setup(dev); dev->priv_flags |= IFF_NO_QUEUE; dev->type = ARPHRD_IEEE80211; diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index b4058f0000e4..6a1ce6a55158 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -281,7 +281,7 @@ static void pn_net_setup(struct net_device *dev) dev->tx_queue_len = 1; dev->netdev_ops = &pn_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->header_ops = &phonet_header_ops; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3f39d27decf4..ab7ca3fdc495 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1596,8 +1596,8 @@ enum netdev_priv_flags { * @rtnl_link_state: This enum represents the phases of creating * a new link * - * @destructor: Called from unregister, - * can be used to call free_netdev + * @needs_free_netdev: Should unregister perform free_netdev? 
+ * @priv_destructor: Called from unregister * @npinfo: XXX: need comments on this one * @nd_net: Network namespace this network device is inside * @@ -1858,7 +1858,8 @@ struct net_device { RTNL_LINK_INITIALIZING, } rtnl_link_state:16; - void (*destructor)(struct net_device *dev); + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *dev); #ifdef CONFIG_NETPOLL struct netpoll_info __rcu *npinfo; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 953b6728bd00..abc5f400fc71 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -813,7 +813,6 @@ static void vlan_dev_free(struct net_device *dev) free_percpu(vlan->vlan_pcpu_stats); vlan->vlan_pcpu_stats = NULL; - free_netdev(dev); } void vlan_setup(struct net_device *dev) @@ -826,7 +825,8 @@ void vlan_setup(struct net_device *dev) netif_keep_dst(dev); dev->netdev_ops = &vlan_netdev_ops; - dev->destructor = vlan_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = vlan_dev_free; dev->ethtool_ops = &vlan_ethtool_ops; dev->min_mtu = 0; diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index b25789abf7b9..10f7edfb176e 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -1034,8 +1034,6 @@ static void batadv_softif_free(struct net_device *dev) * netdev and its private data (bat_priv) */ rcu_barrier(); - - free_netdev(dev); } /** @@ -1047,7 +1045,8 @@ static void batadv_softif_init_early(struct net_device *dev) ether_setup(dev); dev->netdev_ops = &batadv_netdev_ops; - dev->destructor = batadv_softif_free; + dev->needs_free_netdev = true; + dev->priv_destructor = batadv_softif_free; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; dev->priv_flags |= IFF_NO_QUEUE; diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 608959989f8e..ab3b654b05cc 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -598,7 +598,7 @@ static void netdev_setup(struct net_device *dev) dev->netdev_ops = &netdev_ops; dev->header_ops = &header_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static struct device_type bt_type = { diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 430b53e7d941..f0f3447e8aa4 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -379,7 +379,7 @@ void br_dev_setup(struct net_device *dev) ether_setup(dev); dev->netdev_ops = &br_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->ethtool_ops = &br_ethtool_ops; SET_NETDEV_DEVTYPE(dev, &br_type); dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 1816fc9f1ee7..fe3c53efb949 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -392,14 +392,14 @@ static void chnl_net_destructor(struct net_device *dev) { struct chnl_net *priv = netdev_priv(dev); caif_free_client(&priv->chnl); - free_netdev(dev); } static void ipcaif_net_setup(struct net_device *dev) { struct chnl_net *priv; dev->netdev_ops = &netdev_ops; - dev->destructor = chnl_net_destructor; + dev->needs_free_netdev = true; + dev->priv_destructor = chnl_net_destructor; dev->flags |= IFF_NOARP; dev->flags |= IFF_POINTOPOINT; dev->mtu = GPRS_PDP_MTU; diff --git a/net/core/dev.c b/net/core/dev.c index 84e1e86a4bce..4c15466305c3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7502,6 +7502,8 @@ out: err_uninit: if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); + if (dev->priv_destructor) + dev->priv_destructor(dev); goto out; } 
EXPORT_SYMBOL(register_netdevice); @@ -7709,8 +7711,10 @@ void netdev_run_todo(void) WARN_ON(rcu_access_pointer(dev->ip6_ptr)); WARN_ON(dev->dn_ptr); - if (dev->destructor) - dev->destructor(dev); + if (dev->priv_destructor) + dev->priv_destructor(dev); + if (dev->needs_free_netdev) + free_netdev(dev); /* Report a network device has been unregistered */ rtnl_lock(); diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index c73160fb11e7..0a0a392dc2bd 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -378,7 +378,6 @@ static void hsr_dev_destroy(struct net_device *hsr_dev) del_timer_sync(&hsr->announce_timer); synchronize_rcu(); - free_netdev(hsr_dev); } static const struct net_device_ops hsr_device_ops = { @@ -404,7 +403,8 @@ void hsr_dev_setup(struct net_device *dev) SET_NETDEV_DEVTYPE(dev, &hsr_type); dev->priv_flags |= IFF_NO_QUEUE; - dev->destructor = hsr_dev_destroy; + dev->needs_free_netdev = true; + dev->priv_destructor = hsr_dev_destroy; dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index d7efbf0dad20..0a866f332290 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -107,7 +107,7 @@ static void lowpan_setup(struct net_device *ldev) ldev->netdev_ops = &lowpan_netdev_ops; ldev->header_ops = &lowpan_header_ops; - ldev->destructor = free_netdev; + ldev->needs_free_netdev = true; ldev->features |= NETIF_F_NETNS_LOCAL; } diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index b878ecbc0608..b436d0775631 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -967,7 +967,6 @@ static void ip_tunnel_dev_free(struct net_device *dev) gro_cells_destroy(&tunnel->gro_cells); dst_cache_destroy(&tunnel->dst_cache); free_percpu(dev->tstats); - free_netdev(dev); } void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) @@ -1155,7 +1154,8 @@ int ip_tunnel_init(struct net_device *dev) struct iphdr *iph = &tunnel->parms.iph; int err; - dev->destructor = ip_tunnel_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ip_tunnel_dev_free; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 551de4d023a8..b4f9622ee9f5 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -501,7 +501,7 @@ static void reg_vif_setup(struct net_device *dev) dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; dev->flags = IFF_NOARP; dev->netdev_ops = ®_vif_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->features |= NETIF_F_NETNS_LOCAL; } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 0c5b4caa1949..64eea3962733 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -991,13 +991,13 @@ static void ip6gre_dev_free(struct net_device *dev) dst_cache_destroy(&t->dst_cache); free_percpu(dev->tstats); - free_netdev(dev); } static void ip6gre_tunnel_setup(struct net_device *dev) { dev->netdev_ops = &ip6gre_netdev_ops; - dev->destructor = ip6gre_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ip6gre_dev_free; dev->type = ARPHRD_IP6GRE; @@ -1148,7 +1148,7 @@ static int __net_init ip6gre_init_net(struct net *net) return 0; err_reg_dev: - ip6gre_dev_free(ign->fb_tunnel_dev); + free_netdev(ign->fb_tunnel_dev); err_alloc_dev: return err; } @@ -1300,7 +1300,8 @@ static void ip6gre_tap_setup(struct net_device *dev) ether_setup(dev); dev->netdev_ops = 
&ip6gre_tap_netdev_ops; - dev->destructor = ip6gre_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ip6gre_dev_free; dev->features |= NETIF_F_NETNS_LOCAL; dev->priv_flags &= ~IFF_TX_SKB_SHARING; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 9b37f9747fc6..c3581973f5d7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -254,7 +254,6 @@ static void ip6_dev_free(struct net_device *dev) gro_cells_destroy(&t->gro_cells); dst_cache_destroy(&t->dst_cache); free_percpu(dev->tstats); - free_netdev(dev); } static int ip6_tnl_create2(struct net_device *dev) @@ -322,7 +321,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) return t; failed_free: - ip6_dev_free(dev); + free_netdev(dev); failed: return ERR_PTR(err); } @@ -1777,7 +1776,8 @@ static const struct net_device_ops ip6_tnl_netdev_ops = { static void ip6_tnl_dev_setup(struct net_device *dev) { dev->netdev_ops = &ip6_tnl_netdev_ops; - dev->destructor = ip6_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ip6_dev_free; dev->type = ARPHRD_TUNNEL6; dev->flags |= IFF_NOARP; @@ -2224,7 +2224,7 @@ static int __net_init ip6_tnl_init_net(struct net *net) return 0; err_register: - ip6_dev_free(ip6n->fb_tnl_dev); + free_netdev(ip6n->fb_tnl_dev); err_alloc_dev: return err; } diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index d67ef56454b2..837ea1eefe7f 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -180,7 +180,6 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t) static void vti6_dev_free(struct net_device *dev) { free_percpu(dev->tstats); - free_netdev(dev); } static int vti6_tnl_create2(struct net_device *dev) @@ -235,7 +234,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p return t; failed_free: - vti6_dev_free(dev); + free_netdev(dev); failed: return NULL; } @@ -842,7 +841,8 @@ static const struct net_device_ops vti6_netdev_ops = { static void vti6_dev_setup(struct net_device *dev) { dev->netdev_ops = &vti6_netdev_ops; - dev->destructor = vti6_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = vti6_dev_free; dev->type = ARPHRD_TUNNEL6; dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); @@ -1100,7 +1100,7 @@ static int __net_init vti6_init_net(struct net *net) return 0; err_register: - vti6_dev_free(ip6n->fb_tnl_dev); + free_netdev(ip6n->fb_tnl_dev); err_alloc_dev: return err; } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 374997d26488..2ecb39b943b5 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -733,7 +733,7 @@ static void reg_vif_setup(struct net_device *dev) dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; dev->flags = IFF_NOARP; dev->netdev_ops = ®_vif_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->features |= NETIF_F_NETNS_LOCAL; } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 61e5902f0687..2378503577b0 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -265,7 +265,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, return nt; failed_free: - ipip6_dev_free(dev); + free_netdev(dev); failed: return NULL; } @@ -1336,7 +1336,6 @@ static void ipip6_dev_free(struct net_device *dev) dst_cache_destroy(&tunnel->dst_cache); free_percpu(dev->tstats); - free_netdev(dev); } #define SIT_FEATURES (NETIF_F_SG | \ @@ -1351,7 +1350,8 @@ static void ipip6_tunnel_setup(struct net_device *dev) int t_hlen = tunnel->hlen + sizeof(struct iphdr); dev->netdev_ops = &ipip6_netdev_ops; - dev->destructor = 
ipip6_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ipip6_dev_free; dev->type = ARPHRD_SIT; dev->hard_header_len = LL_MAX_HEADER + t_hlen; diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index 74d09f91709e..3be852808a9d 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c @@ -65,7 +65,7 @@ static void irlan_eth_setup(struct net_device *dev) ether_setup(dev); dev->netdev_ops = &irlan_eth_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->min_mtu = 0; dev->max_mtu = ETH_MAX_MTU; diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 8b21af7321b9..f7c54ece3733 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -141,7 +141,7 @@ static void l2tp_eth_dev_setup(struct net_device *dev) dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->features |= NETIF_F_LLTX; dev->netdev_ops = &l2tp_eth_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8fae1a72e6a7..915d7e1b4545 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1213,7 +1213,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_free(struct net_device *dev) { free_percpu(dev->tstats); - free_netdev(dev); } static void ieee80211_if_setup(struct net_device *dev) @@ -1221,7 +1220,8 @@ static void ieee80211_if_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; - dev->destructor = ieee80211_if_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ieee80211_if_free; } static void ieee80211_if_setup_no_queue(struct net_device *dev) @@ -1905,7 +1905,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, ret = register_netdevice(ndev); if (ret) { - ieee80211_if_free(ndev); + free_netdev(ndev); return ret; } } diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 06019dba4b10..bd88a9b80773 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -526,8 +526,6 @@ static void mac802154_wpan_free(struct net_device *dev) struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); mac802154_llsec_destroy(&sdata->sec); - - free_netdev(dev); } static void ieee802154_if_setup(struct net_device *dev) @@ -593,7 +591,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, sdata->dev->dev_addr); sdata->dev->header_ops = &mac802154_header_ops; - sdata->dev->destructor = mac802154_wpan_free; + sdata->dev->needs_free_netdev = true; + sdata->dev->priv_destructor = mac802154_wpan_free; sdata->dev->netdev_ops = &mac802154_wpan_ops; sdata->dev->ml_priv = &mac802154_mlme_wpan; wpan_dev->promiscuous_mode = false; @@ -608,7 +607,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, break; case NL802154_IFTYPE_MONITOR: - sdata->dev->destructor = free_netdev; + sdata->dev->needs_free_netdev = true; sdata->dev->netdev_ops = &mac802154_monitor_ops; wpan_dev->promiscuous_mode = true; break; diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 89193a634da4..04a3128adcf0 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -94,7 +94,6 @@ static void internal_dev_destructor(struct net_device *dev) struct vport *vport = ovs_internal_dev_get_vport(dev); ovs_vport_free(vport); - free_netdev(dev); } static 
void @@ -156,7 +155,8 @@ static void do_setup(struct net_device *netdev) netdev->priv_flags &= ~IFF_TX_SKB_SHARING; netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH | IFF_PHONY_HEADROOM | IFF_NO_QUEUE; - netdev->destructor = internal_dev_destructor; + netdev->needs_free_netdev = true; + netdev->priv_destructor = internal_dev_destructor; netdev->ethtool_ops = &internal_dev_ethtool_ops; netdev->rtnl_link_ops = &internal_dev_link_ops; diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c index 21c28b51be94..2c9337946e30 100644 --- a/net/phonet/pep-gprs.c +++ b/net/phonet/pep-gprs.c @@ -236,7 +236,7 @@ static void gprs_setup(struct net_device *dev) dev->tx_queue_len = 10; dev->netdev_ops = &gprs_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } /* -- cgit v1.2.3 From 8397ed36b7c585f8d3e06c431f4137309124f78f Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 7 Jun 2017 12:26:23 -0600 Subject: net: ipv6: Release route when device is unregistering Roopa reported attempts to delete a bond device that is referenced in a multipath route is hanging: $ ifdown bond2 # ifupdown2 command that deletes virtual devices unregister_netdevice: waiting for bond2 to become free. Usage count = 2 Steps to reproduce: echo 1 > /proc/sys/net/ipv6/conf/all/ignore_routes_with_linkdown ip link add dev bond12 type bond ip link add dev bond13 type bond ip addr add 2001:db8:2::0/64 dev bond12 ip addr add 2001:db8:3::0/64 dev bond13 ip route add 2001:db8:33::0/64 nexthop via 2001:db8:2::2 nexthop via 2001:db8:3::2 ip link del dev bond12 ip link del dev bond13 The root cause is the recent change to keep routes on a linkdown. Update the check to detect when the device is unregistering and release the route for that case. Fixes: a1a22c12060e4 ("net: ipv6: Keep nexthop of multipath route on admin down") Reported-by: Roopa Prabhu Signed-off-by: David Ahern Acked-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/linux/netdevice.h | 5 +++++ net/ipv6/route.c | 1 + 2 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ab7ca3fdc495..846193dfb0ac 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4262,6 +4262,11 @@ static inline const char *netdev_name(const struct net_device *dev) return dev->name; } +static inline bool netdev_unregistering(const struct net_device *dev) +{ + return dev->reg_state == NETREG_UNREGISTERING; +} + static inline const char *netdev_reg_state(const struct net_device *dev) { switch (dev->reg_state) { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index dc61b0b5e64e..7cebd954d5bb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2804,6 +2804,7 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg) if ((rt->dst.dev == dev || !dev) && rt != adn->net->ipv6.ip6_null_entry && (rt->rt6i_nsiblings == 0 || + (dev && netdev_unregistering(dev)) || !rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) return -1; -- cgit v1.2.3 From 1123a6041654e8f889014659593bad4168e542c2 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 31 May 2017 14:03:11 +0200 Subject: srcu: Allow use of Classic SRCU from both process and interrupt context Linu Cherian reported a WARN in cleanup_srcu_struct() when shutting down a guest running iperf on a VFIO assigned device. This happens because irqfd_wakeup() calls srcu_read_lock(&kvm->irq_srcu) in interrupt context, while a worker thread does the same inside kvm_set_irq(). 
If the interrupt happens while the worker thread is executing __srcu_read_lock(), updates to the Classic SRCU ->lock_count[] field or the Tree SRCU ->srcu_lock_count[] field can be lost. The docs say you are not supposed to call srcu_read_lock() and srcu_read_unlock() from irq context, but KVM interrupt injection happens from (host) interrupt context and it would be nice if SRCU supported the use case. KVM is using SRCU here not really for the "sleepable" part, but rather due to its IPI-free fast detection of grace periods. It is therefore not desirable to switch back to RCU, which would effectively revert commit 719d93cd5f5c ("kvm/irqchip: Speed up KVM_SET_GSI_ROUTING", 2014-01-16). However, the docs are overly conservative. You can have an SRCU instance that only has users in irq context, and you can mix process and irq context as long as process context users disable interrupts. In addition, __srcu_read_unlock() actually uses this_cpu_dec() on both Tree SRCU and Classic SRCU. For those two implementations, only srcu_read_lock() is unsafe. When Classic SRCU's __srcu_read_unlock() was changed to use this_cpu_dec(), in commit 5a41344a3d83 ("srcu: Simplify __srcu_read_unlock() via this_cpu_dec()", 2012-11-29), __srcu_read_lock() did two increments. Therefore it kept __this_cpu_inc(), with preempt_disable/enable in the caller. Tree SRCU however only does one increment, so on most architectures it is more efficient for __srcu_read_lock() to use this_cpu_inc(), and any performance differences appear to be down in the noise. Cc: stable@vger.kernel.org Fixes: 719d93cd5f5c ("kvm/irqchip: Speed up KVM_SET_GSI_ROUTING") Reported-by: Linu Cherian Suggested-by: Linu Cherian Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini Cc: Linus Torvalds Signed-off-by: Paul E. McKenney --- include/linux/srcu.h | 2 -- kernel/rcu/srcu.c | 5 ++--- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 167ad8831aaf..4c1d5f7e62c4 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -172,9 +172,7 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) { int retval; - preempt_disable(); retval = __srcu_read_lock(sp); - preempt_enable(); rcu_lock_acquire(&(sp)->dep_map); return retval; } diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index 584d8a983883..dea03614263f 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -263,7 +263,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct); /* * Counts the new reader in the appropriate per-CPU element of the - * srcu_struct. Must be called from process context. + * srcu_struct. * Returns an index that must be passed to the matching srcu_read_unlock(). */ int __srcu_read_lock(struct srcu_struct *sp) @@ -271,7 +271,7 @@ int __srcu_read_lock(struct srcu_struct *sp) int idx; idx = READ_ONCE(sp->completed) & 0x1; - __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); + this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); smp_mb(); /* B */ /* Avoid leaking the critical section. */ return idx; } @@ -281,7 +281,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock); * Removes the count for the old reader from the appropriate per-CPU * element of the srcu_struct. Note that this may well be a different * CPU than that which was incremented by the corresponding srcu_read_lock(). - * Must be called from process context.
*/ void __srcu_read_unlock(struct srcu_struct *sp, int idx) { -- cgit v1.2.3 From 0620fddb56dfaf0e1034eeb69d79c73b361debbf Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 8 Jun 2017 14:49:26 +0100 Subject: KEYS: sanitize key structs before freeing While a 'struct key' itself normally does not contain sensitive information, Documentation/security/keys.txt actually encourages this: "Having a payload is not required; and the payload can, in fact, just be a value stored in the struct key itself." In case someone has taken this advice, or will take this advice in the future, zero the key structure before freeing it. We might as well, and as a bonus this could make it a bit more difficult for an adversary to determine which keys have recently been in use. This is safe because the key_jar cache does not use a constructor. Signed-off-by: Eric Biggers Signed-off-by: David Howells Signed-off-by: James Morris --- include/linux/key.h | 1 - security/keys/gc.c | 4 +--- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/key.h b/include/linux/key.h index 0c9b93b0d1f7..78e25aabedaf 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -173,7 +173,6 @@ struct key { #ifdef KEY_DEBUGGING unsigned magic; #define KEY_DEBUG_MAGIC 0x18273645u -#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu #endif unsigned long flags; /* status flags (change with bitops) */ diff --git a/security/keys/gc.c b/security/keys/gc.c index 595becc6d0d2..87cb260e4890 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -158,9 +158,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys) kfree(key->description); -#ifdef KEY_DEBUGGING - key->magic = KEY_DEBUG_MAGIC_X; -#endif + memzero_explicit(key, sizeof(*key)); kmem_cache_free(key_jar, key); } } -- cgit v1.2.3 From 6d53cefb18e4646fb4bf62ccb6098fb3808486df Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 11 Jun 2017 15:51:56 -0700 Subject: compiler, clang: properly override 'inline' for clang Commit abb2ea7dfd82 ("compiler, clang: suppress warning for unused static inline functions") just caused more warnings due to re-defining the 'inline' macro. So undef it before re-defining it, and also add the 'notrace' attribute like the gcc version that this is overriding does. Maybe this makes clang happier. Signed-off-by: Linus Torvalds --- include/linux/compiler-clang.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index ea9126006a69..d614c5ea1b5e 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -21,4 +21,5 @@ * -Wunused-function. This turns out to avoid the need for complex #ifdef * directives. Suppress the warning in clang as well. 
*/ -#define inline inline __attribute__((unused)) +#undef inline +#define inline inline __attribute__((unused)) notrace -- cgit v1.2.3 From 19e72d3abb63cb16d021a4066ce1a18880509e99 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 9 Feb 2017 17:28:50 -0800 Subject: configfs: Introduce config_item_get_unless_zero() Signed-off-by: Bart Van Assche [hch: minor style tweak] Signed-off-by: Christoph Hellwig --- fs/configfs/item.c | 8 ++++++++ include/linux/configfs.h | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/configfs/item.c b/fs/configfs/item.c index 8b2a994042dd..a66f6624d899 100644 --- a/fs/configfs/item.c +++ b/fs/configfs/item.c @@ -138,6 +138,14 @@ struct config_item *config_item_get(struct config_item *item) } EXPORT_SYMBOL(config_item_get); +struct config_item *config_item_get_unless_zero(struct config_item *item) +{ + if (item && kref_get_unless_zero(&item->ci_kref)) + return item; + return NULL; +} +EXPORT_SYMBOL(config_item_get_unless_zero); + static void config_item_cleanup(struct config_item *item) { struct config_item_type *t = item->ci_type; diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 2319b8c108e8..c96709049683 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -74,7 +74,8 @@ extern void config_item_init_type_name(struct config_item *item, const char *name, struct config_item_type *type); -extern struct config_item * config_item_get(struct config_item *); +extern struct config_item *config_item_get(struct config_item *); +extern struct config_item *config_item_get_unless_zero(struct config_item *); extern void config_item_put(struct config_item *); struct config_item_type { -- cgit v1.2.3 From db46a0e1be7eac45d0bb1bdcd438b8d47c920451 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 14 Jun 2017 16:15:24 +0900 Subject: net: update undefined ->ndo_change_mtu() comment Update ->ndo_change_mtu() callback comment to remove text about returning error in case of undefined callback. This change makes the comment match the existing code behavior. Signed-off-by: Magnus Damm Signed-off-by: David S. Miller --- include/linux/netdevice.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 846193dfb0ac..4ed952c17fc7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -914,8 +914,7 @@ struct xfrmdev_ops { * * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); * Called when a user wants to change the Maximum Transfer Unit - * of a device. If not defined, any request to change MTU will - * will return an error. + * of a device. 
* * void (*ndo_tx_timeout)(struct net_device *dev); * Callback used when the transmitter has not made any progress -- cgit v1.2.3 From dc9edc44de6cd7cc8cc7f5b36c1adb221eda3207 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 14 Jun 2017 13:27:50 -0600 Subject: block: Fix a blk_exit_rl() regression Avoid that the following complaint is reported: BUG: sleeping function called from invalid context at kernel/workqueue.c:2790 in_atomic(): 1, irqs_disabled(): 0, pid: 41, name: rcuop/3 1 lock held by rcuop/3/41: #0: (rcu_callback){......}, at: [] rcu_nocb_kthread+0x282/0x500 Call Trace: dump_stack+0x86/0xcf ___might_sleep+0x174/0x260 __might_sleep+0x4a/0x80 flush_work+0x7e/0x2e0 __cancel_work_timer+0x143/0x1c0 cancel_work_sync+0x10/0x20 blk_throtl_exit+0x25/0x60 blkcg_exit_queue+0x35/0x40 blk_release_queue+0x42/0x130 kobject_put+0xa9/0x190 This happens since we invoke callbacks that need to block from the queue release handler. Fix this by pushing the final release to a workqueue. Reported-by: Ross Zwisler Fixes: commit b425e5049258 ("block: Avoid that blk_exit_rl() triggers a use-after-free") Signed-off-by: Bart Van Assche Tested-by: Ross Zwisler Updated changelog Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 34 ++++++++++++++++++++++------------ include/linux/blkdev.h | 2 ++ 2 files changed, 24 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 283da7fbe034..27aceab1cc31 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -777,24 +777,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head) } /** - * blk_release_queue: - release a &struct request_queue when it is no longer needed - * @kobj: the kobj belonging to the request queue to be released + * __blk_release_queue - release a request queue when it is no longer needed + * @work: pointer to the release_work member of the request queue to be released * * Description: - * blk_release_queue is the pair to blk_init_queue() or - * blk_queue_make_request(). It should be called when a request queue is - * being released; typically when a block device is being de-registered. - * Currently, its primary task it to free all the &struct request - * structures that were allocated to the queue and the queue itself. + * blk_release_queue is the counterpart of blk_init_queue(). It should be + * called when a request queue is being released; typically when a block + * device is being de-registered. Its primary task it to free the queue + * itself. * - * Note: + * Notes: * The low level driver must have finished any outstanding requests first * via blk_cleanup_queue(). - **/ -static void blk_release_queue(struct kobject *kobj) + * + * Although blk_release_queue() may be called with preemption disabled, + * __blk_release_queue() may sleep. 
+ */ +static void __blk_release_queue(struct work_struct *work) { - struct request_queue *q = - container_of(kobj, struct request_queue, kobj); + struct request_queue *q = container_of(work, typeof(*q), release_work); if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) blk_stat_remove_callback(q, q->poll_cb); @@ -834,6 +835,15 @@ static void blk_release_queue(struct kobject *kobj) call_rcu(&q->rcu_head, blk_free_queue_rcu); } +static void blk_release_queue(struct kobject *kobj) +{ + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); + + INIT_WORK(&q->release_work, __blk_release_queue); + schedule_work(&q->release_work); +} + static const struct sysfs_ops queue_sysfs_ops = { .show = queue_attr_show, .store = queue_attr_store, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ab92c4ea138b..b74a3edcb3da 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -586,6 +586,8 @@ struct request_queue { size_t cmd_size; void *rq_alloc_data; + + struct work_struct release_work; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ -- cgit v1.2.3 From c926820085437a27b27e78996b2c7a5ad94e8055 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 15 Jun 2017 13:46:00 +0200 Subject: firmware: dmi_scan: Make dmi_walk and dmi_walk_early return real error codes Currently they return -1 on error, which will confuse callers if they try to interpret it as a normal negative error code. Signed-off-by: Andy Lutomirski Signed-off-by: Darren Hart (VMware) Signed-off-by: Jean Delvare --- drivers/firmware/dmi_scan.c | 9 +++++---- include/linux/dmi.h | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 82ee042f075c..fc30249132f5 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, buf = dmi_early_remap(dmi_base, orig_dmi_len); if (buf == NULL) - return -1; + return -ENOMEM; dmi_decode_table(buf, decode, NULL); @@ -1008,7 +1008,8 @@ EXPORT_SYMBOL(dmi_get_date); * @decode: Callback function * @private_data: Private data to be passed to the callback function * - * Returns -1 when the DMI table can't be reached, 0 on success. + * Returns 0 on success, -ENXIO if DMI is not selected or not present, + * or a different negative error code if DMI walking fails. 
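+ *
+ * A minimal caller-side sketch (illustrative only, not part of the original
+ * hunk; decode_cb and data are placeholder names). -ENXIO can be treated as
+ * "no DMI on this system", while other negative values, such as -ENOMEM
+ * from remapping the table, indicate a real failure:
+ *
+ *	ret = dmi_walk(decode_cb, data);
+ *	if (ret == -ENXIO)
+ *		return 0;
+ *	if (ret < 0)
+ *		return ret;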
*/ int dmi_walk(void (*decode)(const struct dmi_header *, void *), void *private_data) @@ -1016,11 +1017,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *), u8 *buf; if (!dmi_available) - return -1; + return -ENXIO; buf = dmi_remap(dmi_base, dmi_len); if (buf == NULL) - return -1; + return -ENOMEM; dmi_decode_table(buf, decode, private_data); diff --git a/include/linux/dmi.h b/include/linux/dmi.h index 5e9c74cf8894..9bbf21a516e4 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -136,7 +136,7 @@ static inline int dmi_name_in_vendors(const char *s) { return 0; } static inline int dmi_name_in_serial(const char *s) { return 0; } #define dmi_available 0 static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), - void *private_data) { return -1; } + void *private_data) { return -ENXIO; } static inline bool dmi_match(enum dmi_field f, const char *str) { return false; } static inline void dmi_memdev_name(u16 handle, const char **bank, -- cgit v1.2.3 From 466749f13e33d892cf9263d7efbc0ea713c23ed7 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Mon, 10 Apr 2017 12:27:01 +0200 Subject: gpu: host1x: Flesh out kerneldoc Improve kerneldoc for the public parts of the host1x infrastructure in preparation for adding driver-specific part to the GPU documentation. Acked-by: Daniel Vetter Signed-off-by: Thierry Reding --- drivers/gpu/host1x/bus.c | 75 +++++++++++++++++++++++++++++++++++++++++ drivers/gpu/host1x/syncpt.c | 81 +++++++++++++++++++++++++++++++++++++++------ include/linux/host1x.h | 25 ++++++++++++++ 3 files changed, 170 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 561831e1ae2c..a048e3ac523d 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -40,6 +40,9 @@ struct host1x_subdev { /** * host1x_subdev_add() - add a new subdevice with an associated device node + * @device: host1x device to add the subdevice to + * @driver: host1x driver + * @np: device node */ static int host1x_subdev_add(struct host1x_device *device, struct device_node *np) @@ -62,6 +65,7 @@ static int host1x_subdev_add(struct host1x_device *device, /** * host1x_subdev_del() - remove subdevice + * @subdev: subdevice to remove */ static void host1x_subdev_del(struct host1x_subdev *subdev) { @@ -72,6 +76,8 @@ static void host1x_subdev_del(struct host1x_subdev *subdev) /** * host1x_device_parse_dt() - scan device tree and add matching subdevices + * @device: host1x logical device + * @driver: host1x driver */ static int host1x_device_parse_dt(struct host1x_device *device, struct host1x_driver *driver) @@ -166,6 +172,16 @@ static void host1x_subdev_unregister(struct host1x_device *device, mutex_unlock(&device->subdevs_lock); } +/** + * host1x_device_init() - initialize a host1x logical device + * @device: host1x logical device + * + * The driver for the host1x logical device can call this during execution of + * its &host1x_driver.probe implementation to initialize each of its clients. + * The client drivers access the subsystem specific driver data using the + * &host1x_client.parent field and driver data associated with it (usually by + * calling dev_get_drvdata()). 
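+ *
+ * A minimal sketch of the expected call sequence (illustrative only, not
+ * part of the original hunk; the foo_* names are placeholders):
+ *
+ *	static int foo_probe(struct host1x_device *dev)
+ *	{
+ *		struct foo *foo = foo_alloc(&dev->dev);
+ *
+ *		dev_set_drvdata(&dev->dev, foo);
+ *
+ *		return host1x_device_init(dev);
+ *	}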
+ */ int host1x_device_init(struct host1x_device *device) { struct host1x_client *client; @@ -192,6 +208,15 @@ int host1x_device_init(struct host1x_device *device) } EXPORT_SYMBOL(host1x_device_init); +/** + * host1x_device_exit() - uninitialize host1x logical device + * @device: host1x logical device + * + * When the driver for a host1x logical device is unloaded, it can call this + * function to tear down each of its clients. Typically this is done after a + * subsystem-specific data structure is removed and the functionality can no + * longer be used. + */ int host1x_device_exit(struct host1x_device *device) { struct host1x_client *client; @@ -446,6 +471,14 @@ static void host1x_detach_driver(struct host1x *host1x, mutex_unlock(&host1x->devices_lock); } +/** + * host1x_register() - register a host1x controller + * @host1x: host1x controller + * + * The host1x controller driver uses this to register a host1x controller with + * the infrastructure. Note that all Tegra SoC generations have only ever come + * with a single host1x instance, so this function is somewhat academic. + */ int host1x_register(struct host1x *host1x) { struct host1x_driver *driver; @@ -464,6 +497,13 @@ int host1x_register(struct host1x *host1x) return 0; } +/** + * host1x_unregister() - unregister a host1x controller + * @host1x: host1x controller + * + * The host1x controller driver uses this to remove a host1x controller from + * the infrastructure. + */ int host1x_unregister(struct host1x *host1x) { struct host1x_driver *driver; @@ -513,6 +553,16 @@ static void host1x_device_shutdown(struct device *dev) driver->shutdown(device); } +/** + * host1x_driver_register_full() - register a host1x driver + * @driver: host1x driver + * @owner: owner module + * + * Drivers for host1x logical devices call this function to register a driver + * with the infrastructure. Note that since these drive logical devices, the + * registration of the driver actually triggers tho logical device creation. + * A logical device will be created for each host1x instance. + */ int host1x_driver_register_full(struct host1x_driver *driver, struct module *owner) { @@ -541,6 +591,13 @@ int host1x_driver_register_full(struct host1x_driver *driver, } EXPORT_SYMBOL(host1x_driver_register_full); +/** + * host1x_driver_unregister() - unregister a host1x driver + * @driver: host1x driver + * + * Unbinds the driver from each of the host1x logical devices that it is + * bound to, effectively removing the subsystem devices that they represent. + */ void host1x_driver_unregister(struct host1x_driver *driver) { driver_unregister(&driver->driver); @@ -551,6 +608,17 @@ void host1x_driver_unregister(struct host1x_driver *driver) } EXPORT_SYMBOL(host1x_driver_unregister); +/** + * host1x_client_register() - register a host1x client + * @client: host1x client + * + * Registers a host1x client with each host1x controller instance. Note that + * each client will only match their parent host1x controller and will only be + * associated with that instance. Once all clients have been registered with + * their parent host1x controller, the infrastructure will set up the logical + * device and call host1x_device_init(), which will in turn call each client's + * &host1x_client_ops.init implementation. 
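+ *
+ * A typical client driver would register from its own probe routine, along
+ * these lines (illustrative only, not part of the original hunk; foo_* is a
+ * placeholder):
+ *
+ *	foo->client.ops = &foo_client_ops;
+ *	foo->client.dev = &pdev->dev;
+ *
+ *	err = host1x_client_register(&foo->client);
+ *	if (err < 0)
+ *		return err;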
+ */ int host1x_client_register(struct host1x_client *client) { struct host1x *host1x; @@ -576,6 +644,13 @@ int host1x_client_register(struct host1x_client *client) } EXPORT_SYMBOL(host1x_client_register); +/** + * host1x_client_unregister() - unregister a host1x client + * @client: host1x client + * + * Removes a host1x client from its host1x controller instance. If a logical + * device has already been initialized, it will be torn down. + */ int host1x_client_unregister(struct host1x_client *client) { struct host1x_client *c; diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index 0ac026cdc30c..048ac9e344ce 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -99,14 +99,24 @@ unlock: return NULL; } +/** + * host1x_syncpt_id() - retrieve syncpoint ID + * @sp: host1x syncpoint + * + * Given a pointer to a struct host1x_syncpt, retrieves its ID. This ID is + * often used as a value to program into registers that control how hardware + * blocks interact with syncpoints. + */ u32 host1x_syncpt_id(struct host1x_syncpt *sp) { return sp->id; } EXPORT_SYMBOL(host1x_syncpt_id); -/* - * Updates the value sent to hardware. +/** + * host1x_syncpt_incr_max() - update the value sent to hardware + * @sp: host1x syncpoint + * @incrs: number of increments */ u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs) { @@ -175,8 +185,9 @@ u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp) return sp->base_val; } -/* - * Increment syncpoint value from cpu, updating cache +/** + * host1x_syncpt_incr() - increment syncpoint value from CPU, updating cache + * @sp: host1x syncpoint */ int host1x_syncpt_incr(struct host1x_syncpt *sp) { @@ -195,8 +206,12 @@ static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh) return host1x_syncpt_is_expired(sp, thresh); } -/* - * Main entrypoint for syncpoint value waits. +/** + * host1x_syncpt_wait() - wait for a syncpoint to reach a given value + * @sp: host1x syncpoint + * @thresh: threshold + * @timeout: maximum time to wait for the syncpoint to reach the given value + * @value: return location for the syncpoint value */ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, u32 *value) @@ -402,6 +417,16 @@ int host1x_syncpt_init(struct host1x *host) return 0; } +/** + * host1x_syncpt_request() - request a syncpoint + * @dev: device requesting the syncpoint + * @flags: flags + * + * host1x client drivers can use this function to allocate a syncpoint for + * subsequent use. A syncpoint returned by this function will be reserved for + * use by the client exclusively. When no longer using a syncpoint, a host1x + * client driver needs to release it using host1x_syncpt_free(). + */ struct host1x_syncpt *host1x_syncpt_request(struct device *dev, unsigned long flags) { @@ -411,6 +436,16 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev, } EXPORT_SYMBOL(host1x_syncpt_request); +/** + * host1x_syncpt_free() - free a requested syncpoint + * @sp: host1x syncpoint + * + * Release a syncpoint previously allocated using host1x_syncpt_request(). A + * host1x client driver should call this when the syncpoint is no longer in + * use. Note that client drivers must ensure that the syncpoint doesn't remain + * under the control of hardware after calling this function, otherwise two + * clients may end up trying to access the same syncpoint concurrently. 
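+ *
+ * Typical pairing with host1x_syncpt_request() (illustrative only, not part
+ * of the original hunk):
+ *
+ *	sp = host1x_syncpt_request(client->dev, flags);
+ *	if (!sp)
+ *		return -ENOMEM;
+ *	...
+ *	host1x_syncpt_free(sp);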
+ */ void host1x_syncpt_free(struct host1x_syncpt *sp) { if (!sp) @@ -438,9 +473,12 @@ void host1x_syncpt_deinit(struct host1x *host) kfree(sp->name); } -/* - * Read max. It indicates how many operations there are in queue, either in - * channel or in a software thread. +/** + * host1x_syncpt_read_max() - read maximum syncpoint value + * @sp: host1x syncpoint + * + * The maximum syncpoint value indicates how many operations there are in + * queue, either in channel or in a software thread. */ u32 host1x_syncpt_read_max(struct host1x_syncpt *sp) { @@ -450,8 +488,12 @@ u32 host1x_syncpt_read_max(struct host1x_syncpt *sp) } EXPORT_SYMBOL(host1x_syncpt_read_max); -/* - * Read min, which is a shadow of the current sync point value in hardware. +/** + * host1x_syncpt_read_min() - read minimum syncpoint value + * @sp: host1x syncpoint + * + * The minimum syncpoint value is a shadow of the current sync point value in + * hardware. */ u32 host1x_syncpt_read_min(struct host1x_syncpt *sp) { @@ -461,6 +503,10 @@ u32 host1x_syncpt_read_min(struct host1x_syncpt *sp) } EXPORT_SYMBOL(host1x_syncpt_read_min); +/** + * host1x_syncpt_read() - read the current syncpoint value + * @sp: host1x syncpoint + */ u32 host1x_syncpt_read(struct host1x_syncpt *sp) { return host1x_syncpt_load(sp); @@ -482,6 +528,11 @@ unsigned int host1x_syncpt_nb_mlocks(struct host1x *host) return host->info->nb_mlocks; } +/** + * host1x_syncpt_get() - obtain a syncpoint by ID + * @host: host1x controller + * @id: syncpoint ID + */ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id) { if (id >= host->info->nb_pts) @@ -491,12 +542,20 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id) } EXPORT_SYMBOL(host1x_syncpt_get); +/** + * host1x_syncpt_get_base() - obtain the wait base associated with a syncpoint + * @sp: host1x syncpoint + */ struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp) { return sp ? 
sp->base : NULL; } EXPORT_SYMBOL(host1x_syncpt_get_base); +/** + * host1x_syncpt_base_id() - retrieve the ID of a syncpoint wait base + * @base: host1x syncpoint wait base + */ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base) { return base->id; diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 3d04aa1dc83e..840a8ad627b2 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -32,11 +32,27 @@ enum host1x_class { struct host1x_client; +/** + * struct host1x_client_ops - host1x client operations + * @init: host1x client initialization code + * @exit: host1x client tear down code + */ struct host1x_client_ops { int (*init)(struct host1x_client *client); int (*exit)(struct host1x_client *client); }; +/** + * struct host1x_client - host1x client structure + * @list: list node for the host1x client + * @parent: pointer to struct device representing the host1x controller + * @dev: pointer to struct device backing this host1x client + * @ops: host1x client operations + * @class: host1x class represented by this client + * @channel: host1x channel associated with this client + * @syncpts: array of syncpoints requested for this client + * @num_syncpts: number of syncpoints requested for this client + */ struct host1x_client { struct list_head list; struct device *parent; @@ -251,6 +267,15 @@ void host1x_job_unpin(struct host1x_job *job); struct host1x_device; +/** + * struct host1x_driver - host1x logical device driver + * @driver: core driver + * @subdevs: table of OF device IDs matching subdevices for this driver + * @list: list node for the driver + * @probe: called when the host1x logical device is probed + * @remove: called when the host1x logical device is removed + * @shutdown: called when the host1x logical device is shut down + */ struct host1x_driver { struct device_driver driver; -- cgit v1.2.3 From d0fbbdff2e19aabccc1107b7e12ab9f3cbf626ef Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 15 Jun 2017 02:18:27 +0300 Subject: drm/tegra: Correct copying of waitchecks and disable them in the 'submit' IOCTL The waitchecks along with multiple syncpoints per submit are not ready for use yet, let's forbid them for now. 
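For user space the visible effect is that a submission which still populates wait-checks is now rejected up front with -EINVAL. A rough caller-side sketch, assuming the Tegra DRM UAPI names (drmIoctl, DRM_IOCTL_TEGRA_SUBMIT and the num_waitchks field) rather than anything introduced by this patch:

	struct drm_tegra_submit args = { 0 };

	args.num_syncpts = 1;
	args.num_waitchks = 1;	/* now refused by the kernel */
	/* ... cmdbufs, relocs and syncpts filled in as before ... */

	if (drmIoctl(fd, DRM_IOCTL_TEGRA_SUBMIT, &args) < 0 && errno == EINVAL)
		;	/* retry without wait-checks */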
Signed-off-by: Dmitry Osipenko Reviewed-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/gpu/drm/tegra/drm.c | 60 ++++++++++++++++++++++++++++++++++++++++++--- drivers/gpu/host1x/job.h | 7 ------ include/linux/host1x.h | 7 ++++++ 3 files changed, 63 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 51e20e015053..0928f2bb4203 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -349,6 +349,36 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, return 0; } +static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest, + struct drm_tegra_waitchk __user *src, + struct drm_file *file) +{ + u32 cmdbuf; + int err; + + err = get_user(cmdbuf, &src->handle); + if (err < 0) + return err; + + err = get_user(dest->offset, &src->offset); + if (err < 0) + return err; + + err = get_user(dest->syncpt_id, &src->syncpt); + if (err < 0) + return err; + + err = get_user(dest->thresh, &src->thresh); + if (err < 0) + return err; + + dest->bo = host1x_bo_lookup(file, cmdbuf); + if (!dest->bo) + return -ENOENT; + + return 0; +} + int tegra_drm_submit(struct tegra_drm_context *context, struct drm_tegra_submit *args, struct drm_device *drm, struct drm_file *file) @@ -370,6 +400,10 @@ int tegra_drm_submit(struct tegra_drm_context *context, if (args->num_syncpts != 1) return -EINVAL; + /* We don't yet support waitchks */ + if (args->num_waitchks != 0) + return -EINVAL; + job = host1x_job_alloc(context->channel, args->num_cmdbufs, args->num_relocs, args->num_waitchks); if (!job) @@ -458,10 +492,28 @@ int tegra_drm_submit(struct tegra_drm_context *context, } } - if (copy_from_user(job->waitchk, waitchks, - sizeof(*waitchks) * num_waitchks)) { - err = -EFAULT; - goto fail; + /* copy and resolve waitchks from submit */ + while (num_waitchks--) { + struct host1x_waitchk *wait = &job->waitchk[num_waitchks]; + struct tegra_bo *obj; + + err = host1x_waitchk_copy_from_user(wait, + &waitchks[num_waitchks], + file); + if (err < 0) + goto fail; + + obj = host1x_to_tegra_bo(wait->bo); + + /* + * The unaligned offset will cause an unaligned write during + * of the waitchks patching, corrupting the commands stream. 
+ */ + if (wait->offset & 3 || + wait->offset >= obj->gem.size) { + err = -EINVAL; + goto fail; + } } if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts, diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h index 878239c476d2..0debd93a1849 100644 --- a/drivers/gpu/host1x/job.h +++ b/drivers/gpu/host1x/job.h @@ -34,13 +34,6 @@ struct host1x_cmdbuf { u32 pad; }; -struct host1x_waitchk { - struct host1x_bo *bo; - u32 offset; - u32 syncpt_id; - u32 thresh; -}; - struct host1x_job_unpin_data { struct host1x_bo *bo; struct sg_table *sgt; diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 840a8ad627b2..ba0b245da732 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -193,6 +193,13 @@ struct host1x_reloc { unsigned long shift; }; +struct host1x_waitchk { + struct host1x_bo *bo; + u32 offset; + u32 syncpt_id; + u32 thresh; +}; + struct host1x_job { /* When refcount goes to zero, job can be freed */ struct kref ref; -- cgit v1.2.3 From 0f563a4bf66e5182f0882efee398f7e6bc0bb1be Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 15 Jun 2017 02:18:37 +0300 Subject: gpu: host1x: Forbid unrelated SETCLASS opcode in the firewall Several channels could be made to write the same unit concurrently via the SETCLASS opcode, trusting userspace is a bad idea. It should be possible to drop the per-client channel reservation and add a per-unit locking by inserting MLOCK's to the command stream to re-allow the SETCLASS opcode, but it will be much more work. Let's forbid the unit-unrelated class changes for now. Signed-off-by: Dmitry Osipenko Reviewed-by: Erik Faye-Lund Reviewed-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/gpu/drm/tegra/drm.c | 1 + drivers/gpu/drm/tegra/drm.h | 1 + drivers/gpu/drm/tegra/gr2d.c | 7 +++++++ drivers/gpu/host1x/job.c | 24 ++++++++++++++++++++---- include/linux/host1x.h | 3 +++ 5 files changed, 32 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index b44f1eddb570..4a46ba846a0f 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -532,6 +532,7 @@ int tegra_drm_submit(struct tegra_drm_context *context, } job->is_addr_reg = context->client->ops->is_addr_reg; + job->is_valid_class = context->client->ops->is_valid_class; job->syncpt_incrs = syncpt.incrs; job->syncpt_id = syncpt.id; job->timeout = 10000; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 85aa2e3d9d4e..6d6da01282f3 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -83,6 +83,7 @@ struct tegra_drm_client_ops { struct tegra_drm_context *context); void (*close_channel)(struct tegra_drm_context *context); int (*is_addr_reg)(struct device *dev, u32 class, u32 offset); + int (*is_valid_class)(u32 class); int (*submit)(struct tegra_drm_context *context, struct drm_tegra_submit *args, struct drm_device *drm, struct drm_file *file); diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index 02cd3e37a6ec..fbe0b8b25b42 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -109,10 +109,17 @@ static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset) return 0; } +static int gr2d_is_valid_class(u32 class) +{ + return (class == HOST1X_CLASS_GR2D || + class == HOST1X_CLASS_GR2D_SB); +} + static const struct tegra_drm_client_ops gr2d_ops = { .open_channel = gr2d_open_channel, .close_channel = gr2d_close_channel, .is_addr_reg = gr2d_is_addr_reg, + 
.is_valid_class = gr2d_is_valid_class, .submit = tegra_drm_submit, }; diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index 54230ec4f81e..ef746f7afb88 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -356,6 +356,9 @@ struct host1x_firewall { static int check_register(struct host1x_firewall *fw, unsigned long offset) { + if (!fw->job->is_addr_reg) + return 0; + if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) { if (!fw->num_relocs) return -EINVAL; @@ -370,6 +373,19 @@ static int check_register(struct host1x_firewall *fw, unsigned long offset) return 0; } +static int check_class(struct host1x_firewall *fw, u32 class) +{ + if (!fw->job->is_valid_class) { + if (fw->class != class) + return -EINVAL; + } else { + if (!fw->job->is_valid_class(fw->class)) + return -EINVAL; + } + + return 0; +} + static int check_mask(struct host1x_firewall *fw) { u32 mask = fw->mask; @@ -443,11 +459,9 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g) { u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped + (g->offset / sizeof(u32)); + u32 job_class = fw->class; int err = 0; - if (!fw->job->is_addr_reg) - return 0; - fw->words = g->words; fw->cmdbuf = g->bo; fw->offset = 0; @@ -467,7 +481,9 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g) fw->class = word >> 6 & 0x3ff; fw->mask = word & 0x3f; fw->reg = word >> 16 & 0xfff; - err = check_mask(fw); + err = check_class(fw, job_class); + if (!err) + err = check_mask(fw); if (err) goto out; break; diff --git a/include/linux/host1x.h b/include/linux/host1x.h index ba0b245da732..b5358f855d9e 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -251,6 +251,9 @@ struct host1x_job { /* Check if register is marked as an address reg */ int (*is_addr_reg)(struct device *dev, u32 reg, u32 class); + /* Check if class belongs to the unit */ + int (*is_valid_class)(u32 class); + /* Request a SETCLASS to this class */ u32 class; -- cgit v1.2.3 From a2b78b0d53f0808ebc2a0368b589a5cb6b672294 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 15 Jun 2017 02:18:38 +0300 Subject: gpu: host1x: Correct swapped arguments in the is_addr_reg() definition Arguments of the .is_addr_reg() are swapped in the definition of the function, that is quite confusing. Signed-off-by: Dmitry Osipenko Reviewed-by: Erik Faye-Lund Reviewed-by: Mikko Perttunen Signed-off-by: Thierry Reding --- include/linux/host1x.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/host1x.h b/include/linux/host1x.h index b5358f855d9e..476da0e06bb2 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -249,7 +249,7 @@ struct host1x_job { u8 *gather_copy_mapped; /* Check if register is marked as an address reg */ - int (*is_addr_reg)(struct device *dev, u32 reg, u32 class); + int (*is_addr_reg)(struct device *dev, u32 class, u32 reg); /* Check if class belongs to the unit */ int (*is_valid_class)(u32 class); -- cgit v1.2.3 From 8474b02531c4881a762c52ef869c52429e38633f Mon Sep 17 00:00:00 2001 From: Mikko Perttunen Date: Thu, 15 Jun 2017 02:18:42 +0300 Subject: gpu: host1x: Refactor channel allocation code This is largely a rewrite of the Host1x channel allocation code, bringing several changes: - The previous code could deadlock due to an interaction between the 'reflock' mutex and CDMA timeout handling. This gets rid of the mutex. 
- Support for more than 32 channels, required for Tegra186 - General refactoring, including better encapsulation of channel ownership handling into channel.c Signed-off-by: Mikko Perttunen Reviewed-by: Dmitry Osipenko Tested-by: Dmitry Osipenko Signed-off-by: Thierry Reding --- drivers/gpu/drm/tegra/gr2d.c | 4 +- drivers/gpu/drm/tegra/gr3d.c | 4 +- drivers/gpu/drm/tegra/vic.c | 4 +- drivers/gpu/host1x/channel.c | 147 +++++++++++++++++++++++-------------- drivers/gpu/host1x/channel.h | 21 ++++-- drivers/gpu/host1x/debug.c | 47 +++++------- drivers/gpu/host1x/dev.c | 7 +- drivers/gpu/host1x/dev.h | 6 +- drivers/gpu/host1x/hw/channel_hw.c | 4 - include/linux/host1x.h | 1 - 10 files changed, 135 insertions(+), 110 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index fbe0b8b25b42..6ea070da7718 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -38,7 +38,7 @@ static int gr2d_init(struct host1x_client *client) client->syncpts[0] = host1x_syncpt_request(client->dev, flags); if (!client->syncpts[0]) { - host1x_channel_free(gr2d->channel); + host1x_channel_put(gr2d->channel); return -ENOMEM; } @@ -57,7 +57,7 @@ static int gr2d_exit(struct host1x_client *client) return err; host1x_syncpt_free(client->syncpts[0]); - host1x_channel_free(gr2d->channel); + host1x_channel_put(gr2d->channel); return 0; } diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 13f0d1b7cd98..cee2ab645cde 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -48,7 +48,7 @@ static int gr3d_init(struct host1x_client *client) client->syncpts[0] = host1x_syncpt_request(client->dev, flags); if (!client->syncpts[0]) { - host1x_channel_free(gr3d->channel); + host1x_channel_put(gr3d->channel); return -ENOMEM; } @@ -67,7 +67,7 @@ static int gr3d_exit(struct host1x_client *client) return err; host1x_syncpt_free(client->syncpts[0]); - host1x_channel_free(gr3d->channel); + host1x_channel_put(gr3d->channel); return 0; } diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index cd804e404a11..47cb1aaa58b1 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -182,7 +182,7 @@ static int vic_init(struct host1x_client *client) free_syncpt: host1x_syncpt_free(client->syncpts[0]); free_channel: - host1x_channel_free(vic->channel); + host1x_channel_put(vic->channel); detach_device: if (tegra->domain) iommu_detach_device(tegra->domain, vic->dev); @@ -203,7 +203,7 @@ static int vic_exit(struct host1x_client *client) return err; host1x_syncpt_free(client->syncpts[0]); - host1x_channel_free(vic->channel); + host1x_channel_put(vic->channel); if (vic->domain) { iommu_detach_device(vic->domain, vic->dev); diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c index 8f437d924c10..db9b91d1384c 100644 --- a/drivers/gpu/host1x/channel.c +++ b/drivers/gpu/host1x/channel.c @@ -24,19 +24,33 @@ #include "job.h" /* Constructor for the host1x device list */ -int host1x_channel_list_init(struct host1x *host) +int host1x_channel_list_init(struct host1x_channel_list *chlist, + unsigned int num_channels) { - INIT_LIST_HEAD(&host->chlist.list); - mutex_init(&host->chlist_mutex); - - if (host->info->nb_channels > BITS_PER_LONG) { - WARN(1, "host1x hardware has more channels than supported by the driver\n"); - return -ENOSYS; + chlist->channels = kcalloc(num_channels, sizeof(struct host1x_channel), + GFP_KERNEL); + if (!chlist->channels) + return -ENOMEM; + + 
chlist->allocated_channels = + kcalloc(BITS_TO_LONGS(num_channels), sizeof(unsigned long), + GFP_KERNEL); + if (!chlist->allocated_channels) { + kfree(chlist->channels); + return -ENOMEM; } + bitmap_zero(chlist->allocated_channels, num_channels); + return 0; } +void host1x_channel_list_free(struct host1x_channel_list *chlist) +{ + kfree(chlist->allocated_channels); + kfree(chlist->channels); +} + int host1x_job_submit(struct host1x_job *job) { struct host1x *host = dev_get_drvdata(job->channel->dev->parent); @@ -47,86 +61,107 @@ EXPORT_SYMBOL(host1x_job_submit); struct host1x_channel *host1x_channel_get(struct host1x_channel *channel) { - int err = 0; + kref_get(&channel->refcount); - mutex_lock(&channel->reflock); + return channel; +} +EXPORT_SYMBOL(host1x_channel_get); - if (channel->refcount == 0) - err = host1x_cdma_init(&channel->cdma); +/** + * host1x_channel_get_index() - Attempt to get channel reference by index + * @host: Host1x device object + * @index: Index of channel + * + * If channel number @index is currently allocated, increase its refcount + * and return a pointer to it. Otherwise, return NULL. + */ +struct host1x_channel *host1x_channel_get_index(struct host1x *host, + unsigned int index) +{ + struct host1x_channel *ch = &host->channel_list.channels[index]; - if (!err) - channel->refcount++; + if (!kref_get_unless_zero(&ch->refcount)) + return NULL; - mutex_unlock(&channel->reflock); + return ch; +} + +static void release_channel(struct kref *kref) +{ + struct host1x_channel *channel = + container_of(kref, struct host1x_channel, refcount); + struct host1x *host = dev_get_drvdata(channel->dev->parent); + struct host1x_channel_list *chlist = &host->channel_list; + + host1x_hw_cdma_stop(host, &channel->cdma); + host1x_cdma_deinit(&channel->cdma); - return err ? NULL : channel; + clear_bit(channel->id, chlist->allocated_channels); } -EXPORT_SYMBOL(host1x_channel_get); void host1x_channel_put(struct host1x_channel *channel) { - mutex_lock(&channel->reflock); + kref_put(&channel->refcount, release_channel); +} +EXPORT_SYMBOL(host1x_channel_put); - if (channel->refcount == 1) { - struct host1x *host = dev_get_drvdata(channel->dev->parent); +static struct host1x_channel *acquire_unused_channel(struct host1x *host) +{ + struct host1x_channel_list *chlist = &host->channel_list; + unsigned int max_channels = host->info->nb_channels; + unsigned int index; - host1x_hw_cdma_stop(host, &channel->cdma); - host1x_cdma_deinit(&channel->cdma); + index = find_first_zero_bit(chlist->allocated_channels, max_channels); + if (index >= max_channels) { + dev_err(host->dev, "failed to find free channel\n"); + return NULL; } - channel->refcount--; + chlist->channels[index].id = index; - mutex_unlock(&channel->reflock); + set_bit(index, chlist->allocated_channels); + + return &chlist->channels[index]; } -EXPORT_SYMBOL(host1x_channel_put); +/** + * host1x_channel_request() - Allocate a channel + * @device: Host1x unit this channel will be used to send commands to + * + * Allocates a new host1x channel for @device. If there are no free channels, + * this will sleep until one becomes available. May return NULL if CDMA + * initialization fails. 
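+ *
+ * Illustrative usage from a client's init path (a sketch, not part of the
+ * original hunk); note that the channel is now released with
+ * host1x_channel_put() rather than host1x_channel_free():
+ *
+ *	channel = host1x_channel_request(client->dev);
+ *	if (!channel)
+ *		return -EBUSY;
+ *	...
+ *	host1x_channel_put(channel);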
+ */ struct host1x_channel *host1x_channel_request(struct device *dev) { struct host1x *host = dev_get_drvdata(dev->parent); - unsigned int max_channels = host->info->nb_channels; - struct host1x_channel *channel = NULL; - unsigned long index; + struct host1x_channel_list *chlist = &host->channel_list; + struct host1x_channel *channel; int err; - mutex_lock(&host->chlist_mutex); + channel = acquire_unused_channel(host); + if (!channel) + return NULL; - index = find_first_zero_bit(&host->allocated_channels, max_channels); - if (index >= max_channels) - goto fail; + kref_init(&channel->refcount); + mutex_init(&channel->submitlock); + channel->dev = dev; - channel = kzalloc(sizeof(*channel), GFP_KERNEL); - if (!channel) + err = host1x_hw_channel_init(host, channel, channel->id); + if (err < 0) goto fail; - err = host1x_hw_channel_init(host, channel, index); + err = host1x_cdma_init(&channel->cdma); if (err < 0) goto fail; - /* Link device to host1x_channel */ - channel->dev = dev; - - /* Add to channel list */ - list_add_tail(&channel->list, &host->chlist.list); - - host->allocated_channels |= BIT(index); - - mutex_unlock(&host->chlist_mutex); return channel; fail: - dev_err(dev, "failed to init channel\n"); - kfree(channel); - mutex_unlock(&host->chlist_mutex); - return NULL; -} -EXPORT_SYMBOL(host1x_channel_request); + clear_bit(channel->id, chlist->allocated_channels); -void host1x_channel_free(struct host1x_channel *channel) -{ - struct host1x *host = dev_get_drvdata(channel->dev->parent); + dev_err(dev, "failed to initialize channel\n"); - host->allocated_channels &= ~BIT(channel->id); - list_del(&channel->list); - kfree(channel); + return NULL; } -EXPORT_SYMBOL(host1x_channel_free); +EXPORT_SYMBOL(host1x_channel_request); diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h index df767cf90d51..7068e42d42df 100644 --- a/drivers/gpu/host1x/channel.h +++ b/drivers/gpu/host1x/channel.h @@ -20,17 +20,21 @@ #define __HOST1X_CHANNEL_H #include +#include #include "cdma.h" struct host1x; +struct host1x_channel; -struct host1x_channel { - struct list_head list; +struct host1x_channel_list { + struct host1x_channel *channels; + unsigned long *allocated_channels; +}; - unsigned int refcount; +struct host1x_channel { + struct kref refcount; unsigned int id; - struct mutex reflock; struct mutex submitlock; void __iomem *regs; struct device *dev; @@ -38,9 +42,10 @@ struct host1x_channel { }; /* channel list operations */ -int host1x_channel_list_init(struct host1x *host); - -#define host1x_for_each_channel(host, channel) \ - list_for_each_entry(channel, &host->chlist.list, list) +int host1x_channel_list_init(struct host1x_channel_list *chlist, + unsigned int num_channels); +void host1x_channel_list_free(struct host1x_channel_list *chlist); +struct host1x_channel *host1x_channel_get_index(struct host1x *host, + unsigned int index); #endif diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c index d9330fcc62ad..2aae0e63214c 100644 --- a/drivers/gpu/host1x/debug.c +++ b/drivers/gpu/host1x/debug.c @@ -43,24 +43,19 @@ void host1x_debug_output(struct output *o, const char *fmt, ...) 
o->fn(o->ctx, o->buf, len); } -static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo) +static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo) { struct host1x *m = dev_get_drvdata(ch->dev->parent); struct output *o = data; - mutex_lock(&ch->reflock); + mutex_lock(&ch->cdma.lock); - if (ch->refcount) { - mutex_lock(&ch->cdma.lock); + if (show_fifo) + host1x_hw_show_channel_fifo(m, ch, o); - if (show_fifo) - host1x_hw_show_channel_fifo(m, ch, o); + host1x_hw_show_channel_cdma(m, ch, o); - host1x_hw_show_channel_cdma(m, ch, o); - mutex_unlock(&ch->cdma.lock); - } - - mutex_unlock(&ch->reflock); + mutex_unlock(&ch->cdma.lock); return 0; } @@ -94,28 +89,22 @@ static void show_syncpts(struct host1x *m, struct output *o) host1x_debug_output(o, "\n"); } -static void show_all(struct host1x *m, struct output *o) +static void show_all(struct host1x *m, struct output *o, bool show_fifo) { - struct host1x_channel *ch; + int i; host1x_hw_show_mlocks(m, o); show_syncpts(m, o); host1x_debug_output(o, "---- channels ----\n"); - host1x_for_each_channel(m, ch) - show_channels(ch, o, true); -} - -static void show_all_no_fifo(struct host1x *host1x, struct output *o) -{ - struct host1x_channel *ch; - - host1x_hw_show_mlocks(host1x, o); - show_syncpts(host1x, o); - host1x_debug_output(o, "---- channels ----\n"); + for (i = 0; i < m->info->nb_channels; ++i) { + struct host1x_channel *ch = host1x_channel_get_index(m, i); - host1x_for_each_channel(host1x, ch) - show_channels(ch, o, false); + if (ch) { + show_channel(ch, o, show_fifo); + host1x_channel_put(ch); + } + } } static int host1x_debug_show_all(struct seq_file *s, void *unused) @@ -125,7 +114,7 @@ static int host1x_debug_show_all(struct seq_file *s, void *unused) .ctx = s }; - show_all(s->private, &o); + show_all(s->private, &o, true); return 0; } @@ -137,7 +126,7 @@ static int host1x_debug_show(struct seq_file *s, void *unused) .ctx = s }; - show_all_no_fifo(s->private, &o); + show_all(s->private, &o, false); return 0; } @@ -216,7 +205,7 @@ void host1x_debug_dump(struct host1x *host1x) .fn = write_to_printk }; - show_all(host1x, &o); + show_all(host1x, &o, true); } void host1x_debug_dump_syncpts(struct host1x *host1x) diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index f05ebb14fa63..5c1c711a21af 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -198,7 +198,8 @@ static int host1x_probe(struct platform_device *pdev) host->iova_end = geometry->aperture_end; } - err = host1x_channel_list_init(host); + err = host1x_channel_list_init(&host->channel_list, + host->info->nb_channels); if (err) { dev_err(&pdev->dev, "failed to initialize channel list\n"); goto fail_detach_device; @@ -207,7 +208,7 @@ static int host1x_probe(struct platform_device *pdev) err = clk_prepare_enable(host->clk); if (err < 0) { dev_err(&pdev->dev, "failed to enable clock\n"); - goto fail_detach_device; + goto fail_free_channels; } err = reset_control_deassert(host->rst); @@ -244,6 +245,8 @@ fail_reset_assert: reset_control_assert(host->rst); fail_unprepare_disable: clk_disable_unprepare(host->clk); +fail_free_channels: + host1x_channel_list_free(&host->channel_list); fail_detach_device: if (host->domain) { put_iova_domain(&host->iova); diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index 229d08b6a45e..ffdbc15b749b 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -129,10 +129,8 @@ struct host1x { struct host1x_syncpt *nop_sp; struct mutex syncpt_mutex; - struct 
mutex chlist_mutex; - struct host1x_channel chlist; - unsigned long allocated_channels; - unsigned int num_allocated_channels; + + struct host1x_channel_list channel_list; struct dentry *debugfs; diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c index 5e8df78b7acd..8447a56c41ca 100644 --- a/drivers/gpu/host1x/hw/channel_hw.c +++ b/drivers/gpu/host1x/hw/channel_hw.c @@ -181,10 +181,6 @@ error: static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev, unsigned int index) { - ch->id = index; - mutex_init(&ch->reflock); - mutex_init(&ch->submitlock); - ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE; return 0; } diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 476da0e06bb2..630b1a98ab58 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -172,7 +172,6 @@ struct host1x_channel; struct host1x_job; struct host1x_channel *host1x_channel_request(struct device *dev); -void host1x_channel_free(struct host1x_channel *channel); struct host1x_channel *host1x_channel_get(struct host1x_channel *channel); void host1x_channel_put(struct host1x_channel *channel); int host1x_job_submit(struct host1x_job *job); -- cgit v1.2.3 From 1be7107fbe18eed3e319a6c3e83c78254b693acb Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 19 Jun 2017 04:03:24 -0700 Subject: mm: larger stack guard gap, between vmas Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs like gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN. This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunately. Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot. One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units). Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and the vma tree's subtree_gap support for that. Original-patch-by: Oleg Nesterov Original-patch-by: Michal Hocko Signed-off-by: Hugh Dickins Acked-by: Michal Hocko Tested-by: Helge Deller # parisc Signed-off-by: Linus Torvalds --- Documentation/admin-guide/kernel-parameters.txt | 7 ++ arch/arc/mm/mmap.c | 2 +- arch/arm/mm/mmap.c | 4 +- arch/frv/mm/elf-fdpic.c | 2 +- arch/mips/mm/mmap.c | 2 +- arch/parisc/kernel/sys_parisc.c | 15 ++- arch/powerpc/mm/hugetlbpage-radix.c | 2 +- arch/powerpc/mm/mmap.c | 4 +- arch/powerpc/mm/slice.c | 2 +- arch/s390/mm/mmap.c | 4 +- arch/sh/mm/mmap.c | 4 +- arch/sparc/kernel/sys_sparc_64.c | 4 +- arch/sparc/mm/hugetlbpage.c | 2 +- arch/tile/mm/hugetlbpage.c | 2 +- arch/x86/kernel/sys_x86_64.c | 4 +- arch/x86/mm/hugetlbpage.c | 2 +- arch/xtensa/kernel/syscall.c | 2 +- fs/hugetlbfs/inode.c | 2 +- fs/proc/task_mmu.c | 4 - include/linux/mm.h | 53 ++++----- mm/gup.c | 5 - mm/memory.c | 38 ------ mm/mmap.c | 149 ++++++++++++++---- 23 files changed, 152 insertions(+), 163 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 0f5c3b4347c6..7737ab5d04b2 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3811,6 +3811,13 @@ expediting. Set to zero to disable automatic expediting. + stack_guard_gap= [MM] + override the default stack gap protection. The value + is in page units and it defines how many pages prior + to (for stacks growing down) resp. after (for stacks + growing up) the main stack are reserved for no other + mapping. Default value is 256 pages. + stacktrace [FTRACE] Enabled the stack tracer on boot up.
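As a usage illustration of the option documented above (example values, not taken from the patch): booting with

	stack_guard_gap=1024

reserves a 4MB gap on a system with 4kB pages, while stack_guard_gap=1 roughly restores the old single-page behaviour.

To make the vm_start_gap() idea concrete before the arch-by-arch conversions below, here is a minimal sketch of how the growsdown helper can be shaped, assuming a global stack_guard_gap variable that holds the gap size in bytes (illustrative, not copied from the patch):

	static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
	{
		unsigned long vm_start = vma->vm_start;

		if (vma->vm_flags & VM_GROWSDOWN) {
			vm_start -= stack_guard_gap;
			if (vm_start > vma->vm_start)	/* clamp on underflow */
				vm_start = 0;
		}
		return vm_start;
	}

A VM_GROWSUP counterpart (vm_end_gap()) would mirror this by extending vm_end upwards and clamping on wraparound.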
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 3e25e8d6486b..2e13683dfb24 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 2239fde10b80..f0701d8d24df 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index da82c25301e7..46aa289c5102 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi addr = PAGE_ALIGN(addr); vma = find_vma(current->mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto success; } diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 64dd8bdd92c3..28adeabe851f 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index e5288638a1d9..378a754ca186 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; unsigned long task_size = TASK_SIZE; int do_color_align, last_mmap; struct vm_unmapped_area_info info; @@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, else addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) goto found_addr; } @@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; int do_color_align, last_mmap; @@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = COLOR_ALIGN(addr, last_mmap, pgoff); else addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= 
vm_end_gap(prev))) goto found_addr; } diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 6575b9aabef4..a12e86395025 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (mm->task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } /* diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 9dbd2a733d6b..0ee6be4f1ba4 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (mm->task_size - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (mm->task_size - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 966b9fccfa66..45f6740dd407 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, if ((mm->task_size - len) < addr) return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); + return (!vma || (addr + len) <= vm_start_gap(vma)); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index b017daed6887..b854b1da281a 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto check_asce_limit; } @@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto check_asce_limit; } diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 08e7af0be4a7..6a1a1297baae 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index ef4520efc813..043544d0cda3 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi vma = find_vma(mm, addr); if (task_size - len >= addr 
&& - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 7c29d38e6b99..88855e383b34 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index cb10153b5c9f..03e5cc4e76e4 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (current->mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 207b8f2582c7..213ddf3e937d 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 302f43fd9c28..adad702b39cd 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 06937928cb72..74afbf02d07e 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, /* At this point: (!vmm || addr < vmm->vm_end). 
*/ if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) + if (!vmm || addr + len <= vm_start_gap(vmm)) return addr; addr = vmm->vm_end; if (flags & MAP_SHARED) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index dde861387a40..d44f5456eb9b 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -200,7 +200,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f0c8b33d99b1..520802da059c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -300,11 +300,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) /* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; - if (stack_guard_page_start(vma, start)) - start += PAGE_SIZE; end = vma->vm_end; - if (stack_guard_page_end(vma, end)) - end -= PAGE_SIZE; seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", diff --git a/include/linux/mm.h b/include/linux/mm.h index b892e95d4929..6f543a47fc92 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1393,12 +1393,6 @@ int clear_page_dirty_for_io(struct page *page); int get_cmdline(struct task_struct *task, char *buffer, int buflen); -/* Is the vma a continuation of the stack vma above it? */ -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); -} - static inline bool vma_is_anonymous(struct vm_area_struct *vma) { return !vma->vm_ops; @@ -1414,28 +1408,6 @@ bool vma_is_shmem(struct vm_area_struct *vma); static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } #endif -static inline int stack_guard_page_start(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSDOWN) && - (vma->vm_start == addr) && - !vma_growsdown(vma->vm_prev, addr); -} - -/* Is the vma a continuation of the stack vma below it? 
*/ -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); -} - -static inline int stack_guard_page_end(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSUP) && - (vma->vm_end == addr) && - !vma_growsup(vma->vm_next, addr); -} - int vma_is_stack_for_current(struct vm_area_struct *vma); extern unsigned long move_page_tables(struct vm_area_struct *vma, @@ -2222,6 +2194,7 @@ void page_cache_async_readahead(struct address_space *mapping, pgoff_t offset, unsigned long size); +extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -2250,6 +2223,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m return vma; } +static inline unsigned long vm_start_gap(struct vm_area_struct *vma) +{ + unsigned long vm_start = vma->vm_start; + + if (vma->vm_flags & VM_GROWSDOWN) { + vm_start -= stack_guard_gap; + if (vm_start > vma->vm_start) + vm_start = 0; + } + return vm_start; +} + +static inline unsigned long vm_end_gap(struct vm_area_struct *vma) +{ + unsigned long vm_end = vma->vm_end; + + if (vma->vm_flags & VM_GROWSUP) { + vm_end += stack_guard_gap; + if (vm_end < vma->vm_end) + vm_end = -PAGE_SIZE; + } + return vm_end; +} + static inline unsigned long vma_pages(struct vm_area_struct *vma) { return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; diff --git a/mm/gup.c b/mm/gup.c index b3c7214d710d..576c4df58882 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -387,11 +387,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, /* mlock all present pages, but do not fault in new pages */ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) return -ENOENT; - /* For mm_populate(), just skip the stack guard page. */ - if ((*flags & FOLL_POPULATE) && - (stack_guard_page_start(vma, address) || - stack_guard_page_end(vma, address + PAGE_SIZE))) - return -ENOENT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) diff --git a/mm/memory.c b/mm/memory.c index 2e65df1831d9..bb11c474857e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2854,40 +2854,6 @@ out_release: return ret; } -/* - * This is like a special single-page "expand_{down|up}wards()", - * except we must first make sure that 'address{-|+}PAGE_SIZE' - * doesn't hit another vma. - */ -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) -{ - address &= PAGE_MASK; - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { - struct vm_area_struct *prev = vma->vm_prev; - - /* - * Is there a mapping abutting this one below? - * - * That's only ok if it's the same stack mapping - * that has gotten split.. - */ - if (prev && prev->vm_end == address) - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; - - return expand_downwards(vma, address - PAGE_SIZE); - } - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { - struct vm_area_struct *next = vma->vm_next; - - /* As VM_GROWSDOWN but s/below/above/ */ - if (next && next->vm_start == address + PAGE_SIZE) - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; - - return expand_upwards(vma, address + PAGE_SIZE); - } - return 0; -} - /* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. 
@@ -2904,10 +2870,6 @@ static int do_anonymous_page(struct vm_fault *vmf) if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; - /* Check if we need to add a guard page to the stack */ - if (check_stack_guard_page(vma, vmf->address) < 0) - return VM_FAULT_SIGSEGV; - /* * Use pte_alloc() instead of pte_alloc_map(). We can't run * pte_offset_map() on pmds where a huge pmd might be created diff --git a/mm/mmap.c b/mm/mmap.c index f82741e199c0..8e07976d5e47 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) unsigned long retval; unsigned long newbrk, oldbrk; struct mm_struct *mm = current->mm; + struct vm_area_struct *next; unsigned long min_brk; bool populate; LIST_HEAD(uf); @@ -229,7 +230,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) } /* Check against existing mmap mappings. */ - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) + next = find_vma(mm, oldbrk); + if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip. */ @@ -253,10 +255,22 @@ out: static long vma_compute_subtree_gap(struct vm_area_struct *vma) { - unsigned long max, subtree_gap; - max = vma->vm_start; - if (vma->vm_prev) - max -= vma->vm_prev->vm_end; + unsigned long max, prev_end, subtree_gap; + + /* + * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we + * allow two stack_guard_gaps between them here, and when choosing + * an unmapped area; whereas when expanding we only require one. + * That's a little inconsistent, but keeps the code here simpler. + */ + max = vm_start_gap(vma); + if (vma->vm_prev) { + prev_end = vm_end_gap(vma->vm_prev); + if (max > prev_end) + max -= prev_end; + else + max = 0; + } if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; @@ -352,7 +366,7 @@ static void validate_mm(struct mm_struct *mm) anon_vma_unlock_read(anon_vma); } - highest_address = vma->vm_end; + highest_address = vm_end_gap(vma); vma = vma->vm_next; i++; } @@ -541,7 +555,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_next) vma_gap_update(vma->vm_next); else - mm->highest_vm_end = vma->vm_end; + mm->highest_vm_end = vm_end_gap(vma); /* * vma->vm_prev wasn't known when we followed the rbtree to find the @@ -856,7 +870,7 @@ again: vma_gap_update(vma); if (end_changed) { if (!next) - mm->highest_vm_end = end; + mm->highest_vm_end = vm_end_gap(vma); else if (!adjust_next) vma_gap_update(next); } @@ -941,7 +955,7 @@ again: * mm->highest_vm_end doesn't need any update * in remove_next == 1 case. */ - VM_WARN_ON(mm->highest_vm_end != end); + VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } } if (insert && file) @@ -1787,7 +1801,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) while (true) { /* Visit left subtree if it looks promising */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, @@ -1798,7 +1812,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) } } - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? 
vm_end_gap(vma->vm_prev) : 0; check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) @@ -1825,8 +1839,8 @@ check_current: vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_left) { - gap_start = vma->vm_prev->vm_end; - gap_end = vma->vm_start; + gap_start = vm_end_gap(vma->vm_prev); + gap_end = vm_start_gap(vma); goto check_current; } } @@ -1890,7 +1904,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) while (true) { /* Visit right subtree if it looks promising */ - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; if (gap_start <= high_limit && vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, @@ -1903,7 +1917,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) check_current: /* Check if current node has a suitable gap */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; if (gap_start <= high_limit && gap_end - gap_start >= length) @@ -1929,7 +1943,7 @@ check_current: struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_right) { gap_start = vma->vm_prev ? - vma->vm_prev->vm_end : 0; + vm_end_gap(vma->vm_prev) : 0; goto check_current; } } @@ -1967,7 +1981,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; if (len > TASK_SIZE - mmap_min_addr) @@ -1978,9 +1992,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -2003,7 +2018,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; @@ -2018,9 +2033,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -2155,21 +2171,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, * update accounting. This is shared with both the * grow-up and grow-down cases. 
*/ -static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) +static int acct_stack_growth(struct vm_area_struct *vma, + unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; struct rlimit *rlim = current->signal->rlim; - unsigned long new_start, actual_size; + unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, vma->vm_flags, grow)) return -ENOMEM; /* Stack limit test */ - actual_size = size; - if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) - actual_size -= PAGE_SIZE; - if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) return -ENOMEM; /* mlock limit tests */ @@ -2207,17 +2221,30 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *next; + unsigned long gap_addr; int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; /* Guard against wrapping around to address 0. */ - if (address < PAGE_ALIGN(address+4)) - address = PAGE_ALIGN(address+4); - else + address &= PAGE_MASK; + address += PAGE_SIZE; + if (!address) return -ENOMEM; + /* Enforce stack_guard_gap */ + gap_addr = address + stack_guard_gap; + if (gap_addr < address) + return -ENOMEM; + next = vma->vm_next; + if (next && next->vm_start < gap_addr) { + if (!(next->vm_flags & VM_GROWSUP)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; @@ -2261,7 +2288,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) if (vma->vm_next) vma_gap_update(vma->vm_next); else - mm->highest_vm_end = address; + mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); @@ -2282,6 +2309,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *prev; + unsigned long gap_addr; int error; address &= PAGE_MASK; @@ -2289,6 +2318,17 @@ int expand_downwards(struct vm_area_struct *vma, if (error) return error; + /* Enforce stack_guard_gap */ + gap_addr = address - stack_guard_gap; + if (gap_addr > address) + return -ENOMEM; + prev = vma->vm_prev; + if (prev && prev->vm_end > gap_addr) { + if (!(prev->vm_flags & VM_GROWSDOWN)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; @@ -2343,28 +2383,25 @@ int expand_downwards(struct vm_area_struct *vma, return error; } -/* - * Note how expand_stack() refuses to expand the stack all the way to - * abut the next virtual mapping, *unless* that mapping itself is also - * a stack mapping. We want to leave room for a guard page, after all - * (the guard page itself is not added here, that is done by the - * actual page faulting logic) - * - * This matches the behavior of the guard page logic (see mm/memory.c: - * check_stack_guard_page()), which only allows the guard page to be - * removed under these circumstances. - */ +/* enforced gap between the expanding stack and other mappings. 
*/ +unsigned long stack_guard_gap = 256UL< Cc: Miroslav Lichvar Link: http://lkml.kernel.org/r/1496965462-20003-3-git-send-email-john.stultz@linaro.org Signed-off-by: Thomas Gleixner --- include/linux/timekeeper_internal.h | 4 ++-- kernel/time/timekeeping.c | 19 ++++++++++--------- 2 files changed, 12 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index e9834ada4d0c..f7043ccca81c 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -57,7 +57,7 @@ struct tk_read_base { * interval. * @xtime_remainder: Shifted nano seconds left over when rounding * @cycle_interval - * @raw_interval: Raw nano seconds accumulated per NTP interval. + * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. * @ntp_error: Difference between accumulated time and NTP time in ntp * shifted nano seconds. * @ntp_error_shift: Shift conversion between clock shifted nano seconds and @@ -99,7 +99,7 @@ struct timekeeper { u64 cycle_interval; u64 xtime_interval; s64 xtime_remainder; - u32 raw_interval; + u64 raw_interval; /* The ntp_tick_length() value currently being used. * This cached copy ensures we consistently apply the tick * length for an entire tick, as ntp_tick_length may change diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index eff94cb8e89e..b602c48cb841 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -280,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) /* Go back from cycles -> shifted ns */ tk->xtime_interval = interval * clock->mult; tk->xtime_remainder = ntpinterval - tk->xtime_interval; - tk->raw_interval = (interval * clock->mult) >> clock->shift; + tk->raw_interval = interval * clock->mult; /* if changing clocks, convert xtime_nsec shift units */ if (old_clock) { @@ -1996,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset, u32 shift, unsigned int *clock_set) { u64 interval = tk->cycle_interval << shift; - u64 raw_nsecs; + u64 snsec_per_sec; /* If the offset is smaller than a shifted interval, do nothing */ if (offset < interval) @@ -2011,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset, *clock_set |= accumulate_nsecs_to_secs(tk); /* Accumulate raw time */ - raw_nsecs = (u64)tk->raw_interval << shift; - raw_nsecs += tk->raw_time.tv_nsec; - if (raw_nsecs >= NSEC_PER_SEC) { - u64 raw_secs = raw_nsecs; - raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); - tk->raw_time.tv_sec += raw_secs; + tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; + tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; + snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; + while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { + tk->tkr_raw.xtime_nsec -= snsec_per_sec; + tk->raw_time.tv_sec++; } - tk->raw_time.tv_nsec = raw_nsecs; + tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift; + tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; /* Accumulate error between NTP and clock interval */ tk->ntp_error += tk->ntp_tick << shift; -- cgit v1.2.3 From 8e8320c9315c47a6a090188720ccff32a6a6ba18 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 20 Jun 2017 17:56:13 -0600 Subject: blk-mq: fix performance regression with shared tags If we have shared tags enabled, then every IO completion will trigger a full loop of every queue belonging to a tag set, and every hardware queue 
for each of those queues, even if nothing needs to be done. This causes a massive performance regression if you have a lot of shared devices. Instead of doing this huge full scan on every IO, add an atomic counter to the main queue that tracks how many hardware queues have been marked as needing a restart. With that, we can avoid looking for restartable queues, if we don't have to. Max reports that this restores performance. Before this patch, 4K IOPS was limited to 22-23K IOPS. With the patch, we are running at 950-970K IOPS. Fixes: 6d8c6c0f97ad ("blk-mq: Restart a single queue if tag sets are shared") Reported-by: Max Gurtovoy Tested-by: Max Gurtovoy Reviewed-by: Bart Van Assche Tested-by: Bart Van Assche Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 58 +++++++++++++++++++++++++++++++++++++++----------- block/blk-mq-sched.h | 9 -------- block/blk-mq.c | 16 +++++++++++--- include/linux/blkdev.h | 2 ++ 4 files changed, 61 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 1f5b692526ae..0ded5e846335 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q, __blk_mq_sched_assign_ioc(q, rq, bio, ioc); } +/* + * Mark a hardware queue as needing a restart. For shared queues, maintain + * a count of how many hardware queues are marked for restart. + */ +static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) +{ + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + return; + + if (hctx->flags & BLK_MQ_F_TAG_SHARED) { + struct request_queue *q = hctx->queue; + + if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_inc(&q->shared_hctx_restart); + } else + set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); +} + +static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) +{ + if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + return false; + + if (hctx->flags & BLK_MQ_F_TAG_SHARED) { + struct request_queue *q = hctx->queue; + + if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_dec(&q->shared_hctx_restart); + } else + clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); + + if (blk_mq_hctx_has_pending(hctx)) { + blk_mq_run_hw_queue(hctx, true); + return true; + } + + return false; +} + struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, @@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, return true; } -static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) -{ - if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) { - clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); - if (blk_mq_hctx_has_pending(hctx)) { - blk_mq_run_hw_queue(hctx, true); - return true; - } - } - return false; -} - /** * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list * @pos: loop cursor. @@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx) unsigned int i, j; if (set->flags & BLK_MQ_F_TAG_SHARED) { + /* + * If this is 0, then we know that no hardware queues + * have RESTART marked. We're done. 
+ */ + if (!atomic_read(&queue->shared_hctx_restart)) + return; + rcu_read_lock(); list_for_each_entry_rcu_rr(q, queue, &set->tag_list, tag_set_list) { diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index edafb5383b7b..5007edece51a 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) return false; } -/* - * Mark a hardware queue as needing a restart. - */ -static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) -{ - if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) - set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); -} - static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) { return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); diff --git a/block/blk-mq.c b/block/blk-mq.c index bb66c96850b1..958cedaff8b8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q, } } +/* + * Caller needs to ensure that we're either frozen/quiesced, or that + * the queue isn't live yet. + */ static void queue_set_hctx_shared(struct request_queue *q, bool shared) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) { - if (shared) + if (shared) { + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_inc(&q->shared_hctx_restart); hctx->flags |= BLK_MQ_F_TAG_SHARED; - else + } else { + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_dec(&q->shared_hctx_restart); hctx->flags &= ~BLK_MQ_F_TAG_SHARED; + } } } -static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared) +static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, + bool shared) { struct request_queue *q; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b74a3edcb3da..1ddd36bd2173 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -391,6 +391,8 @@ struct request_queue { int nr_rqs[2]; /* # allocated [a]sync rqs */ int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ + atomic_t shared_hctx_restart; + struct blk_queue_stats *stats; struct rq_wb *rq_wb; -- cgit v1.2.3 From 3b7b314053d021601940c50b07f5f1423ae67e21 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 23 Jun 2017 15:08:52 -0700 Subject: slub: make sysfs file removal asynchronous Commit bf5eb3de3847 ("slub: separate out sysfs_slab_release() from sysfs_slab_remove()") made slub sysfs file removals synchronous to kmem_cache shutdown. Unfortunately, this created a possible ABBA deadlock between slab_mutex and sysfs draining mechanism triggering the following lockdep warning. ====================================================== [ INFO: possible circular locking dependency detected ] 4.10.0-test+ #48 Not tainted ------------------------------------------------------- rmmod/1211 is trying to acquire lock: (s_active#120){++++.+}, at: [] kernfs_remove+0x23/0x40 but task is already holding lock: (slab_mutex){+.+.+.}, at: [] kmem_cache_destroy+0x41/0x2d0 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (slab_mutex){+.+.+.}: lock_acquire+0xf6/0x1f0 __mutex_lock+0x75/0x950 mutex_lock_nested+0x1b/0x20 slab_attr_store+0x75/0xd0 sysfs_kf_write+0x45/0x60 kernfs_fop_write+0x13c/0x1c0 __vfs_write+0x28/0x120 vfs_write+0xc8/0x1e0 SyS_write+0x49/0xa0 entry_SYSCALL_64_fastpath+0x1f/0xc2 -> #0 (s_active#120){++++.+}: __lock_acquire+0x10ed/0x1260 lock_acquire+0xf6/0x1f0 __kernfs_remove+0x254/0x320 kernfs_remove+0x23/0x40 sysfs_remove_dir+0x51/0x80 kobject_del+0x18/0x50 __kmem_cache_shutdown+0x3e6/0x460 kmem_cache_destroy+0x1fb/0x2d0 kvm_exit+0x2d/0x80 [kvm] vmx_exit+0x19/0xa1b [kvm_intel] SyS_delete_module+0x198/0x1f0 entry_SYSCALL_64_fastpath+0x1f/0xc2 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(slab_mutex); lock(s_active#120); lock(slab_mutex); lock(s_active#120); *** DEADLOCK *** 2 locks held by rmmod/1211: #0: (cpu_hotplug.dep_map){++++++}, at: [] get_online_cpus+0x37/0x80 #1: (slab_mutex){+.+.+.}, at: [] kmem_cache_destroy+0x41/0x2d0 stack backtrace: CPU: 3 PID: 1211 Comm: rmmod Not tainted 4.10.0-test+ #48 Hardware name: Hewlett-Packard HP Compaq Pro 6300 SFF/339A, BIOS K01 v02.05 05/07/2012 Call Trace: print_circular_bug+0x1be/0x210 __lock_acquire+0x10ed/0x1260 lock_acquire+0xf6/0x1f0 __kernfs_remove+0x254/0x320 kernfs_remove+0x23/0x40 sysfs_remove_dir+0x51/0x80 kobject_del+0x18/0x50 __kmem_cache_shutdown+0x3e6/0x460 kmem_cache_destroy+0x1fb/0x2d0 kvm_exit+0x2d/0x80 [kvm] vmx_exit+0x19/0xa1b [kvm_intel] SyS_delete_module+0x198/0x1f0 ? SyS_delete_module+0x5/0x1f0 entry_SYSCALL_64_fastpath+0x1f/0xc2 It'd be the cleanest to deal with the issue by removing sysfs files without holding slab_mutex before the rest of shutdown; however, given the current code structure, it is pretty difficult to do so. This patch punts sysfs file removal to a work item. Before commit bf5eb3de3847, the removal was punted to a RCU delayed work item which is executed after release. Now, we're punting to a different work item on shutdown which still maintains the goal removing the sysfs files earlier when destroying kmem_caches. 
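The mechanism is small: take a reference on the kobject, hand the actual sysfs teardown to a work item, and drop the reference once that work has run. A rough sketch of the pattern, using hypothetical my_cache names rather than the actual slub structures touched in the hunks below:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>

struct my_cache {
	struct kobject kobj;
	struct work_struct kobj_remove_work;
};

/*
 * Runs from the workqueue, i.e. without slab_mutex held, so the kernfs
 * draining done by kobject_del() can no longer deadlock against it.
 */
static void my_cache_remove_workfn(struct work_struct *work)
{
	struct my_cache *c =
		container_of(work, struct my_cache, kobj_remove_work);

	kobject_uevent(&c->kobj, KOBJ_REMOVE);
	kobject_del(&c->kobj);
	kobject_put(&c->kobj);	/* drop the reference taken when scheduling */
}

static void my_cache_sysfs_init(struct my_cache *c)
{
	INIT_WORK(&c->kobj_remove_work, my_cache_remove_workfn);
}

static void my_cache_sysfs_remove(struct my_cache *c)
{
	/* Pin the object until the deferred removal has actually run. */
	kobject_get(&c->kobj);
	schedule_work(&c->kobj_remove_work);
}

The caller that used to do the removal synchronously now only bumps the refcount and schedules the work, which is exactly the shape of the mm/slub.c change below.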
Link: http://lkml.kernel.org/r/20170620204512.GI21326@htj.duckdns.org Fixes: bf5eb3de3847 ("slub: separate out sysfs_slab_release() from sysfs_slab_remove()") Signed-off-by: Tejun Heo Reported-by: Steven Rostedt (VMware) Tested-by: Steven Rostedt (VMware) Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slub_def.h | 1 + mm/slub.c | 40 ++++++++++++++++++++++++++-------------- 2 files changed, 27 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 07ef550c6627..93315d6b21a8 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -84,6 +84,7 @@ struct kmem_cache { int red_left_pad; /* Left redzone padding size */ #ifdef CONFIG_SYSFS struct kobject kobj; /* For sysfs */ + struct work_struct kobj_remove_work; #endif #ifdef CONFIG_MEMCG struct memcg_cache_params memcg_params; diff --git a/mm/slub.c b/mm/slub.c index 7449593fca72..8addc535bcdc 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5625,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s) return name; } +static void sysfs_slab_remove_workfn(struct work_struct *work) +{ + struct kmem_cache *s = + container_of(work, struct kmem_cache, kobj_remove_work); + + if (!s->kobj.state_in_sysfs) + /* + * For a memcg cache, this may be called during + * deactivation and again on shutdown. Remove only once. + * A cache is never shut down before deactivation is + * complete, so no need to worry about synchronization. + */ + return; + +#ifdef CONFIG_MEMCG + kset_unregister(s->memcg_kset); +#endif + kobject_uevent(&s->kobj, KOBJ_REMOVE); + kobject_del(&s->kobj); + kobject_put(&s->kobj); +} + static int sysfs_slab_add(struct kmem_cache *s) { int err; @@ -5632,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s) struct kset *kset = cache_kset(s); int unmergeable = slab_unmergeable(s); + INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn); + if (!kset) { kobject_init(&s->kobj, &slab_ktype); return 0; @@ -5695,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s) */ return; - if (!s->kobj.state_in_sysfs) - /* - * For a memcg cache, this may be called during - * deactivation and again on shutdown. Remove only once. - * A cache is never shut down before deactivation is - * complete, so no need to worry about synchronization. - */ - return; - -#ifdef CONFIG_MEMCG - kset_unregister(s->memcg_kset); -#endif - kobject_uevent(&s->kobj, KOBJ_REMOVE); - kobject_del(&s->kobj); + kobject_get(&s->kobj); + schedule_work(&s->kobj_remove_work); } void sysfs_slab_release(struct kmem_cache *s) -- cgit v1.2.3
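As a footnote to the blk-mq patch above: the reason the cheap atomic_read() fast path is safe is that, for shared-tag queues, the per-hctx RESTART bit and the per-queue counter are only ever updated together via test_and_set/clear_bit. A stripped-down sketch of that counting pattern, with hypothetical my_queue/my_hctx types standing in for the real request_queue and blk_mq_hw_ctx:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_HCTX_RESTART	0	/* bit number in my_hctx->state */

struct my_queue {
	atomic_t shared_restart;	/* how many hctxs currently carry the bit */
};

struct my_hctx {
	unsigned long state;
	struct my_queue *q;
};

static void my_mark_restart(struct my_hctx *hctx)
{
	/* test_and_set_bit() keeps the bit and the counter in sync under races. */
	if (!test_and_set_bit(MY_HCTX_RESTART, &hctx->state))
		atomic_inc(&hctx->q->shared_restart);
}

static void my_clear_restart(struct my_hctx *hctx)
{
	if (test_and_clear_bit(MY_HCTX_RESTART, &hctx->state))
		atomic_dec(&hctx->q->shared_restart);
}

static bool my_restart_needed(struct my_queue *q)
{
	/* Fast path on completion: nothing marked, nothing to scan. */
	return atomic_read(&q->shared_restart) != 0;
}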