From 5f66f73b9ff4dcabd4e2405ba9c32e80e02f9408 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 9 Mar 2021 09:39:03 +0300 Subject: coccinelle: misc: add minmax script Check for opencoded min(), max() implementations. Signed-off-by: Denis Efremov Signed-off-by: Julia Lawall --- scripts/coccinelle/misc/minmax.cocci | 206 +++++++++++++++++++++++++++++++++++ 1 file changed, 206 insertions(+) create mode 100644 scripts/coccinelle/misc/minmax.cocci (limited to 'scripts') diff --git a/scripts/coccinelle/misc/minmax.cocci b/scripts/coccinelle/misc/minmax.cocci new file mode 100644 index 000000000000..eccdd3eb3452 --- /dev/null +++ b/scripts/coccinelle/misc/minmax.cocci @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0-only +/// +/// Check for opencoded min(), max() implementations. +/// Generated patches sometimes require adding a cast to fix compile warning. +/// Warnings/patches scope intentionally limited to a function body. +/// +// Confidence: Medium +// Copyright: (C) 2021 Denis Efremov ISPRAS +// Options: --no-includes --include-headers +// +// Keywords: min, max +// + + +virtual report +virtual org +virtual context +virtual patch + +@rmax depends on !patch@ +identifier func; +expression x, y; +binary operator cmp = {>, >=}; +position p; +@@ + +func(...) +{ + <... +* ((x) cmp@p (y) ? (x) : (y)) + ...> +} + +@rmaxif depends on !patch@ +identifier func; +expression x, y; +expression max_val; +binary operator cmp = {>, >=}; +position p; +@@ + +func(...) +{ + <... +* if ((x) cmp@p (y)) { +* max_val = (x); +* } else { +* max_val = (y); +* } + ...> +} + +@rmin depends on !patch@ +identifier func; +expression x, y; +binary operator cmp = {<, <=}; +position p; +@@ + +func(...) +{ + <... +* ((x) cmp@p (y) ? (x) : (y)) + ...> +} + +@rminif depends on !patch@ +identifier func; +expression x, y; +expression min_val; +binary operator cmp = {<, <=}; +position p; +@@ + +func(...) +{ + <... +* if ((x) cmp@p (y)) { +* min_val = (x); +* } else { +* min_val = (y); +* } + ...> +} + +@pmax depends on patch@ +identifier func; +expression x, y; +binary operator cmp = {>=, >}; +@@ + +func(...) +{ + <... +- ((x) cmp (y) ? (x) : (y)) ++ max(x, y) + ...> +} + +@pmaxif depends on patch@ +identifier func; +expression x, y; +expression max_val; +binary operator cmp = {>=, >}; +@@ + +func(...) +{ + <... +- if ((x) cmp (y)) { +- max_val = x; +- } else { +- max_val = y; +- } ++ max_val = max(x, y); + ...> +} + +@pmin depends on patch@ +identifier func; +expression x, y; +binary operator cmp = {<=, <}; +@@ + +func(...) +{ + <... +- ((x) cmp (y) ? (x) : (y)) ++ min(x, y) + ...> +} + +@pminif depends on patch@ +identifier func; +expression x, y; +expression min_val; +binary operator cmp = {<=, <}; +@@ + +func(...) +{ + <... 
+- if ((x) cmp (y)) {
+- min_val = (x);
+- } else {
+- min_val = (y);
+- }
++ min_val = min(x, y);
+ ...>
+}
+
+@script:python depends on report@
+p << rmax.p;
+@@
+
+for p0 in p:
+ coccilib.report.print_report(p0, "WARNING opportunity for max()")
+
+@script:python depends on org@
+p << rmax.p;
+@@
+
+for p0 in p:
+ coccilib.org.print_todo(p0, "WARNING opportunity for max()")
+
+@script:python depends on report@
+p << rmaxif.p;
+@@
+
+for p0 in p:
+ coccilib.report.print_report(p0, "WARNING opportunity for max()")
+
+@script:python depends on org@
+p << rmaxif.p;
+@@
+
+for p0 in p:
+ coccilib.org.print_todo(p0, "WARNING opportunity for max()")
+
+@script:python depends on report@
+p << rmin.p;
+@@
+
+for p0 in p:
+ coccilib.report.print_report(p0, "WARNING opportunity for min()")
+
+@script:python depends on org@
+p << rmin.p;
+@@
+
+for p0 in p:
+ coccilib.org.print_todo(p0, "WARNING opportunity for min()")
+
+@script:python depends on report@
+p << rminif.p;
+@@
+
+for p0 in p:
+ coccilib.report.print_report(p0, "WARNING opportunity for min()")
+
+@script:python depends on org@
+p << rminif.p;
+@@
+
+for p0 in p:
+ coccilib.org.print_todo(p0, "WARNING opportunity for min()")
--
cgit v1.2.3


From 3afb532b19df3238dede98b184bc8852517f206a Mon Sep 17 00:00:00 2001
From: Denis Efremov
Date: Mon, 8 Mar 2021 22:12:15 +0300
Subject: coccinelle: misc: restrict patch mode in flexible_array.cocci

Skip patch generation for structs with a single field.

Changing a zero-length array to a flexible array member in a struct
with no named members breaks the compilation. However, reporting such
cases is still valuable, e.g. commit 637464c59e0b ("ACPI: NFIT: Fix
flexible_array.cocci warnings").

Signed-off-by: Denis Efremov
Signed-off-by: Julia Lawall
---
 scripts/coccinelle/misc/flexible_array.cocci | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

(limited to 'scripts')

diff --git a/scripts/coccinelle/misc/flexible_array.cocci b/scripts/coccinelle/misc/flexible_array.cocci
index 947fbaff82a9..f427fd68ed2d 100644
--- a/scripts/coccinelle/misc/flexible_array.cocci
+++ b/scripts/coccinelle/misc/flexible_array.cocci
@@ -51,21 +51,40 @@ position p : script:python() { relevant(p) };
 };
 )

+@only_field depends on patch@
+identifier name, array;
+type T;
+position q;
+@@
+
+(
+struct name {@q
+  T array[0];
+};
+|
+struct {@q
+  T array[0];
+};
+)
+
 @depends on patch@
 identifier name, array;
 type T;
 position p : script:python() { relevant(p) };
+// position @q with rule "only_field" simplifies
+// handling of bitfields, arrays, etc.
+position q != only_field.q;
 @@

 (
-  struct name {
+  struct name {@q
   ...
   T array@p[
-  0
   ];
 };
|
-  struct {
+  struct {@q
   ...
   T array@p[
-  0
--
cgit v1.2.3


From cb62732d3bf0cd4c136d5927b003f002ff658e1c Mon Sep 17 00:00:00 2001
From: Denis Efremov
Date: Mon, 8 Mar 2021 10:30:18 +0300
Subject: coccinelle: misc: update uninitialized_var.cocci documentation

Remove the documentation link from the warning message because commit
3942ea7a10c9 ("deprecated.rst: Remove now removed uninitialized_var")
removed the section from documentation. Update the rule documentation
accordingly.
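For illustration, this is the kind of code the rule flags (a
hypothetical snippet; uninitialized_var(x) used to expand to exactly
this self-assignment):

	/* flagged: reintroduces uninitialized_var() implicitly */
	int ret = ret;

	/* preferred: just use an ordinary initializer */
	int ret = 0;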
Signed-off-by: Denis Efremov Signed-off-by: Julia Lawall --- scripts/coccinelle/misc/uninitialized_var.cocci | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'scripts') diff --git a/scripts/coccinelle/misc/uninitialized_var.cocci b/scripts/coccinelle/misc/uninitialized_var.cocci index 8fa845cefe11..69bbaae47e73 100644 --- a/scripts/coccinelle/misc/uninitialized_var.cocci +++ b/scripts/coccinelle/misc/uninitialized_var.cocci @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only /// /// Please, don't reintroduce uninitialized_var(). -/// From Documentation/process/deprecated.rst: +/// +/// From Documentation/process/deprecated.rst, +/// commit 4b19bec97c88 ("docs: deprecated.rst: Add uninitialized_var()"): /// For any compiler warnings about uninitialized variables, just add /// an initializer. Using warning-silencing tricks is dangerous as it /// papers over real bugs (or can in the future), and suppresses unrelated @@ -11,6 +13,11 @@ /// obviously redundant, the compiler's dead-store elimination pass will make /// sure there are no needless variable writes. /// +/// Later, commit 3942ea7a10c9 ("deprecated.rst: Remove now removed +/// uninitialized_var") removed this section because all initializations of +/// this kind were cleaned-up from the kernel. This cocci rule checks that +/// the macro is not explicitly or implicitly reintroduced. +/// // Confidence: High // Copyright: (C) 2020 Denis Efremov ISPRAS // Options: --no-includes --include-headers @@ -40,12 +47,10 @@ position p; p << r.p; @@ -coccilib.report.print_report(p[0], - "WARNING this kind of initialization is deprecated (https://www.kernel.org/doc/html/latest/process/deprecated.html#uninitialized-var)") +coccilib.report.print_report(p[0], "WARNING this kind of initialization is deprecated") @script:python depends on org@ p << r.p; @@ -coccilib.org.print_todo(p[0], - "WARNING this kind of initialization is deprecated (https://www.kernel.org/doc/html/latest/process/deprecated.html#uninitialized-var)") +coccilib.org.print_todo(p[0], "WARNING this kind of initialization is deprecated") -- cgit v1.2.3 From 7845daa8bd72efa8bbc1de122edfce6e058bbe41 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 5 Mar 2021 13:09:56 +0300 Subject: coccinelle: misc: add swap script Check for opencoded swap() implementation. Signed-off-by: Denis Efremov Signed-off-by: Julia Lawall --- scripts/coccinelle/misc/swap.cocci | 122 +++++++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 scripts/coccinelle/misc/swap.cocci (limited to 'scripts') diff --git a/scripts/coccinelle/misc/swap.cocci b/scripts/coccinelle/misc/swap.cocci new file mode 100644 index 000000000000..c5e71b7ef7f5 --- /dev/null +++ b/scripts/coccinelle/misc/swap.cocci @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0-only +/// +/// Check for opencoded swap() implementation. +/// +// Confidence: High +// Copyright: (C) 2021 Denis Efremov ISPRAS +// Options: --no-includes --include-headers +// +// Keywords: swap +// + +virtual patch +virtual org +virtual report +virtual context + +@rvar depends on !patch@ +identifier tmp; +expression a, b; +type T; +position p; +@@ + +( +* T tmp; +| +* T tmp = 0; +| +* T *tmp = NULL; +) +... when != tmp +* tmp = a; +* a = b;@p +* b = tmp; +... 
when != tmp
+
+@r depends on !patch@
+identifier tmp;
+expression a, b;
+position p != rvar.p;
+@@
+
+* tmp = a;
+* a = b;@p
+* b = tmp;
+
+@rpvar depends on patch@
+identifier tmp;
+expression a, b;
+type T;
+@@
+
+(
+- T tmp;
+|
+- T tmp = 0;
+|
+- T *tmp = NULL;
+)
+... when != tmp
+- tmp = a;
+- a = b;
+- b = tmp
++ swap(a, b)
+ ;
+... when != tmp
+
+@rp depends on patch@
+identifier tmp;
+expression a, b;
+@@
+
+- tmp = a;
+- a = b;
+- b = tmp
++ swap(a, b)
+ ;
+
+@depends on patch && (rpvar || rp)@
+@@
+
+(
+ for (...;...;...)
+- {
+ swap(...);
+- }
+|
+ while (...)
+- {
+ swap(...);
+- }
+|
+ if (...)
+- {
+ swap(...);
+- }
+)
+
+
+@script:python depends on report@
+p << r.p;
+@@
+
+coccilib.report.print_report(p[0], "WARNING opportunity for swap()")
+
+@script:python depends on org@
+p << r.p;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING opportunity for swap()")
+
+@script:python depends on report@
+p << rvar.p;
+@@
+
+coccilib.report.print_report(p[0], "WARNING opportunity for swap()")
+
+@script:python depends on org@
+p << rvar.p;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING opportunity for swap()")
--
cgit v1.2.3


From 5d2db9bb5f8a850d037983f0df72ad59cefa9e3d Mon Sep 17 00:00:00 2001
From: Krzysztof Kozlowski
Date: Fri, 23 Apr 2021 12:00:33 +0200
Subject: coccinelle: irqf_oneshot: reduce the severity due to false positives

IRQF_ONESHOT should be present for a threaded IRQ that uses the
default primary handler. However, the interrupts of many child
devices, e.g. children of an MFD, are nested, so IRQF_ONESHOT is not
needed there. The coccinelle error message misleads submitters and
reviewers about the severity of the issue, so make it a warning and
mention the possible false positive.

Signed-off-by: Krzysztof Kozlowski
Signed-off-by: Julia Lawall
---
 scripts/coccinelle/misc/irqf_oneshot.cocci | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'scripts')

diff --git a/scripts/coccinelle/misc/irqf_oneshot.cocci b/scripts/coccinelle/misc/irqf_oneshot.cocci
index 7b48287b3dc1..9b6f404d07f2 100644
--- a/scripts/coccinelle/misc/irqf_oneshot.cocci
+++ b/scripts/coccinelle/misc/irqf_oneshot.cocci
@@ -103,11 +103,11 @@ devm_request_threaded_irq@p(dev, irq, NULL, ...)
 @script:python depends on org@
 p << match.p;
 @@

-msg = "ERROR: Threaded IRQ with no primary handler requested without IRQF_ONESHOT"
+msg = "WARNING: Threaded IRQ with no primary handler requested without IRQF_ONESHOT (unless it is nested IRQ)"
 coccilib.org.print_todo(p[0],msg)

 @script:python depends on report@
 p << match.p;
 @@

-msg = "ERROR: Threaded IRQ with no primary handler requested without IRQF_ONESHOT"
+msg = "WARNING: Threaded IRQ with no primary handler requested without IRQF_ONESHOT (unless it is nested IRQ)"
 coccilib.report.print_report(p[0],msg)
--
cgit v1.2.3


From 32c465613959248a8db8a1458d65a266411ddccc Mon Sep 17 00:00:00 2001
From: Julia Lawall
Date: Wed, 28 Apr 2021 09:21:26 +0200
Subject: drop unneeded *s

Kfree.cocci only supports the org and report modes, so the *s (used
for context mode) are not useful.
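For context, a minimal sketch of the use-after-free pattern the script
reports (hypothetical code, not from the tree); in report/org mode
only the match positions are printed:

	kfree(skb);
	...
	return skb->len; /* kfree.cocci: 'skb' used after kfree */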
Signed-off-by: Julia Lawall
---
 scripts/coccinelle/free/kfree.cocci | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'scripts')

diff --git a/scripts/coccinelle/free/kfree.cocci b/scripts/coccinelle/free/kfree.cocci
index 168568386034..9b6e2037c2a9 100644
--- a/scripts/coccinelle/free/kfree.cocci
+++ b/scripts/coccinelle/free/kfree.cocci
@@ -22,9 +22,9 @@ position p1;
 @@

 (
-* kfree@p1(E)
+ kfree@p1(E)
|
-* kfree_sensitive@p1(E)
+ kfree_sensitive@p1(E)
 )

 @print expression@
@@ -66,9 +66,9 @@ position ok;
 while (1) { ...
 (
-* kfree@ok(E)
+ kfree@ok(E)
|
-* kfree_sensitive@ok(E)
+ kfree_sensitive@ok(E)
 )
 ... when != break;
     when != goto l;
@@ -84,9 +84,9 @@ position free.p1!=loop.ok,p2!={print.p,sz.p};
 @@

 (
-* kfree@p1(E,...)
+ kfree@p1(E,...)
|
-* kfree_sensitive@p1(E,...)
+ kfree_sensitive@p1(E,...)
 )
 ...
 (
--
cgit v1.2.3


From aeb300c1dbfc77b493728f608dd14d6814676546 Mon Sep 17 00:00:00 2001
From: Denis Efremov
Date: Wed, 28 Apr 2021 09:03:50 +0300
Subject: coccinelle: misc: minmax: suppress patch generation for err returns

There is a standard idiom for "if 'ret' holds an error, return it":

	return ret < 0 ? ret : 0;

Developers prefer to keep things as they are, because the stylistic
change to "return min(ret, 0);" hurts readability. Let's suppress
automatic generation of this type of patch.

Signed-off-by: Denis Efremov
---
 scripts/coccinelle/misc/minmax.cocci | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

(limited to 'scripts')

diff --git a/scripts/coccinelle/misc/minmax.cocci b/scripts/coccinelle/misc/minmax.cocci
index eccdd3eb3452..fcf908b34f27 100644
--- a/scripts/coccinelle/misc/minmax.cocci
+++ b/scripts/coccinelle/misc/minmax.cocci
@@ -116,16 +116,32 @@ func(...)
 ...>
 }

+// Don't generate patches for errcode returns.
+@errcode depends on patch@
+position p;
+identifier func;
+expression x;
+binary operator cmp = {<, <=};
+@@
+
+func(...)
+{
+ <...
+ return ((x) cmp@p 0 ? (x) : 0);
+ ...>
+}
+
 @pmin depends on patch@
 identifier func;
 expression x, y;
 binary operator cmp = {<=, <};
+position p != errcode.p;
 @@

 func(...)
 {
 <...
-- ((x) cmp (y) ? (x) : (y))
+- ((x) cmp@p (y) ? (x) : (y))
+ min(x, y)
 ...>
 }
--
cgit v1.2.3


From e86bdb24375a810ea7993d64ed406a803db71225 Mon Sep 17 00:00:00 2001
From: Aditya Srivastava
Date: Fri, 14 May 2021 20:12:44 +0530
Subject: scripts: kernel-doc: reduce repeated regex expressions into variables

Some regular expressions in the kernel-doc script are used repeatedly
throughout the script. Reduce such expressions into variables, which
can be used everywhere.

A quick manual check found that no errors or warnings were added or
removed in this process.
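As an illustration, the consolidated patterns still have to parse the
usual kernel-doc shapes, e.g. this hypothetical comment and prototype
(a form matched by $type1/$name/$prototype_end):

	/**
	 * foo_set_mode() - configure the mode of a foo device
	 * @dev: device to configure
	 * @mode: requested mode
	 *
	 * Return: 0 on success, a negative errno on failure.
	 */
	int foo_set_mode(struct foo_device *dev, int mode);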
Suggested-by: Jonathan Corbet Signed-off-by: Aditya Srivastava Link: https://lore.kernel.org/r/20210514144244.25341-1-yashsri421@gmail.com Signed-off-by: Jonathan Corbet --- scripts/kernel-doc | 71 ++++++++++++++++++++++++++---------------------------- 1 file changed, 34 insertions(+), 37 deletions(-) (limited to 'scripts') diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 4840e748fca8..7c4a6a507ac4 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -406,6 +406,8 @@ my $doc_inline_sect = '\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)'; my $doc_inline_end = '^\s*\*/\s*$'; my $doc_inline_oneline = '^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$'; my $export_symbol = '^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*;'; +my $function_pointer = qr{([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)}; +my $attribute = qr{__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)}i; my %parameterdescs; my %parameterdesc_start_lines; @@ -694,7 +696,7 @@ sub output_function_man(%) { $post = ");"; } $type = $args{'parametertypes'}{$parameter}; - if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { + if ($type =~ m/$function_pointer/) { # pointer-to-function print ".BI \"" . $parenth . $1 . "\" " . " \") (" . $2 . ")" . $post . "\"\n"; } else { @@ -974,7 +976,7 @@ sub output_function_rst(%) { $count++; $type = $args{'parametertypes'}{$parameter}; - if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { + if ($type =~ m/$function_pointer/) { # pointer-to-function print $1 . $parameter . ") (" . $2 . ")"; } else { @@ -1211,7 +1213,9 @@ sub dump_struct($$) { my $members; my $type = qr{struct|union}; # For capturing struct/union definition body, i.e. "{members*}qualifiers*" - my $definition_body = qr{\{(.*)\}(?:\s*(?:__packed|__aligned|____cacheline_aligned_in_smp|____cacheline_aligned|__attribute__\s*\(\([a-z0-9,_\s\(\)]*\)\)))*}; + my $qualifiers = qr{$attribute|__packed|__aligned|____cacheline_aligned_in_smp|____cacheline_aligned}; + my $definition_body = qr{\{(.*)\}\s*$qualifiers*}; + my $struct_members = qr{($type)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;}; if ($x =~ /($type)\s+(\w+)\s*$definition_body/) { $decl_type = $1; @@ -1235,27 +1239,27 @@ sub dump_struct($$) { # strip comments: $members =~ s/\/\*.*?\*\///gos; # strip attributes - $members =~ s/\s*__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)/ /gi; + $members =~ s/\s*$attribute/ /gi; $members =~ s/\s*__aligned\s*\([^;]*\)/ /gos; $members =~ s/\s*__packed\s*/ /gos; $members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos; $members =~ s/\s*____cacheline_aligned_in_smp/ /gos; $members =~ s/\s*____cacheline_aligned/ /gos; + my $args = qr{([^,)]+)}; # replace DECLARE_BITMAP $members =~ s/__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)/DECLARE_BITMAP($1, __ETHTOOL_LINK_MODE_MASK_NBITS)/gos; - $members =~ s/DECLARE_BITMAP\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos; + $members =~ s/DECLARE_BITMAP\s*\($args,\s*$args\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos; # replace DECLARE_HASHTABLE - $members =~ s/DECLARE_HASHTABLE\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[1 << (($2) - 1)\]/gos; + $members =~ s/DECLARE_HASHTABLE\s*\($args,\s*$args\)/unsigned long $1\[1 << (($2) - 1)\]/gos; # replace DECLARE_KFIFO - $members =~ s/DECLARE_KFIFO\s*\(([^,)]+),\s*([^,)]+),\s*([^,)]+)\)/$2 \*$1/gos; + $members =~ s/DECLARE_KFIFO\s*\($args,\s*$args,\s*$args\)/$2 \*$1/gos; # replace DECLARE_KFIFO_PTR - $members =~ s/DECLARE_KFIFO_PTR\s*\(([^,)]+),\s*([^,)]+)\)/$2 \*$1/gos; - + $members =~ s/DECLARE_KFIFO_PTR\s*\($args,\s*$args\)/$2 \*$1/gos; my $declaration = $members; # Split nested struct/union 
elements as newer ones - while ($members =~ m/(struct|union)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;/) { + while ($members =~ m/$struct_members/) { my $newmember; my $maintype = $1; my $ids = $4; @@ -1315,7 +1319,7 @@ sub dump_struct($$) { } } } - $members =~ s/(struct|union)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;/$newmember/; + $members =~ s/$struct_members/$newmember/; } # Ignore other nested elements, like enums @@ -1555,8 +1559,9 @@ sub create_parameterlist($$$$) { my $param; # temporarily replace commas inside function pointer definition - while ($args =~ /(\([^\),]+),/) { - $args =~ s/(\([^\),]+),/$1#/g; + my $arg_expr = qr{\([^\),]+}; + while ($args =~ /$arg_expr,/) { + $args =~ s/($arg_expr),/$1#/g; } foreach my $arg (split($splitter, $args)) { @@ -1707,7 +1712,7 @@ sub check_sections($$$$$) { foreach $px (0 .. $#prms) { $prm_clean = $prms[$px]; $prm_clean =~ s/\[.*\]//; - $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i; + $prm_clean =~ s/$attribute//i; # ignore array size in a parameter string; # however, the original param string may contain # spaces, e.g.: addr[6 + 2] @@ -1809,8 +1814,14 @@ sub dump_function($$) { # - parport_register_device (function pointer parameters) # - atomic_set (macro) # - pci_match_device, __copy_to_user (long return type) - - if ($define && $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s+/) { + my $name = qr{[a-zA-Z0-9_~:]+}; + my $prototype_end1 = qr{[^\(]*}; + my $prototype_end2 = qr{[^\{]*}; + my $prototype_end = qr{\(($prototype_end1|$prototype_end2)\)}; + my $type1 = qr{[\w\s]+}; + my $type2 = qr{$type1\*+}; + + if ($define && $prototype =~ m/^()($name)\s+/) { # This is an object-like macro, it has no return type and no parameter # list. # Function-like macros are not allowed to have spaces between @@ -1818,23 +1829,9 @@ sub dump_function($$) { $return_type = $1; $declaration_name = $2; $noret = 1; - } elsif ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s*\*+\s*\w+\s*\*+\s*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/) { + } elsif ($prototype =~ m/^()($name)\s*$prototype_end/ || + $prototype =~ m/^($type1)\s+($name)\s*$prototype_end/ || + $prototype =~ m/^($type2+)\s*($name)\s*$prototype_end/) { $return_type = $1; $declaration_name = $2; my $args = $3; @@ -2111,12 +2108,12 @@ sub process_name($$) { } elsif (/$doc_decl/o) { $identifier = $1; my $is_kernel_comment = 0; - my $decl_start = qr{\s*\*}; + my $decl_start = qr{$doc_com}; # 
test for pointer declaration type, foo * bar() - desc
 my $fn_type = qr{\w+\s*\*\s*};
 my $parenthesis = qr{\(\w*\)};
 my $decl_end = qr{[-:].*};
-	if (/^$decl_start\s*([\w\s]+?)$parenthesis?\s*$decl_end?$/) {
+	if (/^$decl_start([\w\s]+?)$parenthesis?\s*$decl_end?$/) {
 	    $identifier = $1;
 	}
 	if ($identifier =~ m/^(struct|union|enum|typedef)\b\s*(\S*)/) {
@@ -2126,8 +2123,8 @@ sub process_name($$) {
 	}
 	# Look for foo() or static void foo() - description; or misspelt
 	# identifier
-	elsif (/^$decl_start\s*$fn_type?(\w+)\s*$parenthesis?\s*$decl_end?$/ ||
-	    /^$decl_start\s*$fn_type?(\w+.*)$parenthesis?\s*$decl_end$/) {
+	elsif (/^$decl_start$fn_type?(\w+)\s*$parenthesis?\s*$decl_end?$/ ||
+	    /^$decl_start$fn_type?(\w+.*)$parenthesis?\s*$decl_end$/) {
 	    $identifier = $1;
 	    $decl_type = 'function';
 	    $identifier =~ s/^define\s+//;
--
cgit v1.2.3


From f5b3553b5019f22ac668651ea9cddb9fa675ac41 Mon Sep 17 00:00:00 2001
From: Mauro Carvalho Chehab
Date: Tue, 18 May 2021 11:05:26 +0200
Subject: scripts: coccicheck: fix troubles on non-English builds
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When LANG is not set to English, the logic which checks the number of
CPUs fails, as the messages can be localized, and the logic at:

	THREADS_PER_CORE=$(lscpu | grep "Thread(s) per core: " | tr -cd "[:digit:]")

will not get the number of threads per core. This causes the script
to not run properly, as it produces a warning:

	$ make coccicheck COCCI=$PWD/scripts/coccinelle/misc/add_namespace.cocci MODE=report drivers/media/
	./scripts/coccicheck: linha 93: [: número excessivo de argumentos

Fix it by forcing LANG=C when calling lscpu.

Signed-off-by: Mauro Carvalho Chehab
Signed-off-by: Julia Lawall
---
 scripts/coccicheck | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'scripts')

diff --git a/scripts/coccicheck b/scripts/coccicheck
index 65fee63aeadb..caba0bff6da7 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -87,7 +87,7 @@ else
 fi

 # Use only one thread per core by default if hyperthreading is enabled
-THREADS_PER_CORE=$(lscpu | grep "Thread(s) per core: " | tr -cd "[:digit:]")
+THREADS_PER_CORE=$(LANG=C lscpu | grep "Thread(s) per core: " | tr -cd "[:digit:]")
 if [ -z "$J" ]; then
 	NPROC=$(getconf _NPROCESSORS_ONLN)
 	if [ $THREADS_PER_CORE -gt 1 -a $NPROC -gt 4 ] ; then
--
cgit v1.2.3


From 8852c552402979508fdc395ae07aa8761aa46045 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Tue, 18 May 2021 18:59:15 -0500
Subject: kbuild: Fix objtool dependency for 'OBJECT_FILES_NON_STANDARD_<obj> := n'

"OBJECT_FILES_NON_STANDARD_vma.o := n" has a dependency bug. When the
objtool source is updated, the affected object doesn't get re-analyzed
by objtool.

Peter's new variable-sized jump label feature relies on objtool
rewriting the object file. Otherwise the system can fail to boot. That
effectively upgrades this minor dependency issue to a major bug.

The problem is that variables in prerequisites are expanded early,
during the read-in phase. The '$(objtool_dep)' variable indirectly
uses '$@', which isn't yet available when the target prerequisites are
evaluated.

Use '.SECONDEXPANSION:', which causes '$(objtool_dep)' to be expanded
in a later phase, after the target-specific '$@' variable has been
defined.
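A minimal sketch of the difference (simplified from the rule changed
below):

	# read-in phase: $(objtool_dep) is expanded while $@ is still
	# empty, so the per-object override is evaluated incorrectly
	$(obj)/%.o: $(src)/%.c $(objtool_dep) FORCE

	# secondary expansion: $$(objtool_dep) survives the read-in
	# phase and is expanded again after $@ is set for each target
	.SECONDEXPANSION:
	$(obj)/%.o: $(src)/%.c $$(objtool_dep) FORCE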
Fixes: b9ab5ebb14ec ("objtool: Add CONFIG_STACK_VALIDATION option")
Fixes: ab3257042c26 ("jump_label, x86: Allow short NOPs")
Reported-by: Matthew Wilcox
Signed-off-by: Josh Poimboeuf
---
 scripts/Makefile.build | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'scripts')

diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 949f723efe53..34d257653fb4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -268,7 +268,8 @@ define rule_as_o_S
 endef

 # Built-in and composite module parts
-$(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE
+.SECONDEXPANSION:
+$(obj)/%.o: $(src)/%.c $(recordmcount_source) $$(objtool_dep) FORCE
 	$(call if_changed_rule,cc_o_c)
 	$(call cmd,force_checksrc)

@@ -349,7 +350,7 @@ cmd_modversions_S = \
 	fi
 endif

-$(obj)/%.o: $(src)/%.S $(objtool_dep) FORCE
+$(obj)/%.o: $(src)/%.S $$(objtool_dep) FORCE
 	$(call if_changed_rule,as_o_S)

 targets += $(filter-out $(subdir-builtin), $(real-obj-y))
--
cgit v1.2.3


From 0d989ac2c90b5f51fe12102d3cddf54b959f2014 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Wed, 12 May 2021 15:52:01 +0900
Subject: kbuild: remove libelf checks from top Makefile

I do not see a good reason why only the libelf development package
must be so carefully checked. Kbuild generally does not check host
tools or libraries. For example, x86_64 defconfig fails to build with
no libssl development package installed.

scripts/extract-cert.c:21:10: fatal error: openssl/bio.h: No such file or directory
   21 | #include <openssl/bio.h>
      |          ^~~~~~~~~~~~~~~

To solve the build error, you need to install the libssl-dev or
openssl-devel package, depending on your distribution. 'apt-file
search', 'dnf provides', etc. are your friends for finding the proper
package to install.

This commit removes all the libelf checks from the top Makefile. If
libelf is missing, objtool will fail to build with a similar error:

.../linux/tools/objtool/include/objtool/elf.h:10:10: fatal error: gelf.h: No such file or directory
   10 | #include <gelf.h>

You need to install libelf-dev, libelf-devel, or elfutils-libelf-devel
to proceed.

Another remarkable change: CONFIG_STACK_VALIDATION (without
CONFIG_UNWINDER_ORC) previously continued to build with a warning, but
now missing libelf is treated as an error. This is just a one-time
installation, so it should not hurt to break the build and make the
user install the package.

BTW, the traditional way to handle such checks is autotools, but
according to [1], I do not expect the kernel build would have similar
scripting like './configure' does.
[1]: https://lore.kernel.org/lkml/CA+55aFzr2HTZVOuzpHYDwmtRJLsVzE-yqg2DHpHi_9ePsYp5ug@mail.gmail.com/ Signed-off-by: Masahiro Yamada Acked-by: Andrii Nakryiko --- Makefile | 78 +++++++++++++---------------------------------- scripts/Makefile.build | 2 -- scripts/Makefile.modfinal | 2 -- 3 files changed, 22 insertions(+), 60 deletions(-) (limited to 'scripts') diff --git a/Makefile b/Makefile index 034a36ece49d..b0bd0ce0aab1 100644 --- a/Makefile +++ b/Makefile @@ -1081,41 +1081,6 @@ export INSTALL_DTBS_PATH ?= $(INSTALL_PATH)/dtbs/$(KERNELRELEASE) MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE) export MODLIB -HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf) - -has_libelf = $(call try-run,\ - echo "int main() {}" | $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) - -ifdef CONFIG_STACK_VALIDATION - ifeq ($(has_libelf),1) - objtool_target := tools/objtool FORCE - else - SKIP_STACK_VALIDATION := 1 - export SKIP_STACK_VALIDATION - endif -endif - -PHONY += resolve_btfids_clean - -resolve_btfids_O = $(abspath $(objtree))/tools/bpf/resolve_btfids - -# tools/bpf/resolve_btfids directory might not exist -# in output directory, skip its clean in that case -resolve_btfids_clean: -ifneq ($(wildcard $(resolve_btfids_O)),) - $(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean -endif - -ifdef CONFIG_BPF -ifdef CONFIG_DEBUG_INFO_BTF - ifeq ($(has_libelf),1) - resolve_btfids_target := tools/bpf/resolve_btfids FORCE - else - ERROR_RESOLVE_BTFIDS := 1 - endif -endif # CONFIG_DEBUG_INFO_BTF -endif # CONFIG_BPF - PHONY += prepare0 export extmod_prefix = $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/) @@ -1227,7 +1192,7 @@ prepare0: archprepare $(Q)$(MAKE) $(build)=. # All the preparing.. 
-prepare: prepare0 prepare-objtool prepare-resolve_btfids +prepare: prepare0 PHONY += remove-stale-files remove-stale-files: @@ -1244,26 +1209,6 @@ uapi-asm-generic: $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \ generic=include/uapi/asm-generic -PHONY += prepare-objtool prepare-resolve_btfids -prepare-objtool: $(objtool_target) -ifeq ($(SKIP_STACK_VALIDATION),1) -ifdef CONFIG_FTRACE_MCOUNT_USE_OBJTOOL - @echo "error: Cannot generate __mcount_loc for CONFIG_DYNAMIC_FTRACE=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 - @false -endif -ifdef CONFIG_UNWINDER_ORC - @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 - @false -else - @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 -endif -endif - -prepare-resolve_btfids: $(resolve_btfids_target) -ifeq ($(ERROR_RESOLVE_BTFIDS),1) - @echo "error: Cannot resolve BTF IDs for CONFIG_DEBUG_INFO_BTF, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 - @false -endif # Generate some files # --------------------------------------------------------------------------- @@ -1354,6 +1299,27 @@ scripts_unifdef: scripts_basic # --------------------------------------------------------------------------- # Tools +ifdef CONFIG_STACK_VALIDATION +prepare: tools/objtool +endif + +ifdef CONFIG_BPF +ifdef CONFIG_DEBUG_INFO_BTF +prepare: tools/bpf/resolve_btfids +endif +endif + +PHONY += resolve_btfids_clean + +resolve_btfids_O = $(abspath $(objtree))/tools/bpf/resolve_btfids + +# tools/bpf/resolve_btfids directory might not exist +# in output directory, skip its clean in that case +resolve_btfids_clean: +ifneq ($(wildcard $(resolve_btfids_O)),) + $(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean +endif + # Clear a bunch of variables before executing the submake ifeq ($(quiet),silent_) tools_silent=s diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 949f723efe53..7adc3a2c3c31 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -219,7 +219,6 @@ endif # CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT ifdef CONFIG_STACK_VALIDATION ifndef CONFIG_LTO_CLANG -ifneq ($(SKIP_STACK_VALIDATION),1) __objtool_obj := $(objtree)/tools/objtool/objtool @@ -233,7 +232,6 @@ objtool_obj = $(if $(patsubst y%,, \ $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ $(__objtool_obj)) -endif # SKIP_STACK_VALIDATION endif # CONFIG_LTO_CLANG endif # CONFIG_STACK_VALIDATION diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal index dd87cea9fba7..bdee3babc5cf 100644 --- a/scripts/Makefile.modfinal +++ b/scripts/Makefile.modfinal @@ -39,12 +39,10 @@ prelink-ext := .lto # so let's now process the prelinked binary before we link the module. ifdef CONFIG_STACK_VALIDATION -ifneq ($(SKIP_STACK_VALIDATION),1) cmd_ld_ko_o += \ $(objtree)/tools/objtool/objtool $(objtool_args) \ $(@:.ko=$(prelink-ext).o); -endif # SKIP_STACK_VALIDATION endif # CONFIG_STACK_VALIDATION endif # CONFIG_LTO_CLANG -- cgit v1.2.3 From 3c1885187bc1faa0a1c52f7bd34550740a208169 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 25 May 2021 15:02:31 +0100 Subject: locking/atomic: delete !ARCH_ATOMIC remnants Now that all architectures implement ARCH_ATOMIC, we can make it mandatory, removing the Kconfig symbol and logic for !ARCH_ATOMIC. There should be no functional change as a result of this patch. 
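To illustrate the shape of the change (a simplified sketch, not the
full patch): the asm-generic headers now only supply the arch_-prefixed
operations, e.g.

	#define arch_atomic_read(v)	READ_ONCE((v)->counter)

and the unprefixed atomic_read() and friends are provided uniformly
for every architecture by the generated instrumentation wrappers
included from <linux/atomic.h>.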
Signed-off-by: Mark Rutland Acked-by: Geert Uytterhoeven Cc: Boqun Feng Cc: Peter Zijlstra Cc: Will Deacon Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210525140232.53872-33-mark.rutland@arm.com --- arch/Kconfig | 3 - arch/alpha/Kconfig | 1 - arch/arc/Kconfig | 1 - arch/arm/Kconfig | 1 - arch/arm64/Kconfig | 1 - arch/csky/Kconfig | 1 - arch/h8300/Kconfig | 1 - arch/hexagon/Kconfig | 1 - arch/ia64/Kconfig | 1 - arch/m68k/Kconfig | 1 - arch/microblaze/Kconfig | 1 - arch/mips/Kconfig | 1 - arch/nds32/Kconfig | 1 - arch/nios2/Kconfig | 1 - arch/openrisc/Kconfig | 1 - arch/parisc/Kconfig | 1 - arch/powerpc/Kconfig | 1 - arch/riscv/Kconfig | 1 - arch/s390/Kconfig | 1 - arch/sh/Kconfig | 1 - arch/sparc/Kconfig | 1 - arch/um/Kconfig | 1 - arch/x86/Kconfig | 1 - arch/xtensa/Kconfig | 1 - include/asm-generic/atomic.h | 44 +- include/asm-generic/atomic64.h | 29 - include/asm-generic/cmpxchg.h | 21 - include/linux/atomic-fallback.h | 2595 --------------------------------------- include/linux/atomic.h | 4 - scripts/atomic/check-atomics.sh | 1 - scripts/atomic/gen-atomics.sh | 1 - 31 files changed, 3 insertions(+), 2718 deletions(-) delete mode 100644 include/linux/atomic-fallback.h (limited to 'scripts') diff --git a/arch/Kconfig b/arch/Kconfig index 3fb3b12d4a95..c45b770d3579 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -11,9 +11,6 @@ source "arch/$(SRCARCH)/Kconfig" menu "General architecture-dependent options" -config ARCH_ATOMIC - bool - config CRASH_CORE bool diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 7920fc2e2a2a..5998106faa60 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -2,7 +2,6 @@ config ALPHA bool default y - select ARCH_ATOMIC select ARCH_32BIT_USTAT_F_TINODE select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 098ecc72d048..2d98501c0897 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -6,7 +6,6 @@ config ARC def_bool y select ARC_TIMERS - select ARCH_ATOMIC select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b7334a6643b9..24804f11302d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -3,7 +3,6 @@ config ARM bool default y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_HAS_BINFMT_FLAT select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 62ab429d1f42..9f1d8566bbf9 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -9,7 +9,6 @@ config ARM64 select ACPI_MCFG if (ACPI && PCI) select ACPI_SPCR_TABLE if ACPI select ACPI_PPTT if ACPI - select ARCH_ATOMIC select ARCH_HAS_DEBUG_WX select ARCH_BINFMT_ELF_STATE select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 3521f14bcd96..8de5b987edb9 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -2,7 +2,6 @@ config CSKY def_bool y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index bdf05ad3206a..3e3e0f16f7e0 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -2,7 +2,6 @@ config H8300 def_bool y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_HAS_BINFMT_FLAT select BINFMT_FLAT_ARGVP_ENVP_ON_STACK select BINFMT_FLAT_OLD_ALWAYS_RAM diff --git 
a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 1368954ef679..44a409967af1 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -5,7 +5,6 @@ comment "Linux Kernel Configuration for Hexagon" config HEXAGON def_bool y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_PREEMPT # Other pending projects/to-do items. diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index c5414dcd5d0d..279252e3e0f7 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -8,7 +8,6 @@ menu "Processor type and features" config IA64 bool - select ARCH_ATOMIC select ARCH_HAS_DMA_MARK_CLEAN select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index d1d91ac47f51..372e4e69c43a 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -3,7 +3,6 @@ config M68K bool default y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_HAS_BINFMT_FLAT select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 5a52922dc225..0660f47012bc 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -2,7 +2,6 @@ config MICROBLAZE def_bool y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_NO_SWAP select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 55b4da96872f..ed51970c08e7 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -3,7 +3,6 @@ config MIPS bool default y select ARCH_32BIT_OFF_T if !64BIT - select ARCH_ATOMIC select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT select ARCH_HAS_DEBUG_VIRTUAL if !64BIT select ARCH_HAS_FORTIFY_SOURCE diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 352913573aee..62313902d75d 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -7,7 +7,6 @@ config NDS32 def_bool y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 67dae88c5b53..c24955c81c92 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -2,7 +2,6 @@ config NIOS2 def_bool y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 8c50bc9674f5..591acc5990dc 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -7,7 +7,6 @@ config OPENRISC def_bool y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_HAS_DMA_SET_UNCACHED select ARCH_HAS_DMA_CLEAR_UNCACHED select ARCH_HAS_SYNC_DMA_FOR_DEVICE diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index bfa120a4add1..bde9907bc5b2 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -2,7 +2,6 @@ config PARISC def_bool y select ARCH_32BIT_OFF_T if !64BIT - select ARCH_ATOMIC select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_IDE select HAVE_FUNCTION_TRACER diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index d143c2b616f0..088dd2afcfe4 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -118,7 +118,6 @@ config PPC # Please keep this list sorted alphabetically. 
# select ARCH_32BIT_OFF_T if PPC32 - select ARCH_ATOMIC select ARCH_ENABLE_MEMORY_HOTPLUG select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_HAS_COPY_MC if PPC64 diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index c59b9f4a9d62..a8ad8eb76120 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -12,7 +12,6 @@ config 32BIT config RISCV def_bool y - select ARCH_ATOMIC select ARCH_CLOCKSOURCE_INIT select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 85374a36c69e..b4c7c34069f8 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -58,7 +58,6 @@ config S390 # Note: keep this list sorted alphabetically # imply IMA_SECURE_AND_OR_TRUSTED_BOOT - select ARCH_ATOMIC select ARCH_32BIT_USTAT_F_TINODE select ARCH_BINFMT_ELF_STATE select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index d2925cbb6fa4..68129537e350 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -2,7 +2,6 @@ config SUPERH def_bool y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM && MMU select ARCH_ENABLE_MEMORY_HOTREMOVE if SPARSEMEM && MMU select ARCH_HAVE_CUSTOM_GPIO_H diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 46790083e918..164a5254c91c 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -13,7 +13,6 @@ config 64BIT config SPARC bool default y - select ARCH_ATOMIC select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI select ARCH_MIGHT_HAVE_PC_SERIO select DMA_OPS diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 4370a9521ea4..57cfd9a1c082 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -5,7 +5,6 @@ menu "UML-specific options" config UML bool default y - select ARCH_ATOMIC select ARCH_EPHEMERAL_INODES select ARCH_HAS_KCOV select ARCH_NO_PREEMPT diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 11a27563033d..0045e1b44190 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -58,7 +58,6 @@ config X86 # select ACPI_LEGACY_TABLES_LOOKUP if ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI - select ARCH_ATOMIC select ARCH_32BIT_OFF_T if X86_32 select ARCH_CLOCKSOURCE_INIT select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 39bb9bdae6b1..2332b2156993 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -2,7 +2,6 @@ config XTENSA def_bool y select ARCH_32BIT_OFF_T - select ARCH_ATOMIC select ARCH_HAS_BINFMT_FLAT if !MMU select ARCH_HAS_DMA_PREP_COHERENT if MMU select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 649060fa0fe8..04b8be9f1a77 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -12,14 +12,6 @@ #include #include -#ifdef CONFIG_ARCH_ATOMIC -#define __ga_cmpxchg arch_cmpxchg -#define __ga_xchg arch_xchg -#else -#define __ga_cmpxchg cmpxchg -#define __ga_xchg xchg -#endif - #ifdef CONFIG_SMP /* we can build all atomic primitives from cmpxchg */ @@ -30,7 +22,7 @@ static inline void generic_atomic_##op(int i, atomic_t *v) \ int c, old; \ \ c = v->counter; \ - while ((old = __ga_cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ } @@ -40,7 +32,7 @@ static inline int generic_atomic_##op##_return(int i, atomic_t *v) \ int c, old; \ \ c = v->counter; \ - while ((old = __ga_cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = arch_cmpxchg(&v->counter, c, c c_op 
i)) != c) \ c = old; \ \ return c c_op i; \ @@ -52,7 +44,7 @@ static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \ int c, old; \ \ c = v->counter; \ - while ((old = __ga_cmpxchg(&v->counter, c, c c_op i)) != c) \ + while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \ c = old; \ \ return c; \ @@ -120,11 +112,6 @@ ATOMIC_OP(xor, ^) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#undef __ga_cmpxchg -#undef __ga_xchg - -#ifdef CONFIG_ARCH_ATOMIC - #define arch_atomic_add_return generic_atomic_add_return #define arch_atomic_sub_return generic_atomic_sub_return @@ -146,29 +133,4 @@ ATOMIC_OP(xor, ^) #define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v))) #define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new))) -#else /* CONFIG_ARCH_ATOMIC */ - -#define atomic_add_return generic_atomic_add_return -#define atomic_sub_return generic_atomic_sub_return - -#define atomic_fetch_add generic_atomic_fetch_add -#define atomic_fetch_sub generic_atomic_fetch_sub -#define atomic_fetch_and generic_atomic_fetch_and -#define atomic_fetch_or generic_atomic_fetch_or -#define atomic_fetch_xor generic_atomic_fetch_xor - -#define atomic_add generic_atomic_add -#define atomic_sub generic_atomic_sub -#define atomic_and generic_atomic_and -#define atomic_or generic_atomic_or -#define atomic_xor generic_atomic_xor - -#define atomic_read(v) READ_ONCE((v)->counter) -#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) - -#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) -#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) - -#endif /* CONFIG_ARCH_ATOMIC */ - #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index c8c7d9fae820..100d24b02e52 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -49,8 +49,6 @@ extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new); extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u); -#ifdef CONFIG_ARCH_ATOMIC - #define arch_atomic64_read generic_atomic64_read #define arch_atomic64_set generic_atomic64_set #define arch_atomic64_set_release generic_atomic64_set @@ -74,31 +72,4 @@ extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u); #define arch_atomic64_xchg generic_atomic64_xchg #define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless -#else /* CONFIG_ARCH_ATOMIC */ - -#define atomic64_read generic_atomic64_read -#define atomic64_set generic_atomic64_set -#define atomic64_set_release generic_atomic64_set - -#define atomic64_add generic_atomic64_add -#define atomic64_add_return generic_atomic64_add_return -#define atomic64_fetch_add generic_atomic64_fetch_add -#define atomic64_sub generic_atomic64_sub -#define atomic64_sub_return generic_atomic64_sub_return -#define atomic64_fetch_sub generic_atomic64_fetch_sub - -#define atomic64_and generic_atomic64_and -#define atomic64_fetch_and generic_atomic64_fetch_and -#define atomic64_or generic_atomic64_or -#define atomic64_fetch_or generic_atomic64_fetch_or -#define atomic64_xor generic_atomic64_xor -#define atomic64_fetch_xor generic_atomic64_fetch_xor - -#define atomic64_dec_if_positive generic_atomic64_dec_if_positive -#define atomic64_cmpxchg generic_atomic64_cmpxchg -#define atomic64_xchg generic_atomic64_xchg -#define atomic64_fetch_add_unless generic_atomic64_fetch_add_unless - -#endif /* CONFIG_ARCH_ATOMIC */ - #endif /* 
_ASM_GENERIC_ATOMIC64_H */ diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h index 98c931199089..dca4419922a9 100644 --- a/include/asm-generic/cmpxchg.h +++ b/include/asm-generic/cmpxchg.h @@ -97,8 +97,6 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size) __generic_cmpxchg64_local((ptr), (o), (n)) -#ifdef CONFIG_ARCH_ATOMIC - #ifndef arch_xchg #define arch_xchg generic_xchg #endif @@ -114,23 +112,4 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size) #define arch_cmpxchg arch_cmpxchg_local #define arch_cmpxchg64 arch_cmpxchg64_local -#else /* CONFIG_ARCH_ATOMIC */ - -#ifndef xchg -#define xchg generic_xchg -#endif - -#ifndef cmpxchg_local -#define cmpxchg_local generic_cmpxchg_local -#endif - -#ifndef cmpxchg64_local -#define cmpxchg64_local generic_cmpxchg64_local -#endif - -#define cmpxchg cmpxchg_local -#define cmpxchg64 cmpxchg64_local - -#endif /* CONFIG_ARCH_ATOMIC */ - #endif /* __ASM_GENERIC_CMPXCHG_H */ diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h deleted file mode 100644 index 2a3f55d98be9..000000000000 --- a/include/linux/atomic-fallback.h +++ /dev/null @@ -1,2595 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -// Generated by scripts/atomic/gen-atomic-fallback.sh -// DO NOT MODIFY THIS FILE DIRECTLY - -#ifndef _LINUX_ATOMIC_FALLBACK_H -#define _LINUX_ATOMIC_FALLBACK_H - -#include - -#ifndef xchg_relaxed -#define xchg_acquire xchg -#define xchg_release xchg -#define xchg_relaxed xchg -#else /* xchg_relaxed */ - -#ifndef xchg_acquire -#define xchg_acquire(...) \ - __atomic_op_acquire(xchg, __VA_ARGS__) -#endif - -#ifndef xchg_release -#define xchg_release(...) \ - __atomic_op_release(xchg, __VA_ARGS__) -#endif - -#ifndef xchg -#define xchg(...) \ - __atomic_op_fence(xchg, __VA_ARGS__) -#endif - -#endif /* xchg_relaxed */ - -#ifndef cmpxchg_relaxed -#define cmpxchg_acquire cmpxchg -#define cmpxchg_release cmpxchg -#define cmpxchg_relaxed cmpxchg -#else /* cmpxchg_relaxed */ - -#ifndef cmpxchg_acquire -#define cmpxchg_acquire(...) \ - __atomic_op_acquire(cmpxchg, __VA_ARGS__) -#endif - -#ifndef cmpxchg_release -#define cmpxchg_release(...) \ - __atomic_op_release(cmpxchg, __VA_ARGS__) -#endif - -#ifndef cmpxchg -#define cmpxchg(...) \ - __atomic_op_fence(cmpxchg, __VA_ARGS__) -#endif - -#endif /* cmpxchg_relaxed */ - -#ifndef cmpxchg64_relaxed -#define cmpxchg64_acquire cmpxchg64 -#define cmpxchg64_release cmpxchg64 -#define cmpxchg64_relaxed cmpxchg64 -#else /* cmpxchg64_relaxed */ - -#ifndef cmpxchg64_acquire -#define cmpxchg64_acquire(...) \ - __atomic_op_acquire(cmpxchg64, __VA_ARGS__) -#endif - -#ifndef cmpxchg64_release -#define cmpxchg64_release(...) \ - __atomic_op_release(cmpxchg64, __VA_ARGS__) -#endif - -#ifndef cmpxchg64 -#define cmpxchg64(...) 
\ - __atomic_op_fence(cmpxchg64, __VA_ARGS__) -#endif - -#endif /* cmpxchg64_relaxed */ - -#ifndef try_cmpxchg_relaxed -#ifdef try_cmpxchg -#define try_cmpxchg_acquire try_cmpxchg -#define try_cmpxchg_release try_cmpxchg -#define try_cmpxchg_relaxed try_cmpxchg -#endif /* try_cmpxchg */ - -#ifndef try_cmpxchg -#define try_cmpxchg(_ptr, _oldp, _new) \ -({ \ - typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = cmpxchg((_ptr), ___o, (_new)); \ - if (unlikely(___r != ___o)) \ - *___op = ___r; \ - likely(___r == ___o); \ -}) -#endif /* try_cmpxchg */ - -#ifndef try_cmpxchg_acquire -#define try_cmpxchg_acquire(_ptr, _oldp, _new) \ -({ \ - typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = cmpxchg_acquire((_ptr), ___o, (_new)); \ - if (unlikely(___r != ___o)) \ - *___op = ___r; \ - likely(___r == ___o); \ -}) -#endif /* try_cmpxchg_acquire */ - -#ifndef try_cmpxchg_release -#define try_cmpxchg_release(_ptr, _oldp, _new) \ -({ \ - typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = cmpxchg_release((_ptr), ___o, (_new)); \ - if (unlikely(___r != ___o)) \ - *___op = ___r; \ - likely(___r == ___o); \ -}) -#endif /* try_cmpxchg_release */ - -#ifndef try_cmpxchg_relaxed -#define try_cmpxchg_relaxed(_ptr, _oldp, _new) \ -({ \ - typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = cmpxchg_relaxed((_ptr), ___o, (_new)); \ - if (unlikely(___r != ___o)) \ - *___op = ___r; \ - likely(___r == ___o); \ -}) -#endif /* try_cmpxchg_relaxed */ - -#else /* try_cmpxchg_relaxed */ - -#ifndef try_cmpxchg_acquire -#define try_cmpxchg_acquire(...) \ - __atomic_op_acquire(try_cmpxchg, __VA_ARGS__) -#endif - -#ifndef try_cmpxchg_release -#define try_cmpxchg_release(...) \ - __atomic_op_release(try_cmpxchg, __VA_ARGS__) -#endif - -#ifndef try_cmpxchg -#define try_cmpxchg(...) 
\ - __atomic_op_fence(try_cmpxchg, __VA_ARGS__) -#endif - -#endif /* try_cmpxchg_relaxed */ - -#define arch_atomic_read atomic_read -#define arch_atomic_read_acquire atomic_read_acquire - -#ifndef atomic_read_acquire -static __always_inline int -atomic_read_acquire(const atomic_t *v) -{ - return smp_load_acquire(&(v)->counter); -} -#define atomic_read_acquire atomic_read_acquire -#endif - -#define arch_atomic_set atomic_set -#define arch_atomic_set_release atomic_set_release - -#ifndef atomic_set_release -static __always_inline void -atomic_set_release(atomic_t *v, int i) -{ - smp_store_release(&(v)->counter, i); -} -#define atomic_set_release atomic_set_release -#endif - -#define arch_atomic_add atomic_add - -#define arch_atomic_add_return atomic_add_return -#define arch_atomic_add_return_acquire atomic_add_return_acquire -#define arch_atomic_add_return_release atomic_add_return_release -#define arch_atomic_add_return_relaxed atomic_add_return_relaxed - -#ifndef atomic_add_return_relaxed -#define atomic_add_return_acquire atomic_add_return -#define atomic_add_return_release atomic_add_return -#define atomic_add_return_relaxed atomic_add_return -#else /* atomic_add_return_relaxed */ - -#ifndef atomic_add_return_acquire -static __always_inline int -atomic_add_return_acquire(int i, atomic_t *v) -{ - int ret = atomic_add_return_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_add_return_acquire atomic_add_return_acquire -#endif - -#ifndef atomic_add_return_release -static __always_inline int -atomic_add_return_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_add_return_relaxed(i, v); -} -#define atomic_add_return_release atomic_add_return_release -#endif - -#ifndef atomic_add_return -static __always_inline int -atomic_add_return(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_add_return_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_add_return atomic_add_return -#endif - -#endif /* atomic_add_return_relaxed */ - -#define arch_atomic_fetch_add atomic_fetch_add -#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire -#define arch_atomic_fetch_add_release atomic_fetch_add_release -#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed - -#ifndef atomic_fetch_add_relaxed -#define atomic_fetch_add_acquire atomic_fetch_add -#define atomic_fetch_add_release atomic_fetch_add -#define atomic_fetch_add_relaxed atomic_fetch_add -#else /* atomic_fetch_add_relaxed */ - -#ifndef atomic_fetch_add_acquire -static __always_inline int -atomic_fetch_add_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_add_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_add_acquire atomic_fetch_add_acquire -#endif - -#ifndef atomic_fetch_add_release -static __always_inline int -atomic_fetch_add_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_add_relaxed(i, v); -} -#define atomic_fetch_add_release atomic_fetch_add_release -#endif - -#ifndef atomic_fetch_add -static __always_inline int -atomic_fetch_add(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_add_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_add atomic_fetch_add -#endif - -#endif /* atomic_fetch_add_relaxed */ - -#define arch_atomic_sub atomic_sub - -#define arch_atomic_sub_return atomic_sub_return -#define arch_atomic_sub_return_acquire atomic_sub_return_acquire -#define 
arch_atomic_sub_return_release atomic_sub_return_release -#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed - -#ifndef atomic_sub_return_relaxed -#define atomic_sub_return_acquire atomic_sub_return -#define atomic_sub_return_release atomic_sub_return -#define atomic_sub_return_relaxed atomic_sub_return -#else /* atomic_sub_return_relaxed */ - -#ifndef atomic_sub_return_acquire -static __always_inline int -atomic_sub_return_acquire(int i, atomic_t *v) -{ - int ret = atomic_sub_return_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_sub_return_acquire atomic_sub_return_acquire -#endif - -#ifndef atomic_sub_return_release -static __always_inline int -atomic_sub_return_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_sub_return_relaxed(i, v); -} -#define atomic_sub_return_release atomic_sub_return_release -#endif - -#ifndef atomic_sub_return -static __always_inline int -atomic_sub_return(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_sub_return_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_sub_return atomic_sub_return -#endif - -#endif /* atomic_sub_return_relaxed */ - -#define arch_atomic_fetch_sub atomic_fetch_sub -#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire -#define arch_atomic_fetch_sub_release atomic_fetch_sub_release -#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed - -#ifndef atomic_fetch_sub_relaxed -#define atomic_fetch_sub_acquire atomic_fetch_sub -#define atomic_fetch_sub_release atomic_fetch_sub -#define atomic_fetch_sub_relaxed atomic_fetch_sub -#else /* atomic_fetch_sub_relaxed */ - -#ifndef atomic_fetch_sub_acquire -static __always_inline int -atomic_fetch_sub_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_sub_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire -#endif - -#ifndef atomic_fetch_sub_release -static __always_inline int -atomic_fetch_sub_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_sub_relaxed(i, v); -} -#define atomic_fetch_sub_release atomic_fetch_sub_release -#endif - -#ifndef atomic_fetch_sub -static __always_inline int -atomic_fetch_sub(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_sub_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_sub atomic_fetch_sub -#endif - -#endif /* atomic_fetch_sub_relaxed */ - -#define arch_atomic_inc atomic_inc - -#ifndef atomic_inc -static __always_inline void -atomic_inc(atomic_t *v) -{ - atomic_add(1, v); -} -#define atomic_inc atomic_inc -#endif - -#define arch_atomic_inc_return atomic_inc_return -#define arch_atomic_inc_return_acquire atomic_inc_return_acquire -#define arch_atomic_inc_return_release atomic_inc_return_release -#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed - -#ifndef atomic_inc_return_relaxed -#ifdef atomic_inc_return -#define atomic_inc_return_acquire atomic_inc_return -#define atomic_inc_return_release atomic_inc_return -#define atomic_inc_return_relaxed atomic_inc_return -#endif /* atomic_inc_return */ - -#ifndef atomic_inc_return -static __always_inline int -atomic_inc_return(atomic_t *v) -{ - return atomic_add_return(1, v); -} -#define atomic_inc_return atomic_inc_return -#endif - -#ifndef atomic_inc_return_acquire -static __always_inline int -atomic_inc_return_acquire(atomic_t *v) -{ - return atomic_add_return_acquire(1, v); -} -#define 
atomic_inc_return_acquire atomic_inc_return_acquire -#endif - -#ifndef atomic_inc_return_release -static __always_inline int -atomic_inc_return_release(atomic_t *v) -{ - return atomic_add_return_release(1, v); -} -#define atomic_inc_return_release atomic_inc_return_release -#endif - -#ifndef atomic_inc_return_relaxed -static __always_inline int -atomic_inc_return_relaxed(atomic_t *v) -{ - return atomic_add_return_relaxed(1, v); -} -#define atomic_inc_return_relaxed atomic_inc_return_relaxed -#endif - -#else /* atomic_inc_return_relaxed */ - -#ifndef atomic_inc_return_acquire -static __always_inline int -atomic_inc_return_acquire(atomic_t *v) -{ - int ret = atomic_inc_return_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_inc_return_acquire atomic_inc_return_acquire -#endif - -#ifndef atomic_inc_return_release -static __always_inline int -atomic_inc_return_release(atomic_t *v) -{ - __atomic_release_fence(); - return atomic_inc_return_relaxed(v); -} -#define atomic_inc_return_release atomic_inc_return_release -#endif - -#ifndef atomic_inc_return -static __always_inline int -atomic_inc_return(atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_inc_return_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_inc_return atomic_inc_return -#endif - -#endif /* atomic_inc_return_relaxed */ - -#define arch_atomic_fetch_inc atomic_fetch_inc -#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire -#define arch_atomic_fetch_inc_release atomic_fetch_inc_release -#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed - -#ifndef atomic_fetch_inc_relaxed -#ifdef atomic_fetch_inc -#define atomic_fetch_inc_acquire atomic_fetch_inc -#define atomic_fetch_inc_release atomic_fetch_inc -#define atomic_fetch_inc_relaxed atomic_fetch_inc -#endif /* atomic_fetch_inc */ - -#ifndef atomic_fetch_inc -static __always_inline int -atomic_fetch_inc(atomic_t *v) -{ - return atomic_fetch_add(1, v); -} -#define atomic_fetch_inc atomic_fetch_inc -#endif - -#ifndef atomic_fetch_inc_acquire -static __always_inline int -atomic_fetch_inc_acquire(atomic_t *v) -{ - return atomic_fetch_add_acquire(1, v); -} -#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire -#endif - -#ifndef atomic_fetch_inc_release -static __always_inline int -atomic_fetch_inc_release(atomic_t *v) -{ - return atomic_fetch_add_release(1, v); -} -#define atomic_fetch_inc_release atomic_fetch_inc_release -#endif - -#ifndef atomic_fetch_inc_relaxed -static __always_inline int -atomic_fetch_inc_relaxed(atomic_t *v) -{ - return atomic_fetch_add_relaxed(1, v); -} -#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed -#endif - -#else /* atomic_fetch_inc_relaxed */ - -#ifndef atomic_fetch_inc_acquire -static __always_inline int -atomic_fetch_inc_acquire(atomic_t *v) -{ - int ret = atomic_fetch_inc_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire -#endif - -#ifndef atomic_fetch_inc_release -static __always_inline int -atomic_fetch_inc_release(atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_inc_relaxed(v); -} -#define atomic_fetch_inc_release atomic_fetch_inc_release -#endif - -#ifndef atomic_fetch_inc -static __always_inline int -atomic_fetch_inc(atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_inc_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_inc atomic_fetch_inc -#endif - -#endif /* atomic_fetch_inc_relaxed */ - -#define 
arch_atomic_dec atomic_dec - -#ifndef atomic_dec -static __always_inline void -atomic_dec(atomic_t *v) -{ - atomic_sub(1, v); -} -#define atomic_dec atomic_dec -#endif - -#define arch_atomic_dec_return atomic_dec_return -#define arch_atomic_dec_return_acquire atomic_dec_return_acquire -#define arch_atomic_dec_return_release atomic_dec_return_release -#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed - -#ifndef atomic_dec_return_relaxed -#ifdef atomic_dec_return -#define atomic_dec_return_acquire atomic_dec_return -#define atomic_dec_return_release atomic_dec_return -#define atomic_dec_return_relaxed atomic_dec_return -#endif /* atomic_dec_return */ - -#ifndef atomic_dec_return -static __always_inline int -atomic_dec_return(atomic_t *v) -{ - return atomic_sub_return(1, v); -} -#define atomic_dec_return atomic_dec_return -#endif - -#ifndef atomic_dec_return_acquire -static __always_inline int -atomic_dec_return_acquire(atomic_t *v) -{ - return atomic_sub_return_acquire(1, v); -} -#define atomic_dec_return_acquire atomic_dec_return_acquire -#endif - -#ifndef atomic_dec_return_release -static __always_inline int -atomic_dec_return_release(atomic_t *v) -{ - return atomic_sub_return_release(1, v); -} -#define atomic_dec_return_release atomic_dec_return_release -#endif - -#ifndef atomic_dec_return_relaxed -static __always_inline int -atomic_dec_return_relaxed(atomic_t *v) -{ - return atomic_sub_return_relaxed(1, v); -} -#define atomic_dec_return_relaxed atomic_dec_return_relaxed -#endif - -#else /* atomic_dec_return_relaxed */ - -#ifndef atomic_dec_return_acquire -static __always_inline int -atomic_dec_return_acquire(atomic_t *v) -{ - int ret = atomic_dec_return_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_dec_return_acquire atomic_dec_return_acquire -#endif - -#ifndef atomic_dec_return_release -static __always_inline int -atomic_dec_return_release(atomic_t *v) -{ - __atomic_release_fence(); - return atomic_dec_return_relaxed(v); -} -#define atomic_dec_return_release atomic_dec_return_release -#endif - -#ifndef atomic_dec_return -static __always_inline int -atomic_dec_return(atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_dec_return_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_dec_return atomic_dec_return -#endif - -#endif /* atomic_dec_return_relaxed */ - -#define arch_atomic_fetch_dec atomic_fetch_dec -#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire -#define arch_atomic_fetch_dec_release atomic_fetch_dec_release -#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed - -#ifndef atomic_fetch_dec_relaxed -#ifdef atomic_fetch_dec -#define atomic_fetch_dec_acquire atomic_fetch_dec -#define atomic_fetch_dec_release atomic_fetch_dec -#define atomic_fetch_dec_relaxed atomic_fetch_dec -#endif /* atomic_fetch_dec */ - -#ifndef atomic_fetch_dec -static __always_inline int -atomic_fetch_dec(atomic_t *v) -{ - return atomic_fetch_sub(1, v); -} -#define atomic_fetch_dec atomic_fetch_dec -#endif - -#ifndef atomic_fetch_dec_acquire -static __always_inline int -atomic_fetch_dec_acquire(atomic_t *v) -{ - return atomic_fetch_sub_acquire(1, v); -} -#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire -#endif - -#ifndef atomic_fetch_dec_release -static __always_inline int -atomic_fetch_dec_release(atomic_t *v) -{ - return atomic_fetch_sub_release(1, v); -} -#define atomic_fetch_dec_release atomic_fetch_dec_release -#endif - -#ifndef atomic_fetch_dec_relaxed -static __always_inline int 
-atomic_fetch_dec_relaxed(atomic_t *v) -{ - return atomic_fetch_sub_relaxed(1, v); -} -#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed -#endif - -#else /* atomic_fetch_dec_relaxed */ - -#ifndef atomic_fetch_dec_acquire -static __always_inline int -atomic_fetch_dec_acquire(atomic_t *v) -{ - int ret = atomic_fetch_dec_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire -#endif - -#ifndef atomic_fetch_dec_release -static __always_inline int -atomic_fetch_dec_release(atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_dec_relaxed(v); -} -#define atomic_fetch_dec_release atomic_fetch_dec_release -#endif - -#ifndef atomic_fetch_dec -static __always_inline int -atomic_fetch_dec(atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_dec_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_dec atomic_fetch_dec -#endif - -#endif /* atomic_fetch_dec_relaxed */ - -#define arch_atomic_and atomic_and - -#define arch_atomic_fetch_and atomic_fetch_and -#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire -#define arch_atomic_fetch_and_release atomic_fetch_and_release -#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed - -#ifndef atomic_fetch_and_relaxed -#define atomic_fetch_and_acquire atomic_fetch_and -#define atomic_fetch_and_release atomic_fetch_and -#define atomic_fetch_and_relaxed atomic_fetch_and -#else /* atomic_fetch_and_relaxed */ - -#ifndef atomic_fetch_and_acquire -static __always_inline int -atomic_fetch_and_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_and_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_and_acquire atomic_fetch_and_acquire -#endif - -#ifndef atomic_fetch_and_release -static __always_inline int -atomic_fetch_and_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_and_relaxed(i, v); -} -#define atomic_fetch_and_release atomic_fetch_and_release -#endif - -#ifndef atomic_fetch_and -static __always_inline int -atomic_fetch_and(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_and_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_and atomic_fetch_and -#endif - -#endif /* atomic_fetch_and_relaxed */ - -#define arch_atomic_andnot atomic_andnot - -#ifndef atomic_andnot -static __always_inline void -atomic_andnot(int i, atomic_t *v) -{ - atomic_and(~i, v); -} -#define atomic_andnot atomic_andnot -#endif - -#define arch_atomic_fetch_andnot atomic_fetch_andnot -#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release -#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed - -#ifndef atomic_fetch_andnot_relaxed -#ifdef atomic_fetch_andnot -#define atomic_fetch_andnot_acquire atomic_fetch_andnot -#define atomic_fetch_andnot_release atomic_fetch_andnot -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot -#endif /* atomic_fetch_andnot */ - -#ifndef atomic_fetch_andnot -static __always_inline int -atomic_fetch_andnot(int i, atomic_t *v) -{ - return atomic_fetch_and(~i, v); -} -#define atomic_fetch_andnot atomic_fetch_andnot -#endif - -#ifndef atomic_fetch_andnot_acquire -static __always_inline int -atomic_fetch_andnot_acquire(int i, atomic_t *v) -{ - return atomic_fetch_and_acquire(~i, v); -} -#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#endif - -#ifndef 
atomic_fetch_andnot_release -static __always_inline int -atomic_fetch_andnot_release(int i, atomic_t *v) -{ - return atomic_fetch_and_release(~i, v); -} -#define atomic_fetch_andnot_release atomic_fetch_andnot_release -#endif - -#ifndef atomic_fetch_andnot_relaxed -static __always_inline int -atomic_fetch_andnot_relaxed(int i, atomic_t *v) -{ - return atomic_fetch_and_relaxed(~i, v); -} -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed -#endif - -#else /* atomic_fetch_andnot_relaxed */ - -#ifndef atomic_fetch_andnot_acquire -static __always_inline int -atomic_fetch_andnot_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_andnot_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#endif - -#ifndef atomic_fetch_andnot_release -static __always_inline int -atomic_fetch_andnot_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_andnot_relaxed(i, v); -} -#define atomic_fetch_andnot_release atomic_fetch_andnot_release -#endif - -#ifndef atomic_fetch_andnot -static __always_inline int -atomic_fetch_andnot(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_andnot_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_andnot atomic_fetch_andnot -#endif - -#endif /* atomic_fetch_andnot_relaxed */ - -#define arch_atomic_or atomic_or - -#define arch_atomic_fetch_or atomic_fetch_or -#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire -#define arch_atomic_fetch_or_release atomic_fetch_or_release -#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed - -#ifndef atomic_fetch_or_relaxed -#define atomic_fetch_or_acquire atomic_fetch_or -#define atomic_fetch_or_release atomic_fetch_or -#define atomic_fetch_or_relaxed atomic_fetch_or -#else /* atomic_fetch_or_relaxed */ - -#ifndef atomic_fetch_or_acquire -static __always_inline int -atomic_fetch_or_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_or_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_or_acquire atomic_fetch_or_acquire -#endif - -#ifndef atomic_fetch_or_release -static __always_inline int -atomic_fetch_or_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_or_relaxed(i, v); -} -#define atomic_fetch_or_release atomic_fetch_or_release -#endif - -#ifndef atomic_fetch_or -static __always_inline int -atomic_fetch_or(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_or_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_or atomic_fetch_or -#endif - -#endif /* atomic_fetch_or_relaxed */ - -#define arch_atomic_xor atomic_xor - -#define arch_atomic_fetch_xor atomic_fetch_xor -#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire -#define arch_atomic_fetch_xor_release atomic_fetch_xor_release -#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed - -#ifndef atomic_fetch_xor_relaxed -#define atomic_fetch_xor_acquire atomic_fetch_xor -#define atomic_fetch_xor_release atomic_fetch_xor -#define atomic_fetch_xor_relaxed atomic_fetch_xor -#else /* atomic_fetch_xor_relaxed */ - -#ifndef atomic_fetch_xor_acquire -static __always_inline int -atomic_fetch_xor_acquire(int i, atomic_t *v) -{ - int ret = atomic_fetch_xor_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire -#endif - -#ifndef atomic_fetch_xor_release -static __always_inline int 
-atomic_fetch_xor_release(int i, atomic_t *v) -{ - __atomic_release_fence(); - return atomic_fetch_xor_relaxed(i, v); -} -#define atomic_fetch_xor_release atomic_fetch_xor_release -#endif - -#ifndef atomic_fetch_xor -static __always_inline int -atomic_fetch_xor(int i, atomic_t *v) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_fetch_xor_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic_fetch_xor atomic_fetch_xor -#endif - -#endif /* atomic_fetch_xor_relaxed */ - -#define arch_atomic_xchg atomic_xchg -#define arch_atomic_xchg_acquire atomic_xchg_acquire -#define arch_atomic_xchg_release atomic_xchg_release -#define arch_atomic_xchg_relaxed atomic_xchg_relaxed - -#ifndef atomic_xchg_relaxed -#define atomic_xchg_acquire atomic_xchg -#define atomic_xchg_release atomic_xchg -#define atomic_xchg_relaxed atomic_xchg -#else /* atomic_xchg_relaxed */ - -#ifndef atomic_xchg_acquire -static __always_inline int -atomic_xchg_acquire(atomic_t *v, int i) -{ - int ret = atomic_xchg_relaxed(v, i); - __atomic_acquire_fence(); - return ret; -} -#define atomic_xchg_acquire atomic_xchg_acquire -#endif - -#ifndef atomic_xchg_release -static __always_inline int -atomic_xchg_release(atomic_t *v, int i) -{ - __atomic_release_fence(); - return atomic_xchg_relaxed(v, i); -} -#define atomic_xchg_release atomic_xchg_release -#endif - -#ifndef atomic_xchg -static __always_inline int -atomic_xchg(atomic_t *v, int i) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_xchg_relaxed(v, i); - __atomic_post_full_fence(); - return ret; -} -#define atomic_xchg atomic_xchg -#endif - -#endif /* atomic_xchg_relaxed */ - -#define arch_atomic_cmpxchg atomic_cmpxchg -#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire -#define arch_atomic_cmpxchg_release atomic_cmpxchg_release -#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed - -#ifndef atomic_cmpxchg_relaxed -#define atomic_cmpxchg_acquire atomic_cmpxchg -#define atomic_cmpxchg_release atomic_cmpxchg -#define atomic_cmpxchg_relaxed atomic_cmpxchg -#else /* atomic_cmpxchg_relaxed */ - -#ifndef atomic_cmpxchg_acquire -static __always_inline int -atomic_cmpxchg_acquire(atomic_t *v, int old, int new) -{ - int ret = atomic_cmpxchg_relaxed(v, old, new); - __atomic_acquire_fence(); - return ret; -} -#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire -#endif - -#ifndef atomic_cmpxchg_release -static __always_inline int -atomic_cmpxchg_release(atomic_t *v, int old, int new) -{ - __atomic_release_fence(); - return atomic_cmpxchg_relaxed(v, old, new); -} -#define atomic_cmpxchg_release atomic_cmpxchg_release -#endif - -#ifndef atomic_cmpxchg -static __always_inline int -atomic_cmpxchg(atomic_t *v, int old, int new) -{ - int ret; - __atomic_pre_full_fence(); - ret = atomic_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define atomic_cmpxchg atomic_cmpxchg -#endif - -#endif /* atomic_cmpxchg_relaxed */ - -#define arch_atomic_try_cmpxchg atomic_try_cmpxchg -#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire -#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release -#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed - -#ifndef atomic_try_cmpxchg_relaxed -#ifdef atomic_try_cmpxchg -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg -#define atomic_try_cmpxchg_release atomic_try_cmpxchg -#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg -#endif /* atomic_try_cmpxchg */ - -#ifndef atomic_try_cmpxchg -static __always_inline bool -atomic_try_cmpxchg(atomic_t 
*v, int *old, int new) -{ - int r, o = *old; - r = atomic_cmpxchg(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic_try_cmpxchg atomic_try_cmpxchg -#endif - -#ifndef atomic_try_cmpxchg_acquire -static __always_inline bool -atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) -{ - int r, o = *old; - r = atomic_cmpxchg_acquire(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire -#endif - -#ifndef atomic_try_cmpxchg_release -static __always_inline bool -atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) -{ - int r, o = *old; - r = atomic_cmpxchg_release(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release -#endif - -#ifndef atomic_try_cmpxchg_relaxed -static __always_inline bool -atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) -{ - int r, o = *old; - r = atomic_cmpxchg_relaxed(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed -#endif - -#else /* atomic_try_cmpxchg_relaxed */ - -#ifndef atomic_try_cmpxchg_acquire -static __always_inline bool -atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) -{ - bool ret = atomic_try_cmpxchg_relaxed(v, old, new); - __atomic_acquire_fence(); - return ret; -} -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire -#endif - -#ifndef atomic_try_cmpxchg_release -static __always_inline bool -atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) -{ - __atomic_release_fence(); - return atomic_try_cmpxchg_relaxed(v, old, new); -} -#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release -#endif - -#ifndef atomic_try_cmpxchg -static __always_inline bool -atomic_try_cmpxchg(atomic_t *v, int *old, int new) -{ - bool ret; - __atomic_pre_full_fence(); - ret = atomic_try_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define atomic_try_cmpxchg atomic_try_cmpxchg -#endif - -#endif /* atomic_try_cmpxchg_relaxed */ - -#define arch_atomic_sub_and_test atomic_sub_and_test - -#ifndef atomic_sub_and_test -/** - * atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -static __always_inline bool -atomic_sub_and_test(int i, atomic_t *v) -{ - return atomic_sub_return(i, v) == 0; -} -#define atomic_sub_and_test atomic_sub_and_test -#endif - -#define arch_atomic_dec_and_test atomic_dec_and_test - -#ifndef atomic_dec_and_test -/** - * atomic_dec_and_test - decrement and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __always_inline bool -atomic_dec_and_test(atomic_t *v) -{ - return atomic_dec_return(v) == 0; -} -#define atomic_dec_and_test atomic_dec_and_test -#endif - -#define arch_atomic_inc_and_test atomic_inc_and_test - -#ifndef atomic_inc_and_test -/** - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. 
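[ The atomic_try_cmpxchg() fallbacks above all follow one recipe: call the
old-value-returning atomic_cmpxchg(), write the observed value back through
*old when the exchange failed, and return whether it succeeded. A minimal
user-space sketch of the same conversion using C11 atomics rather than the
kernel helpers; all names here are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* cmpxchg-style primitive: returns the value observed in *v.  C11's
 * compare-exchange updates "old" in place on failure and leaves it
 * untouched on success, so returning it gives the observed value in
 * both cases. */
static int cmpxchg_int(_Atomic int *v, int old, int new)
{
	atomic_compare_exchange_strong(v, &old, new);
	return old;
}

/* try_cmpxchg-style wrapper, mirroring the fallback above: boolean
 * result, *old updated only when the exchange failed. */
static bool try_cmpxchg_int(_Atomic int *v, int *old, int new)
{
	int r, o = *old;

	r = cmpxchg_int(v, o, new);
	if (r != o)
		*old = r;
	return r == o;
}

int main(void)
{
	_Atomic int v = 5;
	int expected = 4;

	printf("%d\n", try_cmpxchg_int(&v, &expected, 9));	/* 0; expected becomes 5 */
	printf("%d\n", try_cmpxchg_int(&v, &expected, 9));	/* 1; v becomes 9 */
	return 0;
}
]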
- */ -static __always_inline bool -atomic_inc_and_test(atomic_t *v) -{ - return atomic_inc_return(v) == 0; -} -#define atomic_inc_and_test atomic_inc_and_test -#endif - -#define arch_atomic_add_negative atomic_add_negative - -#ifndef atomic_add_negative -/** - * atomic_add_negative - add and test if negative - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -static __always_inline bool -atomic_add_negative(int i, atomic_t *v) -{ - return atomic_add_return(i, v) < 0; -} -#define atomic_add_negative atomic_add_negative -#endif - -#define arch_atomic_fetch_add_unless atomic_fetch_add_unless - -#ifndef atomic_fetch_add_unless -/** - * atomic_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns original value of @v - */ -static __always_inline int -atomic_fetch_add_unless(atomic_t *v, int a, int u) -{ - int c = atomic_read(v); - - do { - if (unlikely(c == u)) - break; - } while (!atomic_try_cmpxchg(v, &c, c + a)); - - return c; -} -#define atomic_fetch_add_unless atomic_fetch_add_unless -#endif - -#define arch_atomic_add_unless atomic_add_unless - -#ifndef atomic_add_unless -/** - * atomic_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. - */ -static __always_inline bool -atomic_add_unless(atomic_t *v, int a, int u) -{ - return atomic_fetch_add_unless(v, a, u) != u; -} -#define atomic_add_unless atomic_add_unless -#endif - -#define arch_atomic_inc_not_zero atomic_inc_not_zero - -#ifndef atomic_inc_not_zero -/** - * atomic_inc_not_zero - increment unless the number is zero - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. 
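[ The atomic_fetch_add_unless() fallback just above is the canonical
try_cmpxchg() retry loop: read once, test the early-exit condition, and
retry the compare-exchange until it sticks, with a failed compare refreshing
the local copy for free. A rough user-space analogue in C11; the function
and variable names are mine, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

/* Add a to *v unless *v == u; return the value observed before the add. */
static int fetch_add_unless(_Atomic int *v, int a, int u)
{
	int c = atomic_load(v);

	do {
		if (c == u)
			break;
		/* on failure, c is reloaded with the current value, just as
		 * the kernel's atomic_try_cmpxchg() updates *old */
	} while (!atomic_compare_exchange_weak(v, &c, c + a));

	return c;
}

int main(void)
{
	_Atomic int refs = 1;

	printf("%d\n", fetch_add_unless(&refs, 1, 0));	/* 1: incremented */
	atomic_store(&refs, 0);
	printf("%d\n", fetch_add_unless(&refs, 1, 0));	/* 0: left alone */
	return 0;
}
]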
- */ -static __always_inline bool -atomic_inc_not_zero(atomic_t *v) -{ - return atomic_add_unless(v, 1, 0); -} -#define atomic_inc_not_zero atomic_inc_not_zero -#endif - -#define arch_atomic_inc_unless_negative atomic_inc_unless_negative - -#ifndef atomic_inc_unless_negative -static __always_inline bool -atomic_inc_unless_negative(atomic_t *v) -{ - int c = atomic_read(v); - - do { - if (unlikely(c < 0)) - return false; - } while (!atomic_try_cmpxchg(v, &c, c + 1)); - - return true; -} -#define atomic_inc_unless_negative atomic_inc_unless_negative -#endif - -#define arch_atomic_dec_unless_positive atomic_dec_unless_positive - -#ifndef atomic_dec_unless_positive -static __always_inline bool -atomic_dec_unless_positive(atomic_t *v) -{ - int c = atomic_read(v); - - do { - if (unlikely(c > 0)) - return false; - } while (!atomic_try_cmpxchg(v, &c, c - 1)); - - return true; -} -#define atomic_dec_unless_positive atomic_dec_unless_positive -#endif - -#define arch_atomic_dec_if_positive atomic_dec_if_positive - -#ifndef atomic_dec_if_positive -static __always_inline int -atomic_dec_if_positive(atomic_t *v) -{ - int dec, c = atomic_read(v); - - do { - dec = c - 1; - if (unlikely(dec < 0)) - break; - } while (!atomic_try_cmpxchg(v, &c, dec)); - - return dec; -} -#define atomic_dec_if_positive atomic_dec_if_positive -#endif - -#ifdef CONFIG_GENERIC_ATOMIC64 -#include -#endif - -#define arch_atomic64_read atomic64_read -#define arch_atomic64_read_acquire atomic64_read_acquire - -#ifndef atomic64_read_acquire -static __always_inline s64 -atomic64_read_acquire(const atomic64_t *v) -{ - return smp_load_acquire(&(v)->counter); -} -#define atomic64_read_acquire atomic64_read_acquire -#endif - -#define arch_atomic64_set atomic64_set -#define arch_atomic64_set_release atomic64_set_release - -#ifndef atomic64_set_release -static __always_inline void -atomic64_set_release(atomic64_t *v, s64 i) -{ - smp_store_release(&(v)->counter, i); -} -#define atomic64_set_release atomic64_set_release -#endif - -#define arch_atomic64_add atomic64_add - -#define arch_atomic64_add_return atomic64_add_return -#define arch_atomic64_add_return_acquire atomic64_add_return_acquire -#define arch_atomic64_add_return_release atomic64_add_return_release -#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed - -#ifndef atomic64_add_return_relaxed -#define atomic64_add_return_acquire atomic64_add_return -#define atomic64_add_return_release atomic64_add_return -#define atomic64_add_return_relaxed atomic64_add_return -#else /* atomic64_add_return_relaxed */ - -#ifndef atomic64_add_return_acquire -static __always_inline s64 -atomic64_add_return_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_add_return_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_add_return_acquire atomic64_add_return_acquire -#endif - -#ifndef atomic64_add_return_release -static __always_inline s64 -atomic64_add_return_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_add_return_relaxed(i, v); -} -#define atomic64_add_return_release atomic64_add_return_release -#endif - -#ifndef atomic64_add_return -static __always_inline s64 -atomic64_add_return(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_add_return_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_add_return atomic64_add_return -#endif - -#endif /* atomic64_add_return_relaxed */ - -#define arch_atomic64_fetch_add atomic64_fetch_add -#define arch_atomic64_fetch_add_acquire 
atomic64_fetch_add_acquire -#define arch_atomic64_fetch_add_release atomic64_fetch_add_release -#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed - -#ifndef atomic64_fetch_add_relaxed -#define atomic64_fetch_add_acquire atomic64_fetch_add -#define atomic64_fetch_add_release atomic64_fetch_add -#define atomic64_fetch_add_relaxed atomic64_fetch_add -#else /* atomic64_fetch_add_relaxed */ - -#ifndef atomic64_fetch_add_acquire -static __always_inline s64 -atomic64_fetch_add_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_add_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire -#endif - -#ifndef atomic64_fetch_add_release -static __always_inline s64 -atomic64_fetch_add_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_add_relaxed(i, v); -} -#define atomic64_fetch_add_release atomic64_fetch_add_release -#endif - -#ifndef atomic64_fetch_add -static __always_inline s64 -atomic64_fetch_add(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_add_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_add atomic64_fetch_add -#endif - -#endif /* atomic64_fetch_add_relaxed */ - -#define arch_atomic64_sub atomic64_sub - -#define arch_atomic64_sub_return atomic64_sub_return -#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire -#define arch_atomic64_sub_return_release atomic64_sub_return_release -#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed - -#ifndef atomic64_sub_return_relaxed -#define atomic64_sub_return_acquire atomic64_sub_return -#define atomic64_sub_return_release atomic64_sub_return -#define atomic64_sub_return_relaxed atomic64_sub_return -#else /* atomic64_sub_return_relaxed */ - -#ifndef atomic64_sub_return_acquire -static __always_inline s64 -atomic64_sub_return_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_sub_return_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_sub_return_acquire atomic64_sub_return_acquire -#endif - -#ifndef atomic64_sub_return_release -static __always_inline s64 -atomic64_sub_return_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_sub_return_relaxed(i, v); -} -#define atomic64_sub_return_release atomic64_sub_return_release -#endif - -#ifndef atomic64_sub_return -static __always_inline s64 -atomic64_sub_return(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_sub_return_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_sub_return atomic64_sub_return -#endif - -#endif /* atomic64_sub_return_relaxed */ - -#define arch_atomic64_fetch_sub atomic64_fetch_sub -#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire -#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release -#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed - -#ifndef atomic64_fetch_sub_relaxed -#define atomic64_fetch_sub_acquire atomic64_fetch_sub -#define atomic64_fetch_sub_release atomic64_fetch_sub -#define atomic64_fetch_sub_relaxed atomic64_fetch_sub -#else /* atomic64_fetch_sub_relaxed */ - -#ifndef atomic64_fetch_sub_acquire -static __always_inline s64 -atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_sub_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire -#endif - -#ifndef atomic64_fetch_sub_release 
-static __always_inline s64 -atomic64_fetch_sub_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_sub_relaxed(i, v); -} -#define atomic64_fetch_sub_release atomic64_fetch_sub_release -#endif - -#ifndef atomic64_fetch_sub -static __always_inline s64 -atomic64_fetch_sub(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_sub_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_sub atomic64_fetch_sub -#endif - -#endif /* atomic64_fetch_sub_relaxed */ - -#define arch_atomic64_inc atomic64_inc - -#ifndef atomic64_inc -static __always_inline void -atomic64_inc(atomic64_t *v) -{ - atomic64_add(1, v); -} -#define atomic64_inc atomic64_inc -#endif - -#define arch_atomic64_inc_return atomic64_inc_return -#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire -#define arch_atomic64_inc_return_release atomic64_inc_return_release -#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed - -#ifndef atomic64_inc_return_relaxed -#ifdef atomic64_inc_return -#define atomic64_inc_return_acquire atomic64_inc_return -#define atomic64_inc_return_release atomic64_inc_return -#define atomic64_inc_return_relaxed atomic64_inc_return -#endif /* atomic64_inc_return */ - -#ifndef atomic64_inc_return -static __always_inline s64 -atomic64_inc_return(atomic64_t *v) -{ - return atomic64_add_return(1, v); -} -#define atomic64_inc_return atomic64_inc_return -#endif - -#ifndef atomic64_inc_return_acquire -static __always_inline s64 -atomic64_inc_return_acquire(atomic64_t *v) -{ - return atomic64_add_return_acquire(1, v); -} -#define atomic64_inc_return_acquire atomic64_inc_return_acquire -#endif - -#ifndef atomic64_inc_return_release -static __always_inline s64 -atomic64_inc_return_release(atomic64_t *v) -{ - return atomic64_add_return_release(1, v); -} -#define atomic64_inc_return_release atomic64_inc_return_release -#endif - -#ifndef atomic64_inc_return_relaxed -static __always_inline s64 -atomic64_inc_return_relaxed(atomic64_t *v) -{ - return atomic64_add_return_relaxed(1, v); -} -#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed -#endif - -#else /* atomic64_inc_return_relaxed */ - -#ifndef atomic64_inc_return_acquire -static __always_inline s64 -atomic64_inc_return_acquire(atomic64_t *v) -{ - s64 ret = atomic64_inc_return_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_inc_return_acquire atomic64_inc_return_acquire -#endif - -#ifndef atomic64_inc_return_release -static __always_inline s64 -atomic64_inc_return_release(atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_inc_return_relaxed(v); -} -#define atomic64_inc_return_release atomic64_inc_return_release -#endif - -#ifndef atomic64_inc_return -static __always_inline s64 -atomic64_inc_return(atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_inc_return_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_inc_return atomic64_inc_return -#endif - -#endif /* atomic64_inc_return_relaxed */ - -#define arch_atomic64_fetch_inc atomic64_fetch_inc -#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire -#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release -#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed - -#ifndef atomic64_fetch_inc_relaxed -#ifdef atomic64_fetch_inc -#define atomic64_fetch_inc_acquire atomic64_fetch_inc -#define atomic64_fetch_inc_release atomic64_fetch_inc -#define 
atomic64_fetch_inc_relaxed atomic64_fetch_inc -#endif /* atomic64_fetch_inc */ - -#ifndef atomic64_fetch_inc -static __always_inline s64 -atomic64_fetch_inc(atomic64_t *v) -{ - return atomic64_fetch_add(1, v); -} -#define atomic64_fetch_inc atomic64_fetch_inc -#endif - -#ifndef atomic64_fetch_inc_acquire -static __always_inline s64 -atomic64_fetch_inc_acquire(atomic64_t *v) -{ - return atomic64_fetch_add_acquire(1, v); -} -#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire -#endif - -#ifndef atomic64_fetch_inc_release -static __always_inline s64 -atomic64_fetch_inc_release(atomic64_t *v) -{ - return atomic64_fetch_add_release(1, v); -} -#define atomic64_fetch_inc_release atomic64_fetch_inc_release -#endif - -#ifndef atomic64_fetch_inc_relaxed -static __always_inline s64 -atomic64_fetch_inc_relaxed(atomic64_t *v) -{ - return atomic64_fetch_add_relaxed(1, v); -} -#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed -#endif - -#else /* atomic64_fetch_inc_relaxed */ - -#ifndef atomic64_fetch_inc_acquire -static __always_inline s64 -atomic64_fetch_inc_acquire(atomic64_t *v) -{ - s64 ret = atomic64_fetch_inc_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire -#endif - -#ifndef atomic64_fetch_inc_release -static __always_inline s64 -atomic64_fetch_inc_release(atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_inc_relaxed(v); -} -#define atomic64_fetch_inc_release atomic64_fetch_inc_release -#endif - -#ifndef atomic64_fetch_inc -static __always_inline s64 -atomic64_fetch_inc(atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_inc_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_inc atomic64_fetch_inc -#endif - -#endif /* atomic64_fetch_inc_relaxed */ - -#define arch_atomic64_dec atomic64_dec - -#ifndef atomic64_dec -static __always_inline void -atomic64_dec(atomic64_t *v) -{ - atomic64_sub(1, v); -} -#define atomic64_dec atomic64_dec -#endif - -#define arch_atomic64_dec_return atomic64_dec_return -#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire -#define arch_atomic64_dec_return_release atomic64_dec_return_release -#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed - -#ifndef atomic64_dec_return_relaxed -#ifdef atomic64_dec_return -#define atomic64_dec_return_acquire atomic64_dec_return -#define atomic64_dec_return_release atomic64_dec_return -#define atomic64_dec_return_relaxed atomic64_dec_return -#endif /* atomic64_dec_return */ - -#ifndef atomic64_dec_return -static __always_inline s64 -atomic64_dec_return(atomic64_t *v) -{ - return atomic64_sub_return(1, v); -} -#define atomic64_dec_return atomic64_dec_return -#endif - -#ifndef atomic64_dec_return_acquire -static __always_inline s64 -atomic64_dec_return_acquire(atomic64_t *v) -{ - return atomic64_sub_return_acquire(1, v); -} -#define atomic64_dec_return_acquire atomic64_dec_return_acquire -#endif - -#ifndef atomic64_dec_return_release -static __always_inline s64 -atomic64_dec_return_release(atomic64_t *v) -{ - return atomic64_sub_return_release(1, v); -} -#define atomic64_dec_return_release atomic64_dec_return_release -#endif - -#ifndef atomic64_dec_return_relaxed -static __always_inline s64 -atomic64_dec_return_relaxed(atomic64_t *v) -{ - return atomic64_sub_return_relaxed(1, v); -} -#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed -#endif - -#else /* atomic64_dec_return_relaxed */ - -#ifndef atomic64_dec_return_acquire 
-static __always_inline s64 -atomic64_dec_return_acquire(atomic64_t *v) -{ - s64 ret = atomic64_dec_return_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_dec_return_acquire atomic64_dec_return_acquire -#endif - -#ifndef atomic64_dec_return_release -static __always_inline s64 -atomic64_dec_return_release(atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_dec_return_relaxed(v); -} -#define atomic64_dec_return_release atomic64_dec_return_release -#endif - -#ifndef atomic64_dec_return -static __always_inline s64 -atomic64_dec_return(atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_dec_return_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_dec_return atomic64_dec_return -#endif - -#endif /* atomic64_dec_return_relaxed */ - -#define arch_atomic64_fetch_dec atomic64_fetch_dec -#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire -#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release -#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed - -#ifndef atomic64_fetch_dec_relaxed -#ifdef atomic64_fetch_dec -#define atomic64_fetch_dec_acquire atomic64_fetch_dec -#define atomic64_fetch_dec_release atomic64_fetch_dec -#define atomic64_fetch_dec_relaxed atomic64_fetch_dec -#endif /* atomic64_fetch_dec */ - -#ifndef atomic64_fetch_dec -static __always_inline s64 -atomic64_fetch_dec(atomic64_t *v) -{ - return atomic64_fetch_sub(1, v); -} -#define atomic64_fetch_dec atomic64_fetch_dec -#endif - -#ifndef atomic64_fetch_dec_acquire -static __always_inline s64 -atomic64_fetch_dec_acquire(atomic64_t *v) -{ - return atomic64_fetch_sub_acquire(1, v); -} -#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire -#endif - -#ifndef atomic64_fetch_dec_release -static __always_inline s64 -atomic64_fetch_dec_release(atomic64_t *v) -{ - return atomic64_fetch_sub_release(1, v); -} -#define atomic64_fetch_dec_release atomic64_fetch_dec_release -#endif - -#ifndef atomic64_fetch_dec_relaxed -static __always_inline s64 -atomic64_fetch_dec_relaxed(atomic64_t *v) -{ - return atomic64_fetch_sub_relaxed(1, v); -} -#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed -#endif - -#else /* atomic64_fetch_dec_relaxed */ - -#ifndef atomic64_fetch_dec_acquire -static __always_inline s64 -atomic64_fetch_dec_acquire(atomic64_t *v) -{ - s64 ret = atomic64_fetch_dec_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire -#endif - -#ifndef atomic64_fetch_dec_release -static __always_inline s64 -atomic64_fetch_dec_release(atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_dec_relaxed(v); -} -#define atomic64_fetch_dec_release atomic64_fetch_dec_release -#endif - -#ifndef atomic64_fetch_dec -static __always_inline s64 -atomic64_fetch_dec(atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_dec_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_dec atomic64_fetch_dec -#endif - -#endif /* atomic64_fetch_dec_relaxed */ - -#define arch_atomic64_and atomic64_and - -#define arch_atomic64_fetch_and atomic64_fetch_and -#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire -#define arch_atomic64_fetch_and_release atomic64_fetch_and_release -#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed - -#ifndef atomic64_fetch_and_relaxed -#define atomic64_fetch_and_acquire atomic64_fetch_and -#define atomic64_fetch_and_release atomic64_fetch_and 
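[ The scheme repeated throughout this file, for atomic_t and atomic64_t
alike, derives the ordering variants from a single _relaxed primitive: an
acquire fence after the access, a release fence before it, and fences on
both sides for the fully ordered form. A loose user-space sketch with C11
fences; the kernel's __atomic_pre_full_fence()/__atomic_post_full_fence()
are architecture hooks that portable C11 cannot express exactly, so seq_cst
fences stand in only as an approximation:

#include <stdatomic.h>
#include <stdio.h>

static long fetch_add_relaxed(_Atomic long *v, long i)
{
	return atomic_fetch_add_explicit(v, i, memory_order_relaxed);
}

static long fetch_add_acquire(_Atomic long *v, long i)
{
	long ret = fetch_add_relaxed(v, i);

	atomic_thread_fence(memory_order_acquire);	/* cf. __atomic_acquire_fence() */
	return ret;
}

static long fetch_add_release(_Atomic long *v, long i)
{
	atomic_thread_fence(memory_order_release);	/* cf. __atomic_release_fence() */
	return fetch_add_relaxed(v, i);
}

static long fetch_add_full(_Atomic long *v, long i)
{
	long ret;

	atomic_thread_fence(memory_order_seq_cst);	/* cf. __atomic_pre_full_fence() */
	ret = fetch_add_relaxed(v, i);
	atomic_thread_fence(memory_order_seq_cst);	/* cf. __atomic_post_full_fence() */
	return ret;
}

int main(void)
{
	_Atomic long v = 0;

	fetch_add_release(&v, 1);
	fetch_add_acquire(&v, 1);
	printf("%ld\n", fetch_add_full(&v, 1) + 1);	/* prints 3 */
	return 0;
}
]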
-#define atomic64_fetch_and_relaxed atomic64_fetch_and -#else /* atomic64_fetch_and_relaxed */ - -#ifndef atomic64_fetch_and_acquire -static __always_inline s64 -atomic64_fetch_and_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_and_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire -#endif - -#ifndef atomic64_fetch_and_release -static __always_inline s64 -atomic64_fetch_and_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_and_relaxed(i, v); -} -#define atomic64_fetch_and_release atomic64_fetch_and_release -#endif - -#ifndef atomic64_fetch_and -static __always_inline s64 -atomic64_fetch_and(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_and_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_and atomic64_fetch_and -#endif - -#endif /* atomic64_fetch_and_relaxed */ - -#define arch_atomic64_andnot atomic64_andnot - -#ifndef atomic64_andnot -static __always_inline void -atomic64_andnot(s64 i, atomic64_t *v) -{ - atomic64_and(~i, v); -} -#define atomic64_andnot atomic64_andnot -#endif - -#define arch_atomic64_fetch_andnot atomic64_fetch_andnot -#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed - -#ifndef atomic64_fetch_andnot_relaxed -#ifdef atomic64_fetch_andnot -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot -#define atomic64_fetch_andnot_release atomic64_fetch_andnot -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot -#endif /* atomic64_fetch_andnot */ - -#ifndef atomic64_fetch_andnot -static __always_inline s64 -atomic64_fetch_andnot(s64 i, atomic64_t *v) -{ - return atomic64_fetch_and(~i, v); -} -#define atomic64_fetch_andnot atomic64_fetch_andnot -#endif - -#ifndef atomic64_fetch_andnot_acquire -static __always_inline s64 -atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) -{ - return atomic64_fetch_and_acquire(~i, v); -} -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#endif - -#ifndef atomic64_fetch_andnot_release -static __always_inline s64 -atomic64_fetch_andnot_release(s64 i, atomic64_t *v) -{ - return atomic64_fetch_and_release(~i, v); -} -#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#endif - -#ifndef atomic64_fetch_andnot_relaxed -static __always_inline s64 -atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) -{ - return atomic64_fetch_and_relaxed(~i, v); -} -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed -#endif - -#else /* atomic64_fetch_andnot_relaxed */ - -#ifndef atomic64_fetch_andnot_acquire -static __always_inline s64 -atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_andnot_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#endif - -#ifndef atomic64_fetch_andnot_release -static __always_inline s64 -atomic64_fetch_andnot_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_andnot_relaxed(i, v); -} -#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#endif - -#ifndef atomic64_fetch_andnot -static __always_inline s64 -atomic64_fetch_andnot(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_andnot_relaxed(i, v); - 
__atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_andnot atomic64_fetch_andnot -#endif - -#endif /* atomic64_fetch_andnot_relaxed */ - -#define arch_atomic64_or atomic64_or - -#define arch_atomic64_fetch_or atomic64_fetch_or -#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire -#define arch_atomic64_fetch_or_release atomic64_fetch_or_release -#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed - -#ifndef atomic64_fetch_or_relaxed -#define atomic64_fetch_or_acquire atomic64_fetch_or -#define atomic64_fetch_or_release atomic64_fetch_or -#define atomic64_fetch_or_relaxed atomic64_fetch_or -#else /* atomic64_fetch_or_relaxed */ - -#ifndef atomic64_fetch_or_acquire -static __always_inline s64 -atomic64_fetch_or_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_or_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire -#endif - -#ifndef atomic64_fetch_or_release -static __always_inline s64 -atomic64_fetch_or_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_or_relaxed(i, v); -} -#define atomic64_fetch_or_release atomic64_fetch_or_release -#endif - -#ifndef atomic64_fetch_or -static __always_inline s64 -atomic64_fetch_or(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_or_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_or atomic64_fetch_or -#endif - -#endif /* atomic64_fetch_or_relaxed */ - -#define arch_atomic64_xor atomic64_xor - -#define arch_atomic64_fetch_xor atomic64_fetch_xor -#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire -#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release -#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed - -#ifndef atomic64_fetch_xor_relaxed -#define atomic64_fetch_xor_acquire atomic64_fetch_xor -#define atomic64_fetch_xor_release atomic64_fetch_xor -#define atomic64_fetch_xor_relaxed atomic64_fetch_xor -#else /* atomic64_fetch_xor_relaxed */ - -#ifndef atomic64_fetch_xor_acquire -static __always_inline s64 -atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) -{ - s64 ret = atomic64_fetch_xor_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire -#endif - -#ifndef atomic64_fetch_xor_release -static __always_inline s64 -atomic64_fetch_xor_release(s64 i, atomic64_t *v) -{ - __atomic_release_fence(); - return atomic64_fetch_xor_relaxed(i, v); -} -#define atomic64_fetch_xor_release atomic64_fetch_xor_release -#endif - -#ifndef atomic64_fetch_xor -static __always_inline s64 -atomic64_fetch_xor(s64 i, atomic64_t *v) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_fetch_xor_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_fetch_xor atomic64_fetch_xor -#endif - -#endif /* atomic64_fetch_xor_relaxed */ - -#define arch_atomic64_xchg atomic64_xchg -#define arch_atomic64_xchg_acquire atomic64_xchg_acquire -#define arch_atomic64_xchg_release atomic64_xchg_release -#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed - -#ifndef atomic64_xchg_relaxed -#define atomic64_xchg_acquire atomic64_xchg -#define atomic64_xchg_release atomic64_xchg -#define atomic64_xchg_relaxed atomic64_xchg -#else /* atomic64_xchg_relaxed */ - -#ifndef atomic64_xchg_acquire -static __always_inline s64 -atomic64_xchg_acquire(atomic64_t *v, s64 i) -{ - s64 ret = atomic64_xchg_relaxed(v, i); - __atomic_acquire_fence(); - 
return ret; -} -#define atomic64_xchg_acquire atomic64_xchg_acquire -#endif - -#ifndef atomic64_xchg_release -static __always_inline s64 -atomic64_xchg_release(atomic64_t *v, s64 i) -{ - __atomic_release_fence(); - return atomic64_xchg_relaxed(v, i); -} -#define atomic64_xchg_release atomic64_xchg_release -#endif - -#ifndef atomic64_xchg -static __always_inline s64 -atomic64_xchg(atomic64_t *v, s64 i) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_xchg_relaxed(v, i); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_xchg atomic64_xchg -#endif - -#endif /* atomic64_xchg_relaxed */ - -#define arch_atomic64_cmpxchg atomic64_cmpxchg -#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire -#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release -#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed - -#ifndef atomic64_cmpxchg_relaxed -#define atomic64_cmpxchg_acquire atomic64_cmpxchg -#define atomic64_cmpxchg_release atomic64_cmpxchg -#define atomic64_cmpxchg_relaxed atomic64_cmpxchg -#else /* atomic64_cmpxchg_relaxed */ - -#ifndef atomic64_cmpxchg_acquire -static __always_inline s64 -atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) -{ - s64 ret = atomic64_cmpxchg_relaxed(v, old, new); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire -#endif - -#ifndef atomic64_cmpxchg_release -static __always_inline s64 -atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) -{ - __atomic_release_fence(); - return atomic64_cmpxchg_relaxed(v, old, new); -} -#define atomic64_cmpxchg_release atomic64_cmpxchg_release -#endif - -#ifndef atomic64_cmpxchg -static __always_inline s64 -atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) -{ - s64 ret; - __atomic_pre_full_fence(); - ret = atomic64_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_cmpxchg atomic64_cmpxchg -#endif - -#endif /* atomic64_cmpxchg_relaxed */ - -#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg -#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire -#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release -#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed - -#ifndef atomic64_try_cmpxchg_relaxed -#ifdef atomic64_try_cmpxchg -#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg -#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg -#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg -#endif /* atomic64_try_cmpxchg */ - -#ifndef atomic64_try_cmpxchg -static __always_inline bool -atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) -{ - s64 r, o = *old; - r = atomic64_cmpxchg(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic64_try_cmpxchg atomic64_try_cmpxchg -#endif - -#ifndef atomic64_try_cmpxchg_acquire -static __always_inline bool -atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) -{ - s64 r, o = *old; - r = atomic64_cmpxchg_acquire(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire -#endif - -#ifndef atomic64_try_cmpxchg_release -static __always_inline bool -atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) -{ - s64 r, o = *old; - r = atomic64_cmpxchg_release(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release -#endif - -#ifndef atomic64_try_cmpxchg_relaxed -static 
__always_inline bool -atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) -{ - s64 r, o = *old; - r = atomic64_cmpxchg_relaxed(v, o, new); - if (unlikely(r != o)) - *old = r; - return likely(r == o); -} -#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed -#endif - -#else /* atomic64_try_cmpxchg_relaxed */ - -#ifndef atomic64_try_cmpxchg_acquire -static __always_inline bool -atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) -{ - bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); - __atomic_acquire_fence(); - return ret; -} -#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire -#endif - -#ifndef atomic64_try_cmpxchg_release -static __always_inline bool -atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) -{ - __atomic_release_fence(); - return atomic64_try_cmpxchg_relaxed(v, old, new); -} -#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release -#endif - -#ifndef atomic64_try_cmpxchg -static __always_inline bool -atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) -{ - bool ret; - __atomic_pre_full_fence(); - ret = atomic64_try_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define atomic64_try_cmpxchg atomic64_try_cmpxchg -#endif - -#endif /* atomic64_try_cmpxchg_relaxed */ - -#define arch_atomic64_sub_and_test atomic64_sub_and_test - -#ifndef atomic64_sub_and_test -/** - * atomic64_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic64_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -static __always_inline bool -atomic64_sub_and_test(s64 i, atomic64_t *v) -{ - return atomic64_sub_return(i, v) == 0; -} -#define atomic64_sub_and_test atomic64_sub_and_test -#endif - -#define arch_atomic64_dec_and_test atomic64_dec_and_test - -#ifndef atomic64_dec_and_test -/** - * atomic64_dec_and_test - decrement and test - * @v: pointer of type atomic64_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __always_inline bool -atomic64_dec_and_test(atomic64_t *v) -{ - return atomic64_dec_return(v) == 0; -} -#define atomic64_dec_and_test atomic64_dec_and_test -#endif - -#define arch_atomic64_inc_and_test atomic64_inc_and_test - -#ifndef atomic64_inc_and_test -/** - * atomic64_inc_and_test - increment and test - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -static __always_inline bool -atomic64_inc_and_test(atomic64_t *v) -{ - return atomic64_inc_return(v) == 0; -} -#define atomic64_inc_and_test atomic64_inc_and_test -#endif - -#define arch_atomic64_add_negative atomic64_add_negative - -#ifndef atomic64_add_negative -/** - * atomic64_add_negative - add and test if negative - * @i: integer value to add - * @v: pointer of type atomic64_t - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. 
- */ -static __always_inline bool -atomic64_add_negative(s64 i, atomic64_t *v) -{ - return atomic64_add_return(i, v) < 0; -} -#define atomic64_add_negative atomic64_add_negative -#endif - -#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless - -#ifndef atomic64_fetch_add_unless -/** - * atomic64_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns original value of @v - */ -static __always_inline s64 -atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) -{ - s64 c = atomic64_read(v); - - do { - if (unlikely(c == u)) - break; - } while (!atomic64_try_cmpxchg(v, &c, c + a)); - - return c; -} -#define atomic64_fetch_add_unless atomic64_fetch_add_unless -#endif - -#define arch_atomic64_add_unless atomic64_add_unless - -#ifndef atomic64_add_unless -/** - * atomic64_add_unless - add unless the number is already a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. - */ -static __always_inline bool -atomic64_add_unless(atomic64_t *v, s64 a, s64 u) -{ - return atomic64_fetch_add_unless(v, a, u) != u; -} -#define atomic64_add_unless atomic64_add_unless -#endif - -#define arch_atomic64_inc_not_zero atomic64_inc_not_zero - -#ifndef atomic64_inc_not_zero -/** - * atomic64_inc_not_zero - increment unless the number is zero - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. - */ -static __always_inline bool -atomic64_inc_not_zero(atomic64_t *v) -{ - return atomic64_add_unless(v, 1, 0); -} -#define atomic64_inc_not_zero atomic64_inc_not_zero -#endif - -#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative - -#ifndef atomic64_inc_unless_negative -static __always_inline bool -atomic64_inc_unless_negative(atomic64_t *v) -{ - s64 c = atomic64_read(v); - - do { - if (unlikely(c < 0)) - return false; - } while (!atomic64_try_cmpxchg(v, &c, c + 1)); - - return true; -} -#define atomic64_inc_unless_negative atomic64_inc_unless_negative -#endif - -#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive - -#ifndef atomic64_dec_unless_positive -static __always_inline bool -atomic64_dec_unless_positive(atomic64_t *v) -{ - s64 c = atomic64_read(v); - - do { - if (unlikely(c > 0)) - return false; - } while (!atomic64_try_cmpxchg(v, &c, c - 1)); - - return true; -} -#define atomic64_dec_unless_positive atomic64_dec_unless_positive -#endif - -#define arch_atomic64_dec_if_positive atomic64_dec_if_positive - -#ifndef atomic64_dec_if_positive -static __always_inline s64 -atomic64_dec_if_positive(atomic64_t *v) -{ - s64 dec, c = atomic64_read(v); - - do { - dec = c - 1; - if (unlikely(dec < 0)) - break; - } while (!atomic64_try_cmpxchg(v, &c, dec)); - - return dec; -} -#define atomic64_dec_if_positive atomic64_dec_if_positive -#endif - -#endif /* _LINUX_ATOMIC_FALLBACK_H */ -// d78e6c293c661c15188f0ec05bce45188c8d5892 diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 4f8d83f9e480..ed1d3ffd5b9d 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -77,12 +77,8 @@ __ret; \ }) -#ifdef CONFIG_ARCH_ATOMIC #include #include -#else -#include -#endif #include diff --git a/scripts/atomic/check-atomics.sh 
b/scripts/atomic/check-atomics.sh index 82748d42ecc5..9c7fbd4bcbce 100755 --- a/scripts/atomic/check-atomics.sh +++ b/scripts/atomic/check-atomics.sh @@ -17,7 +17,6 @@ cat <<EOF | asm-generic/atomic-instrumented.h asm-generic/atomic-long.h linux/atomic-arch-fallback.h -linux/atomic-fallback.h EOF while read header; do OLDSUM="$(tail -n 1 ${LINUXDIR}/include/${header})" -- cgit v1.2.3 From bccf1ec369ac126b0997d01a6e1deae00e2cf6b3 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 25 May 2021 15:02:32 +0100 Subject: locking/atomics: atomic-instrumented: simplify ifdeffery Now that all architectures implement ARCH_ATOMIC, the fallbacks are generated before the instrumented wrappers are generated. Due to this, in atomic-instrumented.h we can assume that the whole set of atomic functions has been generated. Likewise, atomic-instrumented.h doesn't need to provide a preprocessor definition for every atomic it wraps. This patch removes the redundant ifdeffery. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Boqun Feng Cc: Peter Zijlstra Cc: Will Deacon Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210525140232.53872-34-mark.rutland@arm.com --- include/asm-generic/atomic-instrumented.h | 498 +----------------------------- scripts/atomic/gen-atomic-instrumented.sh | 51 +-- 2 files changed, 3 insertions(+), 546 deletions(-) (limited to 'scripts') diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h index 888b6cfeed91..bc45af52c93b 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/asm-generic/atomic-instrumented.h @@ -27,17 +27,13 @@ atomic_read(const atomic_t *v) instrument_atomic_read(v, sizeof(*v)); return arch_atomic_read(v); } -#define atomic_read atomic_read -#if defined(arch_atomic_read_acquire) static __always_inline int atomic_read_acquire(const atomic_t *v) { instrument_atomic_read(v, sizeof(*v)); return arch_atomic_read_acquire(v); } -#define atomic_read_acquire atomic_read_acquire -#endif static __always_inline void atomic_set(atomic_t *v, int i) @@ -45,17 +41,13 @@ atomic_set(atomic_t *v, int i) instrument_atomic_write(v, sizeof(*v)); arch_atomic_set(v, i); } -#define atomic_set atomic_set -#if defined(arch_atomic_set_release) static __always_inline void atomic_set_release(atomic_t *v, int i) { instrument_atomic_write(v, sizeof(*v)); arch_atomic_set_release(v, i); } -#define atomic_set_release atomic_set_release -#endif static __always_inline void atomic_add(int i, atomic_t *v) @@ -63,87 +55,62 @@ atomic_add(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_add(i, v); } -#define atomic_add atomic_add -#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return) static __always_inline int atomic_add_return(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_return(i, v); } -#define atomic_add_return atomic_add_return -#endif -#if defined(arch_atomic_add_return_acquire) static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_return_acquire(i, v); } -#define atomic_add_return_acquire atomic_add_return_acquire -#endif -#if defined(arch_atomic_add_return_release) static __always_inline int atomic_add_return_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_return_release(i, v); } -#define atomic_add_return_release atomic_add_return_release -#endif -#if defined(arch_atomic_add_return_relaxed) static __always_inline int atomic_add_return_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v,
sizeof(*v)); return arch_atomic_add_return_relaxed(i, v); } -#define atomic_add_return_relaxed atomic_add_return_relaxed -#endif -#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add) static __always_inline int atomic_fetch_add(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add(i, v); } -#define atomic_fetch_add atomic_fetch_add -#endif -#if defined(arch_atomic_fetch_add_acquire) static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_acquire(i, v); } -#define atomic_fetch_add_acquire atomic_fetch_add_acquire -#endif -#if defined(arch_atomic_fetch_add_release) static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_release(i, v); } -#define atomic_fetch_add_release atomic_fetch_add_release -#endif -#if defined(arch_atomic_fetch_add_relaxed) static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_relaxed(i, v); } -#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed -#endif static __always_inline void atomic_sub(int i, atomic_t *v) @@ -151,267 +118,188 @@ atomic_sub(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_sub(i, v); } -#define atomic_sub atomic_sub -#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return) static __always_inline int atomic_sub_return(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return(i, v); } -#define atomic_sub_return atomic_sub_return -#endif -#if defined(arch_atomic_sub_return_acquire) static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return_acquire(i, v); } -#define atomic_sub_return_acquire atomic_sub_return_acquire -#endif -#if defined(arch_atomic_sub_return_release) static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return_release(i, v); } -#define atomic_sub_return_release atomic_sub_return_release -#endif -#if defined(arch_atomic_sub_return_relaxed) static __always_inline int atomic_sub_return_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_return_relaxed(i, v); } -#define atomic_sub_return_relaxed atomic_sub_return_relaxed -#endif -#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub) static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub(i, v); } -#define atomic_fetch_sub atomic_fetch_sub -#endif -#if defined(arch_atomic_fetch_sub_acquire) static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub_acquire(i, v); } -#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire -#endif -#if defined(arch_atomic_fetch_sub_release) static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub_release(i, v); } -#define atomic_fetch_sub_release atomic_fetch_sub_release -#endif -#if defined(arch_atomic_fetch_sub_relaxed) static __always_inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { 
instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_sub_relaxed(i, v); } -#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed -#endif -#if defined(arch_atomic_inc) static __always_inline void atomic_inc(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_inc(v); } -#define atomic_inc atomic_inc -#endif -#if defined(arch_atomic_inc_return) static __always_inline int atomic_inc_return(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return(v); } -#define atomic_inc_return atomic_inc_return -#endif -#if defined(arch_atomic_inc_return_acquire) static __always_inline int atomic_inc_return_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return_acquire(v); } -#define atomic_inc_return_acquire atomic_inc_return_acquire -#endif -#if defined(arch_atomic_inc_return_release) static __always_inline int atomic_inc_return_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return_release(v); } -#define atomic_inc_return_release atomic_inc_return_release -#endif -#if defined(arch_atomic_inc_return_relaxed) static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_return_relaxed(v); } -#define atomic_inc_return_relaxed atomic_inc_return_relaxed -#endif -#if defined(arch_atomic_fetch_inc) static __always_inline int atomic_fetch_inc(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc(v); } -#define atomic_fetch_inc atomic_fetch_inc -#endif -#if defined(arch_atomic_fetch_inc_acquire) static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc_acquire(v); } -#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire -#endif -#if defined(arch_atomic_fetch_inc_release) static __always_inline int atomic_fetch_inc_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc_release(v); } -#define atomic_fetch_inc_release atomic_fetch_inc_release -#endif -#if defined(arch_atomic_fetch_inc_relaxed) static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_inc_relaxed(v); } -#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed -#endif -#if defined(arch_atomic_dec) static __always_inline void atomic_dec(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_dec(v); } -#define atomic_dec atomic_dec -#endif -#if defined(arch_atomic_dec_return) static __always_inline int atomic_dec_return(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return(v); } -#define atomic_dec_return atomic_dec_return -#endif -#if defined(arch_atomic_dec_return_acquire) static __always_inline int atomic_dec_return_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return_acquire(v); } -#define atomic_dec_return_acquire atomic_dec_return_acquire -#endif -#if defined(arch_atomic_dec_return_release) static __always_inline int atomic_dec_return_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_return_release(v); } -#define atomic_dec_return_release atomic_dec_return_release -#endif -#if defined(arch_atomic_dec_return_relaxed) static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return 
arch_atomic_dec_return_relaxed(v); } -#define atomic_dec_return_relaxed atomic_dec_return_relaxed -#endif -#if defined(arch_atomic_fetch_dec) static __always_inline int atomic_fetch_dec(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec(v); } -#define atomic_fetch_dec atomic_fetch_dec -#endif -#if defined(arch_atomic_fetch_dec_acquire) static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec_acquire(v); } -#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire -#endif -#if defined(arch_atomic_fetch_dec_release) static __always_inline int atomic_fetch_dec_release(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec_release(v); } -#define atomic_fetch_dec_release atomic_fetch_dec_release -#endif -#if defined(arch_atomic_fetch_dec_relaxed) static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_dec_relaxed(v); } -#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed -#endif static __always_inline void atomic_and(int i, atomic_t *v) @@ -419,97 +307,69 @@ atomic_and(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_and(i, v); } -#define atomic_and atomic_and -#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and) static __always_inline int atomic_fetch_and(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and(i, v); } -#define atomic_fetch_and atomic_fetch_and -#endif -#if defined(arch_atomic_fetch_and_acquire) static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and_acquire(i, v); } -#define atomic_fetch_and_acquire atomic_fetch_and_acquire -#endif -#if defined(arch_atomic_fetch_and_release) static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and_release(i, v); } -#define atomic_fetch_and_release atomic_fetch_and_release -#endif -#if defined(arch_atomic_fetch_and_relaxed) static __always_inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_and_relaxed(i, v); } -#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed -#endif -#if defined(arch_atomic_andnot) static __always_inline void atomic_andnot(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_andnot(i, v); } -#define atomic_andnot atomic_andnot -#endif -#if defined(arch_atomic_fetch_andnot) static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot(i, v); } -#define atomic_fetch_andnot atomic_fetch_andnot -#endif -#if defined(arch_atomic_fetch_andnot_acquire) static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_acquire(i, v); } -#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#endif -#if defined(arch_atomic_fetch_andnot_release) static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_release(i, v); } -#define atomic_fetch_andnot_release atomic_fetch_andnot_release -#endif -#if defined(arch_atomic_fetch_andnot_relaxed) static 
__always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_relaxed(i, v); } -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed -#endif static __always_inline void atomic_or(int i, atomic_t *v) @@ -517,47 +377,34 @@ atomic_or(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_or(i, v); } -#define atomic_or atomic_or -#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or) static __always_inline int atomic_fetch_or(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or(i, v); } -#define atomic_fetch_or atomic_fetch_or -#endif -#if defined(arch_atomic_fetch_or_acquire) static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or_acquire(i, v); } -#define atomic_fetch_or_acquire atomic_fetch_or_acquire -#endif -#if defined(arch_atomic_fetch_or_release) static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or_release(i, v); } -#define atomic_fetch_or_release atomic_fetch_or_release -#endif -#if defined(arch_atomic_fetch_or_relaxed) static __always_inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_or_relaxed(i, v); } -#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed -#endif static __always_inline void atomic_xor(int i, atomic_t *v) @@ -565,129 +412,91 @@ atomic_xor(int i, atomic_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic_xor(i, v); } -#define atomic_xor atomic_xor -#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor) static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor(i, v); } -#define atomic_fetch_xor atomic_fetch_xor -#endif -#if defined(arch_atomic_fetch_xor_acquire) static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor_acquire(i, v); } -#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire -#endif -#if defined(arch_atomic_fetch_xor_release) static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor_release(i, v); } -#define atomic_fetch_xor_release atomic_fetch_xor_release -#endif -#if defined(arch_atomic_fetch_xor_relaxed) static __always_inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_xor_relaxed(i, v); } -#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed -#endif -#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg) static __always_inline int atomic_xchg(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg(v, i); } -#define atomic_xchg atomic_xchg -#endif -#if defined(arch_atomic_xchg_acquire) static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg_acquire(v, i); } -#define atomic_xchg_acquire atomic_xchg_acquire -#endif -#if defined(arch_atomic_xchg_release) static __always_inline int atomic_xchg_release(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg_release(v, i); } -#define 
atomic_xchg_release atomic_xchg_release -#endif -#if defined(arch_atomic_xchg_relaxed) static __always_inline int atomic_xchg_relaxed(atomic_t *v, int i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_xchg_relaxed(v, i); } -#define atomic_xchg_relaxed atomic_xchg_relaxed -#endif -#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg) static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg(v, old, new); } -#define atomic_cmpxchg atomic_cmpxchg -#endif -#if defined(arch_atomic_cmpxchg_acquire) static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg_acquire(v, old, new); } -#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire -#endif -#if defined(arch_atomic_cmpxchg_release) static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg_release(v, old, new); } -#define atomic_cmpxchg_release atomic_cmpxchg_release -#endif -#if defined(arch_atomic_cmpxchg_relaxed) static __always_inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_cmpxchg_relaxed(v, old, new); } -#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed -#endif -#if defined(arch_atomic_try_cmpxchg) static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { @@ -695,10 +504,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg(v, old, new); } -#define atomic_try_cmpxchg atomic_try_cmpxchg -#endif -#if defined(arch_atomic_try_cmpxchg_acquire) static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { @@ -706,10 +512,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_acquire(v, old, new); } -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire -#endif -#if defined(arch_atomic_try_cmpxchg_release) static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { @@ -717,10 +520,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_release(v, old, new); } -#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release -#endif -#if defined(arch_atomic_try_cmpxchg_relaxed) static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { @@ -728,108 +528,76 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_relaxed(v, old, new); } -#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed -#endif -#if defined(arch_atomic_sub_and_test) static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_sub_and_test(i, v); } -#define atomic_sub_and_test atomic_sub_and_test -#endif -#if defined(arch_atomic_dec_and_test) static __always_inline bool atomic_dec_and_test(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_and_test(v); } -#define atomic_dec_and_test atomic_dec_and_test -#endif -#if defined(arch_atomic_inc_and_test) static __always_inline bool atomic_inc_and_test(atomic_t *v) { 
instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } -#define atomic_inc_and_test atomic_inc_and_test -#endif -#if defined(arch_atomic_add_negative) static __always_inline bool atomic_add_negative(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_negative(i, v); } -#define atomic_add_negative atomic_add_negative -#endif -#if defined(arch_atomic_fetch_add_unless) static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_fetch_add_unless(v, a, u); } -#define atomic_fetch_add_unless atomic_fetch_add_unless -#endif -#if defined(arch_atomic_add_unless) static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_add_unless(v, a, u); } -#define atomic_add_unless atomic_add_unless -#endif -#if defined(arch_atomic_inc_not_zero) static __always_inline bool atomic_inc_not_zero(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_not_zero(v); } -#define atomic_inc_not_zero atomic_inc_not_zero -#endif -#if defined(arch_atomic_inc_unless_negative) static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_inc_unless_negative(v); } -#define atomic_inc_unless_negative atomic_inc_unless_negative -#endif -#if defined(arch_atomic_dec_unless_positive) static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_unless_positive(v); } -#define atomic_dec_unless_positive atomic_dec_unless_positive -#endif -#if defined(arch_atomic_dec_if_positive) static __always_inline int atomic_dec_if_positive(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic_dec_if_positive(v); } -#define atomic_dec_if_positive atomic_dec_if_positive -#endif static __always_inline s64 atomic64_read(const atomic64_t *v) @@ -837,17 +605,13 @@ atomic64_read(const atomic64_t *v) instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read(v); } -#define atomic64_read atomic64_read -#if defined(arch_atomic64_read_acquire) static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read_acquire(v); } -#define atomic64_read_acquire atomic64_read_acquire -#endif static __always_inline void atomic64_set(atomic64_t *v, s64 i) @@ -855,17 +619,13 @@ atomic64_set(atomic64_t *v, s64 i) instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set(v, i); } -#define atomic64_set atomic64_set -#if defined(arch_atomic64_set_release) static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set_release(v, i); } -#define atomic64_set_release atomic64_set_release -#endif static __always_inline void atomic64_add(s64 i, atomic64_t *v) @@ -873,87 +633,62 @@ atomic64_add(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_add(i, v); } -#define atomic64_add atomic64_add -#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return) static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return(i, v); } -#define atomic64_add_return atomic64_add_return -#endif -#if defined(arch_atomic64_add_return_acquire) static __always_inline s64 
atomic64_add_return_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return_acquire(i, v); } -#define atomic64_add_return_acquire atomic64_add_return_acquire -#endif -#if defined(arch_atomic64_add_return_release) static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return_release(i, v); } -#define atomic64_add_return_release atomic64_add_return_release -#endif -#if defined(arch_atomic64_add_return_relaxed) static __always_inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_return_relaxed(i, v); } -#define atomic64_add_return_relaxed atomic64_add_return_relaxed -#endif -#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add) static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add(i, v); } -#define atomic64_fetch_add atomic64_fetch_add -#endif -#if defined(arch_atomic64_fetch_add_acquire) static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_acquire(i, v); } -#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire -#endif -#if defined(arch_atomic64_fetch_add_release) static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_release(i, v); } -#define atomic64_fetch_add_release atomic64_fetch_add_release -#endif -#if defined(arch_atomic64_fetch_add_relaxed) static __always_inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_relaxed(i, v); } -#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed -#endif static __always_inline void atomic64_sub(s64 i, atomic64_t *v) @@ -961,267 +696,188 @@ atomic64_sub(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_sub(i, v); } -#define atomic64_sub atomic64_sub -#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return) static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return(i, v); } -#define atomic64_sub_return atomic64_sub_return -#endif -#if defined(arch_atomic64_sub_return_acquire) static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return_acquire(i, v); } -#define atomic64_sub_return_acquire atomic64_sub_return_acquire -#endif -#if defined(arch_atomic64_sub_return_release) static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return_release(i, v); } -#define atomic64_sub_return_release atomic64_sub_return_release -#endif -#if defined(arch_atomic64_sub_return_relaxed) static __always_inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_return_relaxed(i, v); } -#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed -#endif -#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub) static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, 
sizeof(*v)); return arch_atomic64_fetch_sub(i, v); } -#define atomic64_fetch_sub atomic64_fetch_sub -#endif -#if defined(arch_atomic64_fetch_sub_acquire) static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_acquire(i, v); } -#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire -#endif -#if defined(arch_atomic64_fetch_sub_release) static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_release(i, v); } -#define atomic64_fetch_sub_release atomic64_fetch_sub_release -#endif -#if defined(arch_atomic64_fetch_sub_relaxed) static __always_inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_relaxed(i, v); } -#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed -#endif -#if defined(arch_atomic64_inc) static __always_inline void atomic64_inc(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_inc(v); } -#define atomic64_inc atomic64_inc -#endif -#if defined(arch_atomic64_inc_return) static __always_inline s64 atomic64_inc_return(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return(v); } -#define atomic64_inc_return atomic64_inc_return -#endif -#if defined(arch_atomic64_inc_return_acquire) static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return_acquire(v); } -#define atomic64_inc_return_acquire atomic64_inc_return_acquire -#endif -#if defined(arch_atomic64_inc_return_release) static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return_release(v); } -#define atomic64_inc_return_release atomic64_inc_return_release -#endif -#if defined(arch_atomic64_inc_return_relaxed) static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_return_relaxed(v); } -#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed -#endif -#if defined(arch_atomic64_fetch_inc) static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc(v); } -#define atomic64_fetch_inc atomic64_fetch_inc -#endif -#if defined(arch_atomic64_fetch_inc_acquire) static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_acquire(v); } -#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire -#endif -#if defined(arch_atomic64_fetch_inc_release) static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_release(v); } -#define atomic64_fetch_inc_release atomic64_fetch_inc_release -#endif -#if defined(arch_atomic64_fetch_inc_relaxed) static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_relaxed(v); } -#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed -#endif -#if defined(arch_atomic64_dec) static __always_inline void atomic64_dec(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_dec(v); } -#define atomic64_dec atomic64_dec -#endif -#if 
defined(arch_atomic64_dec_return) static __always_inline s64 atomic64_dec_return(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return(v); } -#define atomic64_dec_return atomic64_dec_return -#endif -#if defined(arch_atomic64_dec_return_acquire) static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return_acquire(v); } -#define atomic64_dec_return_acquire atomic64_dec_return_acquire -#endif -#if defined(arch_atomic64_dec_return_release) static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return_release(v); } -#define atomic64_dec_return_release atomic64_dec_return_release -#endif -#if defined(arch_atomic64_dec_return_relaxed) static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_return_relaxed(v); } -#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed -#endif -#if defined(arch_atomic64_fetch_dec) static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec(v); } -#define atomic64_fetch_dec atomic64_fetch_dec -#endif -#if defined(arch_atomic64_fetch_dec_acquire) static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_acquire(v); } -#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire -#endif -#if defined(arch_atomic64_fetch_dec_release) static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_release(v); } -#define atomic64_fetch_dec_release atomic64_fetch_dec_release -#endif -#if defined(arch_atomic64_fetch_dec_relaxed) static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_relaxed(v); } -#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed -#endif static __always_inline void atomic64_and(s64 i, atomic64_t *v) @@ -1229,97 +885,69 @@ atomic64_and(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_and(i, v); } -#define atomic64_and atomic64_and -#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and) static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and(i, v); } -#define atomic64_fetch_and atomic64_fetch_and -#endif -#if defined(arch_atomic64_fetch_and_acquire) static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and_acquire(i, v); } -#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire -#endif -#if defined(arch_atomic64_fetch_and_release) static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and_release(i, v); } -#define atomic64_fetch_and_release atomic64_fetch_and_release -#endif -#if defined(arch_atomic64_fetch_and_relaxed) static __always_inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_and_relaxed(i, v); } -#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed -#endif -#if 
defined(arch_atomic64_andnot) static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_andnot(i, v); } -#define atomic64_andnot atomic64_andnot -#endif -#if defined(arch_atomic64_fetch_andnot) static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot(i, v); } -#define atomic64_fetch_andnot atomic64_fetch_andnot -#endif -#if defined(arch_atomic64_fetch_andnot_acquire) static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_acquire(i, v); } -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#endif -#if defined(arch_atomic64_fetch_andnot_release) static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_release(i, v); } -#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#endif -#if defined(arch_atomic64_fetch_andnot_relaxed) static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_relaxed(i, v); } -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed -#endif static __always_inline void atomic64_or(s64 i, atomic64_t *v) @@ -1327,47 +955,34 @@ atomic64_or(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_or(i, v); } -#define atomic64_or atomic64_or -#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or) static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or(i, v); } -#define atomic64_fetch_or atomic64_fetch_or -#endif -#if defined(arch_atomic64_fetch_or_acquire) static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or_acquire(i, v); } -#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire -#endif -#if defined(arch_atomic64_fetch_or_release) static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or_release(i, v); } -#define atomic64_fetch_or_release atomic64_fetch_or_release -#endif -#if defined(arch_atomic64_fetch_or_relaxed) static __always_inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_or_relaxed(i, v); } -#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed -#endif static __always_inline void atomic64_xor(s64 i, atomic64_t *v) @@ -1375,129 +990,91 @@ atomic64_xor(s64 i, atomic64_t *v) instrument_atomic_read_write(v, sizeof(*v)); arch_atomic64_xor(i, v); } -#define atomic64_xor atomic64_xor -#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor) static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor(i, v); } -#define atomic64_fetch_xor atomic64_fetch_xor -#endif -#if defined(arch_atomic64_fetch_xor_acquire) static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_acquire(i, v); } -#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire 
-#endif -#if defined(arch_atomic64_fetch_xor_release) static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_release(i, v); } -#define atomic64_fetch_xor_release atomic64_fetch_xor_release -#endif -#if defined(arch_atomic64_fetch_xor_relaxed) static __always_inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_relaxed(i, v); } -#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed -#endif -#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg) static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg(v, i); } -#define atomic64_xchg atomic64_xchg -#endif -#if defined(arch_atomic64_xchg_acquire) static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg_acquire(v, i); } -#define atomic64_xchg_acquire atomic64_xchg_acquire -#endif -#if defined(arch_atomic64_xchg_release) static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg_release(v, i); } -#define atomic64_xchg_release atomic64_xchg_release -#endif -#if defined(arch_atomic64_xchg_relaxed) static __always_inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_xchg_relaxed(v, i); } -#define atomic64_xchg_relaxed atomic64_xchg_relaxed -#endif -#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg) static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg(v, old, new); } -#define atomic64_cmpxchg atomic64_cmpxchg -#endif -#if defined(arch_atomic64_cmpxchg_acquire) static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_acquire(v, old, new); } -#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire -#endif -#if defined(arch_atomic64_cmpxchg_release) static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_release(v, old, new); } -#define atomic64_cmpxchg_release atomic64_cmpxchg_release -#endif -#if defined(arch_atomic64_cmpxchg_relaxed) static __always_inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_relaxed(v, old, new); } -#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed -#endif -#if defined(arch_atomic64_try_cmpxchg) static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { @@ -1505,10 +1082,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg(v, old, new); } -#define atomic64_try_cmpxchg atomic64_try_cmpxchg -#endif -#if defined(arch_atomic64_try_cmpxchg_acquire) static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { @@ -1516,10 +1090,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_acquire(v, old, new); } -#define atomic64_try_cmpxchg_acquire 
atomic64_try_cmpxchg_acquire -#endif -#if defined(arch_atomic64_try_cmpxchg_release) static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { @@ -1527,10 +1098,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_release(v, old, new); } -#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release -#endif -#if defined(arch_atomic64_try_cmpxchg_relaxed) static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { @@ -1538,218 +1106,161 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) instrument_atomic_read_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_relaxed(v, old, new); } -#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed -#endif -#if defined(arch_atomic64_sub_and_test) static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_sub_and_test(i, v); } -#define atomic64_sub_and_test atomic64_sub_and_test -#endif -#if defined(arch_atomic64_dec_and_test) static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_and_test(v); } -#define atomic64_dec_and_test atomic64_dec_and_test -#endif -#if defined(arch_atomic64_inc_and_test) static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_and_test(v); } -#define atomic64_inc_and_test atomic64_inc_and_test -#endif -#if defined(arch_atomic64_add_negative) static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_negative(i, v); } -#define atomic64_add_negative atomic64_add_negative -#endif -#if defined(arch_atomic64_fetch_add_unless) static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_fetch_add_unless(v, a, u); } -#define atomic64_fetch_add_unless atomic64_fetch_add_unless -#endif -#if defined(arch_atomic64_add_unless) static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_add_unless(v, a, u); } -#define atomic64_add_unless atomic64_add_unless -#endif -#if defined(arch_atomic64_inc_not_zero) static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_not_zero(v); } -#define atomic64_inc_not_zero atomic64_inc_not_zero -#endif -#if defined(arch_atomic64_inc_unless_negative) static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_inc_unless_negative(v); } -#define atomic64_inc_unless_negative atomic64_inc_unless_negative -#endif -#if defined(arch_atomic64_dec_unless_positive) static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_unless_positive(v); } -#define atomic64_dec_unless_positive atomic64_dec_unless_positive -#endif -#if defined(arch_atomic64_dec_if_positive) static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return arch_atomic64_dec_if_positive(v); } -#define atomic64_dec_if_positive atomic64_dec_if_positive -#endif -#if 
!defined(arch_xchg_relaxed) || defined(arch_xchg) #define xchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_xchg_acquire) #define xchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_xchg_release) #define xchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_release(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_xchg_relaxed) #define xchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg) #define cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg_acquire) #define cmpxchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg_release) #define cmpxchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg_relaxed) #define cmpxchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64) #define cmpxchg64(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg64_acquire) #define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg64_release) #define cmpxchg64_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if defined(arch_cmpxchg64_relaxed) #define cmpxchg64_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ }) -#endif -#if !defined(arch_try_cmpxchg_relaxed) || defined(arch_try_cmpxchg) #define try_cmpxchg(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1758,9 +1269,7 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif -#if defined(arch_try_cmpxchg_acquire) #define try_cmpxchg_acquire(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1769,9 +1278,7 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif -#if defined(arch_try_cmpxchg_release) #define try_cmpxchg_release(ptr, oldp, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1780,9 +1287,7 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif -#if defined(arch_try_cmpxchg_relaxed) #define try_cmpxchg_relaxed(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1791,7 +1296,6 @@ atomic64_dec_if_positive(atomic64_t *v) instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#endif #define cmpxchg_local(ptr, ...) \ ({ \ @@ -1830,4 +1334,4 @@ atomic64_dec_if_positive(atomic64_t *v) }) #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// 4bec382e44520f4d8267e42620054db26a659ea3 +// 1d7c3a25aca5c7fb031c307be4c3d24c7b48fcd5 diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh index 5766ffcec7c5..b0c45aee19d7 100755 --- a/scripts/atomic/gen-atomic-instrumented.sh +++ b/scripts/atomic/gen-atomic-instrumented.sh @@ -41,34 +41,6 @@ gen_params_checks() done } -# gen_guard(meta, atomic, pfx, name, sfx, order) -gen_guard() -{ - local meta="$1"; shift - local atomic="$1"; shift - local pfx="$1"; shift - local name="$1"; shift - local sfx="$1"; shift - local order="$1"; shift - - local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}" - - local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" - - # We definitely need a preprocessor symbol for this atomic if it is an - # ordering variant, or if there's a generic fallback. - if [ ! -z "${order}" ] || [ ! -z "${template}" ]; then - printf "defined(${atomicname})" - return - fi - - # If this is a base variant, but a relaxed variant *may* exist, then we - # only have a preprocessor symbol if the relaxed variant isn't defined - if meta_has_relaxed "${meta}"; then - printf "!defined(${atomicname}_relaxed) || defined(${atomicname})" - fi -} - #gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...) gen_proto_order_variant() { @@ -82,16 +54,12 @@ gen_proto_order_variant() local atomicname="${atomic}_${pfx}${name}${sfx}${order}" - local guard="$(gen_guard "${meta}" "${atomic}" "${pfx}" "${name}" "${sfx}" "${order}")" - local ret="$(gen_ret_type "${meta}" "${int}")" local params="$(gen_params "${int}" "${atomic}" "$@")" local checks="$(gen_params_checks "${meta}" "$@")" local args="$(gen_args "$@")" local retstmt="$(gen_ret_stmt "${meta}")" - [ ! -z "${guard}" ] && printf "#if ${guard}\n" - cat < Date: Mon, 17 May 2021 16:03:11 +0900 Subject: kbuild: merge scripts/mkmakefile to top Makefile scripts/mkmakefile is simple enough to be merged in the Makefile. Use $(call cmd,...) to show the log instead of doing it in the shell script. Signed-off-by: Masahiro Yamada --- Makefile | 11 +++++++++-- scripts/mkmakefile | 17 ----------------- 2 files changed, 9 insertions(+), 19 deletions(-) delete mode 100755 scripts/mkmakefile (limited to 'scripts') diff --git a/Makefile b/Makefile index 4dcfe9c48d60..504327207d66 100644 --- a/Makefile +++ b/Makefile @@ -544,14 +544,21 @@ scripts_basic: $(Q)rm -f .tmp_quiet_recordmcount PHONY += outputmakefile +ifdef building_out_of_srctree # Before starting out-of-tree build, make sure the source tree is clean. # outputmakefile generates a Makefile in the output directory, if using a # separate output directory. This allows convenient use of make in the # output directory. 
# At the same time when output Makefile generated, generate .gitignore to ignore whole output directory + +quiet_cmd_makefile = GEN Makefile + cmd_makefile = { \ + echo "\# Automatically generated by $(srctree)/Makefile: don't edit"; \ + echo "include $(srctree)/Makefile"; \ + } > Makefile + outputmakefile: -ifdef building_out_of_srctree $(Q)if [ -f $(srctree)/.config -o \ -d $(srctree)/include/config -o \ -d $(srctree)/arch/$(SRCARCH)/include/generated ]; then \ @@ -562,7 +569,7 @@ ifdef building_out_of_srctree false; \ fi $(Q)ln -fsn $(srctree) source - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree) + $(call cmd,makefile) $(Q)test -e .gitignore || \ { echo "# this is build directory, ignore it"; echo "*"; } > .gitignore endif diff --git a/scripts/mkmakefile b/scripts/mkmakefile deleted file mode 100755 index 1cb174751429..000000000000 --- a/scripts/mkmakefile +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 -# Generates a small Makefile used in the root of the output -# directory, to allow make to be started from there. -# The Makefile also allow for more convinient build of external modules - -# Usage -# $1 - Kernel src directory - -if [ "${quiet}" != "silent_" ]; then - echo " GEN Makefile" -fi - -cat << EOF > Makefile -# Automatically generated by $0: don't edit -include $1/Makefile -EOF -- cgit v1.2.3 From 174a1dcc96429efce4ef7eb2f5c4506480da2182 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 17 May 2021 16:03:13 +0900 Subject: kbuild: sink stdout from cmd for silent build When building with 'make -s', no output to stdout should be printed. As Arnd Bergmann reported [1], mkimage shows detailed information about the generated images. I think this should be suppressed by the 'cmd' macro instead of by individual scripts. Insert 'exec >/dev/null;' in order to redirect stdout to /dev/null for silent builds. [Note about this implementation] 'exec >/dev/null;' may look somewhat tricky, but there is a reason for it. Appending '>/dev/null' at the end of a command line is a common way to redirect output, so I first tried this: cmd = @set -e; $(echo-cmd) $(cmd_$(1)) >/dev/null ... but it would not work if $(cmd_$(1)) itself contains a redirection. For example, cmd_wrap in scripts/Makefile.asm-generic redirects the output from the 'echo' command into the target file. It would be expanded into: echo "#include <asm-generic/$*.h>" > $@ >/dev/null Then, the target file ends up empty because the string goes to /dev/null instead of $@. Next, I tried this: cmd = @set -e; $(echo-cmd) { $(cmd_$(1)); } >/dev/null The form above would be expanded into: { echo "#include <asm-generic/$*.h>" > $@; } >/dev/null This works as expected. However, it would be a syntax error if $(cmd_$(1)) is empty. When CONFIG_TRIM_UNUSED_KSYMS is disabled, $(call cmd,gen_ksymdeps) in scripts/Makefile.build would be expanded into: set -e; { ; } >/dev/null ..., which causes a syntax error. I also tried this: cmd = @set -e; $(echo-cmd) ( $(cmd_$(1)) ) >/dev/null ... but this causes a syntax error for the same reason.
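(Illustration added for this write-up, not part of the original commit message: the failure modes above are easy to reproduce in any POSIX shell, though the exact error text varies by shell.)

 sh -c '{ echo ok; } >/dev/null'  # non-empty brace group: works
 sh -c '{ ; } >/dev/null'         # empty brace group: syntax error
 sh -c '( ) >/dev/null'           # empty subshell: syntax error
 sh -c 'exec >/dev/null; '        # exec form: an empty command list is fine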
So, finally I adopted: cmd = @set -e; $(echo-cmd) exec >/dev/null; $(cmd_$(1)) [1]: https://lore.kernel.org/lkml/20210514135752.2910387-1-arnd@kernel.org/ Signed-off-by: Masahiro Yamada --- scripts/Kbuild.include | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 82dd1b65b7a8..f247e691562d 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -90,8 +90,13 @@ clean := -f $(srctree)/scripts/Makefile.clean obj echo-cmd = $(if $($(quiet)cmd_$(1)),\ echo ' $(call escsq,$($(quiet)cmd_$(1)))$(echo-why)';) +# sink stdout for 'make -s' + redirect := + quiet_redirect := +silent_redirect := exec >/dev/null; + # printing commands -cmd = @set -e; $(echo-cmd) $(cmd_$(1)) +cmd = @set -e; $(echo-cmd) $($(quiet)redirect) $(cmd_$(1)) ### # if_changed - execute command if any prerequisite is newer than -- cgit v1.2.3 From c39013ee64b5083ec3202aae8a418e9c70baff7a Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 17 May 2021 16:03:14 +0900 Subject: kbuild: clean up ${quiet} checks in shell scripts There were efforts to make 'make -s' really silent when it is a warning-free build. The conventional way was to let a shell script check ${quiet}, and if it is 'silent_', suppress the stdout by itself. With the previous commit, the 'cmd' takes care of it now. The 'cmd' is also invoked from if_changed, if_changed_dep, and if_changed_rule. You can omit ${quiet} checks in shell scripts when they are invoked from the 'cmd' macro. Signed-off-by: Masahiro Yamada --- kernel/gen_kheaders.sh | 4 +--- scripts/link-vmlinux.sh | 4 +--- scripts/mkcompile_h | 4 +--- 3 files changed, 3 insertions(+), 9 deletions(-) (limited to 'scripts') diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh index 34a1dc2abc7d..1966a749e0d9 100755 --- a/kernel/gen_kheaders.sh +++ b/kernel/gen_kheaders.sh @@ -56,9 +56,7 @@ if [ -f kernel/kheaders.md5 ] && exit fi -if [ "${quiet}" != "silent_" ]; then - echo " GEN $tarfile" -fi +echo " GEN $tarfile" rm -rf $cpio_dir mkdir $cpio_dir diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index f4de4c97015b..3b342b0b0b38 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -38,9 +38,7 @@ LDFLAGS_vmlinux="$3" # Will be supressed by "make -s" info() { - if [ "${quiet}" != "silent_" ]; then - printf " %-7s %s\n" "${1}" "${2}" - fi + printf " %-7s %s\n" "${1}" "${2}" } # Generate a linker script to ensure correct ordering of initcalls. diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index 4ae735039daf..06bbf4c2c66c 100755 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -9,8 +9,6 @@ PREEMPT_RT=$5 CC_VERSION="$6" LD=$7 -vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } - # Do not expand names set -f @@ -82,7 +80,7 @@ if [ -r $TARGET ] && \ cmp -s .tmpver.1 .tmpver.2; then rm -f .tmpcompile else - vecho " UPD $TARGET" + echo " UPD $TARGET" mv -f .tmpcompile $TARGET fi rm -f .tmpver.1 .tmpver.2 -- cgit v1.2.3 From 2a73cce2dad3b6e0aa705b376bb736358b6b5e8e Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 23 May 2021 12:14:24 +0900 Subject: scripts/setlocalversion: remove mercurial, svn and git-svn supports The mercurial, svn, git-svn supports were added by the following commits: - 3dce174cfcba ("kbuild: support mercurial in setlocalversion") - ba3d05fb6369 ("kbuild: add svn revision information to setlocalversion") - ff80aa97c9b4 ("setlocalversion: add git-svn support") They did not explain why they are useful for the kernel source tree. 
Let's revert all of them, and see if somebody will complain about it. Signed-off-by: Masahiro Yamada Reviewed-by: Greg Kroah-Hartman Reviewed-by: Nico Schottelius --- scripts/setlocalversion | 41 ----------------------------------------- 1 file changed, 41 deletions(-) (limited to 'scripts') diff --git a/scripts/setlocalversion b/scripts/setlocalversion index db941f6d9591..879cba956e60 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -79,11 +79,6 @@ scm_version() fi fi - # Is this git on svn? - if git config --get svn-remote.svn.url >/dev/null; then - printf -- '-svn%s' "$(git svn find-rev $head)" - fi - # Check for uncommitted changes. # First, with git-status, but --no-optional-locks is only # supported in git >= 2.14, so fall back to git-diff-index if @@ -96,42 +91,6 @@ scm_version() } | grep -qvE '^(.. )?scripts/package'; then printf '%s' -dirty fi - - # All done with git - return - fi - - # Check for mercurial and a mercurial repo. - if test -d .hg && hgid=$(hg id 2>/dev/null); then - # Do we have an tagged version? If so, latesttagdistance == 1 - if [ "$(hg log -r . --template '{latesttagdistance}')" = "1" ]; then - id=$(hg log -r . --template '{latesttag}') - printf '%s%s' -hg "$id" - else - tag=$(printf '%s' "$hgid" | cut -d' ' -f2) - if [ -z "$tag" -o "$tag" = tip ]; then - id=$(printf '%s' "$hgid" | sed 's/[+ ].*//') - printf '%s%s' -hg "$id" - fi - fi - - # Are there uncommitted changes? - # These are represented by + after the changeset id. - case "$hgid" in - *+|*+\ *) printf '%s' -dirty ;; - esac - - # All done with mercurial - return - fi - - # Check for svn and a svn repo. - if rev=$(LC_ALL=C svn info 2>/dev/null | grep '^Last Changed Rev'); then - rev=$(echo $rev | awk '{print $NF}') - printf -- '-svn%s' "$rev" - - # All done with svn - return fi } -- cgit v1.2.3 From a2be76a352f1035a2e5f914a409743d65dc514c5 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 23 May 2021 12:14:25 +0900 Subject: scripts/setlocalversion: remove workaround for old make-kpkg This reverts commit b052ce4c840e ("kbuild: fix false positive -dirty tag caused by make-kpkg"). If I understand correctly, this problem occurred in very old versions of make-kpkg. When I tried a newer version, make-kpkg did not touch scripts/package/Makefile. Anyway, Debian uses 'make deb-pkg' instead of make-kpkg these days. Debian handbook [1] mentions it as "the good old days": "CULTURE The good old days of kernel-package Before the Linux build system gained the ability to build proper Debian packages, the recommended way to build such packages was to use make-kpkg from the kernel-package package." [1]: https://debian-handbook.info/browse/stable/sect.kernel-compilation.html Signed-off-by: Masahiro Yamada Reviewed-by: Greg Kroah-Hartman Reviewed-by: Nico Schottelius --- scripts/setlocalversion | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 879cba956e60..f3084d6bbb22 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -88,7 +88,7 @@ scm_version() if { git --no-optional-locks status -uno --porcelain 2>/dev/null || git diff-index --name-only HEAD - } | grep -qvE '^(.. 
)?scripts/package'; then + } | read dummy; then printf '%s' -dirty fi fi -- cgit v1.2.3 From ffaf62a8050b5f7995083ee93526b57d8d79fec4 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 23 May 2021 12:14:26 +0900 Subject: scripts/setlocalversion: add more comments to -dirty flag detection This script stumbled on the read-only source tree over again: - a2bb90a08cb3 ("kbuild: fix delay in setlocalversion on readonly source") - cdf2bc632ebc ("scripts/setlocalversion on write-protected source tree") - 8ef14c2c41d9 ("Revert "scripts/setlocalversion: git: Make -dirty check more robust"") - ff64dd485730 ("scripts/setlocalversion: Improve -dirty check with git-status --no-optional-locks") Add comments to clarify that this script should never ever try to write to the source tree. 'git describe --dirty' might look as a simple solution for appending the -dirty string, but we cannot use it because it creates the .git/index.lock file. Signed-off-by: Masahiro Yamada Reviewed-by: Greg Kroah-Hartman Reviewed-by: Nico Schottelius --- scripts/setlocalversion | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'scripts') diff --git a/scripts/setlocalversion b/scripts/setlocalversion index f3084d6bbb22..6865df6699c8 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -80,6 +80,10 @@ scm_version() fi # Check for uncommitted changes. + # This script must avoid any write attempt to the source tree, + # which might be read-only. + # You cannot use 'git describe --dirty' because it tries to + # create .git/index.lock . # First, with git-status, but --no-optional-locks is only # supported in git >= 2.14, so fall back to git-diff-index if # it fails. Note that git-diff-index does not refresh the -- cgit v1.2.3 From 630ff0faf84eac6448c851961d4865471a792160 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 23 May 2021 12:14:27 +0900 Subject: scripts/setlocalversion: factor out 12-chars hash construction Both of if and else parts append exactly 12 hex chars, but in different ways. Factor out the else part because we need to support it without relying on git-describe. Remove the --abbrev=12 option since we do not use the hash from git-describe anyway. Signed-off-by: Masahiro Yamada Reviewed-by: Greg Kroah-Hartman Reviewed-by: Nico Schottelius --- scripts/setlocalversion | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) (limited to 'scripts') diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 6865df6699c8..62c0bcce1575 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -59,24 +59,12 @@ scm_version() fi # If we are past a tagged commit (like # "v2.6.30-rc5-302-g72357d5"), we pretty print it. - # - # Ensure the abbreviated sha1 has exactly 12 - # hex characters, to make the output - # independent of git version, local - # core.abbrev settings and/or total number of - # objects in the current repository - passing - # --abbrev=12 ensures a minimum of 12, and the - # awk substr() then picks the 'g' and first 12 - # hex chars. - if atag="$(git describe --abbrev=12 2>/dev/null)"; then - echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),substr($(NF),0,13))}' - - # If we don't have a tag at all we print -g{commitish}, - # again using exactly 12 hex chars. - else - head="$(echo $head | cut -c1-12)" - printf '%s%s' -g $head + if atag="$(git describe 2>/dev/null)"; then + echo "$atag" | awk -F- '{printf("-%05d", $(NF-1))}' fi + + # Add -g and exactly 12 hex chars. 
+ printf '%s%s' -g "$(echo $head | cut -c1-12)" fi # Check for uncommitted changes. -- cgit v1.2.3 From 042da426f8ebde012be9429ff705232af7ad7469 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 23 May 2021 12:14:28 +0900 Subject: scripts/setlocalversion: simplify the short version part Reduce the indentation. Signed-off-by: Masahiro Yamada Reviewed-by: Greg Kroah-Hartman Reviewed-by: Nico Schottelius --- scripts/setlocalversion | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'scripts') diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 62c0bcce1575..151f04971faa 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -131,15 +131,13 @@ res="${res}${CONFIG_LOCALVERSION}${LOCALVERSION}" if test "$CONFIG_LOCALVERSION_AUTO" = "y"; then # full scm version string res="$res$(scm_version)" -else +elif [ -z "${LOCALVERSION}" ]; then # append a plus sign if the repository is not in a clean # annotated or signed tagged state (as git describe only # looks at signed or annotated tags - git tag -a/-s) and # LOCALVERSION= is not specified - if test "${LOCALVERSION+set}" != "set"; then - scm=$(scm_version --short) - res="$res${scm:++}" - fi + scm=$(scm_version --short) + res="$res${scm:++}" fi echo "$res" -- cgit v1.2.3 From 1cbdf60bd1b74e397d48aa877367cfc621f45ffe Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Wed, 26 May 2021 10:49:27 -0700 Subject: kasan: arm64: support specialized outlined tag mismatch checks By using outlined checks we can achieve a significant code size improvement by moving the tag-based ASAN checks into separate functions. Unlike the existing CONFIG_KASAN_OUTLINE mode these functions have a custom calling convention that preserves most registers and is specialized to the register containing the address and the type of access, and as a result we can eliminate the code size and performance overhead of a standard calling convention such as AAPCS for these functions. This change depends on a separate series of changes to Clang [1] to support outlined checks in the kernel, although the change works fine without them (we just don't get outlined checks). This is because the flag -mllvm -hwasan-inline-all-checks=0 has no effect until the Clang changes land. The flag was introduced in the Clang 9.0 timeframe as part of the support for outlined checks in userspace and because our minimum Clang version is 10.0 we can pass it unconditionally. Outlined checks require a new runtime function with a custom calling convention. Add this function to arch/arm64/lib. I measured the code size of defconfig + tag-based KASAN, as well as boot time (i.e. time to init launch) on a DragonBoard 845c with an Android arm64 GKI kernel. The results are below: code size boot time CONFIG_KASAN_INLINE=y before 92824064 6.18s CONFIG_KASAN_INLINE=y after 38822400 6.65s CONFIG_KASAN_OUTLINE=y 39215616 11.48s We can see straight away that specialized outlined checks beat the existing CONFIG_KASAN_OUTLINE=y on both code size and boot time for tag-based ASAN. As for the comparison between CONFIG_KASAN_INLINE=y before and after we saw similar performance numbers in userspace [2] and decided that since the performance overhead is minimal compared to the overhead of tag-based ASAN itself as well as compared to the code size improvements we would just replace the inlined checks with the specialized outlined checks without the option to select between them, and that is what I have implemented in this patch. 
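As an illustrative aside (not part of the patch): whether the flag took effect is easy to spot in a finished build, because in outlined mode memory accesses branch to small compiler-generated thunks that tail-call the __hwasan_tag_mismatch runtime added below. The object path is arbitrary and the __hwasan_check_* thunk naming is LLVM's convention for this mode, so treat both as assumptions to re-check against your toolchain:

# Assumes an arm64 Clang build with CONFIG_KASAN_SW_TAGS=y.
# A nonzero count means outlined checks are being generated;
# a fully inline build would print 0 here.
llvm-objdump -d mm/slub.o | grep -c 'bl.*__hwasan_check_'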
Signed-off-by: Peter Collingbourne Acked-by: Andrey Konovalov Reviewed-by: Mark Rutland Tested-by: Mark Rutland Link: https://linux-review.googlesource.com/id/I1a30036c70ab3c3ee78d75ed9b87ef7cdc3fdb76 Link: [1] https://reviews.llvm.org/D90426 Link: [2] https://reviews.llvm.org/D56954 Link: https://lore.kernel.org/r/20210526174927.2477847-3-pcc@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/asm-prototypes.h | 6 +++ arch/arm64/include/asm/module.lds.h | 17 +++++++- arch/arm64/lib/Makefile | 2 + arch/arm64/lib/kasan_sw_tags.S | 76 +++++++++++++++++++++++++++++++++ mm/kasan/sw_tags.c | 7 +++ scripts/Makefile.kasan | 1 + 6 files changed, 107 insertions(+), 2 deletions(-) create mode 100644 arch/arm64/lib/kasan_sw_tags.S (limited to 'scripts') diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h index 1c9a3a0c5fa5..ec1d9655f885 100644 --- a/arch/arm64/include/asm/asm-prototypes.h +++ b/arch/arm64/include/asm/asm-prototypes.h @@ -23,4 +23,10 @@ long long __ashlti3(long long a, int b); long long __ashrti3(long long a, int b); long long __lshrti3(long long a, int b); +/* + * This function uses a custom calling convention and cannot be called from C so + * this prototype is not entirely accurate. + */ +void __hwasan_tag_mismatch(unsigned long addr, unsigned long access_info); + #endif /* __ASM_PROTOTYPES_H */ diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h index 810045628c66..a11ccadd47d2 100644 --- a/arch/arm64/include/asm/module.lds.h +++ b/arch/arm64/include/asm/module.lds.h @@ -1,7 +1,20 @@ -#ifdef CONFIG_ARM64_MODULE_PLTS SECTIONS { +#ifdef CONFIG_ARM64_MODULE_PLTS .plt 0 (NOLOAD) : { BYTE(0) } .init.plt 0 (NOLOAD) : { BYTE(0) } .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) } -} #endif + +#ifdef CONFIG_KASAN_SW_TAGS + /* + * Outlined checks go into comdat-deduplicated sections named .text.hot. + * Because they are in comdats they are not combined by the linker and + * we otherwise end up with multiple sections with the same .text.hot + * name in the .ko file. The kernel module loader warns if it sees + * multiple sections with the same name so we use this sections + * directive to force them into a single section and silence the + * warning. + */ + .text.hot : { *(.text.hot) } +#endif +} diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index d31e1169d9b8..8e60d76a1b47 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -18,3 +18,5 @@ obj-$(CONFIG_CRC32) += crc32.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o obj-$(CONFIG_ARM64_MTE) += mte.o + +obj-$(CONFIG_KASAN_SW_TAGS) += kasan_sw_tags.o diff --git a/arch/arm64/lib/kasan_sw_tags.S b/arch/arm64/lib/kasan_sw_tags.S new file mode 100644 index 000000000000..5b04464c045e --- /dev/null +++ b/arch/arm64/lib/kasan_sw_tags.S @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Google LLC + */ + +#include +#include + +/* + * Report a tag mismatch detected by tag-based KASAN. + * + * A compiler-generated thunk calls this with a non-AAPCS calling + * convention. 
Upon entry to this function, registers are as follows: + * + * x0: fault address (see below for restore) + * x1: fault description (see below for restore) + * x2 to x15: callee-saved + * x16 to x17: safe to clobber + * x18 to x30: callee-saved + * sp: pre-decremented by 256 bytes (see below for restore) + * + * The caller has decremented the SP by 256 bytes, and created a + * structure on the stack as follows: + * + * sp + 0..15: x0 and x1 to be restored + * sp + 16..231: free for use + * sp + 232..247: x29 and x30 (same as in GPRs) + * sp + 248..255: free for use + * + * Note that this is not a struct pt_regs. + * + * To call a regular AAPCS function we must save x2 to x15 (which we can + * store in the gaps), and create a frame record (for which we can use + * x29 and x30 spilled by the caller as those match the GPRs). + * + * The caller expects x0 and x1 to be restored from the structure, and + * for the structure to be removed from the stack (i.e. the SP must be + * incremented by 256 prior to return). + */ +SYM_CODE_START(__hwasan_tag_mismatch) +#ifdef BTI_C + BTI_C +#endif + add x29, sp, #232 + stp x2, x3, [sp, #8 * 2] + stp x4, x5, [sp, #8 * 4] + stp x6, x7, [sp, #8 * 6] + stp x8, x9, [sp, #8 * 8] + stp x10, x11, [sp, #8 * 10] + stp x12, x13, [sp, #8 * 12] + stp x14, x15, [sp, #8 * 14] +#ifndef CONFIG_SHADOW_CALL_STACK + str x18, [sp, #8 * 18] +#endif + + mov x2, x30 + bl kasan_tag_mismatch + + ldp x0, x1, [sp] + ldp x2, x3, [sp, #8 * 2] + ldp x4, x5, [sp, #8 * 4] + ldp x6, x7, [sp, #8 * 6] + ldp x8, x9, [sp, #8 * 8] + ldp x10, x11, [sp, #8 * 10] + ldp x12, x13, [sp, #8 * 12] + ldp x14, x15, [sp, #8 * 14] +#ifndef CONFIG_SHADOW_CALL_STACK + ldr x18, [sp, #8 * 18] +#endif + ldp x29, x30, [sp, #8 * 29] + + /* remove the structure from the stack */ + add sp, sp, #256 + ret +SYM_CODE_END(__hwasan_tag_mismatch) +EXPORT_SYMBOL(__hwasan_tag_mismatch) diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c index 9df8e7f69e87..9362938abbfa 100644 --- a/mm/kasan/sw_tags.c +++ b/mm/kasan/sw_tags.c @@ -207,3 +207,10 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache, return &alloc_meta->free_track[i]; } + +void kasan_tag_mismatch(unsigned long addr, unsigned long access_info, + unsigned long ret_ip) +{ + kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10, + ret_ip); +} diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 3d791908ed36..801c415bac59 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan @@ -50,6 +50,7 @@ endif CFLAGS_KASAN := -fsanitize=kernel-hwaddress \ $(call cc-param,hwasan-instrument-stack=$(stack_enable)) \ $(call cc-param,hwasan-use-short-granules=0) \ + $(call cc-param,hwasan-inline-all-checks=0) \ $(instrumentation_flags) endif # CONFIG_KASAN_SW_TAGS -- cgit v1.2.3 From e50899122f3204946bb3559da23700c2e5b9568b Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 25 May 2021 15:27:27 +0200 Subject: scripts: sphinx-pre-install: rework the sphinx install logic The sphinx-pre-install supports installing sphinx via a virtual environment using pip/pypi or directly from the distribution's package, when --no-virtualenv is used. However, even when --no-virtualenv, the current logic is still recomending to install a virtual env, due to a regression. It turns that the logic there is complex, as it depends on several different conditions. Split the code which recommends Sphinx on two separate functions, in order to clean up the code. 
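For reference, the two code paths the split has to serve can be exercised directly. The flags below are the ones the script already accepts (a sketch; re-check against the script's usage text if in doubt):

./scripts/sphinx-pre-install                   # may recommend a venv + pip install
./scripts/sphinx-pre-install --no-virtualenv   # suggest distro packages only
./scripts/sphinx-pre-install --version-check   # only validate the Sphinx version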
Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/9dedaec201803017b7a7dc24a074f3a4f040b72a.1621949137.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- scripts/sphinx-pre-install | 208 ++++++++++++++++++++++++++++++--------------- 1 file changed, 140 insertions(+), 68 deletions(-) (limited to 'scripts') diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install index fe92020d67e3..b5fec149f473 100755 --- a/scripts/sphinx-pre-install +++ b/scripts/sphinx-pre-install @@ -24,7 +24,6 @@ my $need_symlink = 0; my $need_sphinx = 0; my $need_venv = 0; my $need_virtualenv = 0; -my $rec_sphinx_upgrade = 0; my $install = ""; my $virtenv_dir = ""; my $python_cmd = ""; @@ -33,6 +32,7 @@ my $cur_version; my $rec_version = "1.7.9"; # PDF won't build here my $min_pdf_version = "2.4.4"; # Min version where pdf builds + # # Command line arguments # @@ -319,10 +319,7 @@ sub check_sphinx() return; } - if ($cur_version lt $rec_version) { - $rec_sphinx_upgrade = 1; - return; - } + return if ($cur_version lt $rec_version); # On version check mode, just assume Sphinx has all mandatory deps exit (0) if ($version_check); @@ -701,6 +698,141 @@ sub deactivate_help() printf "\tdeactivate\n"; } +sub get_virtenv() +{ + my $min_activate = "$ENV{'PWD'}/${virtenv_prefix}${min_version}/bin/activate"; + my @activates = glob "$ENV{'PWD'}/${virtenv_prefix}*/bin/activate"; + + @activates = sort {$b cmp $a} @activates; + my ($activate, $ver); + foreach my $f (@activates) { + next if ($f lt $min_activate); + + my $sphinx_cmd = $f; + $sphinx_cmd =~ s/activate/sphinx-build/; + next if (! -f $sphinx_cmd); + + my $ver = get_sphinx_version($sphinx_cmd); + if ($need_sphinx && ($ver ge $min_version)) { + return ($f, $ver); + } elsif ($ver gt $cur_version) { + return ($f, $ver); + } + } + return ("", ""); +} + +# +# The logic here is complex, as it have to deal with different versions: +# - minimal supported version; +# - minimal PDF version; +# - recommended version. +# It also needs to work fine with both distro's package and venv/virtualenv +sub recommend_sphinx_version($) +{ + my $virtualenv_cmd = shift; + + # Avoid running sphinx-builds from venv if $cur_version is good + return if ($cur_version && ($cur_version ge $rec_version)); + + my $latest_avail_ver; + my $rec_sphinx_upgrade = 0; + + # Get the highest version from sphinx_*/bin/sphinx-build and the + # corresponding command to activate the venv/virtenv + my ($activate, $venv_ver) = get_virtenv(); + + if (($activate ne "") && ($venv_ver gt $cur_version)) { + $latest_avail_ver = $venv_ver; + } else { + $latest_avail_ver = $cur_version if ($cur_version); + } + + if (!$need_sphinx) { + # sphinx-build is present and its version is >= $min_version + + #only recommend enabling a newer virtenv version if makes sense. + if ($latest_avail_ver gt $cur_version) { + printf "\nYou may also use the newer Sphinx version $venv_ver with:\n"; + printf "\tdeactivate\n" if ($ENV{'PWD'} =~ /${virtenv_prefix}/); + printf "\t. $activate\n"; + deactivate_help(); + + return; + } + return if ($latest_avail_ver ge $rec_version); + } + + if (!$virtualenv) { + # No sphinx either via package or via virtenv. As we can't + # Compare the versions here, just return, recommending the + # user to install it from the package distro. + return if (!$latest_avail_ver); + + # User doesn't want a virtenv recommendation, but he already + # installed one via virtenv with a newer version. 
+ # So, print commands to enable it + if ($latest_avail_ver gt $cur_version) { + printf "\nYou may also use the Sphinx virtualenv version $venv_ver with:\n"; + printf "\tdeactivate\n" if ($ENV{'PWD'} =~ /${virtenv_prefix}/); + printf "\t. $activate\n"; + deactivate_help(); + + return; + } + print "\n"; + } else { + $need++ if ($need_sphinx); + } + + # Suggest newer versions if current ones are too old + if ($latest_avail_ver && $cur_version ge $min_version) { + # If there's a good enough version, ask the user to enable it + if ($latest_avail_ver ge $rec_version) { + printf "\nNeed to activate Sphinx (version $venv_ver) on virtualenv with:\n"; + printf "\t. $activate\n"; + deactivate_help(); + + return; + } + + # Version is above the minimal required one, but may be + # below the recommended one. So, print warnings/notes + + if ($latest_avail_ver lt $rec_version) { + print "Warning: It is recommended at least Sphinx version $rec_version.\n"; + $rec_sphinx_upgrade = 1; + } + if ($latest_avail_ver lt $min_pdf_version) { + print "note: If you want pdf, you need at least $min_pdf_version.\n"; + } + } + + # At this point, either it needs Sphinx or upgrade is recommended, + # both via pip + + if ($rec_sphinx_upgrade) { + if (!$virtualenv) { + print "Instead of install/upgrade Python Sphinx pkg, you could use pip/pypi with:\n\n"; + } else { + print "To upgrade Sphinx, use:\n\n"; + } + } else { + print "Sphinx needs to be installed either as a package or via pip/pypi with:\n"; + } + + $python_cmd = find_python_no_venv(); + + if ($need_venv) { + printf "\t$python_cmd -m venv $virtenv_dir\n"; + } else { + printf "\t$virtualenv_cmd $virtenv_dir\n"; + } + printf "\t. $virtenv_dir/bin/activate\n"; + printf "\tpip install -r $requirement_file\n"; + deactivate_help(); +} + sub check_needs() { # Check if Sphinx is already accessible from current environment @@ -763,8 +895,8 @@ sub check_needs() check_program("rsvg-convert", 2) if ($pdf); check_program("latexmk", 2) if ($pdf); - if ($need_sphinx || $rec_sphinx_upgrade) { - check_python_module("ensurepip", 0) if ($need_venv); + if ($need_venv) { + check_python_module("ensurepip", 0); } # Do distro-specific checks and output distro-install commands @@ -784,67 +916,7 @@ sub check_needs() which("sphinx-build-3"); } - # NOTE: if the system has a too old Sphinx version installed, - # it will recommend installing a newer version using virtualenv - - if ($need_sphinx || $rec_sphinx_upgrade) { - my $min_activate = "$ENV{'PWD'}/${virtenv_prefix}${min_version}/bin/activate"; - my @activates = glob "$ENV{'PWD'}/${virtenv_prefix}*/bin/activate"; - - if ($cur_version lt $rec_version) { - print "Warning: It is recommended at least Sphinx version $rec_version.\n"; - print " If you want pdf, you need at least $min_pdf_version.\n"; - } - if ($cur_version lt $min_pdf_version) { - print "Note: It is recommended at least Sphinx version $min_pdf_version if you need PDF support.\n"; - } - @activates = sort {$b cmp $a} @activates; - my ($activate, $ver); - foreach my $f (@activates) { - next if ($f lt $min_activate); - - my $sphinx_cmd = $f; - $sphinx_cmd =~ s/activate/sphinx-build/; - next if (! -f $sphinx_cmd); - - $ver = get_sphinx_version($sphinx_cmd); - if ($need_sphinx && ($ver ge $min_version)) { - $activate = $f; - last; - } elsif ($ver gt $cur_version) { - $activate = $f; - last; - } - } - if ($activate ne "") { - if ($need_sphinx) { - printf "\nNeed to activate Sphinx (version $ver) on virtualenv with:\n"; - printf "\t. 
$activate\n"; - deactivate_help(); - exit (1); - } else { - printf "\nYou may also use a newer Sphinx (version $ver) with:\n"; - printf "\tdeactivate && . $activate\n"; - } - } else { - my $rec_activate = "$virtenv_dir/bin/activate"; - - print "To upgrade Sphinx, use:\n\n" if ($rec_sphinx_upgrade); - - $python_cmd = find_python_no_venv(); - - if ($need_venv) { - printf "\t$python_cmd -m venv $virtenv_dir\n"; - } else { - printf "\t$virtualenv_cmd $virtenv_dir\n"; - } - printf "\t. $rec_activate\n"; - printf "\tpip install -r $requirement_file\n"; - deactivate_help(); - - $need++ if (!$rec_sphinx_upgrade); - } - } + recommend_sphinx_version($virtualenv_cmd); printf "\n"; print "All optional dependencies are met.\n" if (!$optional); -- cgit v1.2.3 From a5f785f1021857a889b1f5b7cc1d83efd4404336 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 25 May 2021 15:27:28 +0200 Subject: scripts: sphinx-pre-install: fix the need of virtenv packages The pip packages are only needed when the distro-provided Sphinx version is not good enough. Don't recommend installing it if not needed. Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/04ce53b77b37f1e495c3abc39c2d3dc407895dc0.1621949137.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- scripts/sphinx-pre-install | 122 +++++++++++++++++++++++++++------------------ 1 file changed, 74 insertions(+), 48 deletions(-) (limited to 'scripts') diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install index b5fec149f473..288e86a9d1e5 100755 --- a/scripts/sphinx-pre-install +++ b/scripts/sphinx-pre-install @@ -22,16 +22,18 @@ my $need = 0; my $optional = 0; my $need_symlink = 0; my $need_sphinx = 0; -my $need_venv = 0; +my $need_pip = 0; my $need_virtualenv = 0; +my $rec_sphinx_upgrade = 0; my $install = ""; my $virtenv_dir = ""; my $python_cmd = ""; +my $activate_cmd; my $min_version; my $cur_version; my $rec_version = "1.7.9"; # PDF won't build here my $min_pdf_version = "2.4.4"; # Min version where pdf builds - +my $latest_avail_ver; # # Command line arguments @@ -700,11 +702,12 @@ sub deactivate_help() sub get_virtenv() { + my $ver; my $min_activate = "$ENV{'PWD'}/${virtenv_prefix}${min_version}/bin/activate"; my @activates = glob "$ENV{'PWD'}/${virtenv_prefix}*/bin/activate"; @activates = sort {$b cmp $a} @activates; - my ($activate, $ver); + foreach my $f (@activates) { next if ($f lt $min_activate); @@ -722,40 +725,67 @@ sub get_virtenv() return ("", ""); } -# -# The logic here is complex, as it have to deal with different versions: -# - minimal supported version; -# - minimal PDF version; -# - recommended version. 
-# It also needs to work fine with both distro's package and venv/virtualenv -sub recommend_sphinx_version($) +sub recommend_sphinx_upgrade() { - my $virtualenv_cmd = shift; + my $venv_ver; # Avoid running sphinx-builds from venv if $cur_version is good - return if ($cur_version && ($cur_version ge $rec_version)); - - my $latest_avail_ver; - my $rec_sphinx_upgrade = 0; + if ($cur_version && ($cur_version ge $rec_version)) { + $latest_avail_ver = $cur_version; + return; + } # Get the highest version from sphinx_*/bin/sphinx-build and the # corresponding command to activate the venv/virtenv - my ($activate, $venv_ver) = get_virtenv(); + $activate_cmd = get_virtenv(); - if (($activate ne "") && ($venv_ver gt $cur_version)) { + # Store the highest version from Sphinx existing virtualenvs + if (($activate_cmd ne "") && ($venv_ver gt $cur_version)) { $latest_avail_ver = $venv_ver; } else { $latest_avail_ver = $cur_version if ($cur_version); } + # As we don't know package version of Sphinx, and there's no + # virtual environments, don't check if upgrades are needed + if (!$virtualenv) { + return if (!$latest_avail_ver); + } + + # Either there are already a virtual env or a new one should be created + $need_pip = 1; + + # Return if the reason is due to an upgrade or not + if ($latest_avail_ver lt $rec_version) { + $rec_sphinx_upgrade = 1; + } +} + +# +# The logic here is complex, as it have to deal with different versions: +# - minimal supported version; +# - minimal PDF version; +# - recommended version. +# It also needs to work fine with both distro's package and venv/virtualenv +sub recommend_sphinx_version($) +{ + my $virtualenv_cmd = shift; + + if ($latest_avail_ver lt $min_pdf_version) { + print "note: If you want pdf, you need at least Sphinx $min_pdf_version.\n"; + } + + # Version is OK. Nothing to do. + return if ($cur_version && ($cur_version ge $rec_version)); + if (!$need_sphinx) { # sphinx-build is present and its version is >= $min_version #only recommend enabling a newer virtenv version if makes sense. if ($latest_avail_ver gt $cur_version) { - printf "\nYou may also use the newer Sphinx version $venv_ver with:\n"; + printf "\nYou may also use the newer Sphinx version $latest_avail_ver with:\n"; printf "\tdeactivate\n" if ($ENV{'PWD'} =~ /${virtenv_prefix}/); - printf "\t. $activate\n"; + printf "\t. $activate_cmd\n"; deactivate_help(); return; @@ -773,9 +803,9 @@ sub recommend_sphinx_version($) # installed one via virtenv with a newer version. # So, print commands to enable it if ($latest_avail_ver gt $cur_version) { - printf "\nYou may also use the Sphinx virtualenv version $venv_ver with:\n"; + printf "\nYou may also use the Sphinx virtualenv version $latest_avail_ver with:\n"; printf "\tdeactivate\n" if ($ENV{'PWD'} =~ /${virtenv_prefix}/); - printf "\t. $activate\n"; + printf "\t. $activate_cmd\n"; deactivate_help(); return; @@ -789,8 +819,8 @@ sub recommend_sphinx_version($) if ($latest_avail_ver && $cur_version ge $min_version) { # If there's a good enough version, ask the user to enable it if ($latest_avail_ver ge $rec_version) { - printf "\nNeed to activate Sphinx (version $venv_ver) on virtualenv with:\n"; - printf "\t. $activate\n"; + printf "\nNeed to activate Sphinx (version $latest_avail_ver) on virtualenv with:\n"; + printf "\t. 
$activate_cmd\n"; deactivate_help(); return; @@ -801,10 +831,6 @@ sub recommend_sphinx_version($) if ($latest_avail_ver lt $rec_version) { print "Warning: It is recommended at least Sphinx version $rec_version.\n"; - $rec_sphinx_upgrade = 1; - } - if ($latest_avail_ver lt $min_pdf_version) { - print "note: If you want pdf, you need at least $min_pdf_version.\n"; } } @@ -823,11 +849,8 @@ sub recommend_sphinx_version($) $python_cmd = find_python_no_venv(); - if ($need_venv) { - printf "\t$python_cmd -m venv $virtenv_dir\n"; - } else { - printf "\t$virtualenv_cmd $virtenv_dir\n"; - } + printf "\t$virtualenv_cmd $virtenv_dir\n"; + printf "\t. $virtenv_dir/bin/activate\n"; printf "\tpip install -r $requirement_file\n"; deactivate_help(); @@ -854,15 +877,14 @@ sub check_needs() if ($virtualenv) { my $tmp = qx($python_cmd --version 2>&1); if ($tmp =~ m/(\d+\.)(\d+\.)/) { - if ($1 >= 3 && $2 >= 3) { - $need_venv = 1; # python 3.3 or upper - } else { - $need_virtualenv = 1; - } if ($1 < 3) { # Fail if it finds python2 (or worse) die "Python 3 is required to build the kernel docs\n"; } + if ($1 == 3 && $2 < 3) { + # Need Python 3.3 or upper for venv + $need_virtualenv = 1; + } } else { die "Warning: couldn't identify $python_cmd version!"; } @@ -871,14 +893,22 @@ sub check_needs() } } - # Set virtualenv command line, if python < 3.3 + recommend_sphinx_upgrade(); + my $virtualenv_cmd; - if ($need_virtualenv) { - $virtualenv_cmd = findprog("virtualenv-3"); - $virtualenv_cmd = findprog("virtualenv-3.5") if (!$virtualenv_cmd); - if (!$virtualenv_cmd) { - check_program("virtualenv", 0); - $virtualenv_cmd = "virtualenv"; + + if ($need_pip) { + # Set virtualenv command line, if python < 3.3 + if ($need_virtualenv) { + $virtualenv_cmd = findprog("virtualenv-3"); + $virtualenv_cmd = findprog("virtualenv-3.5") if (!$virtualenv_cmd); + if (!$virtualenv_cmd) { + check_program("virtualenv", 0); + $virtualenv_cmd = "virtualenv"; + } + } else { + $virtualenv_cmd = "$python_cmd -m venv"; + check_python_module("ensurepip", 0); } } @@ -895,10 +925,6 @@ sub check_needs() check_program("rsvg-convert", 2) if ($pdf); check_program("latexmk", 2) if ($pdf); - if ($need_venv) { - check_python_module("ensurepip", 0); - } - # Do distro-specific checks and output distro-install commands check_distros(); -- cgit v1.2.3 From 709dedfdf3daa8719240ecff1c0b70b278005386 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 2 Jun 2021 20:28:06 -0500 Subject: documentation-file-ref-check: Make git check work for multiple working directories With multiple git working directories, '.git' may also be a text file linking to the actual git tree instead of a directory. Cc: Mauro Carvalho Chehab Cc: Jonathan Corbet Cc: linux-doc@vger.kernel.org Signed-off-by: Rob Herring Reviewed-by: Mauro Carvalho Chehab Tested-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/20210603012806.331132-1-robh@kernel.org Signed-off-by: Jonathan Corbet --- scripts/documentation-file-ref-check | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/documentation-file-ref-check b/scripts/documentation-file-ref-check index c71832b2312b..7187ea5e5149 100755 --- a/scripts/documentation-file-ref-check +++ b/scripts/documentation-file-ref-check @@ -24,7 +24,7 @@ my $help = 0; my $fix = 0; my $warn = 0; -if (! -d ".git") { +if (! 
-e ".git") { printf "Warning: can't check if file exists, as this is not a git tree\n"; exit 0; } -- cgit v1.2.3 From 43ac711053fc6d94a3f16141c4efe20059a9d918 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 29 May 2021 02:13:21 +0900 Subject: kconfig: constify long_opts getopt_long() does not modify the long_opts structure. Signed-off-by: Masahiro Yamada --- scripts/kconfig/conf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index bfa1ea8f5f98..5d84b44a2a2a 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c @@ -678,7 +678,7 @@ static void check_conf(struct menu *menu) check_conf(child); } -static struct option long_opts[] = { +static const struct option long_opts[] = { {"help", no_argument, NULL, 'h'}, {"silent", no_argument, NULL, 's'}, {"oldaskconfig", no_argument, &input_mode_opt, oldaskconfig}, -- cgit v1.2.3 From 27f2a4db76e8d8a8b601fc1c6a7a17f88bd907ab Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 21 May 2021 18:26:24 -0700 Subject: Makefile: fix GDB warning with CONFIG_RELR GDB produces the following warning when debugging kernels built with CONFIG_RELR: BFD: /android0/linux-next/vmlinux: unknown type [0x13] section `.relr.dyn' when loading a kernel built with CONFIG_RELR into GDB. It can also prevent debugging symbols using such relocations. Peter sugguests: [That flag] means that lld will use dynamic tags and section type numbers in the OS-specific range rather than the generic range. The kernel itself doesn't care about these numbers; it determines the location of the RELR section using symbols defined by a linker script. Link: https://github.com/ClangBuiltLinux/linux/issues/1057 Suggested-by: Peter Collingbourne Reviewed-by: Nathan Chancellor Signed-off-by: Nick Desaulniers Link: https://lore.kernel.org/r/20210522012626.2811297-1-ndesaulniers@google.com Signed-off-by: Will Deacon --- Makefile | 2 +- scripts/tools-support-relr.sh | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/Makefile b/Makefile index e4468353425a..e38c74d0433c 100644 --- a/Makefile +++ b/Makefile @@ -1031,7 +1031,7 @@ LDFLAGS_vmlinux += $(call ld-option, -X,) endif ifeq ($(CONFIG_RELR),y) -LDFLAGS_vmlinux += --pack-dyn-relocs=relr +LDFLAGS_vmlinux += --pack-dyn-relocs=relr --use-android-relr-tags endif # We never want expected sections to be placed heuristically by the diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh index 45e8aa360b45..cb55878bd5b8 100755 --- a/scripts/tools-support-relr.sh +++ b/scripts/tools-support-relr.sh @@ -7,7 +7,8 @@ trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1 void *p = &p; END -$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file +$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr \ + --use-android-relr-tags -o $tmp_file # Despite printing an error message, GNU nm still exits with exit code 0 if it # sees a relr section. So we need to check that nothing is printed to stderr. 
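As a side note, the script is driven purely by environment variables, so the new flag can be smoke-tested outside of Kconfig. A minimal sketch, assuming an LLVM toolchain in PATH (roughly how Kconfig invokes the script when evaluating TOOLS_SUPPORT_RELR; the exact variable set is an assumption worth re-checking):

# Exit status 0 means the toolchain can emit and inspect RELR
# relocations with the Android-range tags now passed by the kernel.
CC=clang LD=ld.lld NM=llvm-nm OBJCOPY=llvm-objcopy ./scripts/tools-support-relr.sh && echo "RELR supported"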
-- cgit v1.2.3 From 4a6795933a890d41504c6df04527d1e093a4cbe6 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 7 Jun 2021 15:02:06 +0100 Subject: kbuild: modpost: Explicitly warn about unprototyped symbols One common cause of modpost version generation failures is a failure to prototype exported assembly functions - the tooling requires this for exported functions even if they are not and should not be called from C code in order to do the version mangling for symbols. Unfortunately the error message is currently rather abstruse, simply saying that "version generation failed" and even diving into the code doesn't directly show what's going on since there's several steps between the problem and it being observed. Provide an explicit hint as to the likely cause of a version generation failure to help anyone who runs into this in future more readily diagnose and fix the problem. Signed-off-by: Mark Brown Signed-off-by: Masahiro Yamada --- scripts/mod/modpost.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 3e623ccc020b..270a7df898e2 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -660,8 +660,11 @@ static void handle_modversion(const struct module *mod, unsigned int crc; if (sym->st_shndx == SHN_UNDEF) { - warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n", - symname, mod->name, mod->is_vmlinux ? "" : ".ko"); + warn("EXPORT symbol \"%s\" [%s%s] version ...\n" + "Is \"%s\" prototyped in ?\n", + symname, mod->name, mod->is_vmlinux ? "" : ".ko", + symname); + return; } -- cgit v1.2.3 From a979522a1a88556e42a22ce61bccc58e304cb361 Mon Sep 17 00:00:00 2001 From: Matthias Maennich Date: Sat, 12 Jun 2021 15:18:38 +0100 Subject: kbuild: mkcompile_h: consider timestamp if KBUILD_BUILD_TIMESTAMP is set To avoid unnecessary recompilations, mkcompile_h does not regenerate compile.h if just the timestamp changed. Though, if KBUILD_BUILD_TIMESTAMP is set, an explicit timestamp for the build was requested, in which case we should not ignore it. If a user follows the documentation for reproducible builds [1] and defines KBUILD_BUILD_TIMESTAMP as the git commit timestamp, a clean build will have the correct timestamp. A subsequent cherry-pick (or amend) changes the commit timestamp and if an incremental build is done with a different KBUILD_BUILD_TIMESTAMP now, that new value is not taken into consideration. But it should for reproducibility. Hence, whenever KBUILD_BUILD_TIMESTAMP is explicitly set, do not ignore UTS_VERSION when making a decision about whether the regenerated version of compile.h should be moved into place. [1] https://www.kernel.org/doc/html/latest/kbuild/reproducible-builds.html Signed-off-by: Matthias Maennich Signed-off-by: Masahiro Yamada --- scripts/mkcompile_h | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index 06bbf4c2c66c..6a2a04d92f42 100755 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -68,15 +68,23 @@ UTS_VERSION="$(echo $UTS_VERSION $CONFIG_FLAGS $TIMESTAMP | cut -b -$UTS_LEN)" # Only replace the real compile.h if the new one is different, # in order to preserve the timestamp and avoid unnecessary # recompilations. -# We don't consider the file changed if only the date/time changed. +# We don't consider the file changed if only the date/time changed, +# unless KBUILD_BUILD_TIMESTAMP was explicitly set (e.g. 
for +# reproducible builds with that value referring to a commit timestamp). # A kernel config change will increase the generation number, thus # causing compile.h to be updated (including date/time) due to the # changed comment in the # first line. +if [ -z "$KBUILD_BUILD_TIMESTAMP" ]; then + IGNORE_PATTERN="UTS_VERSION" +else + IGNORE_PATTERN="NOT_A_PATTERN_TO_BE_MATCHED" +fi + if [ -r $TARGET ] && \ - grep -v 'UTS_VERSION' $TARGET > .tmpver.1 && \ - grep -v 'UTS_VERSION' .tmpcompile > .tmpver.2 && \ + grep -v $IGNORE_PATTERN $TARGET > .tmpver.1 && \ + grep -v $IGNORE_PATTERN .tmpcompile > .tmpver.2 && \ cmp -s .tmpver.1 .tmpver.2; then rm -f .tmpcompile else -- cgit v1.2.3 From 5e5234462756a39e56f4182694f47ec72b5abe52 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Mon, 21 Jun 2021 10:48:08 -0700 Subject: coccinelle: api: remove kobj_to_dev.cocci script Using kobj_to_dev() instead of container_of() is not universally accepted among maintainers as an improvement. The warning leads to repeated patch submissions that won't be accepted. Remove the script. Cc: Christoph Hellwig Cc: Jens Axboe Cc: Denis Efremov Cc: Julia Lawall Signed-off-by: Keith Busch Signed-off-by: Julia Lawall Acked-by: Jens Axboe Acked-by: Denis Efremov --- scripts/coccinelle/api/kobj_to_dev.cocci | 45 -------------------------------- 1 file changed, 45 deletions(-) delete mode 100644 scripts/coccinelle/api/kobj_to_dev.cocci (limited to 'scripts') diff --git a/scripts/coccinelle/api/kobj_to_dev.cocci b/scripts/coccinelle/api/kobj_to_dev.cocci deleted file mode 100644 index cd5d31c6fe76..000000000000 --- a/scripts/coccinelle/api/kobj_to_dev.cocci +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/// -/// Use kobj_to_dev() instead of container_of() -/// -// Confidence: High -// Copyright: (C) 2020 Denis Efremov ISPRAS -// Options: --no-includes --include-headers -// -// Keywords: kobj_to_dev, container_of -// - -virtual context -virtual report -virtual org -virtual patch - - -@r depends on !patch@ -expression ptr; -symbol kobj; -position p; -@@ - -* container_of(ptr, struct device, kobj)@p - - -@depends on patch@ -expression ptr; -@@ - -- container_of(ptr, struct device, kobj) -+ kobj_to_dev(ptr) - - -@script:python depends on report@ -p << r.p; -@@ - -coccilib.report.print_report(p[0], "WARNING opportunity for kobj_to_dev()") - -@script:python depends on org@ -p << r.p; -@@ - -coccilib.org.print_todo(p[0], "WARNING opportunity for kobj_to_dev()") -- cgit v1.2.3 From e2bc3e91d91ede6710801fa0737e4e4ed729b19e Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Thu, 17 Jun 2021 12:31:40 -0700 Subject: scripts/min-tool-version.sh: Raise minimum clang version to 13.0.0 for s390 clang versions prior to the current development version of 13.0.0 cannot compile s390 after commit 3abbdfde5a65 ("s390/bitops: use register pair instead of register asm") and the s390 maintainers do not intend to work around this in the kernel. Codify this in scripts/min-tool-version.sh similar to arm64 with GCC 5.1.0 so that there are no reports of broken builds. [hca@linux.ibm.com: breaking compatibility with older clang compilers is intended to finally make use of a feature which allows the compiler to allocate even/odd register pairs. This is possible since a very long time with gcc, but only since llvm-project commit d058262b1471 ("[SystemZ] Support i128 inline asm operands.") with clang. 
Using that feature allows to get rid of error prone register asm statements, of which the above named kernel commit is only the first of a larger not yet complete series.] Reported-by: Naresh Kamboju Signed-off-by: Nathan Chancellor Acked-by: Nick Desaulniers Acked-by: Masahiro Yamada Link: https://lore.kernel.org/r/20210617193139.856957-1-nathan@kernel.org Signed-off-by: Heiko Carstens Signed-off-by: Vasily Gorbik --- scripts/min-tool-version.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh index d22cf91212b0..319f92104f56 100755 --- a/scripts/min-tool-version.sh +++ b/scripts/min-tool-version.sh @@ -30,7 +30,12 @@ icc) echo 16.0.3 ;; llvm) - echo 10.0.1 + # https://lore.kernel.org/r/YMtib5hKVyNknZt3@osiris/ + if [ "$SRCARCH" = s390 ]; then + echo 13.0.0 + else + echo 10.0.1 + fi ;; *) echo "$1: unknown tool" >&2 -- cgit v1.2.3 From b83c8ba40cebcee1d07cb852c23d616acf8988b7 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 28 Jun 2021 19:33:44 -0700 Subject: streamline_config.pl: make spacing consistent Patch series "streamline_config.pl: Fix Perl spacing". Talking with John Hawley about how vim and emacs deal with Perl files with respect to tabs and spaces, I found that some of my Perl code in the kernel had inconsistent spacing. The way emacs handles Perl by default is to use 4 spaces per indent, but make all 8 spaces into a single tab. Vim does not do this by default. But if you add the vim variable control: # vim: softtabstop=4 to a perl file, it makes vim behave the same way as emacs. The first patch is to change all 8 spaces into a single tab (mostly from people editing the file with vim). The next patch adds the softtabstop variable to make vim act like emacs by default. This patch (of 2): As Perl code tends to have 4 space indentation, but uses tabs for every 8 spaces, make that consistent in the streamline_config.pl code. Replace all 8 spaces with a single tab. Link: https://lkml.kernel.org/r/20210322214032.133596267@goodmis.org Signed-off-by: Steven Rostedt (VMware) Cc: "John (Warthog9) Hawley" Cc: Masahiro Yamada Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/kconfig/streamline_config.pl | 78 ++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 39 deletions(-) (limited to 'scripts') diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl index 911c72a2dbc4..2e65aa9edf97 100755 --- a/scripts/kconfig/streamline_config.pl +++ b/scripts/kconfig/streamline_config.pl @@ -601,12 +601,12 @@ if (defined($ENV{'LMC_KEEP'})) { sub in_preserved_kconfigs { my $kconfig = $config2kfile{$_[0]}; if (!defined($kconfig)) { - return 0; + return 0; } foreach my $excl (@preserved_kconfigs) { - if($kconfig =~ /^$excl/) { - return 1; - } + if($kconfig =~ /^$excl/) { + return 1; + } } return 0; } @@ -629,52 +629,52 @@ foreach my $line (@config_file) { } if (/CONFIG_MODULE_SIG_KEY="(.+)"/) { - my $orig_cert = $1; - my $default_cert = "certs/signing_key.pem"; - - # Check that the logic in this script still matches the one in Kconfig - if (!defined($depends{"MODULE_SIG_KEY"}) || - $depends{"MODULE_SIG_KEY"} !~ /"\Q$default_cert\E"/) { - print STDERR "WARNING: MODULE_SIG_KEY assertion failure, ", - "update needed to ", __FILE__, " line ", __LINE__, "\n"; - print; - } elsif ($orig_cert ne $default_cert && ! 
-f $orig_cert) { - print STDERR "Module signature verification enabled but ", - "module signing key \"$orig_cert\" not found. Resetting ", - "signing key to default value.\n"; - print "CONFIG_MODULE_SIG_KEY=\"$default_cert\"\n"; - } else { - print; - } - next; + my $orig_cert = $1; + my $default_cert = "certs/signing_key.pem"; + + # Check that the logic in this script still matches the one in Kconfig + if (!defined($depends{"MODULE_SIG_KEY"}) || + $depends{"MODULE_SIG_KEY"} !~ /"\Q$default_cert\E"/) { + print STDERR "WARNING: MODULE_SIG_KEY assertion failure, ", + "update needed to ", __FILE__, " line ", __LINE__, "\n"; + print; + } elsif ($orig_cert ne $default_cert && ! -f $orig_cert) { + print STDERR "Module signature verification enabled but ", + "module signing key \"$orig_cert\" not found. Resetting ", + "signing key to default value.\n"; + print "CONFIG_MODULE_SIG_KEY=\"$default_cert\"\n"; + } else { + print; + } + next; } if (/CONFIG_SYSTEM_TRUSTED_KEYS="(.+)"/) { - my $orig_keys = $1; - - if (! -f $orig_keys) { - print STDERR "System keyring enabled but keys \"$orig_keys\" ", - "not found. Resetting keys to default value.\n"; - print "CONFIG_SYSTEM_TRUSTED_KEYS=\"\"\n"; - } else { - print; - } - next; + my $orig_keys = $1; + + if (! -f $orig_keys) { + print STDERR "System keyring enabled but keys \"$orig_keys\" ", + "not found. Resetting keys to default value.\n"; + print "CONFIG_SYSTEM_TRUSTED_KEYS=\"\"\n"; + } else { + print; + } + next; } if (/^(CONFIG.*)=(m|y)/) { - if (in_preserved_kconfigs($1)) { - dprint "Preserve config $1"; - print; - next; - } + if (in_preserved_kconfigs($1)) { + dprint "Preserve config $1"; + print; + next; + } if (defined($configs{$1})) { if ($localyesconfig) { - $setconfigs{$1} = 'y'; + $setconfigs{$1} = 'y'; print "$1=y\n"; next; } else { - $setconfigs{$1} = $2; + $setconfigs{$1} = $2; } } elsif ($2 eq "m") { print "# $1 is not set\n"; -- cgit v1.2.3 From d1b1f1e627c0085fb2e2b5690929a3d53879cc67 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 28 Jun 2021 19:33:47 -0700 Subject: streamline_config.pl: add softtabstop=4 for vim users The tab stop for Perl files is by default (at least in emacs) to be 4 spaces, where a tab is used for all 8 spaces. Add a local variable comment to make vim do the same by default, and this will help keep the file consistent in the future when others edit it via vim and not emacs. Link: https://lkml.kernel.org/r/20210322214032.293992979@goodmis.org Signed-off-by: Steven Rostedt (VMware) Cc: Masahiro Yamada Cc: "John (Warthog9) Hawley" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/kconfig/streamline_config.pl | 2 ++ 1 file changed, 2 insertions(+) (limited to 'scripts') diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl index 2e65aa9edf97..1a5fea0519eb 100755 --- a/scripts/kconfig/streamline_config.pl +++ b/scripts/kconfig/streamline_config.pl @@ -702,3 +702,5 @@ foreach my $module (keys(%modules)) { print STDERR "\n"; } } + +# vim: softtabstop=4 -- cgit v1.2.3 From c1c9142004e7e21d6d3d2cd6a339845771ce6a27 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 28 Jun 2021 19:33:50 -0700 Subject: scripts/spelling.txt: add more spellings to spelling.txt Here are some of the more common spelling mistakes and typos that I've found while fixing up spelling mistakes in the kernel in the past few months. 
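Each entry is a single misspelling||correction pair, which checkpatch.pl loads and matches against patch and file content via its TYPO_SPELLING check. A quick way to confirm the new entries landed, using a few of the pairs added here, plus a hypothetical test file to see one get flagged:

grep -E '^(accelaration|disble|hearbeat|nubmer)\|\|' scripts/spelling.txt

# hypothetical file; only the spelling warning is shown
printf '/* the disired value */\n' > /tmp/typo.c
./scripts/checkpatch.pl --file --terse --types TYPO_SPELLING /tmp/typo.c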
Link: https://lkml.kernel.org/r/20210514093655.8829-1-colin.king@canonical.com Signed-off-by: Colin Ian King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/spelling.txt | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'scripts') diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 7b6a01291598..17fdc620d548 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -22,6 +22,7 @@ absolut||absolute absoulte||absolute acccess||access acceess||access +accelaration||acceleration acceleratoin||acceleration accelleration||acceleration accesing||accessing @@ -264,6 +265,7 @@ calucate||calculate calulate||calculate cancelation||cancellation cancle||cancel +canot||cannot capabilites||capabilities capabilties||capabilities capabilty||capability @@ -494,7 +496,10 @@ digial||digital dimention||dimension dimesions||dimensions diconnected||disconnected +disabed||disabled +disble||disable disgest||digest +disired||desired dispalying||displaying diplay||display directon||direction @@ -710,6 +715,7 @@ havind||having heirarchically||hierarchically heirarchy||hierarchy helpfull||helpful +hearbeat||heartbeat heterogenous||heterogeneous hexdecimal||hexadecimal hybernate||hibernate @@ -989,6 +995,7 @@ notications||notifications notifcations||notifications notifed||notified notity||notify +nubmer||number numebr||number numner||number obtaion||obtain @@ -1014,8 +1021,10 @@ ommiting||omitting ommitted||omitted onself||oneself ony||only +openning||opening operatione||operation opertaions||operations +opportunies||opportunities optionnal||optional optmizations||optimizations orientatied||orientated @@ -1111,6 +1120,7 @@ prefitler||prefilter preform||perform premption||preemption prepaired||prepared +prepate||prepare preperation||preparation preprare||prepare pressre||pressure @@ -1123,6 +1133,7 @@ privilaged||privileged privilage||privilege priviledge||privilege priviledges||privileges +privleges||privileges probaly||probably procceed||proceed proccesors||processors @@ -1167,6 +1178,7 @@ promixity||proximity psudo||pseudo psuedo||pseudo psychadelic||psychedelic +purgable||purgeable pwoer||power queing||queuing quering||querying @@ -1180,6 +1192,7 @@ receieve||receive recepient||recipient recevied||received receving||receiving +recievd||received recieved||received recieve||receive reciever||receiver @@ -1228,6 +1241,7 @@ reponse||response representaion||representation reqeust||request reqister||register +requed||requeued requestied||requested requiere||require requirment||requirement @@ -1332,6 +1346,7 @@ singal||signal singed||signed sleeped||slept sliped||slipped +softwade||software softwares||software soley||solely souce||source @@ -1510,6 +1525,7 @@ unintialized||uninitialized unitialized||uninitialized unkmown||unknown unknonw||unknown +unknouwn||unknown unknow||unknown unkown||unknown unamed||unnamed -- cgit v1.2.3 From a0b8200d06ad6450c179407baa5f0f52f8cfcc97 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 28 Jun 2021 19:41:34 -0700 Subject: kbuild: skip per-CPU BTF generation for pahole v1.18-v1.21 Commit "mm/page_alloc: convert per-cpu list protection to local_lock" will introduce a zero-sized per-CPU variable, which causes pahole to generate invalid BTF. Only pahole versions 1.18 through 1.21 are impacted, as before 1.18 pahole doesn't know anything about per-CPU variables, and 1.22 contains the proper fix for the issue. 
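(For context on how that range is detected: gen_btf() in link-vmlinux.sh normalizes the reported version to an integer before the checks added below; roughly, assuming the existing normalization logic is unchanged:

pahole --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'   # v1.21 -> 121

which is what the new "-ge 118 && -le 121" window compares against.)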
Luckily, pahole 1.18 got --skip_encoding_btf_vars option disabling BTF generation for per-CPU variables in anticipation of some unanticipated problems. So use this escape hatch to disable per-CPU var BTF info on those problematic pahole versions. Users relying on availability of per-CPU var BTFs would need to upgrade to pahole 1.22+, but everyone won't notice any regressions. Link: https://lkml.kernel.org/r/20210530002536.3193829-1-andrii@kernel.org Signed-off-by: Andrii Nakryiko Acked-by: Mel Gorman Cc: Arnaldo Carvalho de Melo Cc: Hao Luo Cc: Michal Suchanek Cc: Jiri Olsa Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/link-vmlinux.sh | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'scripts') diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 0e0f6466b18d..475faa15854e 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -235,6 +235,10 @@ gen_btf() vmlinux_link ${1} + if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then + # pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars + extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_vars" + fi if [ "${pahole_ver}" -ge "121" ]; then extra_paholeopt="${extra_paholeopt} --btf_gen_floats" fi -- cgit v1.2.3 From f9363b31d769245cb7ec8a660460800d4b466911 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 30 Jun 2021 18:56:19 -0700 Subject: checkpatch: scripts/spdxcheck.py now requires python3 Since commit d0259c42abff ("spdxcheck.py: Use Python 3"), spdxcheck.py explicitly expects to run as python3 script. If "python" still points to python v2.7 and the script is executed with "python scripts/spdxcheck.py", the following error may be seen even if git-python is installed for python3. Traceback (most recent call last): File "scripts/spdxcheck.py", line 10, in import git ImportError: No module named git To fix the problem, check for the existence of python3, check if the script is executable and not just for its existence, and execute it directly. Link: https://lkml.kernel.org/r/20210505211720.447111-1-linux@roeck-us.net Signed-off-by: Guenter Roeck Cc: Joe Perches Cc: Bert Vermeulen Cc: Dwaipayan Ray Cc: Lukas Bulwahn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/checkpatch.pl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 23697a6b1eaa..d65334588eb4 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -1084,10 +1084,10 @@ sub is_maintained_obsolete { sub is_SPDX_License_valid { my ($license) = @_; - return 1 if (!$tree || which("python") eq "" || !(-e "$root/scripts/spdxcheck.py") || !(-e "$gitroot")); + return 1 if (!$tree || which("python3") eq "" || !(-x "$root/scripts/spdxcheck.py") || !(-e "$gitroot")); my $root_path = abs_path($root); - my $status = `cd "$root_path"; echo "$license" | python scripts/spdxcheck.py -`; + my $status = `cd "$root_path"; echo "$license" | scripts/spdxcheck.py -`; return 0 if ($status ne ""); return 1; } -- cgit v1.2.3 From 690786511b32baba073f729844779172d2ed72b6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 30 Jun 2021 18:56:22 -0700 Subject: checkpatch: improve the indented label test checkpatch identifies a label only when a terminating colon immediately follows an identifier. Bitfield definitions can appear to be labels so ignore any spaces between the identifier terminating colon and any digit that may be used to define a bitfield length. 
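To make the distinction concrete, here is an illustrative (hypothetical) test file that the updated check must treat asymmetrically; run through checkpatch, only the indented label should fire:

cat > /tmp/label-test.c <<'EOF'
void f(int *p)
{
        if (!p)
                goto out;
        out:                    /* indented label: warned */
        *p = 0;
}

struct s {
        unsigned int
                flag:1,         /* bitfield: the ":1" keeps this from matching */
                mode:2;
};
EOF
./scripts/checkpatch.pl --file --terse --types INDENTED_LABEL /tmp/label-test.c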
Miscellanea: o Improve the initial checkpatch comment o Use the more typical '&&' instead of 'and' o Require the initial label character to be a non-digit (Can't use $Ident here because $Ident allows ## concatenation) o Use $sline instead of $line to ignore comments o Use '$sline !~ /.../' instead of '!($line =~ /.../)' Link: https://lkml.kernel.org/r/b54d673e7cde7de5de0c9ba4dd57dd0858580ca4.camel@perches.com Signed-off-by: Joe Perches Cc: Greg Kroah-Hartman Cc: Manikishan Ghantasala Cc: Alex Elder Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/checkpatch.pl | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index d65334588eb4..dad87c368632 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -5361,9 +5361,13 @@ sub process { } } -#goto labels aren't indented, allow a single space however - if ($line=~/^.\s+[A-Za-z\d_]+:(?![0-9]+)/ and - !($line=~/^. [A-Za-z\d_]+:/) and !($line=~/^.\s+default:/)) { +# check that goto labels aren't indented (allow a single space indentation) +# and ignore bitfield definitions like foo:1 +# Strictly, labels can have whitespace after the identifier and before the : +# but this is not allowed here as many ?: uses would appear to be labels + if ($sline =~ /^.\s+[A-Za-z_][A-Za-z\d_]*:(?!\s*\d+)/ && + $sline !~ /^. [A-Za-z\d_][A-Za-z\d_]*:/ && + $sline !~ /^.\s+default:/) { if (WARN("INDENTED_LABEL", "labels should not be indented\n" . $herecurr) && $fix) { -- cgit v1.2.3 From 46b85bf96714267ab7855683b40103c9282aaf4e Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 30 Jun 2021 18:56:25 -0700 Subject: checkpatch: do not complain about positive return values starting with EPOLL checkpatch complains about positive return values of poll functions. Example: WARNING: return of an errno should typically be negative (ie: return -EPOLLIN) + return EPOLLIN; Poll functions return positive values. The defines for the return values of poll functions all start with EPOLL, resulting in a number of false positives. An often used workaround is to assign poll function return values to variables and returning that variable, but that is a less than perfect solution. There is no error definition which starts with EPOLL, so it is safe to omit the warning for return values starting with EPOLL. Link: https://lkml.kernel.org/r/20210622004334.638680-1-linux@roeck-us.net Signed-off-by: Guenter Roeck Acked-by: Joe Perches Cc: Ricardo Ribalda Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/checkpatch.pl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index dad87c368632..461d4221e4a4 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -5462,7 +5462,7 @@ sub process { # Return of what appears to be an errno should normally be negative if ($sline =~ /\breturn(?:\s*\(+\s*|\s+)(E[A-Z]+)(?:\s*\)+\s*|\s*)[;:,]/) { my $name = $1; - if ($name ne 'EOF' && $name ne 'ERROR') { + if ($name ne 'EOF' && $name ne 'ERROR' && $name !~ /^EPOLL/) { WARN("USE_NEGATIVE_ERRNO", "return of an errno should typically be negative (ie: return -$1)\n" . 
$herecurr); } -- cgit v1.2.3 From 7bb7f2ac24a028b20fca466b9633847b289b156a Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Wed, 7 Jul 2021 18:08:11 -0700 Subject: arch, mm: wire up memfd_secret system call where relevant Wire up memfd_secret system call on architectures that define ARCH_HAS_SET_DIRECT_MAP, namely arm64, risc-v and x86. Link: https://lkml.kernel.org/r/20210518072034.31572-7-rppt@kernel.org Signed-off-by: Mike Rapoport Acked-by: Palmer Dabbelt Acked-by: Arnd Bergmann Acked-by: Catalin Marinas Acked-by: David Hildenbrand Acked-by: James Bottomley Cc: Alexander Viro Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Christopher Lameter Cc: Dan Williams Cc: Dave Hansen Cc: David Hildenbrand Cc: Elena Reshetova Cc: Hagen Paul Pfeifer Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Bottomley Cc: "Kirill A. Shutemov" Cc: Mark Rutland Cc: Matthew Wilcox Cc: Michael Kerrisk Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Peter Zijlstra Cc: Rick Edgecombe Cc: Roman Gushchin Cc: Shakeel Butt Cc: Shuah Khan Cc: Thomas Gleixner Cc: Tycho Andersen Cc: Will Deacon Cc: kernel test robot Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/uapi/asm/unistd.h | 1 + arch/riscv/include/asm/unistd.h | 1 + arch/x86/entry/syscalls/syscall_32.tbl | 1 + arch/x86/entry/syscalls/syscall_64.tbl | 1 + include/linux/syscalls.h | 1 + include/uapi/asm-generic/unistd.h | 7 ++++++- scripts/checksyscalls.sh | 4 ++++ 7 files changed, 15 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h index f83a70e07df8..ce2ee8f1e361 100644 --- a/arch/arm64/include/uapi/asm/unistd.h +++ b/arch/arm64/include/uapi/asm/unistd.h @@ -20,5 +20,6 @@ #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS #define __ARCH_WANT_SYS_CLONE3 +#define __ARCH_WANT_MEMFD_SECRET #include diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h index 977ee6181dab..6c316093a1e5 100644 --- a/arch/riscv/include/asm/unistd.h +++ b/arch/riscv/include/asm/unistd.h @@ -9,6 +9,7 @@ */ #define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_MEMFD_SECRET #include diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index fba2f615119a..ce763a12311c 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -451,3 +451,4 @@ 444 i386 landlock_create_ruleset sys_landlock_create_ruleset 445 i386 landlock_add_rule sys_landlock_add_rule 446 i386 landlock_restrict_self sys_landlock_restrict_self +447 i386 memfd_secret sys_memfd_secret diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index af973e400053..f6b57799c1ea 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -368,6 +368,7 @@ 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +447 common memfd_secret sys_memfd_secret # # Due to a historical design error, certain syscalls are numbered differently diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 586128d5c3b8..69c9a7010081 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1050,6 +1050,7 @@ asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr _ asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type, const void __user *rule_attr, 
__u32 flags); asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags); +asmlinkage long sys_memfd_secret(unsigned int flags); /* * Architecture-specific system calls diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index f211961ce1da..a9d6fcd95f42 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -873,8 +873,13 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule) #define __NR_landlock_restrict_self 446 __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) +#ifdef __ARCH_WANT_MEMFD_SECRET +#define __NR_memfd_secret 447 +__SYSCALL(__NR_memfd_secret, sys_memfd_secret) +#endif + #undef __NR_syscalls -#define __NR_syscalls 447 +#define __NR_syscalls 448 /* * 32 bit systems traditionally used different diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh index a18b47695f55..b7609958ee36 100755 --- a/scripts/checksyscalls.sh +++ b/scripts/checksyscalls.sh @@ -40,6 +40,10 @@ cat << EOF #define __IGNORE_setrlimit /* setrlimit */ #endif +#ifndef __ARCH_WANT_MEMFD_SECRET +#define __IGNORE_memfd_secret +#endif + /* Missing flags argument */ #define __IGNORE_renameat /* renameat2 */ -- cgit v1.2.3 From 26681eb3724b617c4894cfb53cad2e3740323bc2 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 7 Jul 2021 18:09:31 -0700 Subject: scripts/decode_stacktrace.sh: support debuginfod Now that stacktraces contain the build ID information we can update this script to use debuginfod-find to locate the debuginfo for the vmlinux and modules automatically. This can replace the existing code that requires specifying a path to vmlinux or tries to find the vmlinux and modules automatically by using the release number. Work it into the script as a fallback option if the vmlinux isn't specified on the commandline. Link: https://lkml.kernel.org/r/20210511003845.2429846-9-swboyd@chromium.org Signed-off-by: Stephen Boyd Cc: Jiri Olsa Cc: Alexei Starovoitov Cc: Jessica Yu Cc: Evan Green Cc: Hsin-Yi Wang Cc: Konstantin Khlebnikov Cc: Sasha Levin Cc: Petr Mladek Cc: Steven Rostedt Cc: Andy Shevchenko Cc: Matthew Wilcox Cc: Baoquan He Cc: Borislav Petkov Cc: Catalin Marinas Cc: Dave Young Cc: Ingo Molnar Cc: Rasmus Villemoes Cc: Sergey Senozhatsky Cc: Thomas Gleixner Cc: Vivek Goyal Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/decode_stacktrace.sh | 81 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 70 insertions(+), 11 deletions(-) (limited to 'scripts') diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh index 90398347e366..ca21f8bdf5f2 100755 --- a/scripts/decode_stacktrace.sh +++ b/scripts/decode_stacktrace.sh @@ -3,11 +3,10 @@ # (c) 2014, Sasha Levin #set -x -if [[ $# < 1 ]]; then +usage() { echo "Usage:" echo " $0 -r | [base path] [modules path]" - exit 1 -fi +} if [[ $1 == "-r" ]] ; then vmlinux="" @@ -24,6 +23,7 @@ if [[ $1 == "-r" ]] ; then if [[ $vmlinux == "" ]] ; then echo "ERROR! vmlinux image for release $release is not found" >&2 + usage exit 2 fi else @@ -31,12 +31,35 @@ else basepath=${2-auto} modpath=$3 release="" + debuginfod= + + # Can we use debuginfod-find? + if type debuginfod-find >/dev/null 2>&1 ; then + debuginfod=${1-only} + fi + + if [[ $vmlinux == "" && -z $debuginfod ]] ; then + echo "ERROR! 
vmlinux image must be specified" >&2 + usage + exit 1 + fi fi declare -A cache declare -A modcache find_module() { + if [[ -n $debuginfod ]] ; then + if [[ -n $modbuildid ]] ; then + debuginfod-find debuginfo $modbuildid && return + fi + + # Only using debuginfod so don't try to find vmlinux module path + if [[ $debuginfod == "only" ]] ; then + return + fi + fi + if [[ "$modpath" != "" ]] ; then for fn in $(find "$modpath" -name "${module//_/[-_]}.ko*") ; do if readelf -WS "$fn" | grep -qwF .debug_line ; then @@ -150,6 +173,27 @@ parse_symbol() { symbol="$segment$name ($code)" } +debuginfod_get_vmlinux() { + local vmlinux_buildid=${1##* } + + if [[ $vmlinux != "" ]]; then + return + fi + + if [[ $vmlinux_buildid =~ ^[0-9a-f]+ ]]; then + vmlinux=$(debuginfod-find debuginfo $vmlinux_buildid) + if [[ $? -ne 0 ]] ; then + echo "ERROR! vmlinux image not found via debuginfod-find" >&2 + usage + exit 2 + fi + return + fi + echo "ERROR! Build ID for vmlinux not found. Try passing -r or specifying vmlinux" >&2 + usage + exit 2 +} + decode_code() { local scripts=`dirname "${BASH_SOURCE[0]}"` @@ -157,6 +201,14 @@ decode_code() { } handle_line() { + if [[ $basepath == "auto" && $vmlinux != "" ]] ; then + module="" + symbol="kernel_init+0x0/0x0" + parse_symbol + basepath=${symbol#kernel_init (} + basepath=${basepath%/init/main.c:*)} + fi + local words # Tokenize @@ -182,16 +234,28 @@ handle_line() { fi done + if [[ ${words[$last]} =~ ^[0-9a-f]+\] ]]; then + words[$last-1]="${words[$last-1]} ${words[$last]}" + unset words[$last] + last=$(( $last - 1 )) + fi + if [[ ${words[$last]} =~ \[([^]]+)\] ]]; then module=${words[$last]} module=${module#\[} module=${module%\]} + modbuildid=${module#* } + module=${module% *} + if [[ $modbuildid == $module ]]; then + modbuildid= + fi symbol=${words[$last-1]} unset words[$last-1] else # The symbol is the last element, process it symbol=${words[$last]} module= + modbuildid= fi unset words[$last] @@ -201,14 +265,6 @@ handle_line() { echo "${words[@]}" "$symbol $module" } -if [[ $basepath == "auto" ]] ; then - module="" - symbol="kernel_init+0x0/0x0" - parse_symbol - basepath=${symbol#kernel_init (} - basepath=${basepath%/init/main.c:*)} -fi - while read line; do # Let's see if we have an address in the line if [[ $line =~ \[\<([^]]+)\>\] ]] || @@ -218,6 +274,9 @@ while read line; do # Is it a code line? elif [[ $line == *Code:* ]]; then decode_code "$line" + # Is it a version line? + elif [[ -n $debuginfod && $line =~ PID:\ [0-9]+\ Comm: ]]; then + debuginfod_get_vmlinux "$line" else # Nothing special in this line, show it as is echo "$line" -- cgit v1.2.3 From 5bf0f3bc377e5f87bfd61ccc9c1efb3c6261f2c3 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 7 Jul 2021 18:09:35 -0700 Subject: scripts/decode_stacktrace.sh: silence stderr messages from addr2line/nm Sometimes if you're using tools that have linked things improperly or have new features/sections that older tools don't expect you'll see warnings printed to stderr. We don't really care about these warnings, so let's just silence these messages to cleanup output of this script. 
Link: https://lkml.kernel.org/r/20210511003845.2429846-10-swboyd@chromium.org
Signed-off-by: Stephen Boyd
Cc: Jiri Olsa
Cc: Alexei Starovoitov
Cc: Jessica Yu
Cc: Evan Green
Cc: Hsin-Yi Wang
Cc: Konstantin Khlebnikov
Cc: Sasha Levin
Cc: Andy Shevchenko
Cc: Baoquan He
Cc: Borislav Petkov
Cc: Catalin Marinas
Cc: Dave Young
Cc: Ingo Molnar
Cc: Matthew Wilcox
Cc: Petr Mladek
Cc: Rasmus Villemoes
Cc: Sergey Senozhatsky
Cc: Steven Rostedt
Cc: Thomas Gleixner
Cc: Vivek Goyal
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 scripts/decode_stacktrace.sh | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'scripts')

diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index ca21f8bdf5f2..20b5af1ebe5e 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -74,7 +74,7 @@ find_module() {
 	find_module && return
 
 	if [[ $release == "" ]] ; then
-		release=$(gdb -ex 'print init_uts_ns.name.release' -ex 'quit' -quiet -batch "$vmlinux" | sed -n 's/\$1 = "\(.*\)".*/\1/p')
+		release=$(gdb -ex 'print init_uts_ns.name.release' -ex 'quit' -quiet -batch "$vmlinux" 2>/dev/null | sed -n 's/\$1 = "\(.*\)".*/\1/p')
 	fi
 
 	for dn in {/usr/lib/debug,}/lib/modules/$release ; do
@@ -128,7 +128,7 @@ parse_symbol() {
 	if [[ "${cache[$module,$name]+isset}" == "isset" ]]; then
 		local base_addr=${cache[$module,$name]}
 	else
-		local base_addr=$(nm "$objfile" | awk '$3 == "'$name'" && ($2 == "t" || $2 == "T") {print $1; exit}')
+		local base_addr=$(nm "$objfile" 2>/dev/null | awk '$3 == "'$name'" && ($2 == "t" || $2 == "T") {print $1; exit}')
 		if [[ $base_addr == "" ]] ; then
 			# address not found
 			return
@@ -152,7 +152,7 @@ parse_symbol() {
 	if [[ "${cache[$module,$address]+isset}" == "isset" ]]; then
 		local code=${cache[$module,$address]}
 	else
-		local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address")
+		local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address" 2>/dev/null)
 		cache[$module,$address]=$code
 	fi
--
cgit v1.2.3


From d5ce757d8f1bdf9def7d2f71862b48ed83d5ed12 Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Wed, 7 Jul 2021 18:09:38 -0700
Subject: scripts/decode_stacktrace.sh: indicate 'auto' can be used for base path

Add "auto" to the usage message so that it's a little clearer that you
can pass "auto" as the second argument. When passing "auto" the script
tries to find the base path automatically instead of requiring it be
passed on the command line. Also use "<>" to mark the variable
arguments and "[]" to mark them optional, so that we can differentiate
them from the literal "auto" that should be passed.
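Rendered, the new usage message reads roughly as follows (sketch; $0
expands to the script name at run time):

	Usage:
		decode_stacktrace.sh -r <release> | <vmlinux> [<base path>|auto] [<modules path>]

so the literal keyword "auto" is visually distinct from the
angle-bracketed placeholders.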
Link: https://lkml.kernel.org/r/20210511003845.2429846-11-swboyd@chromium.org
Signed-off-by: Stephen Boyd
Cc: Jiri Olsa
Cc: Alexei Starovoitov
Cc: Jessica Yu
Cc: Evan Green
Cc: Hsin-Yi Wang
Cc: Konstantin Khlebnikov
Cc: Sasha Levin
Cc: Andy Shevchenko
Cc: Baoquan He
Cc: Borislav Petkov
Cc: Catalin Marinas
Cc: Dave Young
Cc: Ingo Molnar
Cc: Matthew Wilcox
Cc: Petr Mladek
Cc: Rasmus Villemoes
Cc: Sergey Senozhatsky
Cc: Steven Rostedt
Cc: Thomas Gleixner
Cc: Vivek Goyal
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 scripts/decode_stacktrace.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'scripts')

diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 20b5af1ebe5e..5fbad61fe490 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -5,7 +5,7 @@
 
 usage() {
 	echo "Usage:"
-	echo "	$0 -r <release> | <vmlinux> [base path] [modules path]"
+	echo "	$0 -r <release> | <vmlinux> [<base path>|auto] [<modules path>]"
 }
 
 if [[ $1 == "-r" ]] ; then
--
cgit v1.2.3


From c6a3a81d19b834e3aed819027f022c5938fca2ec Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Mon, 5 Jul 2021 15:06:54 +0900
Subject: scripts: check duplicated syscall number in syscall table

Currently, syscall{hdr,tbl}.sh sorts the entire syscall table, but you
can assume it is already sorted by the syscall number. The generated
syscall table does not work if the same syscall number appears twice,
so check for that in the script.

Signed-off-by: Masahiro Yamada
---
 scripts/syscallhdr.sh | 2 +-
 scripts/syscalltbl.sh | 7 ++++++-
 2 files changed, 7 insertions(+), 2 deletions(-)

(limited to 'scripts')

diff --git a/scripts/syscallhdr.sh b/scripts/syscallhdr.sh
index 848ac2735115..22e34cd46b9b 100755
--- a/scripts/syscallhdr.sh
+++ b/scripts/syscallhdr.sh
@@ -69,7 +69,7 @@ guard=_UAPI_ASM_$(basename "$outfile" |
 	sed -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
 	    -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g')
 
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+$abis" "$infile" | sort -n | {
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+$abis" "$infile" | {
 	echo "#ifndef $guard"
 	echo "#define $guard"
 	echo
diff --git a/scripts/syscalltbl.sh b/scripts/syscalltbl.sh
index aa6ab156301c..6abe143889ef 100755
--- a/scripts/syscalltbl.sh
+++ b/scripts/syscalltbl.sh
@@ -52,10 +52,15 @@ outfile="$2"
 
 nxt=0
 
-grep -E "^[0-9]+[[:space:]]+$abis" "$infile" | sort -n | {
+grep -E "^[0-9]+[[:space:]]+$abis" "$infile" | {
 
 	while read nr abi name native compat ; do
 
+		if [ $nxt -gt $nr ]; then
+			echo "error: $infile: syscall table is not sorted or duplicates the same syscall number" >&2
+			exit 1
+		fi
+
 		while [ $nxt -lt $nr ]; do
 			echo "__SYSCALL($nxt, sys_ni_syscall)"
 			nxt=$((nxt + 1))
--
cgit v1.2.3


From 27932b6a2088eac7a5afa5471963b926cfbb4de7 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Tue, 6 Jul 2021 15:15:29 +0900
Subject: scripts: add generic syscallnr.sh

Like syscallhdr.sh and syscalltbl.sh, add a simple script to generate
__NR_syscalls, which should not be exported to userspace.

This script is useful to replace arch/mips/kernel/syscalls/syscallnr.sh,
refactor arch/s390/kernel/syscalls/syscalltbl, and eliminate the code
surrounded by #ifdef __KERNEL__ / #endif from exported
uapi/asm/unistd_*.h files.
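As a rough sketch of what this script is meant to emit (the guard name
is derived from the output file name and is only illustrative here;
448 matches the syscall count after the memfd_secret wiring above):

	#ifndef _ASM_UNISTD_NR_H
	#define _ASM_UNISTD_NR_H

	#define __NR_syscalls 448

	#endif /* _ASM_UNISTD_NR_H */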
Signed-off-by: Masahiro Yamada
---
 scripts/syscallnr.sh | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 74 insertions(+)
 create mode 100644 scripts/syscallnr.sh

(limited to 'scripts')

diff --git a/scripts/syscallnr.sh b/scripts/syscallnr.sh
new file mode 100644
index 000000000000..3aa29e0dcc52
--- /dev/null
+++ b/scripts/syscallnr.sh
@@ -0,0 +1,74 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Generate a syscall number header.
+#
+# Each line of the syscall table should have the following format:
+#
+# NR ABI NAME [NATIVE] [COMPAT]
+#
+# NR       syscall number
+# ABI      ABI name
+# NAME     syscall name
+# NATIVE   native entry point (optional)
+# COMPAT   compat entry point (optional)
+set -e
+
+usage() {
+	echo >&2 "usage: $0 [--abis ABIS] [--prefix PREFIX] INFILE OUTFILE" >&2
+	echo >&2
+	echo >&2 "  INFILE    input syscall table"
+	echo >&2 "  OUTFILE   output header file"
+	echo >&2
+	echo >&2 "options:"
+	echo >&2 "  --abis ABIS     ABI(s) to handle (By default, all lines are handled)"
+	echo >&2 "  --prefix PREFIX The prefix to the macro like __NR_"
+	exit 1
+}
+
+# default unless specified by options
+abis=
+prefix=
+
+while [ $# -gt 0 ]
+do
+	case $1 in
+	--abis)
+		abis=$(echo "($2)" | tr ',' '|')
+		shift 2;;
+	--prefix)
+		prefix=$2
+		shift 2;;
+	-*)
+		echo "$1: unknown option" >&2
+		usage;;
+	*)
+		break;;
+	esac
+done
+
+if [ $# -ne 2 ]; then
+	usage
+fi
+
+infile="$1"
+outfile="$2"
+
+guard=_ASM_$(basename "$outfile" |
+	sed -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+	    -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g')
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+$abis" "$infile" | sort -n | {
+	echo "#ifndef $guard"
+	echo "#define $guard"
+	echo
+
+	max=0
+	while read nr abi name native compat ; do
+		max=$nr
+	done
+
+	echo "#define __NR_${prefix}syscalls $(($max + 1))"
+	echo
+	echo "#endif /* $guard */"
+} > "$outfile"
--
cgit v1.2.3


From 40751c6c9bea6a5cfede7c61ee5f3cb1ab857029 Mon Sep 17 00:00:00 2001
From: Nishanth Menon
Date: Wed, 7 Jul 2021 15:48:40 -0500
Subject: scripts/spdxcheck.py: Strictly read license files in utf-8

Commit bc41a7f36469 ("LICENSES: Add the CC-BY-4.0 license")
unfortunately introduced LICENSES/dual/CC-BY-4.0 as UTF-8 Unicode text,
and python will barf at it with:

  FAIL: 'ascii' codec can't decode byte 0xe2 in position 2109: ordinal not in range(128)
  Traceback (most recent call last):
    File "scripts/spdxcheck.py", line 244, in <module>
      spdx = read_spdxdata(repo)
    File "scripts/spdxcheck.py", line 47, in read_spdxdata
      for l in open(el.path).readlines():
    File "/usr/lib/python3.6/encodings/ascii.py", line 26, in decode
      return codecs.ascii_decode(input, self.errors)[0]
  UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 2109: ordinal not in range(128)

While it is indeed debatable whether 'Licensor.' in the license file
needs unicode quotes (the 0xe2 byte above is the lead byte of exactly
such a UTF-8 quote character), just force spdxcheck to read the license
files as utf-8 instead.
Reported-by: Rahul T R
Signed-off-by: Nishanth Menon
Reviewed-by: Thomas Gleixner
Link: https://lore.kernel.org/r/20210707204840.30891-1-nm@ti.com
Signed-off-by: Jonathan Corbet
---
 scripts/spdxcheck.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'scripts')

diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 3e784cf9f401..ebd06ae642c9 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -44,7 +44,7 @@ def read_spdxdata(repo):
                 continue
 
             exception = None
-            for l in open(el.path).readlines():
+            for l in open(el.path, encoding="utf-8").readlines():
                 if l.startswith('Valid-License-Identifier:'):
                     lid = l.split(':')[1].strip().upper()
                     if lid in spdx.licenses:
--
cgit v1.2.3


From b00628b1c7d595ae5b544e059c27b1f5828314b4 Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov
Date: Wed, 14 Jul 2021 17:54:09 -0700
Subject: bpf: Introduce bpf timers.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Introduce 'struct bpf_timer { __u64 :64; __u64 :64; };' that can be
embedded in hash/array/lru maps as a regular field and helpers to operate
on it:

  // Initialize the timer.
  // First 4 bits of 'flags' specify clockid.
  // Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
  long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, int flags);

  // Configure the timer to call 'callback_fn' static function.
  long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);

  // Arm the timer to expire 'nsec' nanoseconds from the current time.
  long bpf_timer_start(struct bpf_timer *timer, u64 nsec, u64 flags);

  // Cancel the timer and wait for callback_fn to finish if it was running.
  long bpf_timer_cancel(struct bpf_timer *timer);

Here is how a BPF program might look:

  struct map_elem {
      int counter;
      struct bpf_timer timer;
  };

  struct {
      __uint(type, BPF_MAP_TYPE_HASH);
      __uint(max_entries, 1000);
      __type(key, int);
      __type(value, struct map_elem);
  } hmap SEC(".maps");

  static int timer_cb(void *map, int *key, struct map_elem *val);
  /* val points to particular map element that contains bpf_timer. */

  SEC("fentry/bpf_fentry_test1")
  int BPF_PROG(test1, int a)
  {
      struct map_elem *val;
      int key = 0;

      val = bpf_map_lookup_elem(&hmap, &key);
      if (val) {
          bpf_timer_init(&val->timer, &hmap, CLOCK_REALTIME);
          bpf_timer_set_callback(&val->timer, timer_cb);
          bpf_timer_start(&val->timer, 1000 /* call timer_cb in 1 usec */, 0);
      }
  }

This patch adds helper implementations that rely on hrtimers to call bpf
functions as timers expire. The following patches add necessary safety
checks.

Only programs with CAP_BPF are allowed to use bpf_timer.

The number of timers used by the program is constrained by the memcg
recorded at map creation time.

The bpf_timer_init() helper needs an explicit 'map' argument because
inner maps are dynamic and not known at load time, while
bpf_timer_set_callback() receives a hidden 'aux->prog' argument supplied
by the verifier. The prog pointer is needed to do refcounting of the bpf
program, to make sure the program doesn't get freed while the timer is
armed. This approach relies on the "user refcnt" scheme used in
prog_array that stores bpf programs for bpf_tail_call. The
bpf_timer_set_callback() will increment the prog refcnt, which is paired
with bpf_timer_cancel() that will drop the prog refcnt. The
ops->map_release_uref is responsible for cancelling the timers and
dropping the prog refcnt when the user space reference to a map reaches
zero.
This uref approach is done to make sure that Ctrl-C of user space process will not leave timers running forever unless the user space explicitly pinned a map that contained timers in bpffs. bpf_timer_init() and bpf_timer_set_callback() will return -EPERM if map doesn't have user references (is not held by open file descriptor from user space and not pinned in bpffs). The bpf_map_delete_elem() and bpf_map_update_elem() operations cancel and free the timer if given map element had it allocated. "bpftool map update" command can be used to cancel timers. The 'struct bpf_timer' is explicitly __attribute__((aligned(8))) because '__u64 :64' has 1 byte alignment of 8 byte padding. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Acked-by: Andrii Nakryiko Acked-by: Toke Høiland-Jørgensen Link: https://lore.kernel.org/bpf/20210715005417.78572-4-alexei.starovoitov@gmail.com --- include/linux/bpf.h | 3 + include/uapi/linux/bpf.h | 73 ++++++++++ kernel/bpf/helpers.c | 324 +++++++++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 109 ++++++++++++++ kernel/trace/bpf_trace.c | 2 +- scripts/bpf_doc.py | 2 + tools/include/uapi/linux/bpf.h | 73 ++++++++++ 7 files changed, 585 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 4afbff308ca3..125240b7cefb 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -168,6 +168,7 @@ struct bpf_map { u32 max_entries; u32 map_flags; int spin_lock_off; /* >=0 valid offset, <0 error */ + int timer_off; /* >=0 valid offset, <0 error */ u32 id; int numa_node; u32 btf_key_type_id; @@ -221,6 +222,7 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) } void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src); +void bpf_timer_cancel_and_free(void *timer); int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); struct bpf_offload_dev; @@ -314,6 +316,7 @@ enum bpf_arg_type { ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */ ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ + ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ __BPF_ARG_TYPE_MAX, }; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index bafb6282032b..3544ec5234f0 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -4777,6 +4777,70 @@ union bpf_attr { * Execute close syscall for given FD. * Return * A syscall result. + * + * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags) + * Description + * Initialize the timer. + * First 4 bits of *flags* specify clockid. + * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed. + * All other bits of *flags* are reserved. + * The verifier will reject the program if *timer* is not from + * the same *map*. + * Return + * 0 on success. + * **-EBUSY** if *timer* is already initialized. + * **-EINVAL** if invalid *flags* are passed. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such map in bpffs. When map is unpinned or file descriptor is + * closed all timers in the map will be cancelled and freed. + * + * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn) + * Description + * Configure the timer to call *callback_fn* static function. + * Return + * 0 on success. 
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such map in bpffs. When map is unpinned or file descriptor is + * closed all timers in the map will be cancelled and freed. + * + * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags) + * Description + * Set timer expiration N nanoseconds from the current time. The + * configured callback will be invoked in soft irq context on some cpu + * and will not repeat unless another bpf_timer_start() is made. + * In such case the next invocation can migrate to a different cpu. + * Since struct bpf_timer is a field inside map element the map + * owns the timer. The bpf_timer_set_callback() will increment refcnt + * of BPF program to make sure that callback_fn code stays valid. + * When user space reference to a map reaches zero all timers + * in a map are cancelled and corresponding program's refcnts are + * decremented. This is done to make sure that Ctrl-C of a user + * process doesn't leave any timers running. If map is pinned in + * bpffs the callback_fn can re-arm itself indefinitely. + * bpf_map_update/delete_elem() helpers and user space sys_bpf commands + * cancel and free the timer in the given map element. + * The map can contain timers that invoke callback_fn-s from different + * programs. The same callback_fn can serve different timers from + * different maps if key/value layout matches across maps. + * Every bpf_timer_set_callback() can have different callback_fn. + * + * Return + * 0 on success. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier + * or invalid *flags* are passed. + * + * long bpf_timer_cancel(struct bpf_timer *timer) + * Description + * Cancel the timer and wait for callback_fn to finish if it was running. + * Return + * 0 if the timer was not active. + * 1 if the timer was active. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its + * own timer which would have led to a deadlock otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -4948,6 +5012,10 @@ union bpf_attr { FN(sys_bpf), \ FN(btf_find_by_name_kind), \ FN(sys_close), \ + FN(timer_init), \ + FN(timer_set_callback), \ + FN(timer_start), \ + FN(timer_cancel), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -6074,6 +6142,11 @@ struct bpf_spin_lock { __u32 val; }; +struct bpf_timer { + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 38be3cfc2f58..74b16593983d 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -999,6 +999,322 @@ const struct bpf_func_proto bpf_snprintf_proto = { .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; +/* BPF map elements can contain 'struct bpf_timer'. + * Such map owns all of its BPF timers. + * 'struct bpf_timer' is allocated as part of map element allocation + * and it's zero initialized. + * That space is used to keep 'struct bpf_timer_kern'. + * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and + * remembers 'struct bpf_map *' pointer it's part of. + * bpf_timer_set_callback() increments prog refcnt and assign bpf callback_fn. 
+ * bpf_timer_start() arms the timer. + * If user space reference to a map goes to zero at this point + * ops->map_release_uref callback is responsible for cancelling the timers, + * freeing their memory, and decrementing prog's refcnts. + * bpf_timer_cancel() cancels the timer and decrements prog's refcnt. + * Inner maps can contain bpf timers as well. ops->map_release_uref is + * freeing the timers when inner map is replaced or deleted by user space. + */ +struct bpf_hrtimer { + struct hrtimer timer; + struct bpf_map *map; + struct bpf_prog *prog; + void __rcu *callback_fn; + void *value; +}; + +/* the actual struct hidden inside uapi struct bpf_timer */ +struct bpf_timer_kern { + struct bpf_hrtimer *timer; + /* bpf_spin_lock is used here instead of spinlock_t to make + * sure that it always fits into space resereved by struct bpf_timer + * regardless of LOCKDEP and spinlock debug flags. + */ + struct bpf_spin_lock lock; +} __attribute__((aligned(8))); + +static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running); + +static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) +{ + struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer); + struct bpf_map *map = t->map; + void *value = t->value; + void *callback_fn; + void *key; + u32 idx; + int ret; + + callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held()); + if (!callback_fn) + goto out; + + /* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and + * cannot be preempted by another bpf_timer_cb() on the same cpu. + * Remember the timer this callback is servicing to prevent + * deadlock if callback_fn() calls bpf_timer_cancel() or + * bpf_map_delete_elem() on the same timer. + */ + this_cpu_write(hrtimer_running, t); + if (map->map_type == BPF_MAP_TYPE_ARRAY) { + struct bpf_array *array = container_of(map, struct bpf_array, map); + + /* compute the key */ + idx = ((char *)value - array->value) / array->elem_size; + key = &idx; + } else { /* hash or lru */ + key = value - round_up(map->key_size, 8); + } + + ret = BPF_CAST_CALL(callback_fn)((u64)(long)map, + (u64)(long)key, + (u64)(long)value, 0, 0); + WARN_ON(ret != 0); /* Next patch moves this check into the verifier */ + + this_cpu_write(hrtimer_running, NULL); +out: + return HRTIMER_NORESTART; +} + +BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map, + u64, flags) +{ + clockid_t clockid = flags & (MAX_CLOCKS - 1); + struct bpf_hrtimer *t; + int ret = 0; + + BUILD_BUG_ON(MAX_CLOCKS != 16); + BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer)); + BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer)); + + if (in_nmi()) + return -EOPNOTSUPP; + + if (flags >= MAX_CLOCKS || + /* similar to timerfd except _ALARM variants are not supported */ + (clockid != CLOCK_MONOTONIC && + clockid != CLOCK_REALTIME && + clockid != CLOCK_BOOTTIME)) + return -EINVAL; + __bpf_spin_lock_irqsave(&timer->lock); + t = timer->timer; + if (t) { + ret = -EBUSY; + goto out; + } + if (!atomic64_read(&map->usercnt)) { + /* maps with timers must be either held by user space + * or pinned in bpffs. 
+ */ + ret = -EPERM; + goto out; + } + /* allocate hrtimer via map_kmalloc to use memcg accounting */ + t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node); + if (!t) { + ret = -ENOMEM; + goto out; + } + t->value = (void *)timer - map->timer_off; + t->map = map; + t->prog = NULL; + rcu_assign_pointer(t->callback_fn, NULL); + hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); + t->timer.function = bpf_timer_cb; + timer->timer = t; +out: + __bpf_spin_unlock_irqrestore(&timer->lock); + return ret; +} + +static const struct bpf_func_proto bpf_timer_init_proto = { + .func = bpf_timer_init, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_TIMER, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn, + struct bpf_prog_aux *, aux) +{ + struct bpf_prog *prev, *prog = aux->prog; + struct bpf_hrtimer *t; + int ret = 0; + + if (in_nmi()) + return -EOPNOTSUPP; + __bpf_spin_lock_irqsave(&timer->lock); + t = timer->timer; + if (!t) { + ret = -EINVAL; + goto out; + } + if (!atomic64_read(&t->map->usercnt)) { + /* maps with timers must be either held by user space + * or pinned in bpffs. Otherwise timer might still be + * running even when bpf prog is detached and user space + * is gone, since map_release_uref won't ever be called. + */ + ret = -EPERM; + goto out; + } + prev = t->prog; + if (prev != prog) { + /* Bump prog refcnt once. Every bpf_timer_set_callback() + * can pick different callback_fn-s within the same prog. + */ + prog = bpf_prog_inc_not_zero(prog); + if (IS_ERR(prog)) { + ret = PTR_ERR(prog); + goto out; + } + if (prev) + /* Drop prev prog refcnt when swapping with new prog */ + bpf_prog_put(prev); + t->prog = prog; + } + rcu_assign_pointer(t->callback_fn, callback_fn); +out: + __bpf_spin_unlock_irqrestore(&timer->lock); + return ret; +} + +static const struct bpf_func_proto bpf_timer_set_callback_proto = { + .func = bpf_timer_set_callback, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_TIMER, + .arg2_type = ARG_PTR_TO_FUNC, +}; + +BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags) +{ + struct bpf_hrtimer *t; + int ret = 0; + + if (in_nmi()) + return -EOPNOTSUPP; + if (flags) + return -EINVAL; + __bpf_spin_lock_irqsave(&timer->lock); + t = timer->timer; + if (!t || !t->prog) { + ret = -EINVAL; + goto out; + } + hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT); +out: + __bpf_spin_unlock_irqrestore(&timer->lock); + return ret; +} + +static const struct bpf_func_proto bpf_timer_start_proto = { + .func = bpf_timer_start, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_TIMER, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + +static void drop_prog_refcnt(struct bpf_hrtimer *t) +{ + struct bpf_prog *prog = t->prog; + + if (prog) { + bpf_prog_put(prog); + t->prog = NULL; + rcu_assign_pointer(t->callback_fn, NULL); + } +} + +BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer) +{ + struct bpf_hrtimer *t; + int ret = 0; + + if (in_nmi()) + return -EOPNOTSUPP; + __bpf_spin_lock_irqsave(&timer->lock); + t = timer->timer; + if (!t) { + ret = -EINVAL; + goto out; + } + if (this_cpu_read(hrtimer_running) == t) { + /* If bpf callback_fn is trying to bpf_timer_cancel() + * its own timer the hrtimer_cancel() will deadlock + * since it waits for callback_fn to finish + */ + ret = -EDEADLK; + goto out; + } + drop_prog_refcnt(t); +out: + 
__bpf_spin_unlock_irqrestore(&timer->lock); + /* Cancel the timer and wait for associated callback to finish + * if it was running. + */ + ret = ret ?: hrtimer_cancel(&t->timer); + return ret; +} + +static const struct bpf_func_proto bpf_timer_cancel_proto = { + .func = bpf_timer_cancel, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_TIMER, +}; + +/* This function is called by map_delete/update_elem for individual element and + * by ops->map_release_uref when the user space reference to a map reaches zero. + */ +void bpf_timer_cancel_and_free(void *val) +{ + struct bpf_timer_kern *timer = val; + struct bpf_hrtimer *t; + + /* Performance optimization: read timer->timer without lock first. */ + if (!READ_ONCE(timer->timer)) + return; + + __bpf_spin_lock_irqsave(&timer->lock); + /* re-read it under lock */ + t = timer->timer; + if (!t) + goto out; + drop_prog_refcnt(t); + /* The subsequent bpf_timer_start/cancel() helpers won't be able to use + * this timer, since it won't be initialized. + */ + timer->timer = NULL; +out: + __bpf_spin_unlock_irqrestore(&timer->lock); + if (!t) + return; + /* Cancel the timer and wait for callback to complete if it was running. + * If hrtimer_cancel() can be safely called it's safe to call kfree(t) + * right after for both preallocated and non-preallocated maps. + * The timer->timer = NULL was already done and no code path can + * see address 't' anymore. + * + * Check that bpf_map_delete/update_elem() wasn't called from timer + * callback_fn. In such case don't call hrtimer_cancel() (since it will + * deadlock) and don't call hrtimer_try_to_cancel() (since it will just + * return -1). Though callback_fn is still running on this cpu it's + * safe to do kfree(t) because bpf_timer_cb() read everything it needed + * from 't'. The bpf subprog callback_fn won't be able to access 't', + * since timer->timer = NULL was already done. The timer will be + * effectively cancelled because bpf_timer_cb() will return + * HRTIMER_NORESTART. + */ + if (this_cpu_read(hrtimer_running) != t) + hrtimer_cancel(&t->timer); + kfree(t); +} + const struct bpf_func_proto bpf_get_current_task_proto __weak; const struct bpf_func_proto bpf_probe_read_user_proto __weak; const struct bpf_func_proto bpf_probe_read_user_str_proto __weak; @@ -1065,6 +1381,14 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_per_cpu_ptr_proto; case BPF_FUNC_this_cpu_ptr: return &bpf_this_cpu_ptr_proto; + case BPF_FUNC_timer_init: + return &bpf_timer_init_proto; + case BPF_FUNC_timer_set_callback: + return &bpf_timer_set_callback_proto; + case BPF_FUNC_timer_start: + return &bpf_timer_start_proto; + case BPF_FUNC_timer_cancel: + return &bpf_timer_cancel_proto; default: break; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3dbb3b40b754..e8645c819803 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4656,6 +4656,38 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, return 0; } +static int process_timer_func(struct bpf_verifier_env *env, int regno, + struct bpf_call_arg_meta *meta) +{ + struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; + bool is_const = tnum_is_const(reg->var_off); + struct bpf_map *map = reg->map_ptr; + u64 val = reg->var_off.value; + + if (!is_const) { + verbose(env, + "R%d doesn't have constant offset. 
bpf_timer has to be at the constant offset\n", + regno); + return -EINVAL; + } + if (!map->btf) { + verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n", + map->name); + return -EINVAL; + } + if (val) { + /* This restriction will be removed in the next patch */ + verbose(env, "bpf_timer field can only be first in the map value element\n"); + return -EINVAL; + } + if (meta->map_ptr) { + verbose(env, "verifier bug. Two map pointers in a timer helper\n"); + return -EFAULT; + } + meta->map_ptr = map; + return 0; +} + static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_MEM || @@ -4788,6 +4820,7 @@ static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_PER static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; +static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } }; static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_MAP_KEY] = &map_key_value_types, @@ -4819,6 +4852,7 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_FUNC] = &func_ptr_types, [ARG_PTR_TO_STACK_OR_NULL] = &stack_ptr_types, [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, + [ARG_PTR_TO_TIMER] = &timer_types, }; static int check_reg_type(struct bpf_verifier_env *env, u32 regno, @@ -4948,6 +4982,10 @@ skip_type_check: if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ + if (meta->map_ptr && meta->map_ptr != reg->map_ptr) { + verbose(env, "Map pointer doesn't match bpf_timer.\n"); + return -EINVAL; + } meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: @@ -5000,6 +5038,9 @@ skip_type_check: verbose(env, "verifier internal error\n"); return -EFAULT; } + } else if (arg_type == ARG_PTR_TO_TIMER) { + if (process_timer_func(env, regno, meta)) + return -EACCES; } else if (arg_type == ARG_PTR_TO_FUNC) { meta->subprogno = reg->subprogno; } else if (arg_type_is_mem_ptr(arg_type)) { @@ -5742,6 +5783,34 @@ static int set_map_elem_callback_state(struct bpf_verifier_env *env, return 0; } +static int set_timer_callback_state(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, + int insn_idx) +{ + struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; + + /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn); + * callback_fn(struct bpf_map *map, void *key, void *value); + */ + callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; + __mark_reg_known_zero(&callee->regs[BPF_REG_1]); + callee->regs[BPF_REG_1].map_ptr = map_ptr; + + callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; + __mark_reg_known_zero(&callee->regs[BPF_REG_2]); + callee->regs[BPF_REG_2].map_ptr = map_ptr; + + callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; + __mark_reg_known_zero(&callee->regs[BPF_REG_3]); + callee->regs[BPF_REG_3].map_ptr = map_ptr; + + /* unused */ + __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); + __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); + return 0; +} + static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; @@ -6069,6 +6138,13 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn return -EINVAL; } + if (func_id 
== BPF_FUNC_timer_set_callback) { + err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, + set_timer_callback_state); + if (err < 0) + return -EINVAL; + } + if (func_id == BPF_FUNC_snprintf) { err = check_bpf_snprintf_call(env, regs); if (err < 0) @@ -12591,6 +12667,39 @@ static int do_misc_fixups(struct bpf_verifier_env *env) continue; } + if (insn->imm == BPF_FUNC_timer_set_callback) { + /* The verifier will process callback_fn as many times as necessary + * with different maps and the register states prepared by + * set_timer_callback_state will be accurate. + * + * The following use case is valid: + * map1 is shared by prog1, prog2, prog3. + * prog1 calls bpf_timer_init for some map1 elements + * prog2 calls bpf_timer_set_callback for some map1 elements. + * Those that were not bpf_timer_init-ed will return -EINVAL. + * prog3 calls bpf_timer_start for some map1 elements. + * Those that were not both bpf_timer_init-ed and + * bpf_timer_set_callback-ed will return -EINVAL. + */ + struct bpf_insn ld_addrs[2] = { + BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), + }; + + insn_buf[0] = ld_addrs[0]; + insn_buf[1] = ld_addrs[1]; + insn_buf[2] = *insn; + cnt = 3; + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + goto patch_call_imm; + } + /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * and other inlining handlers are currently limited to 64 bit * only. diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 64bd2d84367f..6c77d25137e0 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1059,7 +1059,7 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_snprintf: return &bpf_snprintf_proto; default: - return NULL; + return bpf_base_func_proto(func_id); } } diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py index 2d94025b38e9..00ac7b79cddb 100755 --- a/scripts/bpf_doc.py +++ b/scripts/bpf_doc.py @@ -547,6 +547,7 @@ class PrinterHelpers(Printer): 'struct inode', 'struct socket', 'struct file', + 'struct bpf_timer', ] known_types = { '...', @@ -594,6 +595,7 @@ class PrinterHelpers(Printer): 'struct inode', 'struct socket', 'struct file', + 'struct bpf_timer', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index bafb6282032b..3544ec5234f0 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -4777,6 +4777,70 @@ union bpf_attr { * Execute close syscall for given FD. * Return * A syscall result. + * + * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags) + * Description + * Initialize the timer. + * First 4 bits of *flags* specify clockid. + * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed. + * All other bits of *flags* are reserved. + * The verifier will reject the program if *timer* is not from + * the same *map*. + * Return + * 0 on success. + * **-EBUSY** if *timer* is already initialized. + * **-EINVAL** if invalid *flags* are passed. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such map in bpffs. When map is unpinned or file descriptor is + * closed all timers in the map will be cancelled and freed. 
+ * + * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn) + * Description + * Configure the timer to call *callback_fn* static function. + * Return + * 0 on success. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such map in bpffs. When map is unpinned or file descriptor is + * closed all timers in the map will be cancelled and freed. + * + * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags) + * Description + * Set timer expiration N nanoseconds from the current time. The + * configured callback will be invoked in soft irq context on some cpu + * and will not repeat unless another bpf_timer_start() is made. + * In such case the next invocation can migrate to a different cpu. + * Since struct bpf_timer is a field inside map element the map + * owns the timer. The bpf_timer_set_callback() will increment refcnt + * of BPF program to make sure that callback_fn code stays valid. + * When user space reference to a map reaches zero all timers + * in a map are cancelled and corresponding program's refcnts are + * decremented. This is done to make sure that Ctrl-C of a user + * process doesn't leave any timers running. If map is pinned in + * bpffs the callback_fn can re-arm itself indefinitely. + * bpf_map_update/delete_elem() helpers and user space sys_bpf commands + * cancel and free the timer in the given map element. + * The map can contain timers that invoke callback_fn-s from different + * programs. The same callback_fn can serve different timers from + * different maps if key/value layout matches across maps. + * Every bpf_timer_set_callback() can have different callback_fn. + * + * Return + * 0 on success. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier + * or invalid *flags* are passed. + * + * long bpf_timer_cancel(struct bpf_timer *timer) + * Description + * Cancel the timer and wait for callback_fn to finish if it was running. + * Return + * 0 if the timer was not active. + * 1 if the timer was active. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its + * own timer which would have led to a deadlock otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -4948,6 +5012,10 @@ union bpf_attr { FN(sys_bpf), \ FN(btf_find_by_name_kind), \ FN(sys_close), \ + FN(timer_init), \ + FN(timer_set_callback), \ + FN(timer_start), \ + FN(timer_cancel), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -6074,6 +6142,11 @@ struct bpf_spin_lock { __u32 val; }; +struct bpf_timer { + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. -- cgit v1.2.3 From 47401d94947d507ff9f33fccf490baf47638fb69 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 13 Jul 2021 11:52:49 +0100 Subject: locking/atomic: simplify ifdef generation In gen-atomic-fallback.sh's gen_proto_order_variants(), we generate some ifdeferry with: | local basename="${arch}${atomic}_${pfx}${name}${sfx}" | ... | printf "#ifdef ${basename}\n" | ... | printf "#endif /* ${arch}${atomic}_${pfx}${name}${sfx} */\n\n" For clarity, use ${basename} for both sides, rather than open-coding the string generation. 
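As a reminder of what this machinery generates, one ordering variant in
the fallback header looks roughly like this (illustrative expansion of
the acquire template for one operation):

	#ifndef arch_atomic_add_return_acquire
	static __always_inline int
	arch_atomic_add_return_acquire(int i, atomic_t *v)
	{
		int ret = arch_atomic_add_return_relaxed(i, v);
		__atomic_acquire_fence();
		return ret;
	}
	#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
	#endif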
There is no change to any of the generated headers as a result of this
patch.

Signed-off-by: Mark Rutland
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210713105253.7615-2-mark.rutland@arm.com
---
 scripts/atomic/gen-atomic-fallback.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'scripts')

diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
index 317a6cec76e1..2601ff4f9468 100755
--- a/scripts/atomic/gen-atomic-fallback.sh
+++ b/scripts/atomic/gen-atomic-fallback.sh
@@ -128,7 +128,7 @@ gen_proto_order_variants()
 	gen_basic_fallbacks "${basename}"
 
 	if [ ! -z "${template}" ]; then
-		printf "#endif /* ${arch}${atomic}_${pfx}${name}${sfx} */\n\n"
+		printf "#endif /* ${basename} */\n\n"
 		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
 		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
 		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
--
cgit v1.2.3


From f3e615b4db1fb7034f1d76dc307b77cc848f040e Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 13 Jul 2021 11:52:50 +0100
Subject: locking/atomic: remove ARCH_ATOMIC remnants

Now that gen-atomic-fallback.sh is only used to generate the arch_*
fallbacks, we don't need to also generate the non-arch_* forms, and can
remove the infrastructure this needed.

There is no change to any of the generated headers as a result of this
patch.

Signed-off-by: Mark Rutland
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210713105253.7615-3-mark.rutland@arm.com
---
 scripts/atomic/fallbacks/acquire             |  4 +-
 scripts/atomic/fallbacks/add_negative        |  6 +--
 scripts/atomic/fallbacks/add_unless          |  6 +--
 scripts/atomic/fallbacks/andnot              |  4 +-
 scripts/atomic/fallbacks/dec                 |  4 +-
 scripts/atomic/fallbacks/dec_and_test        |  6 +--
 scripts/atomic/fallbacks/dec_if_positive     |  6 +--
 scripts/atomic/fallbacks/dec_unless_positive |  6 +--
 scripts/atomic/fallbacks/fence               |  4 +-
 scripts/atomic/fallbacks/fetch_add_unless    |  8 ++--
 scripts/atomic/fallbacks/inc                 |  4 +-
 scripts/atomic/fallbacks/inc_and_test        |  6 +--
 scripts/atomic/fallbacks/inc_not_zero        |  6 +--
 scripts/atomic/fallbacks/inc_unless_negative |  6 +--
 scripts/atomic/fallbacks/read_acquire        |  2 +-
 scripts/atomic/fallbacks/release             |  4 +-
 scripts/atomic/fallbacks/set_release         |  2 +-
 scripts/atomic/fallbacks/sub_and_test        |  6 +--
 scripts/atomic/fallbacks/try_cmpxchg         |  4 +-
 scripts/atomic/gen-atomic-fallback.sh        | 66 ++++++++++------------------
 scripts/atomic/gen-atomics.sh                |  2 +-
 21 files changed, 71 insertions(+), 91 deletions(-)

(limited to 'scripts')

diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire
index 59c00529dc7c..ef764085c79a 100755
--- a/scripts/atomic/fallbacks/acquire
+++ b/scripts/atomic/fallbacks/acquire
@@ -1,8 +1,8 @@
 cat < 0)) return false;
-	} while (!${arch}${atomic}_try_cmpxchg(v, &c, c - 1));
+	} while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1));
 	return true;
 }
diff --git a/scripts/atomic/fallbacks/fence b/scripts/atomic/fallbacks/fence
index 3764fc8ce945..07757d8e338e 100755
--- a/scripts/atomic/fallbacks/fence
+++ b/scripts/atomic/fallbacks/fence
@@ -1,10 +1,10 @@
 cat <counter);
 }
diff --git a/scripts/atomic/fallbacks/release
index f8906d537c0f..b46feb56d69c 100755
--- a/scripts/atomic/fallbacks/release
+++ b/scripts/atomic/fallbacks/release
@@ -1,8 +1,8 @@
 cat <counter, i);
 }
diff --git a/scripts/atomic/fallbacks/sub_and_test
index c580f4c2136e..260f37341c88 100755
--- a/scripts/atomic/fallbacks/sub_and_test
+++ b/scripts/atomic/fallbacks/sub_and_test
@@ -1,6 +1,6 @@
 cat < ${LINUXDIR}/include/${header}
--
cgit v1.2.3


From e3d18cee258b898017b298b5b93f8134dd62aee3 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 13 Jul 2021 11:52:51 +0100
Subject: locking/atomic: centralize generated headers

The generated atomic headers are only intended to be included directly
by <linux/atomic.h>, but are spread across include/linux/ and
include/asm-generic/, where people may be encouraged to include them.

This patch centralizes them under include/linux/atomic/.

Other than the header guards and hashes, there is no change to any of
the generated headers as a result of this patch.

Signed-off-by: Mark Rutland
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210713105253.7615-4-mark.rutland@arm.com
---
 include/asm-generic/atomic-instrumented.h   | 1337 ---------------
 include/asm-generic/atomic-long.h           | 1014 ------------
 include/linux/atomic-arch-fallback.h        | 2361 ---------------------------
 include/linux/atomic.h                      |    7 +-
 include/linux/atomic/atomic-arch-fallback.h | 2361 +++++++++++++++++++++++++++
 include/linux/atomic/atomic-instrumented.h  | 1337 +++++++++++++++
 include/linux/atomic/atomic-long.h          | 1014 ++++++++++++
 scripts/atomic/check-atomics.sh             |    6 +-
 scripts/atomic/gen-atomic-instrumented.sh   |    6 +-
 scripts/atomic/gen-atomic-long.sh           |    6 +-
 scripts/atomic/gen-atomics.sh               |    6 +-
 11 files changed, 4727 insertions(+), 4728 deletions(-)
 delete mode 100644 include/asm-generic/atomic-instrumented.h
 delete mode 100644 include/asm-generic/atomic-long.h
 delete mode 100644 include/linux/atomic-arch-fallback.h
 create mode 100644 include/linux/atomic/atomic-arch-fallback.h
 create mode 100644 include/linux/atomic/atomic-instrumented.h
 create mode 100644 include/linux/atomic/atomic-long.h

(limited to 'scripts')

diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
deleted file mode 100644
index bc45af52c93b..000000000000
--- a/include/asm-generic/atomic-instrumented.h
+++ /dev/null
@@ -1,1337 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-instrumented.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-/*
- * This file provides wrappers with KASAN instrumentation for atomic operations.
- * To use this functionality an arch's atomic.h file needs to define all
- * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
- * this file at the end. This file provides atomic_read() that forwards to
- * arch_atomic_read() for actual atomic operation.
- * Note: if an arch atomic operation is implemented by means of other atomic
- * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
- * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
- * double instrumentation.
- */ -#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H -#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H - -#include -#include -#include - -static __always_inline int -atomic_read(const atomic_t *v) -{ - instrument_atomic_read(v, sizeof(*v)); - return arch_atomic_read(v); -} - -static __always_inline int -atomic_read_acquire(const atomic_t *v) -{ - instrument_atomic_read(v, sizeof(*v)); - return arch_atomic_read_acquire(v); -} - -static __always_inline void -atomic_set(atomic_t *v, int i) -{ - instrument_atomic_write(v, sizeof(*v)); - arch_atomic_set(v, i); -} - -static __always_inline void -atomic_set_release(atomic_t *v, int i) -{ - instrument_atomic_write(v, sizeof(*v)); - arch_atomic_set_release(v, i); -} - -static __always_inline void -atomic_add(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_add(i, v); -} - -static __always_inline int -atomic_add_return(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_return(i, v); -} - -static __always_inline int -atomic_add_return_acquire(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_return_acquire(i, v); -} - -static __always_inline int -atomic_add_return_release(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_return_release(i, v); -} - -static __always_inline int -atomic_add_return_relaxed(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_return_relaxed(i, v); -} - -static __always_inline int -atomic_fetch_add(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_add(i, v); -} - -static __always_inline int -atomic_fetch_add_acquire(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_add_acquire(i, v); -} - -static __always_inline int -atomic_fetch_add_release(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_add_release(i, v); -} - -static __always_inline int -atomic_fetch_add_relaxed(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_add_relaxed(i, v); -} - -static __always_inline void -atomic_sub(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_sub(i, v); -} - -static __always_inline int -atomic_sub_return(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_sub_return(i, v); -} - -static __always_inline int -atomic_sub_return_acquire(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_sub_return_acquire(i, v); -} - -static __always_inline int -atomic_sub_return_release(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_sub_return_release(i, v); -} - -static __always_inline int -atomic_sub_return_relaxed(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_sub_return_relaxed(i, v); -} - -static __always_inline int -atomic_fetch_sub(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_sub(i, v); -} - -static __always_inline int -atomic_fetch_sub_acquire(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_sub_acquire(i, v); -} - -static __always_inline int -atomic_fetch_sub_release(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return 
arch_atomic_fetch_sub_release(i, v); -} - -static __always_inline int -atomic_fetch_sub_relaxed(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_sub_relaxed(i, v); -} - -static __always_inline void -atomic_inc(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_inc(v); -} - -static __always_inline int -atomic_inc_return(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_return(v); -} - -static __always_inline int -atomic_inc_return_acquire(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_return_acquire(v); -} - -static __always_inline int -atomic_inc_return_release(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_return_release(v); -} - -static __always_inline int -atomic_inc_return_relaxed(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_return_relaxed(v); -} - -static __always_inline int -atomic_fetch_inc(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_inc(v); -} - -static __always_inline int -atomic_fetch_inc_acquire(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_inc_acquire(v); -} - -static __always_inline int -atomic_fetch_inc_release(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_inc_release(v); -} - -static __always_inline int -atomic_fetch_inc_relaxed(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_inc_relaxed(v); -} - -static __always_inline void -atomic_dec(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_dec(v); -} - -static __always_inline int -atomic_dec_return(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_return(v); -} - -static __always_inline int -atomic_dec_return_acquire(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_return_acquire(v); -} - -static __always_inline int -atomic_dec_return_release(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_return_release(v); -} - -static __always_inline int -atomic_dec_return_relaxed(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_return_relaxed(v); -} - -static __always_inline int -atomic_fetch_dec(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_dec(v); -} - -static __always_inline int -atomic_fetch_dec_acquire(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_dec_acquire(v); -} - -static __always_inline int -atomic_fetch_dec_release(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_dec_release(v); -} - -static __always_inline int -atomic_fetch_dec_relaxed(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_dec_relaxed(v); -} - -static __always_inline void -atomic_and(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_and(i, v); -} - -static __always_inline int -atomic_fetch_and(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_and(i, v); -} - -static __always_inline int -atomic_fetch_and_acquire(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_and_acquire(i, v); -} - -static 
__always_inline int -atomic_fetch_and_release(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_and_release(i, v); -} - -static __always_inline int -atomic_fetch_and_relaxed(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_and_relaxed(i, v); -} - -static __always_inline void -atomic_andnot(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_andnot(i, v); -} - -static __always_inline int -atomic_fetch_andnot(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_andnot(i, v); -} - -static __always_inline int -atomic_fetch_andnot_acquire(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_andnot_acquire(i, v); -} - -static __always_inline int -atomic_fetch_andnot_release(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_andnot_release(i, v); -} - -static __always_inline int -atomic_fetch_andnot_relaxed(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -atomic_or(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_or(i, v); -} - -static __always_inline int -atomic_fetch_or(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_or(i, v); -} - -static __always_inline int -atomic_fetch_or_acquire(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_or_acquire(i, v); -} - -static __always_inline int -atomic_fetch_or_release(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_or_release(i, v); -} - -static __always_inline int -atomic_fetch_or_relaxed(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_or_relaxed(i, v); -} - -static __always_inline void -atomic_xor(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_xor(i, v); -} - -static __always_inline int -atomic_fetch_xor(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_xor(i, v); -} - -static __always_inline int -atomic_fetch_xor_acquire(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_xor_acquire(i, v); -} - -static __always_inline int -atomic_fetch_xor_release(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_xor_release(i, v); -} - -static __always_inline int -atomic_fetch_xor_relaxed(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_xor_relaxed(i, v); -} - -static __always_inline int -atomic_xchg(atomic_t *v, int i) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_xchg(v, i); -} - -static __always_inline int -atomic_xchg_acquire(atomic_t *v, int i) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_xchg_acquire(v, i); -} - -static __always_inline int -atomic_xchg_release(atomic_t *v, int i) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_xchg_release(v, i); -} - -static __always_inline int -atomic_xchg_relaxed(atomic_t *v, int i) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_xchg_relaxed(v, i); -} - -static __always_inline int -atomic_cmpxchg(atomic_t *v, int old, int 
new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_cmpxchg(v, old, new); -} - -static __always_inline int -atomic_cmpxchg_acquire(atomic_t *v, int old, int new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_cmpxchg_acquire(v, old, new); -} - -static __always_inline int -atomic_cmpxchg_release(atomic_t *v, int old, int new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_cmpxchg_release(v, old, new); -} - -static __always_inline int -atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic_try_cmpxchg(atomic_t *v, int *old, int new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_try_cmpxchg(v, old, new); -} - -static __always_inline bool -atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_try_cmpxchg_acquire(v, old, new); -} - -static __always_inline bool -atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_try_cmpxchg_release(v, old, new); -} - -static __always_inline bool -atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_try_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic_sub_and_test(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_sub_and_test(i, v); -} - -static __always_inline bool -atomic_dec_and_test(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_and_test(v); -} - -static __always_inline bool -atomic_inc_and_test(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_and_test(v); -} - -static __always_inline bool -atomic_add_negative(int i, atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_negative(i, v); -} - -static __always_inline int -atomic_fetch_add_unless(atomic_t *v, int a, int u) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_add_unless(v, a, u); -} - -static __always_inline bool -atomic_add_unless(atomic_t *v, int a, int u) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_unless(v, a, u); -} - -static __always_inline bool -atomic_inc_not_zero(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_not_zero(v); -} - -static __always_inline bool -atomic_inc_unless_negative(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_unless_negative(v); -} - -static __always_inline bool -atomic_dec_unless_positive(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_unless_positive(v); -} - -static __always_inline int -atomic_dec_if_positive(atomic_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_if_positive(v); -} - -static __always_inline s64 -atomic64_read(const atomic64_t *v) -{ - instrument_atomic_read(v, sizeof(*v)); - return arch_atomic64_read(v); -} - -static __always_inline s64 -atomic64_read_acquire(const atomic64_t *v) -{ - 
instrument_atomic_read(v, sizeof(*v)); - return arch_atomic64_read_acquire(v); -} - -static __always_inline void -atomic64_set(atomic64_t *v, s64 i) -{ - instrument_atomic_write(v, sizeof(*v)); - arch_atomic64_set(v, i); -} - -static __always_inline void -atomic64_set_release(atomic64_t *v, s64 i) -{ - instrument_atomic_write(v, sizeof(*v)); - arch_atomic64_set_release(v, i); -} - -static __always_inline void -atomic64_add(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_add(i, v); -} - -static __always_inline s64 -atomic64_add_return(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_return(i, v); -} - -static __always_inline s64 -atomic64_add_return_acquire(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_return_acquire(i, v); -} - -static __always_inline s64 -atomic64_add_return_release(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_return_release(i, v); -} - -static __always_inline s64 -atomic64_add_return_relaxed(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_return_relaxed(i, v); -} - -static __always_inline s64 -atomic64_fetch_add(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_add(i, v); -} - -static __always_inline s64 -atomic64_fetch_add_acquire(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_add_acquire(i, v); -} - -static __always_inline s64 -atomic64_fetch_add_release(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_add_release(i, v); -} - -static __always_inline s64 -atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_add_relaxed(i, v); -} - -static __always_inline void -atomic64_sub(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_sub(i, v); -} - -static __always_inline s64 -atomic64_sub_return(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_sub_return(i, v); -} - -static __always_inline s64 -atomic64_sub_return_acquire(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_sub_return_acquire(i, v); -} - -static __always_inline s64 -atomic64_sub_return_release(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_sub_return_release(i, v); -} - -static __always_inline s64 -atomic64_sub_return_relaxed(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_sub_return_relaxed(i, v); -} - -static __always_inline s64 -atomic64_fetch_sub(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_sub(i, v); -} - -static __always_inline s64 -atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_sub_acquire(i, v); -} - -static __always_inline s64 -atomic64_fetch_sub_release(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_sub_release(i, v); -} - -static __always_inline s64 -atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_sub_relaxed(i, v); -} - -static __always_inline void 
-atomic64_inc(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_inc(v); -} - -static __always_inline s64 -atomic64_inc_return(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_return(v); -} - -static __always_inline s64 -atomic64_inc_return_acquire(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_return_acquire(v); -} - -static __always_inline s64 -atomic64_inc_return_release(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_return_release(v); -} - -static __always_inline s64 -atomic64_inc_return_relaxed(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_return_relaxed(v); -} - -static __always_inline s64 -atomic64_fetch_inc(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_inc(v); -} - -static __always_inline s64 -atomic64_fetch_inc_acquire(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_inc_acquire(v); -} - -static __always_inline s64 -atomic64_fetch_inc_release(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_inc_release(v); -} - -static __always_inline s64 -atomic64_fetch_inc_relaxed(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_inc_relaxed(v); -} - -static __always_inline void -atomic64_dec(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_dec(v); -} - -static __always_inline s64 -atomic64_dec_return(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_return(v); -} - -static __always_inline s64 -atomic64_dec_return_acquire(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_return_acquire(v); -} - -static __always_inline s64 -atomic64_dec_return_release(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_return_release(v); -} - -static __always_inline s64 -atomic64_dec_return_relaxed(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_return_relaxed(v); -} - -static __always_inline s64 -atomic64_fetch_dec(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_dec(v); -} - -static __always_inline s64 -atomic64_fetch_dec_acquire(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_dec_acquire(v); -} - -static __always_inline s64 -atomic64_fetch_dec_release(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_dec_release(v); -} - -static __always_inline s64 -atomic64_fetch_dec_relaxed(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_dec_relaxed(v); -} - -static __always_inline void -atomic64_and(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_and(i, v); -} - -static __always_inline s64 -atomic64_fetch_and(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_and(i, v); -} - -static __always_inline s64 -atomic64_fetch_and_acquire(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_and_acquire(i, v); -} - -static __always_inline s64 -atomic64_fetch_and_release(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); 
- return arch_atomic64_fetch_and_release(i, v); -} - -static __always_inline s64 -atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_and_relaxed(i, v); -} - -static __always_inline void -atomic64_andnot(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_andnot(i, v); -} - -static __always_inline s64 -atomic64_fetch_andnot(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_andnot(i, v); -} - -static __always_inline s64 -atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_andnot_acquire(i, v); -} - -static __always_inline s64 -atomic64_fetch_andnot_release(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_andnot_release(i, v); -} - -static __always_inline s64 -atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -atomic64_or(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_or(i, v); -} - -static __always_inline s64 -atomic64_fetch_or(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_or(i, v); -} - -static __always_inline s64 -atomic64_fetch_or_acquire(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_or_acquire(i, v); -} - -static __always_inline s64 -atomic64_fetch_or_release(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_or_release(i, v); -} - -static __always_inline s64 -atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_or_relaxed(i, v); -} - -static __always_inline void -atomic64_xor(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_xor(i, v); -} - -static __always_inline s64 -atomic64_fetch_xor(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_xor(i, v); -} - -static __always_inline s64 -atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_xor_acquire(i, v); -} - -static __always_inline s64 -atomic64_fetch_xor_release(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_xor_release(i, v); -} - -static __always_inline s64 -atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_xor_relaxed(i, v); -} - -static __always_inline s64 -atomic64_xchg(atomic64_t *v, s64 i) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_xchg(v, i); -} - -static __always_inline s64 -atomic64_xchg_acquire(atomic64_t *v, s64 i) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_xchg_acquire(v, i); -} - -static __always_inline s64 -atomic64_xchg_release(atomic64_t *v, s64 i) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_xchg_release(v, i); -} - -static __always_inline s64 -atomic64_xchg_relaxed(atomic64_t *v, s64 i) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_xchg_relaxed(v, i); -} - -static __always_inline s64 -atomic64_cmpxchg(atomic64_t *v, s64 
old, s64 new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_cmpxchg(v, old, new); -} - -static __always_inline s64 -atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_cmpxchg_acquire(v, old, new); -} - -static __always_inline s64 -atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_cmpxchg_release(v, old, new); -} - -static __always_inline s64 -atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic64_try_cmpxchg(v, old, new); -} - -static __always_inline bool -atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic64_try_cmpxchg_acquire(v, old, new); -} - -static __always_inline bool -atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic64_try_cmpxchg_release(v, old, new); -} - -static __always_inline bool -atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) -{ - instrument_atomic_read_write(v, sizeof(*v)); - instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic64_try_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic64_sub_and_test(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_sub_and_test(i, v); -} - -static __always_inline bool -atomic64_dec_and_test(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_and_test(v); -} - -static __always_inline bool -atomic64_inc_and_test(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_and_test(v); -} - -static __always_inline bool -atomic64_add_negative(s64 i, atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_negative(i, v); -} - -static __always_inline s64 -atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_add_unless(v, a, u); -} - -static __always_inline bool -atomic64_add_unless(atomic64_t *v, s64 a, s64 u) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_unless(v, a, u); -} - -static __always_inline bool -atomic64_inc_not_zero(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_not_zero(v); -} - -static __always_inline bool -atomic64_inc_unless_negative(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_unless_negative(v); -} - -static __always_inline bool -atomic64_dec_unless_positive(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_unless_positive(v); -} - -static __always_inline s64 -atomic64_dec_if_positive(atomic64_t *v) -{ - instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_if_positive(v); -} - -#define xchg(ptr, ...) 
\ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_xchg(__ai_ptr, __VA_ARGS__); \ -}) - -#define xchg_acquire(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ -}) - -#define xchg_release(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_xchg_release(__ai_ptr, __VA_ARGS__); \ -}) - -#define xchg_relaxed(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ -}) - -#define cmpxchg(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ -}) - -#define cmpxchg_acquire(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ -}) - -#define cmpxchg_release(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ -}) - -#define cmpxchg_relaxed(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ -}) - -#define cmpxchg64(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ -}) - -#define cmpxchg64_acquire(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ -}) - -#define cmpxchg64_release(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ -}) - -#define cmpxchg64_relaxed(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ -}) - -#define try_cmpxchg(ptr, oldp, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - typeof(oldp) __ai_oldp = (oldp); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ -}) - -#define try_cmpxchg_acquire(ptr, oldp, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - typeof(oldp) __ai_oldp = (oldp); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ -}) - -#define try_cmpxchg_release(ptr, oldp, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - typeof(oldp) __ai_oldp = (oldp); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ -}) - -#define try_cmpxchg_relaxed(ptr, oldp, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - typeof(oldp) __ai_oldp = (oldp); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ -}) - -#define cmpxchg_local(ptr, ...) 
\ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ -}) - -#define cmpxchg64_local(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ -}) - -#define sync_cmpxchg(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ -}) - -#define cmpxchg_double(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ - arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ -}) - - -#define cmpxchg_double_local(ptr, ...) \ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ - arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ -}) - -#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// 1d7c3a25aca5c7fb031c307be4c3d24c7b48fcd5 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h deleted file mode 100644 index 073cf40f431b..000000000000 --- a/include/asm-generic/atomic-long.h +++ /dev/null @@ -1,1014 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -// Generated by scripts/atomic/gen-atomic-long.sh -// DO NOT MODIFY THIS FILE DIRECTLY - -#ifndef _ASM_GENERIC_ATOMIC_LONG_H -#define _ASM_GENERIC_ATOMIC_LONG_H - -#include -#include - -#ifdef CONFIG_64BIT -typedef atomic64_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) -#define atomic_long_cond_read_acquire atomic64_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed -#else -typedef atomic_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) -#define atomic_long_cond_read_acquire atomic_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed -#endif - -#ifdef CONFIG_64BIT - -static __always_inline long -atomic_long_read(const atomic_long_t *v) -{ - return atomic64_read(v); -} - -static __always_inline long -atomic_long_read_acquire(const atomic_long_t *v) -{ - return atomic64_read_acquire(v); -} - -static __always_inline void -atomic_long_set(atomic_long_t *v, long i) -{ - atomic64_set(v, i); -} - -static __always_inline void -atomic_long_set_release(atomic_long_t *v, long i) -{ - atomic64_set_release(v, i); -} - -static __always_inline void -atomic_long_add(long i, atomic_long_t *v) -{ - atomic64_add(i, v); -} - -static __always_inline long -atomic_long_add_return(long i, atomic_long_t *v) -{ - return atomic64_add_return(i, v); -} - -static __always_inline long -atomic_long_add_return_acquire(long i, atomic_long_t *v) -{ - return atomic64_add_return_acquire(i, v); -} - -static __always_inline long -atomic_long_add_return_release(long i, atomic_long_t *v) -{ - return atomic64_add_return_release(i, v); -} - -static __always_inline long -atomic_long_add_return_relaxed(long i, atomic_long_t *v) -{ - return atomic64_add_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_add(long i, atomic_long_t *v) -{ - return atomic64_fetch_add(i, v); -} - -static __always_inline long -atomic_long_fetch_add_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_add_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_release(i, v); -} - -static __always_inline long -atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_relaxed(i, v); 
-} - -static __always_inline void -atomic_long_sub(long i, atomic_long_t *v) -{ - atomic64_sub(i, v); -} - -static __always_inline long -atomic_long_sub_return(long i, atomic_long_t *v) -{ - return atomic64_sub_return(i, v); -} - -static __always_inline long -atomic_long_sub_return_acquire(long i, atomic_long_t *v) -{ - return atomic64_sub_return_acquire(i, v); -} - -static __always_inline long -atomic_long_sub_return_release(long i, atomic_long_t *v) -{ - return atomic64_sub_return_release(i, v); -} - -static __always_inline long -atomic_long_sub_return_relaxed(long i, atomic_long_t *v) -{ - return atomic64_sub_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_sub(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_release(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_relaxed(i, v); -} - -static __always_inline void -atomic_long_inc(atomic_long_t *v) -{ - atomic64_inc(v); -} - -static __always_inline long -atomic_long_inc_return(atomic_long_t *v) -{ - return atomic64_inc_return(v); -} - -static __always_inline long -atomic_long_inc_return_acquire(atomic_long_t *v) -{ - return atomic64_inc_return_acquire(v); -} - -static __always_inline long -atomic_long_inc_return_release(atomic_long_t *v) -{ - return atomic64_inc_return_release(v); -} - -static __always_inline long -atomic_long_inc_return_relaxed(atomic_long_t *v) -{ - return atomic64_inc_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_inc(atomic_long_t *v) -{ - return atomic64_fetch_inc(v); -} - -static __always_inline long -atomic_long_fetch_inc_acquire(atomic_long_t *v) -{ - return atomic64_fetch_inc_acquire(v); -} - -static __always_inline long -atomic_long_fetch_inc_release(atomic_long_t *v) -{ - return atomic64_fetch_inc_release(v); -} - -static __always_inline long -atomic_long_fetch_inc_relaxed(atomic_long_t *v) -{ - return atomic64_fetch_inc_relaxed(v); -} - -static __always_inline void -atomic_long_dec(atomic_long_t *v) -{ - atomic64_dec(v); -} - -static __always_inline long -atomic_long_dec_return(atomic_long_t *v) -{ - return atomic64_dec_return(v); -} - -static __always_inline long -atomic_long_dec_return_acquire(atomic_long_t *v) -{ - return atomic64_dec_return_acquire(v); -} - -static __always_inline long -atomic_long_dec_return_release(atomic_long_t *v) -{ - return atomic64_dec_return_release(v); -} - -static __always_inline long -atomic_long_dec_return_relaxed(atomic_long_t *v) -{ - return atomic64_dec_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_dec(atomic_long_t *v) -{ - return atomic64_fetch_dec(v); -} - -static __always_inline long -atomic_long_fetch_dec_acquire(atomic_long_t *v) -{ - return atomic64_fetch_dec_acquire(v); -} - -static __always_inline long -atomic_long_fetch_dec_release(atomic_long_t *v) -{ - return atomic64_fetch_dec_release(v); -} - -static __always_inline long -atomic_long_fetch_dec_relaxed(atomic_long_t *v) -{ - return atomic64_fetch_dec_relaxed(v); -} - -static __always_inline void -atomic_long_and(long i, atomic_long_t *v) -{ - atomic64_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and(long i, atomic_long_t *v) -{ - return atomic64_fetch_and(i, v); -} - 
-static __always_inline long -atomic_long_fetch_and_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_and_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_release(i, v); -} - -static __always_inline long -atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_relaxed(i, v); -} - -static __always_inline void -atomic_long_andnot(long i, atomic_long_t *v) -{ - atomic64_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_release(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -atomic_long_or(long i, atomic_long_t *v) -{ - atomic64_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or(long i, atomic_long_t *v) -{ - return atomic64_fetch_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_or_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_release(i, v); -} - -static __always_inline long -atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_relaxed(i, v); -} - -static __always_inline void -atomic_long_xor(long i, atomic_long_t *v) -{ - atomic64_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_release(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_relaxed(i, v); -} - -static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) -{ - return atomic64_xchg(v, i); -} - -static __always_inline long -atomic_long_xchg_acquire(atomic_long_t *v, long i) -{ - return atomic64_xchg_acquire(v, i); -} - -static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) -{ - return atomic64_xchg_release(v, i); -} - -static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) -{ - return atomic64_xchg_relaxed(v, i); -} - -static __always_inline long -atomic_long_cmpxchg(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_acquire(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_release(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg(v, (s64 
*)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_release(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_sub_and_test(long i, atomic_long_t *v) -{ - return atomic64_sub_and_test(i, v); -} - -static __always_inline bool -atomic_long_dec_and_test(atomic_long_t *v) -{ - return atomic64_dec_and_test(v); -} - -static __always_inline bool -atomic_long_inc_and_test(atomic_long_t *v) -{ - return atomic64_inc_and_test(v); -} - -static __always_inline bool -atomic_long_add_negative(long i, atomic_long_t *v) -{ - return atomic64_add_negative(i, v); -} - -static __always_inline long -atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic64_fetch_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic64_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_inc_not_zero(atomic_long_t *v) -{ - return atomic64_inc_not_zero(v); -} - -static __always_inline bool -atomic_long_inc_unless_negative(atomic_long_t *v) -{ - return atomic64_inc_unless_negative(v); -} - -static __always_inline bool -atomic_long_dec_unless_positive(atomic_long_t *v) -{ - return atomic64_dec_unless_positive(v); -} - -static __always_inline long -atomic_long_dec_if_positive(atomic_long_t *v) -{ - return atomic64_dec_if_positive(v); -} - -#else /* CONFIG_64BIT */ - -static __always_inline long -atomic_long_read(const atomic_long_t *v) -{ - return atomic_read(v); -} - -static __always_inline long -atomic_long_read_acquire(const atomic_long_t *v) -{ - return atomic_read_acquire(v); -} - -static __always_inline void -atomic_long_set(atomic_long_t *v, long i) -{ - atomic_set(v, i); -} - -static __always_inline void -atomic_long_set_release(atomic_long_t *v, long i) -{ - atomic_set_release(v, i); -} - -static __always_inline void -atomic_long_add(long i, atomic_long_t *v) -{ - atomic_add(i, v); -} - -static __always_inline long -atomic_long_add_return(long i, atomic_long_t *v) -{ - return atomic_add_return(i, v); -} - -static __always_inline long -atomic_long_add_return_acquire(long i, atomic_long_t *v) -{ - return atomic_add_return_acquire(i, v); -} - -static __always_inline long -atomic_long_add_return_release(long i, atomic_long_t *v) -{ - return atomic_add_return_release(i, v); -} - -static __always_inline long -atomic_long_add_return_relaxed(long i, atomic_long_t *v) -{ - return atomic_add_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_add(long i, atomic_long_t *v) -{ - return atomic_fetch_add(i, v); -} - -static __always_inline long -atomic_long_fetch_add_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_add_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_add_release(long i, atomic_long_t *v) -{ - return atomic_fetch_add_release(i, v); -} - -static __always_inline long -atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_add_relaxed(i, v); -} - -static __always_inline void -atomic_long_sub(long i, atomic_long_t *v) -{ - atomic_sub(i, v); -} - -static __always_inline long 
-atomic_long_sub_return(long i, atomic_long_t *v) -{ - return atomic_sub_return(i, v); -} - -static __always_inline long -atomic_long_sub_return_acquire(long i, atomic_long_t *v) -{ - return atomic_sub_return_acquire(i, v); -} - -static __always_inline long -atomic_long_sub_return_release(long i, atomic_long_t *v) -{ - return atomic_sub_return_release(i, v); -} - -static __always_inline long -atomic_long_sub_return_relaxed(long i, atomic_long_t *v) -{ - return atomic_sub_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_sub(long i, atomic_long_t *v) -{ - return atomic_fetch_sub(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_release(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_release(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_relaxed(i, v); -} - -static __always_inline void -atomic_long_inc(atomic_long_t *v) -{ - atomic_inc(v); -} - -static __always_inline long -atomic_long_inc_return(atomic_long_t *v) -{ - return atomic_inc_return(v); -} - -static __always_inline long -atomic_long_inc_return_acquire(atomic_long_t *v) -{ - return atomic_inc_return_acquire(v); -} - -static __always_inline long -atomic_long_inc_return_release(atomic_long_t *v) -{ - return atomic_inc_return_release(v); -} - -static __always_inline long -atomic_long_inc_return_relaxed(atomic_long_t *v) -{ - return atomic_inc_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_inc(atomic_long_t *v) -{ - return atomic_fetch_inc(v); -} - -static __always_inline long -atomic_long_fetch_inc_acquire(atomic_long_t *v) -{ - return atomic_fetch_inc_acquire(v); -} - -static __always_inline long -atomic_long_fetch_inc_release(atomic_long_t *v) -{ - return atomic_fetch_inc_release(v); -} - -static __always_inline long -atomic_long_fetch_inc_relaxed(atomic_long_t *v) -{ - return atomic_fetch_inc_relaxed(v); -} - -static __always_inline void -atomic_long_dec(atomic_long_t *v) -{ - atomic_dec(v); -} - -static __always_inline long -atomic_long_dec_return(atomic_long_t *v) -{ - return atomic_dec_return(v); -} - -static __always_inline long -atomic_long_dec_return_acquire(atomic_long_t *v) -{ - return atomic_dec_return_acquire(v); -} - -static __always_inline long -atomic_long_dec_return_release(atomic_long_t *v) -{ - return atomic_dec_return_release(v); -} - -static __always_inline long -atomic_long_dec_return_relaxed(atomic_long_t *v) -{ - return atomic_dec_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_dec(atomic_long_t *v) -{ - return atomic_fetch_dec(v); -} - -static __always_inline long -atomic_long_fetch_dec_acquire(atomic_long_t *v) -{ - return atomic_fetch_dec_acquire(v); -} - -static __always_inline long -atomic_long_fetch_dec_release(atomic_long_t *v) -{ - return atomic_fetch_dec_release(v); -} - -static __always_inline long -atomic_long_fetch_dec_relaxed(atomic_long_t *v) -{ - return atomic_fetch_dec_relaxed(v); -} - -static __always_inline void -atomic_long_and(long i, atomic_long_t *v) -{ - atomic_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and(long i, atomic_long_t *v) -{ - return atomic_fetch_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_and_acquire(i, v); -} - -static __always_inline long 
-atomic_long_fetch_and_release(long i, atomic_long_t *v) -{ - return atomic_fetch_and_release(i, v); -} - -static __always_inline long -atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_and_relaxed(i, v); -} - -static __always_inline void -atomic_long_andnot(long i, atomic_long_t *v) -{ - atomic_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_release(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_release(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -atomic_long_or(long i, atomic_long_t *v) -{ - atomic_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or(long i, atomic_long_t *v) -{ - return atomic_fetch_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_or_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_or_release(long i, atomic_long_t *v) -{ - return atomic_fetch_or_release(i, v); -} - -static __always_inline long -atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_or_relaxed(i, v); -} - -static __always_inline void -atomic_long_xor(long i, atomic_long_t *v) -{ - atomic_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor(long i, atomic_long_t *v) -{ - return atomic_fetch_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_release(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_release(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_relaxed(i, v); -} - -static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) -{ - return atomic_xchg(v, i); -} - -static __always_inline long -atomic_long_xchg_acquire(atomic_long_t *v, long i) -{ - return atomic_xchg_acquire(v, i); -} - -static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) -{ - return atomic_xchg_release(v, i); -} - -static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) -{ - return atomic_xchg_relaxed(v, i); -} - -static __always_inline long -atomic_long_cmpxchg(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_acquire(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_release(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg_acquire(v, (int *)old, new); -} - -static __always_inline bool 
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
-	return atomic_try_cmpxchg_release(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
-	return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
-	return atomic_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
-	return atomic_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
-	return atomic_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
-	return atomic_add_negative(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
-	return atomic_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
-	return atomic_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
-	return atomic_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
-	return atomic_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
-	return atomic_dec_unless_positive(v);
-}
-
-static __always_inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
-	return atomic_dec_if_positive(v);
-}
-
-#endif /* CONFIG_64BIT */
-#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// a624200981f552b2c6be4f32fe44da8289f30d87
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h
deleted file mode 100644
index a3dba31df01e..000000000000
--- a/include/linux/atomic-arch-fallback.h
+++ /dev/null
@@ -1,2361 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-fallback.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_FALLBACK_H
-#define _LINUX_ATOMIC_FALLBACK_H
-
-#include <linux/compiler.h>
-
-#ifndef arch_xchg_relaxed
-#define arch_xchg_acquire arch_xchg
-#define arch_xchg_release arch_xchg
-#define arch_xchg_relaxed arch_xchg
-#else /* arch_xchg_relaxed */
-
-#ifndef arch_xchg_acquire
-#define arch_xchg_acquire(...) \
-	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_xchg_release
-#define arch_xchg_release(...) \
-	__atomic_op_release(arch_xchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_xchg
-#define arch_xchg(...) \
-	__atomic_op_fence(arch_xchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_xchg_relaxed */
-
-#ifndef arch_cmpxchg_relaxed
-#define arch_cmpxchg_acquire arch_cmpxchg
-#define arch_cmpxchg_release arch_cmpxchg
-#define arch_cmpxchg_relaxed arch_cmpxchg
-#else /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg_acquire
-#define arch_cmpxchg_acquire(...) \
-	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg_release
-#define arch_cmpxchg_release(...) \
-	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg
-#define arch_cmpxchg(...) \
-	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg64_relaxed
-#define arch_cmpxchg64_acquire arch_cmpxchg64
-#define arch_cmpxchg64_release arch_cmpxchg64
-#define arch_cmpxchg64_relaxed arch_cmpxchg64
-#else /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_cmpxchg64_acquire
-#define arch_cmpxchg64_acquire(...) \
-	__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg64_release
-#define arch_cmpxchg64_release(...) \
-	__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg64
-#define arch_cmpxchg64(...) \
-	__atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_try_cmpxchg_relaxed
-#ifdef arch_try_cmpxchg
-#define arch_try_cmpxchg_acquire arch_try_cmpxchg
-#define arch_try_cmpxchg_release arch_try_cmpxchg
-#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
-#endif /* arch_try_cmpxchg */
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(_ptr, _oldp, _new) \
-({ \
-	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-	___r = arch_cmpxchg((_ptr), ___o, (_new)); \
-	if (unlikely(___r != ___o)) \
-		*___op = ___r; \
-	likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg */
-
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
-({ \
-	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-	___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
-	if (unlikely(___r != ___o)) \
-		*___op = ___r; \
-	likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_acquire */
-
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
-({ \
-	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-	___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
-	if (unlikely(___r != ___o)) \
-		*___op = ___r; \
-	likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_release */
-
-#ifndef arch_try_cmpxchg_relaxed
-#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
-({ \
-	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
-	___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
-	if (unlikely(___r != ___o)) \
-		*___op = ___r; \
-	likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_relaxed */
-
-#else /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(...) \
-	__atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(...) \
-	__atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(...) \
-	__atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_read_acquire
-static __always_inline int
-arch_atomic_read_acquire(const atomic_t *v)
-{
-	return smp_load_acquire(&(v)->counter);
-}
-#define arch_atomic_read_acquire arch_atomic_read_acquire
-#endif
-
-#ifndef arch_atomic_set_release
-static __always_inline void
-arch_atomic_set_release(atomic_t *v, int i)
-{
-	smp_store_release(&(v)->counter, i);
-}
-#define arch_atomic_set_release arch_atomic_set_release
-#endif
-
-#ifndef arch_atomic_add_return_relaxed
-#define arch_atomic_add_return_acquire arch_atomic_add_return
-#define arch_atomic_add_return_release arch_atomic_add_return
-#define arch_atomic_add_return_relaxed arch_atomic_add_return
-#else /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_add_return_acquire
-static __always_inline int
-arch_atomic_add_return_acquire(int i, atomic_t *v)
-{
-	int ret = arch_atomic_add_return_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
-#endif
-
-#ifndef arch_atomic_add_return_release
-static __always_inline int
-arch_atomic_add_return_release(int i, atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_add_return_relaxed(i, v);
-}
-#define arch_atomic_add_return_release arch_atomic_add_return_release
-#endif
-
-#ifndef arch_atomic_add_return
-static __always_inline int
-arch_atomic_add_return(int i, atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_add_return_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_add_return arch_atomic_add_return
-#endif
-
-#endif /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_fetch_add_relaxed
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add
-#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
-#else /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_fetch_add_acquire
-static __always_inline int
-arch_atomic_fetch_add_acquire(int i, atomic_t *v)
-{
-	int ret = arch_atomic_fetch_add_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
-#endif
-
-#ifndef arch_atomic_fetch_add_release
-static __always_inline int
-arch_atomic_fetch_add_release(int i, atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_fetch_add_relaxed(i, v);
-}
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
-#endif
-
-#ifndef arch_atomic_fetch_add
-static __always_inline int
-arch_atomic_fetch_add(int i, atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_fetch_add_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_fetch_add arch_atomic_fetch_add
-#endif
-
-#endif /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_sub_return_relaxed
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return
-#define arch_atomic_sub_return_release arch_atomic_sub_return
-#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
-#else /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_sub_return_acquire
-static __always_inline int
-arch_atomic_sub_return_acquire(int i, atomic_t *v)
-{
-	int ret = arch_atomic_sub_return_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
-#endif
-
-#ifndef arch_atomic_sub_return_release
-static __always_inline int
-arch_atomic_sub_return_release(int i, atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_sub_return_relaxed(i, v);
-}
-#define arch_atomic_sub_return_release arch_atomic_sub_return_release
-#endif
-
-#ifndef arch_atomic_sub_return
-static __always_inline int
-arch_atomic_sub_return(int i, atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_sub_return_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_sub_return arch_atomic_sub_return
-#endif
-
-#endif /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_fetch_sub_relaxed
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
-#else /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_fetch_sub_acquire
-static __always_inline int
-arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
-	int ret = arch_atomic_fetch_sub_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
-#endif
-
-#ifndef arch_atomic_fetch_sub_release
-static __always_inline int
-arch_atomic_fetch_sub_release(int i, atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_fetch_sub_relaxed(i, v);
-}
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
-#endif
-
-#ifndef arch_atomic_fetch_sub
-static __always_inline int
-arch_atomic_fetch_sub(int i, atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_fetch_sub_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_fetch_sub arch_atomic_fetch_sub
-#endif
-
-#endif /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_inc
-static __always_inline void
-arch_atomic_inc(atomic_t *v)
-{
-	arch_atomic_add(1, v);
-}
-#define arch_atomic_inc arch_atomic_inc
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-#ifdef arch_atomic_inc_return
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return
-#define arch_atomic_inc_return_release arch_atomic_inc_return
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
-#endif /* arch_atomic_inc_return */
-
-#ifndef arch_atomic_inc_return
-static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
-{
-	return arch_atomic_add_return(1, v);
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
-#endif
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
-	return arch_atomic_add_return_acquire(1, v);
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
-{
-	return arch_atomic_add_return_release(1, v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-static __always_inline int
-arch_atomic_inc_return_relaxed(atomic_t *v)
-{
-	return arch_atomic_add_return_relaxed(1, v);
-}
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
-#endif
-
-#else /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
-	int ret = arch_atomic_inc_return_relaxed(v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_inc_return_relaxed(v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
-#endif
-
-#ifndef arch_atomic_inc_return
-static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_inc_return_relaxed(v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
-#endif
-
-#endif /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_fetch_inc_relaxed
-#ifdef arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
-#endif /* arch_atomic_fetch_inc */
-
-#ifndef arch_atomic_fetch_inc
-static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
-{
-	return arch_atomic_fetch_add(1, v);
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
-#endif
-
-#ifndef arch_atomic_fetch_inc_acquire
-static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
-{
-	return arch_atomic_fetch_add_acquire(1, v);
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic_fetch_inc_release
-static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
-{
-	return arch_atomic_fetch_add_release(1, v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
-#endif
-
-#ifndef arch_atomic_fetch_inc_relaxed
-static __always_inline int
-arch_atomic_fetch_inc_relaxed(atomic_t *v)
-{
-	return arch_atomic_fetch_add_relaxed(1, v);
-}
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
-#endif
-
-#else /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_fetch_inc_acquire
-static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
-{
-	int ret = arch_atomic_fetch_inc_relaxed(v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic_fetch_inc_release
-static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_fetch_inc_relaxed(v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
-#endif
-
-#ifndef arch_atomic_fetch_inc
-static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_fetch_inc_relaxed(v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
-#endif
-
-#endif /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_dec
-static __always_inline void
-arch_atomic_dec(atomic_t *v)
-{
-	arch_atomic_sub(1, v);
-}
-#define arch_atomic_dec arch_atomic_dec
-#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-#ifdef arch_atomic_dec_return
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return
-#define arch_atomic_dec_return_release arch_atomic_dec_return
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
-#endif /* arch_atomic_dec_return */
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
-{
-	return arch_atomic_sub_return(1, v);
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
-#endif
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
-	return arch_atomic_sub_return_acquire(1, v);
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-
-#ifndef arch_atomic_dec_return_release
-static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
-{
-	return arch_atomic_sub_return_release(1, v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
-#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-static __always_inline int
-arch_atomic_dec_return_relaxed(atomic_t *v)
-{
-	return arch_atomic_sub_return_relaxed(1, v);
-}
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-#endif
-
-#else /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
-	int ret = arch_atomic_dec_return_relaxed(v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-
-#ifndef arch_atomic_dec_return_release
-static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_dec_return_relaxed(v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
-#endif
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_dec_return_relaxed(v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
-#endif
-
-#endif /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_fetch_dec_relaxed
-#ifdef arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
-#endif /* arch_atomic_fetch_dec */
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
-	return arch_atomic_fetch_sub(1, v);
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-
-#ifndef arch_atomic_fetch_dec_acquire
-static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
-{
-	return arch_atomic_fetch_sub_acquire(1, v);
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
-	return arch_atomic_fetch_sub_release(1, v);
-}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-
-#ifndef arch_atomic_fetch_dec_relaxed
-static __always_inline int
-arch_atomic_fetch_dec_relaxed(atomic_t *v)
-{
-	return arch_atomic_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
-#endif
-
-#else /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_dec_acquire
-static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
-{
-	int ret = arch_atomic_fetch_dec_relaxed(v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_fetch_dec_relaxed(v);
-}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_fetch_dec_relaxed(v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-
-#endif /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_and_relaxed
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and
-#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
-#else /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_fetch_and_acquire
-static __always_inline int
-arch_atomic_fetch_and_acquire(int i, atomic_t *v)
-{
-	int ret = arch_atomic_fetch_and_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
-#endif
-
-#ifndef arch_atomic_fetch_and_release
-static __always_inline int
-arch_atomic_fetch_and_release(int i, atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_fetch_and_relaxed(i, v);
-}
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
-#endif
-
-#ifndef arch_atomic_fetch_and
-static __always_inline int
-arch_atomic_fetch_and(int i, atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_fetch_and_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_fetch_and arch_atomic_fetch_and
-#endif
-
-#endif /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_andnot
-static __always_inline void
-arch_atomic_andnot(int i, atomic_t *v)
-{
-	arch_atomic_and(~i, v);
-}
-#define arch_atomic_andnot arch_atomic_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-#ifdef arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
-#endif /* arch_atomic_fetch_andnot */
-
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
-{
-	return arch_atomic_fetch_and(~i, v);
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
-	return arch_atomic_fetch_and_acquire(~i, v);
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic_fetch_andnot_release
-static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
-{
-	return arch_atomic_fetch_and_release(~i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-static __always_inline int
-arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
-	return arch_atomic_fetch_and_relaxed(~i, v);
-}
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
-	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic_fetch_andnot_release
-static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_fetch_andnot_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
-#endif
-
-#endif /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_or_relaxed
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or
-#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
-#else /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_or_acquire
-static __always_inline int
-arch_atomic_fetch_or_acquire(int i, atomic_t *v)
-{
-	int ret = arch_atomic_fetch_or_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
-#endif
-
-#ifndef arch_atomic_fetch_or_release
-static __always_inline int
-arch_atomic_fetch_or_release(int i, atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_fetch_or_relaxed(i, v);
-}
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
-#endif
-
-#ifndef arch_atomic_fetch_or
-static __always_inline int
-arch_atomic_fetch_or(int i, atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_fetch_or_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_fetch_or arch_atomic_fetch_or
-#endif
-
-#endif /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_xor_relaxed
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
-#else /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_fetch_xor_acquire
-static __always_inline int
-arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
-	int ret = arch_atomic_fetch_xor_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
-#endif
-
-#ifndef arch_atomic_fetch_xor_release
-static __always_inline int
-arch_atomic_fetch_xor_release(int i, atomic_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic_fetch_xor_relaxed(i, v);
-}
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
-#endif
-
-#ifndef arch_atomic_fetch_xor
-static __always_inline int
-arch_atomic_fetch_xor(int i, atomic_t *v)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_fetch_xor_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_fetch_xor arch_atomic_fetch_xor
-#endif
-
-#endif /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_xchg_relaxed
-#define arch_atomic_xchg_acquire arch_atomic_xchg
-#define arch_atomic_xchg_release arch_atomic_xchg
-#define arch_atomic_xchg_relaxed arch_atomic_xchg
-#else /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_xchg_acquire
-static __always_inline int
-arch_atomic_xchg_acquire(atomic_t *v, int i)
-{
-	int ret = arch_atomic_xchg_relaxed(v, i);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
-#endif
-
-#ifndef arch_atomic_xchg_release
-static __always_inline int
-arch_atomic_xchg_release(atomic_t *v, int i)
-{
-	__atomic_release_fence();
-	return arch_atomic_xchg_relaxed(v, i);
-}
-#define arch_atomic_xchg_release arch_atomic_xchg_release
-#endif
-
-#ifndef arch_atomic_xchg
-static __always_inline int
-arch_atomic_xchg(atomic_t *v, int i)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_xchg_relaxed(v, i);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_xchg arch_atomic_xchg
-#endif
-
-#endif /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
-#else /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_acquire
-static __always_inline int
-arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
-	int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_cmpxchg_release
-static __always_inline int
-arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
-	__atomic_release_fence();
-	return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_cmpxchg
-static __always_inline int
-arch_atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	int ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_cmpxchg_relaxed(v, old, new);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-#endif
-
-#endif /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-#ifdef arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
-#endif /* arch_atomic_try_cmpxchg */
-
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
-	int r, o = *old;
-	r = arch_atomic_cmpxchg(v, o, new);
-	if (unlikely(r != o))
-		*old = r;
-	return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
-	int r, o = *old;
-	r = arch_atomic_cmpxchg_acquire(v, o, new);
-	if (unlikely(r != o))
-		*old = r;
-	return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
-	int r, o = *old;
-	r = arch_atomic_cmpxchg_release(v, o, new);
-	if (unlikely(r != o))
-		*old = r;
-	return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-static __always_inline bool
-arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
-	int r, o = *old;
-	r = arch_atomic_cmpxchg_relaxed(v, o, new);
-	if (unlikely(r != o))
-		*old = r;
-	return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
-	bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
-	__atomic_release_fence();
-	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
-	bool ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#endif /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_sub_and_test
-/**
- * arch_atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic_sub_and_test(int i, atomic_t *v)
-{
-	return arch_atomic_sub_return(i, v) == 0;
-}
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
-#endif
-
-#ifndef arch_atomic_dec_and_test
-/**
- * arch_atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-arch_atomic_dec_and_test(atomic_t *v)
-{
-	return arch_atomic_dec_return(v) == 0;
-}
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
-#endif
-
-#ifndef arch_atomic_inc_and_test
-/**
- * arch_atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic_inc_and_test(atomic_t *v)
-{
-	return arch_atomic_inc_return(v) == 0;
-}
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
-#endif
-
-#ifndef arch_atomic_add_negative
-/**
- * arch_atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-arch_atomic_add_negative(int i, atomic_t *v)
-{
-	return arch_atomic_add_return(i, v) < 0;
-}
-#define arch_atomic_add_negative arch_atomic_add_negative
-#endif
-
-#ifndef arch_atomic_fetch_add_unless
-/**
- * arch_atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline int
-arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
-	int c = arch_atomic_read(v);
-
-	do {
-		if (unlikely(c == u))
-			break;
-	} while (!arch_atomic_try_cmpxchg(v, &c, c + a));
-
-	return c;
-}
-#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#endif
-
-#ifndef arch_atomic_add_unless
-/**
- * arch_atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-arch_atomic_add_unless(atomic_t *v, int a, int u)
-{
-	return arch_atomic_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic_add_unless arch_atomic_add_unless
-#endif
-
-#ifndef arch_atomic_inc_not_zero
-/**
- * arch_atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-arch_atomic_inc_not_zero(atomic_t *v)
-{
-	return arch_atomic_add_unless(v, 1, 0);
-}
-#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
-#endif
-
-#ifndef arch_atomic_inc_unless_negative
-static __always_inline bool
-arch_atomic_inc_unless_negative(atomic_t *v)
-{
-	int c = arch_atomic_read(v);
-
-	do {
-		if (unlikely(c < 0))
-			return false;
-	} while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
-
-	return true;
-}
-#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
-#endif
-
-#ifndef arch_atomic_dec_unless_positive
-static __always_inline bool
-arch_atomic_dec_unless_positive(atomic_t *v)
-{
-	int c = arch_atomic_read(v);
-
-	do {
-		if (unlikely(c > 0))
-			return false;
-	} while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
-
-	return true;
-}
-#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
-#endif
-
-#ifndef arch_atomic_dec_if_positive
-static __always_inline int
-arch_atomic_dec_if_positive(atomic_t *v)
-{
-	int dec, c = arch_atomic_read(v);
-
-	do {
-		dec = c - 1;
-		if (unlikely(dec < 0))
-			break;
-	} while (!arch_atomic_try_cmpxchg(v, &c, dec));
-
-	return dec;
-}
-#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
-#endif
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#ifndef arch_atomic64_read_acquire
-static __always_inline s64
-arch_atomic64_read_acquire(const atomic64_t *v)
-{
-	return smp_load_acquire(&(v)->counter);
-}
-#define arch_atomic64_read_acquire arch_atomic64_read_acquire
-#endif
-
-#ifndef arch_atomic64_set_release
-static __always_inline void
-arch_atomic64_set_release(atomic64_t *v, s64 i)
-{
-	smp_store_release(&(v)->counter, i);
-}
-#define arch_atomic64_set_release arch_atomic64_set_release
-#endif
-
-#ifndef arch_atomic64_add_return_relaxed
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return
-#define arch_atomic64_add_return_release arch_atomic64_add_return
-#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
-#else /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_add_return_acquire
-static __always_inline s64
-arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
-	s64 ret = arch_atomic64_add_return_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
-#endif
-
-#ifndef arch_atomic64_add_return_release
-static __always_inline s64
-arch_atomic64_add_return_release(s64 i, atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_add_return_relaxed(i, v);
-}
-#define arch_atomic64_add_return_release arch_atomic64_add_return_release
-#endif
-
-#ifndef arch_atomic64_add_return
-static __always_inline s64
-arch_atomic64_add_return(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_add_return_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_add_return arch_atomic64_add_return
-#endif
-
-#endif /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_fetch_add_relaxed
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
-#else /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_fetch_add_acquire
-static __always_inline s64
-arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
-	s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_add_release
-static __always_inline s64
-arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_fetch_add_relaxed(i, v);
-}
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
-#endif
-
-#ifndef arch_atomic64_fetch_add
-static __always_inline s64
-arch_atomic64_fetch_add(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_fetch_add_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-#endif
-
-#endif /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_sub_return_relaxed
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return
-#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
-#else /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_sub_return_acquire
-static __always_inline s64
-arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
-	s64 ret = arch_atomic64_sub_return_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
-#endif
-
-#ifndef arch_atomic64_sub_return_release
-static __always_inline s64
-arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_sub_return_relaxed(i, v);
-}
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
-#endif
-
-#ifndef arch_atomic64_sub_return
-static __always_inline s64
-arch_atomic64_sub_return(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_sub_return_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_sub_return arch_atomic64_sub_return
-#endif
-
-#endif /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_relaxed
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
-#else /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_acquire
-static __always_inline s64
-arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
-	s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_sub_release
-static __always_inline s64
-arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
-#endif
-
-#ifndef arch_atomic64_fetch_sub
-static __always_inline s64
-arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_fetch_sub_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
-#endif
-
-#endif /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_inc
-static __always_inline void
-arch_atomic64_inc(atomic64_t *v)
-{
-	arch_atomic64_add(1, v);
-}
-#define arch_atomic64_inc arch_atomic64_inc
-#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-#ifdef arch_atomic64_inc_return
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
-#endif /* arch_atomic64_inc_return */
-
-#ifndef arch_atomic64_inc_return
-static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
-{
-	return arch_atomic64_add_return(1, v);
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
-#endif
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
-	return arch_atomic64_add_return_acquire(1, v);
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
-{
-	return arch_atomic64_add_return_release(1, v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
-#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-static __always_inline s64
-arch_atomic64_inc_return_relaxed(atomic64_t *v)
-{
-	return arch_atomic64_add_return_relaxed(1, v);
-}
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
-#endif
-
-#else /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
-	s64 ret = arch_atomic64_inc_return_relaxed(v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_inc_return_relaxed(v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
-#endif
-
-#ifndef arch_atomic64_inc_return
-static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_inc_return_relaxed(v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
-#endif
-
-#endif /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-#ifdef arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
-#endif /* arch_atomic64_fetch_inc */
-
-#ifndef arch_atomic64_fetch_inc
-static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
-{
-	return arch_atomic64_fetch_add(1, v);
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
-#endif
-
-#ifndef arch_atomic64_fetch_inc_acquire
-static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
-{
-	return arch_atomic64_fetch_add_acquire(1, v);
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_inc_release
-static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
-{
-	return arch_atomic64_fetch_add_release(1, v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
-#endif
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-static __always_inline s64
-arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
-	return arch_atomic64_fetch_add_relaxed(1, v);
-}
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_acquire
-static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
-{
-	s64 ret = arch_atomic64_fetch_inc_relaxed(v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_inc_release
-static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_fetch_inc_relaxed(v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
-#endif
-
-#ifndef arch_atomic64_fetch_inc
-static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_fetch_inc_relaxed(v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
-#endif
-
-#endif /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_dec
-static __always_inline void
-arch_atomic64_dec(atomic64_t *v)
-{
-	arch_atomic64_sub(1, v);
-}
-#define arch_atomic64_dec arch_atomic64_dec
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-#ifdef arch_atomic64_dec_return
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
-#endif /* arch_atomic64_dec_return */
-
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
-{
-	return arch_atomic64_sub_return(1, v);
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
-#endif
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
-	return arch_atomic64_sub_return_acquire(1, v);
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-
-#ifndef arch_atomic64_dec_return_release
-static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
-{
-	return arch_atomic64_sub_return_release(1, v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-static __always_inline s64
-arch_atomic64_dec_return_relaxed(atomic64_t *v)
-{
-	return arch_atomic64_sub_return_relaxed(1, v);
-}
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
-#endif
-
-#else /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
-	s64 ret = arch_atomic64_dec_return_relaxed(v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-
-#ifndef arch_atomic64_dec_return_release
-static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_dec_return_relaxed(v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
-#endif
-
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_dec_return_relaxed(v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
-#endif
-
-#endif /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-#ifdef arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
-#endif /* arch_atomic64_fetch_dec */
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
-	return arch_atomic64_fetch_sub(1, v);
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-
-#ifndef arch_atomic64_fetch_dec_acquire
-static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
-{
-	return arch_atomic64_fetch_sub_acquire(1, v);
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
-	return arch_atomic64_fetch_sub_release(1, v);
-}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-static __always_inline s64
-arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
-	return arch_atomic64_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_acquire
-static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
-{
-	s64 ret = arch_atomic64_fetch_dec_relaxed(v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_fetch_dec_relaxed(v);
-}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_fetch_dec_relaxed(v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-
-#endif /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_and_relaxed
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
-#else /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_fetch_and_acquire
-static __always_inline s64
-arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
-	s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_and_release
-static __always_inline s64
-arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_fetch_and_relaxed(i, v);
-}
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
-#endif
-
-#ifndef arch_atomic64_fetch_and
-static __always_inline s64
-arch_atomic64_fetch_and(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_fetch_and_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-#endif
-
-#endif /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_andnot
-static __always_inline void
-arch_atomic64_andnot(s64 i, atomic64_t *v)
-{
-	arch_atomic64_and(~i, v);
-}
-#define arch_atomic64_andnot arch_atomic64_andnot
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-#ifdef arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
-#endif /* arch_atomic64_fetch_andnot */
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
-	return arch_atomic64_fetch_and(~i, v);
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
-	return arch_atomic64_fetch_and_acquire(~i, v);
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_release
-static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
-	return arch_atomic64_fetch_and_release(~i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-static __always_inline s64
-arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
-	return arch_atomic64_fetch_and_relaxed(~i, v);
-}
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_andnot_relaxed */
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
-	s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_release
-static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_fetch_andnot_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
-#endif
-
-#endif /* arch_atomic64_fetch_andnot_relaxed */
-
-#ifndef arch_atomic64_fetch_or_relaxed
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
-#else /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_or_acquire
-static __always_inline s64
-arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
-	s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_or_release
-static __always_inline s64
-arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_fetch_or_relaxed(i, v);
-}
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
-#endif
-
-#ifndef arch_atomic64_fetch_or
-static __always_inline s64
-arch_atomic64_fetch_or(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_fetch_or_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-#endif
-
-#endif /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_relaxed
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
-#else /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_acquire
-static __always_inline s64
-arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
-	s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_xor_release
-static __always_inline s64
-arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
-	__atomic_release_fence();
-	return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
-#endif
-
-#ifndef arch_atomic64_fetch_xor
-static __always_inline s64
-arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_fetch_xor_relaxed(i, v);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
-#endif
-
-#endif /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_xchg_relaxed
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg
-#define arch_atomic64_xchg_release arch_atomic64_xchg
-#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
-#else /* arch_atomic64_xchg_relaxed */
-
-#ifndef arch_atomic64_xchg_acquire
-static __always_inline s64
-arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
-	s64 ret = arch_atomic64_xchg_relaxed(v, i);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
-#endif
-
-#ifndef arch_atomic64_xchg_release
-static __always_inline s64
-arch_atomic64_xchg_release(atomic64_t *v, s64 i)
-{
-	__atomic_release_fence();
-	return arch_atomic64_xchg_relaxed(v, i);
-}
-#define arch_atomic64_xchg_release arch_atomic64_xchg_release
-#endif
-
-#ifndef arch_atomic64_xchg
-static __always_inline s64
-arch_atomic64_xchg(atomic64_t *v, s64 i)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_xchg_relaxed(v, i);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_xchg arch_atomic64_xchg
-#endif
-
-#endif /* arch_atomic64_xchg_relaxed */
-
-#ifndef arch_atomic64_cmpxchg_relaxed
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
-#else /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_cmpxchg_acquire
-static __always_inline s64
-arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
-	s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_cmpxchg_release
-static __always_inline s64
-arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
-	__atomic_release_fence();
-	return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_cmpxchg
-static __always_inline s64
-arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
-	s64 ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#endif
-
-#endif /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-#ifdef arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
-#endif /* arch_atomic64_try_cmpxchg */
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
-	s64 r, o = *old;
-	r = arch_atomic64_cmpxchg(v, o, new);
-	if (unlikely(r != o))
-		*old = r;
-	return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
-	s64 r, o = *old;
-	r = arch_atomic64_cmpxchg_acquire(v, o, new);
-	if (unlikely(r != o))
-		*old = r;
-	return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
-	s64 r, o = *old;
-	r = arch_atomic64_cmpxchg_release(v, o, new);
-	if (unlikely(r != o))
-		*old = r;
-	return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-static __always_inline bool
-arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
-	s64 r, o = *old;
-	r = arch_atomic64_cmpxchg_relaxed(v, o, new);
-	if (unlikely(r != o))
-		*old = r;
-	return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
-	bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-	__atomic_acquire_fence();
-	return ret;
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
-	__atomic_release_fence();
-	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
-	bool ret;
-	__atomic_pre_full_fence();
-	ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-	__atomic_post_full_fence();
-	return ret;
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-
-#endif /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_sub_and_test
-/**
- * arch_atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
-	return arch_atomic64_sub_return(i, v) == 0;
-}
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
-#endif
-
-#ifndef arch_atomic64_dec_and_test
-/**
- * arch_atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-arch_atomic64_dec_and_test(atomic64_t *v)
-{
-	return arch_atomic64_dec_return(v) == 0;
-}
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
-#endif
-
-#ifndef arch_atomic64_inc_and_test
-/**
- * arch_atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic64_inc_and_test(atomic64_t *v)
-{
-	return arch_atomic64_inc_return(v) == 0;
-}
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
-#endif
-
-#ifndef arch_atomic64_add_negative
-/**
- * arch_atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-arch_atomic64_add_negative(s64 i, atomic64_t *v)
-{
-	return arch_atomic64_add_return(i, v) < 0;
-}
-#define arch_atomic64_add_negative arch_atomic64_add_negative
-#endif
-
-#ifndef arch_atomic64_fetch_add_unless
-/**
- * arch_atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline s64
-arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-	s64 c = arch_atomic64_read(v);
-
-	do {
-		if (unlikely(c == u))
-			break;
-	} while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
-
-	return c;
-}
-#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
-#endif
-
-#ifndef arch_atomic64_add_unless
-/**
- * arch_atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-	return arch_atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic64_add_unless arch_atomic64_add_unless
-#endif
-
-#ifndef arch_atomic64_inc_not_zero
-/**
- * arch_atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-arch_atomic64_inc_not_zero(atomic64_t *v)
-{
-	return arch_atomic64_add_unless(v, 1, 0);
-}
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
-#endif
-
-#ifndef arch_atomic64_inc_unless_negative
-static __always_inline bool
-arch_atomic64_inc_unless_negative(atomic64_t *v)
-{
-	s64 c = arch_atomic64_read(v);
-
-	do {
-		if (unlikely(c < 0))
-			return false;
-	} while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
-
-	return true;
-}
-#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
-#endif
-
-#ifndef arch_atomic64_dec_unless_positive
-static __always_inline bool
-arch_atomic64_dec_unless_positive(atomic64_t *v)
-{
-	s64 c = arch_atomic64_read(v);
-
-	do {
-		if (unlikely(c > 0))
-			return false;
-	} while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
-
-	return true;
-}
-#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
-#endif
-
-#ifndef arch_atomic64_dec_if_positive
-static __always_inline s64
-arch_atomic64_dec_if_positive(atomic64_t *v)
-{
-	s64 dec, c = arch_atomic64_read(v);
-
-	do {
-		dec = c - 1;
-		if (unlikely(dec < 0))
-			break;
-	} while (!arch_atomic64_try_cmpxchg(v, &c, dec));
-
-	return dec;
-}
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#endif
-
-#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index ed1d3ffd5b9d..1896a58b5aba 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -77,9 +77,8 @@
 	__ret;								\
 })
 
-#include <linux/atomic-arch-fallback.h>
-#include <asm-generic/atomic-instrumented.h>
-
-#include <asm-generic/atomic-long.h>
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
 
 #endif /* _LINUX_ATOMIC_H */
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
new file mode 100644
index 000000000000..a3dba31df01e
--- /dev/null
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -0,0 +1,2361 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#ifndef arch_xchg_relaxed
+#define arch_xchg_acquire arch_xchg
+#define arch_xchg_release arch_xchg
+#define arch_xchg_relaxed arch_xchg
+#else /* arch_xchg_relaxed */
+
+#ifndef arch_xchg_acquire
+#define arch_xchg_acquire(...) \
+	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg_release
+#define arch_xchg_release(...) \
+	__atomic_op_release(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg
+#define arch_xchg(...) \
+	__atomic_op_fence(arch_xchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_xchg_relaxed */
+
+#ifndef arch_cmpxchg_relaxed
+#define arch_cmpxchg_acquire arch_cmpxchg
+#define arch_cmpxchg_release arch_cmpxchg
+#define arch_cmpxchg_relaxed arch_cmpxchg
+#else /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg_acquire
+#define arch_cmpxchg_acquire(...) \
+	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg_release
+#define arch_cmpxchg_release(...) \
+	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg
+#define arch_cmpxchg(...) \
+	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg64_relaxed
+#define arch_cmpxchg64_acquire arch_cmpxchg64
+#define arch_cmpxchg64_release arch_cmpxchg64
+#define arch_cmpxchg64_relaxed arch_cmpxchg64
+#else /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_cmpxchg64_acquire
+#define arch_cmpxchg64_acquire(...)
\ + __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg64_release +#define arch_cmpxchg64_release(...) \ + __atomic_op_release(arch_cmpxchg64, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg64 +#define arch_cmpxchg64(...) \ + __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__) +#endif + +#endif /* arch_cmpxchg64_relaxed */ + +#ifndef arch_try_cmpxchg_relaxed +#ifdef arch_try_cmpxchg +#define arch_try_cmpxchg_acquire arch_try_cmpxchg +#define arch_try_cmpxchg_release arch_try_cmpxchg +#define arch_try_cmpxchg_relaxed arch_try_cmpxchg +#endif /* arch_try_cmpxchg */ + +#ifndef arch_try_cmpxchg +#define arch_try_cmpxchg(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = arch_cmpxchg((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* arch_try_cmpxchg */ + +#ifndef arch_try_cmpxchg_acquire +#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* arch_try_cmpxchg_acquire */ + +#ifndef arch_try_cmpxchg_release +#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* arch_try_cmpxchg_release */ + +#ifndef arch_try_cmpxchg_relaxed +#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* arch_try_cmpxchg_relaxed */ + +#else /* arch_try_cmpxchg_relaxed */ + +#ifndef arch_try_cmpxchg_acquire +#define arch_try_cmpxchg_acquire(...) \ + __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__) +#endif + +#ifndef arch_try_cmpxchg_release +#define arch_try_cmpxchg_release(...) \ + __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__) +#endif + +#ifndef arch_try_cmpxchg +#define arch_try_cmpxchg(...) 
\ + __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__) +#endif + +#endif /* arch_try_cmpxchg_relaxed */ + +#ifndef arch_atomic_read_acquire +static __always_inline int +arch_atomic_read_acquire(const atomic_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define arch_atomic_read_acquire arch_atomic_read_acquire +#endif + +#ifndef arch_atomic_set_release +static __always_inline void +arch_atomic_set_release(atomic_t *v, int i) +{ + smp_store_release(&(v)->counter, i); +} +#define arch_atomic_set_release arch_atomic_set_release +#endif + +#ifndef arch_atomic_add_return_relaxed +#define arch_atomic_add_return_acquire arch_atomic_add_return +#define arch_atomic_add_return_release arch_atomic_add_return +#define arch_atomic_add_return_relaxed arch_atomic_add_return +#else /* arch_atomic_add_return_relaxed */ + +#ifndef arch_atomic_add_return_acquire +static __always_inline int +arch_atomic_add_return_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire +#endif + +#ifndef arch_atomic_add_return_release +static __always_inline int +arch_atomic_add_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_add_return_relaxed(i, v); +} +#define arch_atomic_add_return_release arch_atomic_add_return_release +#endif + +#ifndef arch_atomic_add_return +static __always_inline int +arch_atomic_add_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_add_return arch_atomic_add_return +#endif + +#endif /* arch_atomic_add_return_relaxed */ + +#ifndef arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add +#define arch_atomic_fetch_add_release arch_atomic_fetch_add +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add +#else /* arch_atomic_fetch_add_relaxed */ + +#ifndef arch_atomic_fetch_add_acquire +static __always_inline int +arch_atomic_fetch_add_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire +#endif + +#ifndef arch_atomic_fetch_add_release +static __always_inline int +arch_atomic_fetch_add_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_add_relaxed(i, v); +} +#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release +#endif + +#ifndef arch_atomic_fetch_add +static __always_inline int +arch_atomic_fetch_add(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_add arch_atomic_fetch_add +#endif + +#endif /* arch_atomic_fetch_add_relaxed */ + +#ifndef arch_atomic_sub_return_relaxed +#define arch_atomic_sub_return_acquire arch_atomic_sub_return +#define arch_atomic_sub_return_release arch_atomic_sub_return +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return +#else /* arch_atomic_sub_return_relaxed */ + +#ifndef arch_atomic_sub_return_acquire +static __always_inline int +arch_atomic_sub_return_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire +#endif + +#ifndef arch_atomic_sub_return_release 
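
/*
 * A schematic sketch of the pattern repeated throughout this generated
 * header: when an architecture supplies only the _relaxed form of an
 * operation, the stronger orderings are derived from it mechanically
 * (mirroring the generator's templates shown above and below):
 *
 *	acquire: ret = op_relaxed(...); __atomic_acquire_fence();
 *	release: __atomic_release_fence(); ret = op_relaxed(...);
 *	full:    __atomic_pre_full_fence(); ret = op_relaxed(...);
 *	         __atomic_post_full_fence();
 *
 * Conversely, when an architecture provides only the fully ordered
 * operation, the weaker variants simply alias it, as the bare #define
 * blocks at the head of each #ifndef ..._relaxed section do.
 */
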
+static __always_inline int +arch_atomic_sub_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_sub_return_relaxed(i, v); +} +#define arch_atomic_sub_return_release arch_atomic_sub_return_release +#endif + +#ifndef arch_atomic_sub_return +static __always_inline int +arch_atomic_sub_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_sub_return arch_atomic_sub_return +#endif + +#endif /* arch_atomic_sub_return_relaxed */ + +#ifndef arch_atomic_fetch_sub_relaxed +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub +#else /* arch_atomic_fetch_sub_relaxed */ + +#ifndef arch_atomic_fetch_sub_acquire +static __always_inline int +arch_atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire +#endif + +#ifndef arch_atomic_fetch_sub_release +static __always_inline int +arch_atomic_fetch_sub_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_sub_relaxed(i, v); +} +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release +#endif + +#ifndef arch_atomic_fetch_sub +static __always_inline int +arch_atomic_fetch_sub(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_sub arch_atomic_fetch_sub +#endif + +#endif /* arch_atomic_fetch_sub_relaxed */ + +#ifndef arch_atomic_inc +static __always_inline void +arch_atomic_inc(atomic_t *v) +{ + arch_atomic_add(1, v); +} +#define arch_atomic_inc arch_atomic_inc +#endif + +#ifndef arch_atomic_inc_return_relaxed +#ifdef arch_atomic_inc_return +#define arch_atomic_inc_return_acquire arch_atomic_inc_return +#define arch_atomic_inc_return_release arch_atomic_inc_return +#define arch_atomic_inc_return_relaxed arch_atomic_inc_return +#endif /* arch_atomic_inc_return */ + +#ifndef arch_atomic_inc_return +static __always_inline int +arch_atomic_inc_return(atomic_t *v) +{ + return arch_atomic_add_return(1, v); +} +#define arch_atomic_inc_return arch_atomic_inc_return +#endif + +#ifndef arch_atomic_inc_return_acquire +static __always_inline int +arch_atomic_inc_return_acquire(atomic_t *v) +{ + return arch_atomic_add_return_acquire(1, v); +} +#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire +#endif + +#ifndef arch_atomic_inc_return_release +static __always_inline int +arch_atomic_inc_return_release(atomic_t *v) +{ + return arch_atomic_add_return_release(1, v); +} +#define arch_atomic_inc_return_release arch_atomic_inc_return_release +#endif + +#ifndef arch_atomic_inc_return_relaxed +static __always_inline int +arch_atomic_inc_return_relaxed(atomic_t *v) +{ + return arch_atomic_add_return_relaxed(1, v); +} +#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed +#endif + +#else /* arch_atomic_inc_return_relaxed */ + +#ifndef arch_atomic_inc_return_acquire +static __always_inline int +arch_atomic_inc_return_acquire(atomic_t *v) +{ + int ret = arch_atomic_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire +#endif + +#ifndef 
arch_atomic_inc_return_release +static __always_inline int +arch_atomic_inc_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_inc_return_relaxed(v); +} +#define arch_atomic_inc_return_release arch_atomic_inc_return_release +#endif + +#ifndef arch_atomic_inc_return +static __always_inline int +arch_atomic_inc_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_inc_return arch_atomic_inc_return +#endif + +#endif /* arch_atomic_inc_return_relaxed */ + +#ifndef arch_atomic_fetch_inc_relaxed +#ifdef arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc +#endif /* arch_atomic_fetch_inc */ + +#ifndef arch_atomic_fetch_inc +static __always_inline int +arch_atomic_fetch_inc(atomic_t *v) +{ + return arch_atomic_fetch_add(1, v); +} +#define arch_atomic_fetch_inc arch_atomic_fetch_inc +#endif + +#ifndef arch_atomic_fetch_inc_acquire +static __always_inline int +arch_atomic_fetch_inc_acquire(atomic_t *v) +{ + return arch_atomic_fetch_add_acquire(1, v); +} +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire +#endif + +#ifndef arch_atomic_fetch_inc_release +static __always_inline int +arch_atomic_fetch_inc_release(atomic_t *v) +{ + return arch_atomic_fetch_add_release(1, v); +} +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release +#endif + +#ifndef arch_atomic_fetch_inc_relaxed +static __always_inline int +arch_atomic_fetch_inc_relaxed(atomic_t *v) +{ + return arch_atomic_fetch_add_relaxed(1, v); +} +#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed +#endif + +#else /* arch_atomic_fetch_inc_relaxed */ + +#ifndef arch_atomic_fetch_inc_acquire +static __always_inline int +arch_atomic_fetch_inc_acquire(atomic_t *v) +{ + int ret = arch_atomic_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire +#endif + +#ifndef arch_atomic_fetch_inc_release +static __always_inline int +arch_atomic_fetch_inc_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_inc_relaxed(v); +} +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release +#endif + +#ifndef arch_atomic_fetch_inc +static __always_inline int +arch_atomic_fetch_inc(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_inc arch_atomic_fetch_inc +#endif + +#endif /* arch_atomic_fetch_inc_relaxed */ + +#ifndef arch_atomic_dec +static __always_inline void +arch_atomic_dec(atomic_t *v) +{ + arch_atomic_sub(1, v); +} +#define arch_atomic_dec arch_atomic_dec +#endif + +#ifndef arch_atomic_dec_return_relaxed +#ifdef arch_atomic_dec_return +#define arch_atomic_dec_return_acquire arch_atomic_dec_return +#define arch_atomic_dec_return_release arch_atomic_dec_return +#define arch_atomic_dec_return_relaxed arch_atomic_dec_return +#endif /* arch_atomic_dec_return */ + +#ifndef arch_atomic_dec_return +static __always_inline int +arch_atomic_dec_return(atomic_t *v) +{ + return arch_atomic_sub_return(1, v); +} +#define arch_atomic_dec_return arch_atomic_dec_return +#endif + +#ifndef arch_atomic_dec_return_acquire +static __always_inline int +arch_atomic_dec_return_acquire(atomic_t *v) +{ + return 
arch_atomic_sub_return_acquire(1, v); +} +#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire +#endif + +#ifndef arch_atomic_dec_return_release +static __always_inline int +arch_atomic_dec_return_release(atomic_t *v) +{ + return arch_atomic_sub_return_release(1, v); +} +#define arch_atomic_dec_return_release arch_atomic_dec_return_release +#endif + +#ifndef arch_atomic_dec_return_relaxed +static __always_inline int +arch_atomic_dec_return_relaxed(atomic_t *v) +{ + return arch_atomic_sub_return_relaxed(1, v); +} +#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed +#endif + +#else /* arch_atomic_dec_return_relaxed */ + +#ifndef arch_atomic_dec_return_acquire +static __always_inline int +arch_atomic_dec_return_acquire(atomic_t *v) +{ + int ret = arch_atomic_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire +#endif + +#ifndef arch_atomic_dec_return_release +static __always_inline int +arch_atomic_dec_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_dec_return_relaxed(v); +} +#define arch_atomic_dec_return_release arch_atomic_dec_return_release +#endif + +#ifndef arch_atomic_dec_return +static __always_inline int +arch_atomic_dec_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_dec_return arch_atomic_dec_return +#endif + +#endif /* arch_atomic_dec_return_relaxed */ + +#ifndef arch_atomic_fetch_dec_relaxed +#ifdef arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec +#endif /* arch_atomic_fetch_dec */ + +#ifndef arch_atomic_fetch_dec +static __always_inline int +arch_atomic_fetch_dec(atomic_t *v) +{ + return arch_atomic_fetch_sub(1, v); +} +#define arch_atomic_fetch_dec arch_atomic_fetch_dec +#endif + +#ifndef arch_atomic_fetch_dec_acquire +static __always_inline int +arch_atomic_fetch_dec_acquire(atomic_t *v) +{ + return arch_atomic_fetch_sub_acquire(1, v); +} +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire +#endif + +#ifndef arch_atomic_fetch_dec_release +static __always_inline int +arch_atomic_fetch_dec_release(atomic_t *v) +{ + return arch_atomic_fetch_sub_release(1, v); +} +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release +#endif + +#ifndef arch_atomic_fetch_dec_relaxed +static __always_inline int +arch_atomic_fetch_dec_relaxed(atomic_t *v) +{ + return arch_atomic_fetch_sub_relaxed(1, v); +} +#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed +#endif + +#else /* arch_atomic_fetch_dec_relaxed */ + +#ifndef arch_atomic_fetch_dec_acquire +static __always_inline int +arch_atomic_fetch_dec_acquire(atomic_t *v) +{ + int ret = arch_atomic_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire +#endif + +#ifndef arch_atomic_fetch_dec_release +static __always_inline int +arch_atomic_fetch_dec_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_dec_relaxed(v); +} +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release +#endif + +#ifndef arch_atomic_fetch_dec +static __always_inline int +arch_atomic_fetch_dec(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = 
arch_atomic_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_dec arch_atomic_fetch_dec +#endif + +#endif /* arch_atomic_fetch_dec_relaxed */ + +#ifndef arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and +#define arch_atomic_fetch_and_release arch_atomic_fetch_and +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and +#else /* arch_atomic_fetch_and_relaxed */ + +#ifndef arch_atomic_fetch_and_acquire +static __always_inline int +arch_atomic_fetch_and_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire +#endif + +#ifndef arch_atomic_fetch_and_release +static __always_inline int +arch_atomic_fetch_and_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_and_relaxed(i, v); +} +#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release +#endif + +#ifndef arch_atomic_fetch_and +static __always_inline int +arch_atomic_fetch_and(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_and arch_atomic_fetch_and +#endif + +#endif /* arch_atomic_fetch_and_relaxed */ + +#ifndef arch_atomic_andnot +static __always_inline void +arch_atomic_andnot(int i, atomic_t *v) +{ + arch_atomic_and(~i, v); +} +#define arch_atomic_andnot arch_atomic_andnot +#endif + +#ifndef arch_atomic_fetch_andnot_relaxed +#ifdef arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot +#endif /* arch_atomic_fetch_andnot */ + +#ifndef arch_atomic_fetch_andnot +static __always_inline int +arch_atomic_fetch_andnot(int i, atomic_t *v) +{ + return arch_atomic_fetch_and(~i, v); +} +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot +#endif + +#ifndef arch_atomic_fetch_andnot_acquire +static __always_inline int +arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_acquire(~i, v); +} +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire +#endif + +#ifndef arch_atomic_fetch_andnot_release +static __always_inline int +arch_atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_release(~i, v); +} +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#endif + +#ifndef arch_atomic_fetch_andnot_relaxed +static __always_inline int +arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_relaxed(~i, v); +} +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#endif + +#else /* arch_atomic_fetch_andnot_relaxed */ + +#ifndef arch_atomic_fetch_andnot_acquire +static __always_inline int +arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire +#endif + +#ifndef arch_atomic_fetch_andnot_release +static __always_inline int +arch_atomic_fetch_andnot_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_andnot_relaxed(i, v); +} +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#endif + +#ifndef 
arch_atomic_fetch_andnot +static __always_inline int +arch_atomic_fetch_andnot(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot +#endif + +#endif /* arch_atomic_fetch_andnot_relaxed */ + +#ifndef arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or +#define arch_atomic_fetch_or_release arch_atomic_fetch_or +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or +#else /* arch_atomic_fetch_or_relaxed */ + +#ifndef arch_atomic_fetch_or_acquire +static __always_inline int +arch_atomic_fetch_or_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire +#endif + +#ifndef arch_atomic_fetch_or_release +static __always_inline int +arch_atomic_fetch_or_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_or_relaxed(i, v); +} +#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release +#endif + +#ifndef arch_atomic_fetch_or +static __always_inline int +arch_atomic_fetch_or(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_or arch_atomic_fetch_or +#endif + +#endif /* arch_atomic_fetch_or_relaxed */ + +#ifndef arch_atomic_fetch_xor_relaxed +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor +#else /* arch_atomic_fetch_xor_relaxed */ + +#ifndef arch_atomic_fetch_xor_acquire +static __always_inline int +arch_atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire +#endif + +#ifndef arch_atomic_fetch_xor_release +static __always_inline int +arch_atomic_fetch_xor_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_xor_relaxed(i, v); +} +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release +#endif + +#ifndef arch_atomic_fetch_xor +static __always_inline int +arch_atomic_fetch_xor(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_xor arch_atomic_fetch_xor +#endif + +#endif /* arch_atomic_fetch_xor_relaxed */ + +#ifndef arch_atomic_xchg_relaxed +#define arch_atomic_xchg_acquire arch_atomic_xchg +#define arch_atomic_xchg_release arch_atomic_xchg +#define arch_atomic_xchg_relaxed arch_atomic_xchg +#else /* arch_atomic_xchg_relaxed */ + +#ifndef arch_atomic_xchg_acquire +static __always_inline int +arch_atomic_xchg_acquire(atomic_t *v, int i) +{ + int ret = arch_atomic_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire +#endif + +#ifndef arch_atomic_xchg_release +static __always_inline int +arch_atomic_xchg_release(atomic_t *v, int i) +{ + __atomic_release_fence(); + return arch_atomic_xchg_relaxed(v, i); +} +#define arch_atomic_xchg_release arch_atomic_xchg_release +#endif + +#ifndef arch_atomic_xchg +static __always_inline int +arch_atomic_xchg(atomic_t *v, int i) +{ + int ret; + 
__atomic_pre_full_fence(); + ret = arch_atomic_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_xchg arch_atomic_xchg +#endif + +#endif /* arch_atomic_xchg_relaxed */ + +#ifndef arch_atomic_cmpxchg_relaxed +#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg +#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg +#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg +#else /* arch_atomic_cmpxchg_relaxed */ + +#ifndef arch_atomic_cmpxchg_acquire +static __always_inline int +arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + int ret = arch_atomic_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire +#endif + +#ifndef arch_atomic_cmpxchg_release +static __always_inline int +arch_atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + __atomic_release_fence(); + return arch_atomic_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release +#endif + +#ifndef arch_atomic_cmpxchg +static __always_inline int +arch_atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_cmpxchg arch_atomic_cmpxchg +#endif + +#endif /* arch_atomic_cmpxchg_relaxed */ + +#ifndef arch_atomic_try_cmpxchg_relaxed +#ifdef arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg +#endif /* arch_atomic_try_cmpxchg */ + +#ifndef arch_atomic_try_cmpxchg +static __always_inline bool +arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg +#endif + +#ifndef arch_atomic_try_cmpxchg_acquire +static __always_inline bool +arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic_try_cmpxchg_release +static __always_inline bool +arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release +#endif + +#ifndef arch_atomic_try_cmpxchg_relaxed +static __always_inline bool +arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed +#endif + +#else /* arch_atomic_try_cmpxchg_relaxed */ + +#ifndef arch_atomic_try_cmpxchg_acquire +static __always_inline bool +arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic_try_cmpxchg_release +static __always_inline bool +arch_atomic_try_cmpxchg_release(atomic_t *v, 
int *old, int new) +{ + __atomic_release_fence(); + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release +#endif + +#ifndef arch_atomic_try_cmpxchg +static __always_inline bool +arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg +#endif + +#endif /* arch_atomic_try_cmpxchg_relaxed */ + +#ifndef arch_atomic_sub_and_test +/** + * arch_atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic_sub_and_test(int i, atomic_t *v) +{ + return arch_atomic_sub_return(i, v) == 0; +} +#define arch_atomic_sub_and_test arch_atomic_sub_and_test +#endif + +#ifndef arch_atomic_dec_and_test +/** + * arch_atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __always_inline bool +arch_atomic_dec_and_test(atomic_t *v) +{ + return arch_atomic_dec_return(v) == 0; +} +#define arch_atomic_dec_and_test arch_atomic_dec_and_test +#endif + +#ifndef arch_atomic_inc_and_test +/** + * arch_atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic_inc_and_test(atomic_t *v) +{ + return arch_atomic_inc_return(v) == 0; +} +#define arch_atomic_inc_and_test arch_atomic_inc_and_test +#endif + +#ifndef arch_atomic_add_negative +/** + * arch_atomic_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic_add_negative(int i, atomic_t *v) +{ + return arch_atomic_add_return(i, v) < 0; +} +#define arch_atomic_add_negative arch_atomic_add_negative +#endif + +#ifndef arch_atomic_fetch_add_unless +/** + * arch_atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static __always_inline int +arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = arch_atomic_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!arch_atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless +#endif + +#ifndef arch_atomic_add_unless +/** + * arch_atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. 
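
A note on the loop shape used by arch_atomic_fetch_add_unless() above (and by the inc_unless_negative/dec_unless_positive helpers below): arch_atomic_try_cmpxchg() writes the value it actually observed back through its old pointer on failure, so the retry loop never has to re-read the counter itself. The same shape works for any conditional update; a minimal sketch, using a hypothetical "increment below a cap" helper:

/* Hypothetical: increment v unless it has reached limit (sketch only). */
static __always_inline bool atomic_inc_below(atomic_t *v, int limit)
{
	int c = arch_atomic_read(v);

	do {
		if (unlikely(c >= limit))
			return false;	/* already at the cap */
	} while (!arch_atomic_try_cmpxchg(v, &c, c + 1));

	return true;
}
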
+ */
+static __always_inline bool
+arch_atomic_add_unless(atomic_t *v, int a, int u)
+{
+	return arch_atomic_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic_add_unless arch_atomic_add_unless
+#endif
+
+#ifndef arch_atomic_inc_not_zero
+/**
+ * arch_atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic_inc_not_zero(atomic_t *v)
+{
+	return arch_atomic_add_unless(v, 1, 0);
+}
+#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
+#endif
+
+#ifndef arch_atomic_inc_unless_negative
+static __always_inline bool
+arch_atomic_inc_unless_negative(atomic_t *v)
+{
+	int c = arch_atomic_read(v);
+
+	do {
+		if (unlikely(c < 0))
+			return false;
+	} while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
+
+	return true;
+}
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#endif
+
+#ifndef arch_atomic_dec_unless_positive
+static __always_inline bool
+arch_atomic_dec_unless_positive(atomic_t *v)
+{
+	int c = arch_atomic_read(v);
+
+	do {
+		if (unlikely(c > 0))
+			return false;
+	} while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
+
+	return true;
+}
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#endif
+
+#ifndef arch_atomic_dec_if_positive
+static __always_inline int
+arch_atomic_dec_if_positive(atomic_t *v)
+{
+	int dec, c = arch_atomic_read(v);
+
+	do {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+	} while (!arch_atomic_try_cmpxchg(v, &c, dec));
+
+	return dec;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+#endif
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+#ifndef arch_atomic64_read_acquire
+static __always_inline s64
+arch_atomic64_read_acquire(const atomic64_t *v)
+{
+	return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic64_read_acquire arch_atomic64_read_acquire
+#endif
+
+#ifndef arch_atomic64_set_release
+static __always_inline void
+arch_atomic64_set_release(atomic64_t *v, s64 i)
+{
+	smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic64_set_release arch_atomic64_set_release
+#endif
+
+#ifndef arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
+#else /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_add_return_acquire
+static __always_inline s64
+arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+	s64 ret = arch_atomic64_add_return_relaxed(i, v);
+	__atomic_acquire_fence();
+	return ret;
+}
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
+#endif
+
+#ifndef arch_atomic64_add_return_release
+static __always_inline s64
+arch_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+	__atomic_release_fence();
+	return arch_atomic64_add_return_relaxed(i, v);
+}
+#define arch_atomic64_add_return_release arch_atomic64_add_return_release
+#endif
+
+#ifndef arch_atomic64_add_return
+static __always_inline s64
+arch_atomic64_add_return(s64 i, atomic64_t *v)
+{
+	s64 ret;
+	__atomic_pre_full_fence();
+	ret = arch_atomic64_add_return_relaxed(i, v);
+	__atomic_post_full_fence();
+	return ret;
+}
+#define arch_atomic64_add_return arch_atomic64_add_return
+#endif
+
+#endif /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire
arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add +#else /* arch_atomic64_fetch_add_relaxed */ + +#ifndef arch_atomic64_fetch_add_acquire +static __always_inline s64 +arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire +#endif + +#ifndef arch_atomic64_fetch_add_release +static __always_inline s64 +arch_atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_add_relaxed(i, v); +} +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release +#endif + +#ifndef arch_atomic64_fetch_add +static __always_inline s64 +arch_atomic64_fetch_add(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_add arch_atomic64_fetch_add +#endif + +#endif /* arch_atomic64_fetch_add_relaxed */ + +#ifndef arch_atomic64_sub_return_relaxed +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return +#define arch_atomic64_sub_return_release arch_atomic64_sub_return +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return +#else /* arch_atomic64_sub_return_relaxed */ + +#ifndef arch_atomic64_sub_return_acquire +static __always_inline s64 +arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire +#endif + +#ifndef arch_atomic64_sub_return_release +static __always_inline s64 +arch_atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_sub_return_relaxed(i, v); +} +#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release +#endif + +#ifndef arch_atomic64_sub_return +static __always_inline s64 +arch_atomic64_sub_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_sub_return arch_atomic64_sub_return +#endif + +#endif /* arch_atomic64_sub_return_relaxed */ + +#ifndef arch_atomic64_fetch_sub_relaxed +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub +#else /* arch_atomic64_fetch_sub_relaxed */ + +#ifndef arch_atomic64_fetch_sub_acquire +static __always_inline s64 +arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire +#endif + +#ifndef arch_atomic64_fetch_sub_release +static __always_inline s64 +arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_sub_relaxed(i, v); +} +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release +#endif + +#ifndef arch_atomic64_fetch_sub +static __always_inline s64 +arch_atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define 
arch_atomic64_fetch_sub arch_atomic64_fetch_sub +#endif + +#endif /* arch_atomic64_fetch_sub_relaxed */ + +#ifndef arch_atomic64_inc +static __always_inline void +arch_atomic64_inc(atomic64_t *v) +{ + arch_atomic64_add(1, v); +} +#define arch_atomic64_inc arch_atomic64_inc +#endif + +#ifndef arch_atomic64_inc_return_relaxed +#ifdef arch_atomic64_inc_return +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return +#define arch_atomic64_inc_return_release arch_atomic64_inc_return +#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return +#endif /* arch_atomic64_inc_return */ + +#ifndef arch_atomic64_inc_return +static __always_inline s64 +arch_atomic64_inc_return(atomic64_t *v) +{ + return arch_atomic64_add_return(1, v); +} +#define arch_atomic64_inc_return arch_atomic64_inc_return +#endif + +#ifndef arch_atomic64_inc_return_acquire +static __always_inline s64 +arch_atomic64_inc_return_acquire(atomic64_t *v) +{ + return arch_atomic64_add_return_acquire(1, v); +} +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire +#endif + +#ifndef arch_atomic64_inc_return_release +static __always_inline s64 +arch_atomic64_inc_return_release(atomic64_t *v) +{ + return arch_atomic64_add_return_release(1, v); +} +#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release +#endif + +#ifndef arch_atomic64_inc_return_relaxed +static __always_inline s64 +arch_atomic64_inc_return_relaxed(atomic64_t *v) +{ + return arch_atomic64_add_return_relaxed(1, v); +} +#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed +#endif + +#else /* arch_atomic64_inc_return_relaxed */ + +#ifndef arch_atomic64_inc_return_acquire +static __always_inline s64 +arch_atomic64_inc_return_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire +#endif + +#ifndef arch_atomic64_inc_return_release +static __always_inline s64 +arch_atomic64_inc_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_inc_return_relaxed(v); +} +#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release +#endif + +#ifndef arch_atomic64_inc_return +static __always_inline s64 +arch_atomic64_inc_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_inc_return arch_atomic64_inc_return +#endif + +#endif /* arch_atomic64_inc_return_relaxed */ + +#ifndef arch_atomic64_fetch_inc_relaxed +#ifdef arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc +#endif /* arch_atomic64_fetch_inc */ + +#ifndef arch_atomic64_fetch_inc +static __always_inline s64 +arch_atomic64_fetch_inc(atomic64_t *v) +{ + return arch_atomic64_fetch_add(1, v); +} +#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc +#endif + +#ifndef arch_atomic64_fetch_inc_acquire +static __always_inline s64 +arch_atomic64_fetch_inc_acquire(atomic64_t *v) +{ + return arch_atomic64_fetch_add_acquire(1, v); +} +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire +#endif + +#ifndef arch_atomic64_fetch_inc_release +static __always_inline s64 +arch_atomic64_fetch_inc_release(atomic64_t *v) +{ + return arch_atomic64_fetch_add_release(1, v); +} +#define 
arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release +#endif + +#ifndef arch_atomic64_fetch_inc_relaxed +static __always_inline s64 +arch_atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + return arch_atomic64_fetch_add_relaxed(1, v); +} +#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed +#endif + +#else /* arch_atomic64_fetch_inc_relaxed */ + +#ifndef arch_atomic64_fetch_inc_acquire +static __always_inline s64 +arch_atomic64_fetch_inc_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire +#endif + +#ifndef arch_atomic64_fetch_inc_release +static __always_inline s64 +arch_atomic64_fetch_inc_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_inc_relaxed(v); +} +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release +#endif + +#ifndef arch_atomic64_fetch_inc +static __always_inline s64 +arch_atomic64_fetch_inc(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc +#endif + +#endif /* arch_atomic64_fetch_inc_relaxed */ + +#ifndef arch_atomic64_dec +static __always_inline void +arch_atomic64_dec(atomic64_t *v) +{ + arch_atomic64_sub(1, v); +} +#define arch_atomic64_dec arch_atomic64_dec +#endif + +#ifndef arch_atomic64_dec_return_relaxed +#ifdef arch_atomic64_dec_return +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return +#define arch_atomic64_dec_return_release arch_atomic64_dec_return +#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return +#endif /* arch_atomic64_dec_return */ + +#ifndef arch_atomic64_dec_return +static __always_inline s64 +arch_atomic64_dec_return(atomic64_t *v) +{ + return arch_atomic64_sub_return(1, v); +} +#define arch_atomic64_dec_return arch_atomic64_dec_return +#endif + +#ifndef arch_atomic64_dec_return_acquire +static __always_inline s64 +arch_atomic64_dec_return_acquire(atomic64_t *v) +{ + return arch_atomic64_sub_return_acquire(1, v); +} +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire +#endif + +#ifndef arch_atomic64_dec_return_release +static __always_inline s64 +arch_atomic64_dec_return_release(atomic64_t *v) +{ + return arch_atomic64_sub_return_release(1, v); +} +#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release +#endif + +#ifndef arch_atomic64_dec_return_relaxed +static __always_inline s64 +arch_atomic64_dec_return_relaxed(atomic64_t *v) +{ + return arch_atomic64_sub_return_relaxed(1, v); +} +#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed +#endif + +#else /* arch_atomic64_dec_return_relaxed */ + +#ifndef arch_atomic64_dec_return_acquire +static __always_inline s64 +arch_atomic64_dec_return_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire +#endif + +#ifndef arch_atomic64_dec_return_release +static __always_inline s64 +arch_atomic64_dec_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_dec_return_relaxed(v); +} +#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release +#endif + +#ifndef arch_atomic64_dec_return +static __always_inline s64 +arch_atomic64_dec_return(atomic64_t *v) +{ + s64 ret; + 
__atomic_pre_full_fence(); + ret = arch_atomic64_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_dec_return arch_atomic64_dec_return +#endif + +#endif /* arch_atomic64_dec_return_relaxed */ + +#ifndef arch_atomic64_fetch_dec_relaxed +#ifdef arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec +#endif /* arch_atomic64_fetch_dec */ + +#ifndef arch_atomic64_fetch_dec +static __always_inline s64 +arch_atomic64_fetch_dec(atomic64_t *v) +{ + return arch_atomic64_fetch_sub(1, v); +} +#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec +#endif + +#ifndef arch_atomic64_fetch_dec_acquire +static __always_inline s64 +arch_atomic64_fetch_dec_acquire(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_acquire(1, v); +} +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire +#endif + +#ifndef arch_atomic64_fetch_dec_release +static __always_inline s64 +arch_atomic64_fetch_dec_release(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_release(1, v); +} +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release +#endif + +#ifndef arch_atomic64_fetch_dec_relaxed +static __always_inline s64 +arch_atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_relaxed(1, v); +} +#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed +#endif + +#else /* arch_atomic64_fetch_dec_relaxed */ + +#ifndef arch_atomic64_fetch_dec_acquire +static __always_inline s64 +arch_atomic64_fetch_dec_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire +#endif + +#ifndef arch_atomic64_fetch_dec_release +static __always_inline s64 +arch_atomic64_fetch_dec_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_dec_relaxed(v); +} +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release +#endif + +#ifndef arch_atomic64_fetch_dec +static __always_inline s64 +arch_atomic64_fetch_dec(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec +#endif + +#endif /* arch_atomic64_fetch_dec_relaxed */ + +#ifndef arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and +#else /* arch_atomic64_fetch_and_relaxed */ + +#ifndef arch_atomic64_fetch_and_acquire +static __always_inline s64 +arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire +#endif + +#ifndef arch_atomic64_fetch_and_release +static __always_inline s64 +arch_atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_and_relaxed(i, v); +} +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release +#endif + +#ifndef arch_atomic64_fetch_and +static __always_inline s64 +arch_atomic64_fetch_and(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = 
arch_atomic64_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#endif + +#endif /* arch_atomic64_fetch_and_relaxed */ + +#ifndef arch_atomic64_andnot +static __always_inline void +arch_atomic64_andnot(s64 i, atomic64_t *v) +{ + arch_atomic64_and(~i, v); +} +#define arch_atomic64_andnot arch_atomic64_andnot +#endif + +#ifndef arch_atomic64_fetch_andnot_relaxed +#ifdef arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot +#endif /* arch_atomic64_fetch_andnot */ + +#ifndef arch_atomic64_fetch_andnot +static __always_inline s64 +arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and(~i, v); +} +#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot +#endif + +#ifndef arch_atomic64_fetch_andnot_acquire +static __always_inline s64 +arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_acquire(~i, v); +} +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#endif + +#ifndef arch_atomic64_fetch_andnot_release +static __always_inline s64 +arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_release(~i, v); +} +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#endif + +#ifndef arch_atomic64_fetch_andnot_relaxed +static __always_inline s64 +arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_relaxed(~i, v); +} +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#endif + +#else /* arch_atomic64_fetch_andnot_relaxed */ + +#ifndef arch_atomic64_fetch_andnot_acquire +static __always_inline s64 +arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#endif + +#ifndef arch_atomic64_fetch_andnot_release +static __always_inline s64 +arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_andnot_relaxed(i, v); +} +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#endif + +#ifndef arch_atomic64_fetch_andnot +static __always_inline s64 +arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot +#endif + +#endif /* arch_atomic64_fetch_andnot_relaxed */ + +#ifndef arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or +#else /* arch_atomic64_fetch_or_relaxed */ + +#ifndef arch_atomic64_fetch_or_acquire +static __always_inline s64 +arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire +#endif + +#ifndef arch_atomic64_fetch_or_release +static __always_inline s64 +arch_atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + 
__atomic_release_fence(); + return arch_atomic64_fetch_or_relaxed(i, v); +} +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release +#endif + +#ifndef arch_atomic64_fetch_or +static __always_inline s64 +arch_atomic64_fetch_or(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#endif + +#endif /* arch_atomic64_fetch_or_relaxed */ + +#ifndef arch_atomic64_fetch_xor_relaxed +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor +#else /* arch_atomic64_fetch_xor_relaxed */ + +#ifndef arch_atomic64_fetch_xor_acquire +static __always_inline s64 +arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire +#endif + +#ifndef arch_atomic64_fetch_xor_release +static __always_inline s64 +arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_xor_relaxed(i, v); +} +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release +#endif + +#ifndef arch_atomic64_fetch_xor +static __always_inline s64 +arch_atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor +#endif + +#endif /* arch_atomic64_fetch_xor_relaxed */ + +#ifndef arch_atomic64_xchg_relaxed +#define arch_atomic64_xchg_acquire arch_atomic64_xchg +#define arch_atomic64_xchg_release arch_atomic64_xchg +#define arch_atomic64_xchg_relaxed arch_atomic64_xchg +#else /* arch_atomic64_xchg_relaxed */ + +#ifndef arch_atomic64_xchg_acquire +static __always_inline s64 +arch_atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + s64 ret = arch_atomic64_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire +#endif + +#ifndef arch_atomic64_xchg_release +static __always_inline s64 +arch_atomic64_xchg_release(atomic64_t *v, s64 i) +{ + __atomic_release_fence(); + return arch_atomic64_xchg_relaxed(v, i); +} +#define arch_atomic64_xchg_release arch_atomic64_xchg_release +#endif + +#ifndef arch_atomic64_xchg +static __always_inline s64 +arch_atomic64_xchg(atomic64_t *v, s64 i) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_xchg arch_atomic64_xchg +#endif + +#endif /* arch_atomic64_xchg_relaxed */ + +#ifndef arch_atomic64_cmpxchg_relaxed +#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg +#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg +#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg +#else /* arch_atomic64_cmpxchg_relaxed */ + +#ifndef arch_atomic64_cmpxchg_acquire +static __always_inline s64 +arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_cmpxchg_release +static __always_inline s64 
+arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + __atomic_release_fence(); + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release +#endif + +#ifndef arch_atomic64_cmpxchg +static __always_inline s64 +arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg +#endif + +#endif /* arch_atomic64_cmpxchg_relaxed */ + +#ifndef arch_atomic64_try_cmpxchg_relaxed +#ifdef arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg +#endif /* arch_atomic64_try_cmpxchg */ + +#ifndef arch_atomic64_try_cmpxchg +static __always_inline bool +arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg +#endif + +#ifndef arch_atomic64_try_cmpxchg_acquire +static __always_inline bool +arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_try_cmpxchg_release +static __always_inline bool +arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release +#endif + +#ifndef arch_atomic64_try_cmpxchg_relaxed +static __always_inline bool +arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed +#endif + +#else /* arch_atomic64_try_cmpxchg_relaxed */ + +#ifndef arch_atomic64_try_cmpxchg_acquire +static __always_inline bool +arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_try_cmpxchg_release +static __always_inline bool +arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + __atomic_release_fence(); + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release +#endif + +#ifndef arch_atomic64_try_cmpxchg +static __always_inline bool +arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg +#endif + +#endif /* arch_atomic64_try_cmpxchg_relaxed */ + +#ifndef arch_atomic64_sub_and_test +/** + * arch_atomic64_sub_and_test - subtract value from variable and test result + * @i: 
integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + return arch_atomic64_sub_return(i, v) == 0; +} +#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test +#endif + +#ifndef arch_atomic64_dec_and_test +/** + * arch_atomic64_dec_and_test - decrement and test + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __always_inline bool +arch_atomic64_dec_and_test(atomic64_t *v) +{ + return arch_atomic64_dec_return(v) == 0; +} +#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test +#endif + +#ifndef arch_atomic64_inc_and_test +/** + * arch_atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic64_inc_and_test(atomic64_t *v) +{ + return arch_atomic64_inc_return(v) == 0; +} +#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test +#endif + +#ifndef arch_atomic64_add_negative +/** + * arch_atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic64_add_negative(s64 i, atomic64_t *v) +{ + return arch_atomic64_add_return(i, v) < 0; +} +#define arch_atomic64_add_negative arch_atomic64_add_negative +#endif + +#ifndef arch_atomic64_fetch_add_unless +/** + * arch_atomic64_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static __always_inline s64 +arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!arch_atomic64_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless +#endif + +#ifndef arch_atomic64_add_unless +/** + * arch_atomic64_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static __always_inline bool +arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + return arch_atomic64_fetch_add_unless(v, a, u) != u; +} +#define arch_atomic64_add_unless arch_atomic64_add_unless +#endif + +#ifndef arch_atomic64_inc_not_zero +/** + * arch_atomic64_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. 
+ */ +static __always_inline bool +arch_atomic64_inc_not_zero(atomic64_t *v) +{ + return arch_atomic64_add_unless(v, 1, 0); +} +#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero +#endif + +#ifndef arch_atomic64_inc_unless_negative +static __always_inline bool +arch_atomic64_inc_unless_negative(atomic64_t *v) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative +#endif + +#ifndef arch_atomic64_dec_unless_positive +static __always_inline bool +arch_atomic64_dec_unless_positive(atomic64_t *v) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive +#endif + +#ifndef arch_atomic64_dec_if_positive +static __always_inline s64 +arch_atomic64_dec_if_positive(atomic64_t *v) +{ + s64 dec, c = arch_atomic64_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!arch_atomic64_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive +#endif + +#endif /* _LINUX_ATOMIC_FALLBACK_H */ +// cca554917d7ea73d5e3e7397dd70c484cad9b2c4 diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h new file mode 100644 index 000000000000..f6fe36c428df --- /dev/null +++ b/include/linux/atomic/atomic-instrumented.h @@ -0,0 +1,1337 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-instrumented.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +/* + * This file provides wrappers with KASAN instrumentation for atomic operations. + * To use this functionality an arch's atomic.h file needs to define all + * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include + * this file at the end. This file provides atomic_read() that forwards to + * arch_atomic_read() for actual atomic operation. + * Note: if an arch atomic operation is implemented by means of other atomic + * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use + * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid + * double instrumentation. 
+ */
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
+
+static __always_inline int
+atomic_read(const atomic_t *v)
+{
+	instrument_atomic_read(v, sizeof(*v));
+	return arch_atomic_read(v);
+}
+
+static __always_inline int
+atomic_read_acquire(const atomic_t *v)
+{
+	instrument_atomic_read(v, sizeof(*v));
+	return arch_atomic_read_acquire(v);
+}
+
+static __always_inline void
+atomic_set(atomic_t *v, int i)
+{
+	instrument_atomic_write(v, sizeof(*v));
+	arch_atomic_set(v, i);
+}
+
+static __always_inline void
+atomic_set_release(atomic_t *v, int i)
+{
+	instrument_atomic_write(v, sizeof(*v));
+	arch_atomic_set_release(v, i);
+}
+
+static __always_inline void
+atomic_add(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	arch_atomic_add(i, v);
+}
+
+static __always_inline int
+atomic_add_return(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_add_return(i, v);
+}
+
+static __always_inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_add_return_acquire(i, v);
+}
+
+static __always_inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_add_return_release(i, v);
+}
+
+static __always_inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_add_return_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_fetch_add_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_fetch_add_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_sub(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	arch_atomic_sub(i, v);
+}
+
+static __always_inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_sub_return_acquire(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_sub_return_release(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_sub_return_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_fetch_sub_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+	instrument_atomic_read_write(v, sizeof(*v));
+	return arch_atomic_fetch_sub_release(i, v);
+}
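The preamble above warns against double instrumentation. As a minimal sketch of what that rule means in practice, consider an architecture that builds one atomic operation out of others. This is hypothetical arch code, not part of this patch, and the helper name is made up for illustration:

/*
 * Illustrative only: a hypothetical arch_atomic_inc() built from a
 * try_cmpxchg loop. Calling atomic_read()/atomic_try_cmpxchg() here
 * would go through the instrumented wrappers in this file and report
 * each memory access twice under KASAN and similar instrumentation,
 * so the arch_*() variants must be used instead.
 */
static __always_inline void
arch_atomic_inc_via_cmpxchg(atomic_t *v)
{
	int old = arch_atomic_read(v);	/* not atomic_read() */

	/* on failure, old is refreshed with the current value */
	while (!arch_atomic_try_cmpxchg(v, &old, old + 1))
		;
}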
+ +static __always_inline int +atomic_fetch_sub_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic_inc(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_inc(v); +} + +static __always_inline int +atomic_inc_return(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return(v); +} + +static __always_inline int +atomic_inc_return_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return_acquire(v); +} + +static __always_inline int +atomic_inc_return_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return_release(v); +} + +static __always_inline int +atomic_inc_return_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return_relaxed(v); +} + +static __always_inline int +atomic_fetch_inc(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc(v); +} + +static __always_inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_acquire(v); +} + +static __always_inline int +atomic_fetch_inc_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_release(v); +} + +static __always_inline int +atomic_fetch_inc_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic_dec(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_dec(v); +} + +static __always_inline int +atomic_dec_return(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return(v); +} + +static __always_inline int +atomic_dec_return_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return_acquire(v); +} + +static __always_inline int +atomic_dec_return_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return_release(v); +} + +static __always_inline int +atomic_dec_return_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return_relaxed(v); +} + +static __always_inline int +atomic_fetch_dec(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec(v); +} + +static __always_inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_acquire(v); +} + +static __always_inline int +atomic_fetch_dec_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_release(v); +} + +static __always_inline int +atomic_fetch_dec_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic_and(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_and(i, v); +} + +static __always_inline int +atomic_fetch_and(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_and(i, v); +} + +static __always_inline int +atomic_fetch_and_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_and_acquire(i, v); +} + +static __always_inline int 
+atomic_fetch_and_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_and_release(i, v); +} + +static __always_inline int +atomic_fetch_and_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic_andnot(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_andnot(i, v); +} + +static __always_inline int +atomic_fetch_andnot(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot(i, v); +} + +static __always_inline int +atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_acquire(i, v); +} + +static __always_inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_release(i, v); +} + +static __always_inline int +atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic_or(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_or(i, v); +} + +static __always_inline int +atomic_fetch_or(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or(i, v); +} + +static __always_inline int +atomic_fetch_or_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or_acquire(i, v); +} + +static __always_inline int +atomic_fetch_or_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or_release(i, v); +} + +static __always_inline int +atomic_fetch_or_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic_xor(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_xor(i, v); +} + +static __always_inline int +atomic_fetch_xor(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor(i, v); +} + +static __always_inline int +atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_acquire(i, v); +} + +static __always_inline int +atomic_fetch_xor_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_release(i, v); +} + +static __always_inline int +atomic_fetch_xor_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_relaxed(i, v); +} + +static __always_inline int +atomic_xchg(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg(v, i); +} + +static __always_inline int +atomic_xchg_acquire(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg_acquire(v, i); +} + +static __always_inline int +atomic_xchg_release(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg_release(v, i); +} + +static __always_inline int +atomic_xchg_relaxed(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg_relaxed(v, i); +} + +static __always_inline int +atomic_cmpxchg(atomic_t *v, int old, int new) +{ + 
instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg(v, old, new); +} + +static __always_inline int +atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_acquire(v, old, new); +} + +static __always_inline int +atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_release(v, old, new); +} + +static __always_inline int +atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_release(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_sub_and_test(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_and_test(i, v); +} + +static __always_inline bool +atomic_dec_and_test(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_and_test(v); +} + +static __always_inline bool +atomic_inc_and_test(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_and_test(v); +} + +static __always_inline bool +atomic_add_negative(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_negative(i, v); +} + +static __always_inline int +atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic_add_unless(atomic_t *v, int a, int u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_unless(v, a, u); +} + +static __always_inline bool +atomic_inc_not_zero(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_not_zero(v); +} + +static __always_inline bool +atomic_inc_unless_negative(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_unless_negative(v); +} + +static __always_inline bool +atomic_dec_unless_positive(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_unless_positive(v); +} + +static __always_inline int +atomic_dec_if_positive(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_if_positive(v); +} + +static __always_inline s64 +atomic64_read(const atomic64_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic64_read(v); +} + +static __always_inline s64 +atomic64_read_acquire(const atomic64_t *v) +{ + instrument_atomic_read(v, 
sizeof(*v)); + return arch_atomic64_read_acquire(v); +} + +static __always_inline void +atomic64_set(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_set(v, i); +} + +static __always_inline void +atomic64_set_release(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_set_release(v, i); +} + +static __always_inline void +atomic64_add(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_add(i, v); +} + +static __always_inline s64 +atomic64_add_return(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return(i, v); +} + +static __always_inline s64 +atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return_acquire(i, v); +} + +static __always_inline s64 +atomic64_add_return_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return_release(i, v); +} + +static __always_inline s64 +atomic64_add_return_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return_relaxed(i, v); +} + +static __always_inline s64 +atomic64_fetch_add(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic64_sub(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_sub(i, v); +} + +static __always_inline s64 +atomic64_sub_return(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return(i, v); +} + +static __always_inline s64 +atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return_acquire(i, v); +} + +static __always_inline s64 +atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return_release(i, v); +} + +static __always_inline s64 +atomic64_sub_return_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return_relaxed(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic64_inc(atomic64_t 
*v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_inc(v); +} + +static __always_inline s64 +atomic64_inc_return(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return(v); +} + +static __always_inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return_acquire(v); +} + +static __always_inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return_release(v); +} + +static __always_inline s64 +atomic64_inc_return_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return_relaxed(v); +} + +static __always_inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc(v); +} + +static __always_inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_acquire(v); +} + +static __always_inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_release(v); +} + +static __always_inline s64 +atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic64_dec(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_dec(v); +} + +static __always_inline s64 +atomic64_dec_return(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return(v); +} + +static __always_inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return_acquire(v); +} + +static __always_inline s64 +atomic64_dec_return_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return_release(v); +} + +static __always_inline s64 +atomic64_dec_return_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return_relaxed(v); +} + +static __always_inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec(v); +} + +static __always_inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_acquire(v); +} + +static __always_inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_release(v); +} + +static __always_inline s64 +atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic64_and(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_and(i, v); +} + +static __always_inline s64 +atomic64_fetch_and(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and(i, v); +} + +static __always_inline s64 +atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic64_fetch_and_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic64_andnot(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_andnot(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic64_or(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_or(i, v); +} + +static __always_inline s64 +atomic64_fetch_or(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or(i, v); +} + +static __always_inline s64 +atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic64_xor(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_xor(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_relaxed(i, v); +} + +static __always_inline s64 +atomic64_xchg(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_xchg(v, i); +} + +static __always_inline s64 +atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_xchg_acquire(v, i); +} + +static __always_inline s64 +atomic64_xchg_release(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_xchg_release(v, i); +} + +static __always_inline s64 +atomic64_xchg_relaxed(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_xchg_relaxed(v, i); +} + +static __always_inline s64 +atomic64_cmpxchg(atomic64_t *v, s64 old, s64 
new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg(v, old, new); +} + +static __always_inline s64 +atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_acquire(v, old, new); +} + +static __always_inline s64 +atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_release(v, old, new); +} + +static __always_inline s64 +atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_release(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_and_test(i, v); +} + +static __always_inline bool +atomic64_dec_and_test(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_and_test(v); +} + +static __always_inline bool +atomic64_inc_and_test(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_and_test(v); +} + +static __always_inline bool +atomic64_add_negative(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_negative(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_unless(v, a, u); +} + +static __always_inline bool +atomic64_inc_not_zero(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_not_zero(v); +} + +static __always_inline bool +atomic64_inc_unless_negative(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_unless_negative(v); +} + +static __always_inline bool +atomic64_dec_unless_positive(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_unless_positive(v); +} + +static __always_inline s64 +atomic64_dec_if_positive(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_if_positive(v); +} + +#define xchg(ptr, ...) 
\ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg(__ai_ptr, __VA_ARGS__); \ +}) + +#define xchg_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ +}) + +#define xchg_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_release(__ai_ptr, __VA_ARGS__); \ +}) + +#define xchg_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ +}) + +#define try_cmpxchg(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg_acquire(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg_release(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg_relaxed(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define cmpxchg_local(ptr, ...) 
\
+({ \
+	typeof(ptr) __ai_ptr = (ptr); \
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_local(ptr, ...) \
+({ \
+	typeof(ptr) __ai_ptr = (ptr); \
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define sync_cmpxchg(ptr, ...) \
+({ \
+	typeof(ptr) __ai_ptr = (ptr); \
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_double(ptr, ...) \
+({ \
+	typeof(ptr) __ai_ptr = (ptr); \
+	instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+	arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
+})
+
+
+#define cmpxchg_double_local(ptr, ...) \
+({ \
+	typeof(ptr) __ai_ptr = (ptr); \
+	instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+	arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+// 5edd72f105b6f54b7e9492d794abee88e6912d29
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
new file mode 100644
index 000000000000..e40e480e175f
--- /dev/null
+++ b/include/linux/atomic/atomic-long.h
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_LONG_H
+#define _LINUX_ATOMIC_LONG_H
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i)		ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire	atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed	atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i)		ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire	atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed	atomic_cond_read_relaxed
+#endif
+
+#ifdef CONFIG_64BIT
+
+static __always_inline long
+atomic_long_read(const atomic_long_t *v)
+{
+	return atomic64_read(v);
+}
+
+static __always_inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+	return atomic64_read_acquire(v);
+}
+
+static __always_inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+	atomic64_set(v, i);
+}
+
+static __always_inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+	atomic64_set_release(v, i);
+}
+
+static __always_inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+	atomic64_add(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+	return atomic64_add_return(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+	return atomic64_add_return_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+	return atomic64_add_return_release(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+	return atomic64_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_add(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_add_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_add_relaxed(i, v);
+}
+
+static
__always_inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + atomic64_sub(i, v); +} + +static __always_inline long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + return atomic64_sub_return(i, v); +} + +static __always_inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return atomic64_sub_return_acquire(i, v); +} + +static __always_inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return atomic64_sub_return_release(i, v); +} + +static __always_inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return atomic64_sub_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_release(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic_long_inc(atomic_long_t *v) +{ + atomic64_inc(v); +} + +static __always_inline long +atomic_long_inc_return(atomic_long_t *v) +{ + return atomic64_inc_return(v); +} + +static __always_inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return atomic64_inc_return_acquire(v); +} + +static __always_inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + return atomic64_inc_return_release(v); +} + +static __always_inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return atomic64_inc_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + return atomic64_fetch_inc(v); +} + +static __always_inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return atomic64_fetch_inc_acquire(v); +} + +static __always_inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return atomic64_fetch_inc_release(v); +} + +static __always_inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return atomic64_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic_long_dec(atomic_long_t *v) +{ + atomic64_dec(v); +} + +static __always_inline long +atomic_long_dec_return(atomic_long_t *v) +{ + return atomic64_dec_return(v); +} + +static __always_inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return atomic64_dec_return_acquire(v); +} + +static __always_inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + return atomic64_dec_return_release(v); +} + +static __always_inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return atomic64_dec_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + return atomic64_fetch_dec(v); +} + +static __always_inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return atomic64_fetch_dec_acquire(v); +} + +static __always_inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return atomic64_fetch_dec_release(v); +} + +static __always_inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return atomic64_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic_long_and(long i, atomic_long_t *v) +{ + atomic64_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return atomic64_fetch_and(i, v); +} + +static 
__always_inline long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_release(i, v); +} + +static __always_inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + atomic64_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_release(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic_long_or(long i, atomic_long_t *v) +{ + atomic64_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return atomic64_fetch_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_release(i, v); +} + +static __always_inline long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + atomic64_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_release(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_relaxed(i, v); +} + +static __always_inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + return atomic64_xchg(v, i); +} + +static __always_inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return atomic64_xchg_acquire(v, i); +} + +static __always_inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return atomic64_xchg_release(v, i); +} + +static __always_inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return atomic64_xchg_relaxed(v, i); +} + +static __always_inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_release(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg(v, (s64 *)old, 
new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_release(v, (s64 *)old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); +} + +static __always_inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return atomic64_sub_and_test(i, v); +} + +static __always_inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + return atomic64_dec_and_test(v); +} + +static __always_inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + return atomic64_inc_and_test(v); +} + +static __always_inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + return atomic64_add_negative(i, v); +} + +static __always_inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic64_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic64_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + return atomic64_inc_not_zero(v); +} + +static __always_inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return atomic64_inc_unless_negative(v); +} + +static __always_inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return atomic64_dec_unless_positive(v); +} + +static __always_inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + return atomic64_dec_if_positive(v); +} + +#else /* CONFIG_64BIT */ + +static __always_inline long +atomic_long_read(const atomic_long_t *v) +{ + return atomic_read(v); +} + +static __always_inline long +atomic_long_read_acquire(const atomic_long_t *v) +{ + return atomic_read_acquire(v); +} + +static __always_inline void +atomic_long_set(atomic_long_t *v, long i) +{ + atomic_set(v, i); +} + +static __always_inline void +atomic_long_set_release(atomic_long_t *v, long i) +{ + atomic_set_release(v, i); +} + +static __always_inline void +atomic_long_add(long i, atomic_long_t *v) +{ + atomic_add(i, v); +} + +static __always_inline long +atomic_long_add_return(long i, atomic_long_t *v) +{ + return atomic_add_return(i, v); +} + +static __always_inline long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return atomic_add_return_acquire(i, v); +} + +static __always_inline long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return atomic_add_return_release(i, v); +} + +static __always_inline long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return atomic_add_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return atomic_fetch_add(i, v); +} + +static __always_inline long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_add_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return atomic_fetch_add_release(i, v); +} + +static __always_inline long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + atomic_sub(i, v); +} + +static __always_inline long 
+atomic_long_sub_return(long i, atomic_long_t *v) +{ + return atomic_sub_return(i, v); +} + +static __always_inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return atomic_sub_return_acquire(i, v); +} + +static __always_inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return atomic_sub_return_release(i, v); +} + +static __always_inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return atomic_sub_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return atomic_fetch_sub(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_sub_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return atomic_fetch_sub_release(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic_long_inc(atomic_long_t *v) +{ + atomic_inc(v); +} + +static __always_inline long +atomic_long_inc_return(atomic_long_t *v) +{ + return atomic_inc_return(v); +} + +static __always_inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return atomic_inc_return_acquire(v); +} + +static __always_inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + return atomic_inc_return_release(v); +} + +static __always_inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return atomic_inc_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + return atomic_fetch_inc(v); +} + +static __always_inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return atomic_fetch_inc_acquire(v); +} + +static __always_inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return atomic_fetch_inc_release(v); +} + +static __always_inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return atomic_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic_long_dec(atomic_long_t *v) +{ + atomic_dec(v); +} + +static __always_inline long +atomic_long_dec_return(atomic_long_t *v) +{ + return atomic_dec_return(v); +} + +static __always_inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return atomic_dec_return_acquire(v); +} + +static __always_inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + return atomic_dec_return_release(v); +} + +static __always_inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return atomic_dec_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + return atomic_fetch_dec(v); +} + +static __always_inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return atomic_fetch_dec_acquire(v); +} + +static __always_inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return atomic_fetch_dec_release(v); +} + +static __always_inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return atomic_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic_long_and(long i, atomic_long_t *v) +{ + atomic_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return atomic_fetch_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_and_acquire(i, v); +} + +static __always_inline long 
+atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return atomic_fetch_and_release(i, v); +} + +static __always_inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + atomic_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot_release(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic_long_or(long i, atomic_long_t *v) +{ + atomic_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return atomic_fetch_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_or_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return atomic_fetch_or_release(i, v); +} + +static __always_inline long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + atomic_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return atomic_fetch_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_xor_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return atomic_fetch_xor_release(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_xor_relaxed(i, v); +} + +static __always_inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + return atomic_xchg(v, i); +} + +static __always_inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return atomic_xchg_acquire(v, i); +} + +static __always_inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return atomic_xchg_release(v, i); +} + +static __always_inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return atomic_xchg_relaxed(v, i); +} + +static __always_inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return atomic_cmpxchg(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return atomic_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return atomic_cmpxchg_release(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return atomic_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg(v, (int *)old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg_acquire(v, (int *)old, new); +} + +static __always_inline bool 
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg_release(v, (int *)old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg_relaxed(v, (int *)old, new); +} + +static __always_inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return atomic_sub_and_test(i, v); +} + +static __always_inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + return atomic_dec_and_test(v); +} + +static __always_inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + return atomic_inc_and_test(v); +} + +static __always_inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + return atomic_add_negative(i, v); +} + +static __always_inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + return atomic_inc_not_zero(v); +} + +static __always_inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return atomic_inc_unless_negative(v); +} + +static __always_inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return atomic_dec_unless_positive(v); +} + +static __always_inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + return atomic_dec_if_positive(v); +} + +#endif /* CONFIG_64BIT */ +#endif /* _LINUX_ATOMIC_LONG_H */ +// c5552b5d78a0c7584dfd03cba985e78a1a86bbed diff --git a/scripts/atomic/check-atomics.sh b/scripts/atomic/check-atomics.sh index 9c7fbd4bcbce..0e7bab3eb0d1 100755 --- a/scripts/atomic/check-atomics.sh +++ b/scripts/atomic/check-atomics.sh @@ -14,9 +14,9 @@ if [ $? -ne 0 ]; then fi cat < #include @@ -158,5 +158,5 @@ gen_xchg "cmpxchg_double_local" "2 * " cat < #include @@ -98,5 +98,5 @@ done cat < ${LINUXDIR}/include/${header} -- cgit v1.2.3 From 67d1b0de258ad066e1fc85d0ceaa75e107fb45bb Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 13 Jul 2021 11:52:52 +0100 Subject: locking/atomic: add arch_atomic_long*() Now that all architectures provide arch_{atomic,atomic64}_*(), we can build arch_atomic_long_*() atop these, which can be safely used in noinstr code. The regular atomic_long_*() wrappers are built atop these, as we do for {atomic,atomic64}_*() atop arch_{atomic,atomic64}_*(). We don't provide arch_* versions of the cond_read*() variants, as we don't have arch_* versions of the underlying atomic/atomic64 functions (nor the smp_cond_load*() helpers these are typically based on). Note that the headers in this patch under include/linux/atomic/ are generated by the scripts in scripts/atomic/. 
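Concretely (a sketch, not the tree's actual build rule): each generator takes scripts/atomic/atomics.tbl as its argument and writes one header to stdout, so regeneration amounts to redirecting that output over the checked-in file. The invocation below is an assumption based on the gen-atomic-*.sh fragments visible in this series; the tree normally drives this from a wrapper script rather than by hand.

  # Hedged sketch: regenerate the atomic_long headers by hand, assuming
  # a kernel tree in the current directory and the scripts/atomic layout
  # this series shows.
  sh scripts/atomic/gen-atomic-long.sh scripts/atomic/atomics.tbl \
  	> include/linux/atomic/atomic-long.h
  sh scripts/atomic/gen-atomic-instrumented.sh scripts/atomic/atomics.tbl \
  	> include/linux/atomic/atomic-instrumented.h

The "// <sha1>" comment on the last line of each generated header is what lets check-atomics.sh detect a hand-edited header: it recomputes a checksum of everything above that line and warns when the two disagree.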
Signed-off-by: Mark Rutland Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210713105253.7615-5-mark.rutland@arm.com --- include/linux/atomic.h | 2 +- include/linux/atomic/atomic-instrumented.h | 580 ++++++++++++++++++++++++- include/linux/atomic/atomic-long.h | 658 ++++++++++++++--------------- scripts/atomic/gen-atomic-instrumented.sh | 5 + scripts/atomic/gen-atomic-long.sh | 4 +- 5 files changed, 916 insertions(+), 333 deletions(-) (limited to 'scripts') diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 1896a58b5aba..8dd57c3a99e9 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -78,7 +78,7 @@ }) #include -#include #include +#include #endif /* _LINUX_ATOMIC_H */ diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h index f6fe36c428df..a0f654370da3 100644 --- a/include/linux/atomic/atomic-instrumented.h +++ b/include/linux/atomic/atomic-instrumented.h @@ -1177,6 +1177,584 @@ atomic64_dec_if_positive(atomic64_t *v) return arch_atomic64_dec_if_positive(v); } +static __always_inline long +atomic_long_read(const atomic_long_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_long_read(v); +} + +static __always_inline long +atomic_long_read_acquire(const atomic_long_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_long_read_acquire(v); +} + +static __always_inline void +atomic_long_set(atomic_long_t *v, long i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_long_set(v, i); +} + +static __always_inline void +atomic_long_set_release(atomic_long_t *v, long i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_long_set_release(v, i); +} + +static __always_inline void +atomic_long_add(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_add(i, v); +} + +static __always_inline long +atomic_long_add_return(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return(i, v); +} + +static __always_inline long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_acquire(i, v); +} + +static __always_inline long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_release(i, v); +} + +static __always_inline long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add(i, v); +} + +static __always_inline long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_release(i, v); +} + +static __always_inline long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_sub(i, v); +} + 
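+/*
+ * Illustrative note, not part of the upstream patch: every wrapper in
+ * this hunk has the same generated shape -- instrument the access for
+ * KASAN/KCSAN, then defer to the arch_ op.  Ordinary code therefore
+ * keeps writing, e.g.:
+ *
+ *	static atomic_long_t nr_users;
+ *	atomic_long_inc(&nr_users);		(instrumented)
+ *	long n = atomic_long_read(&nr_users);	(instrumented)
+ *
+ * while noinstr code must call arch_atomic_long_inc() and friends
+ * directly so that no instrumentation calls are emitted.
+ */
+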
+static __always_inline long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return(i, v); +} + +static __always_inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_acquire(i, v); +} + +static __always_inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_release(i, v); +} + +static __always_inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_release(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic_long_inc(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_inc(v); +} + +static __always_inline long +atomic_long_inc_return(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return(v); +} + +static __always_inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_acquire(v); +} + +static __always_inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_release(v); +} + +static __always_inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc(v); +} + +static __always_inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_acquire(v); +} + +static __always_inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_release(v); +} + +static __always_inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic_long_dec(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_dec(v); +} + +static __always_inline long +atomic_long_dec_return(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return(v); +} + +static __always_inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic_long_dec_return_acquire(v); +} + +static __always_inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_release(v); +} + +static __always_inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec(v); +} + +static __always_inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_acquire(v); +} + +static __always_inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_release(v); +} + +static __always_inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic_long_and(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_release(i, v); +} + +static __always_inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_release(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic_long_or(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_or_release(long i, 
atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_release(i, v); +} + +static __always_inline long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_release(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_relaxed(i, v); +} + +static __always_inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg(v, i); +} + +static __always_inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_acquire(v, i); +} + +static __always_inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_release(v, i); +} + +static __always_inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_relaxed(v, i); +} + +static __always_inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_release(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_release(v, old, new); +} + +static __always_inline 
bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_and_test(i, v); +} + +static __always_inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_and_test(v); +} + +static __always_inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_and_test(v); +} + +static __always_inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_negative(i, v); +} + +static __always_inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_not_zero(v); +} + +static __always_inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_unless_negative(v); +} + +static __always_inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_unless_positive(v); +} + +static __always_inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_if_positive(v); +} + #define xchg(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1334,4 +1912,4 @@ atomic64_dec_if_positive(atomic64_t *v) }) #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ -// 5edd72f105b6f54b7e9492d794abee88e6912d29 +// 2a9553f0a9d5619f19151092df5cabbbf16ce835 diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h index e40e480e175f..800b8c35992d 100644 --- a/include/linux/atomic/atomic-long.h +++ b/include/linux/atomic/atomic-long.h @@ -24,991 +24,991 @@ typedef atomic_t atomic_long_t; #ifdef CONFIG_64BIT static __always_inline long -atomic_long_read(const atomic_long_t *v) +arch_atomic_long_read(const atomic_long_t *v) { - return atomic64_read(v); + return arch_atomic64_read(v); } static __always_inline long -atomic_long_read_acquire(const atomic_long_t *v) +arch_atomic_long_read_acquire(const atomic_long_t *v) { - return atomic64_read_acquire(v); + return arch_atomic64_read_acquire(v); } static __always_inline void -atomic_long_set(atomic_long_t *v, long i) +arch_atomic_long_set(atomic_long_t *v, long i) { - atomic64_set(v, i); + arch_atomic64_set(v, i); } static __always_inline void -atomic_long_set_release(atomic_long_t *v, long i) +arch_atomic_long_set_release(atomic_long_t *v, long i) { - atomic64_set_release(v, i); + arch_atomic64_set_release(v, i); } static __always_inline void -atomic_long_add(long i, atomic_long_t *v) +arch_atomic_long_add(long i, atomic_long_t *v) { - atomic64_add(i, v); + arch_atomic64_add(i, v); } static __always_inline long -atomic_long_add_return(long i, atomic_long_t *v) +arch_atomic_long_add_return(long i, atomic_long_t *v) { - return atomic64_add_return(i, v); + return arch_atomic64_add_return(i, v); } static __always_inline long -atomic_long_add_return_acquire(long i, atomic_long_t *v) +arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) { - return atomic64_add_return_acquire(i, v); + return arch_atomic64_add_return_acquire(i, v); } static __always_inline long -atomic_long_add_return_release(long i, atomic_long_t *v) +arch_atomic_long_add_return_release(long i, atomic_long_t *v) { - return atomic64_add_return_release(i, v); + return arch_atomic64_add_return_release(i, v); } static __always_inline long -atomic_long_add_return_relaxed(long i, atomic_long_t *v) +arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) { - return atomic64_add_return_relaxed(i, v); + return arch_atomic64_add_return_relaxed(i, v); } static __always_inline long -atomic_long_fetch_add(long i, atomic_long_t *v) +arch_atomic_long_fetch_add(long i, atomic_long_t *v) { - return atomic64_fetch_add(i, v); + return arch_atomic64_fetch_add(i, v); } static __always_inline long -atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { - return atomic64_fetch_add_acquire(i, v); + return arch_atomic64_fetch_add_acquire(i, v); } static __always_inline long -atomic_long_fetch_add_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) { - return atomic64_fetch_add_release(i, v); + return arch_atomic64_fetch_add_release(i, v); } static __always_inline long -atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { - return atomic64_fetch_add_relaxed(i, v); + return arch_atomic64_fetch_add_relaxed(i, v); } static __always_inline void -atomic_long_sub(long i, atomic_long_t *v) +arch_atomic_long_sub(long i, atomic_long_t *v) { - atomic64_sub(i, v); + arch_atomic64_sub(i, v); } static __always_inline long 
-atomic_long_sub_return(long i, atomic_long_t *v) +arch_atomic_long_sub_return(long i, atomic_long_t *v) { - return atomic64_sub_return(i, v); + return arch_atomic64_sub_return(i, v); } static __always_inline long -atomic_long_sub_return_acquire(long i, atomic_long_t *v) +arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) { - return atomic64_sub_return_acquire(i, v); + return arch_atomic64_sub_return_acquire(i, v); } static __always_inline long -atomic_long_sub_return_release(long i, atomic_long_t *v) +arch_atomic_long_sub_return_release(long i, atomic_long_t *v) { - return atomic64_sub_return_release(i, v); + return arch_atomic64_sub_return_release(i, v); } static __always_inline long -atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { - return atomic64_sub_return_relaxed(i, v); + return arch_atomic64_sub_return_relaxed(i, v); } static __always_inline long -atomic_long_fetch_sub(long i, atomic_long_t *v) +arch_atomic_long_fetch_sub(long i, atomic_long_t *v) { - return atomic64_fetch_sub(i, v); + return arch_atomic64_fetch_sub(i, v); } static __always_inline long -atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { - return atomic64_fetch_sub_acquire(i, v); + return arch_atomic64_fetch_sub_acquire(i, v); } static __always_inline long -atomic_long_fetch_sub_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) { - return atomic64_fetch_sub_release(i, v); + return arch_atomic64_fetch_sub_release(i, v); } static __always_inline long -atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { - return atomic64_fetch_sub_relaxed(i, v); + return arch_atomic64_fetch_sub_relaxed(i, v); } static __always_inline void -atomic_long_inc(atomic_long_t *v) +arch_atomic_long_inc(atomic_long_t *v) { - atomic64_inc(v); + arch_atomic64_inc(v); } static __always_inline long -atomic_long_inc_return(atomic_long_t *v) +arch_atomic_long_inc_return(atomic_long_t *v) { - return atomic64_inc_return(v); + return arch_atomic64_inc_return(v); } static __always_inline long -atomic_long_inc_return_acquire(atomic_long_t *v) +arch_atomic_long_inc_return_acquire(atomic_long_t *v) { - return atomic64_inc_return_acquire(v); + return arch_atomic64_inc_return_acquire(v); } static __always_inline long -atomic_long_inc_return_release(atomic_long_t *v) +arch_atomic_long_inc_return_release(atomic_long_t *v) { - return atomic64_inc_return_release(v); + return arch_atomic64_inc_return_release(v); } static __always_inline long -atomic_long_inc_return_relaxed(atomic_long_t *v) +arch_atomic_long_inc_return_relaxed(atomic_long_t *v) { - return atomic64_inc_return_relaxed(v); + return arch_atomic64_inc_return_relaxed(v); } static __always_inline long -atomic_long_fetch_inc(atomic_long_t *v) +arch_atomic_long_fetch_inc(atomic_long_t *v) { - return atomic64_fetch_inc(v); + return arch_atomic64_fetch_inc(v); } static __always_inline long -atomic_long_fetch_inc_acquire(atomic_long_t *v) +arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) { - return atomic64_fetch_inc_acquire(v); + return arch_atomic64_fetch_inc_acquire(v); } static __always_inline long -atomic_long_fetch_inc_release(atomic_long_t *v) +arch_atomic_long_fetch_inc_release(atomic_long_t *v) { - return atomic64_fetch_inc_release(v); + return arch_atomic64_fetch_inc_release(v); } static __always_inline long 
-atomic_long_fetch_inc_relaxed(atomic_long_t *v) +arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) { - return atomic64_fetch_inc_relaxed(v); + return arch_atomic64_fetch_inc_relaxed(v); } static __always_inline void -atomic_long_dec(atomic_long_t *v) +arch_atomic_long_dec(atomic_long_t *v) { - atomic64_dec(v); + arch_atomic64_dec(v); } static __always_inline long -atomic_long_dec_return(atomic_long_t *v) +arch_atomic_long_dec_return(atomic_long_t *v) { - return atomic64_dec_return(v); + return arch_atomic64_dec_return(v); } static __always_inline long -atomic_long_dec_return_acquire(atomic_long_t *v) +arch_atomic_long_dec_return_acquire(atomic_long_t *v) { - return atomic64_dec_return_acquire(v); + return arch_atomic64_dec_return_acquire(v); } static __always_inline long -atomic_long_dec_return_release(atomic_long_t *v) +arch_atomic_long_dec_return_release(atomic_long_t *v) { - return atomic64_dec_return_release(v); + return arch_atomic64_dec_return_release(v); } static __always_inline long -atomic_long_dec_return_relaxed(atomic_long_t *v) +arch_atomic_long_dec_return_relaxed(atomic_long_t *v) { - return atomic64_dec_return_relaxed(v); + return arch_atomic64_dec_return_relaxed(v); } static __always_inline long -atomic_long_fetch_dec(atomic_long_t *v) +arch_atomic_long_fetch_dec(atomic_long_t *v) { - return atomic64_fetch_dec(v); + return arch_atomic64_fetch_dec(v); } static __always_inline long -atomic_long_fetch_dec_acquire(atomic_long_t *v) +arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) { - return atomic64_fetch_dec_acquire(v); + return arch_atomic64_fetch_dec_acquire(v); } static __always_inline long -atomic_long_fetch_dec_release(atomic_long_t *v) +arch_atomic_long_fetch_dec_release(atomic_long_t *v) { - return atomic64_fetch_dec_release(v); + return arch_atomic64_fetch_dec_release(v); } static __always_inline long -atomic_long_fetch_dec_relaxed(atomic_long_t *v) +arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) { - return atomic64_fetch_dec_relaxed(v); + return arch_atomic64_fetch_dec_relaxed(v); } static __always_inline void -atomic_long_and(long i, atomic_long_t *v) +arch_atomic_long_and(long i, atomic_long_t *v) { - atomic64_and(i, v); + arch_atomic64_and(i, v); } static __always_inline long -atomic_long_fetch_and(long i, atomic_long_t *v) +arch_atomic_long_fetch_and(long i, atomic_long_t *v) { - return atomic64_fetch_and(i, v); + return arch_atomic64_fetch_and(i, v); } static __always_inline long -atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { - return atomic64_fetch_and_acquire(i, v); + return arch_atomic64_fetch_and_acquire(i, v); } static __always_inline long -atomic_long_fetch_and_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) { - return atomic64_fetch_and_release(i, v); + return arch_atomic64_fetch_and_release(i, v); } static __always_inline long -atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { - return atomic64_fetch_and_relaxed(i, v); + return arch_atomic64_fetch_and_relaxed(i, v); } static __always_inline void -atomic_long_andnot(long i, atomic_long_t *v) +arch_atomic_long_andnot(long i, atomic_long_t *v) { - atomic64_andnot(i, v); + arch_atomic64_andnot(i, v); } static __always_inline long -atomic_long_fetch_andnot(long i, atomic_long_t *v) +arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) { - return atomic64_fetch_andnot(i, v); + return 
arch_atomic64_fetch_andnot(i, v); } static __always_inline long -atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { - return atomic64_fetch_andnot_acquire(i, v); + return arch_atomic64_fetch_andnot_acquire(i, v); } static __always_inline long -atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { - return atomic64_fetch_andnot_release(i, v); + return arch_atomic64_fetch_andnot_release(i, v); } static __always_inline long -atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { - return atomic64_fetch_andnot_relaxed(i, v); + return arch_atomic64_fetch_andnot_relaxed(i, v); } static __always_inline void -atomic_long_or(long i, atomic_long_t *v) +arch_atomic_long_or(long i, atomic_long_t *v) { - atomic64_or(i, v); + arch_atomic64_or(i, v); } static __always_inline long -atomic_long_fetch_or(long i, atomic_long_t *v) +arch_atomic_long_fetch_or(long i, atomic_long_t *v) { - return atomic64_fetch_or(i, v); + return arch_atomic64_fetch_or(i, v); } static __always_inline long -atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { - return atomic64_fetch_or_acquire(i, v); + return arch_atomic64_fetch_or_acquire(i, v); } static __always_inline long -atomic_long_fetch_or_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) { - return atomic64_fetch_or_release(i, v); + return arch_atomic64_fetch_or_release(i, v); } static __always_inline long -atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { - return atomic64_fetch_or_relaxed(i, v); + return arch_atomic64_fetch_or_relaxed(i, v); } static __always_inline void -atomic_long_xor(long i, atomic_long_t *v) +arch_atomic_long_xor(long i, atomic_long_t *v) { - atomic64_xor(i, v); + arch_atomic64_xor(i, v); } static __always_inline long -atomic_long_fetch_xor(long i, atomic_long_t *v) +arch_atomic_long_fetch_xor(long i, atomic_long_t *v) { - return atomic64_fetch_xor(i, v); + return arch_atomic64_fetch_xor(i, v); } static __always_inline long -atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { - return atomic64_fetch_xor_acquire(i, v); + return arch_atomic64_fetch_xor_acquire(i, v); } static __always_inline long -atomic_long_fetch_xor_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) { - return atomic64_fetch_xor_release(i, v); + return arch_atomic64_fetch_xor_release(i, v); } static __always_inline long -atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { - return atomic64_fetch_xor_relaxed(i, v); + return arch_atomic64_fetch_xor_relaxed(i, v); } static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) +arch_atomic_long_xchg(atomic_long_t *v, long i) { - return atomic64_xchg(v, i); + return arch_atomic64_xchg(v, i); } static __always_inline long -atomic_long_xchg_acquire(atomic_long_t *v, long i) +arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) { - return atomic64_xchg_acquire(v, i); + return arch_atomic64_xchg_acquire(v, i); } static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) +arch_atomic_long_xchg_release(atomic_long_t *v, long i) { - return 
atomic64_xchg_release(v, i); + return arch_atomic64_xchg_release(v, i); } static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) +arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) { - return atomic64_xchg_relaxed(v, i); + return arch_atomic64_xchg_relaxed(v, i); } static __always_inline long -atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { - return atomic64_cmpxchg(v, old, new); + return arch_atomic64_cmpxchg(v, old, new); } static __always_inline long -atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { - return atomic64_cmpxchg_acquire(v, old, new); + return arch_atomic64_cmpxchg_acquire(v, old, new); } static __always_inline long -atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { - return atomic64_cmpxchg_release(v, old, new); + return arch_atomic64_cmpxchg_release(v, old, new); } static __always_inline long -atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { - return atomic64_cmpxchg_relaxed(v, old, new); + return arch_atomic64_cmpxchg_relaxed(v, old, new); } static __always_inline bool -atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { - return atomic64_try_cmpxchg(v, (s64 *)old, new); + return arch_atomic64_try_cmpxchg(v, (s64 *)old, new); } static __always_inline bool -atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { - return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); + return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); } static __always_inline bool -atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { - return atomic64_try_cmpxchg_release(v, (s64 *)old, new); + return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new); } static __always_inline bool -atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { - return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); + return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); } static __always_inline bool -atomic_long_sub_and_test(long i, atomic_long_t *v) +arch_atomic_long_sub_and_test(long i, atomic_long_t *v) { - return atomic64_sub_and_test(i, v); + return arch_atomic64_sub_and_test(i, v); } static __always_inline bool -atomic_long_dec_and_test(atomic_long_t *v) +arch_atomic_long_dec_and_test(atomic_long_t *v) { - return atomic64_dec_and_test(v); + return arch_atomic64_dec_and_test(v); } static __always_inline bool -atomic_long_inc_and_test(atomic_long_t *v) +arch_atomic_long_inc_and_test(atomic_long_t *v) { - return atomic64_inc_and_test(v); + return arch_atomic64_inc_and_test(v); } static __always_inline bool -atomic_long_add_negative(long i, atomic_long_t *v) +arch_atomic_long_add_negative(long i, atomic_long_t *v) { - return atomic64_add_negative(i, v); + return arch_atomic64_add_negative(i, v); } static __always_inline long -atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { - return 
atomic64_fetch_add_unless(v, a, u); + return arch_atomic64_fetch_add_unless(v, a, u); } static __always_inline bool -atomic_long_add_unless(atomic_long_t *v, long a, long u) +arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) { - return atomic64_add_unless(v, a, u); + return arch_atomic64_add_unless(v, a, u); } static __always_inline bool -atomic_long_inc_not_zero(atomic_long_t *v) +arch_atomic_long_inc_not_zero(atomic_long_t *v) { - return atomic64_inc_not_zero(v); + return arch_atomic64_inc_not_zero(v); } static __always_inline bool -atomic_long_inc_unless_negative(atomic_long_t *v) +arch_atomic_long_inc_unless_negative(atomic_long_t *v) { - return atomic64_inc_unless_negative(v); + return arch_atomic64_inc_unless_negative(v); } static __always_inline bool -atomic_long_dec_unless_positive(atomic_long_t *v) +arch_atomic_long_dec_unless_positive(atomic_long_t *v) { - return atomic64_dec_unless_positive(v); + return arch_atomic64_dec_unless_positive(v); } static __always_inline long -atomic_long_dec_if_positive(atomic_long_t *v) +arch_atomic_long_dec_if_positive(atomic_long_t *v) { - return atomic64_dec_if_positive(v); + return arch_atomic64_dec_if_positive(v); } #else /* CONFIG_64BIT */ static __always_inline long -atomic_long_read(const atomic_long_t *v) +arch_atomic_long_read(const atomic_long_t *v) { - return atomic_read(v); + return arch_atomic_read(v); } static __always_inline long -atomic_long_read_acquire(const atomic_long_t *v) +arch_atomic_long_read_acquire(const atomic_long_t *v) { - return atomic_read_acquire(v); + return arch_atomic_read_acquire(v); } static __always_inline void -atomic_long_set(atomic_long_t *v, long i) +arch_atomic_long_set(atomic_long_t *v, long i) { - atomic_set(v, i); + arch_atomic_set(v, i); } static __always_inline void -atomic_long_set_release(atomic_long_t *v, long i) +arch_atomic_long_set_release(atomic_long_t *v, long i) { - atomic_set_release(v, i); + arch_atomic_set_release(v, i); } static __always_inline void -atomic_long_add(long i, atomic_long_t *v) +arch_atomic_long_add(long i, atomic_long_t *v) { - atomic_add(i, v); + arch_atomic_add(i, v); } static __always_inline long -atomic_long_add_return(long i, atomic_long_t *v) +arch_atomic_long_add_return(long i, atomic_long_t *v) { - return atomic_add_return(i, v); + return arch_atomic_add_return(i, v); } static __always_inline long -atomic_long_add_return_acquire(long i, atomic_long_t *v) +arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) { - return atomic_add_return_acquire(i, v); + return arch_atomic_add_return_acquire(i, v); } static __always_inline long -atomic_long_add_return_release(long i, atomic_long_t *v) +arch_atomic_long_add_return_release(long i, atomic_long_t *v) { - return atomic_add_return_release(i, v); + return arch_atomic_add_return_release(i, v); } static __always_inline long -atomic_long_add_return_relaxed(long i, atomic_long_t *v) +arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) { - return atomic_add_return_relaxed(i, v); + return arch_atomic_add_return_relaxed(i, v); } static __always_inline long -atomic_long_fetch_add(long i, atomic_long_t *v) +arch_atomic_long_fetch_add(long i, atomic_long_t *v) { - return atomic_fetch_add(i, v); + return arch_atomic_fetch_add(i, v); } static __always_inline long -atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { - return atomic_fetch_add_acquire(i, v); + return arch_atomic_fetch_add_acquire(i, v); } static __always_inline long 
-atomic_long_fetch_add_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) { - return atomic_fetch_add_release(i, v); + return arch_atomic_fetch_add_release(i, v); } static __always_inline long -atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { - return atomic_fetch_add_relaxed(i, v); + return arch_atomic_fetch_add_relaxed(i, v); } static __always_inline void -atomic_long_sub(long i, atomic_long_t *v) +arch_atomic_long_sub(long i, atomic_long_t *v) { - atomic_sub(i, v); + arch_atomic_sub(i, v); } static __always_inline long -atomic_long_sub_return(long i, atomic_long_t *v) +arch_atomic_long_sub_return(long i, atomic_long_t *v) { - return atomic_sub_return(i, v); + return arch_atomic_sub_return(i, v); } static __always_inline long -atomic_long_sub_return_acquire(long i, atomic_long_t *v) +arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) { - return atomic_sub_return_acquire(i, v); + return arch_atomic_sub_return_acquire(i, v); } static __always_inline long -atomic_long_sub_return_release(long i, atomic_long_t *v) +arch_atomic_long_sub_return_release(long i, atomic_long_t *v) { - return atomic_sub_return_release(i, v); + return arch_atomic_sub_return_release(i, v); } static __always_inline long -atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { - return atomic_sub_return_relaxed(i, v); + return arch_atomic_sub_return_relaxed(i, v); } static __always_inline long -atomic_long_fetch_sub(long i, atomic_long_t *v) +arch_atomic_long_fetch_sub(long i, atomic_long_t *v) { - return atomic_fetch_sub(i, v); + return arch_atomic_fetch_sub(i, v); } static __always_inline long -atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { - return atomic_fetch_sub_acquire(i, v); + return arch_atomic_fetch_sub_acquire(i, v); } static __always_inline long -atomic_long_fetch_sub_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) { - return atomic_fetch_sub_release(i, v); + return arch_atomic_fetch_sub_release(i, v); } static __always_inline long -atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { - return atomic_fetch_sub_relaxed(i, v); + return arch_atomic_fetch_sub_relaxed(i, v); } static __always_inline void -atomic_long_inc(atomic_long_t *v) +arch_atomic_long_inc(atomic_long_t *v) { - atomic_inc(v); + arch_atomic_inc(v); } static __always_inline long -atomic_long_inc_return(atomic_long_t *v) +arch_atomic_long_inc_return(atomic_long_t *v) { - return atomic_inc_return(v); + return arch_atomic_inc_return(v); } static __always_inline long -atomic_long_inc_return_acquire(atomic_long_t *v) +arch_atomic_long_inc_return_acquire(atomic_long_t *v) { - return atomic_inc_return_acquire(v); + return arch_atomic_inc_return_acquire(v); } static __always_inline long -atomic_long_inc_return_release(atomic_long_t *v) +arch_atomic_long_inc_return_release(atomic_long_t *v) { - return atomic_inc_return_release(v); + return arch_atomic_inc_return_release(v); } static __always_inline long -atomic_long_inc_return_relaxed(atomic_long_t *v) +arch_atomic_long_inc_return_relaxed(atomic_long_t *v) { - return atomic_inc_return_relaxed(v); + return arch_atomic_inc_return_relaxed(v); } static __always_inline long -atomic_long_fetch_inc(atomic_long_t *v) 
+arch_atomic_long_fetch_inc(atomic_long_t *v) { - return atomic_fetch_inc(v); + return arch_atomic_fetch_inc(v); } static __always_inline long -atomic_long_fetch_inc_acquire(atomic_long_t *v) +arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) { - return atomic_fetch_inc_acquire(v); + return arch_atomic_fetch_inc_acquire(v); } static __always_inline long -atomic_long_fetch_inc_release(atomic_long_t *v) +arch_atomic_long_fetch_inc_release(atomic_long_t *v) { - return atomic_fetch_inc_release(v); + return arch_atomic_fetch_inc_release(v); } static __always_inline long -atomic_long_fetch_inc_relaxed(atomic_long_t *v) +arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) { - return atomic_fetch_inc_relaxed(v); + return arch_atomic_fetch_inc_relaxed(v); } static __always_inline void -atomic_long_dec(atomic_long_t *v) +arch_atomic_long_dec(atomic_long_t *v) { - atomic_dec(v); + arch_atomic_dec(v); } static __always_inline long -atomic_long_dec_return(atomic_long_t *v) +arch_atomic_long_dec_return(atomic_long_t *v) { - return atomic_dec_return(v); + return arch_atomic_dec_return(v); } static __always_inline long -atomic_long_dec_return_acquire(atomic_long_t *v) +arch_atomic_long_dec_return_acquire(atomic_long_t *v) { - return atomic_dec_return_acquire(v); + return arch_atomic_dec_return_acquire(v); } static __always_inline long -atomic_long_dec_return_release(atomic_long_t *v) +arch_atomic_long_dec_return_release(atomic_long_t *v) { - return atomic_dec_return_release(v); + return arch_atomic_dec_return_release(v); } static __always_inline long -atomic_long_dec_return_relaxed(atomic_long_t *v) +arch_atomic_long_dec_return_relaxed(atomic_long_t *v) { - return atomic_dec_return_relaxed(v); + return arch_atomic_dec_return_relaxed(v); } static __always_inline long -atomic_long_fetch_dec(atomic_long_t *v) +arch_atomic_long_fetch_dec(atomic_long_t *v) { - return atomic_fetch_dec(v); + return arch_atomic_fetch_dec(v); } static __always_inline long -atomic_long_fetch_dec_acquire(atomic_long_t *v) +arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) { - return atomic_fetch_dec_acquire(v); + return arch_atomic_fetch_dec_acquire(v); } static __always_inline long -atomic_long_fetch_dec_release(atomic_long_t *v) +arch_atomic_long_fetch_dec_release(atomic_long_t *v) { - return atomic_fetch_dec_release(v); + return arch_atomic_fetch_dec_release(v); } static __always_inline long -atomic_long_fetch_dec_relaxed(atomic_long_t *v) +arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) { - return atomic_fetch_dec_relaxed(v); + return arch_atomic_fetch_dec_relaxed(v); } static __always_inline void -atomic_long_and(long i, atomic_long_t *v) +arch_atomic_long_and(long i, atomic_long_t *v) { - atomic_and(i, v); + arch_atomic_and(i, v); } static __always_inline long -atomic_long_fetch_and(long i, atomic_long_t *v) +arch_atomic_long_fetch_and(long i, atomic_long_t *v) { - return atomic_fetch_and(i, v); + return arch_atomic_fetch_and(i, v); } static __always_inline long -atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { - return atomic_fetch_and_acquire(i, v); + return arch_atomic_fetch_and_acquire(i, v); } static __always_inline long -atomic_long_fetch_and_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) { - return atomic_fetch_and_release(i, v); + return arch_atomic_fetch_and_release(i, v); } static __always_inline long -atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) 
+arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { - return atomic_fetch_and_relaxed(i, v); + return arch_atomic_fetch_and_relaxed(i, v); } static __always_inline void -atomic_long_andnot(long i, atomic_long_t *v) +arch_atomic_long_andnot(long i, atomic_long_t *v) { - atomic_andnot(i, v); + arch_atomic_andnot(i, v); } static __always_inline long -atomic_long_fetch_andnot(long i, atomic_long_t *v) +arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) { - return atomic_fetch_andnot(i, v); + return arch_atomic_fetch_andnot(i, v); } static __always_inline long -atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { - return atomic_fetch_andnot_acquire(i, v); + return arch_atomic_fetch_andnot_acquire(i, v); } static __always_inline long -atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { - return atomic_fetch_andnot_release(i, v); + return arch_atomic_fetch_andnot_release(i, v); } static __always_inline long -atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { - return atomic_fetch_andnot_relaxed(i, v); + return arch_atomic_fetch_andnot_relaxed(i, v); } static __always_inline void -atomic_long_or(long i, atomic_long_t *v) +arch_atomic_long_or(long i, atomic_long_t *v) { - atomic_or(i, v); + arch_atomic_or(i, v); } static __always_inline long -atomic_long_fetch_or(long i, atomic_long_t *v) +arch_atomic_long_fetch_or(long i, atomic_long_t *v) { - return atomic_fetch_or(i, v); + return arch_atomic_fetch_or(i, v); } static __always_inline long -atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { - return atomic_fetch_or_acquire(i, v); + return arch_atomic_fetch_or_acquire(i, v); } static __always_inline long -atomic_long_fetch_or_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) { - return atomic_fetch_or_release(i, v); + return arch_atomic_fetch_or_release(i, v); } static __always_inline long -atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { - return atomic_fetch_or_relaxed(i, v); + return arch_atomic_fetch_or_relaxed(i, v); } static __always_inline void -atomic_long_xor(long i, atomic_long_t *v) +arch_atomic_long_xor(long i, atomic_long_t *v) { - atomic_xor(i, v); + arch_atomic_xor(i, v); } static __always_inline long -atomic_long_fetch_xor(long i, atomic_long_t *v) +arch_atomic_long_fetch_xor(long i, atomic_long_t *v) { - return atomic_fetch_xor(i, v); + return arch_atomic_fetch_xor(i, v); } static __always_inline long -atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { - return atomic_fetch_xor_acquire(i, v); + return arch_atomic_fetch_xor_acquire(i, v); } static __always_inline long -atomic_long_fetch_xor_release(long i, atomic_long_t *v) +arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) { - return atomic_fetch_xor_release(i, v); + return arch_atomic_fetch_xor_release(i, v); } static __always_inline long -atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { - return atomic_fetch_xor_relaxed(i, v); + return arch_atomic_fetch_xor_relaxed(i, v); } static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) 
+arch_atomic_long_xchg(atomic_long_t *v, long i) { - return atomic_xchg(v, i); + return arch_atomic_xchg(v, i); } static __always_inline long -atomic_long_xchg_acquire(atomic_long_t *v, long i) +arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) { - return atomic_xchg_acquire(v, i); + return arch_atomic_xchg_acquire(v, i); } static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) +arch_atomic_long_xchg_release(atomic_long_t *v, long i) { - return atomic_xchg_release(v, i); + return arch_atomic_xchg_release(v, i); } static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) +arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) { - return atomic_xchg_relaxed(v, i); + return arch_atomic_xchg_relaxed(v, i); } static __always_inline long -atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { - return atomic_cmpxchg(v, old, new); + return arch_atomic_cmpxchg(v, old, new); } static __always_inline long -atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { - return atomic_cmpxchg_acquire(v, old, new); + return arch_atomic_cmpxchg_acquire(v, old, new); } static __always_inline long -atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { - return atomic_cmpxchg_release(v, old, new); + return arch_atomic_cmpxchg_release(v, old, new); } static __always_inline long -atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { - return atomic_cmpxchg_relaxed(v, old, new); + return arch_atomic_cmpxchg_relaxed(v, old, new); } static __always_inline bool -atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { - return atomic_try_cmpxchg(v, (int *)old, new); + return arch_atomic_try_cmpxchg(v, (int *)old, new); } static __always_inline bool -atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { - return atomic_try_cmpxchg_acquire(v, (int *)old, new); + return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new); } static __always_inline bool -atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { - return atomic_try_cmpxchg_release(v, (int *)old, new); + return arch_atomic_try_cmpxchg_release(v, (int *)old, new); } static __always_inline bool -atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { - return atomic_try_cmpxchg_relaxed(v, (int *)old, new); + return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new); } static __always_inline bool -atomic_long_sub_and_test(long i, atomic_long_t *v) +arch_atomic_long_sub_and_test(long i, atomic_long_t *v) { - return atomic_sub_and_test(i, v); + return arch_atomic_sub_and_test(i, v); } static __always_inline bool -atomic_long_dec_and_test(atomic_long_t *v) +arch_atomic_long_dec_and_test(atomic_long_t *v) { - return atomic_dec_and_test(v); + return arch_atomic_dec_and_test(v); } static __always_inline bool -atomic_long_inc_and_test(atomic_long_t *v) +arch_atomic_long_inc_and_test(atomic_long_t *v) { - return atomic_inc_and_test(v); + return 
arch_atomic_inc_and_test(v); } static __always_inline bool -atomic_long_add_negative(long i, atomic_long_t *v) +arch_atomic_long_add_negative(long i, atomic_long_t *v) { - return atomic_add_negative(i, v); + return arch_atomic_add_negative(i, v); } static __always_inline long -atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { - return atomic_fetch_add_unless(v, a, u); + return arch_atomic_fetch_add_unless(v, a, u); } static __always_inline bool -atomic_long_add_unless(atomic_long_t *v, long a, long u) +arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) { - return atomic_add_unless(v, a, u); + return arch_atomic_add_unless(v, a, u); } static __always_inline bool -atomic_long_inc_not_zero(atomic_long_t *v) +arch_atomic_long_inc_not_zero(atomic_long_t *v) { - return atomic_inc_not_zero(v); + return arch_atomic_inc_not_zero(v); } static __always_inline bool -atomic_long_inc_unless_negative(atomic_long_t *v) +arch_atomic_long_inc_unless_negative(atomic_long_t *v) { - return atomic_inc_unless_negative(v); + return arch_atomic_inc_unless_negative(v); } static __always_inline bool -atomic_long_dec_unless_positive(atomic_long_t *v) +arch_atomic_long_dec_unless_positive(atomic_long_t *v) { - return atomic_dec_unless_positive(v); + return arch_atomic_dec_unless_positive(v); } static __always_inline long -atomic_long_dec_if_positive(atomic_long_t *v) +arch_atomic_long_dec_if_positive(atomic_long_t *v) { - return atomic_dec_if_positive(v); + return arch_atomic_dec_if_positive(v); } #endif /* CONFIG_64BIT */ #endif /* _LINUX_ATOMIC_LONG_H */ -// c5552b5d78a0c7584dfd03cba985e78a1a86bbed +// e8f0e08ff072b74d180eabe2ad001282b38c2c88 diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh index 6fc1ab772e40..035ceb4ee85c 100755 --- a/scripts/atomic/gen-atomic-instrumented.sh +++ b/scripts/atomic/gen-atomic-instrumented.sh @@ -138,6 +138,11 @@ grep '^[a-z]' "$1" | while read name meta args; do gen_proto "${meta}" "${name}" "atomic64" "s64" ${args} done +grep '^[a-z]' "$1" | while read name meta args; do + gen_proto "${meta}" "${name}" "atomic_long" "long" ${args} +done + + for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg"; do for order in "" "_acquire" "_release" "_relaxed"; do gen_xchg "${xchg}${order}" "" diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh index db69572609df..eda89cea6e1d 100755 --- a/scripts/atomic/gen-atomic-long.sh +++ b/scripts/atomic/gen-atomic-long.sh @@ -47,9 +47,9 @@ gen_proto_order_variant() cat < Date: Mon, 12 Jul 2021 15:35:46 -0400 Subject: scripts/setlocalversion: fix a bug when LOCALVERSION is empty The commit 042da426f8eb ("scripts/setlocalversion: simplify the short version part") reduces indentation. Unfortunately, it also changes behavior in a subtle way - if the user has empty "LOCALVERSION" variable, the plus sign is appended to the kernel version. It wasn't appended before. This patch reverts to the old behavior - we append the plus sign only if the LOCALVERSION variable is not set. 
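The distinction rests on the POSIX ${var+word} expansion, which expands to "set" whenever the variable is defined, even as an empty string. As a sketch (actual output depends on the state of the tree):

  $ LOCALVERSION= make kernelrelease   # set but empty: no plus sign appended
  $ make kernelrelease                 # unset: '+' appended if the tree is not
                                       # in a clean annotated/signed tagged state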
Fixes: 042da426f8eb ("scripts/setlocalversion: simplify the short version part") Signed-off-by: Mikulas Patocka Signed-off-by: Masahiro Yamada --- scripts/setlocalversion | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'scripts') diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 151f04971faa..6b54e46a0f12 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -131,11 +131,14 @@ res="${res}${CONFIG_LOCALVERSION}${LOCALVERSION}" if test "$CONFIG_LOCALVERSION_AUTO" = "y"; then # full scm version string res="$res$(scm_version)" -elif [ -z "${LOCALVERSION}" ]; then - # append a plus sign if the repository is not in a clean - # annotated or signed tagged state (as git describe only - # looks at signed or annotated tags - git tag -a/-s) and - # LOCALVERSION= is not specified +elif [ "${LOCALVERSION+set}" != "set" ]; then + # If the variable LOCALVERSION is not set, append a plus + # sign if the repository is not in a clean annotated or + # signed tagged state (as git describe only looks at signed + # or annotated tags - git tag -a/-s). + # + # If the variable LOCALVERSION is set (including being set + # to an empty string), we don't want to append a plus sign. scm=$(scm_version --short) res="$res${scm:++}" fi -- cgit v1.2.3 From 1d11053dc63094075bf9e4809fffd3bb5e72f9a6 Mon Sep 17 00:00:00 2001 From: Lecopzer Chen Date: Thu, 15 Jul 2021 15:37:16 +0800 Subject: Kbuild: lto: fix module versionings mismatch in GNU make 3.X When building modules (CONFIG_...=m), I found that some module versions are incorrect and set to 0. This shows up in the build log of the first clean build as WARNING: EXPORT symbol "XXXX" [drivers/XXX/XXX.ko] version generation failed, symbol will not be versioned. But in the second (incremental) build, the WARNING disappears and the module version becomes a valid CRC, so anyone who wants to change a module without updating the kernel image can no longer insert it. The problematic code is
[https://lore.kernel.org/lkml/20210702032943.7865-1-lecopzer.chen@mediatek.com/] Fixes: 38e891849003 ("kbuild: lto: fix module versioning") Co-developed-by: Sami Tolvanen Signed-off-by: Lecopzer Chen Signed-off-by: Masahiro Yamada --- scripts/Makefile.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 10b2f2380d6f..02197cb8e3a7 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -386,7 +386,7 @@ ifeq ($(CONFIG_LTO_CLANG) $(CONFIG_MODVERSIONS),y y) cmd_update_lto_symversions = \ rm -f $@.symversions \ $(foreach n, $(filter-out FORCE,$^), \ - $(if $(wildcard $(n).symversions), \ + $(if $(shell test -s $(n).symversions && echo y), \ ; cat $(n).symversions >> $@.symversions)) else cmd_update_lto_symversions = echo >/dev/null -- cgit v1.2.3 From 4c5afb74d9450edc2e2e37243b469cc278b120d4 Mon Sep 17 00:00:00 2001 From: Reiner Huober Date: Tue, 6 Jul 2021 15:02:52 +0200 Subject: module: combine constructors in module linker script The constructor code for modules must be aware of init code inside different sections. Newer GCC compilers write constructors in more than one section, e.g. ".ctors.65435". These must be combined into a single ".ctors" section. In the module loader, only the ".ctors" section is searched and the constructors therein are initialized, when CONFIG_CONSTRUCTORS=y is set. Other constructors are ignored. This change combines all ".ctors.*" sections and the ".ctors" section, if any, in the .ko into a single ".ctors" section. For code coverage in GCC, this is necessary to show the code coverage for modules, since code coverage uses such constructors when initializing a module in newer versions of GCC. Signed-off-by: Reiner Huober Signed-off-by: Jessica Yu --- scripts/module.lds.S | 1 + 1 file changed, 1 insertion(+) (limited to 'scripts') diff --git a/scripts/module.lds.S b/scripts/module.lds.S index 04c5685c25cf..1d0e1e4dc3d2 100644 --- a/scripts/module.lds.S +++ b/scripts/module.lds.S @@ -24,6 +24,7 @@ SECTIONS { __kcrctab 0 : { *(SORT(___kcrctab+*)) } __kcrctab_gpl 0 : { *(SORT(___kcrctab_gpl+*)) } + .ctors 0 : ALIGN(8) { *(SORT(.ctors.*)) *(.ctors) } .init_array 0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) } __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) } -- cgit v1.2.3 From 03b1292d1c0ea195e025e667555d74db7da82026 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Wed, 7 Jul 2021 16:06:00 -0500 Subject: scripts/spdxcheck-test.sh: Drop python2 Since commit d0259c42abff ("spdxcheck.py: Use Python 3"), spdxcheck.py explicitly expects to run as a python3 script, so there is no further point in attempting to test with python2.
Cc: Bert Vermeulen Signed-off-by: Nishanth Menon Link: https://lore.kernel.org/r/20210707210600.7266-1-nm@ti.com Signed-off-by: Greg Kroah-Hartman --- scripts/spdxcheck-test.sh | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'scripts') diff --git a/scripts/spdxcheck-test.sh b/scripts/spdxcheck-test.sh index cfea6a0d1cc0..cb76324756bd 100644 --- a/scripts/spdxcheck-test.sh +++ b/scripts/spdxcheck-test.sh @@ -1,12 +1,10 @@ #!/bin/sh -for PYTHON in python2 python3; do - # run check on a text and a binary file - for FILE in Makefile Documentation/logo.gif; do - $PYTHON scripts/spdxcheck.py $FILE - $PYTHON scripts/spdxcheck.py - < $FILE - done - - # run check on complete tree to catch any other issues - $PYTHON scripts/spdxcheck.py > /dev/null +# run check on a text and a binary file +for FILE in Makefile Documentation/logo.gif; do + python3 scripts/spdxcheck.py $FILE + python3 scripts/spdxcheck.py - < $FILE done + +# run check on complete tree to catch any other issues +python3 scripts/spdxcheck.py > /dev/null -- cgit v1.2.3 From b18b851ba85a5855cb53865fcff3cd2c17b44b0b Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Mon, 2 Aug 2021 14:03:07 -0700 Subject: scripts/recordmcount.pl: Remove check_objcopy() and $can_use_local When building ARCH=riscv allmodconfig with llvm-objcopy, the objcopy version warning from this script appears: WARNING: could not find objcopy version or version is less than 2.17. Local function references are disabled. The check_objcopy() function in scripts/recordmcount.pl is set up to parse GNU objcopy's version string, not llvm-objcopy's, which triggers the warning. Commit 799c43415442 ("kbuild: thin archives make default for all archs") made binutils 2.20 mandatory and commit ba64beb17493 ("kbuild: check the minimum assembler version in Kconfig") enforces this at configuration time so just remove check_objcopy() and $can_use_local instead, assuming --globalize-symbol is always available. 
llvm-objcopy has supported --globalize-symbol since LLVM 7.0.0 in 2018 and the minimum version for building the kernel with LLVM is 10.0.1 so there is no issue introduced: Link: https://github.com/llvm/llvm-project/commit/ee5be798dae30d5f9414b01f76ff807edbc881aa Link: https://lkml.kernel.org/r/20210802210307.3202472-1-nathan@kernel.org Reviewed-by: Nick Desaulniers Signed-off-by: Nathan Chancellor Signed-off-by: Steven Rostedt (VMware) --- Makefile | 1 - scripts/recordmcount.pl | 40 ---------------------------------------- 2 files changed, 41 deletions(-) (limited to 'scripts') diff --git a/Makefile b/Makefile index e4f5895badb5..d6915f361aa4 100644 --- a/Makefile +++ b/Makefile @@ -546,7 +546,6 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \ PHONY += scripts_basic scripts_basic: $(Q)$(MAKE) $(build)=scripts/basic - $(Q)rm -f .tmp_quiet_recordmcount PHONY += outputmakefile ifdef building_out_of_srctree diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index c17e48020ec3..8f6b13ae46bf 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -173,39 +173,6 @@ my $mcount_regex; # Find the call site to mcount (return offset) my $mcount_adjust; # Address adjustment to mcount offset my $alignment; # The .align value to use for $mcount_section my $section_type; # Section header plus possible alignment command -my $can_use_local = 0; # If we can use local function references - -# Shut up recordmcount if user has older objcopy -my $quiet_recordmcount = ".tmp_quiet_recordmcount"; -my $print_warning = 1; -$print_warning = 0 if ( -f $quiet_recordmcount); - -## -# check_objcopy - whether objcopy supports --globalize-symbols -# -# --globalize-symbols came out in 2.17, we must test the version -# of objcopy, and if it is less than 2.17, then we can not -# record local functions. -sub check_objcopy -{ - open (IN, "$objcopy --version |") or die "error running $objcopy"; - while () { - if (/objcopy.*\s(\d+)\.(\d+)/) { - $can_use_local = 1 if ($1 > 2 || ($1 == 2 && $2 >= 17)); - last; - } - } - close (IN); - - if (!$can_use_local && $print_warning) { - print STDERR "WARNING: could not find objcopy version or version " . - "is less than 2.17.\n" . - "\tLocal function references are disabled.\n"; - open (QUIET, ">$quiet_recordmcount"); - printf QUIET "Disables the warning from recordmcount.pl\n"; - close QUIET; - } -} if ($arch =~ /(x86(_64)?)|(i386)/) { if ($bits == 64) { @@ -434,8 +401,6 @@ if ($filename =~ m,^(.*)(\.\S),) { my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s"; my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o"; -check_objcopy(); - # # Step 1: find all the local (static functions) and weak symbols. # 't' is local, 'w/W' is weak @@ -473,11 +438,6 @@ sub update_funcs # is this function static? If so, note this fact. if (defined $locals{$ref_func}) { - - # only use locals if objcopy supports globalize-symbols - if (!$can_use_local) { - return; - } $convert{$ref_func} = 1; } -- cgit v1.2.3 From 1c0cec64a7cc545eb49f374a43e9f7190a14defa Mon Sep 17 00:00:00 2001 From: Hui Su Date: Fri, 11 Jun 2021 10:21:07 +0800 Subject: scripts/tracing: fix the bug that can't parse raw_trace_func Since commit 77271ce4b2c0 ("tracing: Add irq, preempt-count and need resched info to default trace output"), the default trace output format has been changed to: -0 [009] d.h. 22420.068695: _raw_spin_lock_irqsave <-hrtimer_interrupt -0 [000] ..s. 22420.068695: _nohz_idle_balance <-run_rebalance_domains -0 [011] d.h. 
22420.068695: account_process_tick <-update_process_times original trace output format (before v3.2.0): # tracer: nop # # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | migration/0-6 [000] 50.025810: rcu_note_context_switch <-__schedule migration/0-6 [000] 50.025812: trace_rcu_utilization <-rcu_note_context_switch migration/0-6 [000] 50.025813: rcu_sched_qs <-rcu_note_context_switch migration/0-6 [000] 50.025815: rcu_preempt_qs <-rcu_note_context_switch migration/0-6 [000] 50.025817: trace_rcu_utilization <-rcu_note_context_switch migration/0-6 [000] 50.025818: debug_lockdep_rcu_enabled <-__schedule migration/0-6 [000] 50.025820: debug_lockdep_rcu_enabled <-__schedule The draw_functrace.py script (introduced in v2.6.28) can't parse the new trace_func format, so we need to modify draw_functrace.py to handle the new trace output format. Link: https://lkml.kernel.org/r/20210611022107.608787-1-suhui@zeku.com Cc: stable@vger.kernel.org Fixes: 77271ce4b2c0 tracing: Add irq, preempt-count and need resched info to default trace output Signed-off-by: Hui Su Signed-off-by: Steven Rostedt (VMware) --- scripts/tracing/draw_functrace.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py index 74f8aadfd4cb..7011fbe003ff 100755 --- a/scripts/tracing/draw_functrace.py +++ b/scripts/tracing/draw_functrace.py @@ -17,7 +17,7 @@ Usage: $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. Break the pipe (Ctrl + Z) - $ scripts/draw_functrace.py < raw_trace_func > draw_functrace + $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace Then you have your drawn trace in draw_functrace """ @@ -103,10 +103,10 @@ def parseLine(line): line = line.strip() if line.startswith("#"): raise CommentLineException - m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) + m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line) if m is None: raise BrokenLineException - return (m.group(1), m.group(2), m.group(3)) + return (m.group(2), m.group(3), m.group(4)) def main(): -- cgit v1.2.3 From 9eec0792024983276871baeb201609abb07dd35d Mon Sep 17 00:00:00 2001 From: Weizhao Ouyang Date: Thu, 5 Aug 2021 17:58:23 +0800 Subject: coccinelle: api: rename kzfree to kfree_sensitive Commit 453431a54934 ("mm, treewide: rename kzfree() to kfree_sensitive()") renamed kzfree() to kfree_sensitive(); the rename should be applied to coccinelle as well. Signed-off-by: Weizhao Ouyang Signed-off-by: Julia Lawall Acked-by: Denis Efremov --- scripts/coccinelle/api/kvmalloc.cocci | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/coccinelle/api/kvmalloc.cocci b/scripts/coccinelle/api/kvmalloc.cocci index c30dab718a49..5ddcb76b76b0 100644 --- a/scripts/coccinelle/api/kvmalloc.cocci +++ b/scripts/coccinelle/api/kvmalloc.cocci @@ -79,7 +79,7 @@ position p : script:python() { relevant(p) }; } else { ... when != krealloc(E, ...) when any -* \(kfree\|kzfree\)(E) +* \(kfree\|kfree_sensitive\)(E) ... } -- cgit v1.2.3 From 54eacba0e3bbda9777788b44b45a5186918569f2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 26 Jul 2021 19:57:37 -0700 Subject: scripts: checkversion: modernize linux/version.h search strings Update scripts/checkversion.pl to recognize the current contents of <linux/version.h> and both of its current locations. Also update my email address.
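As an illustrative case (the version numbers here are made up), the script is meant to flag a source file that tests

  #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)

without ever doing #include <linux/version.h>, and conversely to flag an #include of <linux/version.h> in a file that uses none of these macros.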
Signed-off-by: Randy Dunlap Signed-off-by: Masahiro Yamada --- scripts/checkversion.pl | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'scripts') diff --git a/scripts/checkversion.pl b/scripts/checkversion.pl index f67b125c5269..94cd49eff605 100755 --- a/scripts/checkversion.pl +++ b/scripts/checkversion.pl @@ -1,10 +1,10 @@ #! /usr/bin/env perl # SPDX-License-Identifier: GPL-2.0 # -# checkversion find uses of LINUX_VERSION_CODE or KERNEL_VERSION -# without including <linux/version.h>, or cases of -# including <linux/version.h> that don't need it. -# Copyright (C) 2003, Randy Dunlap +# checkversion finds uses of all macros in <linux/version.h> +# where the source files do not #include <linux/version.h>; or cases +# of including <linux/version.h> where it is not needed. +# Copyright (C) 2003, Randy Dunlap use strict; @@ -13,7 +13,8 @@ $| = 1; my $debugging; foreach my $file (@ARGV) { - next if $file =~ "include/linux/version\.h"; + next if $file =~ "include/generated/uapi/linux/version\.h"; + next if $file =~ "usr/include/linux/version\.h"; # Open this file. open( my $f, '<', $file ) or die "Can't open $file: $!\n"; @@ -41,8 +42,11 @@ foreach my $file (@ARGV) { $iLinuxVersion = $. if m/^\s*#\s*include\s*<linux\/version\.h>/o; } - # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, UTS_RELEASE - if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/)) { + # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, + # LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL + if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/) || + ($_ =~ /LINUX_VERSION_MAJOR/) || ($_ =~ /LINUX_VERSION_PATCHLEVEL/) || + ($_ =~ /LINUX_VERSION_SUBLEVEL/)) { $fUseVersion = 1; last if $iLinuxVersion; } -- cgit v1.2.3 From e71ec0bc06038cdfa18cbd23f5cea71fe4785d35 Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Fri, 30 Jul 2021 10:58:56 +0100 Subject: scripts: coccinelle: allow list_entry_is_head() to use pos Currently use_after_iter.cocci generates false positives for code of the following form: ~~~ list_for_each_entry(d, &ddata->irq_list, node) { if (irq == d->irq) break; } if (list_entry_is_head(d, &ddata->irq_list, node)) return IRQ_NONE; ~~~ [This specific example comes from drivers/power/supply/cpcap-battery.c] Most list macros use list_entry_is_head() as loop exit condition, meaning it is not unsafe to reuse pos (a.k.a. d) in the code above. Let's avoid reporting these cases. Signed-off-by: Daniel Thompson Signed-off-by: Julia Lawall --- scripts/coccinelle/iterators/use_after_iter.cocci | 2 ++ 1 file changed, 2 insertions(+) (limited to 'scripts') diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci index 9be48b520879..676edd562eef 100644 --- a/scripts/coccinelle/iterators/use_after_iter.cocci +++ b/scripts/coccinelle/iterators/use_after_iter.cocci @@ -123,6 +123,8 @@ hlist_for_each_entry_safe(c,...) S | list_remove_head(x,c,...) | +list_entry_is_head(c,...) +| sizeof(<+...c...+>) | &c->member -- cgit v1.2.3 From a325db2d8f1d7e33cdc0152b61c3f14fb06f9893 Mon Sep 17 00:00:00 2001 From: Matthias Maennich Date: Wed, 2 Dec 2020 15:12:39 +0000 Subject: scripts: merge_config: add strict mode to fail upon any redefinition When merging configuration fragments, it might be of interest to identify mismatches (redefinitions) programmatically. Hence add the option -s (strict mode) to instruct merge_config.sh to bail out in case any redefinition has been detected. With strict mode, warnings are emitted as before, but the script terminates with rc=1.
If -y is set to define "builtin having precedence over modules", fragments are still allowed to set =m (while the base config has =y). Strict mode will tolerate that as demotions from =y to =m are ignored when setting -y. Signed-off-by: Matthias Maennich Reviewed-by: Lee Jones Signed-off-by: Masahiro Yamada --- scripts/kconfig/merge_config.sh | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'scripts') diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 63c8565206a4..e5b46980c22a 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -28,6 +28,7 @@ usage() { echo " -r list redundant entries when merging fragments" echo " -y make builtin have precedence over modules" echo " -O dir to put generated output files. Consider setting \$KCONFIG_CONFIG instead." + echo " -s strict mode. Fail if the fragment redefines any value." echo echo "Used prefix: '$CONFIG_PREFIX'. You can redefine it with \$CONFIG_ environment variable." } @@ -37,6 +38,7 @@ ALLTARGET=alldefconfig WARNREDUN=false BUILTIN=false OUTPUT=. +STRICT=false CONFIG_PREFIX=${CONFIG_-CONFIG_} while true; do @@ -75,6 +77,11 @@ while true; do shift 2 continue ;; + "-s") + STRICT=true + shift + continue + ;; *) break ;; @@ -141,6 +148,9 @@ for ORIG_MERGE_FILE in $MERGE_LIST ; do echo Previous value: $PREV_VAL echo New value: $NEW_VAL echo + if [ "$STRICT" = "true" ]; then + STRICT_MODE_VIOLATED=true + fi elif [ "$WARNREDUN" = "true" ]; then echo Value of $CFG is redundant by fragment $ORIG_MERGE_FILE: fi @@ -153,6 +163,11 @@ for ORIG_MERGE_FILE in $MERGE_LIST ; do cat $MERGE_FILE >> $TMP_FILE done +if [ "$STRICT_MODE_VIOLATED" = "true" ]; then + echo "The fragment redefined a value and strict mode had been passed." + exit 1 +fi + if [ "$RUNMAKE" = "false" ]; then cp -T -- "$TMP_FILE" "$KCONFIG_CONFIG" echo "#" -- cgit v1.2.3 From 0058d07ec6aac8b1379f817b31839caa4ac8e448 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 28 Jul 2021 00:39:24 +0900 Subject: scripts: make some scripts executable Set the x bit to some scripts to make them directly executable. Especially, scripts/checkdeclares.pl is not hooked by anyone. It should be executable since it is tedious to type 'perl scripts/checkdeclares.pl'. The original patch [1] set the x bit properly, but it was lost when it was merged as commit 21917bded72c ("scripts: a new script for checking duplicate struct declaration"). 
[1] https://lore.kernel.org/lkml/20210401110943.1010796-1-wanjiabing@vivo.com/ Signed-off-by: Masahiro Yamada --- scripts/checkdeclares.pl | 0 scripts/gcc-plugins/gen-random-seed.sh | 0 scripts/syscallnr.sh | 0 scripts/xen-hypercalls.sh | 0 4 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 scripts/checkdeclares.pl mode change 100644 => 100755 scripts/gcc-plugins/gen-random-seed.sh mode change 100644 => 100755 scripts/syscallnr.sh mode change 100644 => 100755 scripts/xen-hypercalls.sh (limited to 'scripts') diff --git a/scripts/checkdeclares.pl b/scripts/checkdeclares.pl old mode 100644 new mode 100755 diff --git a/scripts/gcc-plugins/gen-random-seed.sh b/scripts/gcc-plugins/gen-random-seed.sh old mode 100644 new mode 100755 diff --git a/scripts/syscallnr.sh b/scripts/syscallnr.sh old mode 100644 new mode 100755 diff --git a/scripts/xen-hypercalls.sh b/scripts/xen-hypercalls.sh old mode 100644 new mode 100755 -- cgit v1.2.3 From 6f5b41a2f5a6314614e286274eb8e985248aac60 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Mon, 2 Aug 2021 11:39:08 -0700 Subject: Makefile: move initial clang flag handling into scripts/Makefile.clang With some of the changes we'd like to make to CROSS_COMPILE, the initial block of clang flag handling which controls things like the target triple, whether or not to use the integrated assembler and how to find GAS, and erroring on unknown warnings is becoming unwieldy. Move it into its own file under scripts/. Reviewed-by: Nathan Chancellor Signed-off-by: Nick Desaulniers Signed-off-by: Masahiro Yamada --- MAINTAINERS | 1 + Makefile | 15 +-------------- scripts/Makefile.clang | 14 ++++++++++++++ 3 files changed, 16 insertions(+), 14 deletions(-) create mode 100644 scripts/Makefile.clang (limited to 'scripts') diff --git a/MAINTAINERS b/MAINTAINERS index c9467d2839f5..3105fc57689e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4501,6 +4501,7 @@ B: https://github.com/ClangBuiltLinux/linux/issues C: irc://chat.freenode.net/clangbuiltlinux F: Documentation/kbuild/llvm.rst F: include/linux/compiler-clang.h +F: scripts/Makefile.clang F: scripts/clang-tools/ K: \b(?i:clang|llvm)\b diff --git a/Makefile b/Makefile index a0376430da20..f5419ce69ce8 100644 --- a/Makefile +++ b/Makefile @@ -584,20 +584,7 @@ endif CC_VERSION_TEXT = $(subst $(pound),,$(shell LC_ALL=C $(CC) --version 2>/dev/null | head -n 1)) ifneq ($(findstring clang,$(CC_VERSION_TEXT)),) -ifneq ($(CROSS_COMPILE),) -CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) -endif -ifeq ($(LLVM_IAS),1) -CLANG_FLAGS += -integrated-as -else -CLANG_FLAGS += -no-integrated-as -GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) -CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) -endif -CLANG_FLAGS += -Werror=unknown-warning-option -KBUILD_CFLAGS += $(CLANG_FLAGS) -KBUILD_AFLAGS += $(CLANG_FLAGS) -export CLANG_FLAGS +include $(srctree)/scripts/Makefile.clang endif # Include this also for config targets because some architectures need diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang new file mode 100644 index 000000000000..297932e973d4 --- /dev/null +++ b/scripts/Makefile.clang @@ -0,0 +1,14 @@ +ifneq ($(CROSS_COMPILE),) +CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) +endif +ifeq ($(LLVM_IAS),1) +CLANG_FLAGS += -integrated-as +else +CLANG_FLAGS += -no-integrated-as +GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) +endif +CLANG_FLAGS += 
-Werror=unknown-warning-option +KBUILD_CFLAGS += $(CLANG_FLAGS) +KBUILD_AFLAGS += $(CLANG_FLAGS) +export CLANG_FLAGS -- cgit v1.2.3 From 231ad7f409f16b9f9505f69e058dff488a7e6bde Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Mon, 2 Aug 2021 11:39:09 -0700 Subject: Makefile: infer --target from ARCH for CC=clang We get constant feedback that the command line invocation of make is too long when compiling with LLVM. CROSS_COMPILE is helpful when a toolchain has a prefix of the target triple, or is an absolute path outside of $PATH. Since a Clang binary is generally multi-targeted, we can infer a given target from SRCARCH/ARCH. If CROSS_COMPILE is not set, simply set --target= for CLANG_FLAGS, KBUILD_CFLAGS, and KBUILD_AFLAGS based on $SRCARCH. Previously, we'd cross compile via: $ ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make LLVM=1 LLVM_IAS=1 Now: $ ARCH=arm64 make LLVM=1 LLVM_IAS=1 For native builds (not involving cross compilation) we now explicitly specify a target triple rather than rely on the implicit host triple. Link: https://github.com/ClangBuiltLinux/linux/issues/1399 Suggested-by: Arnd Bergmann Suggested-by: Linus Torvalds Suggested-by: Masahiro Yamada Suggested-by: Nathan Chancellor Acked-by: Arnd Bergmann Reviewed-by: Nathan Chancellor Signed-off-by: Nick Desaulniers Acked-by: Miguel Ojeda Signed-off-by: Masahiro Yamada --- scripts/Makefile.clang | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang index 297932e973d4..1f4e3eb70f88 100644 --- a/scripts/Makefile.clang +++ b/scripts/Makefile.clang @@ -1,6 +1,27 @@ -ifneq ($(CROSS_COMPILE),) +# Individual arch/{arch}/Makefiles should use -EL/-EB to set intended +# endianness and -m32/-m64 to set word size based on Kconfigs instead of +# relying on the target triple. +CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi +CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu +CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl +CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu +CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu +CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu +CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu +CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu +CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu +CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(SRCARCH)) + +ifeq ($(CROSS_COMPILE),) +ifeq ($(CLANG_TARGET_FLAGS),) +$(error Specify CROSS_COMPILE or add '--target=' option to scripts/Makefile.clang) +else +CLANG_FLAGS += --target=$(CLANG_TARGET_FLAGS) +endif # CLANG_TARGET_FLAGS +else CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) -endif +endif # CROSS_COMPILE + ifeq ($(LLVM_IAS),1) CLANG_FLAGS += -integrated-as else -- cgit v1.2.3 From f12b034afeb3a977bbb1c6584dedc0f3dc666f14 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 6 Aug 2021 10:27:01 -0700 Subject: scripts/Makefile.clang: default to LLVM_IAS=1 LLVM_IAS=1 controls enabling clang's integrated assembler via -integrated-as. This was an explicit opt in until we could enable assembler support in Clang for more architectures. Now we have support and CI coverage of LLVM_IAS=1 for all architectures, except for a few remaining bugs affecting s390 and powerpc. This commit flips the default from opt in via LLVM_IAS=1 to opt out via LLVM_IAS=0. CI systems or developers that were previously doing builds with CC=clang or LLVM=1 without explicitly setting LLVM_IAS must now explicitly opt out via LLVM_IAS=0, otherwise they will be implicitly opted-in.
This finally shortens the command line invocation when cross compiling with LLVM to simply: $ make ARCH=arm64 LLVM=1 Signed-off-by: Nick Desaulniers Reviewed-by: Nathan Chancellor Signed-off-by: Masahiro Yamada --- Documentation/kbuild/llvm.rst | 14 ++++++++------ scripts/Makefile.clang | 6 +++--- 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'scripts') diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst index f8a360958f4c..e87ed5479963 100644 --- a/Documentation/kbuild/llvm.rst +++ b/Documentation/kbuild/llvm.rst @@ -60,17 +60,14 @@ They can be enabled individually. The full list of the parameters: :: OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump READELF=llvm-readelf \ HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar HOSTLD=ld.lld -Currently, the integrated assembler is disabled by default. You can pass -``LLVM_IAS=1`` to enable it. +The integrated assembler is enabled by default. You can pass ``LLVM_IAS=0`` to +disable it. Omitting CROSS_COMPILE ---------------------- As explained above, ``CROSS_COMPILE`` is used to set ``--target=``. -Unless ``LLVM_IAS=1`` is specified, ``CROSS_COMPILE`` is also used to derive -``--prefix=`` to search for the GNU assembler and linker. - If ``CROSS_COMPILE`` is not specified, the ``--target=`` is inferred from ``ARCH``. @@ -78,7 +75,12 @@ That means if you use only LLVM tools, ``CROSS_COMPILE`` becomes unnecessary. For example, to cross-compile the arm64 kernel:: - make ARCH=arm64 LLVM=1 LLVM_IAS=1 + make ARCH=arm64 LLVM=1 + +If ``LLVM_IAS=0`` is specified, ``CROSS_COMPILE`` is also used to derive +``--prefix=`` to search for the GNU assembler and linker. :: + + make ARCH=arm64 LLVM=1 LLVM_IAS=0 CROSS_COMPILE=aarch64-linux-gnu- Supported Architectures ----------------------- diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang index 1f4e3eb70f88..3ae63bd35582 100644 --- a/scripts/Makefile.clang +++ b/scripts/Makefile.clang @@ -22,12 +22,12 @@ else CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) endif # CROSS_COMPILE -ifeq ($(LLVM_IAS),1) -CLANG_FLAGS += -integrated-as -else +ifeq ($(LLVM_IAS),0) CLANG_FLAGS += -no-integrated-as GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) +else +CLANG_FLAGS += -integrated-as endif CLANG_FLAGS += -Werror=unknown-warning-option KBUILD_CFLAGS += $(CLANG_FLAGS) -- cgit v1.2.3 From bed4ed3057e495dc91ac4049770319f23c579b32 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 31 Jul 2021 01:54:01 +0300 Subject: scripts/kernel-doc: Override -Werror from KCFLAGS with KDOC_WERROR Since commit 2c12c8103d8f ("scripts/kernel-doc: optionally treat warnings as errors"), the kernel-doc script will treat warnings as errors when one of the following conditions is true: - The KDOC_WERROR environment variable is non-zero - The KCFLAGS environment variable contains -Werror - The -Werror parameter is passed to kernel-doc Checking KCFLAGS for -Werror allows piggy-backing on the C compiler error handling. However, unlike the C compiler, kernel-doc has no provision for -Wno-error. This makes compiling the kernel with -Werror (to catch regressions) and W=1 (to enable more checks) always fail, without the same possibility as offered by the C compiler to treating some selected warnings as warnings despite the global -Werror setting. To fix this, evaluate KDOC_WERROR after KCFLAGS, which allows disabling the warnings-as-errors behaviour of kernel-doc selectively by setting KDOC_WERROR=0. 
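For example, assuming a tree whose remaining kernel-doc warnings are known and tolerated, C compiler warnings can stay fatal while kernel-doc mismatches are reported without aborting the build:

  $ KCFLAGS=-Werror KDOC_WERROR=0 make W=1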
Signed-off-by: Laurent Pinchart Link: https://lore.kernel.org/r/20210730225401.4401-1-laurent.pinchart+renesas@ideasonboard.com Signed-off-by: Jonathan Corbet --- scripts/kernel-doc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'scripts') diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 7c4a6a507ac4..cfcb60737957 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -329,10 +329,6 @@ if (defined($ENV{'KBUILD_VERBOSE'})) { $verbose = "$ENV{'KBUILD_VERBOSE'}"; } -if (defined($ENV{'KDOC_WERROR'})) { - $Werror = "$ENV{'KDOC_WERROR'}"; -} - if (defined($ENV{'KCFLAGS'})) { my $kcflags = "$ENV{'KCFLAGS'}"; @@ -341,6 +337,10 @@ if (defined($ENV{'KCFLAGS'})) { } } +if (defined($ENV{'KDOC_WERROR'})) { + $Werror = "$ENV{'KDOC_WERROR'}"; +} + # Generated docbook code is inserted in a template at a point where # docbook v3.1 requires a non-zero sequence of RefEntry's; see: # https://www.oasis-open.org/docbook/documentation/reference/html/refentry.html -- cgit v1.2.3 From 1e688dd2a3d6759d416616ff07afc4bb836c4213 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 13 Apr 2021 16:38:10 +0000 Subject: powerpc/bug: Provide better flexibility to WARN_ON/__WARN_FLAGS() with asm goto Using asm goto in __WARN_FLAGS() and WARN_ON() allows more flexibility to GCC. For that add an entry to the exception table so that program_check_exception() knows where to resume execution after a WARNING. Here are two examples. The first one is done on PPC32 (which benefits from the previous patch), the second is on PPC64. unsigned long test(struct pt_regs *regs) { int ret; WARN_ON(regs->msr & MSR_PR); return regs->gpr[3]; } unsigned long test9w(unsigned long a, unsigned long b) { if (WARN_ON(!b)) return 0; return a / b; } Before the patch: 000003a8 <test>: 3a8: 81 23 00 84 lwz r9,132(r3) 3ac: 71 29 40 00 andi. r9,r9,16384 3b0: 40 82 00 0c bne 3bc <test+0x14> 3b4: 80 63 00 0c lwz r3,12(r3) 3b8: 4e 80 00 20 blr 3bc: 0f e0 00 00 twui r0,0 3c0: 80 63 00 0c lwz r3,12(r3) 3c4: 4e 80 00 20 blr 0000000000000bf0 <.test9w>: bf0: 7c 89 00 74 cntlzd r9,r4 bf4: 79 29 d1 82 rldicl r9,r9,58,6 bf8: 0b 09 00 00 tdnei r9,0 bfc: 2c 24 00 00 cmpdi r4,0 c00: 41 82 00 0c beq c0c <.test9w+0x1c> c04: 7c 63 23 92 divdu r3,r3,r4 c08: 4e 80 00 20 blr c0c: 38 60 00 00 li r3,0 c10: 4e 80 00 20 blr After the patch: 000003a8 <test>: 3a8: 81 23 00 84 lwz r9,132(r3) 3ac: 71 29 40 00 andi. r9,r9,16384 3b0: 40 82 00 0c bne 3bc <test+0x14> 3b4: 80 63 00 0c lwz r3,12(r3) 3b8: 4e 80 00 20 blr 3bc: 0f e0 00 00 twui r0,0 0000000000000c50 <.test9w>: c50: 7c 89 00 74 cntlzd r9,r4 c54: 79 29 d1 82 rldicl r9,r9,58,6 c58: 0b 09 00 00 tdnei r9,0 c5c: 7c 63 23 92 divdu r3,r3,r4 c60: 4e 80 00 20 blr c70: 38 60 00 00 li r3,0 c74: 4e 80 00 20 blr In the first example, we see GCC doesn't need to duplicate what happens after the trap. In the second example, we see that GCC doesn't need to emit a test and a branch in the likely path in addition to the trap.
We've got some WARN_ON() in .softirqentry.text section so it needs to be added in the OTHER_TEXT_SECTIONS in modpost.c Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/389962b1b702e3c78d169e59bcfac56282889173.1618331882.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/64/kup.h | 2 +- arch/powerpc/include/asm/bug.h | 54 ++++++++++++++++++---- arch/powerpc/include/asm/extable.h | 14 ++++++ arch/powerpc/include/asm/ppc_asm.h | 11 +---- arch/powerpc/kernel/entry_64.S | 2 +- arch/powerpc/kernel/misc_32.S | 2 +- arch/powerpc/kernel/traps.c | 9 +++- scripts/mod/modpost.c | 2 +- .../selftests/powerpc/primitives/asm/extable.h | 1 + 9 files changed, 72 insertions(+), 25 deletions(-) create mode 120000 tools/testing/selftests/powerpc/primitives/asm/extable.h (limited to 'scripts') diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h index a1cc73a88710..170339969b7c 100644 --- a/arch/powerpc/include/asm/book3s/64/kup.h +++ b/arch/powerpc/include/asm/book3s/64/kup.h @@ -90,7 +90,7 @@ /* Prevent access to userspace using any key values */ LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED) 999: tdne \gpr1, \gpr2 - EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) + EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67) #endif .endm diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index d844de5adfcb..1ee0f22313ee 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -4,6 +4,7 @@ #ifdef __KERNEL__ #include +#include #ifdef CONFIG_BUG @@ -30,6 +31,11 @@ .endm #endif /* verbose */ +.macro EMIT_WARN_ENTRY addr,file,line,flags + EX_TABLE(\addr,\addr+4) + EMIT_BUG_ENTRY \addr,\file,\line,\flags +.endm + #else /* !__ASSEMBLY__ */ /* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and sizeof(struct bug_entry), respectively */ @@ -58,6 +64,16 @@ "i" (sizeof(struct bug_entry)), \ ##__VA_ARGS__) +#define WARN_ENTRY(insn, flags, label, ...) \ + asm_volatile_goto( \ + "1: " insn "\n" \ + EX_TABLE(1b, %l[label]) \ + _EMIT_BUG_ENTRY \ + : : "i" (__FILE__), "i" (__LINE__), \ + "i" (flags), \ + "i" (sizeof(struct bug_entry)), \ + ##__VA_ARGS__ : : label) + /* * BUG_ON() and WARN_ON() do their best to cooperate with compile-time * optimisations. 
However depending on the complexity of the condition @@ -70,7 +86,15 @@ } while (0) #define HAVE_ARCH_BUG -#define __WARN_FLAGS(flags) BUG_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags)) +#define __WARN_FLAGS(flags) do { \ + __label__ __label_warn_on; \ + \ + WARN_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags), __label_warn_on); \ + unreachable(); \ + \ +__label_warn_on: \ + break; \ +} while (0) #ifdef CONFIG_PPC64 #define BUG_ON(x) do { \ @@ -83,15 +107,24 @@ } while (0) #define WARN_ON(x) ({ \ - int __ret_warn_on = !!(x); \ - if (__builtin_constant_p(__ret_warn_on)) { \ - if (__ret_warn_on) \ + bool __ret_warn_on = false; \ + do { \ + if (__builtin_constant_p((x))) { \ + if (!(x)) \ + break; \ __WARN(); \ - } else { \ - BUG_ENTRY(PPC_TLNEI " %4, 0", \ - BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \ - "r" (__ret_warn_on)); \ - } \ + __ret_warn_on = true; \ + } else { \ + __label__ __label_warn_on; \ + \ + WARN_ENTRY(PPC_TLNEI " %4, 0", \ + BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \ + __label_warn_on, "r" (x)); \ + break; \ +__label_warn_on: \ + __ret_warn_on = true; \ + } \ + } while (0); \ unlikely(__ret_warn_on); \ }) @@ -104,8 +137,11 @@ #ifdef __ASSEMBLY__ .macro EMIT_BUG_ENTRY addr,file,line,flags .endm +.macro EMIT_WARN_ENTRY addr,file,line,flags +.endm #else /* !__ASSEMBLY__ */ #define _EMIT_BUG_ENTRY +#define _EMIT_WARN_ENTRY #endif #endif /* CONFIG_BUG */ diff --git a/arch/powerpc/include/asm/extable.h b/arch/powerpc/include/asm/extable.h index eb91b2d2935a..26ce2e5c0fa8 100644 --- a/arch/powerpc/include/asm/extable.h +++ b/arch/powerpc/include/asm/extable.h @@ -17,6 +17,8 @@ #define ARCH_HAS_RELATIVE_EXTABLE +#ifndef __ASSEMBLY__ + struct exception_table_entry { int insn; int fixup; @@ -28,3 +30,15 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x) } #endif + +/* + * Helper macro for exception table entries + */ +#define EX_TABLE(_fault, _target) \ + stringify_in_c(.section __ex_table,"a";)\ + stringify_in_c(.balign 4;) \ + stringify_in_c(.long (_fault) - . ;) \ + stringify_in_c(.long (_target) - . ;) \ + stringify_in_c(.previous) + +#endif diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 116c1519728a..ffe712307e11 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -10,6 +10,7 @@ #include #include #include +#include #ifdef __ASSEMBLY__ @@ -752,16 +753,6 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) #endif /* __ASSEMBLY__ */ -/* - * Helper macro for exception table entries - */ -#define EX_TABLE(_fault, _target) \ - stringify_in_c(.section __ex_table,"a";)\ - stringify_in_c(.balign 4;) \ - stringify_in_c(.long (_fault) - . ;) \ - stringify_in_c(.long (_target) - . 
;) \ - stringify_in_c(.previous) - #define SOFT_MASK_TABLE(_start, _end) \ stringify_in_c(.section __soft_mask_table,"a";)\ stringify_in_c(.balign 8;) \ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 15720f8661a1..70cff7b49e17 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -309,7 +309,7 @@ _GLOBAL(enter_rtas) */ lbz r0,PACAIRQSOFTMASK(r13) 1: tdeqi r0,IRQS_ENABLED - EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING + EMIT_WARN_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING #endif /* Hard-disable interrupts */ diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 39ab15419592..d8645efff902 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -237,7 +237,7 @@ _GLOBAL(copy_page) addi r3,r3,-4 0: twnei r5, 0 /* WARN if r3 is not cache aligned */ - EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING + EMIT_WARN_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING addi r4,r4,-4 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index c8f648727d36..51d4f5faf425 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1477,8 +1477,13 @@ static void do_program_check(struct pt_regs *regs) if (!(regs->msr & MSR_PR) && /* not user-mode */ report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) { - regs_add_return_ip(regs, 4); - return; + const struct exception_table_entry *entry; + + entry = search_exception_tables(bugaddr); + if (entry) { + regs_set_return_ip(regs, extable_fixup(entry) + regs->nip - bugaddr); + return; + } } _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); return; diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 270a7df898e2..1209e1786af7 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -931,7 +931,7 @@ static void check_section(const char *modname, struct elf_info *elf, ".kprobes.text", ".cpuidle.text", ".noinstr.text" #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \ ".fixup", ".entry.text", ".exception.text", ".text.*", \ - ".coldtext" + ".coldtext", ".softirqentry.text" #define INIT_SECTIONS ".init.*" #define MEM_INIT_SECTIONS ".meminit.*" diff --git a/tools/testing/selftests/powerpc/primitives/asm/extable.h b/tools/testing/selftests/powerpc/primitives/asm/extable.h new file mode 120000 index 000000000000..6385f059a951 --- /dev/null +++ b/tools/testing/selftests/powerpc/primitives/asm/extable.h @@ -0,0 +1 @@ +../../../../../../arch/powerpc/include/asm/extable.h \ No newline at end of file -- cgit v1.2.3 From 1ee7943c33431e93faa49bf8fe38f1ad70c48705 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 20 Aug 2021 11:24:20 -0500 Subject: kbuild: Enable dtc 'pci_device_reg' warning by default There's only a couple of instances of the 'pci_device_reg' warnings left and they look legit, so let's enable the warning by default. 
Cc: Heiko Stuebner Cc: Nicolas Saenz Julienne Cc: soc@kernel.org Signed-off-by: Rob Herring Link: https://lore.kernel.org/r/20210820165011.3257112-1-robh@kernel.org/ --- scripts/Makefile.lib | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'scripts') diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 10950559b223..c57f14ac6f99 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -309,8 +309,7 @@ DTC_FLAGS += -Wno-unit_address_vs_reg \ -Wno-alias_paths \ -Wno-graph_child_address \ -Wno-simple_bus_reg \ - -Wno-unique_unit_address \ - -Wno-pci_device_reg + -Wno-unique_unit_address endif ifneq ($(findstring 2,$(KBUILD_EXTRA_WARN)),) -- cgit v1.2.3 From cc6711b0bf36de068b10490198d05ac168377989 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Thu, 26 Aug 2021 13:39:09 +0300 Subject: PCI / VFIO: Add 'override_only' support for VFIO PCI sub system Expose an 'override_only' helper macro (i.e. PCI_DRIVER_OVERRIDE_DEVICE_VFIO) for VFIO PCI sub system and add the required code to prefix its matching entries with "vfio_" in modules.alias file. It allows VFIO device drivers to include match entries in the modules.alias file produced by kbuild that are not used for normal driver autoprobing and module autoloading. Drivers using these match entries can be connected to the PCI device manually, by userspace, using the existing driver_override sysfs. For example the resulting modules.alias may have: alias pci:v000015B3d00001021sv*sd*bc*sc*i* mlx5_core alias vfio_pci:v000015B3d00001021sv*sd*bc*sc*i* mlx5_vfio_pci alias vfio_pci:v*d*sv*sd*bc*sc*i* vfio_pci In this example mlx5_core and mlx5_vfio_pci match to the same PCI device. The kernel will autoload and autobind to mlx5_core but the kernel and udev mechanisms will ignore mlx5_vfio_pci. When userspace wants to change a device to the VFIO subsystem it can implement a generic algorithm: 1) Identify the sysfs path to the device: /sys/devices/pci0000:00/0000:00:01.0/0000:01:00.0 2) Get the modalias string from the kernel: $ cat /sys/bus/pci/devices/0000:01:00.0/modalias pci:v000015B3d00001021sv000015B3sd00000001bc02sc00i00 3) Prefix it with vfio_: vfio_pci:v000015B3d00001021sv000015B3sd00000001bc02sc00i00 4) Search modules.alias for the above string and select the entry that has the fewest *'s: alias vfio_pci:v000015B3d00001021sv*sd*bc*sc*i* mlx5_vfio_pci 5) modprobe the matched module name: $ modprobe mlx5_vfio_pci 6) cat the matched module name to driver_override: echo mlx5_vfio_pci > /sys/bus/pci/devices/0000:01:00.0/driver_override 7) unbind device from original module echo 0000:01:00.0 > /sys/bus/pci/devices/0000:01:00.0/driver/unbind 8) probe PCI drivers (or explicitly bind to mlx5_vfio_pci) echo 0000:01:00.0 > /sys/bus/pci/drivers_probe The algorithm is independent of bus type. In future the other buses with VFIO device drivers, like platform and ACPI, can use this algorithm as well. This patch is the infrastructure to provide the information in the modules.alias to userspace. Convert the only VFIO pci_driver which results in one new line in the modules.alias: alias vfio_pci:v*d*sv*sd*bc*sc*i* vfio_pci Later series introduce additional HW specific VFIO PCI drivers, such as mlx5_vfio_pci. 
Signed-off-by: Max Gurtovoy Signed-off-by: Jason Gunthorpe Acked-by: Bjorn Helgaas # for pci.h Signed-off-by: Yishai Hadas Link: https://lore.kernel.org/r/20210826103912.128972-11-yishaih@nvidia.com Signed-off-by: Alex Williamson --- drivers/vfio/pci/vfio_pci.c | 9 ++++++++- include/linux/mod_devicetable.h | 4 ++++ include/linux/pci.h | 14 ++++++++++++++ scripts/mod/devicetable-offsets.c | 1 + scripts/mod/file2alias.c | 17 +++++++++++++++-- 5 files changed, 42 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 163e560c4495..85fd638a5955 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -178,9 +178,16 @@ static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn) return vfio_pci_core_sriov_configure(pdev, nr_virtfn); } +static const struct pci_device_id vfio_pci_table[] = { + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_ANY_ID, PCI_ANY_ID) }, /* match all by default */ + {} +}; + +MODULE_DEVICE_TABLE(pci, vfio_pci_table); + static struct pci_driver vfio_pci_driver = { .name = "vfio-pci", - .id_table = NULL, /* only dynamic ids */ + .id_table = vfio_pci_table, .probe = vfio_pci_probe, .remove = vfio_pci_remove, .sriov_configure = vfio_pci_sriov_configure, diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 2e3ba6d9ece0..ae2e75d15b21 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -16,6 +16,10 @@ typedef unsigned long kernel_ulong_t; #define PCI_ANY_ID (~0) +enum { + PCI_ID_F_VFIO_DRIVER_OVERRIDE = 1, +}; + /** * struct pci_device_id - PCI device ID structure * @vendor: Vendor ID to match (or PCI_ANY_ID) diff --git a/include/linux/pci.h b/include/linux/pci.h index 0506b1a8c921..527a1dfd1d06 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -916,6 +916,20 @@ struct pci_driver { .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \ .subdevice = PCI_ANY_ID, .override_only = (driver_override) +/** + * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO + * "driver_override" PCI device. + * @vend: the 16 bit PCI Vendor ID + * @dev: the 16 bit PCI Device ID + * + * This macro is used to create a struct pci_device_id that matches a + * specific device. The subvendor and subdevice fields will be set to + * PCI_ANY_ID and the driver_override will be set to + * PCI_ID_F_VFIO_DRIVER_OVERRIDE. + */ +#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \ + PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE) + /** * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem * @vend: the 16 bit PCI Vendor ID diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c index 9bb6c7edccc4..cc3625617a0e 100644 --- a/scripts/mod/devicetable-offsets.c +++ b/scripts/mod/devicetable-offsets.c @@ -42,6 +42,7 @@ int main(void) DEVID_FIELD(pci_device_id, subdevice); DEVID_FIELD(pci_device_id, class); DEVID_FIELD(pci_device_id, class_mask); + DEVID_FIELD(pci_device_id, override_only); DEVID(ccw_device_id); DEVID_FIELD(ccw_device_id, match_flags); diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 7c97fa8e36bc..49aba862073e 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -426,7 +426,7 @@ static int do_ieee1394_entry(const char *filename, return 1; } -/* Looks like: pci:vNdNsvNsdNbcNscNiN. */ +/* Looks like: pci:vNdNsvNsdNbcNscNiN or _pci:vNdNsvNsdNbcNscNiN. 
*/ static int do_pci_entry(const char *filename, void *symval, char *alias) { @@ -440,8 +440,21 @@ static int do_pci_entry(const char *filename, DEF_FIELD(symval, pci_device_id, subdevice); DEF_FIELD(symval, pci_device_id, class); DEF_FIELD(symval, pci_device_id, class_mask); + DEF_FIELD(symval, pci_device_id, override_only); + + switch (override_only) { + case 0: + strcpy(alias, "pci:"); + break; + case PCI_ID_F_VFIO_DRIVER_OVERRIDE: + strcpy(alias, "vfio_pci:"); + break; + default: + warn("Unknown PCI driver_override alias %08X\n", + override_only); + return 0; + } - strcpy(alias, "pci:"); ADD(alias, "v", vendor != PCI_ANY_ID, vendor); ADD(alias, "d", device != PCI_ANY_ID, device); ADD(alias, "sv", subvendor != PCI_ANY_ID, subvendor); -- cgit v1.2.3 From 5f6e0fe01b6b33894cf6f61b359ab5a6d2b7674e Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 10 Jun 2021 11:03:31 +0900 Subject: parisc: Fix compile failure when building 64-bit kernel natively Commit 23243c1ace9f ("arch: use cross_compiling to check whether it is a cross build or not") broke 64-bit parisc builds on 32-bit parisc systems. Helge mentioned: - 64-bit parisc userspace is not supported yet [1] - hppa gcc does not support "-m64" flag [2] That means, parisc developers working on a 32-bit parisc machine need to use hppa64-linux-gnu-gcc (cross compiler) for building the 64-bit parisc kernel. After the offending commit, gcc is used in such a case because both $(SRCARCH) and $(SUBARCH) are 'parisc', hence cross_compiling is unset. A correct way is to introduce ARCH=parisc64 because building the 64-bit parisc kernel on a 32-bit parisc system is not exactly a native build, but rather a semi-cross build. [1]: https://lore.kernel.org/linux-parisc/5dfd81eb-c8ca-b7f5-e80e-8632767c022d@gmx.de/#t [2]: https://lore.kernel.org/linux-parisc/89515325-fc21-31da-d238-6f7a9abbf9a0@gmx.de/ Fixes: 23243c1ace9f ("arch: use cross_compiling to check whether it is a cross build or not") Signed-off-by: Masahiro Yamada Reported-by: Meelis Roos Tested-by: Meelis Roos Cc: # v5.13+ Signed-off-by: Helge Deller --- Makefile | 5 +++++ arch/parisc/Makefile | 6 +++--- scripts/subarch.include | 2 +- 3 files changed, 9 insertions(+), 4 deletions(-) (limited to 'scripts') diff --git a/Makefile b/Makefile index 61741e9d9c6e..79284e3383c2 100644 --- a/Makefile +++ b/Makefile @@ -404,6 +404,11 @@ ifeq ($(ARCH),sparc64) SRCARCH := sparc endif +# Additional ARCH settings for parisc +ifeq ($(ARCH),parisc64) + SRCARCH := parisc +endif + export cross_compiling := ifneq ($(SRCARCH),$(SUBARCH)) cross_compiling := 1 diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 6c27940eff03..fcde3ffa0221 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -25,18 +25,18 @@ CHECKFLAGS += -D__hppa__=1 ifdef CONFIG_64BIT UTS_MACHINE := parisc64 CHECKFLAGS += -D__LP64__=1 -CC_ARCHES = hppa64 LD_BFD := elf64-hppa-linux else # 32-bit -CC_ARCHES = hppa hppa2.0 hppa1.1 LD_BFD := elf32-hppa-linux endif # select defconfig based on actual architecture -ifeq ($(shell uname -m),parisc64) +ifeq ($(ARCH),parisc64) KBUILD_DEFCONFIG := generic-64bit_defconfig + CC_ARCHES := hppa64 else KBUILD_DEFCONFIG := generic-32bit_defconfig + CC_ARCHES := hppa hppa2.0 hppa1.1 endif export LD_BFD diff --git a/scripts/subarch.include b/scripts/subarch.include index 650682821126..776849a3c500 100644 --- a/scripts/subarch.include +++ b/scripts/subarch.include @@ -7,7 +7,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ -e s/sun4u/sparc64/ \ -e s/arm.*/arm/ -e 
s/sa110/arm/ \ - -e s/s390x/s390/ -e s/parisc64/parisc/ \ + -e s/s390x/s390/ \ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ -e s/riscv.*/riscv/) -- cgit v1.2.3 From 850ded46c64299f04d15e39caaba21963fb966f8 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 16 Aug 2021 11:05:19 -0700 Subject: kbuild: Fix TRIM_UNUSED_KSYMS with LTO_CLANG With CONFIG_LTO_CLANG, we currently link modules into native code just before modpost, which means with TRIM_UNUSED_KSYMS enabled, we still look at the LLVM bitcode in the .o files when generating the list of used symbols. As the bitcode doesn't yet have calls to compiler intrinsics and llvm-nm doesn't see function references that only exist in function-level inline assembly, we currently need a whitelist for TRIM_UNUSED_KSYMS to work with LTO. This change moves module LTO linking to happen earlier, and thus avoids the issue with LLVM bitcode and TRIM_UNUSED_KSYMS entirely, allowing us to also drop the whitelist from gen_autoksyms.sh. Link: https://github.com/ClangBuiltLinux/linux/issues/1369 Signed-off-by: Sami Tolvanen Reviewed-by: Alexander Lobakin Tested-by: Alexander Lobakin Signed-off-by: Masahiro Yamada --- scripts/Makefile.build | 27 ++++++++++++++++++++++++++- scripts/Makefile.lib | 7 +++++++ scripts/Makefile.modfinal | 21 ++------------------- scripts/Makefile.modpost | 22 +++------------------- scripts/gen_autoksyms.sh | 12 ------------ 5 files changed, 38 insertions(+), 51 deletions(-) (limited to 'scripts') diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 02197cb8e3a7..ea549579bfb7 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -88,6 +88,10 @@ endif targets-for-modules := $(patsubst %.o, %.mod, $(filter %.o, $(obj-m))) +ifdef CONFIG_LTO_CLANG +targets-for-modules += $(patsubst %.o, %.lto.o, $(filter %.o, $(obj-m))) +endif + ifdef need-modorder targets-for-modules += $(obj)/modules.order endif @@ -271,12 +275,33 @@ $(obj)/%.o: $(src)/%.c $(recordmcount_source) $$(objtool_dep) FORCE $(call if_changed_rule,cc_o_c) $(call cmd,force_checksrc) +ifdef CONFIG_LTO_CLANG +# Module .o files may contain LLVM bitcode, compile them into native code +# before ELF processing +quiet_cmd_cc_lto_link_modules = LTO [M] $@ +cmd_cc_lto_link_modules = \ + $(LD) $(ld_flags) -r -o $@ \ + $(shell [ -s $(@:.lto.o=.o.symversions) ] && \ + echo -T $(@:.lto.o=.o.symversions)) \ + --whole-archive $(filter-out FORCE,$^) + +ifdef CONFIG_STACK_VALIDATION +# objtool was skipped for LLVM bitcode, run it now that we have compiled +# modules into native code +cmd_cc_lto_link_modules += ; \ + $(objtree)/tools/objtool/objtool $(objtool_args) --module $@ +endif + +$(obj)/%.lto.o: $(obj)/%.o FORCE + $(call if_changed,cc_lto_link_modules) +endif + cmd_mod = { \ echo $(if $($*-objs)$($*-y)$($*-m), $(addprefix $(obj)/, $($*-objs) $($*-y) $($*-m)), $(@:.mod=.o)); \ $(undefined_syms) echo; \ } > $@ -$(obj)/%.mod: $(obj)/%.o FORCE +$(obj)/%.mod: $(obj)/%$(mod-prelink-ext).o FORCE $(call if_changed,mod) quiet_cmd_cc_lst_c = MKLST $@ diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 10950559b223..af1c920a585c 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -225,6 +225,13 @@ dtc_cpp_flags = -Wp,-MMD,$(depfile).pre.tmp -nostdinc \ $(addprefix -I,$(DTC_INCLUDE)) \ -undef -D__DTS__ +ifeq ($(CONFIG_LTO_CLANG),y) +# With CONFIG_LTO_CLANG, .o files in modules might be LLVM bitcode, so we +# need to run LTO to compile them into native code (.lto.o) before further +# processing. 
+mod-prelink-ext := .lto +endif + # Objtool arguments are also needed for modfinal with LTO, so we define # then here to avoid duplication. objtool_args = \ diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal index 5e9b8057fb24..ff805777431c 100644 --- a/scripts/Makefile.modfinal +++ b/scripts/Makefile.modfinal @@ -9,7 +9,7 @@ __modfinal: include include/config/auto.conf include $(srctree)/scripts/Kbuild.include -# for c_flags and objtool_args +# for c_flags and mod-prelink-ext include $(srctree)/scripts/Makefile.lib # find all modules listed in modules.order @@ -30,23 +30,6 @@ quiet_cmd_cc_o_c = CC [M] $@ ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink) -ifdef CONFIG_LTO_CLANG -# With CONFIG_LTO_CLANG, reuse the object file we compiled for modpost to -# avoid a second slow LTO link -prelink-ext := .lto - -# ELF processing was skipped earlier because we didn't have native code, -# so let's now process the prelinked binary before we link the module. - -ifdef CONFIG_STACK_VALIDATION -cmd_ld_ko_o += \ - $(objtree)/tools/objtool/objtool $(objtool_args) \ - $(@:.ko=$(prelink-ext).o); - -endif # CONFIG_STACK_VALIDATION - -endif # CONFIG_LTO_CLANG - quiet_cmd_ld_ko_o = LD [M] $@ cmd_ld_ko_o += \ $(LD) -r $(KBUILD_LDFLAGS) \ @@ -72,7 +55,7 @@ if_changed_except = $(if $(call newer_prereqs_except,$(2))$(cmd-check), \ # Re-generate module BTFs if either module's .ko or vmlinux changed -$(modules): %.ko: %$(prelink-ext).o %.mod.o scripts/module.lds $(if $(KBUILD_BUILTIN),vmlinux) FORCE +$(modules): %.ko: %$(mod-prelink-ext).o %.mod.o scripts/module.lds $(if $(KBUILD_BUILTIN),vmlinux) FORCE +$(call if_changed_except,ld_ko_o,vmlinux) ifdef CONFIG_DEBUG_INFO_BTF_MODULES +$(if $(newer-prereqs),$(call cmd,btf_ko)) diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index c383ba33d837..eef56d629799 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -41,7 +41,7 @@ __modpost: include include/config/auto.conf include $(srctree)/scripts/Kbuild.include -# for ld_flags +# for mod-prelink-ext include $(srctree)/scripts/Makefile.lib MODPOST = scripts/mod/modpost \ @@ -118,22 +118,6 @@ $(input-symdump): @echo >&2 ' Modules may not have dependencies or modversions.' @echo >&2 ' You may get many unresolved symbol warnings.' -ifdef CONFIG_LTO_CLANG -# With CONFIG_LTO_CLANG, .o files might be LLVM bitcode, so we need to run -# LTO to compile them into native code before running modpost -prelink-ext := .lto - -quiet_cmd_cc_lto_link_modules = LTO [M] $@ -cmd_cc_lto_link_modules = \ - $(LD) $(ld_flags) -r -o $@ \ - $(shell [ -s $(@:.lto.o=.o.symversions) ] && \ - echo -T $(@:.lto.o=.o.symversions)) \ - --whole-archive $^ - -%.lto.o: %.o - $(call if_changed,cc_lto_link_modules) -endif - modules := $(sort $(shell cat $(MODORDER))) # KBUILD_MODPOST_WARN can be set to avoid error out in case of undefined symbols @@ -144,9 +128,9 @@ endif # Read out modules.order to pass in modpost. # Otherwise, allmodconfig would fail with "Argument list too long". 
quiet_cmd_modpost = MODPOST $@ - cmd_modpost = sed 's/\.ko$$/$(prelink-ext)\.o/' $< | $(MODPOST) -T - + cmd_modpost = sed 's/\.ko$$/$(mod-prelink-ext)\.o/' $< | $(MODPOST) -T - -$(output-symdump): $(MODORDER) $(input-symdump) $(modules:.ko=$(prelink-ext).o) FORCE +$(output-symdump): $(MODORDER) $(input-symdump) $(modules:.ko=$(mod-prelink-ext).o) FORCE $(call if_changed,modpost) targets += $(output-symdump) diff --git a/scripts/gen_autoksyms.sh b/scripts/gen_autoksyms.sh index da320151e7c3..6ed0d225c8b1 100755 --- a/scripts/gen_autoksyms.sh +++ b/scripts/gen_autoksyms.sh @@ -26,18 +26,6 @@ if [ -n "$CONFIG_MODVERSIONS" ]; then needed_symbols="$needed_symbols module_layout" fi -# With CONFIG_LTO_CLANG, LLVM bitcode has not yet been compiled into a binary -# when the .mod files are generated, which means they don't yet contain -# references to certain symbols that will be present in the final binaries. -if [ -n "$CONFIG_LTO_CLANG" ]; then - # intrinsic functions - needed_symbols="$needed_symbols memcpy memmove memset" - # ftrace - needed_symbols="$needed_symbols _mcount" - # stack protector symbols - needed_symbols="$needed_symbols __stack_chk_fail __stack_chk_guard" -fi - ksym_wl= if [ -n "$CONFIG_UNUSED_KSYMS_WHITELIST" ]; then # Use 'eval' to expand the whitelist path and check if it is relative -- cgit v1.2.3 From 6796e80409b9031458e33dc39a3ac8aa3b93855b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 13 Aug 2021 15:30:04 +0900 Subject: kbuild: macrofy the condition of if_changed and friends Add a new macro that expands into $(newer-prereqs)$(cmd-check). It makes it easier to add common code for if_changed, if_changed_dep, and if_changed_rule. Signed-off-by: Masahiro Yamada --- scripts/Kbuild.include | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index f247e691562d..c3c975a92318 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -130,13 +130,15 @@ make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))) # PHONY targets skipped in both cases. newer-prereqs = $(filter-out $(PHONY),$?) +if-changed-cond = $(newer-prereqs)$(cmd-check) + # Execute command if command has changed or prerequisite(s) are updated. -if_changed = $(if $(newer-prereqs)$(cmd-check), \ +if_changed = $(if $(if-changed-cond), \ $(cmd); \ printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd, @:) # Execute the command and also postprocess generated .d dependencies file. -if_changed_dep = $(if $(newer-prereqs)$(cmd-check),$(cmd_and_fixdep),@:) +if_changed_dep = $(if $(if-changed-cond),$(cmd_and_fixdep),@:) cmd_and_fixdep = \ $(cmd); \ @@ -146,7 +148,7 @@ cmd_and_fixdep = \ # Usage: $(call if_changed_rule,foo) # Will check if $(cmd_foo) or any of the prerequisites changed, # and if so will execute $(rule_foo). -if_changed_rule = $(if $(newer-prereqs)$(cmd-check),$(rule_$(1)),@:) +if_changed_rule = $(if $(if-changed-cond),$(rule_$(1)),@:) ### # why - tell why a target got built -- cgit v1.2.3 From e1f86d7b4b2a5213b012c2b4fe3e5b6ad537686e Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 13 Aug 2021 15:30:05 +0900 Subject: kbuild: warn if FORCE is missing for if_changed(_dep,_rule) and filechk if_changed, if_changed_dep, and if_changed_rule must have FORCE as a prerequisite so the command line change is detected. Documentation/kbuild/makefiles.rst clearly explains it: Note: It is a typical mistake to forget the FORCE prerequisite. 
However, not all people follow the document. This mistake occurred again and again, so a compelling force is needed. Show a warning if FORCE is missing in the prerequisite of if_changed and friends. Same for filechk. Signed-off-by: Masahiro Yamada Tested-by: Nicolas Schier --- scripts/Kbuild.include | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index c3c975a92318..cdec22088423 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -57,6 +57,7 @@ kecho := $($(quiet)kecho) # - If the content differ the new file is used # - If they are equal no change, and no timestamp update define filechk + $(check-FORCE) $(Q)set -e; \ mkdir -p $(dir $@); \ trap "rm -f $(dot-target).tmp" EXIT; \ @@ -130,7 +131,11 @@ make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))) # PHONY targets skipped in both cases. newer-prereqs = $(filter-out $(PHONY),$?) -if-changed-cond = $(newer-prereqs)$(cmd-check) +# It is a typical mistake to forget the FORCE prerequisite. Check it here so +# no more breakage will slip in. +check-FORCE = $(if $(filter FORCE, $^),,$(warning FORCE prerequisite is missing)) + +if-changed-cond = $(newer-prereqs)$(cmd-check)$(check-FORCE) # Execute command if command has changed or prerequisite(s) are updated. if_changed = $(if $(if-changed-cond), \ -- cgit v1.2.3 From 2185a7e4b0ade86c2c57fc63d4a7535c40254bd0 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Mon, 16 Aug 2021 13:52:47 -0700 Subject: kbuild: Switch to 'f' variants of integrated assembler flag It has been brought up a few times in various code reviews that clang 3.5 introduced -f{,no-}integrated-as as the preferred way to enable and disable the integrated assembler, mentioning that -{no-,}integrated-as are now considered legacy flags. Switch the kernel over to using those variants in case there is ever a time where clang decides to remove the non-'f' variants of the flag. Also, fix a typo in a comment ("intergrated" -> "integrated"). Link: https://releases.llvm.org/3.5.0/tools/clang/docs/ReleaseNotes.html#new-compiler-flags Reviewed-by: Nick Desaulniers Signed-off-by: Nathan Chancellor Signed-off-by: Masahiro Yamada --- scripts/Makefile.clang | 4 ++-- scripts/as-version.sh | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'scripts') diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang index 3ae63bd35582..4cce8fd0779c 100644 --- a/scripts/Makefile.clang +++ b/scripts/Makefile.clang @@ -23,11 +23,11 @@ CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) endif # CROSS_COMPILE ifeq ($(LLVM_IAS),0) -CLANG_FLAGS += -no-integrated-as +CLANG_FLAGS += -fno-integrated-as GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) else -CLANG_FLAGS += -integrated-as +CLANG_FLAGS += -fintegrated-as endif CLANG_FLAGS += -Werror=unknown-warning-option KBUILD_CFLAGS += $(CLANG_FLAGS) diff --git a/scripts/as-version.sh b/scripts/as-version.sh index 8b9410e329df..1a21495e9ff0 100755 --- a/scripts/as-version.sh +++ b/scripts/as-version.sh @@ -21,14 +21,14 @@ get_canonical_version() echo $((10000 * $1 + 100 * ${2:-0} + ${3:-0})) } -# Clang fails to handle -Wa,--version unless -no-integrated-as is given. -# We check -(f)integrated-as, expecting it is explicitly passed in for the +# Clang fails to handle -Wa,--version unless -fno-integrated-as is given. 
+# We check -fintegrated-as, expecting it is explicitly passed in for the # integrated assembler case. check_integrated_as() { while [ $# -gt 0 ]; do - if [ "$1" = -integrated-as -o "$1" = -fintegrated-as ]; then - # For the intergrated assembler, we do not check the + if [ "$1" = -fintegrated-as ]; then + # For the integrated assembler, we do not check the # version here. It is the same as the clang version, and # it has been already checked by scripts/cc-version.sh. echo LLVM 0 -- cgit v1.2.3 From 52d83df682c82055961531853c066f4f16e234ea Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 19 Aug 2021 09:01:14 +0900 Subject: kbuild: Fix 'no symbols' warning when CONFIG_TRIM_UNUSD_KSYMS=y When CONFIG_TRIM_UNUSED_KSYMS is enabled, I see some warnings like this: nm: arch/x86/entry/vdso/vdso32/note.o: no symbols $NM (both GNU nm and llvm-nm) warns when no symbol is found in the object. Suppress the stderr. Fangrui Song mentioned binutils>=2.37 `nm -q` can be used to suppress "no symbols" [1], and llvm-nm>=13.0.0 supports -q as well. We cannot use it for now, but note it as a TODO. [1]: https://sourceware.org/bugzilla/show_bug.cgi?id=27408 Fixes: bbda5ec671d3 ("kbuild: simplify dependency generation for CONFIG_TRIM_UNUSED_KSYMS") Signed-off-by: Masahiro Yamada Reviewed-by: Nathan Chancellor --- scripts/gen_ksymdeps.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/gen_ksymdeps.sh b/scripts/gen_ksymdeps.sh index 1324986e1362..725e8c9c1b53 100755 --- a/scripts/gen_ksymdeps.sh +++ b/scripts/gen_ksymdeps.sh @@ -4,7 +4,13 @@ set -e # List of exported symbols -ksyms=$($NM $1 | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z) +# +# If the object has no symbol, $NM warns 'no symbols'. +# Suppress the stderr. +# TODO: +# Use -q instead of 2>/dev/null when we upgrade the minimum version of +# binutils to 2.37, llvm to 13.0.0. +ksyms=$($NM $1 2>/dev/null | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z) if [ -z "$ksyms" ]; then exit 0 -- cgit v1.2.3 From 265264b814c2121513eab2e1cffe196502af0961 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 19 Aug 2021 09:57:33 +0900 Subject: gen_compile_commands: extract compiler command from a series of commands The current gen_compile_commands.py assumes that objects are always built by a single command. It makes sense to support cases where objects are built by a series of commands: cmd_<target> := <command1> ; <command2> ; ... One use-case is that <command1> is a compiler command, and <command2> an objtool command. It allows *.cmd files to contain an objtool command so that any change in it triggers object rebuilds. If ; appears after the C source file, take the first command. Signed-off-by: Masahiro Yamada Reviewed-by: Kees Cook --- scripts/clang-tools/gen_compile_commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py index b7e9ecf16e56..0033eedce003 100755 --- a/scripts/clang-tools/gen_compile_commands.py +++ b/scripts/clang-tools/gen_compile_commands.py @@ -18,7 +18,7 @@ _DEFAULT_OUTPUT = 'compile_commands.json' _DEFAULT_LOG_LEVEL = 'WARNING' _FILENAME_PATTERN = r'^\..*\.cmd$' -_LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c)$' +_LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c) *(;|$)' _VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] # The tools/ directory adopts a different build system, and produces .cmd # files in a different format. Do not support it.
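To make the new pattern's behavior concrete, here is a small self-contained Python sketch (an editorial illustration, not part of the patch; the sample .cmd lines are invented, but the regex is the one added above):

  import re

  # The updated pattern: group 1 is the command, group 2 the C source file;
  # a trailing ';' introducing further commands is now tolerated, and only
  # the first command is captured.
  _LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c) *(;|$)'
  line_matcher = re.compile(_LINE_PATTERN)

  samples = [
      # A single command: the old behavior still works.
      'cmd_foo.o := gcc -Wall -c -o foo.o foo.c',
      # A series of commands: only the compiler command is extracted.
      'cmd_bar.o := gcc -Wall -c -o bar.o bar.c ; ./tools/objtool/objtool check bar.o',
  ]

  for line in samples:
      result = line_matcher.match(line)
      if result:
          print('command:', (result.group(1) + result.group(2)).strip())

Running it shows that the objtool invocation after the ';' is ignored, which is the point: the .cmd file can carry the extra command for rebuild tracking without confusing compile_commands.json generation.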
-- cgit v1.2.3 From f01ac2a15218f3282b0b09b7ef7f314cbd7dd2b3 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 19 Aug 2021 09:57:35 +0900 Subject: kbuild: remove unused quiet_cmd_update_lto_symversions This is not used anywhere because the short log is displayed when it is used through a $(call cmd,...) invocation. Signed-off-by: Masahiro Yamada Reviewed-by: Kees Cook --- scripts/Makefile.build | 1 - 1 file changed, 1 deletion(-) (limited to 'scripts') diff --git a/scripts/Makefile.build b/scripts/Makefile.build index ea549579bfb7..6961b4acacf8 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -406,7 +406,6 @@ $(subdir-builtin): $(obj)/%/built-in.a: $(obj)/% ; $(subdir-modorder): $(obj)/%/modules.order: $(obj)/% ; # combine symversions for later processing -quiet_cmd_update_lto_symversions = SYMVER $@ ifeq ($(CONFIG_LTO_CLANG) $(CONFIG_MODVERSIONS),y y) cmd_update_lto_symversions = \ rm -f $@.symversions \ -- cgit v1.2.3 From a8390ba9ddce15242a41ca04d097b75fd54fd63f Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 19 Aug 2021 09:57:36 +0900 Subject: kbuild: remove stale *.symversions cmd_update_lto_symversions merges all the existing *.symversions, but some of them might be stale. If the last EXPORT_SYMBOL is removed from a C file, the *.symversions file is not deleted or updated. It contains stale CRCs, but still they will be used for linking the vmlinux or modules. It is not a big deal when the EXPORT_SYMBOL is really removed. However, when the EXPORT_SYMBOL is moved to another file, the same __crc_ will appear twice in the merged *.symversions, possibly with different CRCs if the function argument is changed at the same time. It would confuse module versioning. If no EXPORT_SYMBOL is found, let's remove *.symversions explicitly. Signed-off-by: Masahiro Yamada Reviewed-by: Kees Cook --- scripts/Makefile.build | 2 ++ 1 file changed, 2 insertions(+) (limited to 'scripts') diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 6961b4acacf8..3efc984d4c69 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -177,6 +177,8 @@ cmd_modversions_c = \ if $(NM) $@ 2>/dev/null | grep -q __ksymtab; then \ $(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ > $@.symversions; \ + else \ + rm -f $@.symversions; \ fi; else cmd_modversions_c = \ -- cgit v1.2.3 From 8f1305124ea48943d1dd07683ed4f82c69b232ee Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 19 Aug 2021 09:57:37 +0900 Subject: kbuild: merge vmlinux_link() between the ordinary link and Clang LTO When Clang LTO is enabled, vmlinux_link() reuses vmlinux.o instead of re-linking ${KBUILD_VMLINUX_OBJS} and ${KBUILD_VMLINUX_LIBS}. That is the only difference here, so merge the similar code. Signed-off-by: Masahiro Yamada Reviewed-by: Kees Cook --- scripts/link-vmlinux.sh | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) (limited to 'scripts') diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 36ef7b37fc5d..a6c4d0bce3ba 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -154,12 +154,23 @@ vmlinux_link() local objects local strip_debug local map_option + local objs + local libs info LD ${output} # skip output file argument shift + if [ -n "${CONFIG_LTO_CLANG}" ]; then + # Use vmlinux.o instead of performing the slow LTO link again. + objs=vmlinux.o + libs= + else + objs="${KBUILD_VMLINUX_OBJS}" + libs="${KBUILD_VMLINUX_LIBS}" + fi + # The kallsyms linking does not need debug symbols included. 
if [ "$output" != "${output#.tmp_vmlinux.kallsyms}" ] ; then strip_debug=-Wl,--strip-debug @@ -170,22 +181,9 @@ vmlinux_link() fi if [ "${SRCARCH}" != "um" ]; then - if [ -n "${CONFIG_LTO_CLANG}" ]; then - # Use vmlinux.o instead of performing the slow LTO - # link again. - objects="--whole-archive \ - vmlinux.o \ - --no-whole-archive \ - ${@}" - else - objects="--whole-archive \ - ${KBUILD_VMLINUX_OBJS} \ - --no-whole-archive \ - --start-group \ - ${KBUILD_VMLINUX_LIBS} \ - --end-group \ - ${@}" - fi + objects="--whole-archive ${objs} --no-whole-archive \ + --start-group ${libs} --end-group \ + $@" ${LD} ${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux} \ ${strip_debug#-Wl,} \ -- cgit v1.2.3 From d40aecd108d2a6413d53f6f8339e787a23150595 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 19 Aug 2021 09:57:38 +0900 Subject: kbuild: do not remove 'linux' link in scripts/link-vmlinux.sh arch/um/Makefile passes the -f option to the ln command: linux: vmlinux @echo ' LINK $@' $(Q)ln -f $< $@ So, the hard link is always re-created, and the old one is removed anyway. Signed-off-by: Masahiro Yamada Reviewed-by: Kees Cook --- scripts/link-vmlinux.sh | 1 - 1 file changed, 1 deletion(-) (limited to 'scripts') diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index a6c4d0bce3ba..7b9c62e4d54a 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -206,7 +206,6 @@ vmlinux_link() -Wl,-T,${lds} \ ${objects} \ -lutil -lrt -lpthread - rm -f linux fi } -- cgit v1.2.3 From 5df77ad61fd71b1b342ba40d913ad5595480691c Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 19 Aug 2021 09:57:39 +0900 Subject: kbuild: merge vmlinux_link() between ARCH=um and other architectures For ARCH=um, ${CC} is used as the linker driver. Hence, the linker options are prefixed with -Wl, . Merge the similar code. I replaced the -T option with the long option --script= so that it works well with/without ${wl}. Signed-off-by: Masahiro Yamada Reviewed-by: Kees Cook --- scripts/link-vmlinux.sh | 56 ++++++++++++++++++++----------------------------- 1 file changed, 23 insertions(+), 33 deletions(-) (limited to 'scripts') diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 7b9c62e4d54a..d74cee5c4326 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -149,13 +149,12 @@ objtool_link() # ${2}, ${3}, ... - optional extra .o files vmlinux_link() { - local lds="${objtree}/${KBUILD_LDS}" local output=${1} - local objects - local strip_debug - local map_option local objs local libs + local ld + local ldflags + local ldlibs info LD ${output} @@ -171,42 +170,33 @@ vmlinux_link() libs="${KBUILD_VMLINUX_LIBS}" fi + if [ "${SRCARCH}" = "um" ]; then + wl=-Wl, + ld="${CC}" + ldflags="${CFLAGS_vmlinux}" + ldlibs="-lutil -lrt -lpthread" + else + wl= + ld="${LD}" + ldflags="${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux}" + ldlibs= + fi + + ldflags="${ldflags} ${wl}--script=${objtree}/${KBUILD_LDS}" + # The kallsyms linking does not need debug symbols included. 
if [ "$output" != "${output#.tmp_vmlinux.kallsyms}" ] ; then - strip_debug=-Wl,--strip-debug + ldflags="${ldflags} ${wl}--strip-debug" fi if [ -n "${CONFIG_VMLINUX_MAP}" ]; then - map_option="-Map=${output}.map" + ldflags="${ldflags} ${wl}-Map=${output}.map" fi - if [ "${SRCARCH}" != "um" ]; then - objects="--whole-archive ${objs} --no-whole-archive \ - --start-group ${libs} --end-group \ - $@" - - ${LD} ${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux} \ - ${strip_debug#-Wl,} \ - -o ${output} \ - ${map_option} \ - -T ${lds} ${objects} - else - objects="-Wl,--whole-archive \ - ${KBUILD_VMLINUX_OBJS} \ - -Wl,--no-whole-archive \ - -Wl,--start-group \ - ${KBUILD_VMLINUX_LIBS} \ - -Wl,--end-group \ - ${@}" - - ${CC} ${CFLAGS_vmlinux} \ - ${strip_debug} \ - -o ${output} \ - ${map_option:+-Wl,${map_option}} \ - -Wl,-T,${lds} \ - ${objects} \ - -lutil -lrt -lpthread - fi + ${ld} ${ldflags} -o ${output} \ + ${wl}--whole-archive ${objs} ${wl}--no-whole-archive \ + ${wl}--start-group ${libs} ${wl}--end-group \ + $@ ${ldlibs} } # generate .BTF typeinfo from DWARF debuginfo -- cgit v1.2.3 From 1439ebd2ce77242400518d4e6a1e85bebcd8084f Mon Sep 17 00:00:00 2001 From: Ariel Marcovitch Date: Sun, 22 Aug 2021 22:22:01 +0300 Subject: checkkconfigsymbols.py: Fix the '--ignore' option It seems like the implementation of the --ignore option is broken. In check_symbols_helper, when going through the list of files, a file is added to the list of source files to check if it matches the ignore pattern. Instead, as stated in the comment below this condition, the file should be added if it doesn't match the pattern. This means that when providing an ignore pattern, the only files that will be checked will be the ones we want the ignore, in addition to the Kconfig files that don't match the pattern (the check in parse_kconfig_files is done right) Signed-off-by: Ariel Marcovitch Signed-off-by: Masahiro Yamada --- scripts/checkkconfigsymbols.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py index 1548f9ce4682..b9b0f15e5880 100755 --- a/scripts/checkkconfigsymbols.py +++ b/scripts/checkkconfigsymbols.py @@ -329,7 +329,7 @@ def check_symbols_helper(pool, ignore): if REGEX_FILE_KCONFIG.match(gitfile): kconfig_files.append(gitfile) else: - if ignore and not re.match(ignore, gitfile): + if ignore and re.match(ignore, gitfile): continue # add source files that do not match the ignore pattern source_files.append(gitfile) -- cgit v1.2.3 From e54dd93a08228b9942d708b133ad3715d92712b0 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 28 Aug 2021 18:50:59 +0900 Subject: modpost: get the *.mod file path more simply get_src_version() strips 'o' or 'lto.o' from the end of the object file path (so, postfixlen is 1 or 5), then adds 'mod'. If you look at the code closely, mod->name already holds the base path with the extension stripped. Most of the code changes made by commit 7ac204b545f2 ("modpost: lto: strip .lto from module names") was actually unneeded. sumversion.c does not need strends(), so it can get back local in modpost.c again. 
Signed-off-by: Masahiro Yamada --- scripts/mod/modpost.c | 11 ++++++++++- scripts/mod/modpost.h | 9 --------- scripts/mod/sumversion.c | 7 +------ 3 files changed, 11 insertions(+), 16 deletions(-) (limited to 'scripts') diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 270a7df898e2..a26139aa57fd 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "modpost.h" #include "../../include/linux/license.h" @@ -89,6 +90,14 @@ modpost_log(enum loglevel loglevel, const char *fmt, ...) error_occurred = true; } +static inline bool strends(const char *str, const char *postfix) +{ + if (strlen(str) < strlen(postfix)) + return false; + + return strcmp(str + strlen(str) - strlen(postfix), postfix) == 0; +} + void *do_nofail(void *ptr, const char *expr) { if (!ptr) @@ -2060,7 +2069,7 @@ static void read_symbols(const char *modname) if (!mod->is_vmlinux) { version = get_modinfo(&info, "version"); if (version || all_versions) - get_src_version(modname, mod->srcversion, + get_src_version(mod->name, mod->srcversion, sizeof(mod->srcversion) - 1); } diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h index c1a895c0d682..0c47ff95c0e2 100644 --- a/scripts/mod/modpost.h +++ b/scripts/mod/modpost.h @@ -2,7 +2,6 @@ #include #include #include -#include #include #include #include @@ -178,14 +177,6 @@ static inline unsigned int get_secindex(const struct elf_info *info, return info->symtab_shndx_start[sym - info->symtab_start]; } -static inline bool strends(const char *str, const char *postfix) -{ - if (strlen(str) < strlen(postfix)) - return false; - - return strcmp(str + strlen(str) - strlen(postfix), postfix) == 0; -} - /* file2alias.c */ extern unsigned int cross_build; void handle_moddevtable(struct module *mod, struct elf_info *info, diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c index 760e6baa7eda..905c0ec291e1 100644 --- a/scripts/mod/sumversion.c +++ b/scripts/mod/sumversion.c @@ -391,14 +391,9 @@ void get_src_version(const char *modname, char sum[], unsigned sumlen) struct md4_ctx md; char *fname; char filelist[PATH_MAX + 1]; - int postfix_len = 1; - - if (strends(modname, ".lto.o")) - postfix_len = 5; /* objects for a module are listed in the first line of *.mod file. */ - snprintf(filelist, sizeof(filelist), "%.*smod", - (int)strlen(modname) - postfix_len, modname); + snprintf(filelist, sizeof(filelist), "%s.mod", modname); buf = read_text_file(filelist); -- cgit v1.2.3 From 44815c90210cd858c4a116c3eb35270160cecf81 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 28 Aug 2021 18:51:01 +0900 Subject: kbuild: clean up objtool_args slightly The code: $(if $(or $(CONFIG_GCOV_KERNEL),$(CONFIG_LTO_CLANG)), ...) ... can be simplified to: $(if $(CONFIG_GCOV_KERNEL)$(CONFIG_LTO_CLANG), ...) Also, remove meaningless commas at the end of $(if ...). Signed-off-by: Masahiro Yamada --- scripts/Makefile.lib | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'scripts') diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index af1c920a585c..cd011f3f6f78 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -236,13 +236,12 @@ endif # then here to avoid duplication.
objtool_args = \ $(if $(CONFIG_UNWINDER_ORC),orc generate,check) \ - $(if $(part-of-module), --module,) \ + $(if $(part-of-module), --module) \ $(if $(CONFIG_FRAME_POINTER),, --no-fp) \ - $(if $(or $(CONFIG_GCOV_KERNEL),$(CONFIG_LTO_CLANG)), \ - --no-unreachable,) \ - $(if $(CONFIG_RETPOLINE), --retpoline,) \ - $(if $(CONFIG_X86_SMAP), --uaccess,) \ - $(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount,) + $(if $(CONFIG_GCOV_KERNEL)$(CONFIG_LTO_CLANG), --no-unreachable)\ + $(if $(CONFIG_RETPOLINE), --retpoline) \ + $(if $(CONFIG_X86_SMAP), --uaccess) \ + $(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount) # Useful for describing the dependency of composite objects # Usage: -- cgit v1.2.3 From bc7cd2dd1f8e5889cc68b69984033ac5bef6ba61 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 30 Aug 2021 17:20:33 +0900 Subject: kbuild: redo fake deps at include/ksym/*.h Commit 0e0345b77ac4 ("kbuild: redo fake deps at include/config/*.h") simplified the Kconfig/fixdep interaction a lot. For CONFIG_FOO_BAR_BAZ, Kconfig now touches include/config/FOO_BAR_BAZ instead of the previous include/config/foo/bar/baz.h . This commit simplifies the TRIM_UNUSED_KSYMS feature in a similar way: - delete .h suffix - delete tolower() - put everything in 1 directory For EXPORT_SYMBOL(FOO_BAR_BAZ), scripts/adjust_autoksyms.sh now touches include/ksym/FOO_BAR_BAZ instead of include/ksym/foo/bar/baz.h . This is more precise, avoiding possibly unnecessary rebuilds. EXPORT_SYMBOL(FOO_BAR_BAZ) EXPORT_SYMBOL(_FOO_BAR_BAZ) EXPORT_SYMBOL(__FOO_BAR_BAZ) were previously mapped to the same header, include/ksym/foo/bar/baz.h but now are handled separately. Signed-off-by: Masahiro Yamada --- scripts/adjust_autoksyms.sh | 4 ++-- scripts/gen_ksymdeps.sh | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'scripts') diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh index d8f6f9c63043..59fdb875e818 100755 --- a/scripts/adjust_autoksyms.sh +++ b/scripts/adjust_autoksyms.sh @@ -42,10 +42,10 @@ $CONFIG_SHELL $srctree/scripts/gen_autoksyms.sh "$new_ksyms_file" changed=$( count=0 sort "$cur_ksyms_file" "$new_ksyms_file" | uniq -u | -sed -n 's/^#define __KSYM_\(.*\) 1/\1/p' | tr "A-Z_" "a-z/" | +sed -n 's/^#define __KSYM_\(.*\) 1/\1/p' | while read sympath; do if [ -z "$sympath" ]; then continue; fi - depfile="include/ksym/${sympath}.h" + depfile="include/ksym/${sympath}" mkdir -p "$(dirname "$depfile")" touch "$depfile" # Filesystems with coarse time precision may create timestamps diff --git a/scripts/gen_ksymdeps.sh b/scripts/gen_ksymdeps.sh index 725e8c9c1b53..8ee533f33659 100755 --- a/scripts/gen_ksymdeps.sh +++ b/scripts/gen_ksymdeps.sh @@ -10,7 +10,7 @@ set -e # TODO: # Use -q instead of 2>/dev/null when we upgrade the minimum version of # binutils to 2.37, llvm to 13.0.0. 
-ksyms=$($NM $1 2>/dev/null | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z) +ksyms=$($NM $1 2>/dev/null | sed -n 's/.*__ksym_marker_\(.*\)/\1/p') if [ -z "$ksyms" ]; then exit 0 @@ -21,8 +21,7 @@ echo "ksymdeps_$1 := \\" for s in $ksyms do - echo $s | sed -e 's:^_*: $(wildcard include/ksym/:' \ - -e 's:__*:/:g' -e 's/$/.h) \\/' + printf ' $(wildcard include/ksym/%s) \\\n' "$s" done echo -- cgit v1.2.3 From 20fbb11fe4ea99e02d77824613f1438bea456683 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 6 Sep 2021 15:47:02 +1000 Subject: don't make the syscall checking produce errors from warnings Signed-off-by: Stephen Rothwell Signed-off-by: Linus Torvalds --- scripts/checksyscalls.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh index b7609958ee36..fd9777f63f14 100755 --- a/scripts/checksyscalls.sh +++ b/scripts/checksyscalls.sh @@ -266,4 +266,4 @@ syscall_list() { } (ignore_list && syscall_list $(dirname $0)/../arch/x86/entry/syscalls/syscall_32.tbl) | \ -$* -E -x c - > /dev/null +$* -Wno-error -E -x c - > /dev/null -- cgit v1.2.3 From d2af5aa6c036d3fd2c1ff3379ffe3e6805929952 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 7 Sep 2021 19:59:51 -0700 Subject: checkpatch: support wide strings Allow prefixing typical strings with L for wide strings and u for unicode strings. Link: https://lkml.kernel.org/r/20210801170733.1.I3f9784fd3c1007d08ec2e70b151d137687575495@changeid Signed-off-by: Joe Perches Signed-off-by: Simon Glass Cc: Dwaipayan Ray Cc: Lukas Bulwahn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/checkpatch.pl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 461d4221e4a4..a65753c05a69 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -501,7 +501,7 @@ our $Binary = qr{(?i)0b[01]+$Int_type?}; our $Hex = qr{(?i)0x[0-9a-f]+$Int_type?}; our $Int = qr{[0-9]+$Int_type?}; our $Octal = qr{0[0-7]+$Int_type?}; -our $String = qr{"[X\t]*"}; +our $String = qr{(?:\b[Lu])?"[X\t]*"}; our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?}; our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?}; our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?}; @@ -6132,7 +6132,8 @@ sub process { } # concatenated string without spaces between elements - if ($line =~ /$String[A-Za-z0-9_]/ || $line =~ /[A-Za-z0-9_]$String/) { + if ($line =~ /$String[A-Z_]/ || + ($line =~ /([A-Za-z0-9_]+)$String/ && $1 !~ /^[Lu]$/)) { if (CHK("CONCATENATED_STRING", "Concatenated strings should use spaces between elements\n" . $herecurr) && $fix) { @@ -6145,7 +6146,7 @@ sub process { } # uncoalesced string fragments - if ($line =~ /$String\s*"/) { + if ($line =~ /$String\s*[Lu]?"/) { if (WARN("STRING_FRAGMENTS", "Consecutive strings are generally better as a single string\n" . $herecurr) && $fix) { -- cgit v1.2.3 From 046fc741e35e9dee7c7f7c4173ba3d79f938286f Mon Sep 17 00:00:00 2001 From: Mimi Zohar Date: Tue, 7 Sep 2021 19:59:54 -0700 Subject: checkpatch: make email address check case insensitive Instead of checkpatch requiring the patch author to exactly match the signed-off-by tag, commit 48ca2d8ac8a1 ("checkpatch: add new warnings to author signoff checks.") safely relaxed this requirement. 
Although the local-part of an email address (local-part@domain) may be case sensitive, exploiting the case sensitivity of mailbox local-parts impedes interoperability and is discouraged. Mailbox domains follow normal DNS rules and are hence not case sensitive. (Refer to https://datatracker.ietf.org/doc/html/rfc5321#section-2.4.) Further relax the patch author and signed-off-by tag comparison by making the email address check case insensitive. Link: https://lkml.kernel.org/r/20210816112725.173206-1-zohar@linux.ibm.com Signed-off-by: Mimi Zohar Acked-by: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/checkpatch.pl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index a65753c05a69..161ce7fe5d1e 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2909,10 +2909,10 @@ sub process { my ($email_name, $email_comment, $email_address, $comment1) = parse_email($ctx); my ($author_name, $author_comment, $author_address, $comment2) = parse_email($author); - if ($email_address eq $author_address && $email_name eq $author_name) { + if (lc $email_address eq lc $author_address && $email_name eq $author_name) { $author_sob = $ctx; $authorsignoff = 2; - } elsif ($email_address eq $author_address) { + } elsif (lc $email_address eq lc $author_address) { $author_sob = $ctx; $authorsignoff = 3; } elsif ($email_name eq $author_name) { -- cgit v1.2.3 From 4ce9f970457899defdf68e26e0502c7245002eb3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 7 Sep 2021 19:59:57 -0700 Subject: checkpatch: improve GIT_COMMIT_ID test The preferred git commit id reference has the form commit <SHA-1> ("Title line") where SHA-1 is the commit hex hash with a minimum length of 12 and ("Title line") is the complete title line of the commit with a (" prefix and ") suffix. The current tests fail when the "Title line" has one or more embedded double quotes. Improve the test that finds the commit SHA-1 hex hash and then ("Title line") by using $balanced_parens for a maximum of 3 consecutive lines.
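The matching strategy can be sketched outside of Perl as well. A rough Python equivalent of the balanced-parentheses idea (an editorial sketch, not checkpatch's actual code; the sample input lines are invented):

  import re

  COMMIT_RE = re.compile(r'\bcommit\s+([0-9a-f]{5,40})\s*\(', re.IGNORECASE)

  def find_commit_reference(lines, start):
      # Join at most 3 consecutive lines, mirroring the checkpatch limit.
      text = ' '.join(lines[start:start + 3])
      m = COMMIT_RE.search(text)
      if not m:
          return None
      # Scan for the parenthesis that balances the opening one, so embedded
      # double quotes in the title do not end the match early.
      depth, i = 1, m.end()
      while i < len(text) and depth:
          depth += {'(': 1, ')': -1}.get(text[i], 0)
          i += 1
      if depth:
          return None
      title = text[m.end():i - 1].strip().strip('"')
      return m.group(1), title

  lines = ['commit 4ce9f970457899 ("checkpatch: improve',
           '"GIT_COMMIT_ID" test")']
  print(find_commit_reference(lines, 0))

The inner double quotes survive intact because only parenthesis depth, not quoting, decides where the reference ends.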
[akpm@linux-foundation.org: add missing &&] Link: https://lkml.kernel.org/r/976c6cdd680db4b55ae31b5fc2d1779da5c0dc66.camel@perches.com Signed-off-by: Joe Perches Cc: Dwaipayan Ray Cc: Lukas Bulwahn Cc: Denis Efremov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/checkpatch.pl | 82 ++++++++++++++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 31 deletions(-) (limited to 'scripts') diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 161ce7fe5d1e..c27d2312cfc3 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -1181,7 +1181,8 @@ sub git_commit_info { # git log --format='%H %s' -1 $line | # echo "commit $(cut -c 1-12,41-)" # done - } elsif ($lines[0] =~ /^fatal: ambiguous argument '$commit': unknown revision or path not in the working tree\./) { + } elsif ($lines[0] =~ /^fatal: ambiguous argument '$commit': unknown revision or path not in the working tree\./ || + $lines[0] =~ /^fatal: bad object $commit/) { $id = undef; } else { $id = substr($lines[0], 0, 12); @@ -2587,6 +2588,8 @@ sub process { my $reported_maintainer_file = 0; my $non_utf8_charset = 0; + my $last_git_commit_id_linenr = -1; + my $last_blank_line = 0; my $last_coalesced_string_linenr = -1; @@ -3170,10 +3173,20 @@ sub process { } # Check for git id commit length and improperly formed commit descriptions - if ($in_commit_log && !$commit_log_possible_stack_dump && +# A correctly formed commit description is: +# commit ("Complete commit subject") +# with the commit subject '("' prefix and '")' suffix +# This is a fairly compilicated block as it tests for what appears to be +# bare SHA-1 hash with minimum length of 5. It also avoids several types of +# possible SHA-1 matches. +# A commit match can span multiple lines so this block attempts to find a +# complete typical commit on a maximum of 3 lines + if ($perl_version_ok && + $in_commit_log && !$commit_log_possible_stack_dump && $line !~ /^\s*(?:Link|Patchwork|http|https|BugLink|base-commit):/i && $line !~ /^This reverts commit [0-9a-f]{7,40}/ && - ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || + (($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || + ($line =~ /\bcommit\s*$/i && defined($rawlines[$linenr]) && $rawlines[$linenr] =~ /^\s*[0-9a-f]{5,}\b/i)) || ($line =~ /(?:\s|^)[0-9a-f]{12,40}(?:[\s"'\(\[]|$)/i && $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i && $line !~ /\bfixes:\s*[0-9a-f]{12,40}/i))) { @@ -3183,49 +3196,56 @@ sub process { my $long = 0; my $case = 1; my $space = 1; - my $hasdesc = 0; - my $hasparens = 0; my $id = '0123456789ab'; my $orig_desc = "commit description"; my $description = ""; + my $herectx = $herecurr; + my $has_parens = 0; + my $has_quotes = 0; + + my $input = $line; + if ($line =~ /(?:\bcommit\s+[0-9a-f]{5,}|\bcommit\s*$)/i) { + for (my $n = 0; $n < 2; $n++) { + if ($input =~ /\bcommit\s+[0-9a-f]{5,}\s*($balanced_parens)/i) { + $orig_desc = $1; + $has_parens = 1; + # Always strip leading/trailing parens then double quotes if existing + $orig_desc = substr($orig_desc, 1, -1); + if ($orig_desc =~ /^".*"$/) { + $orig_desc = substr($orig_desc, 1, -1); + $has_quotes = 1; + } + last; + } + last if ($#lines < $linenr + $n); + $input .= " " . 
trim($rawlines[$linenr + $n]); + $herectx .= "$rawlines[$linenr + $n]\n"; + } + $herectx = $herecurr if (!$has_parens); + } - if ($line =~ /\b(c)ommit\s+([0-9a-f]{5,})\b/i) { + if ($input =~ /\b(c)ommit\s+([0-9a-f]{5,})\b/i) { $init_char = $1; $orig_commit = lc($2); - } elsif ($line =~ /\b([0-9a-f]{12,40})\b/i) { + $short = 0 if ($input =~ /\bcommit\s+[0-9a-f]{12,40}/i); + $long = 1 if ($input =~ /\bcommit\s+[0-9a-f]{41,}/i); + $space = 0 if ($input =~ /\bcommit [0-9a-f]/i); + $case = 0 if ($input =~ /\b[Cc]ommit\s+[0-9a-f]{5,40}[^A-F]/); + } elsif ($input =~ /\b([0-9a-f]{12,40})\b/i) { $orig_commit = lc($1); } - $short = 0 if ($line =~ /\bcommit\s+[0-9a-f]{12,40}/i); - $long = 1 if ($line =~ /\bcommit\s+[0-9a-f]{41,}/i); - $space = 0 if ($line =~ /\bcommit [0-9a-f]/i); - $case = 0 if ($line =~ /\b[Cc]ommit\s+[0-9a-f]{5,40}[^A-F]/); - if ($line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("([^"]+)"\)/i) { - $orig_desc = $1; - $hasparens = 1; - } elsif ($line =~ /\bcommit\s+[0-9a-f]{5,}\s*$/i && - defined $rawlines[$linenr] && - $rawlines[$linenr] =~ /^\s*\("([^"]+)"\)/) { - $orig_desc = $1; - $hasparens = 1; - } elsif ($line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("[^"]+$/i && - defined $rawlines[$linenr] && - $rawlines[$linenr] =~ /^\s*[^"]+"\)/) { - $line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("([^"]+)$/i; - $orig_desc = $1; - $rawlines[$linenr] =~ /^\s*([^"]+)"\)/; - $orig_desc .= " " . $1; - $hasparens = 1; - } - ($id, $description) = git_commit_info($orig_commit, $id, $orig_desc); if (defined($id) && - ($short || $long || $space || $case || ($orig_desc ne $description) || !$hasparens)) { + ($short || $long || $space || $case || ($orig_desc ne $description) || !$has_quotes) && + $last_git_commit_id_linenr != $linenr - 1) { ERROR("GIT_COMMIT_ID", - "Please use git commit description style 'commit <12+ chars of sha1> (\"\")' - ie: '${init_char}ommit $id (\"$description\")'\n" . $herecurr); + "Please use git commit description style 'commit <12+ chars of sha1> (\"<title line>\")' - ie: '${init_char}ommit $id (\"$description\")'\n" . $herectx); } + #don't report the next line if this line ends in commit and the sha1 hash is the next line + $last_git_commit_id_linenr = $linenr if ($line =~ /\bcommit\s*$/i); } # Check for added, moved or deleted files -- cgit v1.2.3 From b285437d1d929785a5bef3603da78d2cd5341893 Mon Sep 17 00:00:00 2001 From: Randy Dunlap <rdunlap@infradead.org> Date: Tue, 7 Sep 2021 20:00:59 -0700 Subject: scripts: check_extable: fix typo in user error message Fix typo ("and" should be "an") in an error message. Link: https://lkml.kernel.org/r/20210727002943.29774-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap <rdunlap@infradead.org> Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- scripts/check_extable.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/check_extable.sh b/scripts/check_extable.sh index 93af93c7b346..4b380564cf74 100755 --- a/scripts/check_extable.sh +++ b/scripts/check_extable.sh @@ -4,7 +4,7 @@ obj=$1 -file ${obj} | grep -q ELF || (echo "${obj} is not and ELF file." 1>&2 ; exit 0) +file ${obj} | grep -q ELF || (echo "${obj} is not an ELF file." 1>&2 ; exit 0) # Bail out early if there isn't an __ex_table section in this object file. 
objdump -hj __ex_table ${obj} 2> /dev/null > /dev/null -- cgit v1.2.3 From 54fed35fd3939398be292e4090b0b1c5ff2238b4 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Thu, 26 Aug 2021 22:10:29 +0800 Subject: riscv: Enable BUILDTIME_TABLE_SORT Enable BUILDTIME_TABLE_SORT to sort the exception table at build time rather than during boot. Signed-off-by: Jisheng Zhang Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 1 + scripts/sorttable.c | 1 + 2 files changed, 2 insertions(+) (limited to 'scripts') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index baf60fc350a4..c1abbc876e5b 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -41,6 +41,7 @@ config RISCV select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_HUGE_PMD_SHARE if 64BIT select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU + select BUILDTIME_TABLE_SORT if MMU select CLONE_BACKWARDS select CLINT_TIMER if !MMU select COMMON_CLK diff --git a/scripts/sorttable.c b/scripts/sorttable.c index 0ef3abfc4a51..f355869c65cd 100644 --- a/scripts/sorttable.c +++ b/scripts/sorttable.c @@ -349,6 +349,7 @@ static int do_file(char const *const fname, void *addr) case EM_ARM: case EM_MICROBLAZE: case EM_MIPS: + case EM_RISCV: case EM_XTENSA: break; default: -- cgit v1.2.3 From 76ae847497bc5207c479de5e2ac487270008b19b Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 10 Sep 2021 16:40:38 -0700 Subject: Documentation: raise minimum supported version of GCC to 5.1 commit fad7cd3310db ("nbd: add the check to prevent overflow in __nbd_ioctl()") raised an issue from the fallback helpers added in commit f0907827a8a9 ("compiler.h: enable builtin overflow checkers and add fallback code") Specifically, the helpers for checking whether the results of a multiplication overflowed (__unsigned_mul_overflow, __signed_mul_overflow) use the division operator when !COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW. This is problematic for 64b operands on 32b hosts. Also, because the macro is type agnostic, it is very difficult to write a similarly type generic macro that dispatches to one of: * div64_s64 * div64_u64 * div_s64 * div_u64 Raising the minimum supported versions allows us to remove all of the fallback helpers for !COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW, instead dispatching to the compiler builtins. arm64 has already raised the minimum supported GCC version to 5.1; do this for all targets now. See the link below for the previous discussion.
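For reference, the version gate itself is mechanical. A Python sketch of the comparison (an editorial illustration; the canonicalization formula mirrors the get_canonical_version() helper visible in the scripts/as-version.sh diff earlier, and the sample versions are invented):

  # 10000 * major + 100 * minor + patch, as in get_canonical_version().
  def canonical(version):
      parts = [int(p) for p in version.split('.')] + [0, 0]
      return 10000 * parts[0] + 100 * parts[1] + parts[2]

  MIN_GCC = '5.1.0'  # the new floor set by this commit

  for found in ['4.9.2', '5.1.0', '11.2.0']:
      verdict = 'ok' if canonical(found) >= canonical(MIN_GCC) else 'too old'
      print(found, verdict)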
Link: https://lore.kernel.org/all/20210909182525.372ee687@canb.auug.org.au/ Link: https://lore.kernel.org/lkml/CAK7LNASs6dvU6D3jL2GG3jW58fXfaj6VNOe55NJnTB8UPuk2pA@mail.gmail.com/ Link: https://github.com/ClangBuiltLinux/linux/issues/1438 Reported-by: Stephen Rothwell <sfr@canb.auug.org.au> Reported-by: Nathan Chancellor <nathan@kernel.org> Suggested-by: Rasmus Villemoes <linux@rasmusvillemoes.dk> Signed-off-by: Nick Desaulniers <ndesaulniers@google.com> Reviewed-by: Kees Cook <keescook@chromium.org> Reviewed-by: Nathan Chancellor <nathan@kernel.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- Documentation/process/changes.rst | 2 +- scripts/min-tool-version.sh | 8 +------- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'scripts') diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index d3a8557b66a1..e35ab74a0f80 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils. ====================== =============== ======================================== Program Minimal version Command to check the version ====================== =============== ======================================== -GNU C 4.9 gcc --version +GNU C 5.1 gcc --version Clang/LLVM (optional) 10.0.1 clang --version GNU make 3.81 make --version binutils 2.23 ld -v diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh index 319f92104f56..4edc708baa63 100755 --- a/scripts/min-tool-version.sh +++ b/scripts/min-tool-version.sh @@ -17,13 +17,7 @@ binutils) echo 2.23.0 ;; gcc) - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293 - # https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk - if [ "$SRCARCH" = arm64 ]; then - echo 5.1.0 - else - echo 4.9.0 - fi + echo 5.1.0 ;; icc) # temporary -- cgit v1.2.3 From d62d5aed3354ae57d57fae1e59948913f360d4eb Mon Sep 17 00:00:00 2001 From: Ariel Marcovitch <arielmarcovitch@gmail.com> Date: Wed, 1 Sep 2021 17:52:12 +0300 Subject: checkkconfigsymbols.py: Forbid passing 'HEAD' to --commit As opposed to the --diff option, --commit can get ref names instead of commit hashes. When using the --commit option, the script resets the working directory to the commit before the given ref, by adding '~' to the end of the ref. However, the 'HEAD' ref is relative, and so when the working directory is reset to 'HEAD~', 'HEAD' points to what was 'HEAD~'. Then when the script resets to 'HEAD' it actually stays in the same commit. In this case, the script won't report any cases because there is no diff between the cases of the two refs. Prevent the user from using HEAD refs. A better solution might be to resolve the refs before doing the reset, but for now just disallow such refs. 
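The "better solution" mentioned above could look roughly like this (a hedged Python sketch of ref resolution, not the script's code; it assumes it is run inside a git work tree):

  import subprocess
  import sys

  def resolve_ref(ref):
      # Pin a possibly-relative ref (HEAD, HEAD~2, a branch name) to a
      # fixed commit hash up front, so a later 'git reset' cannot change
      # what the ref resolves to.
      try:
          out = subprocess.run(['git', 'rev-parse', ref],
                               capture_output=True, text=True, check=True)
      except subprocess.CalledProcessError:
          sys.exit('Could not resolve ref: ' + ref)
      return out.stdout.strip()

  if __name__ == '__main__':
      print(resolve_ref(sys.argv[1] if len(sys.argv) > 1 else 'HEAD'))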
Signed-off-by: Ariel Marcovitch Signed-off-by: Masahiro Yamada --- scripts/checkkconfigsymbols.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'scripts') diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py index b9b0f15e5880..c57c990c3244 100755 --- a/scripts/checkkconfigsymbols.py +++ b/scripts/checkkconfigsymbols.py @@ -102,6 +102,9 @@ def parse_options(): "continue.") if args.commit: + if args.commit.startswith('HEAD'): + sys.exit("The --commit option can't use the HEAD ref") + args.find = False if args.ignore: -- cgit v1.2.3 From aa0f5ea12e477b2940a57fcda4908627e0beed0f Mon Sep 17 00:00:00 2001 From: Ariel Marcovitch Date: Wed, 1 Sep 2021 19:49:52 +0300 Subject: checkkconfigsymbols.py: Remove skipping of help lines in parse_kconfig_file When parsing Kconfig files to find symbol definitions and references, lines after a 'help' line are skipped until a new config definition starts. However, Kconfig statements can actually come after a help section, as long as they have shallower indentation. These are skipped by the parser. This means that symbols referenced in such statements are ignored by this function and thus are not considered undefined references in case the symbol is not defined. Remove the 'skip' logic entirely, as it is not needed if we just use the STMT regex to find the end of help lines. However, this means that keywords that appear as part of the help message (i.e. with the same indentation as the help lines) will be considered as a reference/definition. This can happen now as well, but only with REGEX_KCONFIG_DEF lines. Also, the keyword must have a SYMBOL after it, which probably means that someone referenced a config in the help so it seems like a bonus :) The real solution is to keep track of the indentation when the first help line is encountered and then handle DEF and STMT lines only if the indentation is shallower.
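That indentation-tracking idea might look like the following Python sketch (an editorial illustration of the outlined approach, not the script's current code; tabs are naively counted as single characters):

  import re

  REGEX_HELP = re.compile(r'^(\s+)help\s*$')

  def split_help_lines(kconfig_text):
      # Remember the indentation of the first line after 'help';
      # deeper-or-equal lines are help text, a shallower line ends the
      # help section and is parsed as a normal statement again.
      help_indent = None
      in_help = False
      for line in kconfig_text.splitlines():
          if not line.strip():
              continue
          if REGEX_HELP.match(line):
              in_help, help_indent = True, None
              continue
          indent = len(line) - len(line.lstrip())
          if in_help:
              if help_indent is None:
                  help_indent = indent
              if indent >= help_indent:
                  print('help text :', line.strip())
                  continue
              in_help = False
          print('statement :', line.strip())

  split_help_lines("""\
  config FOO
  \tbool "foo"
  \thelp
  \t  This references CONFIG_BAR but is only help text.
  \tdepends on BAR
  """)

Here the 'depends on BAR' line, being shallower than the help body, is correctly treated as a statement rather than swallowed as help text.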
Signed-off-by: Ariel Marcovitch Signed-off-by: Masahiro Yamada --- scripts/checkkconfigsymbols.py | 8 -------- 1 file changed, 8 deletions(-) (limited to 'scripts') diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py index c57c990c3244..217d21abc86e 100755 --- a/scripts/checkkconfigsymbols.py +++ b/scripts/checkkconfigsymbols.py @@ -34,7 +34,6 @@ REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL) REGEX_KCONFIG_DEF = re.compile(DEF) REGEX_KCONFIG_EXPR = re.compile(EXPR) REGEX_KCONFIG_STMT = re.compile(STMT) -REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$") REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$") REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+") REGEX_QUOTES = re.compile("(\"(.*?)\")") @@ -435,7 +434,6 @@ def parse_kconfig_file(kfile): lines = [] defined = [] references = [] - skip = False if not os.path.exists(kfile): return defined, references @@ -451,12 +449,6 @@ def parse_kconfig_file(kfile): if REGEX_KCONFIG_DEF.match(line): symbol_def = REGEX_KCONFIG_DEF.findall(line) defined.append(symbol_def[0]) - skip = False - elif REGEX_KCONFIG_HELP.match(line): - skip = True - elif skip: - # ignore content of help messages - pass elif REGEX_KCONFIG_STMT.match(line): line = REGEX_QUOTES.sub("", line) symbols = get_symbols_in_line(line) -- cgit v1.2.3 From ec783c7cb2495c5a3b8ca10db8056d43c528f940 Mon Sep 17 00:00:00 2001 From: Kortan Date: Wed, 8 Sep 2021 11:28:48 +0800 Subject: gen_compile_commands: fix missing 'sys' package We need to import the 'sys' package since the script calls the sys.exit() method. Fixes: 6ad7cbc01527 ("Makefile: Add clang-tidy and static analyzer support to makefile") Signed-off-by: Kortan Reviewed-by: Nathan Chancellor Signed-off-by: Masahiro Yamada --- scripts/clang-tools/gen_compile_commands.py | 1 + 1 file changed, 1 insertion(+) (limited to 'scripts') diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py index 0033eedce003..1d1bde1fd45e 100755 --- a/scripts/clang-tools/gen_compile_commands.py +++ b/scripts/clang-tools/gen_compile_commands.py @@ -13,6 +13,7 @@ import logging import os import re import subprocess +import sys _DEFAULT_OUTPUT = 'compile_commands.json' _DEFAULT_LOG_LEVEL = 'WARNING' -- cgit v1.2.3 From 7c80144626db460bc3969027810a540741865fb1 Mon Sep 17 00:00:00 2001 From: Ramji Jiyani Date: Thu, 16 Sep 2021 09:21:22 +0000 Subject: kbuild: Fix comment typo in scripts/Makefile.modpost Change comment "create one <module>.mod.c file pr. module" to "create one <module>.mod.c file per module" Signed-off-by: Ramji Jiyani Signed-off-by: Masahiro Yamada --- scripts/Makefile.modpost | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index eef56d629799..48585c4d04ad 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -13,7 +13,7 @@ # Stage 2 is handled by this file and does the following # 1) Find all modules listed in modules.order # 2) modpost is then used to # 3) create one <module>.mod.c file pr.
module +# 3) create one <module>.mod.c file per module # 4) create one Module.symvers file with CRC for all exported symbols # Step 3 is used to place certain information in the module's ELF -- cgit v1.2.3 From 0664684e1ebd7875e120d0cecd525bac4805f8ed Mon Sep 17 00:00:00 2001 From: Nathan Chancellor <nathan@kernel.org> Date: Thu, 16 Sep 2021 11:40:17 -0700 Subject: kbuild: Add -Werror=ignored-optimization-argument to CLANG_FLAGS Similar to commit 589834b3a009 ("kbuild: Add -Werror=unknown-warning-option to CLANG_FLAGS"). Clang ignores certain GCC flags that it has not implemented, only emitting a warning: $ echo | clang -fsyntax-only -falign-jumps -x c - clang-14: warning: optimization flag '-falign-jumps' is not supported [-Wignored-optimization-argument] When one of these flags gets added to KBUILD_CFLAGS unconditionally, all subsequent cc-{disable-warning,option} calls fail because -Werror was added to these invocations to turn the above warning and the equivalent -W flag warning into errors. To catch the presence of these flags earlier, turn -Wignored-optimization-argument into an error so that the flags can either be implemented or ignored via cc-option and there are no more weird errors. Reviewed-by: Nick Desaulniers <ndesaulniers@google.com> Signed-off-by: Nathan Chancellor <nathan@kernel.org> Signed-off-by: Masahiro Yamada <masahiroy@kernel.org> --- scripts/Makefile.clang | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'scripts') diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang index 4cce8fd0779c..51fc23e2e9e5 100644 --- a/scripts/Makefile.clang +++ b/scripts/Makefile.clang @@ -29,7 +29,12 @@ CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) else CLANG_FLAGS += -fintegrated-as endif +# By default, clang only warns when it encounters an unknown warning flag or +# certain optimization flags it knows it has not implemented. +# Make it behave more like gcc by erroring when these flags are encountered +# so they can be implemented or wrapped in cc-option. CLANG_FLAGS += -Werror=unknown-warning-option +CLANG_FLAGS += -Werror=ignored-optimization-argument KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_AFLAGS += $(CLANG_FLAGS) export CLANG_FLAGS -- cgit v1.2.3 From d09c38726c78effaf910a1e3f31e247b5b031d56 Mon Sep 17 00:00:00 2001 From: Miles Chen <miles.chen@mediatek.com> Date: Fri, 24 Sep 2021 15:43:38 -0700 Subject: scripts/sorttable: riscv: fix undeclared identifier 'EM_RISCV' error Fix the following build failure reported in [1] by adding a conditional definition of EM_RISCV in order to allow cross-compilation on machines which do not have EM_RISCV definition in their host. scripts/sorttable.c:352:7: error: use of undeclared identifier 'EM_RISCV' EM_RISCV was added to <elf.h> in glibc 2.24 so builds on systems with glibc headers < 2.24 should show this error. 
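A quick way to check whether a given build host is affected (a diagnostic Python sketch; the glibc header path is an assumption and may differ per distribution):

  import re

  ELF_H = '/usr/include/elf.h'  # usual glibc location, may differ

  try:
      with open(ELF_H) as f:
          defined = any(re.match(r'#define\s+EM_RISCV\b', line)
                        for line in f)
      print('EM_RISCV defined by host elf.h' if defined else
            'EM_RISCV missing: sorttable.c needs its own #define (243)')
  except FileNotFoundError:
      print(ELF_H, 'not found on this system')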
[mkubecek@suse.cz: changelog addition] Link: https://lore.kernel.org/lkml/e8965b25-f15b-c7b4-748c-d207dda9c8e8@i2se.com/ [1] Link: https://lkml.kernel.org/r/20210913030625.4525-1-miles.chen@mediatek.com Fixes: 54fed35fd393 ("riscv: Enable BUILDTIME_TABLE_SORT") Signed-off-by: Miles Chen <miles.chen@mediatek.com> Reported-by: Stefan Wahren <stefan.wahren@i2se.com> Tested-by: Stefan Wahren <stefan.wahren@i2se.com> Reviewed-by: Jisheng Zhang <jszhang@kernel.org> Cc: Michal Kubecek <mkubecek@suse.cz> Cc: Paul Walmsley <paul.walmsley@sifive.com> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Albert Ou <aou@eecs.berkeley.edu> Cc: Markus Mayer <mmayer@broadcom.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- scripts/sorttable.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'scripts') diff --git a/scripts/sorttable.c b/scripts/sorttable.c index f355869c65cd..6ee4fa882919 100644 --- a/scripts/sorttable.c +++ b/scripts/sorttable.c @@ -54,6 +54,10 @@ #define EM_ARCV2 195 #endif +#ifndef EM_RISCV +#define EM_RISCV 243 +#endif + static uint32_t (*r)(const uint32_t *); static uint16_t (*r2)(const uint16_t *); static uint64_t (*r8)(const uint64_t *); -- cgit v1.2.3 From 19532869feb9b0a97d17ddc14609d1e53a5b60db Mon Sep 17 00:00:00 2001 From: Nathan Chancellor <nathan@kernel.org> Date: Fri, 24 Sep 2021 15:44:00 -0700 Subject: kasan: always respect CONFIG_KASAN_STACK Currently, the asan-stack parameter is only passed along if CFLAGS_KASAN_SHADOW is not empty, which requires KASAN_SHADOW_OFFSET to be defined in Kconfig so that the value can be checked. In RISC-V's case, KASAN_SHADOW_OFFSET is not defined in Kconfig, which means that asan-stack does not get disabled with clang even when CONFIG_KASAN_STACK is disabled, resulting in large stack warnings with allmodconfig: drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c:117:12: error: stack frame size (14400) exceeds limit (2048) in function 'lb035q02_connect' [-Werror,-Wframe-larger-than] static int lb035q02_connect(struct omap_dss_device *dssdev) ^ 1 error generated. Ensure that the value of CONFIG_KASAN_STACK is always passed along to the compiler so that these warnings do not happen when CONFIG_KASAN_STACK is disabled. 
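The effect of the change can be modeled in a few lines of Python (a hedged sketch with hypothetical helper and values; the real logic is Makefile syntax, and the exact flag spelling cc-param emits differs between gcc and clang):

  def kasan_cflags(shadow_offset_known, stack_enabled):
      flags = []
      if shadow_offset_known:
          # Shadow-offset-dependent instrumentation flags (abridged).
          flags += ['asan-globals=1', 'asan-instrument-allocas=1']
      # After the fix, asan-stack is appended unconditionally, so
      # CONFIG_KASAN_STACK=n disables stack instrumentation even when
      # KASAN_SHADOW_OFFSET is not set in Kconfig (as on RISC-V).
      flags.append('asan-stack=%d' % int(stack_enabled))
      return flags

  # RISC-V-like case: no shadow offset in Kconfig, stack checks off.
  print(kasan_cflags(shadow_offset_known=False, stack_enabled=False))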
Link: https://github.com/ClangBuiltLinux/linux/issues/1453
References: 6baec880d7a5 ("kasan: turn off asan-stack for clang-8 and earlier")
Link: https://lkml.kernel.org/r/20210922205525.570068-1-nathan@kernel.org
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 scripts/Makefile.kasan | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'scripts')

diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 801c415bac59..b9e94c5e7097 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -33,10 +33,11 @@ else
 CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
 	$(call cc-param,asan-globals=1) \
 	$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-	$(call cc-param,asan-stack=$(stack_enable)) \
 	$(call cc-param,asan-instrument-allocas=1)
 endif

+CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
+
 endif # CONFIG_KASAN_GENERIC
-- cgit v1.2.3

From 554afc3b9797511e3245864e32aebeb6abbab1e3 Mon Sep 17 00:00:00 2001
From: Brendan Higgins <brendanhiggins@google.com>
Date: Wed, 29 Sep 2021 14:27:09 -0700
Subject: gcc-plugins/structleak: add makefile var for disabling structleak

KUnit and structleak don't play nice, so add a makefile variable for
disabling structleak when it complains.

Co-developed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Brendan Higgins <brendanhiggins@google.com>
Reviewed-by: David Gow <davidgow@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
---
 scripts/Makefile.gcc-plugins | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'scripts')

diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 952e46876329..4aad28480035 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF) \
 	+= -fplugin-arg-structleak_plugin-byref
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) \
 	+= -fplugin-arg-structleak_plugin-byref-all
+ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
+    DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
+endif
+export DISABLE_STRUCTLEAK_PLUGIN
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) \
 	+= -DSTRUCTLEAK_PLUGIN
-- cgit v1.2.3

From 3ef6ca4f354c53abf263cbeb51e7272523c294d8 Mon Sep 17 00:00:00 2001
From: Palmer Dabbelt <palmerdabbelt@google.com>
Date: Sat, 2 Oct 2021 16:57:13 -0700
Subject: checksyscalls: Unconditionally ignore fstat{,at}64

These can be replaced by statx(). Since rv32 has a 64-bit time_t, we
just never ended up with them in the first place. This is now an error
due to -Werror.
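The replacement is easy to confirm in a kernel tree; the output below is
from a contemporary generic syscall table (abridged, illustrative):

$ grep '__NR_statx' include/uapi/asm-generic/unistd.h
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)

New 32-bit ports pick up statx from this generic table, so the legacy
fstat64/fstatat64 entries never need to be wired up.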
Suggested-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
---
 scripts/checksyscalls.sh | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'scripts')

diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index fd9777f63f14..9dbab13329fa 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -82,10 +82,8 @@ cat << EOF
 #define __IGNORE_truncate64
 #define __IGNORE_stat64
 #define __IGNORE_lstat64
-#define __IGNORE_fstat64
 #define __IGNORE_fcntl64
 #define __IGNORE_fadvise64_64
-#define __IGNORE_fstatat64
 #define __IGNORE_fstatfs64
 #define __IGNORE_statfs64
 #define __IGNORE_llseek
@@ -253,6 +251,10 @@ cat << EOF
 #define __IGNORE_getpmsg
 #define __IGNORE_putpmsg
 #define __IGNORE_vserver
+
+/* 64-bit ports never needed these, and new 32-bit ports can use statx */
+#define __IGNORE_fstat64
+#define __IGNORE_fstatat64
 EOF
 }
-- cgit v1.2.3

From be358af1191b1b2fedebd8f3421cafdc8edacc7d Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 14 Oct 2021 14:35:07 -0400
Subject: nds32/ftrace: Fix Error: invalid operands (*UND* and *UND* sections)
 for `^'

I received a build failure for a new patch I'm working on for the nds32
architecture, and when I went to test it, I couldn't get to my build
error, because it failed to build with a bunch of:

  Error: invalid operands (*UND* and *UND* sections) for `^'

issues with various files. Those files were temporary asm files that
looked like:

  kernel/.tmp_mc_fork.s

I decided to look deeper, and found that the "mc" portion of that name
stood for "mcount", and that the file was created by the recordmcount.pl
script, one that I wrote over a decade ago. Once I knew the source of
the problem, I was able to investigate it further.

The way the recordmcount.pl script works (BTW, there's a C version that
simply modifies the ELF object) is by doing an "objdump" on the object
file. It looks for all the calls to "mcount", and records the offset of
each location from some global variable it can use (usually a global
function name, found with <.*>:). It then creates an asm file that is a
table of references to these locations, using the found
variable/function, compiles it, and links it back into the original
object file. This asm file is called ".tmp_mc_<object_base_name>.s".

The problem here is that the objdump output for the nds32 object file
contains entries that look like:

  0000159a <.L3^B1>:
      159a:  c6 00        beqz38 $r6, 159a <.L3^B1>
             159a: R_NDS32_9_PCREL_RELA  .text+0x159e
      159c:  84 d2        movi55 $r6, #-14
      159e:  80 06        mov55 $r0, $r6
      15a0:  ec 3c        addi10.sp #0x3c

where ".L3^B1" is somehow selected as the "global" variable to index off
of. The assembly file that holds the mcount locations then looks like
this:

      .section __mcount_loc,"a",@progbits
      .align 2
      .long .L3^B1 + -5522
      .long .L3^B1 + -5384
      .long .L3^B1 + -5270
      .long .L3^B1 + -5098
      .long .L3^B1 + -4970
      .long .L3^B1 + -4758
      .long .L3^B1 + -4122
      [...]

And when it is compiled back into an object to link to the original
object file, the compile fails on the "^" symbol.

The simple solution for now is to have the perl script avoid using
function symbols that have a "^" in the name.
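The effect of the tightened pattern can be reproduced outside the script
(hypothetical input; a greedy grep -E rendering of the perl regex, since
ERE lacks lazy quantifiers):

$ printf '0000159a <.L3^B1>:\n00001000 <real_func>:\n' | grep -E '^[0-9a-fA-F]+ <[^^]*>:'
00001000 <real_func>:

Only the ordinary function symbol survives, so the generated table no
longer references a name the assembler chokes on.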
Link: https://lkml.kernel.org/r/20211014143507.4ad2c0f7@gandalf.local.home
Cc: stable@vger.kernel.org
Acked-by: Greentime Hu <green.hu@gmail.com>
Fixes: fbf58a52ac088 ("nds32/ftrace: Add RECORD_MCOUNT support")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 scripts/recordmcount.pl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'scripts')

diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 8f6b13ae46bf..7d631aaa0ae1 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -189,7 +189,7 @@ if ($arch =~ /(x86(_64)?)|(i386)/) {
 $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)";
 $weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
 $section_regex = "Disassembly of section\\s+(\\S+):";
-$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
+$function_regex = "^([0-9a-fA-F]+)\\s+<([^^]*?)>:";
 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)\$";
 $section_type = '@progbits';
 $mcount_adjust = 0;
-- cgit v1.2.3