Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts, or adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2024-03-28 17:24:10 -07:00
commit 5e47fbe5ce
613 changed files with 13104 additions and 4263 deletions


@ -340,7 +340,8 @@ Lee Jones <lee@kernel.org> <joneslee@google.com>
Lee Jones <lee@kernel.org> <lee.jones@canonical.com>
Lee Jones <lee@kernel.org> <lee.jones@linaro.org>
Lee Jones <lee@kernel.org> <lee@ubuntu.com>
Leonard Crestez <leonard.crestez@nxp.com> Leonard Crestez <cdleonard@gmail.com>
Leonard Crestez <cdleonard@gmail.com> <leonard.crestez@nxp.com>
Leonard Crestez <cdleonard@gmail.com> <leonard.crestez@intel.com>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonard Göhrs <l.goehrs@pengutronix.de>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
@ -497,7 +498,8 @@ Prasad Sodagudi <quic_psodagud@quicinc.com> <psodagud@codeaurora.org>
Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>


@ -144,14 +144,8 @@ passing 0 into the hint address parameter of mmap. On CPUs with an address space
smaller than sv48, the CPU maximum supported address space will be the default.
Software can "opt-in" to receiving VAs from another VA space by providing
a hint address to mmap. A hint address passed to mmap will cause the largest
address space that fits entirely into the hint to be used, unless there is no
space left in the address space. If there is no space available in the requested
address space, an address in the next smallest available address space will be
returned.
For example, in order to obtain 48-bit VA space, a hint address greater than
:code:`1 << 47` must be provided. Note that this is 47 due to sv48 userspace
ending at :code:`1 << 47` and the addresses beyond this are reserved for the
kernel. Similarly, to obtain 57-bit VA space addresses, a hint address greater
than or equal to :code:`1 << 56` must be provided.
a hint address to mmap. When a hint address is passed to mmap, the returned
address will never use more bits than the hint address. For example, if a hint
address of `1 << 40` is passed to mmap, a valid returned address will never use
bits 41 through 63. If no mappable addresses are available in that range, mmap
will return `MAP_FAILED`.
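
As an illustrative sketch (not part of this patch), a userspace program can
request such a constrained mapping by passing the hint directly to mmap; the
`1 << 40` hint below is an arbitrary example value::

  #include <stdio.h>
  #include <sys/mman.h>

  int main(void)
  {
          /* Per the text above, with a hint of 1 << 40 the returned
           * address should not use bits 41 through 63. */
          void *hint = (void *)(1UL << 40);
          void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          if (p == MAP_FAILED)
                  perror("mmap");
          else
                  printf("mapped at %p\n", p);
          return 0;
  }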


@ -45,7 +45,7 @@ mount options are:
Enable code/data prioritization in L2 cache allocations.
"mba_MBps":
Enable the MBA Software Controller(mba_sc) to specify MBA
bandwidth in MBps
bandwidth in MiBps
"debug":
Make debug files accessible. Available debug files are annotated with
"Available only with debug option".
@ -526,7 +526,7 @@ threads start using more cores in an rdtgroup, the actual bandwidth may
increase or vary although user specified bandwidth percentage is same.
In order to mitigate this and make the interface more user friendly,
resctrl added support for specifying the bandwidth in MBps as well. The
resctrl added support for specifying the bandwidth in MiBps as well. The
kernel underneath would use a software feedback mechanism or a "Software
Controller(mba_sc)" which reads the actual bandwidth using MBM counters
and adjust the memory bandwidth percentages to ensure::
@ -573,13 +573,13 @@ Memory b/w domain is L3 cache.
MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;...
Memory bandwidth Allocation specified in MBps
Memory bandwidth Allocation specified in MiBps
---------------------------------------------
Memory bandwidth domain is L3 cache.
::
MB:<cache_id0>=bw_MBps0;<cache_id1>=bw_MBps1;...
MB:<cache_id0>=bw_MiBps0;<cache_id1>=bw_MiBps1;...
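
As a hypothetical sketch (group name and values invented), a program could set
such a MiBps limit by writing a schemata line of the form shown above; this
assumes resctrl is mounted at /sys/fs/resctrl with the "mba_MBps" option and
that a group "p0" already exists::

  #include <stdio.h>

  int main(void)
  {
          /* Cap group "p0" at 2048 MiBps on L3 cache domains 0 and 1. */
          FILE *f = fopen("/sys/fs/resctrl/p0/schemata", "w");

          if (!f) {
                  perror("fopen");
                  return 1;
          }
          fprintf(f, "MB:0=2048;1=2048\n");
          return fclose(f) ? 1 : 0;
  }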
Slow Memory Bandwidth Allocation (SMBA)
---------------------------------------


@ -270,7 +270,7 @@ examples:
port {
ov7251_ep: endpoint {
data-lanes = <0 1>;
data-lanes = <0>;
link-frequencies = /bits/ 64 <240000000 319200000>;
remote-endpoint = <&csiphy3_ep>;
};


@ -14,9 +14,6 @@ description: The Nomadik I2C host controller began its life in the ST
maintainers:
- Linus Walleij <linus.walleij@linaro.org>
allOf:
- $ref: /schemas/i2c/i2c-controller.yaml#
# Need a custom select here or 'arm,primecell' will match on lots of nodes
select:
properties:
@ -24,21 +21,23 @@ select:
contains:
enum:
- st,nomadik-i2c
- mobileye,eyeq5-i2c
required:
- compatible
properties:
compatible:
oneOf:
# The variant found in STn8815
- items:
- const: st,nomadik-i2c
- const: arm,primecell
# The variant found in DB8500
- items:
- const: stericsson,db8500-i2c
- const: st,nomadik-i2c
- const: arm,primecell
- items:
- const: mobileye,eyeq5-i2c
- const: arm,primecell
reg:
maxItems: 1
@ -55,7 +54,7 @@ properties:
- items:
- const: mclk
- const: apb_pclk
# Clock name in DB8500
# Clock name in DB8500 or EyeQ5
- items:
- const: i2cclk
- const: apb_pclk
@ -70,6 +69,16 @@ properties:
minimum: 1
maximum: 400000
mobileye,olb:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
- items:
- description: Phandle to OLB system controller node.
- description: Platform-wide controller ID (integer starting from zero).
description:
The phandle pointing to OLB system controller node, with the I2C
controller index.
required:
- compatible
- reg
@ -79,6 +88,20 @@ required:
unevaluatedProperties: false
allOf:
- $ref: /schemas/i2c/i2c-controller.yaml#
- if:
properties:
compatible:
contains:
const: mobileye,eyeq5-i2c
then:
required:
- mobileye,olb
else:
properties:
mobileye,olb: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
@ -111,5 +134,19 @@ examples:
clocks = <&i2c0clk>, <&pclki2c0>;
clock-names = "mclk", "apb_pclk";
};
- |
#include <dt-bindings/interrupt-controller/mips-gic.h>
i2c@300000 {
compatible = "mobileye,eyeq5-i2c", "arm,primecell";
reg = <0x300000 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 1 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <400000>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&i2c_ser_clk>, <&i2c_clk>;
clock-names = "i2cclk", "apb_pclk";
mobileye,olb = <&olb 0>;
};
...


@ -0,0 +1,75 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/mtd/partitions/linux,ubi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Unsorted Block Images
description: |
UBI ("Unsorted Block Images") is a volume management system for raw
flash devices which manages multiple logical volumes on a single
physical flash device and spreads the I/O load (i.e. wear-leveling)
across the whole flash chip.
maintainers:
- Daniel Golle <daniel@makrotopia.org>
allOf:
- $ref: partition.yaml#
properties:
compatible:
const: linux,ubi
volumes:
type: object
description: UBI Volumes
patternProperties:
"^ubi-volume-.*$":
$ref: /schemas/mtd/partitions/ubi-volume.yaml#
unevaluatedProperties: false
required:
- compatible
unevaluatedProperties: false
examples:
- |
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
#size-cells = <1>;
partition@0 {
reg = <0x0 0x100000>;
label = "bootloader";
read-only;
};
partition@100000 {
reg = <0x100000 0x1ff00000>;
label = "ubi";
compatible = "linux,ubi";
volumes {
ubi-volume-caldata {
volid = <2>;
volname = "rf";
nvmem-layout {
compatible = "fixed-layout";
#address-cells = <1>;
#size-cells = <1>;
eeprom@0 {
reg = <0x0 0x1000>;
};
};
};
};
};
};


@ -0,0 +1,40 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/mtd/partitions/ubi-volume.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: UBI volume
description: |
This binding describes a single UBI volume. Volumes can be matched either
by their ID or their name, or both.
maintainers:
- Daniel Golle <daniel@makrotopia.org>
properties:
volid:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Match UBI volume ID
volname:
$ref: /schemas/types.yaml#/definitions/string
description:
Match UBI volume name
nvmem-layout:
$ref: /schemas/nvmem/layouts/nvmem-layout.yaml#
description:
This container may reference an NVMEM layout parser.
anyOf:
- required:
- volid
- required:
- volname
# This is a generic file other binding inherit from and extend
additionalProperties: true


@ -110,7 +110,11 @@ properties:
const: 1
compatible:
const: riscv,cpu-intc
oneOf:
- items:
- const: andestech,cpu-intc
- const: riscv,cpu-intc
- const: riscv,cpu-intc
interrupt-controller: true


@ -477,5 +477,12 @@ properties:
latency, as ratified in commit 56ed795 ("Update
riscv-crypto-spec-vector.adoc") of riscv-crypto.
- const: xandespmu
description:
The Andes Technology performance monitor extension for counter overflow
and privilege mode filtering. For more details, see Counter Related
Registers in the AX45MP datasheet.
https://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
additionalProperties: true
...


@ -1,31 +0,0 @@
Abracon ABX80X I2C ultra low power RTC/Alarm chip
The Abracon ABX80X family consists of the ab0801, ab0803, ab0804, ab0805, ab1801,
ab1803, ab1804 and ab1805. The ab0805 is the superset of ab080x and the ab1805
is the superset of ab180x.
Required properties:
- "compatible": should one of:
"abracon,abx80x"
"abracon,ab0801"
"abracon,ab0803"
"abracon,ab0804"
"abracon,ab0805"
"abracon,ab1801"
"abracon,ab1803"
"abracon,ab1804"
"abracon,ab1805"
"microcrystal,rv1805"
Using "abracon,abx80x" will enable chip autodetection.
- "reg": I2C bus address of the device
Optional properties:
The abx804 and abx805 have a trickle charger that is able to charge the
connected battery or supercap. Both the following properties have to be defined
and valid to enable charging:
- "abracon,tc-diode": should be "standard" (0.6V) or "schottky" (0.3V)
- "abracon,tc-resistor": should be <0>, <3>, <6> or <11>. 0 disables the output
resistor, the other values are in kOhm.


@ -0,0 +1,98 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/rtc/abracon,abx80x.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Abracon ABX80X I2C ultra low power RTC/Alarm chip
maintainers:
- linux-rtc@vger.kernel.org
properties:
compatible:
description:
The wildcard 'abracon,abx80x' may be used to support a mix
of different Abracon RTCs. In this case the driver
must perform auto-detection from the ID register.
enum:
- abracon,abx80x
- abracon,ab0801
- abracon,ab0803
- abracon,ab0804
- abracon,ab0805
- abracon,ab1801
- abracon,ab1803
- abracon,ab1804
- abracon,ab1805
- microcrystal,rv1805
reg:
maxItems: 1
interrupts:
maxItems: 1
abracon,tc-diode:
description:
Trickle-charge diode type.
Required to enable charging of the backup battery.
Supported are 'standard' diodes with a 0.6V drop
and 'schottky' diodes with a 0.3V drop.
$ref: /schemas/types.yaml#/definitions/string
enum:
- standard
- schottky
abracon,tc-resistor:
description:
Trickle-charge resistor value in kOhm.
Required to enable charging of the backup battery.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 3, 6, 11]
dependentRequired:
abracon,tc-diode: ["abracon,tc-resistor"]
abracon,tc-resistor: ["abracon,tc-diode"]
required:
- compatible
- reg
allOf:
- $ref: rtc.yaml#
- if:
properties:
compatible:
not:
contains:
enum:
- abracon,abx80x
- abracon,ab0804
- abracon,ab1804
- abracon,ab0805
- abracon,ab1805
then:
properties:
abracon,tc-diode: false
abracon,tc-resistor: false
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
rtc@69 {
compatible = "abracon,abx80x";
reg = <0x69>;
abracon,tc-diode = "schottky";
abracon,tc-resistor = <3>;
interrupts = <44 IRQ_TYPE_EDGE_FALLING>;
};
};


@ -19,7 +19,9 @@ properties:
- items:
- const: atmel,at91sam9260-rtt
- items:
- const: microchip,sam9x60-rtt
- enum:
- microchip,sam9x60-rtt
- microchip,sam9x7-rtt
- const: atmel,at91sam9260-rtt
- items:
- const: microchip,sama7g5-rtt


@ -0,0 +1,39 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/rtc/mediatek,mt2712-rtc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek MT2712 on-SoC RTC
allOf:
- $ref: rtc.yaml#
maintainers:
- Ran Bi <ran.bi@mediatek.com>
properties:
compatible:
const: mediatek,mt2712-rtc
reg:
maxItems: 1
interrupts:
maxItems: 1
required:
- reg
- interrupts
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
rtc@10011000 {
compatible = "mediatek,mt2712-rtc";
reg = <0x10011000 0x1000>;
interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_LOW>;
};


@ -0,0 +1,52 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/rtc/mediatek,mt7622-rtc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek MT7622 on-SoC RTC
allOf:
- $ref: rtc.yaml#
maintainers:
- Sean Wang <sean.wang@mediatek.com>
properties:
compatible:
items:
- const: mediatek,mt7622-rtc
- const: mediatek,soc-rtc
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 1
clock-names:
const: rtc
required:
- reg
- interrupts
- clocks
- clock-names
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/mt7622-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
rtc@10212800 {
compatible = "mediatek,mt7622-rtc", "mediatek,soc-rtc";
reg = <0x10212800 0x200>;
interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_RTC>;
clock-names = "rtc";
};


@ -1,14 +0,0 @@
Device-Tree bindings for MediaTek SoC based RTC
Required properties:
- compatible : Should be "mediatek,mt2712-rtc" : for MT2712 SoC
- reg : Specifies base physical address and size of the registers;
- interrupts : Should contain the interrupt for RTC alarm;
Example:
rtc: rtc@10011000 {
compatible = "mediatek,mt2712-rtc";
reg = <0 0x10011000 0 0x1000>;
interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_LOW>;
};


@ -1,21 +0,0 @@
Device-Tree bindings for MediaTek SoC based RTC
Required properties:
- compatible : Should be
"mediatek,mt7622-rtc", "mediatek,soc-rtc" : for MT7622 SoC
- reg : Specifies base physical address and size of the registers;
- interrupts : Should contain the interrupt for RTC alarm;
- clocks : Specifies list of clock specifiers, corresponding to
entries in clock-names property;
- clock-names : Should contain "rtc" entries
Example:
rtc: rtc@10212800 {
compatible = "mediatek,mt7622-rtc",
"mediatek,soc-rtc";
reg = <0 0x10212800 0 0x200>;
interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_RTC>;
clock-names = "rtc";
};


@ -18,7 +18,13 @@ allOf:
properties:
compatible:
const: xlnx,zynqmp-rtc
oneOf:
- const: xlnx,zynqmp-rtc
- items:
- enum:
- xlnx,versal-rtc
- xlnx,versal-net-rtc
- const: xlnx,zynqmp-rtc
reg:
maxItems: 1
@ -48,6 +54,9 @@ properties:
default: 0x198233
deprecated: true
power-domains:
maxItems: 1
required:
- compatible
- reg


@ -185,11 +185,12 @@ properties:
gpio-ranges:
items:
- description: A phandle to the CODEC pinctrl node
minimum: 0
- const: 0
- const: 0
- const: 3
- items:
- description: A phandle to the CODEC pinctrl node
minimum: 0
- const: 0
- const: 0
- const: 3
patternProperties:
"-state$":


@ -32,12 +32,23 @@ properties:
description: |
Bit width of the timer, necessary if not 16.
"#pwm-cells":
const: 3
required:
- compatible
- reg
- interrupts
- clocks
allOf:
- if:
not:
required:
- "#pwm-cells"
then:
required:
- interrupts
additionalProperties: false
examples:
@ -50,3 +61,12 @@ examples:
clocks = <&cpu_clk 3>;
timer-width = <32>;
};
- |
pwm: pwm@f8002000 {
compatible = "cdns,ttc";
reg = <0xf8002000 0x1000>;
clocks = <&cpu_clk 3>;
timer-width = <32>;
#pwm-cells = <3>;
};


@ -18,7 +18,9 @@ description: |
properties:
compatible:
const: nxp,sysctr-timer
enum:
- nxp,imx95-sysctr-timer
- nxp,sysctr-timer
reg:
maxItems: 1


@ -0,0 +1,38 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/timer/ralink,cevt-systick.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: System tick counter present in Ralink family SoCs
maintainers:
- Sergio Paracuellos <sergio.paracuellos@gmail.com>
properties:
compatible:
const: ralink,cevt-systick
reg:
maxItems: 1
interrupts:
maxItems: 1
required:
- compatible
- reg
- interrupts
additionalProperties: false
examples:
- |
systick@d00 {
compatible = "ralink,cevt-systick";
reg = <0xd00 0x10>;
interrupt-parent = <&cpuintc>;
interrupts = <7>;
};
...


@ -23,7 +23,7 @@ properties:
- enum:
- renesas,r7s72100-ostm # RZ/A1H
- renesas,r7s9210-ostm # RZ/A2M
- renesas,r9a07g043-ostm # RZ/G2UL
- renesas,r9a07g043-ostm # RZ/G2UL and RZ/Five
- renesas,r9a07g044-ostm # RZ/G2{L,LC}
- renesas,r9a07g054-ostm # RZ/V2L
- const: renesas,ostm # Generic


@ -46,7 +46,19 @@ properties:
interrupts:
minItems: 2
maxItems: 3
items:
- description: Underflow interrupt, channel 0
- description: Underflow interrupt, channel 1
- description: Underflow interrupt, channel 2
- description: Input capture interrupt, channel 2
interrupt-names:
minItems: 2
items:
- const: tuni0
- const: tuni1
- const: tuni2
- const: ticpi2
clocks:
maxItems: 1
@ -100,7 +112,9 @@ examples:
reg = <0xffd80000 0x30>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
<GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
clock-names = "fck";
power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;


@ -26,6 +26,7 @@ properties:
- items:
- enum:
- axis,artpec8-mct
- google,gs101-mct
- samsung,exynos3250-mct
- samsung,exynos5250-mct
- samsung,exynos5260-mct
@ -127,6 +128,7 @@ allOf:
contains:
enum:
- axis,artpec8-mct
- google,gs101-mct
- samsung,exynos5260-mct
- samsung,exynos5420-mct
- samsung,exynos5433-mct


@ -10,6 +10,22 @@
# Rely on implicit context synchronization as a result of exception return
# when returning from IPI handler, and when returning to user-space.
#
# * riscv
#
# riscv uses xRET as return from interrupt and to return to user-space.
#
# Given that xRET is not core serializing, we rely on FENCE.I for providing
# core serialization:
#
# - by calling sync_core_before_usermode() on return from interrupt (cf.
# ipi_sync_core()),
#
# - via switch_mm() and sync_core_before_usermode() (respectively, for
# uthread->uthread and kthread->uthread transitions) before returning
# to user-space.
#
# The serialization in switch_mm() is activated by prepare_sync_core_cmd().
#
# * x86
#
# x86-32 uses IRET as return from interrupt, which takes care of the IPI.
@ -43,7 +59,7 @@
| openrisc: | TODO |
| parisc: | TODO |
| powerpc: | ok |
| riscv: | TODO |
| riscv: | ok |
| s390: | ok |
| sh: | TODO |
| sparc: | TODO |


@ -3,3 +3,13 @@
==========
Page Cache
==========
The page cache is the primary way that the user and the rest of the kernel
interact with filesystems. It can be bypassed (e.g. with O_DIRECT),
but normal reads, writes and mmaps go through the page cache.
Folios
======
The folio is the unit of memory management within the page cache.
Operations


@ -7,6 +7,7 @@ Scheduler
completion
membarrier
sched-arch
sched-bwc
sched-deadline


@ -0,0 +1,39 @@
.. SPDX-License-Identifier: GPL-2.0
========================
membarrier() System Call
========================
MEMBARRIER_CMD_{PRIVATE,GLOBAL}_EXPEDITED - Architecture requirements
=====================================================================
Memory barriers before updating rq->curr
----------------------------------------
The commands MEMBARRIER_CMD_PRIVATE_EXPEDITED and MEMBARRIER_CMD_GLOBAL_EXPEDITED
require each architecture to have a full memory barrier after coming from
user-space, before updating rq->curr. This barrier is implied by the sequence
rq_lock(); smp_mb__after_spinlock() in __schedule(). The barrier matches a full
barrier in the proximity of the membarrier system call exit, cf.
membarrier_{private,global}_expedited().
Memory barriers after updating rq->curr
---------------------------------------
The commands MEMBARRIER_CMD_PRIVATE_EXPEDITED and MEMBARRIER_CMD_GLOBAL_EXPEDITED
require each architecture to have a full memory barrier after updating rq->curr,
before returning to user-space. The schemes providing this barrier on the various
architectures are as follows.
- alpha, arc, arm, hexagon, mips rely on the full barrier implied by
spin_unlock() in finish_lock_switch().
- arm64 relies on the full barrier implied by switch_to().
- powerpc, riscv, s390, sparc, x86 rely on the full barrier implied by
switch_mm(), if mm is not NULL; they rely on the full barrier implied
by mmdrop(), otherwise. On powerpc and riscv, switch_mm() relies on
membarrier_arch_switch_mm().
The barrier matches a full barrier in the proximity of the membarrier system call
entry, cf. membarrier_{private,global}_expedited().
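
For context, user space reaches these commands through the membarrier() system
call. A minimal illustrative sketch (not part of the requirements above), using
the raw syscall(2) interface::

  #include <linux/membarrier.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <stdio.h>

  int main(void)
  {
          /* Private expedited commands must be registered once per
           * process before they can be issued. */
          if (syscall(__NR_membarrier,
                      MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
                  perror("register");

          if (syscall(__NR_membarrier,
                      MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
                  perror("membarrier");
          return 0;
  }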


@ -100,6 +100,9 @@ which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
"server" (i.e., good batching) workloads. It defaults to a setting suitable
for desktop workloads. SCHED_BATCH is handled by the CFS scheduler module too.
In case CONFIG_HZ results in base_slice_ns < TICK_NSEC, the value of
base_slice_ns will have little to no impact on the workloads.
Due to its design, the CFS scheduler is not prone to any of the "attacks" that
exist today against the heuristics of the stock scheduler: fiftyp.c, thud.c,
chew.c, ring-test.c, massive_intr.c all work fine and do not impact


@ -6,7 +6,7 @@ Supported board/chip:
* National Semiconductor LM70 LLP evaluation board
Datasheet: http://www.national.com/pf/LM/LM70.html
Datasheet: https://www.ti.com/lit/gpn/lm70
Author:
Kaiwan N Billimoria <kaiwan@designergraphix.com>
@ -28,7 +28,7 @@ Hardware Interfacing
The schematic for this particular board (the LM70EVAL-LLP) is
available (on page 4) here:
http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
https://download.datasheets.com/pdfs/documentation/nat/kit&board/lm70llpevalmanual.pdf
The hardware interfacing on the LM70 LLP eval board is as follows:


@ -61,7 +61,7 @@ the spidev driver failing to probe.
Sysfs also supports userspace driven binding/unbinding of drivers to
devices that do not bind automatically using one of the tables above.
To make the spidev driver bind to such a device, use the following:
To make the spidev driver bind to such a device, use the following::
echo spidev > /sys/bus/spi/devices/spiB.C/driver_override
echo spiB.C > /sys/bus/spi/drivers/spidev/bind


@ -2377,8 +2377,8 @@ M: Sean Wang <sean.wang@mediatek.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/rtc/rtc-mt2712.txt
F: Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
F: Documentation/devicetree/bindings/rtc/mediatek,mt2712-rtc.yaml
F: Documentation/devicetree/bindings/rtc/mediatek,mt7622-rtc.yaml
F: drivers/rtc/rtc-mt2712.c
F: drivers/rtc/rtc-mt6397.c
F: drivers/rtc/rtc-mt7622.c
@ -2617,6 +2617,7 @@ F: drivers/pci/controller/dwc/pcie-qcom.c
F: drivers/phy/qualcomm/
F: drivers/power/*/msm*
F: drivers/reset/reset-qcom-*
F: drivers/rtc/rtc-pm8xxx.c
F: drivers/spi/spi-geni-qcom.c
F: drivers/spi/spi-qcom-qspi.c
F: drivers/spi/spi-qup.c
@ -3941,8 +3942,7 @@ F: kernel/bpf/ringbuf.c
BPF [SECURITY & LSM] (Security Audit and Enforcement using BPF)
M: KP Singh <kpsingh@kernel.org>
R: Florent Revest <revest@chromium.org>
R: Brendan Jackman <jackmanb@chromium.org>
R: Matt Bobrowski <mattbobrowski@google.com>
L: bpf@vger.kernel.org
S: Maintained
F: Documentation/bpf/prog_lsm.rst
@ -3967,7 +3967,7 @@ F: kernel/bpf/bpf_lru*
F: kernel/bpf/cgroup.c
BPF [TOOLING] (bpftool)
M: Quentin Monnet <quentin@isovalent.com>
M: Quentin Monnet <qmo@kernel.org>
L: bpf@vger.kernel.org
S: Maintained
F: kernel/bpf/disasm.*
@ -6172,7 +6172,6 @@ F: include/uapi/linux/dm-*.h
DEVICE-MAPPER VDO TARGET
M: Matthew Sakai <msakai@redhat.com>
M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
F: Documentation/admin-guide/device-mapper/vdo*.rst
@ -7940,6 +7939,7 @@ M: Gao Xiang <xiang@kernel.org>
M: Chao Yu <chao@kernel.org>
R: Yue Hu <huyue2@coolpad.com>
R: Jeffle Xu <jefflexu@linux.alibaba.com>
R: Sandeep Dhavale <dhavale@google.com>
L: linux-erofs@lists.ozlabs.org
S: Maintained
W: https://erofs.docs.kernel.org
@ -13133,6 +13133,7 @@ F: drivers/net/ethernet/marvell/mvpp2/
MARVELL MWIFIEX WIRELESS DRIVER
M: Brian Norris <briannorris@chromium.org>
R: Francesco Dolcini <francesco@dolcini.it>
L: linux-wireless@vger.kernel.org
S: Odd Fixes
F: drivers/net/wireless/marvell/mwifiex/
@ -14133,7 +14134,9 @@ M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
M: "Paul E. McKenney" <paulmck@kernel.org>
L: linux-kernel@vger.kernel.org
S: Supported
F: arch/powerpc/include/asm/membarrier.h
F: Documentation/scheduler/membarrier.rst
F: arch/*/include/asm/membarrier.h
F: arch/*/include/asm/sync_core.h
F: include/uapi/linux/membarrier.h
F: kernel/sched/membarrier.c
@ -18642,18 +18645,21 @@ REALTEK WIRELESS DRIVER (rtlwifi family)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtlwifi/
REALTEK WIRELESS DRIVER (rtw88)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtw88/
REALTEK WIRELESS DRIVER (rtw89)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtw89/
REDPINE WIRELESS DRIVER
@ -18724,13 +18730,24 @@ S: Supported
F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
F: drivers/i2c/busses/i2c-emev2.c
RENESAS ETHERNET DRIVERS
RENESAS ETHERNET AVB DRIVER
R: Sergey Shtylyov <s.shtylyov@omp.ru>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
F: Documentation/devicetree/bindings/net/renesas,*.yaml
F: drivers/net/ethernet/renesas/
F: include/linux/sh_eth.h
F: Documentation/devicetree/bindings/net/renesas,etheravb.yaml
F: drivers/net/ethernet/renesas/Kconfig
F: drivers/net/ethernet/renesas/Makefile
F: drivers/net/ethernet/renesas/ravb*
RENESAS ETHERNET SWITCH DRIVER
R: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
F: Documentation/devicetree/bindings/net/renesas,*ether-switch.yaml
F: drivers/net/ethernet/renesas/Kconfig
F: drivers/net/ethernet/renesas/Makefile
F: drivers/net/ethernet/renesas/rcar_gen4*
F: drivers/net/ethernet/renesas/rswitch*
RENESAS IDT821034 ASoC CODEC
M: Herve Codina <herve.codina@bootlin.com>
@ -18840,6 +18857,16 @@ S: Supported
F: Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
F: drivers/i2c/busses/i2c-rzv2m.c
RENESAS SUPERH ETHERNET DRIVER
R: Sergey Shtylyov <s.shtylyov@omp.ru>
L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
F: Documentation/devicetree/bindings/net/renesas,ether.yaml
F: drivers/net/ethernet/renesas/Kconfig
F: drivers/net/ethernet/renesas/Makefile
F: drivers/net/ethernet/renesas/sh_eth*
F: include/linux/sh_eth.h
RENESAS USB PHY DRIVER
M: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
L: linux-renesas-soc@vger.kernel.org
@ -19176,12 +19203,14 @@ M: Hin-Tak Leung <hintak.leung@gmail.com>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
S: Maintained
T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtl818x/rtl8187/
RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M: Jes Sorensen <Jes.Sorensen@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
T: git https://github.com/pkshih/rtw.git
F: drivers/net/wireless/realtek/rtl8xxxu/
RTRS TRANSPORT DRIVERS
@ -22573,6 +22602,7 @@ F: include/uapi/misc/uacce/
UBI FILE SYSTEM (UBIFS)
M: Richard Weinberger <richard@nod.at>
R: Zhihao Cheng <chengzhihao1@huawei.com>
L: linux-mtd@lists.infradead.org
S: Supported
W: http://www.linux-mtd.infradead.org/doc/ubifs.html
@ -22718,6 +22748,7 @@ F: drivers/ufs/host/ufs-renesas.c
UNSORTED BLOCK IMAGES (UBI)
M: Richard Weinberger <richard@nod.at>
R: Zhihao Cheng <chengzhihao1@huawei.com>
L: linux-mtd@lists.infradead.org
S: Supported
W: http://www.linux-mtd.infradead.org/


@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 8
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION =
EXTRAVERSION = -rc1
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -799,7 +799,7 @@ config CFI_CLANG
depends on ARCH_SUPPORTS_CFI_CLANG
depends on $(cc-option,-fsanitize=kcfi)
help
This option enables Clangs forward-edge Control Flow Integrity
This option enables Clang's forward-edge Control Flow Integrity
(CFI) checking, where the compiler injects a runtime check to each
indirect function call to ensure the target is a valid function with
the correct static type. This restricts possible call targets and


@ -505,8 +505,8 @@ source "arch/arm/mm/Kconfig"
config IWMMXT
bool "Enable iWMMXt support"
depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 || CPU_PJ4B
depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK
default y if PXA27x || PXA3xx || ARCH_MMP
help
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.


@ -90,9 +90,6 @@ config BACKTRACE_VERBOSE
In most cases, say N here, unless you are intending to debug the
kernel and have access to the kernel binary image.
config FRAME_POINTER
bool
config DEBUG_USER
bool "Verbose user fault messages"
help


@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__
#include <asm/system_info.h>
#include <uapi/asm/mman.h>
static inline bool arch_memory_deny_write_exec_supported(void)
{
return cpu_architecture() >= CPU_ARCH_ARMv6;
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
#endif /* __ASM_MMAN_H__ */

View file

@ -10,6 +10,7 @@
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
#include <linux/bitfield.h>
#include <linux/types.h>
struct pt_regs {
@ -35,8 +36,8 @@ struct svc_pt_regs {
#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
(((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
(FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \
FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr))
#else
#define isa_mode(regs) 1 /* Thumb */
#endif


@ -76,8 +76,6 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \


@ -18,18 +18,6 @@
#include <asm/assembler.h>
#include "iwmmxt.h"
#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
#define PJ4(code...) code
#define XSC(code...)
#elif defined(CONFIG_CPU_MOHAWK) || \
defined(CONFIG_CPU_XSC3) || \
defined(CONFIG_CPU_XSCALE)
#define PJ4(code...)
#define XSC(code...) code
#else
#error "Unsupported iWMMXt architecture"
#endif
#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
#define MMX_WR2 (0x10)
@ -81,17 +69,13 @@ ENDPROC(iwmmxt_undef_handler)
ENTRY(iwmmxt_task_enable)
inc_preempt_count r10, r3
XSC(mrc p15, 0, r2, c15, c1, 0)
PJ4(mrc p15, 0, r2, c1, c0, 2)
mrc p15, 0, r2, c15, c1, 0
@ CP0 and CP1 accessible?
XSC(tst r2, #0x3)
PJ4(tst r2, #0xf)
tst r2, #0x3
bne 4f @ if so no business here
@ enable access to CP0 and CP1
XSC(orr r2, r2, #0x3)
XSC(mcr p15, 0, r2, c15, c1, 0)
PJ4(orr r2, r2, #0xf)
PJ4(mcr p15, 0, r2, c1, c0, 2)
orr r2, r2, #0x3
mcr p15, 0, r2, c15, c1, 0
ldr r3, =concan_owner
ldr r2, [r0, #S_PC] @ current task pc value
@ -218,12 +202,9 @@ ENTRY(iwmmxt_task_disable)
bne 1f @ no: quit
@ enable access to CP0 and CP1
XSC(mrc p15, 0, r4, c15, c1, 0)
XSC(orr r4, r4, #0x3)
XSC(mcr p15, 0, r4, c15, c1, 0)
PJ4(mrc p15, 0, r4, c1, c0, 2)
PJ4(orr r4, r4, #0xf)
PJ4(mcr p15, 0, r4, c1, c0, 2)
mrc p15, 0, r4, c15, c1, 0
orr r4, r4, #0x3
mcr p15, 0, r4, c15, c1, 0
mov r0, #0 @ nothing to load
str r0, [r3] @ no more current owner
@ -232,10 +213,8 @@ ENTRY(iwmmxt_task_disable)
bl concan_save
@ disable access to CP0 and CP1
XSC(bic r4, r4, #0x3)
XSC(mcr p15, 0, r4, c15, c1, 0)
PJ4(bic r4, r4, #0xf)
PJ4(mcr p15, 0, r4, c1, c0, 2)
bic r4, r4, #0x3
mcr p15, 0, r4, c15, c1, 0
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
@ -330,11 +309,9 @@ ENDPROC(iwmmxt_task_restore)
*/
ENTRY(iwmmxt_task_switch)
XSC(mrc p15, 0, r1, c15, c1, 0)
PJ4(mrc p15, 0, r1, c1, c0, 2)
mrc p15, 0, r1, c15, c1, 0
@ CP0 and CP1 accessible?
XSC(tst r1, #0x3)
PJ4(tst r1, #0xf)
tst r1, #0x3
bne 1f @ yes: block them for next task
ldr r2, =concan_owner
@ -344,10 +321,8 @@ ENTRY(iwmmxt_task_switch)
retne lr @ no: leave Concan disabled
1: @ flip Concan access
XSC(eor r1, r1, #0x3)
XSC(mcr p15, 0, r1, c15, c1, 0)
PJ4(eor r1, r1, #0xf)
PJ4(mcr p15, 0, r1, c1, c0, 2)
eor r1, r1, #0x3
mcr p15, 0, r1, c15, c1, 0
mrc p15, 0, r1, c2, c0, 0
sub pc, lr, r1, lsr #32 @ cpwait and return


@ -1,135 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/pj4-cp0.c
*
* PJ4 iWMMXt coprocessor context switching and handling
*
* Copyright (c) 2010 Marvell International Inc.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>
#include <asm/cputype.h>
static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
struct thread_info *thread = t;
switch (cmd) {
case THREAD_NOTIFY_FLUSH:
/*
* flush_thread() zeroes thread->fpstate, so no need
* to do anything here.
*
* FALLTHROUGH: Ensure we don't try to overwrite our newly
* initialised state information on the first fault.
*/
case THREAD_NOTIFY_EXIT:
iwmmxt_task_release(thread);
break;
case THREAD_NOTIFY_SWITCH:
iwmmxt_task_switch(thread);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
.notifier_call = iwmmxt_do,
};
static u32 __init pj4_cp_access_read(void)
{
u32 value;
__asm__ __volatile__ (
"mrc p15, 0, %0, c1, c0, 2\n\t"
: "=r" (value));
return value;
}
static void __init pj4_cp_access_write(u32 value)
{
u32 temp;
__asm__ __volatile__ (
"mcr p15, 0, %1, c1, c0, 2\n\t"
#ifdef CONFIG_THUMB2_KERNEL
"isb\n\t"
#else
"mrc p15, 0, %0, c1, c0, 2\n\t"
"mov %0, %0\n\t"
"sub pc, pc, #4\n\t"
#endif
: "=r" (temp) : "r" (value));
}
static int __init pj4_get_iwmmxt_version(void)
{
u32 cp_access, wcid;
cp_access = pj4_cp_access_read();
pj4_cp_access_write(cp_access | 0xf);
/* check if coprocessor 0 and 1 are available */
if ((pj4_cp_access_read() & 0xf) != 0xf) {
pj4_cp_access_write(cp_access);
return -ENODEV;
}
/* read iWMMXt coprocessor id register p1, c0 */
__asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));
pj4_cp_access_write(cp_access);
/* iWMMXt v1 */
if ((wcid & 0xffffff00) == 0x56051000)
return 1;
/* iWMMXt v2 */
if ((wcid & 0xffffff00) == 0x56052000)
return 2;
return -EINVAL;
}
/*
* Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
* switch code handle iWMMXt context switching.
*/
static int __init pj4_cp0_init(void)
{
u32 __maybe_unused cp_access;
int vers;
if (!cpu_is_pj4())
return 0;
vers = pj4_get_iwmmxt_version();
if (vers < 0)
return 0;
#ifndef CONFIG_IWMMXT
pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
#else
cp_access = pj4_cp_access_read() & ~0xf;
pj4_cp_access_write(cp_access);
pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
elf_hwcap |= HWCAP_IWMMXT;
thread_register_notifier(&iwmmxt_notifier_block);
register_iwmmxt_undef_handler();
#endif
return 0;
}
late_initcall(pj4_cp0_init);


@ -220,7 +220,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
unsigned int fp, mode;
int ok = 1;
printk("%sBacktrace: ", loglvl);
printk("%sCall trace: ", loglvl);
if (!tsk)
tsk = current;


@ -524,6 +524,8 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
{
struct stackframe frame;
printk("%sCall trace: ", loglvl);
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (!tsk)


@ -25,6 +25,13 @@
#include "fault.h"
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
unsigned long addr = (unsigned long)unsafe_src;
return addr >= TASK_SIZE && ULONG_MAX - addr >= size;
}
#ifdef CONFIG_MMU
/*
@ -588,6 +595,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
pr_alert("8<--- cut here ---\n");
pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);


@ -296,6 +296,9 @@ void __sync_icache_dcache(pte_t pteval)
return;
folio = page_folio(pfn_to_page(pfn));
if (folio_test_reserved(folio))
return;
if (cache_is_vipt_aliasing())
mapping = folio_flush_mapping(folio);
else


@ -418,7 +418,7 @@ static void set_section_perms(struct section_perm *perms, int n, bool set,
}
/**
/*
* update_sections_early intended to be called only through stop_machine
* framework and executed by only one CPU while all other CPUs will spin and
* wait, so no locking is required in this function.


@ -120,6 +120,7 @@ config ARM64
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
select CPUMASK_OFFSTACK if NR_CPUS > 256
select CRC32
select DCACHE_WORD_ACCESS
select DYNAMIC_FTRACE if FUNCTION_TRACER
@ -1425,7 +1426,7 @@ config SCHED_SMT
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
default "256"
default "512"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"


@ -943,7 +943,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit(A64_UXTH(is64, dst, dst), ctx);
break;
case 32:
emit(A64_REV32(is64, dst, dst), ctx);
emit(A64_REV32(0, dst, dst), ctx);
/* upper 32 bits already cleared */
break;
case 64:
@ -1256,7 +1256,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
} else {
emit_a64_mov_i(1, tmp, off, ctx);
if (sign_extend)
emit(A64_LDRSW(dst, src_adj, off_adj), ctx);
emit(A64_LDRSW(dst, src, tmp), ctx);
else
emit(A64_LDR32(dst, src, tmp), ctx);
}


@ -63,6 +63,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
ELF_DETAILS
.hexagon.attributes 0 : { *(.hexagon.attributes) }
DISCARDS
}


@ -15,6 +15,7 @@ config LOONGARCH
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
@ -104,6 +105,7 @@ config LOONGARCH
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
select HAVE_ASM_MODVERSIONS
select HAVE_CONTEXT_TRACKING_USER
select HAVE_C_RECORDMCOUNT
@ -133,20 +135,24 @@ config LOONGARCH
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_PREEMPT_DYNAMIC_KEY
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if UNWINDER_ORC
select HAVE_RETHOOK
select HAVE_RSEQ
select HAVE_RUST
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_SETUP_PER_CPU_AREA if NUMA
select HAVE_STACK_VALIDATION if HAVE_OBJTOOL
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
@ -624,6 +630,8 @@ config RANDOMIZE_BASE_MAX_OFFSET
This is limited by the size of the lower address memory, 256MB.
source "kernel/livepatch/Kconfig"
endmenu
config ARCH_SELECT_MEMORY_MODEL


@ -26,4 +26,15 @@ config UNWINDER_PROLOGUE
Some of the addresses it reports may be incorrect (but better than the
Guess unwinder).
config UNWINDER_ORC
bool "ORC unwinder"
select OBJTOOL
help
This option enables the ORC (Oops Rewind Capability) unwinder for
unwinding kernel stack traces. It uses a custom data format which is
a simplified version of the DWARF Call Frame Information standard.
Enabling this option will increase the kernel's runtime memory usage
by roughly 2-4MB, depending on your kernel config.
endchoice


@ -26,6 +26,18 @@ endif
32bit-emul = elf32loongarch
64bit-emul = elf64loongarch
ifdef CONFIG_UNWINDER_ORC
orc_hash_h := arch/$(SRCARCH)/include/generated/asm/orc_hash.h
orc_hash_sh := $(srctree)/scripts/orc_hash.sh
targets += $(orc_hash_h)
quiet_cmd_orc_hash = GEN $@
cmd_orc_hash = mkdir -p $(dir $@); \
$(CONFIG_SHELL) $(orc_hash_sh) < $< > $@
$(orc_hash_h): $(srctree)/arch/loongarch/include/asm/orc_types.h $(orc_hash_sh) FORCE
$(call if_changed,orc_hash)
archprepare: $(orc_hash_h)
endif
ifdef CONFIG_DYNAMIC_FTRACE
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
CC_FLAGS_FTRACE := -fpatchable-function-entry=2
@ -72,8 +84,6 @@ KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access)
KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdirect-access-external-data)
KBUILD_AFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
KBUILD_CFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
else
cflags-y += $(call cc-option,-mno-explicit-relocs)
KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
@ -82,6 +92,15 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
endif
KBUILD_AFLAGS += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
KBUILD_CFLAGS += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
KBUILD_AFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub)
KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub)
ifdef CONFIG_OBJTOOL
KBUILD_CFLAGS += -fno-jump-tables
endif
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_RUSTFLAGS_MODULE += -Crelocation-model=pic


@ -44,7 +44,6 @@ static u32 crc32_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
CRC32(crc, value, w);
p += sizeof(u32);
len -= sizeof(u32);
}
if (len & sizeof(u16)) {
@ -80,7 +79,6 @@ static u32 crc32c_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
CRC32C(crc, value, w);
p += sizeof(u32);
len -= sizeof(u32);
}
if (len & sizeof(u16)) {


@ -1,9 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += orc_hash.h
generic-y += dma-contiguous.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += user.h


@ -44,6 +44,7 @@
do { \
instrumentation_begin(); \
__BUG_FLAGS(BUGFLAG_WARNING|(flags)); \
annotate_reachable(); \
instrumentation_end(); \
} while (0)


@ -37,8 +37,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_range local_flush_icache_range
#define flush_icache_user_range local_flush_icache_range
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
@ -47,7 +45,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define flush_icache_user_page(vma, page, addr, len) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)


@ -6,6 +6,8 @@
#include <asm/ptrace.h>
#include <linux/kprobes.h>
extern void *exception_table[];
void show_registers(struct pt_regs *regs);
asmlinkage void cache_parity_error(void);


@ -71,6 +71,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
#define __io_aw() mmiowb()
#include <asm-generic/io.h>
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE


@ -6,6 +6,7 @@
#define _ASM_MODULE_H
#include <asm/inst.h>
#include <asm/orc_types.h>
#include <asm-generic/module.h>
#define RELA_STACK_DEPTH 16
@ -21,6 +22,12 @@ struct mod_arch_specific {
struct mod_section plt;
struct mod_section plt_idx;
#ifdef CONFIG_UNWINDER_ORC
unsigned int num_orcs;
int *orc_unwind_ip;
struct orc_entry *orc_unwind;
#endif
/* For CONFIG_DYNAMIC_FTRACE */
struct plt_entry *ftrace_trampolines;
};


@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ORC_HEADER_H
#define _ORC_HEADER_H
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/orc_hash.h>
/*
* The header is currently a 20-byte hash of the ORC entry definition; see
* scripts/orc_hash.sh.
*/
#define ORC_HEADER \
__used __section(".orc_header") __aligned(4) \
static const u8 orc_header[] = { ORC_HASH }
#endif /* _ORC_HEADER_H */

View file

@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ORC_LOOKUP_H
#define _ORC_LOOKUP_H
/*
* This is a lookup table for speeding up access to the .orc_unwind table.
* Given an input address offset, the corresponding lookup table entry
* specifies a subset of the .orc_unwind table to search.
*
* Each block represents the end of the previous range and the start of the
* next range. An extra block is added to give the last range an end.
*
* The block size should be a power of 2 to avoid a costly 'div' instruction.
*
* A block size of 256 was chosen because it roughly doubles unwinder
* performance while only adding ~5% to the ORC data footprint.
*/
#define LOOKUP_BLOCK_ORDER 8
#define LOOKUP_BLOCK_SIZE (1 << LOOKUP_BLOCK_ORDER)
#ifndef LINKER_SCRIPT
extern unsigned int orc_lookup[];
extern unsigned int orc_lookup_end[];
#define LOOKUP_START_IP (unsigned long)_stext
#define LOOKUP_STOP_IP (unsigned long)_etext
#endif /* LINKER_SCRIPT */
#endif /* _ORC_LOOKUP_H */


@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ORC_TYPES_H
#define _ORC_TYPES_H
#include <linux/types.h>
/*
* The ORC_REG_* registers are base registers which are used to find other
* registers on the stack.
*
* ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
* address of the previous frame: the caller's SP before it called the current
* function.
*
* ORC_REG_UNDEFINED means the corresponding register's value didn't change in
* the current frame.
*
* The most commonly used base registers are SP and FP -- which the previous SP
* is usually based on -- and PREV_SP and UNDEFINED -- which the previous FP is
* usually based on.
*
* The rest of the base registers are needed for special cases like entry code
* and GCC realigned stacks.
*/
#define ORC_REG_UNDEFINED 0
#define ORC_REG_PREV_SP 1
#define ORC_REG_SP 2
#define ORC_REG_FP 3
#define ORC_REG_MAX 4
#define ORC_TYPE_UNDEFINED 0
#define ORC_TYPE_END_OF_STACK 1
#define ORC_TYPE_CALL 2
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
#ifndef __ASSEMBLY__
/*
* This struct is more or less a vastly simplified version of the DWARF Call
* Frame Information standard. It contains only the necessary parts of DWARF
* CFI, simplified for ease of access by the in-kernel unwinder. It tells the
* unwinder how to find the previous SP and FP (and sometimes entry regs) on
* the stack for a given code address. Each instance of the struct corresponds
* to one or more code locations.
*/
struct orc_entry {
s16 sp_offset;
s16 fp_offset;
s16 ra_offset;
unsigned int sp_reg:4;
unsigned int fp_reg:4;
unsigned int ra_reg:4;
unsigned int type:3;
unsigned int signal:1;
};
#endif /* __ASSEMBLY__ */
#endif /* _ORC_TYPES_H */


@ -75,6 +75,9 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
struct page *dmw_virt_to_page(unsigned long kaddr);
struct page *tlb_virt_to_page(unsigned long kaddr);
#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
#define virt_to_page(kaddr) \


@ -29,7 +29,12 @@ static inline void set_my_cpu_offset(unsigned long off)
__my_cpu_offset = off;
csr_write64(off, PERCPU_BASE_KS);
}
#define __my_cpu_offset __my_cpu_offset
#define __my_cpu_offset \
({ \
__asm__ __volatile__("":"+r"(__my_cpu_offset)); \
__my_cpu_offset; \
})
#define PERCPU_OP(op, asm_op, c_op) \
static __always_inline unsigned long __percpu_##op(void *ptr, \


@ -363,9 +363,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
struct page *dmw_virt_to_page(unsigned long kaddr);
struct page *tlb_virt_to_page(unsigned long kaddr);
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..


@ -1,18 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_QSPINLOCK_H
#define _ASM_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
compiletime_assert_atomic_type(lock->locked);
c_sync();
WRITE_ONCE(lock->locked, 0);
}
#include <asm-generic/qspinlock.h>
#endif /* _ASM_QSPINLOCK_H */


@ -13,6 +13,7 @@
#include <asm/asm-offsets.h>
#include <asm/loongarch.h>
#include <asm/thread_info.h>
#include <asm/unwind_hints.h>
/* Make the addition of cfi info a little easier. */
.macro cfi_rel_offset reg offset=0 docfi=0
@ -162,6 +163,7 @@
li.w t0, CSR_CRMD_WE
csrxchg t0, t0, LOONGARCH_CSR_CRMD
#endif
UNWIND_HINT_REGS
.endm
.macro SAVE_ALL docfi=0
@ -219,6 +221,7 @@
.macro RESTORE_SP_AND_RET docfi=0
cfi_ld sp, PT_R3, \docfi
UNWIND_HINT_FUNC
ertn
.endm


@ -86,6 +86,7 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */
#define TIF_USEDLBT 19 /* LBT was used by this task this quantum (SMP) */
#define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */
#define TIF_PATCH_PENDING 21 /* pending live patching update */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@ -105,6 +106,7 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE)
#define _TIF_USEDLBT (1<<TIF_USEDLBT)
#define _TIF_LBT_CTX_LIVE (1<<TIF_LBT_CTX_LIVE)
#define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING)
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */


@ -16,6 +16,7 @@
enum unwinder_type {
UNWINDER_GUESS,
UNWINDER_PROLOGUE,
UNWINDER_ORC,
};
struct unwind_state {
@ -24,7 +25,7 @@ struct unwind_state {
struct task_struct *task;
bool first, error, reset;
int graph_idx;
unsigned long sp, pc, ra;
unsigned long sp, fp, pc, ra;
};
bool default_next_frame(struct unwind_state *state);
@ -61,14 +62,17 @@ static __always_inline void __unwind_start(struct unwind_state *state,
state->sp = regs->regs[3];
state->pc = regs->csr_era;
state->ra = regs->regs[1];
state->fp = regs->regs[22];
} else if (task && task != current) {
state->sp = thread_saved_fp(task);
state->pc = thread_saved_ra(task);
state->ra = 0;
state->fp = 0;
} else {
state->sp = (unsigned long)__builtin_frame_address(0);
state->pc = (unsigned long)__builtin_return_address(0);
state->ra = 0;
state->fp = 0;
}
state->task = task;
get_stack_info(state->sp, state->task, &state->stack_info);
@ -77,6 +81,18 @@ static __always_inline void __unwind_start(struct unwind_state *state,
static __always_inline unsigned long __unwind_get_return_address(struct unwind_state *state)
{
return unwind_done(state) ? 0 : state->pc;
if (unwind_done(state))
return 0;
return __kernel_text_address(state->pc) ? state->pc : 0;
}
#ifdef CONFIG_UNWINDER_ORC
void unwind_init(void);
void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size);
#else
static inline void unwind_init(void) {}
static inline void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size) {}
#endif
#endif /* _ASM_UNWIND_H */


@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_UNWIND_HINTS_H
#define _ASM_LOONGARCH_UNWIND_HINTS_H
#include <linux/objtool.h>
#include <asm/orc_types.h>
#ifdef __ASSEMBLY__
.macro UNWIND_HINT_UNDEFINED
UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED
.endm
.macro UNWIND_HINT_END_OF_STACK
UNWIND_HINT type=UNWIND_HINT_TYPE_END_OF_STACK
.endm
.macro UNWIND_HINT_REGS
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_REGS
.endm
.macro UNWIND_HINT_FUNC
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm
#endif /* __ASSEMBLY__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */


@ -3,6 +3,8 @@
# Makefile for the Linux/LoongArch kernel.
#
OBJECT_FILES_NON_STANDARD_head.o := y
extra-y := vmlinux.lds
obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
@ -21,6 +23,7 @@ obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
ifdef CONFIG_FUNCTION_TRACER
@ -62,6 +65,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o


@ -14,11 +14,13 @@
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/unwind_hints.h>
.text
.cfi_sections .debug_frame
.align 5
SYM_CODE_START(handle_syscall)
UNWIND_HINT_UNDEFINED
csrrd t0, PERCPU_BASE_KS
la.pcrel t1, kernelsp
add.d t1, t1, t0
@ -57,6 +59,7 @@ SYM_CODE_START(handle_syscall)
cfi_st fp, PT_R22
SAVE_STATIC
UNWIND_HINT_REGS
#ifdef CONFIG_KGDB
li.w t1, CSR_CRMD_WE
@ -75,6 +78,7 @@ SYM_CODE_END(handle_syscall)
_ASM_NOKPROBE(handle_syscall)
SYM_CODE_START(ret_from_fork)
UNWIND_HINT_REGS
bl schedule_tail # a0 = struct task_struct *prev
move a0, sp
bl syscall_exit_to_user_mode
@ -84,6 +88,7 @@ SYM_CODE_START(ret_from_fork)
SYM_CODE_END(ret_from_fork)
SYM_CODE_START(ret_from_kernel_thread)
UNWIND_HINT_REGS
bl schedule_tail # a0 = struct task_struct *prev
move a0, s1
jirl ra, s0, 0


@ -15,6 +15,7 @@
#include <asm/fpregdef.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
#define FPU_REG_WIDTH 8
#define LSX_REG_WIDTH 16
@ -526,3 +527,9 @@ SYM_FUNC_END(_restore_lasx_context)
.L_fpu_fault:
li.w a0, -EFAULT # failure
jr ra
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD _restore_fp
STACK_FRAME_NON_STANDARD _restore_lsx
STACK_FRAME_NON_STANDARD _restore_lasx
#endif


@ -32,6 +32,7 @@ SYM_FUNC_START(__arch_cpu_idle)
SYM_FUNC_END(__arch_cpu_idle)
SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
la_abs t1, __arch_cpu_idle
@ -49,6 +50,7 @@ SYM_CODE_START(handle_vint)
SYM_CODE_END(handle_vint)
SYM_CODE_START(except_vec_cex)
UNWIND_HINT_UNDEFINED
b cache_parity_error
SYM_CODE_END(except_vec_cex)
@ -67,6 +69,7 @@ SYM_CODE_END(except_vec_cex)
.macro BUILD_HANDLER exception handler prep
.align 5
SYM_CODE_START(handle_\exception)
UNWIND_HINT_UNDEFINED
666:
BACKUP_T0T1
SAVE_ALL
@ -77,7 +80,9 @@ SYM_CODE_END(except_vec_cex)
668:
RESTORE_ALL_AND_RET
SYM_CODE_END(handle_\exception)
.pushsection ".data", "aw", %progbits
SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
.popsection
.endm
BUILD_HANDLER ade ade badv
@@ -94,6 +99,7 @@ SYM_CODE_END(except_vec_cex)
BUILD_HANDLER reserved reserved none /* others */
SYM_CODE_START(handle_sys)
UNWIND_HINT_UNDEFINED
la_abs t0, handle_syscall
jr t0
SYM_CODE_END(handle_sys)

View file

@@ -11,6 +11,7 @@
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
#define SCR_REG_WIDTH 8
@@ -153,3 +154,5 @@ SYM_FUNC_END(_restore_ftop_context)
.L_lbt_fault:
li.w a0, -EFAULT # failure
jr ra
STACK_FRAME_NON_STANDARD _restore_ftop_context

View file

@@ -73,6 +73,7 @@ SYM_FUNC_START(ftrace_stub)
SYM_FUNC_END(ftrace_stub)
SYM_CODE_START(ftrace_common)
UNWIND_HINT_UNDEFINED
PTR_ADDI a0, ra, -8 /* arg0: ip */
move a1, t0 /* arg1: parent_ip */
la.pcrel t1, function_trace_op
@@ -113,12 +114,14 @@ ftrace_common_return:
SYM_CODE_END(ftrace_common)
SYM_CODE_START(ftrace_caller)
UNWIND_HINT_UNDEFINED
ftrace_regs_entry allregs=0
b ftrace_common
SYM_CODE_END(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
SYM_CODE_START(ftrace_regs_caller)
UNWIND_HINT_UNDEFINED
ftrace_regs_entry allregs=1
b ftrace_common
SYM_CODE_END(ftrace_regs_caller)
@@ -126,6 +129,7 @@ SYM_CODE_END(ftrace_regs_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(ftrace_graph_caller)
UNWIND_HINT_UNDEFINED
PTR_L a0, sp, PT_ERA
PTR_ADDI a0, a0, -8 /* arg0: self_addr */
PTR_ADDI a1, sp, PT_R1 /* arg1: parent */
@@ -134,6 +138,7 @@ SYM_CODE_START(ftrace_graph_caller)
SYM_CODE_END(ftrace_graph_caller)
SYM_CODE_START(return_to_handler)
UNWIND_HINT_UNDEFINED
/* Save return value regs */
PTR_ADDI sp, sp, -FGRET_REGS_SIZE
PTR_S a0, sp, FGRET_REGS_A0
@@ -155,6 +160,7 @@ SYM_CODE_END(return_to_handler)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
SYM_CODE_START(ftrace_stub_direct_tramp)
UNWIND_HINT_UNDEFINED
jr t0
SYM_CODE_END(ftrace_stub_direct_tramp)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

View file

@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <asm/alternative.h>
#include <asm/inst.h>
#include <asm/unwind.h>
static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
{
@@ -515,15 +516,28 @@ static void module_init_ftrace_plt(const Elf_Ehdr *hdr,
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *mod)
{
const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
const Elf_Shdr *s, *alt = NULL, *orc = NULL, *orc_ip = NULL, *ftrace = NULL;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (!strcmp(".altinstructions", secstrs + s->sh_name))
apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size);
alt = s;
if (!strcmp(".orc_unwind", secstrs + s->sh_name))
orc = s;
if (!strcmp(".orc_unwind_ip", secstrs + s->sh_name))
orc_ip = s;
if (!strcmp(".ftrace_trampoline", secstrs + s->sh_name))
module_init_ftrace_plt(hdr, s, mod);
ftrace = s;
}
if (alt)
apply_alternatives((void *)alt->sh_addr, (void *)alt->sh_addr + alt->sh_size);
if (orc && orc_ip)
unwind_module_init(mod, (void *)orc_ip->sh_addr, orc_ip->sh_size, (void *)orc->sh_addr, orc->sh_size);
if (ftrace)
module_init_ftrace_plt(hdr, ftrace, mod);
return 0;
}

View file

@@ -15,6 +15,7 @@
#include <asm/addrspace.h>
SYM_CODE_START(relocate_new_kernel)
UNWIND_HINT_UNDEFINED
/*
* a0: EFI boot flag for the new kernel
* a1: Command line pointer for the new kernel
@@ -90,6 +91,7 @@ SYM_CODE_END(relocate_new_kernel)
* then start at the entry point from LOONGARCH_IOCSR_MBUF0.
*/
SYM_CODE_START(kexec_smp_wait)
UNWIND_HINT_UNDEFINED
1: li.w t0, 0x100 /* wait for init loop */
2: addi.w t0, t0, -1 /* limit mailbox access */
bnez t0, 2b
@@ -106,6 +108,5 @@ SYM_CODE_END(kexec_smp_wait)
relocate_new_kernel_end:
SYM_DATA_START(relocate_new_kernel_size)
PTR relocate_new_kernel_end - relocate_new_kernel
SYM_DATA_END(relocate_new_kernel_size)
.section ".data"
SYM_DATA(relocate_new_kernel_size, .long relocate_new_kernel_end - relocate_new_kernel)

View file

@@ -76,6 +76,7 @@
.endm
SYM_CODE_START(arch_rethook_trampoline)
UNWIND_HINT_UNDEFINED
addi.d sp, sp, -PT_SIZE
save_all_base_regs

View file

@@ -47,6 +47,7 @@
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/unwind.h>
#define SMBIOS_BIOSSIZE_OFFSET 0x09
#define SMBIOS_BIOSEXTERN_OFFSET 0x13
@@ -587,6 +588,7 @@ static void __init prefill_possible_map(void)
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
unwind_init();
init_environ();
efi_init();

View file

@@ -29,6 +29,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
regs->csr_era = thread_saved_ra(task);
}
regs->regs[1] = 0;
regs->regs[22] = 0;
}
for (unwind_start(&state, task, regs);
@@ -39,6 +40,46 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
}
}
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task)
{
unsigned long addr;
struct pt_regs dummyregs;
struct pt_regs *regs = &dummyregs;
struct unwind_state state;
if (task == current) {
regs->regs[3] = (unsigned long)__builtin_frame_address(0);
regs->csr_era = (unsigned long)__builtin_return_address(0);
} else {
regs->regs[3] = thread_saved_fp(task);
regs->csr_era = thread_saved_ra(task);
}
regs->regs[1] = 0;
regs->regs[22] = 0;
for (unwind_start(&state, task, regs);
!unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
/*
* A NULL or invalid return address probably means there's some
* generated code which __kernel_text_address() doesn't know about.
*/
if (!addr)
return -EINVAL;
if (!consume_entry(cookie, addr))
return -EINVAL;
}
/* Check for stack corruption */
if (unwind_error(&state))
return -EINVAL;
return 0;
}
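
arch_stack_walk_reliable() feeds every return address to the consume_entry callback and bails out with -EINVAL as soon as the trace looks suspect; generic helpers such as stack_trace_save_tsk_reliable() build on that contract. A hedged sketch of what a caller-side callback could look like (the trace_buf structure and collect_entry() are invented for this example, not part of the tree):

#include <linux/types.h>

/* Hypothetical consumer: collect up to 'max' return addresses into a
 * caller-provided buffer; returning false stops the walk early. */
struct trace_buf {
        unsigned long *entries;
        unsigned int nr, max;
};

static bool collect_entry(void *cookie, unsigned long addr)
{
        struct trace_buf *buf = cookie;

        if (buf->nr >= buf->max)
                return false;
        buf->entries[buf->nr++] = addr;
        return true;
}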
static int
copy_stack_frame(unsigned long fp, struct stack_frame *frame)
{

View file

@@ -53,6 +53,32 @@
#include "access-helper.h"
void *exception_table[EXCCODE_INT_START] = {
[0 ... EXCCODE_INT_START - 1] = handle_reserved,
[EXCCODE_TLBI] = handle_tlb_load,
[EXCCODE_TLBL] = handle_tlb_load,
[EXCCODE_TLBS] = handle_tlb_store,
[EXCCODE_TLBM] = handle_tlb_modify,
[EXCCODE_TLBNR] = handle_tlb_protect,
[EXCCODE_TLBNX] = handle_tlb_protect,
[EXCCODE_TLBPE] = handle_tlb_protect,
[EXCCODE_ADE] = handle_ade,
[EXCCODE_ALE] = handle_ale,
[EXCCODE_BCE] = handle_bce,
[EXCCODE_SYS] = handle_sys,
[EXCCODE_BP] = handle_bp,
[EXCCODE_INE] = handle_ri,
[EXCCODE_IPE] = handle_ri,
[EXCCODE_FPDIS] = handle_fpu,
[EXCCODE_LSXDIS] = handle_lsx,
[EXCCODE_LASXDIS] = handle_lasx,
[EXCCODE_FPE] = handle_fpe,
[EXCCODE_WATCH] = handle_watch,
[EXCCODE_BTDIS] = handle_lbt,
};
EXPORT_SYMBOL_GPL(exception_table);
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
const char *loglvl, bool user)
{
@@ -1150,19 +1176,9 @@ void __init trap_init(void)
for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
set_handler(i * VECSIZE, handle_vint, VECSIZE);
set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);
/* Set exception vector handler */
for (i = EXCCODE_ADE; i <= EXCCODE_BTDIS; i++)
set_handler(i * VECSIZE, exception_table[i], VECSIZE);
cache_error_setup();

View file

@@ -0,0 +1,528 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/exception.h>
#include <asm/orc_header.h>
#include <asm/orc_lookup.h>
#include <asm/orc_types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/unwind.h>
ORC_HEADER;
#define orc_warn(fmt, ...) \
printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];
static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;
/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
.sp_reg = ORC_REG_FP,
.sp_offset = 16,
.fp_reg = ORC_REG_PREV_SP,
.fp_offset = -16,
.ra_reg = ORC_REG_PREV_SP,
.ra_offset = -8,
.type = ORC_TYPE_CALL
};
/*
* If we crash with IP==0, the last successfully executed instruction
* was probably an indirect function call with a NULL function pointer,
* and we don't have unwind information for NULL.
* This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
* pointer into its parent and then continue normally from there.
*/
static struct orc_entry orc_null_entry = {
.sp_reg = ORC_REG_SP,
.sp_offset = sizeof(long),
.fp_reg = ORC_REG_UNDEFINED,
.type = ORC_TYPE_CALL
};
static inline unsigned long orc_ip(const int *ip)
{
return (unsigned long)ip + *ip;
}
static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
unsigned int num_entries, unsigned long ip)
{
int *first = ip_table;
int *mid = first, *found = first;
int *last = ip_table + num_entries - 1;
if (!num_entries)
return NULL;
/*
* Do a binary range search to find the rightmost duplicate of a given
* starting address. Some entries are section terminators which are
* "weak" entries for ensuring there are no gaps. They should be
* ignored when they conflict with a real entry.
*/
while (first <= last) {
mid = first + ((last - first) / 2);
if (orc_ip(mid) <= ip) {
found = mid;
first = mid + 1;
} else
last = mid - 1;
}
return u_table + (found - ip_table);
}
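
The .orc_unwind_ip table stores every address as a 32-bit offset relative to the table slot itself (decoded by orc_ip() above), and __orc_find() searches for the rightmost entry whose start address is <= ip, so a real entry wins over a "weak" terminator with the same start. Below is a stand-alone sketch of that rightmost-match search; it uses absolute addresses and made-up values instead of the self-relative encoding:

/* Stand-alone model of the rightmost-match binary search in __orc_find();
 * ip_table[] holds absolute start addresses here, and all values are fake. */
#include <stdio.h>

int find_rightmost(const unsigned long *ip_table, int n, unsigned long ip)
{
        int first = 0, last = n - 1, found = 0;

        while (first <= last) {
                int mid = first + (last - first) / 2;

                if (ip_table[mid] <= ip) {
                        found = mid;        /* candidate; keep looking right */
                        first = mid + 1;
                } else {
                        last = mid - 1;
                }
        }
        return found;                       /* index of the covering entry */
}

int main(void)
{
        unsigned long starts[] = { 0x1000, 0x1000, 0x1040, 0x10a0 };

        /* 0x1058 lies in the region starting at 0x1040 -> index 2;
         * 0x1000 resolves to index 1, the rightmost duplicate. */
        printf("%d %d\n", find_rightmost(starts, 4, 0x1058),
               find_rightmost(starts, 4, 0x1000));
        return 0;
}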
#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
struct module *mod;
mod = __module_address(ip);
if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
return NULL;
return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
return NULL;
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);
/*
* Ftrace dynamic trampolines do not have orc entries of their own.
* But they are copies of the ftrace entries that are static and
* defined in ftrace_*.S, which do have orc entries.
*
* If the unwinder comes across a ftrace trampoline, then find the
* ftrace function that was used to create it, and use that ftrace
* function's orc entry, as the placement of the return code in
* the stack will be identical.
*/
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
struct ftrace_ops *ops;
unsigned long tramp_addr, offset;
ops = ftrace_ops_trampoline(ip);
if (!ops)
return NULL;
/* Set tramp_addr to the start of the code copied by the trampoline */
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
tramp_addr = (unsigned long)ftrace_regs_caller;
else
tramp_addr = (unsigned long)ftrace_caller;
/* Now place tramp_addr to the location within the trampoline ip is at */
offset = ip - ops->trampoline;
tramp_addr += offset;
/* Prevent unlikely recursion */
if (ip == tramp_addr)
return NULL;
return orc_find(tramp_addr);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
return NULL;
}
#endif
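
As the comment block above explains, a dynamically allocated ftrace trampoline is a copy of ftrace_caller/ftrace_regs_caller, so its ORC data can be borrowed by rebasing the address back onto the static text. A small sketch of that rebasing (the function name and the idea of passing raw addresses are invented for illustration):

/* Illustration only: map an address inside a copied ftrace trampoline back
 * onto the static text it was copied from, so that text's ORC entry can be
 * reused.  Not a kernel helper. */
static unsigned long rebase_onto_static_copy(unsigned long ip,
                                             unsigned long trampoline,
                                             unsigned long static_copy)
{
        return static_copy + (ip - trampoline);
}

orc_ftrace_find() above performs this with ops->trampoline and ftrace_caller/ftrace_regs_caller, and bails out if the rebased address equals ip to avoid recursing into itself.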
static struct orc_entry *orc_find(unsigned long ip)
{
static struct orc_entry *orc;
if (ip == 0)
return &orc_null_entry;
/* For non-init vmlinux addresses, use the fast lookup table: */
if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
unsigned int idx, start, stop;
idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
if (unlikely((idx >= lookup_num_blocks-1))) {
orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
idx, lookup_num_blocks, (void *)ip);
return NULL;
}
start = orc_lookup[idx];
stop = orc_lookup[idx + 1] + 1;
if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
(__start_orc_unwind + stop > __stop_orc_unwind))) {
orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
idx, lookup_num_blocks, start, stop, (void *)ip);
return NULL;
}
return __orc_find(__start_orc_unwind_ip + start,
__start_orc_unwind + start, stop - start, ip);
}
/* vmlinux .init slow lookup: */
if (is_kernel_inittext(ip))
return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
__stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
/* Module lookup: */
orc = orc_module_find(ip);
if (orc)
return orc;
return orc_ftrace_find(ip);
}
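
For core-kernel text, orc_find() first narrows the binary search with the orc_lookup[] block table: the address is turned into a LOOKUP_BLOCK_SIZE-sized block index, and consecutive table slots bound the range of ORC entries that can cover that block. A small sketch of the index arithmetic; the block size and base address below are made up, the real constants come from asm/orc_lookup.h:

/* Illustrative two-level lookup arithmetic with fake constants. */
unsigned int lookup_block(unsigned long ip)
{
        const unsigned long lookup_start_ip = 0x9000000000000000UL; /* fake */
        const unsigned long block_size = 256;                       /* fake */

        /* orc_lookup[idx] .. orc_lookup[idx + 1] then bounds the search */
        return (ip - lookup_start_ip) / block_size;
}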
#ifdef CONFIG_MODULES
static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;
static void orc_sort_swap(void *_a, void *_b, int size)
{
int delta = _b - _a;
int *a = _a, *b = _b, tmp;
struct orc_entry *orc_a, *orc_b;
/* Swap the .orc_unwind_ip entries: */
tmp = *a;
*a = *b + delta;
*b = tmp - delta;
/* Swap the corresponding .orc_unwind entries: */
orc_a = cur_orc_table + (a - cur_orc_ip_table);
orc_b = cur_orc_table + (b - cur_orc_ip_table);
swap(*orc_a, *orc_b);
}
static int orc_sort_cmp(const void *_a, const void *_b)
{
const int *a = _a, *b = _b;
unsigned long a_val = orc_ip(a);
unsigned long b_val = orc_ip(b);
struct orc_entry *orc_a;
if (a_val > b_val)
return 1;
if (a_val < b_val)
return -1;
/*
* The "weak" section terminator entries need to always be first
* to ensure the lookup code skips them in favor of real entries.
* These terminator entries exist to handle any gaps created by
* whitelisted .o files which didn't get objtool generation.
*/
orc_a = cur_orc_table + (a - cur_orc_ip_table);
return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
void *_orc, size_t orc_size)
{
int *orc_ip = _orc_ip;
struct orc_entry *orc = _orc;
unsigned int num_entries = orc_ip_size / sizeof(int);
WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
orc_size % sizeof(*orc) != 0 ||
num_entries != orc_size / sizeof(*orc));
/*
* The 'cur_orc_*' globals allow the orc_sort_swap() callback to
* associate an .orc_unwind_ip table entry with its corresponding
* .orc_unwind entry so they can both be swapped.
*/
mutex_lock(&sort_mutex);
cur_orc_ip_table = orc_ip;
cur_orc_table = orc;
sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
mutex_unlock(&sort_mutex);
mod->arch.orc_unwind_ip = orc_ip;
mod->arch.orc_unwind = orc;
mod->arch.num_orcs = num_entries;
}
#endif
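
Because each .orc_unwind_ip slot holds a self-relative offset, orc_sort_swap() cannot simply exchange the two 32-bit values: every value that moves to a different slot has to be rebiased by the distance between the slots, while the matching .orc_unwind entries are swapped in lock-step. A stand-alone check of that rebiasing with made-up numbers:

/* Why orc_sort_swap() adds and subtracts 'delta': each slot stores
 * (target - &slot), so a value moved to another slot must be rebiased by
 * the distance between the slots.  All numbers below are invented. */
#include <assert.h>

int main(void)
{
        long slot_a = 0x1000, slot_b = 0x1004;       /* slot addresses  */
        long target_a = 0x5000, target_b = 0x6000;   /* code addresses  */
        int a = target_a - slot_a, b = target_b - slot_b;
        int delta = slot_b - slot_a;
        int tmp = a;

        a = b + delta;               /* target_b, now relative to slot_a */
        b = tmp - delta;             /* target_a, now relative to slot_b */

        assert(slot_a + a == target_b && slot_b + b == target_a);
        return 0;
}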
void __init unwind_init(void)
{
int i;
size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
size_t num_entries = orc_ip_size / sizeof(int);
struct orc_entry *orc;
if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
orc_size % sizeof(struct orc_entry) != 0 ||
num_entries != orc_size / sizeof(struct orc_entry)) {
orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n");
return;
}
/*
* Note, the orc_unwind and orc_unwind_ip tables were already
* sorted at build time via the 'sorttable' tool.
* It's ready for binary search straight away, no need to sort it.
*/
/* Initialize the fast lookup table: */
lookup_num_blocks = orc_lookup_end - orc_lookup;
for (i = 0; i < lookup_num_blocks-1; i++) {
orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
num_entries, LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
if (!orc) {
orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
return;
}
orc_lookup[i] = orc - __start_orc_unwind;
}
/* Initialize the ending block: */
orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries, LOOKUP_STOP_IP);
if (!orc) {
orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
return;
}
orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;
orc_init = true;
}
static inline bool on_stack(struct stack_info *info, unsigned long addr, size_t len)
{
unsigned long begin = info->begin;
unsigned long end = info->end;
return (info->type != STACK_TYPE_UNKNOWN &&
addr >= begin && addr < end && addr + len > begin && addr + len <= end);
}
static bool stack_access_ok(struct unwind_state *state, unsigned long addr, size_t len)
{
struct stack_info *info = &state->stack_info;
if (on_stack(info, addr, len))
return true;
return !get_stack_info(addr, state->task, info) && on_stack(info, addr, len);
}
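
on_stack() only accepts an access when the whole [addr, addr + len) range lies inside the current stack, and stack_access_ok() retries once after re-resolving which stack the address belongs to. A tiny worked check of the range test, with invented stack bounds:

/* The containment test used by on_stack(): the last full 8-byte slot of a
 * fake stack passes, one byte further does not. */
#include <assert.h>

static int fits(unsigned long begin, unsigned long end,
                unsigned long addr, unsigned long len)
{
        return addr >= begin && addr < end &&
               addr + len > begin && addr + len <= end;
}

int main(void)
{
        assert(fits(0x9000, 0xa000, 0x9ff8, 8));
        assert(!fits(0x9000, 0xa000, 0x9ff9, 8));
        return 0;
}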
unsigned long unwind_get_return_address(struct unwind_state *state)
{
return __unwind_get_return_address(state);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
void unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs)
{
__unwind_start(state, task, regs);
state->type = UNWINDER_ORC;
if (!unwind_done(state) && !__kernel_text_address(state->pc))
unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(unwind_start);
static bool is_entry_func(unsigned long addr)
{
extern u32 kernel_entry;
extern u32 kernel_entry_end;
return addr >= (unsigned long)&kernel_entry && addr < (unsigned long)&kernel_entry_end;
}
static inline unsigned long bt_address(unsigned long ra)
{
extern unsigned long eentry;
if (__kernel_text_address(ra))
return ra;
if (__module_text_address(ra))
return ra;
if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
unsigned long func;
unsigned long type = (ra - eentry) / VECSIZE;
unsigned long offset = (ra - eentry) % VECSIZE;
switch (type) {
case 0 ... EXCCODE_INT_START - 1:
func = (unsigned long)exception_table[type];
break;
case EXCCODE_INT_START ... EXCCODE_INT_END:
func = (unsigned long)handle_vint;
break;
default:
func = (unsigned long)handle_reserved;
break;
}
return func + offset;
}
return ra;
}
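
bt_address() maps a return address that falls inside the copied exception vectors (eentry + type * VECSIZE + offset) back onto the handler the vector was copied from, so the ORC tables, which only describe the handlers' original text, still apply. A minimal sketch of that translation; the helper name and all values are illustrative:

/* Sketch of the vector -> handler translation done by bt_address(). */
unsigned long vector_to_handler(unsigned long ra, unsigned long eentry,
                                unsigned long vecsize,
                                unsigned long handler)
{
        unsigned long offset = (ra - eentry) % vecsize;

        /* (ra - eentry) / vecsize picked 'handler' out of exception_table */
        return handler + offset;
}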
bool unwind_next_frame(struct unwind_state *state)
{
unsigned long *p, pc;
struct pt_regs *regs;
struct orc_entry *orc;
struct stack_info *info = &state->stack_info;
if (unwind_done(state))
return false;
/* Don't let modules unload while we're reading their ORC data. */
preempt_disable();
if (is_entry_func(state->pc))
goto end;
orc = orc_find(state->pc);
if (!orc) {
/*
* As a fallback, try to assume this code uses a frame pointer.
* This is useful for generated code, like BPF, which ORC
* doesn't know about. This is just a guess, so the rest of
* the unwind is no longer considered reliable.
*/
orc = &orc_fp_entry;
state->error = true;
} else {
if (orc->type == ORC_TYPE_UNDEFINED)
goto err;
if (orc->type == ORC_TYPE_END_OF_STACK)
goto end;
}
switch (orc->sp_reg) {
case ORC_REG_SP:
if (info->type == STACK_TYPE_IRQ && state->sp == info->end)
orc->type = ORC_TYPE_REGS;
else
state->sp = state->sp + orc->sp_offset;
break;
case ORC_REG_FP:
state->sp = state->fp;
break;
default:
orc_warn("unknown SP base reg %d at %pB\n", orc->sp_reg, (void *)state->pc);
goto err;
}
switch (orc->fp_reg) {
case ORC_REG_PREV_SP:
p = (unsigned long *)(state->sp + orc->fp_offset);
if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long)))
goto err;
state->fp = *p;
break;
case ORC_REG_UNDEFINED:
/* Nothing. */
break;
default:
orc_warn("unknown FP base reg %d at %pB\n", orc->fp_reg, (void *)state->pc);
goto err;
}
switch (orc->type) {
case ORC_TYPE_CALL:
if (orc->ra_reg == ORC_REG_PREV_SP) {
p = (unsigned long *)(state->sp + orc->ra_offset);
if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long)))
goto err;
pc = unwind_graph_addr(state, *p, state->sp);
pc -= LOONGARCH_INSN_SIZE;
} else if (orc->ra_reg == ORC_REG_UNDEFINED) {
if (!state->ra || state->ra == state->pc)
goto err;
pc = unwind_graph_addr(state, state->ra, state->sp);
pc -= LOONGARCH_INSN_SIZE;
state->ra = 0;
} else {
orc_warn("unknown ra base reg %d at %pB\n", orc->ra_reg, (void *)state->pc);
goto err;
}
break;
case ORC_TYPE_REGS:
if (info->type == STACK_TYPE_IRQ && state->sp == info->end)
regs = (struct pt_regs *)info->next_sp;
else
regs = (struct pt_regs *)state->sp;
if (!stack_access_ok(state, (unsigned long)regs, sizeof(*regs)))
goto err;
if ((info->end == (unsigned long)regs + sizeof(*regs)) &&
!regs->regs[3] && !regs->regs[1])
goto end;
if (user_mode(regs))
goto end;
pc = regs->csr_era;
if (!__kernel_text_address(pc))
goto err;
state->sp = regs->regs[3];
state->ra = regs->regs[1];
state->fp = regs->regs[22];
get_stack_info(state->sp, state->task, info);
break;
default:
orc_warn("unknown .orc_unwind entry type %d at %pB\n", orc->type, (void *)state->pc);
goto err;
}
state->pc = bt_address(pc);
if (!state->pc) {
pr_err("cannot find unwind pc at %pK\n", (void *)pc);
goto err;
}
if (!__kernel_text_address(state->pc))
goto err;
preempt_enable();
return true;
err:
state->error = true;
end:
preempt_enable();
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
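
The core of unwind_next_frame() is applying one ORC entry: recover the caller's stack pointer from sp_reg/sp_offset, then read the saved return address and frame pointer from slots relative to that new SP. Below is a stripped-down, stand-alone model of a single ORC_TYPE_CALL step, assuming both values are spilled relative to the caller SP (ra_reg == fp_reg == ORC_REG_PREV_SP); it ignores the IRQ-stack, pt_regs and error paths handled above and is not the kernel code:

/* Toy model of one ORC_TYPE_CALL unwind step over a fake frame. */
#include <stdio.h>

struct toy_orc { int sp_offset, fp_offset, ra_offset; };
struct toy_state { unsigned long sp, fp, pc; };

static void toy_step(struct toy_state *s, const struct toy_orc *orc)
{
        unsigned long prev_sp = s->sp + orc->sp_offset;

        s->pc = *(unsigned long *)(prev_sp + orc->ra_offset) - 4; /* insn size */
        s->fp = *(unsigned long *)(prev_sp + orc->fp_offset);
        s->sp = prev_sp;
}

int main(void)
{
        /* fake 32-byte frame: saved fp and ra sit just below the caller SP */
        unsigned long frame[4] = { 0, 0, 0xfff0, 0x1234 };
        struct toy_orc orc = { .sp_offset = 32, .fp_offset = -16, .ra_offset = -8 };
        struct toy_state st = { .sp = (unsigned long)frame };

        toy_step(&st, &orc);
        printf("pc=%#lx fp=%#lx\n", st.pc, st.fp);  /* pc=0x1230 fp=0xfff0 */
        return 0;
}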

View file

@@ -2,6 +2,7 @@
#include <linux/sizes.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/orc_lookup.h>
#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
@@ -122,6 +123,8 @@ SECTIONS
}
#endif
ORC_UNWIND_TABLE
.sdata : {
*(.sdata)
}

View file

@@ -8,7 +8,7 @@
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/unwind_hints.h>
#define HGPR_OFFSET(x) (PT_R0 + 8*x)
#define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x)
@@ -112,6 +112,7 @@
.text
.cfi_sections .debug_frame
SYM_CODE_START(kvm_exc_entry)
UNWIND_HINT_UNDEFINED
csrwr a2, KVM_TEMP_KS
csrrd a2, KVM_VCPU_KS
addi.d a2, a2, KVM_VCPU_ARCH
@@ -273,3 +274,9 @@ SYM_FUNC_END(kvm_restore_lasx)
.section ".rodata"
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD kvm_restore_fpu
STACK_FRAME_NON_STANDARD kvm_restore_lsx
STACK_FRAME_NON_STANDARD kvm_restore_lasx
#endif

View file

@@ -10,6 +10,7 @@
#include <asm/asm-extable.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
SYM_FUNC_START(__clear_user)
/*
@@ -204,3 +205,5 @@ SYM_FUNC_START(__clear_user_fast)
_asm_extable 28b, .Lsmall_fixup
_asm_extable 29b, .Lexit
SYM_FUNC_END(__clear_user_fast)
STACK_FRAME_NON_STANDARD __clear_user_fast

View file

@@ -10,6 +10,7 @@
#include <asm/asm-extable.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
SYM_FUNC_START(__copy_user)
/*
@@ -278,3 +279,5 @@ SYM_FUNC_START(__copy_user_fast)
_asm_extable 58b, .Lexit
_asm_extable 59b, .Lexit
SYM_FUNC_END(__copy_user_fast)
STACK_FRAME_NON_STANDARD __copy_user_fast

View file

@@ -9,6 +9,7 @@
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
.section .noinstr.text, "ax"
@@ -197,3 +198,5 @@ SYM_FUNC_START(__memcpy_fast)
jr ra
SYM_FUNC_END(__memcpy_fast)
_ASM_NOKPROBE(__memcpy_fast)
STACK_FRAME_NON_STANDARD __memcpy_small

View file

@@ -9,6 +9,7 @@
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
.macro fill_to_64 r0
bstrins.d \r0, \r0, 15, 8
@@ -166,3 +167,5 @@ SYM_FUNC_START(__memset_fast)
jr ra
SYM_FUNC_END(__memset_fast)
_ASM_NOKPROBE(__memset_fast)
STACK_FRAME_NON_STANDARD __memset_fast

View file

@@ -9,8 +9,9 @@
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
@@ -266,24 +267,20 @@ static void setup_tlb_handler(int cpu)
setup_ptwalker();
local_flush_tlb_all();
if (cpu_has_ptw) {
exception_table[EXCCODE_TLBI] = handle_tlb_load_ptw;
exception_table[EXCCODE_TLBL] = handle_tlb_load_ptw;
exception_table[EXCCODE_TLBS] = handle_tlb_store_ptw;
exception_table[EXCCODE_TLBM] = handle_tlb_modify_ptw;
}
/* The tlb handlers are generated only once */
if (cpu == 0) {
memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
local_flush_icache_range(tlbrentry, tlbrentry + 0x80);
if (!cpu_has_ptw) {
set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE);
set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE);
set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE);
set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE);
} else {
set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load_ptw, VECSIZE);
set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load_ptw, VECSIZE);
set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store_ptw, VECSIZE);
set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify_ptw, VECSIZE);
}
set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
for (int i = EXCCODE_TLBL; i <= EXCCODE_TLBPE; i++)
set_handler(i * VECSIZE, exception_table[i], VECSIZE);
} else {
int vec_sz __maybe_unused;
void *addr __maybe_unused;

View file

@@ -18,6 +18,7 @@
.macro tlb_do_page_fault, write
SYM_CODE_START(tlb_do_page_fault_\write)
UNWIND_HINT_UNDEFINED
SAVE_ALL
csrrd a2, LOONGARCH_CSR_BADV
move a0, sp
@@ -32,6 +33,7 @@
tlb_do_page_fault 1
SYM_CODE_START(handle_tlb_protect)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
move a0, sp
@@ -44,6 +46,7 @@ SYM_CODE_START(handle_tlb_protect)
SYM_CODE_END(handle_tlb_protect)
SYM_CODE_START(handle_tlb_load)
UNWIND_HINT_UNDEFINED
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -190,6 +193,7 @@ nopage_tlb_load:
SYM_CODE_END(handle_tlb_load)
SYM_CODE_START(handle_tlb_load_ptw)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_0
@@ -197,6 +201,7 @@ SYM_CODE_START(handle_tlb_load_ptw)
SYM_CODE_END(handle_tlb_load_ptw)
SYM_CODE_START(handle_tlb_store)
UNWIND_HINT_UNDEFINED
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -346,6 +351,7 @@ nopage_tlb_store:
SYM_CODE_END(handle_tlb_store)
SYM_CODE_START(handle_tlb_store_ptw)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_1
@@ -353,6 +359,7 @@ SYM_CODE_START(handle_tlb_store_ptw)
SYM_CODE_END(handle_tlb_store_ptw)
SYM_CODE_START(handle_tlb_modify)
UNWIND_HINT_UNDEFINED
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -500,6 +507,7 @@ nopage_tlb_modify:
SYM_CODE_END(handle_tlb_modify)
SYM_CODE_START(handle_tlb_modify_ptw)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_1
@@ -507,6 +515,7 @@ SYM_CODE_START(handle_tlb_modify_ptw)
SYM_CODE_END(handle_tlb_modify_ptw)
SYM_CODE_START(handle_tlb_refill)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_TLBRSAVE
csrrd t0, LOONGARCH_CSR_PGD
lddir t0, t0, 3

View file

@@ -4,6 +4,7 @@
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
OBJECT_FILES_NON_STANDARD := y
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile

View file

@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__
#include <uapi/asm/mman.h>
/* PARISC cannot allow mdwe as it needs writable stacks */
static inline bool arch_memory_deny_write_exec_supported(void)
{
return false;
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
#endif /* __ASM_MMAN_H__ */
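
This hook lets an architecture veto PR_SET_MDWE; parisc returns false unconditionally because its userspace stacks must remain writable and executable. The generic side is expected to supply a default returning true and to consult the hook from the prctl path; the sketch below shows that assumed shape only and is not copied from include/linux/mman.h or kernel/sys.c:

/* Assumed shape of the generic fallback and of the prctl-side check;
 * illustrative only, written as a stand-alone userspace snippet. */
#include <stdbool.h>
#include <errno.h>

#ifndef arch_memory_deny_write_exec_supported
static inline bool arch_memory_deny_write_exec_supported(void)
{
        return true;    /* most architectures can enforce MDWE */
}
#endif

static int set_mdwe(void)
{
        /* the PR_SET_MDWE handler is expected to bail out like this */
        if (!arch_memory_deny_write_exec_supported())
                return -EINVAL;
        return 0;       /* ...then record the MDWE flag for the task */
}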

View file

@@ -607,11 +607,6 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
config ARCH_SUPPORTS_KEXEC
def_bool PPC_BOOK3S || PPC_E500 || (44x && !SMP)
config ARCH_SELECTS_KEXEC
def_bool y
depends on KEXEC
select CRASH_DUMP
config ARCH_SUPPORTS_KEXEC_FILE
def_bool PPC64
@@ -622,7 +617,6 @@ config ARCH_SELECTS_KEXEC_FILE
def_bool y
depends on KEXEC_FILE
select KEXEC_ELF
select CRASH_DUMP
select HAVE_IMA_KEXEC if IMA
config PPC64_BIG_ENDIAN_ELF_ABI_V2
@@ -694,8 +688,7 @@ config ARCH_SELECTS_CRASH_DUMP
config FA_DUMP
bool "Firmware-assisted dump"
depends on PPC64 && (PPC_RTAS || PPC_POWERNV)
select CRASH_DUMP
depends on CRASH_DUMP && PPC64 && (PPC_RTAS || PPC_POWERNV)
help
A robust mechanism to get reliable kernel crash dump with
assistance from firmware. This approach does not use kexec,

View file

@@ -55,59 +55,18 @@
typedef void (*crash_shutdown_t)(void);
#ifdef CONFIG_KEXEC_CORE
/*
* This function is responsible for capturing register states if coming
* via panic or invoking dump using sysrq-trigger.
*/
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else
ppc_save_regs(newregs);
}
struct kimage;
struct pt_regs;
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
extern void crash_ipi_callback(struct pt_regs *);
extern int crash_wake_offline;
struct kimage;
struct pt_regs;
extern void default_machine_kexec(struct kimage *image);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);
extern void crash_kexec_prepare(void);
extern void crash_kexec_secondary(struct pt_regs *regs);
int __init overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
static inline bool kdump_in_progress(void)
{
return crashing_cpu >= 0;
}
void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
unsigned long start_address) __noreturn;
void kexec_copy_flush(struct kimage *image);
#if defined(CONFIG_CRASH_DUMP)
bool is_kdump_kernel(void);
#define is_kdump_kernel is_kdump_kernel
#if defined(CONFIG_PPC_RTAS)
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
#endif /* CONFIG_PPC_RTAS */
#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;
@@ -152,15 +111,56 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
#endif /* CONFIG_KEXEC_FILE */
#else /* !CONFIG_KEXEC_CORE */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
#endif /* CONFIG_KEXEC_CORE */
static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
#ifdef CONFIG_CRASH_RESERVE
int __init overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
#else
static inline void reserve_crashkernel(void) {}
static inline int overlaps_crashkernel(unsigned long start, unsigned long size) { return 0; }
#endif
#if defined(CONFIG_CRASH_DUMP)
/*
* This function is responsible for capturing register states if coming
* via panic or invoking dump using sysrq-trigger.
*/
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
return 0;
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else
ppc_save_regs(newregs);
}
static inline void reserve_crashkernel(void) { ; }
extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
extern void crash_ipi_callback(struct pt_regs *regs);
extern int crash_wake_offline;
extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern void crash_kexec_prepare(void);
extern void crash_kexec_secondary(struct pt_regs *regs);
static inline bool kdump_in_progress(void)
{
return crashing_cpu >= 0;
}
bool is_kdump_kernel(void);
#define is_kdump_kernel is_kdump_kernel
#if defined(CONFIG_PPC_RTAS)
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
#endif /* CONFIG_PPC_RTAS */
#else /* !CONFIG_CRASH_DUMP */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
static inline int crash_shutdown_register(crash_shutdown_t handler)
{
@@ -183,7 +183,7 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
}
#endif /* CONFIG_KEXEC_CORE */
#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kexec.h>

View file

@@ -475,7 +475,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
tce_alloc_end = *lprop;
#endif
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_RESERVE
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
if (lprop)
crashk_res.start = *lprop;

View file

@@ -110,7 +110,7 @@ int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif
#ifdef CONFIG_VMCORE_INFO
#ifdef CONFIG_CRASH_DUMP
/* This keeps a track of which one is the crashing cpu. */
int crashing_cpu = -1;
#endif

View file

@@ -588,7 +588,7 @@ void smp_send_debugger_break(void)
}
#endif
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_DUMP
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
int cpu;
@@ -631,7 +631,7 @@ void crash_smp_send_stop(void)
stopped = true;
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_CRASH_DUMP
if (kexec_crash_image) {
crash_kexec_prepare();
return;

View file

@@ -3,12 +3,13 @@
# Makefile for the linux kernel.
#
obj-y += core.o crash.o core_$(BITS).o
obj-y += core.o core_$(BITS).o
obj-$(CONFIG_PPC32) += relocate_32.o
obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o
obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
obj-$(CONFIG_CRASH_DUMP) += crash.o
# Disable GCOV, KCOV & sanitizers in odd or sensitive code
GCOV_PROFILE_core_$(BITS).o := n

View file

@@ -44,10 +44,12 @@ void machine_kexec_mask_interrupts(void) {
}
}
#ifdef CONFIG_CRASH_DUMP
void machine_crash_shutdown(struct pt_regs *regs)
{
default_machine_crash_shutdown(regs);
}
#endif
void machine_kexec_cleanup(struct kimage *image)
{
@@ -77,6 +79,7 @@ void machine_kexec(struct kimage *image)
for(;;);
}
#ifdef CONFIG_CRASH_RESERVE
void __init reserve_crashkernel(void)
{
unsigned long long crash_size, crash_base, total_mem_sz;
@@ -251,3 +254,4 @@ static int __init kexec_setup(void)
return 0;
}
late_initcall(kexec_setup);
#endif /* CONFIG_CRASH_RESERVE */

Some files were not shown because too many files have changed in this diff.