Merge branch 'x86/urgent' into x86/apic, to resolve conflicts

Conflicts:
	arch/x86/kernel/cpu/common.c
	arch/x86/kernel/cpu/intel.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar
Date:   2024-02-27 10:09:49 +01:00
Commit: 9b9c280b9a
511 changed files with 3808 additions and 2121 deletions


@@ -191,10 +191,11 @@ Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
-Geliang Tang <geliang.tang@linux.dev> <geliang.tang@suse.com>
-Geliang Tang <geliang.tang@linux.dev> <geliangtang@xiaomi.com>
-Geliang Tang <geliang.tang@linux.dev> <geliangtang@gmail.com>
-Geliang Tang <geliang.tang@linux.dev> <geliangtang@163.com>
+Geliang Tang <geliang@kernel.org> <geliang.tang@linux.dev>
+Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
+Geliang Tang <geliang@kernel.org> <geliangtang@xiaomi.com>
+Geliang Tang <geliang@kernel.org> <geliangtang@gmail.com>
+Geliang Tang <geliang@kernel.org> <geliangtang@163.com>
 Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>


@@ -1,4 +1,4 @@
-What: /sys/class/<iface>/statistics/collisions
+What: /sys/class/net/<iface>/statistics/collisions
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -6,7 +6,7 @@ Description:
 Indicates the number of collisions seen by this network device.
 This value might not be relevant with all MAC layers.
 
-What: /sys/class/<iface>/statistics/multicast
+What: /sys/class/net/<iface>/statistics/multicast
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -14,7 +14,7 @@ Description:
 Indicates the number of multicast packets received by this
 network device.
 
-What: /sys/class/<iface>/statistics/rx_bytes
+What: /sys/class/net/<iface>/statistics/rx_bytes
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -23,7 +23,7 @@ Description:
 See the network driver for the exact meaning of when this
 value is incremented.
 
-What: /sys/class/<iface>/statistics/rx_compressed
+What: /sys/class/net/<iface>/statistics/rx_compressed
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -32,7 +32,7 @@ Description:
 network device. This value might only be relevant for interfaces
 that support packet compression (e.g: PPP).
 
-What: /sys/class/<iface>/statistics/rx_crc_errors
+What: /sys/class/net/<iface>/statistics/rx_crc_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -41,7 +41,7 @@ Description:
 by this network device. Note that the specific meaning might
 depend on the MAC layer used by the interface.
 
-What: /sys/class/<iface>/statistics/rx_dropped
+What: /sys/class/net/<iface>/statistics/rx_dropped
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -51,7 +51,7 @@ Description:
 packet processing. See the network driver for the exact
 meaning of this value.
 
-What: /sys/class/<iface>/statistics/rx_errors
+What: /sys/class/net/<iface>/statistics/rx_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -59,7 +59,7 @@ Description:
 Indicates the number of receive errors on this network device.
 See the network driver for the exact meaning of this value.
 
-What: /sys/class/<iface>/statistics/rx_fifo_errors
+What: /sys/class/net/<iface>/statistics/rx_fifo_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -68,7 +68,7 @@ Description:
 network device. See the network driver for the exact
 meaning of this value.
 
-What: /sys/class/<iface>/statistics/rx_frame_errors
+What: /sys/class/net/<iface>/statistics/rx_frame_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -78,7 +78,7 @@ Description:
 on the MAC layer protocol used. See the network driver for
 the exact meaning of this value.
 
-What: /sys/class/<iface>/statistics/rx_length_errors
+What: /sys/class/net/<iface>/statistics/rx_length_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -87,7 +87,7 @@ Description:
 error, oversized or undersized. See the network driver for the
 exact meaning of this value.
 
-What: /sys/class/<iface>/statistics/rx_missed_errors
+What: /sys/class/net/<iface>/statistics/rx_missed_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -96,7 +96,7 @@ Description:
 due to lack of capacity in the receive side. See the network
 driver for the exact meaning of this value.
 
-What: /sys/class/<iface>/statistics/rx_nohandler
+What: /sys/class/net/<iface>/statistics/rx_nohandler
 Date: February 2016
 KernelVersion: 4.6
 Contact: netdev@vger.kernel.org
@@ -104,7 +104,7 @@ Description:
 Indicates the number of received packets that were dropped on
 an inactive device by the network core.
 
-What: /sys/class/<iface>/statistics/rx_over_errors
+What: /sys/class/net/<iface>/statistics/rx_over_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -114,7 +114,7 @@ Description:
 (e.g: larger than MTU). See the network driver for the exact
 meaning of this value.
 
-What: /sys/class/<iface>/statistics/rx_packets
+What: /sys/class/net/<iface>/statistics/rx_packets
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -122,7 +122,7 @@ Description:
 Indicates the total number of good packets received by this
 network device.
 
-What: /sys/class/<iface>/statistics/tx_aborted_errors
+What: /sys/class/net/<iface>/statistics/tx_aborted_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -132,7 +132,7 @@ Description:
 a medium collision). See the network driver for the exact
 meaning of this value.
 
-What: /sys/class/<iface>/statistics/tx_bytes
+What: /sys/class/net/<iface>/statistics/tx_bytes
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -143,7 +143,7 @@ Description:
 transmitted packets or all packets that have been queued for
 transmission.
 
-What: /sys/class/<iface>/statistics/tx_carrier_errors
+What: /sys/class/net/<iface>/statistics/tx_carrier_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -152,7 +152,7 @@ Description:
 because of carrier errors (e.g: physical link down). See the
 network driver for the exact meaning of this value.
 
-What: /sys/class/<iface>/statistics/tx_compressed
+What: /sys/class/net/<iface>/statistics/tx_compressed
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -161,7 +161,7 @@ Description:
 this might only be relevant for devices that support
 compression (e.g: PPP).
 
-What: /sys/class/<iface>/statistics/tx_dropped
+What: /sys/class/net/<iface>/statistics/tx_dropped
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -170,7 +170,7 @@ Description:
 See the driver for the exact reasons as to why the packets were
 dropped.
 
-What: /sys/class/<iface>/statistics/tx_errors
+What: /sys/class/net/<iface>/statistics/tx_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -179,7 +179,7 @@ Description:
 a network device. See the driver for the exact reasons as to
 why the packets were dropped.
 
-What: /sys/class/<iface>/statistics/tx_fifo_errors
+What: /sys/class/net/<iface>/statistics/tx_fifo_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -188,7 +188,7 @@ Description:
 FIFO error. See the driver for the exact reasons as to why the
 packets were dropped.
 
-What: /sys/class/<iface>/statistics/tx_heartbeat_errors
+What: /sys/class/net/<iface>/statistics/tx_heartbeat_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -197,7 +197,7 @@ Description:
 reported as heartbeat errors. See the driver for the exact
 reasons as to why the packets were dropped.
 
-What: /sys/class/<iface>/statistics/tx_packets
+What: /sys/class/net/<iface>/statistics/tx_packets
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org
@@ -206,7 +206,7 @@ Description:
 device. See the driver for whether this reports the number of all
 attempted or successful transmissions.
 
-What: /sys/class/<iface>/statistics/tx_window_errors
+What: /sys/class/net/<iface>/statistics/tx_window_errors
 Date: April 2005
 KernelVersion: 2.6.12
 Contact: netdev@vger.kernel.org

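For completeness, a minimal userspace sketch of reading one of these counters from the corrected /sys/class/net/<iface>/statistics/ location (hedged: the interface name "eth0" and the helper below are illustrative assumptions, not part of the ABI file above):

    #include <stdio.h>

    /* Read one counter for an interface, e.g. rx_bytes for eth0. */
    static long long read_net_stat(const char *iface, const char *counter)
    {
        char path[256];
        long long value = -1;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/class/net/%s/statistics/%s", iface, counter);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%lld", &value) != 1)
            value = -1;
        fclose(f);
        return value;
    }

    int main(void)
    {
        /* "eth0" is an assumed example interface name. */
        printf("rx_bytes: %lld\n", read_net_stat("eth0", "rx_bytes"));
        return 0;
    }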

@@ -4,18 +4,18 @@ KernelVersion: 6.5
 Contact: Miquel Raynal <miquel.raynal@bootlin.com>
 Description:
 	The "cells" folder contains one file per cell exposed by the
-	NVMEM device. The name of the file is: <name>@<where>, with
-	<name> being the cell name and <where> its location in the NVMEM
-	device, in hexadecimal (without the '0x' prefix, to mimic device
-	tree node names). The length of the file is the size of the cell
-	(when known). The content of the file is the binary content of
-	the cell (may sometimes be ASCII, likely without trailing
-	character).
+	NVMEM device. The name of the file is: "<name>@<byte>,<bit>",
+	with <name> being the cell name and <where> its location in
+	the NVMEM device, in hexadecimal bytes and bits (without the
+	'0x' prefix, to mimic device tree node names). The length of
+	the file is the size of the cell (when known). The content of
+	the file is the binary content of the cell (may sometimes be
+	ASCII, likely without trailing character).
 	Note: This file is only present if CONFIG_NVMEM_SYSFS
 	is enabled.
 
 	Example::
 
-	  hexdump -C /sys/bus/nvmem/devices/1-00563/cells/product-name@d
+	  hexdump -C /sys/bus/nvmem/devices/1-00563/cells/product-name@d,0
 	  00000000 54 4e 34 38 4d 2d 50 2d 44 4e |TN48M-P-DN|
 	  0000000a


@@ -243,3 +243,10 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ASR            | ASR8601         | #8601001        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
+| Microsoft      | Azure Cobalt 100| #2139208        | ARM64_ERRATUM_2139208       |
++----------------+-----------------+-----------------+-----------------------------+
+| Microsoft      | Azure Cobalt 100| #2067961        | ARM64_ERRATUM_2067961       |
++----------------+-----------------+-----------------+-----------------------------+
+| Microsoft      | Azure Cobalt 100| #2253138        | ARM64_ERRATUM_2253138       |
++----------------+-----------------+-----------------+-----------------------------+


@@ -95,6 +95,9 @@ The kernel provides a function to invoke the buffer clearing:
 
     mds_clear_cpu_buffers()
 
+Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
+Other than CFLAGS.ZF, this macro doesn't clobber any registers.
+
 The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
 (idle) transitions.
@@ -138,17 +141,30 @@ Mitigation points
 
    When transitioning from kernel to user space the CPU buffers are flushed
    on affected CPUs when the mitigation is not disabled on the kernel
-   command line. The migitation is enabled through the static key
-   mds_user_clear.
+   command line. The mitigation is enabled through the feature flag
+   X86_FEATURE_CLEAR_CPU_BUF.
 
-   The mitigation is invoked in prepare_exit_to_usermode() which covers
-   all but one of the kernel to user space transitions. The exception
-   is when we return from a Non Maskable Interrupt (NMI), which is
-   handled directly in do_nmi().
-
-   (The reason that NMI is special is that prepare_exit_to_usermode() can
-   enable IRQs. In NMI context, NMIs are blocked, and we don't want to
-   enable IRQs with NMIs blocked.)
+   The mitigation is invoked just before transitioning to userspace after
+   user registers are restored. This is done to minimize the window in
+   which kernel data could be accessed after VERW e.g. via an NMI after
+   VERW.
+
+   **Corner case not handled**
+   Interrupts returning to kernel don't clear CPUs buffers since the
+   exit-to-user path is expected to do that anyways. But, there could be
+   a case when an NMI is generated in kernel after the exit-to-user path
+   has cleared the buffers. This case is not handled and NMI returning to
+   kernel don't clear CPU buffers because:
+
+   1. It is rare to get an NMI after VERW, but before returning to userspace.
+   2. For an unprivileged user, there is no known way to make that NMI
+      less rare or target it.
+   3. It would take a large number of these precisely-timed NMIs to mount
+      an actual attack. There's presumably not enough bandwidth.
+   4. The NMI in question occurs after a VERW, i.e. when user state is
+      restored and most interesting data is already scrubbed. Whats left
+      is only the data that NMI touches, and that may or may not be of
+      any interest.
 
 2. C-State transition


@@ -28,7 +28,10 @@ $(obj)/%.example.dts: $(src)/%.yaml check_dtschema_version FORCE
 find_all_cmd = find $(srctree)/$(src) \( -name '*.yaml' ! \
                 -name 'processed-schema*' \)
 
-find_cmd = $(find_all_cmd) | sed 's|^$(srctree)/$(src)/||' | grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))" | sed 's|^|$(srctree)/$(src)/|'
+find_cmd = $(find_all_cmd) | \
+	sed 's|^$(srctree)/||' | \
+	grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))" | \
+	sed 's|^|$(srctree)/|'
 CHK_DT_DOCS := $(shell $(find_cmd))
 
 quiet_cmd_yamllint = LINT $(src)


@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Ceva AHCI SATA Controller
 
 maintainers:
-  - Piyush Mehta <piyush.mehta@amd.com>
+  - Mubin Sayyed <mubin.sayyed@amd.com>
+  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
 
 description: |
   The Ceva SATA controller mostly conforms to the AHCI interface with some


@@ -29,19 +29,22 @@ properties:
 
   audio-ports:
     description:
-      Array of 8-bit values, 2 values per DAI (Documentation/sound/soc/dai.rst).
+      Array of 2 values per DAI (Documentation/sound/soc/dai.rst).
       The implementation allows one or two DAIs.
       If two DAIs are defined, they must be of different type.
     $ref: /schemas/types.yaml#/definitions/uint32-matrix
+    minItems: 1
+    maxItems: 2
     items:
-      minItems: 1
       items:
         - description: |
            The first value defines the DAI type: TDA998x_SPDIF or TDA998x_I2S
            (see include/dt-bindings/display/tda998x.h).
+          enum: [ 1, 2 ]
         - description:
            The second value defines the tda998x AP_ENA reg content when the
            DAI in question is used.
+          maximum: 0xff
 
   '#sound-dai-cells':
     enum: [ 0, 1 ]


@@ -12,7 +12,8 @@ description:
   PS_MODE). Every pin can be configured as input/output.
 
 maintainers:
-  - Piyush Mehta <piyush.mehta@amd.com>
+  - Mubin Sayyed <mubin.sayyed@amd.com>
+  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
 
 properties:
   compatible:


@@ -78,8 +78,8 @@ examples:
     pcie@0 {
       #address-cells = <3>;
       #size-cells = <2>;
-      ranges = <0x0 0x0 0x0 0x0 0x0 0x0>;
-      reg = <0x0 0x0 0x0 0x0 0x0 0x0>;
+      ranges = <0x02000000 0x0 0x100000 0x10000000 0x0 0x0>;
+      reg = <0x0 0x1000>;
       device_type = "pci";
 
       switch@0,0 {


@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Zynq UltraScale+ MPSoC and Versal reset
 
 maintainers:
-  - Piyush Mehta <piyush.mehta@amd.com>
+  - Mubin Sayyed <mubin.sayyed@amd.com>
+  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
 
 description: |
   The Zynq UltraScale+ MPSoC and Versal has several different resets.


@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Google SC7280-Herobrine ASoC sound card driver
 
 maintainers:
-  - Srinivasa Rao Mandadapu <srivasam@codeaurora.org>
   - Judy Hsiao <judyhsiao@chromium.org>
 
 description:


@@ -42,7 +42,7 @@ properties:
 
   resets:
     description: Reset controller to reset the TPM
-    $ref: /schemas/types.yaml#/definitions/phandle
+    maxItems: 1
 
   reset-gpios:
     description: Output GPIO pin to reset the TPM


@@ -55,9 +55,12 @@ properties:
 
   samsung,sysreg:
     $ref: /schemas/types.yaml#/definitions/phandle-array
-    description: Should be phandle/offset pair. The phandle to the syscon node
-                 which indicates the FSYSx sysreg interface and the offset of
-                 the control register for UFS io coherency setting.
+    items:
+      - items:
+          - description: phandle to FSYSx sysreg node
+          - description: offset of the control register for UFS io coherency setting
+    description:
+      Phandle and offset to the FSYSx sysreg for UFS io coherency setting.
 
   dma-coherent: true


@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Xilinx SuperSpeed DWC3 USB SoC controller
 
 maintainers:
-  - Piyush Mehta <piyush.mehta@amd.com>
+  - Mubin Sayyed <mubin.sayyed@amd.com>
+  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
 
 properties:
   compatible:


@@ -16,8 +16,9 @@ description:
   USB 2.0 traffic.
 
 maintainers:
-  - Piyush Mehta <piyush.mehta@amd.com>
   - Michal Simek <michal.simek@amd.com>
+  - Mubin Sayyed <mubin.sayyed@amd.com>
+  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
 
 properties:
   compatible:


@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Xilinx udc controller
 
 maintainers:
-  - Piyush Mehta <piyush.mehta@amd.com>
+  - Mubin Sayyed <mubin.sayyed@amd.com>
+  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
 
 properties:
   compatible:


@@ -16,13 +16,13 @@
 # that are possible for CORE. So for example if CORE_BELL_A_ADVANCED is 'y',
 # CORE must be 'y' too.
 #
-#  * What influences CORE_BELL_A_ADVANCED ?
+#  * What influences CORE_BELL_A_ADVANCED?
 #
 # As the name implies CORE_BELL_A_ADVANCED is an advanced feature of
 # CORE_BELL_A so naturally it depends on CORE_BELL_A. So if CORE_BELL_A is 'y'
 # we know CORE_BELL_A_ADVANCED can be 'y' too.
 #
-#  * What influences CORE_BELL_A ?
+#  * What influences CORE_BELL_A?
 #
 # CORE_BELL_A depends on CORE, so CORE influences CORE_BELL_A.
 #
@@ -34,7 +34,7 @@
 # the "recursive dependency detected" error.
 #
 # Reading the Documentation/kbuild/Kconfig.recursion-issue-01 file it may be
-# obvious that an easy to solution to this problem should just be the removal
+# obvious that an easy solution to this problem should just be the removal
 # of the "select CORE" from CORE_BELL_A_ADVANCED as that is implicit already
 # since CORE_BELL_A depends on CORE. Recursive dependency issues are not always
 # so trivial to resolve, we provide another example below of practical


@@ -384,8 +384,6 @@ operations:
           - type
 
       dump:
-        pre: dpll-lock-dumpit
-        post: dpll-unlock-dumpit
         reply: *dev-attrs
 
     -
@@ -473,8 +471,6 @@ operations:
           - fractional-frequency-offset
 
       dump:
-        pre: dpll-lock-dumpit
-        post: dpll-unlock-dumpit
         request:
           attributes:
             - id


@@ -126,7 +126,7 @@ Users may also set the RoCE capability of the function using
 `devlink port function set roce` command.
 
 Users may also set the function as migratable using
-'devlink port function set migratable' command.
+`devlink port function set migratable` command.
 
 Users may also set the IPsec crypto capability of the function using
 `devlink port function set ipsec_crypto` command.


@@ -136,8 +136,8 @@ struct_netpoll_info* npinfo -
 possible_net_t nd_net - read_mostly (dev_net)napi_busy_loop,tcp_v(4/6)_rcv,ip(v6)_rcv,ip(6)_input,ip(6)_input_finish
 void* ml_priv
 enum_netdev_ml_priv_type ml_priv_type
-struct_pcpu_lstats__percpu* lstats
-struct_pcpu_sw_netstats__percpu* tstats
+struct_pcpu_lstats__percpu* lstats read_mostly dev_lstats_add()
+struct_pcpu_sw_netstats__percpu* tstats read_mostly dev_sw_netstats_tx_add()
 struct_pcpu_dstats__percpu* dstats
 struct_garp_port* garp_port
 struct_mrp_port* mrp_port


@@ -38,13 +38,13 @@ u32 max_window read_mostly -
 u32 mss_cache read_mostly read_mostly tcp_rate_check_app_limited,tcp_current_mss,tcp_sync_mss,tcp_sndbuf_expand,tcp_tso_should_defer(tx);tcp_update_pacing_rate,tcp_clean_rtx_queue(rx)
 u32 window_clamp read_mostly read_write tcp_rcv_space_adjust,__tcp_select_window
 u32 rcv_ssthresh read_mostly - __tcp_select_window
-u82 scaling_ratio
+u8 scaling_ratio read_mostly read_mostly tcp_win_from_space
 struct tcp_rack
 u16 advmss - read_mostly tcp_rcv_space_adjust
 u8 compressed_ack
 u8:2 dup_ack_counter
 u8:1 tlp_retrans
-u8:1 tcp_usec_ts
+u8:1 tcp_usec_ts read_mostly read_mostly
 u32 chrono_start read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data)
 u32[3] chrono_stat read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data)
 u8:2 chrono_type read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data)


@@ -0,0 +1,121 @@
+====
+CVEs
+====
+
+Common Vulnerabilities and Exposure (CVE®) numbers were developed as an
+unambiguous way to identify, define, and catalog publicly disclosed
+security vulnerabilities. Over time, their usefulness has declined with
+regards to the kernel project, and CVE numbers were very often assigned
+in inappropriate ways and for inappropriate reasons. Because of this,
+the kernel development community has tended to avoid them. However, the
+combination of continuing pressure to assign CVEs and other forms of
+security identifiers, and ongoing abuses by individuals and companies
+outside of the kernel community has made it clear that the kernel
+community should have control over those assignments.
+
+The Linux kernel developer team does have the ability to assign CVEs for
+potential Linux kernel security issues. This assignment is independent
+of the :doc:`normal Linux kernel security bug reporting
+process<../process/security-bugs>`.
+
+A list of all assigned CVEs for the Linux kernel can be found in the
+archives of the linux-cve mailing list, as seen on
+https://lore.kernel.org/linux-cve-announce/. To get notice of the
+assigned CVEs, please `subscribe
+<https://subspace.kernel.org/subscribing.html>`_ to that mailing list.
+
+Process
+=======
+
+As part of the normal stable release process, kernel changes that are
+potentially security issues are identified by the developers responsible
+for CVE number assignments and have CVE numbers automatically assigned
+to them. These assignments are published on the linux-cve-announce
+mailing list as announcements on a frequent basis.
+
+Note, due to the layer at which the Linux kernel is in a system, almost
+any bug might be exploitable to compromise the security of the kernel,
+but the possibility of exploitation is often not evident when the bug is
+fixed. Because of this, the CVE assignment team is overly cautious and
+assign CVE numbers to any bugfix that they identify. This
+explains the seemingly large number of CVEs that are issued by the Linux
+kernel team.
+
+If the CVE assignment team misses a specific fix that any user feels
+should have a CVE assigned to it, please email them at <cve@kernel.org>
+and the team there will work with you on it. Note that no potential
+security issues should be sent to this alias, it is ONLY for assignment
+of CVEs for fixes that are already in released kernel trees. If you
+feel you have found an unfixed security issue, please follow the
+:doc:`normal Linux kernel security bug reporting
+process<../process/security-bugs>`.
+
+No CVEs will be automatically assigned for unfixed security issues in
+the Linux kernel; assignment will only automatically happen after a fix
+is available and applied to a stable kernel tree, and it will be tracked
+that way by the git commit id of the original fix. If anyone wishes to
+have a CVE assigned before an issue is resolved with a commit, please
+contact the kernel CVE assignment team at <cve@kernel.org> to get an
+identifier assigned from their batch of reserved identifiers.
+
+No CVEs will be assigned for any issue found in a version of the kernel
+that is not currently being actively supported by the Stable/LTS kernel
+team. A list of the currently supported kernel branches can be found at
+https://kernel.org/releases.html
+
+Disputes of assigned CVEs
+=========================
+
+The authority to dispute or modify an assigned CVE for a specific kernel
+change lies solely with the maintainers of the relevant subsystem
+affected. This principle ensures a high degree of accuracy and
+accountability in vulnerability reporting. Only those individuals with
+deep expertise and intimate knowledge of the subsystem can effectively
+assess the validity and scope of a reported vulnerability and determine
+its appropriate CVE designation. Any attempt to modify or dispute a CVE
+outside of this designated authority could lead to confusion, inaccurate
+reporting, and ultimately, compromised systems.
+
+Invalid CVEs
+============
+
+If a security issue is found in a Linux kernel that is only supported by
+a Linux distribution due to the changes that have been made by that
+distribution, or due to the distribution supporting a kernel version
+that is no longer one of the kernel.org supported releases, then a CVE
+can not be assigned by the Linux kernel CVE team, and must be asked for
+from that Linux distribution itself.
+
+Any CVE that is assigned against the Linux kernel for an actively
+supported kernel version, by any group other than the kernel assignment
+CVE team should not be treated as a valid CVE. Please notify the
+kernel CVE assignment team at <cve@kernel.org> so that they can work to
+invalidate such entries through the CNA remediation process.
+
+Applicability of specific CVEs
+==============================
+
+As the Linux kernel can be used in many different ways, with many
+different ways of accessing it by external users, or no access at all,
+the applicability of any specific CVE is up to the user of Linux to
+determine, it is not up to the CVE assignment team. Please do not
+contact us to attempt to determine the applicability of any specific
+CVE.
+
+Also, as the source tree is so large, and any one system only uses a
+small subset of the source tree, any users of Linux should be aware that
+large numbers of assigned CVEs are not relevant for their systems.
+
+In short, we do not know your use case, and we do not know what portions
+of the kernel that you use, so there is no way for us to determine if a
+specific CVE is relevant for your system.
+
+As always, it is best to take all released kernel changes, as they are
+tested together in a unified whole by many community members, and not as
+individual cherry-picked changes. Also note that for many bugs, the
+solution to the overall problem is not found in a single change, but by
+the sum of many fixes on top of each other. Ideally CVEs will be
+assigned to all fixes for all issues, but sometimes we will fail to
+notice fixes, therefore assume that some changes without a CVE assigned
+might be relevant to take.


@@ -81,6 +81,7 @@ of special classes of bugs: regressions and security problems.
 
    handling-regressions
    security-bugs
+   cve
    embargoed-hardware-issues
 
 Maintainer information


@@ -99,9 +99,8 @@ CVE assignment
 The security team does not assign CVEs, nor do we require them for
 reports or fixes, as this can needlessly complicate the process and may
 delay the bug handling. If a reporter wishes to have a CVE identifier
-assigned, they should find one by themselves, for example by contacting
-MITRE directly. However under no circumstances will a patch inclusion
-be delayed to wait for a CVE identifier to arrive.
+assigned for a confirmed issue, they can contact the :doc:`kernel CVE
+assignment team<../process/cve>` to obtain one.
 
 Non-disclosure agreements
 -------------------------


@@ -109,7 +109,7 @@ class KernelFeat(Directive):
             else:
                 out_lines += line + "\n"
 
-        nodeList = self.nestedParse(out_lines, fname)
+        nodeList = self.nestedParse(out_lines, self.arguments[0])
         return nodeList
 
     def nestedParse(self, lines, fname):


@@ -5610,6 +5610,11 @@ S: Maintained
 F: Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
 F: drivers/net/can/ctucanfd/
 
+CVE ASSIGNMENT CONTACT
+M: CVE Assignment Team <cve@kernel.org>
+S: Maintained
+F: Documentation/process/cve.rst
+
 CW1200 WLAN driver
 S: Orphan
 F: drivers/net/wireless/st/cw1200/
@@ -15324,7 +15329,7 @@ K: \bmdo_
 NETWORKING [MPTCP]
 M: Matthieu Baerts <matttbe@kernel.org>
 M: Mat Martineau <martineau@kernel.org>
-R: Geliang Tang <geliang.tang@linux.dev>
+R: Geliang Tang <geliang@kernel.org>
 L: netdev@vger.kernel.org
 L: mptcp@lists.linux.dev
 S: Maintained
@@ -16837,6 +16842,7 @@ F: drivers/pci/controller/dwc/*designware*
 
 PCI DRIVER FOR TI DRA7XX/J721E
 M: Vignesh Raghavendra <vigneshr@ti.com>
+R: Siddharth Vadapalli <s-vadapalli@ti.com>
 L: linux-omap@vger.kernel.org
 L: linux-pci@vger.kernel.org
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -22009,6 +22015,14 @@ F: Documentation/devicetree/bindings/media/i2c/ti,ds90*
 F: drivers/media/i2c/ds90*
 F: include/media/i2c/ds90*
 
+TI HDC302X HUMIDITY DRIVER
+M: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+M: Li peiyu <579lpy@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/humidity/ti,hdc3020.yaml
+F: drivers/iio/humidity/hdc3020.c
+
 TI ICSSG ETHERNET DRIVER (ICSSG)
 R: MD Danish Anwar <danishanwar@ti.com>
 R: Roger Quadros <rogerq@kernel.org>


@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -294,15 +294,15 @@ may-sync-config := 1
 single-build :=
 
 ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
-	ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
-		need-config :=
-	endif
+    ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
+        need-config :=
+    endif
 endif
 
 ifneq ($(filter $(no-sync-config-targets), $(MAKECMDGOALS)),)
-	ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
-		may-sync-config :=
-	endif
+    ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
+        may-sync-config :=
+    endif
 endif
 
 need-compiler := $(may-sync-config)
@@ -323,9 +323,9 @@ endif
 
 # We cannot build single targets and the others at the same time
 ifneq ($(filter $(single-targets), $(MAKECMDGOALS)),)
-	single-build := 1
-	ifneq ($(filter-out $(single-targets), $(MAKECMDGOALS)),)
-		mixed-build := 1
-	endif
+    single-build := 1
+    ifneq ($(filter-out $(single-targets), $(MAKECMDGOALS)),)
+        mixed-build := 1
+    endif
 endif
 
 # For "make -j clean all", "make -j mrproper defconfig all", etc.


@@ -83,7 +83,7 @@ struct arm64_ftr_bits {
  * to full-0 denotes that this field has no override
  *
  * A @mask field set to full-0 with the corresponding @val field set
- * to full-1 denotes thath this field has an invalid override.
+ * to full-1 denotes that this field has an invalid override.
  */
 struct arm64_ftr_override {
 	u64 val;


@@ -61,6 +61,7 @@
 #define ARM_CPU_IMP_HISI 0x48
 #define ARM_CPU_IMP_APPLE 0x61
 #define ARM_CPU_IMP_AMPERE 0xC0
+#define ARM_CPU_IMP_MICROSOFT 0x6D
 
 #define ARM_CPU_PART_AEM_V8 0xD0F
 #define ARM_CPU_PART_FOUNDATION 0xD00
@@ -135,6 +136,8 @@
 
 #define AMPERE_CPU_PART_AMPERE1 0xAC3
 
+#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
+
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -193,6 +196,7 @@
 #define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
 #define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
+#define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
 #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX


@@ -62,13 +62,13 @@ static inline void cpacr_restore(unsigned long cpacr)
  * When we defined the maximum SVE vector length we defined the ABI so
  * that the maximum vector length included all the reserved for future
  * expansion bits in ZCR rather than those just currently defined by
- * the architecture. While SME follows a similar pattern the fact that
- * it includes a square matrix means that any allocations that attempt
- * to cover the maximum potential vector length (such as happen with
- * the regset used for ptrace) end up being extremely large. Define
- * the much lower actual limit for use in such situations.
+ * the architecture. Using this length to allocate worst size buffers
+ * results in excessively large allocations, and this effect is even
+ * more pronounced for SME due to ZA. Define more suitable VLs for
+ * these situations.
  */
-#define SME_VQ_MAX	16
+#define ARCH_SVE_VQ_MAX ((ZCR_ELx_LEN_MASK >> ZCR_ELx_LEN_SHIFT) + 1)
+#define SME_VQ_MAX	((SMCR_ELx_LEN_MASK >> SMCR_ELx_LEN_SHIFT) + 1)
 
 struct task_struct;

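A hedged arithmetic note on the hunk above: the ZCR_ELx and SMCR_ELx LEN fields are each 4 bits wide, so (mask >> shift) evaluates to 15 and both ARCH_SVE_VQ_MAX and SME_VQ_MAX come out as 16 vector quanta, i.e. the architectural maximum of 16 x 128 = 2048-bit vectors; the practical change is that the limits are now derived from the register field definitions instead of a hard-coded 16.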

@@ -15,6 +15,10 @@
 
 #define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
 
+/*
+ * Prefer the constraint "S" to support PIC with GCC. Clang before 19 does not
+ * support "S" on a symbol with a constant offset, so we use "i" as a fallback.
+ */
 static __always_inline bool arch_static_branch(struct static_key * const key,
                                                const bool branch)
 {
@@ -23,9 +27,9 @@ static __always_inline bool arch_static_branch(struct static_key * const key,
                  " .pushsection __jump_table, \"aw\" \n\t"
                  " .align 3 \n\t"
                  " .long 1b - ., %l[l_yes] - . \n\t"
-                 " .quad %c0 - . \n\t"
+                 " .quad (%[key] - .) + %[bit0] \n\t"
                  " .popsection \n\t"
-                 : : "i"(&((char *)key)[branch]) : : l_yes);
+                 : : [key]"Si"(key), [bit0]"i"(branch) : : l_yes);
 
         return false;
 l_yes:
@@ -40,9 +44,9 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const key,
                  " .pushsection __jump_table, \"aw\" \n\t"
                  " .align 3 \n\t"
                  " .long 1b - ., %l[l_yes] - . \n\t"
-                 " .quad %c0 - . \n\t"
+                 " .quad (%[key] - .) + %[bit0] \n\t"
                  " .popsection \n\t"
-                 : : "i"(&((char *)key)[branch]) : : l_yes);
+                 : : [key]"Si"(key), [bit0]"i"(branch) : : l_yes);
 
         return false;
 l_yes:

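As context for the constraint rework above, a minimal sketch of the consumer-side API that this inline asm implements (the key name and call sites are hypothetical; DEFINE_STATIC_KEY_FALSE, static_branch_unlikely and static_branch_enable are the existing <linux/jump_label.h> interfaces):

    #include <linux/jump_label.h>

    /* Hypothetical key; defaults to false, so the branch below starts
     * out as a NOP and is only patched in when the key is enabled. */
    static DEFINE_STATIC_KEY_FALSE(use_fast_path);

    void do_work(void)
    {
        if (static_branch_unlikely(&use_fast_path)) {
            /* patched-in fast path */
        } else {
            /* default slow path */
        }
    }

    void enable_fast_path(void)
    {
        static_branch_enable(&use_fast_path); /* patches all call sites */
    }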

@@ -374,6 +374,7 @@ static const struct midr_range erratum_1463225[] = {
 static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
 #ifdef CONFIG_ARM64_ERRATUM_2139208
 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_2119858
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
@@ -387,6 +388,7 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
 static const struct midr_range tsb_flush_fail_cpus[] = {
 #ifdef CONFIG_ARM64_ERRATUM_2067961
 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_2054223
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
@@ -399,6 +401,7 @@ static const struct midr_range tsb_flush_fail_cpus[] = {
 static struct midr_range trbe_write_out_of_range_cpus[] = {
 #ifdef CONFIG_ARM64_ERRATUM_2253138
 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_2224489
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),


@@ -1635,7 +1635,7 @@ void fpsimd_preserve_current_state(void)
 void fpsimd_signal_preserve_current_state(void)
 {
 	fpsimd_preserve_current_state();
-	if (test_thread_flag(TIF_SVE))
+	if (current->thread.fp_type == FP_STATE_SVE)
 		sve_to_fpsimd(current);
 }


@@ -1500,7 +1500,8 @@ static const struct user_regset aarch64_regsets[] = {
 #ifdef CONFIG_ARM64_SVE
 	[REGSET_SVE] = { /* Scalable Vector Extension */
 		.core_note_type = NT_ARM_SVE,
-		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
+		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
+					      SVE_PT_REGS_SVE),
 				  SVE_VQ_BYTES),
 		.size = SVE_VQ_BYTES,
 		.align = SVE_VQ_BYTES,


@@ -242,7 +242,7 @@ static int preserve_sve_context(struct sve_context __user *ctx)
 		vl = task_get_sme_vl(current);
 		vq = sve_vq_from_vl(vl);
 		flags |= SVE_SIG_FLAG_SM;
-	} else if (test_thread_flag(TIF_SVE)) {
+	} else if (current->thread.fp_type == FP_STATE_SVE) {
 		vq = sve_vq_from_vl(vl);
 	}
 
@@ -878,7 +878,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
 	if (system_supports_sve() || system_supports_sme()) {
 		unsigned int vq = 0;
 
-		if (add_all || test_thread_flag(TIF_SVE) ||
+		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
 		    thread_sm_enabled(&current->thread)) {
 			int vl = max(sve_max_vl(), sme_max_vl());


@@ -3,7 +3,6 @@
 # KVM configuration
 #
 
-source "virt/lib/Kconfig"
 source "virt/kvm/Kconfig"
 
 menuconfig VIRTUALIZATION


@@ -1419,7 +1419,6 @@ kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
 						 level + 1);
 		if (ret) {
 			kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
-			mm_ops->put_page(pgtable);
 			return ERR_PTR(ret);
 		}
 
@@ -1502,7 +1501,6 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
 
 	if (!stage2_try_break_pte(ctx, mmu)) {
 		kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
-		mm_ops->put_page(childp);
 		return -EAGAIN;
 	}


@@ -101,6 +101,17 @@ void __init kvm_hyp_reserve(void)
 		 hyp_mem_base);
 }
 
+static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+{
+	if (host_kvm->arch.pkvm.handle) {
+		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
+					  host_kvm->arch.pkvm.handle));
+	}
+
+	host_kvm->arch.pkvm.handle = 0;
+	free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
+}
+
 /*
  * Allocates and donates memory for hypervisor VM structs at EL2.
  *
@@ -181,7 +192,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
 	return 0;
 
 destroy_vm:
-	pkvm_destroy_hyp_vm(host_kvm);
+	__pkvm_destroy_hyp_vm(host_kvm);
 	return ret;
 free_vm:
 	free_pages_exact(hyp_vm, hyp_vm_sz);
@@ -194,23 +205,19 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
 {
 	int ret = 0;
 
-	mutex_lock(&host_kvm->lock);
+	mutex_lock(&host_kvm->arch.config_lock);
 	if (!host_kvm->arch.pkvm.handle)
 		ret = __pkvm_create_hyp_vm(host_kvm);
-	mutex_unlock(&host_kvm->lock);
+	mutex_unlock(&host_kvm->arch.config_lock);
 
 	return ret;
 }
 
 void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
 {
-	if (host_kvm->arch.pkvm.handle) {
-		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
-					  host_kvm->arch.pkvm.handle));
-	}
-
-	host_kvm->arch.pkvm.handle = 0;
-	free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
+	mutex_lock(&host_kvm->arch.config_lock);
+	__pkvm_destroy_hyp_vm(host_kvm);
+	mutex_unlock(&host_kvm->arch.config_lock);
 }
 
 int pkvm_init_host_vm(struct kvm *host_kvm)


@@ -15,10 +15,10 @@
 KBUILD_DEFCONFIG := multi_defconfig
 
 ifdef cross_compiling
-  ifeq ($(CROSS_COMPILE),)
-	CROSS_COMPILE := $(call cc-cross-prefix, \
-		m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
-  endif
+    ifeq ($(CROSS_COMPILE),)
+        CROSS_COMPILE := $(call cc-cross-prefix, \
+            m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
+    endif
 endif
 
 #

@@ -241,7 +241,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 	" .set pop"
 	: "=&r" (sum), "=&r" (tmp)
 	: "r" (saddr), "r" (daddr),
-	  "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));
+	  "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)
+	: "memory");
 
 	return csum_fold(sum);
 }

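A hedged aside on the hunk above: the asm consumes the saddr/daddr buffers through pointer operands, which the register constraints alone do not express; the added "memory" clobber is what tells the compiler about it. A generic, non-MIPS illustration of the same idiom:

    /*
     * The asm only receives the pointer in a register. Without the
     * "memory" clobber the compiler may assume *buf is never read by
     * the asm, and could reorder or drop earlier stores to it.
     */
    static inline void consume_buffer(const unsigned long *buf)
    {
        asm volatile("" : : "r" (buf) : "memory");
    }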

@@ -60,6 +60,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
 					   unsigned long val)
 {
 	regs->cp0_epc = val;
+	regs->cp0_cause &= ~CAUSEF_BD;
 }
 
 /* Query offset/name of register from its name/offset */
@@ -154,6 +155,8 @@ static inline long regs_return_value(struct pt_regs *regs)
 }
 
 #define instruction_pointer(regs) ((regs)->cp0_epc)
+extern unsigned long exception_ip(struct pt_regs *regs);
+#define exception_ip(regs) exception_ip(regs)
 #define profile_pc(regs) instruction_pointer(regs)
 
 extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);

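A hedged sketch of how the new accessor is meant to be used: on MIPS, cp0_epc points at the branch rather than the faulting instruction when a fault hits a branch delay slot, so code that reports a fault address should go through exception_ip() (the caller below is hypothetical):

    #include <linux/printk.h>
    #include <asm/ptrace.h>

    /* Hypothetical fault reporter. */
    static void report_fault(struct pt_regs *regs)
    {
        /* exception_ip() folds in the branch-delay-slot case. */
        pr_err("fault at %px\n", (void *)exception_ip(regs));
    }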

@@ -31,6 +31,7 @@
 #include <linux/seccomp.h>
 #include <linux/ftrace.h>
 
+#include <asm/branch.h>
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
 #include <asm/cpu-info.h>
@@ -48,6 +49,12 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
 
+unsigned long exception_ip(struct pt_regs *regs)
+{
+	return exception_epc(regs);
+}
+EXPORT_SYMBOL(exception_ip);
+
 /*
  * Called by kernel/ptrace.c when detaching..
  *


@@ -50,12 +50,12 @@ export CROSS32CC
 
 # Set default cross compiler for kernel build
 ifdef cross_compiling
-  ifeq ($(CROSS_COMPILE),)
-	CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
-	CROSS_COMPILE := $(call cc-cross-prefix, \
-		$(foreach a,$(CC_ARCHES), \
-		$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
-  endif
+    ifeq ($(CROSS_COMPILE),)
+        CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
+        CROSS_COMPILE := $(call cc-cross-prefix, \
+            $(foreach a,$(CC_ARCHES), \
+            $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
+    endif
 endif
 
 ifdef CONFIG_DYNAMIC_FTRACE

@@ -20,14 +20,6 @@
 #ifndef __ASSEMBLY__
 extern void _mcount(void);
 
-static inline unsigned long ftrace_call_adjust(unsigned long addr)
-{
-	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
-		addr += MCOUNT_INSN_SIZE;
-
-	return addr;
-}
-
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
 				    unsigned long sp);
 
@@ -142,8 +134,10 @@ static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
 #ifdef CONFIG_FUNCTION_TRACER
 extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
 void ftrace_free_init_tramp(void);
+unsigned long ftrace_call_adjust(unsigned long addr);
 #else
 static inline void ftrace_free_init_tramp(void) { }
+static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; }
 #endif
 #endif /* !__ASSEMBLY__ */


@@ -32,7 +32,7 @@ typedef struct {
  */
 struct papr_sysparm_buf {
 	__be16 len;
-	char val[PAPR_SYSPARM_MAX_OUTPUT];
+	u8 val[PAPR_SYSPARM_MAX_OUTPUT];
 };
 
 struct papr_sysparm_buf *papr_sysparm_buf_alloc(void);


@@ -617,6 +617,8 @@
 #endif
 #define SPRN_HID2	0x3F8	/* Hardware Implementation Register 2 */
 #define SPRN_HID2_GEKKO	0x398	/* Gekko HID2 Register */
+#define SPRN_HID2_G2_LE	0x3F3	/* G2_LE HID2 Register */
+#define HID2_G2_LE_HBE	(1<<18)	/* High BAT Enable (G2_LE) */
 #define SPRN_IABR	0x3F2	/* Instruction Address Breakpoint Register */
 #define SPRN_IABR2	0x3FA	/* 83xx */
 #define SPRN_IBCR	0x135	/* 83xx Insn Breakpoint Control Reg */


@@ -14,6 +14,7 @@ typedef struct func_desc func_desc_t;
 
 extern char __head_end[];
 extern char __srwx_boundary[];
+extern char __exittext_begin[], __exittext_end[];
 
 /* Patch sites */
 extern s32 patch__call_flush_branch_caches1;


@@ -14,7 +14,7 @@
 
 #ifdef __KERNEL__
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && CONFIG_THREAD_SHIFT < 15
 #define MIN_THREAD_SHIFT	(CONFIG_THREAD_SHIFT + 1)
 #else
 #define MIN_THREAD_SHIFT	CONFIG_THREAD_SHIFT

View file

@ -14,7 +14,7 @@ enum {
struct papr_sysparm_io_block { struct papr_sysparm_io_block {
__u32 parameter; __u32 parameter;
__u16 length; __u16 length;
char data[PAPR_SYSPARM_MAX_OUTPUT]; __u8 data[PAPR_SYSPARM_MAX_OUTPUT];
}; };
/** /**

View file

@ -26,6 +26,15 @@ BEGIN_FTR_SECTION
bl __init_fpu_registers bl __init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
bl setup_common_caches bl setup_common_caches
/*
* This assumes that all cores using __setup_cpu_603 with
* MMU_FTR_USE_HIGH_BATS are G2_LE compatible
*/
BEGIN_MMU_FTR_SECTION
bl setup_g2_le_hid2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
mtlr r5 mtlr r5
blr blr
_GLOBAL(__setup_cpu_604) _GLOBAL(__setup_cpu_604)
@ -115,6 +124,16 @@ SYM_FUNC_START_LOCAL(setup_604_hid0)
blr blr
SYM_FUNC_END(setup_604_hid0) SYM_FUNC_END(setup_604_hid0)
/* Enable high BATs for G2_LE and derivatives like e300cX */
SYM_FUNC_START_LOCAL(setup_g2_le_hid2)
mfspr r11,SPRN_HID2_G2_LE
oris r11,r11,HID2_G2_LE_HBE@h
mtspr SPRN_HID2_G2_LE,r11
sync
isync
blr
SYM_FUNC_END(setup_g2_le_hid2)
/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some /* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
* erratas we work around here. * erratas we work around here.
* Moto MPC710CE.pdf describes them, those are errata * Moto MPC710CE.pdf describes them, those are errata
@ -495,4 +514,3 @@ _GLOBAL(__restore_cpu_setup)
mtcr r7 mtcr r7
blr blr
_ASM_NOKPROBE_SYMBOL(__restore_cpu_setup) _ASM_NOKPROBE_SYMBOL(__restore_cpu_setup)

View file

@ -8,7 +8,8 @@
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
PPC_FEATURE_HAS_FPU | PPC_FEATURE_64) PPC_FEATURE_HAS_FPU | PPC_FEATURE_64 | \
PPC_FEATURE_BOOKE)
#else #else
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
PPC_FEATURE_BOOKE) PPC_FEATURE_BOOKE)

View file

@ -52,7 +52,8 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
mr r10,r1 mr r10,r1
ld r1,PACAKSAVE(r13) ld r1,PACAKSAVE(r13)
std r10,0(r1) std r10,0(r1)
std r11,_NIP(r1) std r11,_LINK(r1)
std r11,_NIP(r1) /* Saved LR is also the next instruction */
std r12,_MSR(r1) std r12,_MSR(r1)
std r0,GPR0(r1) std r0,GPR0(r1)
std r10,GPR1(r1) std r10,GPR1(r1)
@ -70,7 +71,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
std r9,GPR13(r1) std r9,GPR13(r1)
SAVE_NVGPRS(r1) SAVE_NVGPRS(r1)
std r11,_XER(r1) std r11,_XER(r1)
std r11,_LINK(r1)
std r11,_CTR(r1) std r11,_CTR(r1)
li r11,\trapnr li r11,\trapnr

View file

@ -1289,8 +1289,10 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
struct iommu_table_group *table_group; struct iommu_table_group *table_group;
/* At first attach the ownership is already set */ /* At first attach the ownership is already set */
if (!domain) if (!domain) {
iommu_group_put(grp);
return 0; return 0;
}
table_group = iommu_group_get_iommudata(grp); table_group = iommu_group_get_iommudata(grp);
/* /*

View file

@ -27,10 +27,22 @@
#include <asm/ftrace.h> #include <asm/ftrace.h>
#include <asm/syscall.h> #include <asm/syscall.h>
#include <asm/inst.h> #include <asm/inst.h>
#include <asm/sections.h>
#define NUM_FTRACE_TRAMPS 2 #define NUM_FTRACE_TRAMPS 2
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS]; static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
unsigned long ftrace_call_adjust(unsigned long addr)
{
if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end)
return 0;
if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
addr += MCOUNT_INSN_SIZE;
return addr;
}
static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link) static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
{ {
ppc_inst_t op; ppc_inst_t op;

View file

@ -37,6 +37,11 @@
#define NUM_FTRACE_TRAMPS 8 #define NUM_FTRACE_TRAMPS 8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS]; static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
static ppc_inst_t static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link) ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{ {

View file

@ -281,7 +281,9 @@ SECTIONS
* to deal with references from __bug_table * to deal with references from __bug_table
*/ */
.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
__exittext_begin = .;
EXIT_TEXT EXIT_TEXT
__exittext_end = .;
} }
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);

View file

@ -64,6 +64,7 @@ int __init __weak kasan_init_region(void *start, size_t size)
if (ret) if (ret)
return ret; return ret;
k_start = k_start & PAGE_MASK;
block = memblock_alloc(k_end - k_start, PAGE_SIZE); block = memblock_alloc(k_end - k_start, PAGE_SIZE);
if (!block) if (!block)
return -ENOMEM; return -ENOMEM;

View file

@ -27,7 +27,7 @@
#include "mpc85xx.h" #include "mpc85xx.h"
void __init mpc8536_ds_pic_init(void) static void __init mpc8536_ds_pic_init(void)
{ {
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC "); 0, 256, " OpenPIC ");

View file

@ -21,7 +21,7 @@
#include "mpc85xx.h" #include "mpc85xx.h"
void __init mvme2500_pic_init(void) static void __init mvme2500_pic_init(void)
{ {
struct mpic *mpic = mpic_alloc(NULL, 0, struct mpic *mpic = mpic_alloc(NULL, 0,
MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU,

View file

@ -24,7 +24,7 @@
#include "mpc85xx.h" #include "mpc85xx.h"
void __init p1010_rdb_pic_init(void) static void __init p1010_rdb_pic_init(void)
{ {
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU, MPIC_SINGLE_DEST_CPU,

View file

@ -370,7 +370,7 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
* *
* @pixclock: the wavelength, in picoseconds, of the clock * @pixclock: the wavelength, in picoseconds, of the clock
*/ */
void p1022ds_set_pixel_clock(unsigned int pixclock) static void p1022ds_set_pixel_clock(unsigned int pixclock)
{ {
struct device_node *guts_np = NULL; struct device_node *guts_np = NULL;
struct ccsr_guts __iomem *guts; struct ccsr_guts __iomem *guts;
@ -418,7 +418,7 @@ void p1022ds_set_pixel_clock(unsigned int pixclock)
/** /**
* p1022ds_valid_monitor_port: set the monitor port for sysfs * p1022ds_valid_monitor_port: set the monitor port for sysfs
*/ */
enum fsl_diu_monitor_port static enum fsl_diu_monitor_port
p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port) p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port)
{ {
switch (port) { switch (port) {
@ -432,7 +432,7 @@ p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port)
#endif #endif
void __init p1022_ds_pic_init(void) static void __init p1022_ds_pic_init(void)
{ {
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU, MPIC_SINGLE_DEST_CPU,

View file

@ -40,7 +40,7 @@
* *
* @pixclock: the wavelength, in picoseconds, of the clock * @pixclock: the wavelength, in picoseconds, of the clock
*/ */
void p1022rdk_set_pixel_clock(unsigned int pixclock) static void p1022rdk_set_pixel_clock(unsigned int pixclock)
{ {
struct device_node *guts_np = NULL; struct device_node *guts_np = NULL;
struct ccsr_guts __iomem *guts; struct ccsr_guts __iomem *guts;
@ -88,7 +88,7 @@ void p1022rdk_set_pixel_clock(unsigned int pixclock)
/** /**
* p1022rdk_valid_monitor_port: set the monitor port for sysfs * p1022rdk_valid_monitor_port: set the monitor port for sysfs
*/ */
enum fsl_diu_monitor_port static enum fsl_diu_monitor_port
p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port) p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port)
{ {
return FSL_DIU_PORT_DVI; return FSL_DIU_PORT_DVI;
@ -96,7 +96,7 @@ p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port)
#endif #endif
void __init p1022_rdk_pic_init(void) static void __init p1022_rdk_pic_init(void)
{ {
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU, MPIC_SINGLE_DEST_CPU,

View file

@ -8,6 +8,8 @@
#include <linux/of_irq.h> #include <linux/of_irq.h>
#include <linux/io.h> #include <linux/io.h>
#include "socrates_fpga_pic.h"
/* /*
* The FPGA supports 9 interrupt sources, which can be routed to 3 * The FPGA supports 9 interrupt sources, which can be routed to 3
* interrupt request lines of the MPIC. The line to be used can be * interrupt request lines of the MPIC. The line to be used can be

View file

@ -37,7 +37,7 @@
#define MPC85xx_L2CTL_L2I 0x40000000 /* L2 flash invalidate */ #define MPC85xx_L2CTL_L2I 0x40000000 /* L2 flash invalidate */
#define MPC85xx_L2CTL_L2SIZ_MASK 0x30000000 /* L2 SRAM size (R/O) */ #define MPC85xx_L2CTL_L2SIZ_MASK 0x30000000 /* L2 SRAM size (R/O) */
void __init xes_mpc85xx_pic_init(void) static void __init xes_mpc85xx_pic_init(void)
{ {
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC "); 0, 256, " OpenPIC ");

View file

@ -662,8 +662,12 @@ u64 pseries_paravirt_steal_clock(int cpu)
{ {
struct lppaca *lppaca = &lppaca_of(cpu); struct lppaca *lppaca = &lppaca_of(cpu);
return be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) + /*
be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb)); * VPA steal time counters are reported at TB frequency. Hence do a
* conversion to ns before returning
*/
return tb_to_ns(be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb)));
} }
#endif #endif

View file

@ -41,7 +41,7 @@ struct memcons memcons = {
.input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE], .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
}; };
void memcons_putc(char c) static void memcons_putc(char c)
{ {
char *new_output_pos; char *new_output_pos;
@ -54,7 +54,7 @@ void memcons_putc(char c)
memcons.output_pos = new_output_pos; memcons.output_pos = new_output_pos;
} }
int memcons_getc_poll(void) static int memcons_getc_poll(void)
{ {
char c; char c;
char *new_input_pos; char *new_input_pos;
@ -77,7 +77,7 @@ int memcons_getc_poll(void)
return -1; return -1;
} }
int memcons_getc(void) static int memcons_getc(void)
{ {
int c; int c;

View file

@ -41,7 +41,7 @@ static int __init parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc); early_param("no-steal-acc", parse_no_stealacc);
DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64); static DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
static bool __init has_pv_steal_clock(void) static bool __init has_pv_steal_clock(void)
{ {
@ -91,8 +91,8 @@ static int pv_time_cpu_down_prepare(unsigned int cpu)
static u64 pv_time_steal_clock(int cpu) static u64 pv_time_steal_clock(int cpu)
{ {
struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu); struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu);
u32 sequence; __le32 sequence;
u64 steal; __le64 steal;
/* /*
* Check the sequence field before and after reading the steal * Check the sequence field before and after reading the steal

View file

@ -26,8 +26,12 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{ {
gpa_t shmem = vcpu->arch.sta.shmem; gpa_t shmem = vcpu->arch.sta.shmem;
u64 last_steal = vcpu->arch.sta.last_steal; u64 last_steal = vcpu->arch.sta.last_steal;
u32 *sequence_ptr, sequence; __le32 __user *sequence_ptr;
u64 *steal_ptr, steal; __le64 __user *steal_ptr;
__le32 sequence_le;
__le64 steal_le;
u32 sequence;
u64 steal;
unsigned long hva; unsigned long hva;
gfn_t gfn; gfn_t gfn;
@ -47,22 +51,22 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
return; return;
} }
sequence_ptr = (u32 *)(hva + offset_in_page(shmem) + sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
offsetof(struct sbi_sta_struct, sequence)); offsetof(struct sbi_sta_struct, sequence));
steal_ptr = (u64 *)(hva + offset_in_page(shmem) + steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
offsetof(struct sbi_sta_struct, steal)); offsetof(struct sbi_sta_struct, steal));
if (WARN_ON(get_user(sequence, sequence_ptr))) if (WARN_ON(get_user(sequence_le, sequence_ptr)))
return; return;
sequence = le32_to_cpu(sequence); sequence = le32_to_cpu(sequence_le);
sequence += 1; sequence += 1;
if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr))) if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
return; return;
if (!WARN_ON(get_user(steal, steal_ptr))) { if (!WARN_ON(get_user(steal_le, steal_ptr))) {
steal = le64_to_cpu(steal); steal = le64_to_cpu(steal_le);
vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay); vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
steal += vcpu->arch.sta.last_steal - last_steal; steal += vcpu->arch.sta.last_steal - last_steal;
WARN_ON(put_user(cpu_to_le64(steal), steal_ptr)); WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));

View file

@ -112,13 +112,13 @@ ifeq ($(CONFIG_X86_32),y)
# temporary until string.h is fixed # temporary until string.h is fixed
KBUILD_CFLAGS += -ffreestanding KBUILD_CFLAGS += -ffreestanding
ifeq ($(CONFIG_STACKPROTECTOR),y) ifeq ($(CONFIG_STACKPROTECTOR),y)
ifeq ($(CONFIG_SMP),y) ifeq ($(CONFIG_SMP),y)
KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
else else
KBUILD_CFLAGS += -mstack-protector-guard=global KBUILD_CFLAGS += -mstack-protector-guard=global
endif
endif endif
endif
else else
BITS := 64 BITS := 64
UTS_MACHINE := x86_64 UTS_MACHINE := x86_64

View file

@ -6,6 +6,9 @@
#include <linux/export.h> #include <linux/export.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/msr-index.h> #include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/segment.h>
#include <asm/cache.h>
.pushsection .noinstr.text, "ax" .pushsection .noinstr.text, "ax"
@ -20,3 +23,23 @@ SYM_FUNC_END(entry_ibpb)
EXPORT_SYMBOL_GPL(entry_ibpb); EXPORT_SYMBOL_GPL(entry_ibpb);
.popsection .popsection
/*
* Define the VERW operand that is disguised as entry code so that
* it can be referenced with KPTI enabled. This ensure VERW can be
* used late in exit-to-user path after page tables are switched.
*/
.pushsection .entry.text, "ax"
.align L1_CACHE_BYTES, 0xcc
SYM_CODE_START_NOALIGN(mds_verw_sel)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
.word __KERNEL_DS
.align L1_CACHE_BYTES, 0xcc
SYM_CODE_END(mds_verw_sel);
/* For KVM */
EXPORT_SYMBOL_GPL(mds_verw_sel);
.popsection

View file

@ -885,6 +885,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
BUG_IF_WRONG_CR3 no_user_check=1 BUG_IF_WRONG_CR3 no_user_check=1
popfl popfl
popl %eax popl %eax
CLEAR_CPU_BUFFERS
/* /*
* Return back to the vDSO, which will pop ecx and edx. * Return back to the vDSO, which will pop ecx and edx.
@ -954,6 +955,7 @@ restore_all_switch_stack:
/* Restore user state */ /* Restore user state */
RESTORE_REGS pop=4 # skip orig_eax/error_code RESTORE_REGS pop=4 # skip orig_eax/error_code
CLEAR_CPU_BUFFERS
.Lirq_return: .Lirq_return:
/* /*
* ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@ -1146,6 +1148,7 @@ SYM_CODE_START(asm_exc_nmi)
/* Not on SYSENTER stack. */ /* Not on SYSENTER stack. */
call exc_nmi call exc_nmi
CLEAR_CPU_BUFFERS
jmp .Lnmi_return jmp .Lnmi_return
.Lnmi_from_sysenter_stack: .Lnmi_from_sysenter_stack:

View file

@ -161,6 +161,7 @@ syscall_return_via_sysret:
SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL) SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
swapgs swapgs
CLEAR_CPU_BUFFERS
sysretq sysretq
SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL) SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
@ -573,6 +574,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
.Lswapgs_and_iret: .Lswapgs_and_iret:
swapgs swapgs
CLEAR_CPU_BUFFERS
/* Assert that the IRET frame indicates user mode. */ /* Assert that the IRET frame indicates user mode. */
testb $3, 8(%rsp) testb $3, 8(%rsp)
jnz .Lnative_iret jnz .Lnative_iret
@ -723,6 +725,8 @@ native_irq_return_ldt:
*/ */
popq %rax /* Restore user RAX */ popq %rax /* Restore user RAX */
CLEAR_CPU_BUFFERS
/* /*
* RSP now points to an ordinary IRET frame, except that the page * RSP now points to an ordinary IRET frame, except that the page
* is read-only and RSP[31:16] are preloaded with the userspace * is read-only and RSP[31:16] are preloaded with the userspace
@ -1449,6 +1453,12 @@ nmi_restore:
std std
movq $0, 5*8(%rsp) /* clear "NMI executing" */ movq $0, 5*8(%rsp) /* clear "NMI executing" */
/*
* Skip CLEAR_CPU_BUFFERS here, since it only helps in rare cases like
* NMI in kernel after user state is restored. For an unprivileged user
* these conditions are hard to meet.
*/
/* /*
* iretq reads the "iret" frame and exits the NMI stack in a * iretq reads the "iret" frame and exits the NMI stack in a
* single instruction. We are returning to kernel mode, so this * single instruction. We are returning to kernel mode, so this
@ -1466,6 +1476,7 @@ SYM_CODE_START(entry_SYSCALL32_ignore)
UNWIND_HINT_END_OF_STACK UNWIND_HINT_END_OF_STACK
ENDBR ENDBR
mov $-ENOSYS, %eax mov $-ENOSYS, %eax
CLEAR_CPU_BUFFERS
sysretl sysretl
SYM_CODE_END(entry_SYSCALL32_ignore) SYM_CODE_END(entry_SYSCALL32_ignore)

View file

@ -270,6 +270,7 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_unsafe_stack, SYM_L_GLOBAL)
xorl %r9d, %r9d xorl %r9d, %r9d
xorl %r10d, %r10d xorl %r10d, %r10d
swapgs swapgs
CLEAR_CPU_BUFFERS
sysretl sysretl
SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL) SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR ANNOTATE_NOENDBR

View file

@ -95,7 +95,7 @@
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */ #define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
/* FREE, was #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) "" LFENCE synchronizes RDTSC */ #define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* "" Clear CPU buffers using VERW */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ #define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */

View file

@ -91,7 +91,6 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
static __always_inline void arch_exit_to_user_mode(void) static __always_inline void arch_exit_to_user_mode(void)
{ {
mds_user_clear_cpu_buffers();
amd_clear_divider(); amd_clear_divider();
} }
#define arch_exit_to_user_mode arch_exit_to_user_mode #define arch_exit_to_user_mode arch_exit_to_user_mode

View file

@ -315,6 +315,17 @@
#endif #endif
.endm .endm
/*
* Macro to execute VERW instruction that mitigate transient data sampling
* attacks such as MDS. On affected systems a microcode update overloaded VERW
* instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
*
* Note: Only the memory operand variant of VERW clears the CPU buffers.
*/
.macro CLEAR_CPU_BUFFERS
ALTERNATIVE "", __stringify(verw mds_verw_sel), X86_FEATURE_CLEAR_CPU_BUF
.endm
#else /* __ASSEMBLY__ */ #else /* __ASSEMBLY__ */
#define ANNOTATE_RETPOLINE_SAFE \ #define ANNOTATE_RETPOLINE_SAFE \
@ -529,13 +540,14 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear); DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
extern u16 mds_verw_sel;
#include <asm/segment.h> #include <asm/segment.h>
/** /**
@ -561,17 +573,6 @@ static __always_inline void mds_clear_cpu_buffers(void)
asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc"); asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
} }
/**
* mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
*
* Clear CPU buffers if the corresponding static key is enabled
*/
static __always_inline void mds_user_clear_cpu_buffers(void)
{
if (static_branch_likely(&mds_user_clear))
mds_clear_cpu_buffers();
}
/** /**
* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
* *

View file

@ -111,9 +111,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */ /* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */ /* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear); DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear); EXPORT_SYMBOL_GPL(mds_idle_clear);
@ -252,7 +249,7 @@ static void __init mds_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
mds_mitigation = MDS_MITIGATION_VMWERV; mds_mitigation = MDS_MITIGATION_VMWERV;
static_branch_enable(&mds_user_clear); setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
(mds_nosmt || cpu_mitigations_auto_nosmt())) (mds_nosmt || cpu_mitigations_auto_nosmt()))
@ -356,7 +353,7 @@ static void __init taa_select_mitigation(void)
* For guests that can't determine whether the correct microcode is * For guests that can't determine whether the correct microcode is
* present on host, enable the mitigation for UCODE_NEEDED as well. * present on host, enable the mitigation for UCODE_NEEDED as well.
*/ */
static_branch_enable(&mds_user_clear); setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
if (taa_nosmt || cpu_mitigations_auto_nosmt()) if (taa_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false); cpu_smt_disable(false);
@ -424,7 +421,7 @@ static void __init mmio_select_mitigation(void)
*/ */
if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
boot_cpu_has(X86_FEATURE_RTM))) boot_cpu_has(X86_FEATURE_RTM)))
static_branch_enable(&mds_user_clear); setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
else else
static_branch_enable(&mmio_stale_data_clear); static_branch_enable(&mmio_stale_data_clear);
@ -484,12 +481,12 @@ static void __init md_clear_update_mitigation(void)
if (cpu_mitigations_off()) if (cpu_mitigations_off())
return; return;
if (!static_key_enabled(&mds_user_clear)) if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
goto out; goto out;
/* /*
* mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
* mitigation, if necessary. * Stale Data mitigation, if necessary.
*/ */
if (mds_mitigation == MDS_MITIGATION_OFF && if (mds_mitigation == MDS_MITIGATION_OFF &&
boot_cpu_has_bug(X86_BUG_MDS)) { boot_cpu_has_bug(X86_BUG_MDS)) {

View file

@ -1543,6 +1543,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
get_cpu_vendor(c); get_cpu_vendor(c);
get_cpu_cap(c); get_cpu_cap(c);
setup_force_cpu_cap(X86_FEATURE_CPUID); setup_force_cpu_cap(X86_FEATURE_CPUID);
get_cpu_address_sizes(c);
cpu_parse_early_param(); cpu_parse_early_param();
cpu_init_topology(c); cpu_init_topology(c);
@ -1557,11 +1558,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
this_cpu->c_bsp_init(c); this_cpu->c_bsp_init(c);
} else { } else {
setup_clear_cpu_cap(X86_FEATURE_CPUID); setup_clear_cpu_cap(X86_FEATURE_CPUID);
get_cpu_address_sizes(c);
cpu_init_topology(c); cpu_init_topology(c);
} }
get_cpu_address_sizes(c);
setup_force_cpu_cap(X86_FEATURE_ALWAYS); setup_force_cpu_cap(X86_FEATURE_ALWAYS);
cpu_set_bug_bits(c); cpu_set_bug_bits(c);

View file

@ -184,6 +184,90 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
return false; return false;
} }
#define MSR_IA32_TME_ACTIVATE 0x982
/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128 0
#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED 0
#define MKTME_DISABLED 1
#define MKTME_UNINITIALIZED 2
static int mktme_status = MKTME_UNINITIALIZED;
static void detect_tme_early(struct cpuinfo_x86 *c)
{
u64 tme_activate, tme_policy, tme_crypto_algs;
int keyid_bits = 0, nr_keyids = 0;
static u64 tme_activate_cpu0 = 0;
rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
if (mktme_status != MKTME_UNINITIALIZED) {
if (tme_activate != tme_activate_cpu0) {
/* Broken BIOS? */
pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
pr_err_once("x86/tme: MKTME is not usable\n");
mktme_status = MKTME_DISABLED;
/* Proceed. We may need to exclude bits from x86_phys_bits. */
}
} else {
tme_activate_cpu0 = tme_activate;
}
if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
pr_info_once("x86/tme: not enabled by BIOS\n");
mktme_status = MKTME_DISABLED;
return;
}
if (mktme_status != MKTME_UNINITIALIZED)
goto detect_keyid_bits;
pr_info("x86/tme: enabled by BIOS\n");
tme_policy = TME_ACTIVATE_POLICY(tme_activate);
if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
tme_crypto_algs);
mktme_status = MKTME_DISABLED;
}
detect_keyid_bits:
keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
nr_keyids = (1UL << keyid_bits) - 1;
if (nr_keyids) {
pr_info_once("x86/mktme: enabled by BIOS\n");
pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
} else {
pr_info_once("x86/mktme: disabled by BIOS\n");
}
if (mktme_status == MKTME_UNINITIALIZED) {
/* MKTME is usable */
mktme_status = MKTME_ENABLED;
}
/*
* KeyID bits effectively lower the number of physical address
* bits. Update cpuinfo_x86::x86_phys_bits accordingly.
*/
c->x86_phys_bits -= keyid_bits;
}
static void early_init_intel(struct cpuinfo_x86 *c) static void early_init_intel(struct cpuinfo_x86 *c)
{ {
u64 misc_enable; u64 misc_enable;
@ -315,6 +399,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
} }
check_memory_type_self_snoop_errata(c); check_memory_type_self_snoop_errata(c);
/*
* Adjust the number of physical bits early because it affects the
* valid bits of the MTRR mask registers.
*/
if (cpu_has(c, X86_FEATURE_TME))
detect_tme_early(c);
} }
static void bsp_init_intel(struct cpuinfo_x86 *c) static void bsp_init_intel(struct cpuinfo_x86 *c)
@ -475,90 +566,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
#endif #endif
} }
#define MSR_IA32_TME_ACTIVATE 0x982
/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128 0
#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED 0
#define MKTME_DISABLED 1
#define MKTME_UNINITIALIZED 2
static int mktme_status = MKTME_UNINITIALIZED;
static void detect_tme(struct cpuinfo_x86 *c)
{
u64 tme_activate, tme_policy, tme_crypto_algs;
int keyid_bits = 0, nr_keyids = 0;
static u64 tme_activate_cpu0 = 0;
rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
if (mktme_status != MKTME_UNINITIALIZED) {
if (tme_activate != tme_activate_cpu0) {
/* Broken BIOS? */
pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
pr_err_once("x86/tme: MKTME is not usable\n");
mktme_status = MKTME_DISABLED;
/* Proceed. We may need to exclude bits from x86_phys_bits. */
}
} else {
tme_activate_cpu0 = tme_activate;
}
if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
pr_info_once("x86/tme: not enabled by BIOS\n");
mktme_status = MKTME_DISABLED;
return;
}
if (mktme_status != MKTME_UNINITIALIZED)
goto detect_keyid_bits;
pr_info("x86/tme: enabled by BIOS\n");
tme_policy = TME_ACTIVATE_POLICY(tme_activate);
if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
tme_crypto_algs);
mktme_status = MKTME_DISABLED;
}
detect_keyid_bits:
keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
nr_keyids = (1UL << keyid_bits) - 1;
if (nr_keyids) {
pr_info_once("x86/mktme: enabled by BIOS\n");
pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
} else {
pr_info_once("x86/mktme: disabled by BIOS\n");
}
if (mktme_status == MKTME_UNINITIALIZED) {
/* MKTME is usable */
mktme_status = MKTME_ENABLED;
}
/*
* KeyID bits effectively lower the number of physical address
* bits. Update cpuinfo_x86::x86_phys_bits accordingly.
*/
c->x86_phys_bits -= keyid_bits;
}
static void init_cpuid_fault(struct cpuinfo_x86 *c) static void init_cpuid_fault(struct cpuinfo_x86 *c)
{ {
u64 msr; u64 msr;
@ -677,9 +684,6 @@ static void init_intel(struct cpuinfo_x86 *c)
init_ia32_feat_ctl(c); init_ia32_feat_ctl(c);
if (cpu_has(c, X86_FEATURE_TME))
detect_tme(c);
init_intel_misc_features(c); init_intel_misc_features(c);
split_lock_init(); split_lock_init();

View file

@ -563,9 +563,6 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
} }
if (this_cpu_dec_return(nmi_state)) if (this_cpu_dec_return(nmi_state))
goto nmi_restart; goto nmi_restart;
if (user_mode(regs))
mds_user_clear_cpu_buffers();
} }
#if IS_ENABLED(CONFIG_KVM_INTEL) #if IS_ENABLED(CONFIG_KVM_INTEL)

View file

@ -71,7 +71,7 @@ static int fixed_pmc_events[] = {
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{ {
struct kvm_pmc *pmc; struct kvm_pmc *pmc;
u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl; u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
int i; int i;
pmu->fixed_ctr_ctrl = data; pmu->fixed_ctr_ctrl = data;

View file

@ -2,7 +2,10 @@
#ifndef __KVM_X86_VMX_RUN_FLAGS_H #ifndef __KVM_X86_VMX_RUN_FLAGS_H
#define __KVM_X86_VMX_RUN_FLAGS_H #define __KVM_X86_VMX_RUN_FLAGS_H
#define VMX_RUN_VMRESUME (1 << 0) #define VMX_RUN_VMRESUME_SHIFT 0
#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1) #define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
#endif /* __KVM_X86_VMX_RUN_FLAGS_H */ #endif /* __KVM_X86_VMX_RUN_FLAGS_H */

View file

@ -139,7 +139,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
mov (%_ASM_SP), %_ASM_AX mov (%_ASM_SP), %_ASM_AX
/* Check if vmlaunch or vmresume is needed */ /* Check if vmlaunch or vmresume is needed */
test $VMX_RUN_VMRESUME, %ebx bt $VMX_RUN_VMRESUME_SHIFT, %ebx
/* Load guest registers. Don't clobber flags. */ /* Load guest registers. Don't clobber flags. */
mov VCPU_RCX(%_ASM_AX), %_ASM_CX mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@ -161,8 +161,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Load guest RAX. This kills the @regs pointer! */ /* Load guest RAX. This kills the @regs pointer! */
mov VCPU_RAX(%_ASM_AX), %_ASM_AX mov VCPU_RAX(%_ASM_AX), %_ASM_AX
/* Check EFLAGS.ZF from 'test VMX_RUN_VMRESUME' above */ /* Clobbers EFLAGS.ZF */
jz .Lvmlaunch CLEAR_CPU_BUFFERS
/* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
jnc .Lvmlaunch
/* /*
* After a successful VMRESUME/VMLAUNCH, control flow "magically" * After a successful VMRESUME/VMLAUNCH, control flow "magically"

View file

@ -388,7 +388,16 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{ {
vmx->disable_fb_clear = (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) && /*
* Disable VERW's behavior of clearing CPU buffers for the guest if the
* CPU isn't affected by MDS/TAA, and the host hasn't forcefully enabled
* the mitigation. Disabling the clearing behavior provides a
* performance boost for guests that aren't aware that manually clearing
* CPU buffers is unnecessary, at the cost of MSR accesses on VM-Entry
* and VM-Exit.
*/
vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
(host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
!boot_cpu_has_bug(X86_BUG_MDS) && !boot_cpu_has_bug(X86_BUG_MDS) &&
!boot_cpu_has_bug(X86_BUG_TAA); !boot_cpu_has_bug(X86_BUG_TAA);
@ -7224,11 +7233,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
guest_state_enter_irqoff(); guest_state_enter_irqoff();
/* L1D Flush includes CPU buffer clear to mitigate MDS */ /*
* L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
* mitigation for MDS is done late in VMentry and is still
* executed in spite of L1D Flush. This is because an extra VERW
* should not matter much after the big hammer L1D Flush.
*/
if (static_branch_unlikely(&vmx_l1d_should_flush)) if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu); vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&mds_user_clear))
mds_clear_cpu_buffers();
else if (static_branch_unlikely(&mmio_stale_data_clear) && else if (static_branch_unlikely(&mmio_stale_data_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm)) kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers(); mds_clear_cpu_buffers();

View file

@ -1704,22 +1704,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
struct kvm_msr_entry msr; struct kvm_msr_entry msr;
int r; int r;
/* Unconditionally clear the output for simplicity */
msr.data = 0;
msr.index = index; msr.index = index;
r = kvm_get_msr_feature(&msr); r = kvm_get_msr_feature(&msr);
if (r == KVM_MSR_RET_INVALID) { if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
/* Unconditionally clear the output for simplicity */ r = 0;
*data = 0;
if (kvm_msr_ignored_check(index, 0, false))
r = 0;
}
if (r)
return r;
*data = msr.data; *data = msr.data;
return 0; return r;
} }
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
@ -2511,7 +2506,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
} }
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
static inline int gtod_is_based_on_tsc(int mode) static inline bool gtod_is_based_on_tsc(int mode)
{ {
return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK; return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
} }
@ -5458,7 +5453,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) { if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
vcpu->arch.nmi_pending = 0; vcpu->arch.nmi_pending = 0;
atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending); atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
kvm_make_request(KVM_REQ_NMI, vcpu); if (events->nmi.pending)
kvm_make_request(KVM_REQ_NMI, vcpu);
} }
static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);

View file

@ -26,18 +26,31 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
for (; addr < end; addr = next) { for (; addr < end; addr = next) {
pud_t *pud = pud_page + pud_index(addr); pud_t *pud = pud_page + pud_index(addr);
pmd_t *pmd; pmd_t *pmd;
bool use_gbpage;
next = (addr & PUD_MASK) + PUD_SIZE; next = (addr & PUD_MASK) + PUD_SIZE;
if (next > end) if (next > end)
next = end; next = end;
if (info->direct_gbpages) { /* if this is already a gbpage, this portion is already mapped */
if (pud_large(*pud))
continue;
/* Is using a gbpage allowed? */
use_gbpage = info->direct_gbpages;
/* Don't use gbpage if it maps more than the requested region. */
/* at the begining: */
use_gbpage &= ((addr & ~PUD_MASK) == 0);
/* ... or at the end: */
use_gbpage &= ((next & ~PUD_MASK) == 0);
/* Never overwrite existing mappings */
use_gbpage &= !pud_present(*pud);
if (use_gbpage) {
pud_t pudval; pud_t pudval;
if (pud_present(*pud))
continue;
addr &= PUD_MASK;
pudval = __pud((addr - info->offset) | info->page_flag); pudval = __pud((addr - info->offset) | info->page_flag);
set_pud(pud, pudval); set_pud(pud, pudval);
continue; continue;

View file

@ -65,6 +65,8 @@ int xen_smp_intr_init(unsigned int cpu)
char *resched_name, *callfunc_name, *debug_name; char *resched_name, *callfunc_name, *debug_name;
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
if (!resched_name)
goto fail_mem;
per_cpu(xen_resched_irq, cpu).name = resched_name; per_cpu(xen_resched_irq, cpu).name = resched_name;
rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
cpu, cpu,
@ -77,6 +79,8 @@ int xen_smp_intr_init(unsigned int cpu)
per_cpu(xen_resched_irq, cpu).irq = rc; per_cpu(xen_resched_irq, cpu).irq = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
if (!callfunc_name)
goto fail_mem;
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
cpu, cpu,
@ -90,6 +94,9 @@ int xen_smp_intr_init(unsigned int cpu)
if (!xen_fifo_events) { if (!xen_fifo_events) {
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
if (!debug_name)
goto fail_mem;
per_cpu(xen_debug_irq, cpu).name = debug_name; per_cpu(xen_debug_irq, cpu).name = debug_name;
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
xen_debug_interrupt, xen_debug_interrupt,
@ -101,6 +108,9 @@ int xen_smp_intr_init(unsigned int cpu)
} }
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
if (!callfunc_name)
goto fail_mem;
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
cpu, cpu,
@ -114,6 +124,8 @@ int xen_smp_intr_init(unsigned int cpu)
return 0; return 0;
fail_mem:
rc = -ENOMEM;
fail: fail:
xen_smp_intr_free(cpu); xen_smp_intr_free(cpu);
return rc; return rc;

View file

@ -510,16 +510,6 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
return ret; return ret;
} }
static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
{
ivpu_boot_dpu_active_drive(vdev, false);
ivpu_boot_pwr_island_isolation_drive(vdev, true);
ivpu_boot_pwr_island_trickle_drive(vdev, false);
ivpu_boot_pwr_island_drive(vdev, false);
return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
}
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{ {
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES); u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
@ -616,12 +606,37 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
return 0; return 0;
} }
static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
if (IVPU_WA(punit_disabled))
return 0;
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
return ret;
}
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
return ret;
}
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev) static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{ {
int ret = 0; int ret = 0;
if (ivpu_boot_pwr_domain_disable(vdev)) { if (ivpu_hw_37xx_ip_reset(vdev)) {
ivpu_err(vdev, "Failed to disable power domain\n"); ivpu_err(vdev, "Failed to reset NPU\n");
ret = -EIO; ret = -EIO;
} }
@ -661,6 +676,11 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{ {
int ret; int ret;
/* PLL requests may fail when powering down, so issue WP 0 here */
ret = ivpu_pll_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret);
ret = ivpu_hw_37xx_d0i3_disable(vdev); ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret) if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret); ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

View file

@ -58,11 +58,14 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{ {
int ret; int ret;
/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
pci_save_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_shutdown(vdev); ret = ivpu_shutdown(vdev);
if (ret) { if (ret)
ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret); ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
return ret;
} pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
return ret; return ret;
} }
@ -71,6 +74,9 @@ static int ivpu_resume(struct ivpu_device *vdev)
{ {
int ret; int ret;
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
pci_restore_state(to_pci_dev(vdev->drm.dev));
retry: retry:
ret = ivpu_hw_power_up(vdev); ret = ivpu_hw_power_up(vdev);
if (ret) { if (ret) {
@ -120,15 +126,20 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
ivpu_fw_log_dump(vdev); ivpu_fw_log_dump(vdev);
retry: atomic_inc(&vdev->pm->reset_counter);
ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev)); atomic_set(&vdev->pm->reset_pending, 1);
if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) { down_write(&vdev->pm->reset_lock);
cond_resched();
goto retry;
}
if (ret && ret != -EAGAIN) ivpu_suspend(vdev);
ivpu_err(vdev, "Failed to reset VPU: %d\n", ret); ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt); kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev); pm_runtime_mark_last_busy(vdev->drm.dev);
@ -200,9 +211,6 @@ int ivpu_pm_suspend_cb(struct device *dev)
ivpu_suspend(vdev); ivpu_suspend(vdev);
ivpu_pm_prepare_warm_boot(vdev); ivpu_pm_prepare_warm_boot(vdev);
pci_save_state(to_pci_dev(dev));
pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
ivpu_dbg(vdev, PM, "Suspend done.\n"); ivpu_dbg(vdev, PM, "Suspend done.\n");
return 0; return 0;
@ -216,9 +224,6 @@ int ivpu_pm_resume_cb(struct device *dev)
ivpu_dbg(vdev, PM, "Resume..\n"); ivpu_dbg(vdev, PM, "Resume..\n");
pci_set_power_state(to_pci_dev(dev), PCI_D0);
pci_restore_state(to_pci_dev(dev));
ret = ivpu_resume(vdev); ret = ivpu_resume(vdev);
if (ret) if (ret)
ivpu_err(vdev, "Failed to resume: %d\n", ret); ivpu_err(vdev, "Failed to resume: %d\n", ret);

View file

@ -431,9 +431,6 @@ init_cpu_capacity_callback(struct notifier_block *nb,
struct cpufreq_policy *policy = data; struct cpufreq_policy *policy = data;
int cpu; int cpu;
if (!raw_capacity)
return 0;
if (val != CPUFREQ_CREATE_POLICY) if (val != CPUFREQ_CREATE_POLICY)
return 0; return 0;
@ -450,9 +447,11 @@ init_cpu_capacity_callback(struct notifier_block *nb,
} }
if (cpumask_empty(cpus_to_visit)) { if (cpumask_empty(cpus_to_visit)) {
topology_normalize_cpu_scale(); if (raw_capacity) {
schedule_work(&update_topology_flags_work); topology_normalize_cpu_scale();
free_raw_capacity(); schedule_work(&update_topology_flags_work);
free_raw_capacity();
}
pr_debug("cpu_capacity: parsing done\n"); pr_debug("cpu_capacity: parsing done\n");
schedule_work(&parsing_done_work); schedule_work(&parsing_done_work);
} }
@ -472,7 +471,7 @@ static int __init register_cpufreq_notifier(void)
* On ACPI-based systems skip registering cpufreq notifier as cpufreq * On ACPI-based systems skip registering cpufreq notifier as cpufreq
* information is not needed for cpu capacity initialization. * information is not needed for cpu capacity initialization.
*/ */
if (!acpi_disabled || !raw_capacity) if (!acpi_disabled)
return -EINVAL; return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))

View file

@ -125,7 +125,7 @@ static void __fwnode_link_del(struct fwnode_link *link)
*/ */
static void __fwnode_link_cycle(struct fwnode_link *link) static void __fwnode_link_cycle(struct fwnode_link *link)
{ {
pr_debug("%pfwf: Relaxing link with %pfwf\n", pr_debug("%pfwf: cycle: depends on %pfwf\n",
link->consumer, link->supplier); link->consumer, link->supplier);
link->flags |= FWLINK_FLAG_CYCLE; link->flags |= FWLINK_FLAG_CYCLE;
} }
@ -284,10 +284,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
return false; return false;
} }
#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
DL_FLAG_CYCLE | \
DL_FLAG_MANAGED)
static inline bool device_link_flag_is_sync_state_only(u32 flags) static inline bool device_link_flag_is_sync_state_only(u32 flags)
{ {
return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) == return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
(DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
} }
/** /**
@ -1943,6 +1945,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
/* Termination condition. */ /* Termination condition. */
if (sup_dev == con) { if (sup_dev == con) {
pr_debug("----- cycle: start -----\n");
ret = true; ret = true;
goto out; goto out;
} }
@ -1974,8 +1977,11 @@ static bool __fw_devlink_relax_cycles(struct device *con,
else else
par_dev = fwnode_get_next_parent_dev(sup_handle); par_dev = fwnode_get_next_parent_dev(sup_handle);
if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
par_dev->fwnode);
ret = true; ret = true;
}
if (!sup_dev) if (!sup_dev)
goto out; goto out;
@ -1991,6 +1997,8 @@ static bool __fw_devlink_relax_cycles(struct device *con,
if (__fw_devlink_relax_cycles(con, if (__fw_devlink_relax_cycles(con,
dev_link->supplier->fwnode)) { dev_link->supplier->fwnode)) {
pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
dev_link->supplier->fwnode);
fw_devlink_relax_link(dev_link); fw_devlink_relax_link(dev_link);
dev_link->flags |= DL_FLAG_CYCLE; dev_link->flags |= DL_FLAG_CYCLE;
ret = true; ret = true;
@ -2058,13 +2066,19 @@ static int fw_devlink_create_devlink(struct device *con,
/* /*
* SYNC_STATE_ONLY device links don't block probing and supports cycles. * SYNC_STATE_ONLY device links don't block probing and supports cycles.
* So cycle detection isn't necessary and shouldn't be done. * So, one might expect that cycle detection isn't necessary for them.
* However, if the device link was marked as SYNC_STATE_ONLY because
* it's part of a cycle, then we still need to do cycle detection. This
* is because the consumer and supplier might be part of multiple cycles
* and we need to detect all those cycles.
*/ */
if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) { if (!device_link_flag_is_sync_state_only(flags) ||
flags & DL_FLAG_CYCLE) {
device_links_write_lock(); device_links_write_lock();
if (__fw_devlink_relax_cycles(con, sup_handle)) { if (__fw_devlink_relax_cycles(con, sup_handle)) {
__fwnode_link_cycle(link); __fwnode_link_cycle(link);
flags = fw_devlink_get_flags(link->flags); flags = fw_devlink_get_flags(link->flags);
pr_debug("----- cycle: end -----\n");
dev_info(con, "Fixed dependency cycle(s) with %pfwf\n", dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
sup_handle); sup_handle);
} }

View file

@ -9,6 +9,23 @@
#define BLOCK_TEST_SIZE 12 #define BLOCK_TEST_SIZE 12
static void get_changed_bytes(void *orig, void *new, size_t size)
{
char *o = orig;
char *n = new;
int i;
get_random_bytes(new, size);
/*
* This could be nicer and more efficient but we shouldn't
* super care.
*/
for (i = 0; i < size; i++)
while (n[i] == o[i])
get_random_bytes(&n[i], 1);
}
static const struct regmap_config test_regmap_config = { static const struct regmap_config test_regmap_config = {
.max_register = BLOCK_TEST_SIZE, .max_register = BLOCK_TEST_SIZE,
.reg_stride = 1, .reg_stride = 1,
@ -1202,7 +1219,8 @@ static void raw_noinc_write(struct kunit *test)
struct regmap *map; struct regmap *map;
struct regmap_config config; struct regmap_config config;
struct regmap_ram_data *data; struct regmap_ram_data *data;
unsigned int val, val_test, val_last; unsigned int val;
u16 val_test, val_last;
u16 val_array[BLOCK_TEST_SIZE]; u16 val_array[BLOCK_TEST_SIZE];
config = raw_regmap_config; config = raw_regmap_config;
@ -1251,7 +1269,7 @@ static void raw_sync(struct kunit *test)
struct regmap *map; struct regmap *map;
struct regmap_config config; struct regmap_config config;
struct regmap_ram_data *data; struct regmap_ram_data *data;
u16 val[2]; u16 val[3];
u16 *hw_buf; u16 *hw_buf;
unsigned int rval; unsigned int rval;
int i; int i;
@ -1265,17 +1283,13 @@ static void raw_sync(struct kunit *test)
hw_buf = (u16 *)data->vals; hw_buf = (u16 *)data->vals;
get_random_bytes(&val, sizeof(val)); get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));
/* Do a regular write and a raw write in cache only mode */ /* Do a regular write and a raw write in cache only mode */
regcache_cache_only(map, true); regcache_cache_only(map, true);
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val))); KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
if (config.val_format_endian == REGMAP_ENDIAN_BIG) sizeof(u16) * 2));
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6, KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
be16_to_cpu(val[0])));
else
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
le16_to_cpu(val[0])));
/* We should read back the new values, and defaults for the rest */ /* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) { for (i = 0; i < config.max_register + 1; i++) {
@ -1284,24 +1298,34 @@ static void raw_sync(struct kunit *test)
switch (i) { switch (i) {
case 2: case 2:
case 3: case 3:
case 6:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) { if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval, KUNIT_EXPECT_EQ(test, rval,
be16_to_cpu(val[i % 2])); be16_to_cpu(val[i - 2]));
} else { } else {
KUNIT_EXPECT_EQ(test, rval, KUNIT_EXPECT_EQ(test, rval,
le16_to_cpu(val[i % 2])); le16_to_cpu(val[i - 2]));
} }
break; break;
case 4:
KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
break;
default: default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval); KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break; break;
} }
} }
/*
* The value written via _write() was translated by the core,
* translate the original copy for comparison purposes.
*/
if (config.val_format_endian == REGMAP_ENDIAN_BIG)
val[2] = cpu_to_be16(val[2]);
else
val[2] = cpu_to_le16(val[2]);
/* The values should not appear in the "hardware" */ /* The values should not appear in the "hardware" */
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val)); KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
for (i = 0; i < config.max_register + 1; i++) for (i = 0; i < config.max_register + 1; i++)
data->written[i] = false; data->written[i] = false;
@ -1312,8 +1336,7 @@ static void raw_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The values should now appear in the "hardware" */ /* The values should now appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val)); KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
regmap_exit(map); regmap_exit(map);
} }

View file

@ -108,9 +108,8 @@ static inline void send_msg(struct cn_msg *msg)
filter_data[1] = 0; filter_data[1] = 0;
} }
if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT, cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
cn_filter, (void *)filter_data) == -ESRCH) cn_filter, (void *)filter_data);
atomic_set(&proc_event_num_listeners, 0);
local_unlock(&local_event.lock); local_unlock(&local_event.lock);
} }

View file

@ -1199,6 +1199,7 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
unsigned long i; unsigned long i;
int ret = 0; int ret = 0;
mutex_lock(&dpll_lock);
xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED, xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
ctx->idx) { ctx->idx) {
if (!dpll_pin_available(pin)) if (!dpll_pin_available(pin))
@ -1218,6 +1219,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
} }
genlmsg_end(skb, hdr); genlmsg_end(skb, hdr);
} }
mutex_unlock(&dpll_lock);
if (ret == -EMSGSIZE) { if (ret == -EMSGSIZE) {
ctx->idx = i; ctx->idx = i;
return skb->len; return skb->len;
@ -1373,6 +1376,7 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
unsigned long i; unsigned long i;
int ret = 0; int ret = 0;
mutex_lock(&dpll_lock);
xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED, xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED,
ctx->idx) { ctx->idx) {
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
@ -1389,6 +1393,8 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
} }
genlmsg_end(skb, hdr); genlmsg_end(skb, hdr);
} }
mutex_unlock(&dpll_lock);
if (ret == -EMSGSIZE) { if (ret == -EMSGSIZE) {
ctx->idx = i; ctx->idx = i;
return skb->len; return skb->len;
@ -1439,20 +1445,6 @@ dpll_unlock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
mutex_unlock(&dpll_lock); mutex_unlock(&dpll_lock);
} }
int dpll_lock_dumpit(struct netlink_callback *cb)
{
mutex_lock(&dpll_lock);
return 0;
}
int dpll_unlock_dumpit(struct netlink_callback *cb)
{
mutex_unlock(&dpll_lock);
return 0;
}
int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info) struct genl_info *info)
{ {

View file

@@ -95,9 +95,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
 	},
 	{
 		.cmd = DPLL_CMD_DEVICE_GET,
-		.start = dpll_lock_dumpit,
 		.dumpit = dpll_nl_device_get_dumpit,
-		.done = dpll_unlock_dumpit,
 		.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
 	},
 	{
@@ -129,9 +127,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
 	},
 	{
 		.cmd = DPLL_CMD_PIN_GET,
-		.start = dpll_lock_dumpit,
 		.dumpit = dpll_nl_pin_get_dumpit,
-		.done = dpll_unlock_dumpit,
 		.policy = dpll_pin_get_dump_nl_policy,
 		.maxattr = DPLL_A_PIN_ID,
 		.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,

drivers/dpll/dpll_nl.h

@@ -30,8 +30,6 @@ dpll_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
 void
 dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
 		   struct genl_info *info);
-int dpll_lock_dumpit(struct netlink_callback *cb);
-int dpll_unlock_dumpit(struct netlink_callback *cb);

 int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info);
 int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info);

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -200,6 +200,7 @@ extern uint amdgpu_dc_debug_mask;
 extern uint amdgpu_dc_visual_confirm;
 extern uint amdgpu_dm_abm_level;
 extern int amdgpu_backlight;
+extern int amdgpu_damage_clips;
 extern struct amdgpu_mgpu_info mgpu_info;
 extern int amdgpu_ras_enable;
 extern uint amdgpu_ras_mask;
@@ -1549,9 +1550,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
 #else
 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
 #endif

 #if defined(CONFIG_DRM_AMD_DC)

drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c

@@ -1519,4 +1519,19 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 #endif /* CONFIG_AMD_PMC */
 }

+/**
+ * amdgpu_choose_low_power_state
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * Choose the target low power state for the GPU
+ */
+void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
+{
+	if (amdgpu_acpi_is_s0ix_active(adev))
+		adev->in_s0ix = true;
+	else if (amdgpu_acpi_is_s3_active(adev))
+		adev->in_s3 = true;
+}
+
 #endif /* CONFIG_SUSPEND */
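amdgpu_choose_low_power_state() only records the target state; roughly speaking, later suspend code can then branch on the adev->in_s0ix/adev->in_s3 hints. A sketch under that assumption (both do_*_prepare() helpers are hypothetical, not part of this commit):

	amdgpu_choose_low_power_state(adev);	/* sets in_s0ix or in_s3 */

	if (adev->in_s0ix)
		do_s0ix_prepare(adev);	/* hypothetical: lighter s2idle teardown */
	else if (adev->in_s3)
		do_s3_prepare(adev);	/* hypothetical: full S3 path */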

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -4514,13 +4514,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	int i, r;

+	amdgpu_choose_low_power_state(adev);
+
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;

 	/* Evict the majority of BOs before starting suspend sequence */
 	r = amdgpu_device_evict_resources(adev);
 	if (r)
-		return r;
+		goto unprepare;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
@@ -4529,10 +4531,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
 			continue;
 		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
 		if (r)
-			return r;
+			goto unprepare;
 	}

 	return 0;
+
+unprepare:
+	adev->in_s0ix = adev->in_s3 = false;
+
+	return r;
 }

 /**
@@ -4569,7 +4576,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 	drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);

 	cancel_delayed_work_sync(&adev->delayed_init_work);
-	flush_delayed_work(&adev->gfx.gfx_off_delay_work);

 	amdgpu_ras_suspend(adev);
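The amdgpu_device_prepare() hunks convert the early returns into a shared unprepare label so that a failure during prepare also rolls back the in_s0ix/in_s3 hints chosen at the top of the function. Condensed sketch of that control flow (do_prepare_work() is a hypothetical stand-in for the resource eviction and per-IP prepare_suspend() loop):

static int prepare_sketch(struct amdgpu_device *adev)
{
	int r;

	amdgpu_choose_low_power_state(adev);	/* may set in_s0ix or in_s3 */

	r = do_prepare_work(adev);		/* hypothetical stand-in */
	if (r)
		goto unprepare;

	return 0;

unprepare:
	adev->in_s0ix = adev->in_s3 = false;	/* roll the hints back on failure */
	return r;
}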

Some files were not shown because too many files have changed in this diff.