Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Adjacent changes:

kernel/bpf/verifier.c
  829955981c ("bpf: Fix verifier log for async callback return values")
  a923819fb2 ("bpf: Treat first argument as return value for bpf_throw")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 0e6bb5b7f4
Jakub Kicinski, 2023-10-12 16:17:46 -07:00
251 changed files with 3342 additions and 1393 deletions


@ -71,6 +71,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2658417 | ARM64_ERRATUM_2658417 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A520 | #2966298 | ARM64_ERRATUM_2966298 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |


@ -73,9 +73,6 @@ patternProperties:
"^.*@[0-9a-f]+$":
description: Devices attached to the bus
type: object
properties:
reg:
maxItems: 1
required:
- reg


@ -69,7 +69,7 @@ examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
cache-controller@2010000 {
cache-controller@13400000 {
compatible = "andestech,ax45mp-cache", "cache";
reg = <0x13400000 0x100000>;
interrupts = <508 IRQ_TYPE_LEVEL_HIGH>;


@ -87,7 +87,7 @@ required:
- interrupts
- ports
additionalProperties: false
unevaluatedProperties: false
examples:
- |


@ -106,6 +106,12 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
maximum: 4096
dma-noncoherent:
description:
Present if the GIC redistributors permit programming shareability
and cacheability attributes but are connected to a non-coherent
downstream interconnect.
msi-controller:
description:
Only present if the Message Based Interrupt functionality is
@ -193,6 +199,12 @@ patternProperties:
compatible:
const: arm,gic-v3-its
dma-noncoherent:
description:
Present if the GIC ITS permits programming shareability and
cacheability attributes but is connected to a non-coherent
downstream interconnect.
msi-controller: true
"#msi-cells":


@ -37,6 +37,7 @@ properties:
- renesas,intc-ex-r8a77990 # R-Car E3
- renesas,intc-ex-r8a77995 # R-Car D3
- renesas,intc-ex-r8a779a0 # R-Car V3U
- renesas,intc-ex-r8a779f0 # R-Car S4-8
- renesas,intc-ex-r8a779g0 # R-Car V4H
- const: renesas,irqc


@ -19,20 +19,19 @@ description: |
- NMI edge select (NMI is not treated as NMI exception and supports fall edge and
stand-up edge detection interrupts)
allOf:
- $ref: /schemas/interrupt-controller.yaml#
properties:
compatible:
items:
- enum:
- renesas,r9a07g043u-irqc # RZ/G2UL
- renesas,r9a07g044-irqc # RZ/G2{L,LC}
- renesas,r9a07g054-irqc # RZ/V2L
- const: renesas,rzg2l-irqc
'#interrupt-cells':
description: The first cell should contain external interrupt number (IRQ0-7) and the
second cell is used to specify the flag.
description: The first cell should contain a macro RZG2L_{NMI,IRQX} included in the
include/dt-bindings/interrupt-controller/irqc-rzg2l.h and the second
cell is used to specify the flag.
const: 2
'#address-cells':
@ -44,7 +43,96 @@ properties:
maxItems: 1
interrupts:
maxItems: 41
minItems: 41
items:
- description: NMI interrupt
- description: IRQ0 interrupt
- description: IRQ1 interrupt
- description: IRQ2 interrupt
- description: IRQ3 interrupt
- description: IRQ4 interrupt
- description: IRQ5 interrupt
- description: IRQ6 interrupt
- description: IRQ7 interrupt
- description: GPIO interrupt, TINT0
- description: GPIO interrupt, TINT1
- description: GPIO interrupt, TINT2
- description: GPIO interrupt, TINT3
- description: GPIO interrupt, TINT4
- description: GPIO interrupt, TINT5
- description: GPIO interrupt, TINT6
- description: GPIO interrupt, TINT7
- description: GPIO interrupt, TINT8
- description: GPIO interrupt, TINT9
- description: GPIO interrupt, TINT10
- description: GPIO interrupt, TINT11
- description: GPIO interrupt, TINT12
- description: GPIO interrupt, TINT13
- description: GPIO interrupt, TINT14
- description: GPIO interrupt, TINT15
- description: GPIO interrupt, TINT16
- description: GPIO interrupt, TINT17
- description: GPIO interrupt, TINT18
- description: GPIO interrupt, TINT19
- description: GPIO interrupt, TINT20
- description: GPIO interrupt, TINT21
- description: GPIO interrupt, TINT22
- description: GPIO interrupt, TINT23
- description: GPIO interrupt, TINT24
- description: GPIO interrupt, TINT25
- description: GPIO interrupt, TINT26
- description: GPIO interrupt, TINT27
- description: GPIO interrupt, TINT28
- description: GPIO interrupt, TINT29
- description: GPIO interrupt, TINT30
- description: GPIO interrupt, TINT31
- description: Bus error interrupt
interrupt-names:
minItems: 41
items:
- const: nmi
- const: irq0
- const: irq1
- const: irq2
- const: irq3
- const: irq4
- const: irq5
- const: irq6
- const: irq7
- const: tint0
- const: tint1
- const: tint2
- const: tint3
- const: tint4
- const: tint5
- const: tint6
- const: tint7
- const: tint8
- const: tint9
- const: tint10
- const: tint11
- const: tint12
- const: tint13
- const: tint14
- const: tint15
- const: tint16
- const: tint17
- const: tint18
- const: tint19
- const: tint20
- const: tint21
- const: tint22
- const: tint23
- const: tint24
- const: tint25
- const: tint26
- const: tint27
- const: tint28
- const: tint29
- const: tint30
- const: tint31
- const: bus-err
clocks:
maxItems: 2
@ -72,6 +160,23 @@ required:
- power-domains
- resets
allOf:
- $ref: /schemas/interrupt-controller.yaml#
- if:
properties:
compatible:
contains:
const: renesas,r9a07g043u-irqc
then:
properties:
interrupts:
minItems: 42
interrupt-names:
minItems: 42
required:
- interrupt-names
unevaluatedProperties: false
examples:
@ -80,55 +185,66 @@ examples:
#include <dt-bindings/clock/r9a07g044-cpg.h>
irqc: interrupt-controller@110a0000 {
compatible = "renesas,r9a07g044-irqc", "renesas,rzg2l-irqc";
reg = <0x110a0000 0x10000>;
#interrupt-cells = <2>;
#address-cells = <0>;
interrupt-controller;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 455 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 460 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G044_IA55_CLK>,
<&cpg CPG_MOD R9A07G044_IA55_PCLK>;
clock-names = "clk", "pclk";
power-domains = <&cpg>;
resets = <&cpg R9A07G044_IA55_RESETN>;
compatible = "renesas,r9a07g044-irqc", "renesas,rzg2l-irqc";
reg = <0x110a0000 0x10000>;
#interrupt-cells = <2>;
#address-cells = <0>;
interrupt-controller;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 455 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 460 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "nmi",
"irq0", "irq1", "irq2", "irq3",
"irq4", "irq5", "irq6", "irq7",
"tint0", "tint1", "tint2", "tint3",
"tint4", "tint5", "tint6", "tint7",
"tint8", "tint9", "tint10", "tint11",
"tint12", "tint13", "tint14", "tint15",
"tint16", "tint17", "tint18", "tint19",
"tint20", "tint21", "tint22", "tint23",
"tint24", "tint25", "tint26", "tint27",
"tint28", "tint29", "tint30", "tint31";
clocks = <&cpg CPG_MOD R9A07G044_IA55_CLK>,
<&cpg CPG_MOD R9A07G044_IA55_PCLK>;
clock-names = "clk", "pclk";
power-domains = <&cpg>;
resets = <&cpg R9A07G044_IA55_RESETN>;
};


@ -54,6 +54,7 @@ properties:
port:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
properties:
endpoint:


@ -69,6 +69,7 @@ properties:
properties:
port@0:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description: Input port
properties:
@ -89,6 +90,7 @@ properties:
port@1:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description: Output port
properties:


@ -59,7 +59,6 @@ allOf:
compatible:
contains:
enum:
- fsl,imx8mq-csi
- fsl,imx8mm-csi
then:
required:


@ -95,7 +95,7 @@ properties:
synchronization is selected.
default: 1
field-active-even: true
field-even-active: true
bus-width: true
@ -144,7 +144,7 @@ properties:
synchronization is selected.
default: 1
field-active-even: true
field-even-active: true
bus-width: true


@ -57,6 +57,7 @@ properties:
patternProperties:
"^port@[01]$":
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description:
Camera A and camera B inputs.


@ -12,7 +12,6 @@ maintainers:
allOf:
- $ref: /schemas/pci/pci-bus.yaml#
- $ref: /schemas/interrupt-controller/msi-controller.yaml#
properties:
compatible:
@ -34,13 +33,6 @@ properties:
description: >
Base address and length of the PCIe controller I/O register space
interrupt-map: true
interrupt-map-mask: true
"#interrupt-cells":
const: 1
ranges:
minItems: 1
maxItems: 2
@ -54,16 +46,8 @@ properties:
items:
- const: pcie-phy
bus-range: true
dma-coherent: true
"#address-cells": true
"#size-cells": true
device_type: true
brcm,pcie-ob:
type: boolean
description: >
@ -78,21 +62,25 @@ properties:
msi:
type: object
$ref: /schemas/interrupt-controller/msi-controller.yaml#
unevaluatedProperties: false
properties:
compatible:
items:
- const: brcm,iproc-msi
interrupts:
maxItems: 4
brcm,pcie-msi-inten:
type: boolean
description:
Needs to be present for some older iProc platforms that require the
interrupt enable registers to be set explicitly to enable MSI
msi-parent: true
msi-controller: true
brcm,pcie-msi-inten:
type: boolean
description: >
Needs to be present for some older iProc platforms that require the
interrupt enable registers to be set explicitly to enable MSI
dependencies:
brcm,pcie-ob-axi-offset: ["brcm,pcie-ob"]
brcm,pcie-msi-inten: [msi-controller]
@ -117,68 +105,69 @@ unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
bus {
#address-cells = <1>;
#size-cells = <1>;
pcie0: pcie@18012000 {
compatible = "brcm,iproc-pcie";
reg = <0x18012000 0x1000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
linux,pci-domain = <0>;
bus-range = <0x00 0xff>;
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
ranges = <0x81000000 0 0 0x28000000 0 0x00010000>,
<0x82000000 0 0x20000000 0x20000000 0 0x04000000>;
phys = <&phy 0 5>;
phy-names = "pcie-phy";
brcm,pcie-ob;
brcm,pcie-ob-axi-offset = <0x00000000>;
msi-parent = <&msi0>;
/* iProc event queue based MSI */
msi0: msi {
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
<GIC_SPI 97 IRQ_TYPE_NONE>,
<GIC_SPI 98 IRQ_TYPE_NONE>,
<GIC_SPI 99 IRQ_TYPE_NONE>;
};
};
pcie1: pcie@18013000 {
compatible = "brcm,iproc-pcie";
reg = <0x18013000 0x1000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
linux,pci-domain = <1>;
bus-range = <0x00 0xff>;
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
ranges = <0x81000000 0 0 0x48000000 0 0x00010000>,
<0x82000000 0 0x40000000 0x40000000 0 0x04000000>;
phys = <&phy 1 6>;
phy-names = "pcie-phy";
};
gic: interrupt-controller {
interrupt-controller;
#interrupt-cells = <3>;
};
pcie@18012000 {
compatible = "brcm,iproc-pcie";
reg = <0x18012000 0x1000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
linux,pci-domain = <0>;
bus-range = <0x00 0xff>;
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
ranges = <0x81000000 0 0 0x28000000 0 0x00010000>,
<0x82000000 0 0x20000000 0x20000000 0 0x04000000>;
phys = <&phy 0 5>;
phy-names = "pcie-phy";
brcm,pcie-ob;
brcm,pcie-ob-axi-offset = <0x00000000>;
msi-parent = <&msi0>;
/* iProc event queue based MSI */
msi0: msi {
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
<GIC_SPI 97 IRQ_TYPE_NONE>,
<GIC_SPI 98 IRQ_TYPE_NONE>,
<GIC_SPI 99 IRQ_TYPE_NONE>;
};
};
- |
pcie@18013000 {
compatible = "brcm,iproc-pcie";
reg = <0x18013000 0x1000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
linux,pci-domain = <1>;
bus-range = <0x00 0xff>;
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
ranges = <0x81000000 0 0 0x48000000 0 0x00010000>,
<0x82000000 0 0x40000000 0x40000000 0 0x04000000>;
phys = <&phy 1 6>;
phy-names = "pcie-phy";
};


@ -91,6 +91,7 @@ properties:
interrupt-controller:
type: object
additionalProperties: false
description: Describes the CPU's local interrupt controller
properties:


@ -56,6 +56,9 @@ properties:
- const: clkext3
minItems: 2
"#sound-dai-cells":
const: 0
required:
- compatible
- reg


@ -26,6 +26,7 @@ properties:
- const: rockchip,rk3568-spdif
- items:
- enum:
- rockchip,rk3128-spdif
- rockchip,rk3188-spdif
- rockchip,rk3288-spdif
- rockchip,rk3308-spdif


@ -232,7 +232,7 @@ properties:
# MEMSIC magnetometer
- memsic,mmc35240
# MEMSIC 3-axis accelerometer
- memsic,mx4005
- memsic,mxc4005
# MEMSIC 2-axis 8-bit digital accelerometer
- memsic,mxc6225
# MEMSIC 2-axis 8-bit digital accelerometer


@ -58,12 +58,14 @@ Here are the main features of EROFS:
- Support extended attributes as an option;
- Support a bloom filter that speeds up negative extended attribute lookups;
- Support POSIX.1e ACLs by using extended attributes;
- Support transparent data compression as an option:
LZ4 and MicroLZMA algorithms can be used on a per-file basis; In addition,
inplace decompression is also supported to avoid bounce compressed buffers
and page cache thrashing.
LZ4, MicroLZMA and DEFLATE algorithms can be used on a per-file basis; In
addition, inplace decompression is also supported to avoid bounce compressed
buffers and unnecessary page cache thrashing.
- Support chunk-based data deduplication and rolling-hash compressed data
deduplication;
@ -268,6 +270,38 @@ details.)
By the way, chunk-based files are all uncompressed for now.
Long extended attribute name prefixes
-------------------------------------
There are use cases where extended attributes with different values can have
only a few common prefixes (such as overlayfs xattrs). The predefined prefixes
work inefficiently in both image size and runtime performance in such cases.
The long xattr name prefixes feature is introduced to address this issue. The
overall idea is that, apart from the existing predefined prefixes, the xattr
entry could also refer to user-specified long xattr name prefixes, e.g.
"trusted.overlay.".
When referring to a long xattr name prefix, the highest bit (bit 7) of
erofs_xattr_entry.e_name_index is set, while the lower bits (bit 0-6) as a whole
represent the index of the referred long name prefix among all long name
prefixes. Therefore, only the trailing part of the name apart from the long
xattr name prefix is stored in erofs_xattr_entry.e_name, which could be empty if
the full xattr name matches exactly as its long xattr name prefix.
All long xattr prefixes are stored one by one in the packed inode as long as
the packed inode is valid, or in the meta inode otherwise. The
xattr_prefix_count (of the on-disk superblock) indicates the total number of
long xattr name prefixes, while (xattr_prefix_start * 4) indicates the start
offset of long name prefixes in the packed/meta inode. Note that, long extended
attribute name prefixes are disabled if xattr_prefix_count is 0.
Each long name prefix is stored in the format: ALIGN({__le16 len, data}, 4),
where len represents the total size of the data part. The data part is actually
represented by 'struct erofs_xattr_long_prefix', where base_index represents the
index of the predefined xattr name prefix, e.g. EROFS_XATTR_INDEX_TRUSTED for
"trusted.overlay." long name prefix, while the infix string keeps the string
after stripping the short prefix, e.g. "overlay." for the example above.
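A minimal C sketch of the layout described above (an illustrative reading of
this text, not the exact definitions in fs/erofs/erofs_fs.h; the helper names
are hypothetical)::

  /* assumes <linux/types.h> for bool/__u8/__le16 */
  struct erofs_xattr_long_prefix {
          __u8 base_index;  /* predefined short prefix, e.g. EROFS_XATTR_INDEX_TRUSTED */
          char infix[];     /* string after the short prefix, e.g. "overlay." */
  };
  /* each record in the packed/meta inode is stored as ALIGN({__le16 len, data}, 4),
   * where data is the struct above and len is the size of that data part */

  /* decoding e_name_index of an xattr entry that may refer to a long prefix */
  static inline bool xattr_uses_long_prefix(__u8 e_name_index)
  {
          return e_name_index & 0x80;             /* bit 7 set */
  }

  static inline unsigned int xattr_long_prefix_index(__u8 e_name_index)
  {
          return e_name_index & 0x7f;             /* bits 0-6: prefix index */
  }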
Data compression
----------------
EROFS implements fixed-sized output compression which generates fixed-sized


@ -1583,6 +1583,17 @@ F: arch/arm/include/asm/arch_timer.h
F: arch/arm64/include/asm/arch_timer.h
F: drivers/clocksource/arm_arch_timer.c
ARM GENERIC INTERRUPT CONTROLLER DRIVERS
M: Marc Zyngier <maz@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/interrupt-controller/arm,gic*
F: arch/arm/include/asm/arch_gicv3.h
F: arch/arm64/include/asm/arch_gicv3.h
F: drivers/irqchip/irq-gic*.[ch]
F: include/linux/irqchip/arm-gic*.h
F: include/linux/irqchip/arm-vgic-info.h
ARM HDLCD DRM DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
@ -2209,21 +2220,28 @@ F: arch/arm/boot/dts/ti/omap/omap3-igep*
ARM/INTEL IXP4XX ARM ARCHITECTURE
M: Linus Walleij <linusw@kernel.org>
M: Imre Kaloz <kaloz@openwrt.org>
M: Krzysztof Halasa <khalasa@piap.pl>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
F: Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt
F: Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
F: Documentation/devicetree/bindings/memory-controllers/intel,ixp4xx-expansion*
F: Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml
F: Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
F: arch/arm/boot/dts/intel/ixp/
F: arch/arm/mach-ixp4xx/
F: drivers/bus/intel-ixp4xx-eb.c
F: drivers/char/hw_random/ixp4xx-rng.c
F: drivers/clocksource/timer-ixp4xx.c
F: drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
F: drivers/gpio/gpio-ixp4xx.c
F: drivers/irqchip/irq-ixp4xx.c
F: drivers/net/ethernet/xscale/ixp4xx_eth.c
F: drivers/net/wan/ixp4xx_hss.c
F: drivers/soc/ixp4xx/ixp4xx-npe.c
F: drivers/soc/ixp4xx/ixp4xx-qmgr.c
F: include/linux/soc/ixp4xx/npe.h
F: include/linux/soc/ixp4xx/qmgr.h
ARM/INTEL KEEMBAY ARCHITECTURE
M: Paul J. Murphy <paul.j.murphy@intel.com>
@ -2325,7 +2343,7 @@ F: drivers/rtc/rtc-mt7622.c
ARM/Mediatek SoC support
M: Matthias Brugger <matthias.bgg@gmail.com>
R: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
M: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
@ -5977,8 +5995,8 @@ F: include/linux/devm-helpers.h
DEVICE-MAPPER (LVM)
M: Alasdair Kergon <agk@redhat.com>
M: Mike Snitzer <snitzer@kernel.org>
M: dm-devel@redhat.com
L: dm-devel@redhat.com
M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
W: http://sources.redhat.com/dm
Q: http://patchwork.kernel.org/project/dm-devel/list/
@ -10623,22 +10641,6 @@ L: linux-crypto@vger.kernel.org
S: Maintained
F: drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
M: Krzysztof Halasa <khalasa@piap.pl>
S: Maintained
F: drivers/net/ethernet/xscale/ixp4xx_eth.c
F: drivers/net/wan/ixp4xx_hss.c
F: drivers/soc/ixp4xx/ixp4xx-npe.c
F: drivers/soc/ixp4xx/ixp4xx-qmgr.c
F: include/linux/soc/ixp4xx/npe.h
F: include/linux/soc/ixp4xx/qmgr.h
INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
M: Deepak Saxena <dsaxena@plexity.net>
S: Maintained
F: Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml
F: drivers/char/hw_random/ixp4xx-rng.c
INTEL KEEM BAY DRM DRIVER
M: Anitha Chrisanthus <anitha.chrisanthus@intel.com>
M: Edmund Dea <edmund.j.dea@intel.com>
@ -11064,7 +11066,7 @@ F: Documentation/devicetree/bindings/sound/irondevice,*
F: sound/soc/codecs/sma*
IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
M: Marc Zyngier <maz@kernel.org>
M: Thomas Gleixner <tglx@linutronix.de>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: Documentation/core-api/irq/irq-domain.rst
@ -11083,7 +11085,6 @@ F: lib/group_cpus.c
IRQCHIP DRIVERS
M: Thomas Gleixner <tglx@linutronix.de>
M: Marc Zyngier <maz@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@ -20489,6 +20490,7 @@ F: include/dt-bindings/clock/starfive?jh71*.h
STARFIVE JH71X0 PINCTRL DRIVERS
M: Emil Renner Berthing <kernel@esmil.dk>
M: Jianlong Huang <jianlong.huang@starfivetech.com>
M: Hal Feng <hal.feng@starfivetech.com>
L: linux-gpio@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/starfive,jh71*.yaml


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -1037,6 +1037,19 @@ config ARM64_ERRATUM_2645198
If unsure, say Y.
config ARM64_ERRATUM_2966298
bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load"
default y
help
This option adds the workaround for ARM Cortex-A520 erratum 2966298.
On an affected Cortex-A520 core, a speculatively executed unprivileged
load might leak data from a privileged level via a cache side channel.
Work around this problem by executing a TLBI before returning to EL0.
If unsure, say Y.
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y


@ -185,7 +185,7 @@ aips1: bus@44000000 {
#size-cells = <1>;
ranges;
anomix_ns_gpr: syscon@44210000 {
aonmix_ns_gpr: syscon@44210000 {
compatible = "fsl,imx93-aonmix-ns-syscfg", "syscon";
reg = <0x44210000 0x1000>;
};
@ -319,6 +319,7 @@ flexcan1: can@443a0000 {
assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
assigned-clock-rates = <40000000>;
fsl,clk-source = /bits/ 8 <0>;
fsl,stop-mode = <&aonmix_ns_gpr 0x14 0>;
status = "disabled";
};
@ -591,6 +592,7 @@ flexcan2: can@425b0000 {
assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
assigned-clock-rates = <40000000>;
fsl,clk-source = /bits/ 8 <0>;
fsl,stop-mode = <&wakeupmix_gpr 0x0c 2>;
status = "disabled";
};


@ -905,7 +905,7 @@ sata: sata@1a200000 {
status = "disabled";
};
sata_phy: t-phy@1a243000 {
sata_phy: t-phy {
compatible = "mediatek,mt7622-tphy",
"mediatek,generic-tphy-v1";
#address-cells = <2>;


@ -434,7 +434,7 @@ pcie_intc: interrupt-controller {
};
};
pcie_phy: t-phy@11c00000 {
pcie_phy: t-phy {
compatible = "mediatek,mt7986-tphy",
"mediatek,generic-tphy-v2";
#address-cells = <2>;


@ -48,7 +48,7 @@ key-0 {
memory@40000000 {
device_type = "memory";
reg = <0 0x40000000 0 0x80000000>;
reg = <0 0x40000000 0x2 0x00000000>;
};
reserved-memory {
@ -56,13 +56,8 @@ reserved-memory {
#size-cells = <2>;
ranges;
/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
bl31_secmon_reserved: secmon@54600000 {
no-map;
reg = <0 0x54600000 0x0 0x200000>;
};
/* 12 MiB reserved for OP-TEE (BL32)
/*
* 12 MiB reserved for OP-TEE (BL32)
* +-----------------------+ 0x43e0_0000
* | SHMEM 2MiB |
* +-----------------------+ 0x43c0_0000
@ -75,6 +70,34 @@ optee_reserved: optee@43200000 {
no-map;
reg = <0 0x43200000 0 0x00c00000>;
};
scp_mem: memory@50000000 {
compatible = "shared-dma-pool";
reg = <0 0x50000000 0 0x2900000>;
no-map;
};
vpu_mem: memory@53000000 {
compatible = "shared-dma-pool";
reg = <0 0x53000000 0 0x1400000>; /* 20 MB */
};
/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
bl31_secmon_mem: memory@54600000 {
no-map;
reg = <0 0x54600000 0x0 0x200000>;
};
snd_dma_mem: memory@60000000 {
compatible = "shared-dma-pool";
reg = <0 0x60000000 0 0x1100000>;
no-map;
};
apu_mem: memory@62000000 {
compatible = "shared-dma-pool";
reg = <0 0x62000000 0 0x1400000>; /* 20 MB */
};
};
};


@ -313,6 +313,7 @@ dsu-pmu {
interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH 0>;
cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>,
<&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
status = "fail";
};
dmic_codec: dmic-codec {


@ -3958,7 +3958,7 @@ dispcc: clock-controller@af00000 {
pdc: interrupt-controller@b220000 {
compatible = "qcom,sm8150-pdc", "qcom,pdc";
reg = <0 0x0b220000 0 0x400>;
reg = <0 0x0b220000 0 0x30000>;
qcom,pdc-ranges = <0 480 94>, <94 609 31>,
<125 63 1>;
#interrupt-cells = <2>;


@ -9,6 +9,7 @@
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
#include <linux/cpuidle.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/psci.h>
@ -44,6 +45,24 @@
#define ACPI_MADT_GICC_TRBE (offsetof(struct acpi_madt_generic_interrupt, \
trbe_interrupt) + sizeof(u16))
/*
* Arm® Functional Fixed Hardware Specification Version 1.2.
* Table 2: Arm Architecture context loss flags
*/
#define CPUIDLE_CORE_CTXT BIT(0) /* Core context Lost */
static inline unsigned int arch_get_idle_state_flags(u32 arch_flags)
{
if (arch_flags & CPUIDLE_CORE_CTXT)
return CPUIDLE_FLAG_TIMER_STOP;
return 0;
}
#define arch_get_idle_state_flags arch_get_idle_state_flags
#define CPUIDLE_TRACE_CTXT BIT(1) /* Trace context loss */
#define CPUIDLE_GICR_CTXT BIT(2) /* GICR */
#define CPUIDLE_GICD_CTXT BIT(3) /* GICD */
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI


@ -79,6 +79,7 @@
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_A520 0xD80
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_A715 0xD4D
#define ARM_CPU_PART_CORTEX_X2 0xD48
@ -148,6 +149,7 @@
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)


@ -730,6 +730,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.cpu_enable = cpu_clear_bf16_from_user_emulation,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
{
.desc = "ARM erratum 2966298",
.capability = ARM64_WORKAROUND_2966298,
/* Cortex-A520 r0p0 - r0p1 */
ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
{
.desc = "AmpereOne erratum AC03_CPU_38",


@ -428,6 +428,10 @@ alternative_else_nop_endif
ldp x28, x29, [sp, #16 * 14]
.if \el == 0
alternative_if ARM64_WORKAROUND_2966298
tlbi vale1, xzr
dsb nsh
alternative_else_nop_endif
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
ldr lr, [sp, #S_LR]
add sp, sp, #PT_REGS_SIZE // restore sp


@ -84,6 +84,7 @@ WORKAROUND_2077057
WORKAROUND_2457168
WORKAROUND_2645198
WORKAROUND_2658417
WORKAROUND_2966298
WORKAROUND_AMPERE_AC03_CPU_38
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE


@ -2,39 +2,42 @@
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H
#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
and GCC only guarantees 8-byte alignment for stack locals, we can't
be assured of 16-byte alignment for atomic lock data even if we
specify "__attribute ((aligned(16)))" in the type declaration. So,
we use a struct containing an array of four ints for the atomic lock
type and dynamically select the 16-byte aligned int from the array
for the semaphore. */
for the semaphore. */
/* From: "Jim Hull" <jim.hull of hp.com>
I've attached a summary of the change, but basically, for PA 2.0, as
long as the ",CO" (coherent operation) completer is implemented, then the
16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
they only require "natural" alignment (4-byte for ldcw, 8-byte for
ldcd).
Although the cache control hint is accepted by all PA 2.0 processors,
it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
require 16-byte alignment. If the address is unaligned, the operation
of the instruction is undefined. The ldcw instruction does not generate
unaligned data reference traps so misaligned accesses are not detected.
This hid the problem for years. So, restore the 16-byte alignment dropped
by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
#define __PA_LDCW_ALIGNMENT 16
#define __PA_LDCW_ALIGN_ORDER 4
#define __ldcw_align(a) ({ \
unsigned long __ret = (unsigned long) &(a)->lock[0]; \
__ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
& ~(__PA_LDCW_ALIGNMENT - 1); \
(volatile unsigned int *) __ret; \
})
#define __LDCW "ldcw"
#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
I've attached a summary of the change, but basically, for PA 2.0, as
long as the ",CO" (coherent operation) completer is specified, then the
16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
they only require "natural" alignment (4-byte for ldcw, 8-byte for
ldcd). */
#define __PA_LDCW_ALIGNMENT 4
#define __PA_LDCW_ALIGN_ORDER 2
#define __ldcw_align(a) (&(a)->slock)
#ifdef CONFIG_PA20
#define __LDCW "ldcw,co"
#endif /*!CONFIG_PA20*/
#else
#define __LDCW "ldcw"
#endif
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
We don't explicitly expose that "*a" may be written as reload


@ -9,15 +9,10 @@
#ifndef __ASSEMBLY__
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
#else
volatile unsigned int lock[4];
# define __ARCH_SPIN_LOCK_UNLOCKED \
{ { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
__ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
#endif
} arch_spinlock_t;


@ -440,7 +440,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
if (cpu_online(cpu))
return 0;
if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
if (num_online_cpus() < nr_cpu_ids &&
num_online_cpus() < setup_max_cpus &&
smp_boot_one_cpu(cpu, tidle))
return -EIO;
return cpu_online(cpu) ? 0 : -EIO;


@ -245,7 +245,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
/* Set return value. */
if (!is_tail_call)
emit_mv(RV_REG_A0, RV_REG_A5, ctx);
emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */
ctx);
@ -759,8 +759,10 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
if (ret)
return ret;
if (save_ret)
emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
if (save_ret) {
emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
}
/* update branch with beqz */
if (ctx->insns) {
@ -853,7 +855,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
if (save_ret) {
stack_size += 8;
stack_size += 16; /* Save both A5 (BPF R0) and A0 */
retval_off = stack_size;
}
@ -957,6 +959,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
if (ret)
goto out;
emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
im->ip_after_call = ctx->insns + ctx->ninsns;
/* 2 nops reserved for auipc+jalr pair */
emit(rv_nop(), ctx);
@ -988,8 +991,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
if (flags & BPF_TRAMP_F_RESTORE_REGS)
restore_args(nregs, args_off, ctx);
if (save_ret)
if (save_ret) {
emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
emit_ld(regmap[BPF_REG_0], -(retval_off - 8), RV_REG_FP, ctx);
}
emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
@ -1515,7 +1520,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
if (ret)
return ret;
emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
if (insn->src_reg != BPF_PSEUDO_CALL)
emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
break;
}
/* tail call */


@ -2066,6 +2066,7 @@ struct bpf_tramp_jit {
* func_addr's original caller
*/
int stack_size; /* Trampoline stack size */
int backchain_off; /* Offset of backchain */
int stack_args_off; /* Offset of stack arguments for calling
* func_addr, has to be at the top
*/
@ -2086,9 +2087,10 @@ struct bpf_tramp_jit {
* for __bpf_prog_enter() return value and
* func_addr respectively
*/
int r14_off; /* Offset of saved %r14 */
int run_ctx_off; /* Offset of struct bpf_tramp_run_ctx */
int tccnt_off; /* Offset of saved tailcall counter */
int r14_off; /* Offset of saved %r14, has to be at the
* bottom */
int do_fexit; /* do_fexit: label */
};
@ -2247,8 +2249,12 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
* Calculate the stack layout.
*/
/* Reserve STACK_FRAME_OVERHEAD bytes for the callees. */
/*
* Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x
* ABI requires, put our backchain at the end of the allocated memory.
*/
tjit->stack_size = STACK_FRAME_OVERHEAD;
tjit->backchain_off = tjit->stack_size - sizeof(u64);
tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
tjit->ip_off = alloc_stack(tjit, sizeof(u64));
@ -2256,16 +2262,25 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
tjit->retval_off = alloc_stack(tjit, sizeof(u64));
tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
tjit->r14_off = alloc_stack(tjit, sizeof(u64));
tjit->run_ctx_off = alloc_stack(tjit,
sizeof(struct bpf_tramp_run_ctx));
tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
/* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
tjit->stack_size -= STACK_FRAME_OVERHEAD;
tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
/*
* In accordance with the s390x ABI, the caller has allocated
* STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's
* backchain, and the rest we can use.
*/
tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
/* lgr %r1,%r15 */
EMIT4(0xb9040000, REG_1, REG_15);
/* aghi %r15,-stack_size */
EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
/* stg %r1,backchain_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
tjit->backchain_off);
/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
_EMIT6(0xd203f000 | tjit->tccnt_off,
0xf000 | (tjit->stack_size + STK_OFF_TCCNT));


@ -7,6 +7,8 @@
* Author : K. Y. Srinivasan <kys@microsoft.com>
*/
#define pr_fmt(fmt) "Hyper-V: " fmt
#include <linux/efi.h>
#include <linux/types.h>
#include <linux/bitfield.h>
@ -191,7 +193,7 @@ void set_hv_tscchange_cb(void (*cb)(void))
struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};
if (!hv_reenlightenment_available()) {
pr_warn("Hyper-V: reenlightenment support is unavailable\n");
pr_warn("reenlightenment support is unavailable\n");
return;
}
@ -394,6 +396,7 @@ static void __init hv_get_partition_id(void)
local_irq_restore(flags);
}
#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
static u8 __init get_vtl(void)
{
u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
@ -416,13 +419,16 @@ static u8 __init get_vtl(void)
if (hv_result_success(ret)) {
ret = output->as64.low & HV_X64_VTL_MASK;
} else {
pr_err("Failed to get VTL(%lld) and set VTL to zero by default.\n", ret);
ret = 0;
pr_err("Failed to get VTL(error: %lld) exiting...\n", ret);
BUG();
}
local_irq_restore(flags);
return ret;
}
#else
static inline u8 get_vtl(void) { return 0; }
#endif
/*
* This function is to be invoked early in the boot sequence after the
@ -564,7 +570,7 @@ void __init hyperv_init(void)
if (cpu_feature_enabled(X86_FEATURE_IBT) &&
*(u32 *)hv_hypercall_pg != gen_endbr()) {
setup_clear_cpu_cap(X86_FEATURE_IBT);
pr_warn("Hyper-V: Disabling IBT because of Hyper-V bug\n");
pr_warn("Disabling IBT because of Hyper-V bug\n");
}
#endif
@ -604,8 +610,10 @@ void __init hyperv_init(void)
hv_query_ext_cap(0);
/* Find the VTL */
if (!ms_hyperv.paravisor_present && hv_isolation_type_snp())
ms_hyperv.vtl = get_vtl();
ms_hyperv.vtl = get_vtl();
if (ms_hyperv.vtl > 0) /* non default VTL */
hv_vtl_early_init();
return;


@ -215,7 +215,7 @@ static int hv_vtl_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
return hv_vtl_bringup_vcpu(vp_id, start_eip);
}
static int __init hv_vtl_early_init(void)
int __init hv_vtl_early_init(void)
{
/*
* `boot_cpu_has` returns the runtime feature support,
@ -230,4 +230,3 @@ static int __init hv_vtl_early_init(void)
return 0;
}
early_initcall(hv_vtl_early_init);


@ -340,8 +340,10 @@ static inline u64 hv_get_non_nested_register(unsigned int reg) { return 0; }
#ifdef CONFIG_HYPERV_VTL_MODE
void __init hv_vtl_init_platform(void);
int __init hv_vtl_early_init(void);
#else
static inline void __init hv_vtl_init_platform(void) {}
static inline int __init hv_vtl_early_init(void) { return 0; }
#endif
#include <asm-generic/mshyperv.h>


@ -256,7 +256,7 @@ static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
return 0;
}
static int sev_cpuid_hv(struct cpuid_leaf *leaf)
static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
{
int ret;
@ -279,6 +279,45 @@ static int sev_cpuid_hv(struct cpuid_leaf *leaf)
return ret;
}
static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
u32 cr4 = native_read_cr4();
int ret;
ghcb_set_rax(ghcb, leaf->fn);
ghcb_set_rcx(ghcb, leaf->subfn);
if (cr4 & X86_CR4_OSXSAVE)
/* Safe to read xcr0 */
ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
else
/* xgetbv will cause #UD - use reset value for xcr0 */
ghcb_set_xcr0(ghcb, 1);
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
if (ret != ES_OK)
return ret;
if (!(ghcb_rax_is_valid(ghcb) &&
ghcb_rbx_is_valid(ghcb) &&
ghcb_rcx_is_valid(ghcb) &&
ghcb_rdx_is_valid(ghcb)))
return ES_VMM_ERROR;
leaf->eax = ghcb->save.rax;
leaf->ebx = ghcb->save.rbx;
leaf->ecx = ghcb->save.rcx;
leaf->edx = ghcb->save.rdx;
return ES_OK;
}
static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
return ghcb ? __sev_cpuid_hv_ghcb(ghcb, ctxt, leaf)
: __sev_cpuid_hv_msr(leaf);
}
/*
* This may be called early while still running on the initial identity
* mapping. Use RIP-relative addressing to obtain the correct address
@ -388,19 +427,20 @@ snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
return false;
}
static void snp_cpuid_hv(struct cpuid_leaf *leaf)
static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
if (sev_cpuid_hv(leaf))
if (sev_cpuid_hv(ghcb, ctxt, leaf))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
}
static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
struct cpuid_leaf *leaf)
{
struct cpuid_leaf leaf_hv = *leaf;
switch (leaf->fn) {
case 0x1:
snp_cpuid_hv(&leaf_hv);
snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
/* initial APIC ID */
leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
@ -419,7 +459,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
break;
case 0xB:
leaf_hv.subfn = 0;
snp_cpuid_hv(&leaf_hv);
snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
/* extended APIC ID */
leaf->edx = leaf_hv.edx;
@ -467,7 +507,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
}
break;
case 0x8000001E:
snp_cpuid_hv(&leaf_hv);
snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
/* extended APIC ID */
leaf->eax = leaf_hv.eax;
@ -488,7 +528,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
* Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
* should be treated as fatal by caller.
*/
static int snp_cpuid(struct cpuid_leaf *leaf)
static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@ -522,7 +562,7 @@ static int snp_cpuid(struct cpuid_leaf *leaf)
return 0;
}
return snp_cpuid_postprocess(leaf);
return snp_cpuid_postprocess(ghcb, ctxt, leaf);
}
/*
@ -544,14 +584,14 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
leaf.fn = fn;
leaf.subfn = subfn;
ret = snp_cpuid(&leaf);
ret = snp_cpuid(NULL, NULL, &leaf);
if (!ret)
goto cpuid_done;
if (ret != -EOPNOTSUPP)
goto fail;
if (sev_cpuid_hv(&leaf))
if (__sev_cpuid_hv_msr(&leaf))
goto fail;
cpuid_done:
@ -848,14 +888,15 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
return ret;
}
static int vc_handle_cpuid_snp(struct pt_regs *regs)
static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
struct pt_regs *regs = ctxt->regs;
struct cpuid_leaf leaf;
int ret;
leaf.fn = regs->ax;
leaf.subfn = regs->cx;
ret = snp_cpuid(&leaf);
ret = snp_cpuid(ghcb, ctxt, &leaf);
if (!ret) {
regs->ax = leaf.eax;
regs->bx = leaf.ebx;
@ -874,7 +915,7 @@ static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
enum es_result ret;
int snp_cpuid_ret;
snp_cpuid_ret = vc_handle_cpuid_snp(regs);
snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
if (!snp_cpuid_ret)
return ES_OK;
if (snp_cpuid_ret != -EOPNOTSUPP)


@ -868,8 +868,7 @@ void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
unsigned long vaddr;
unsigned int npages;
unsigned long vaddr, npages;
if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return;


@ -1217,8 +1217,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
state->exit_latency = lpi->wake_latency;
state->target_residency = lpi->min_residency;
if (lpi->arch_flags)
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
state->flags |= arch_get_idle_state_flags(lpi->arch_flags);
if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
state->flags |= CPUIDLE_FLAG_RCU_IDLE;
state->enter = acpi_idle_lpi_enter;


@ -9,11 +9,6 @@
*
* The TD-2000 and certain older devices use a different protocol.
* Try the fit2 protocol module with them.
*
* NB: The FIT adapters do not appear to support the control
* registers. So, we map ALT_STATUS to STATUS and NO-OP writes
* to the device control register - this means that IDE reset
* will not work on these devices.
*/
#include <linux/module.h>
@ -37,8 +32,7 @@
static void fit3_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
if (cont == 1)
return;
regr += cont << 3;
switch (pi->mode) {
case 0:
@ -59,11 +53,7 @@ static int fit3_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int a, b;
if (cont) {
if (regr != 6)
return 0xff;
regr = 7;
}
regr += cont << 3;
switch (pi->mode) {
case 0:


@ -51,6 +51,13 @@ static void pata_parport_dev_select(struct ata_port *ap, unsigned int device)
ata_sff_pause(ap);
}
static void pata_parport_set_devctl(struct ata_port *ap, u8 ctl)
{
struct pi_adapter *pi = ap->host->private_data;
pi->proto->write_regr(pi, 1, 6, ctl);
}
static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
{
struct pi_adapter *pi = ap->host->private_data;
@ -64,7 +71,7 @@ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0xaa);
pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0x55);
pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 055);
pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0x55);
pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa);
nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
@ -73,6 +80,72 @@ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
return (nsect == 0x55) && (lbal == 0xaa);
}
static int pata_parport_wait_after_reset(struct ata_link *link,
unsigned int devmask,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pi_adapter *pi = ap->host->private_data;
unsigned int dev0 = devmask & (1 << 0);
unsigned int dev1 = devmask & (1 << 1);
int rc, ret = 0;
ata_msleep(ap, ATA_WAIT_AFTER_RESET);
/* always check readiness of the master device */
rc = ata_sff_wait_ready(link, deadline);
if (rc) {
/*
* some adapters return bogus values if master device is not
* present, so don't abort now if a slave device is present
*/
if (!dev1)
return rc;
ret = -ENODEV;
}
/*
* if device 1 was found in ata_devchk, wait for register
* access briefly, then wait for BSY to clear.
*/
if (dev1) {
int i;
pata_parport_dev_select(ap, 1);
/*
* Wait for register access. Some ATAPI devices fail
* to set nsect/lbal after reset, so don't waste too
* much time on it. We're gonna wait for !BSY anyway.
*/
for (i = 0; i < 2; i++) {
u8 nsect, lbal;
nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
if (nsect == 1 && lbal == 1)
break;
/* give drive a breather */
ata_msleep(ap, 50);
}
rc = ata_sff_wait_ready(link, deadline);
if (rc) {
if (rc != -ENODEV)
return rc;
ret = rc;
}
}
pata_parport_dev_select(ap, 0);
if (dev1)
pata_parport_dev_select(ap, 1);
if (dev0)
pata_parport_dev_select(ap, 0);
return ret;
}
static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask,
unsigned long deadline)
{
@ -87,7 +160,7 @@ static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask,
ap->last_ctl = ap->ctl;
/* wait the port to become ready */
return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
return pata_parport_wait_after_reset(&ap->link, devmask, deadline);
}
static int pata_parport_softreset(struct ata_link *link, unsigned int *classes,
@ -252,6 +325,7 @@ static struct ata_port_operations pata_parport_port_ops = {
.hardreset = NULL,
.sff_dev_select = pata_parport_dev_select,
.sff_set_devctl = pata_parport_set_devctl,
.sff_check_status = pata_parport_check_status,
.sff_check_altstatus = pata_parport_check_altstatus,
.sff_tf_load = pata_parport_tf_load,


@ -1436,8 +1436,9 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd)
static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
{
blk_mark_disk_dead(nbd->disk);
nbd_clear_sock(nbd);
disk_force_media_change(nbd->disk);
nbd_bdev_reset(nbd);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);


@ -973,7 +973,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
else if (param == PIN_CONFIG_BIAS_DISABLE ||
param == PIN_CONFIG_BIAS_PULL_DOWN ||
param == PIN_CONFIG_DRIVE_STRENGTH)
return pinctrl_gpio_set_config(offset, config);
return pinctrl_gpio_set_config(chip->base + offset, config);
else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */


@ -237,6 +237,7 @@ static bool pxa_gpio_has_pinctrl(void)
switch (gpio_type) {
case PXA3XX_GPIO:
case MMP2_GPIO:
case MMP_GPIO:
return false;
default:


@ -2093,7 +2093,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->flags |= AMD_IS_PX;
if (!(adev->flags & AMD_IS_APU)) {
parent = pci_upstream_bridge(adev->pdev);
parent = pcie_find_root_port(adev->pdev);
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}


@ -170,6 +170,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
csum += pia[size - 1];
if (csum) {
DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
kfree(pia);
return -EIO;
}


@ -157,7 +157,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
int32_t N;
int32_t j;
if (!pipe_ctx->stream)
if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)
@ -188,7 +188,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
int32_t N;
int32_t j;
if (!pipe_ctx->stream)
if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)

View File

@ -355,7 +355,7 @@ static void dcn32_update_clocks_update_dentist(
int32_t N;
int32_t j;
if (!pipe_ctx->stream)
if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)
@ -401,7 +401,7 @@ static void dcn32_update_clocks_update_dentist(
int32_t N;
int32_t j;
if (!pipe_ctx->stream)
if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)


@ -2040,6 +2040,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
*states = ATTR_STATE_SUPPORTED;
break;
default:


@ -2082,36 +2082,41 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
#define MAX(a, b) ((a) > (b) ? (a) : (b))
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
u32 smu_pcie_arg;
uint8_t *table_member1, *table_member2;
uint32_t min_gen_speed, max_gen_speed;
uint32_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg;
int ret, i;
/* PCIE gen speed and lane width override */
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
min_gen_speed = MAX(0, table_member1[0]);
max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
min_gen_speed = min_gen_speed > max_gen_speed ?
max_gen_speed : min_gen_speed;
min_lane_width = MAX(1, table_member2[0]);
max_lane_width = MIN(pcie_width_cap, table_member2[1]);
min_lane_width = min_lane_width > max_lane_width ?
max_lane_width : min_lane_width;
if (!amdgpu_device_pcie_dynamic_switching_supported()) {
if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
/* Force all levels to use the same settings */
for (i = 0; i < NUM_LINK_LEVELS; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
}
pcie_table->pcie_gen[0] = max_gen_speed;
pcie_table->pcie_lane[0] = max_lane_width;
} else {
for (i = 0; i < NUM_LINK_LEVELS; i++) {
if (pcie_table->pcie_gen[i] > pcie_gen_cap)
pcie_table->pcie_gen[i] = pcie_gen_cap;
if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
}
pcie_table->pcie_gen[0] = min_gen_speed;
pcie_table->pcie_lane[0] = min_lane_width;
}
pcie_table->pcie_gen[1] = max_gen_speed;
pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16 |


@ -38,6 +38,14 @@ static const struct drm_dmi_panel_orientation_data gpd_micropc = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_onemix2s = {
.width = 1200,
.height = 1920,
.bios_dates = (const char * const []){ "05/21/2018", "10/26/2018",
"03/04/2019", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_pocket = {
.width = 1200,
.height = 1920,
@ -401,6 +409,14 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* One Mix 2S (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_onemix2s,
},
{}
};


@ -198,7 +198,7 @@ static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
for_each_gt(gt, i915, id) {
if (!obj->mm.tlb[id])
return;
continue;
intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
obj->mm.tlb[id] = 0;


@ -271,8 +271,17 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;
/*
* L3 fabric flush is needed for AUX CCS invalidation
* which happens as part of pipe-control so we can
* ignore PIPE_CONTROL_FLUSH_L3. Also PIPE_CONTROL_FLUSH_L3
* deals with Protected Memory which is not needed for
* AUX CCS invalidation and lead to unwanted side effects.
*/
if (mode & EMIT_FLUSH)
bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/* Wa_1409600907:tgl,adl-p */


@ -1199,6 +1199,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
/*
* Register engines early to ensure the engine list is in its final
* rb-tree form, lowering the amount of code that has to deal with
* the intermediate llist state.
*/
intel_engines_driver_register(dev_priv);
return 0;
/*
@ -1246,8 +1253,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
void i915_gem_driver_register(struct drm_i915_private *i915)
{
i915_gem_driver_register__shrinker(i915);
intel_engines_driver_register(i915);
}
void i915_gem_driver_unregister(struct drm_i915_private *i915)


@ -31,6 +31,7 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
@ -183,6 +184,20 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16)
cli->abi16 = NULL;
}
static inline int
getparam_dma_ib_max(struct nvif_device *device)
{
const struct nvif_mclass dmas[] = {
{ NV03_CHANNEL_DMA, 0 },
{ NV10_CHANNEL_DMA, 0 },
{ NV17_CHANNEL_DMA, 0 },
{ NV40_CHANNEL_DMA, 0 },
{}
};
return nvif_mclass(&device->object, dmas) < 0 ? NV50_DMA_IB_MAX : 0;
}
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
@ -247,6 +262,12 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
case NOUVEAU_GETPARAM_GRAPH_UNITS:
getparam->value = nvkm_gr_units(gr);
break;
case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: {
int ib_max = getparam_dma_ib_max(device);
getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
break;
}
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;

View File

@ -257,10 +257,7 @@ static int
nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm,
struct nouveau_channel **pchan)
{
static const struct {
s32 oclass;
int version;
} hosts[] = {
const struct nvif_mclass hosts[] = {
{ AMPERE_CHANNEL_GPFIFO_B, 0 },
{ AMPERE_CHANNEL_GPFIFO_A, 0 },
{ TURING_CHANNEL_GPFIFO_A, 0 },
@ -443,9 +440,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
/* initialise dma tracking parameters */
switch (chan->user.oclass & 0x00ff) {
case 0x006b:
case 0x006e:
switch (chan->user.oclass) {
case NV03_CHANNEL_DMA:
case NV10_CHANNEL_DMA:
case NV17_CHANNEL_DMA:
case NV40_CHANNEL_DMA:
chan->user_put = 0x40;
chan->user_get = 0x44;
chan->dma.max = (0x10000 / 4) - 2;
@ -455,7 +454,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
chan->user_get = 0x44;
chan->user_get_hi = 0x60;
chan->dma.ib_base = 0x10000 / 4;
chan->dma.ib_max = (0x02000 / 8) - 1;
chan->dma.ib_max = NV50_DMA_IB_MAX;
chan->dma.ib_put = 0;
chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
chan->dma.max = chan->dma.ib_base;

View File

@ -49,6 +49,9 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
/* Maximum push buffer size. */
#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff
/* Maximum IBs per ring. */
#define NV50_DMA_IB_MAX ((0x02000 / 8) - 1)
/* Object handles - for stuff that's doesn't use handle == oclass. */
enum {
NvDmaFB = 0x80000002,

View File

@ -379,7 +379,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
struct nouveau_channel *chan = NULL;
struct nouveau_exec_job_args args = {};
struct drm_nouveau_exec *req = data;
int ret = 0;
int push_max, ret = 0;
if (unlikely(!abi16))
return -ENOMEM;
@ -404,9 +404,10 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (!chan->dma.ib_max)
return nouveau_abi16_put(abi16, -ENOSYS);
if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) {
push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max);
if (unlikely(req->push_count > push_max)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->push_count, NOUVEAU_GEM_MAX_PUSH);
req->push_count, push_max);
return nouveau_abi16_put(abi16, -EINVAL);
}

View File

@ -51,4 +51,14 @@ int nouveau_exec_job_init(struct nouveau_exec_job **job,
int nouveau_exec_ioctl_exec(struct drm_device *dev, void *data,
struct drm_file *file_priv);
static inline unsigned int
nouveau_exec_push_max_from_ib_max(int ib_max)
{
/* Limit the number of IBs per job to half the size of the ring in order
* to avoid the ring running dry between submissions and preserve one
* more slot for the job's HW fence.
*/
return ib_max > 1 ? ib_max / 2 - 1 : 0;
}
#endif
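
As a side note on the arithmetic in nouveau_exec_push_max_from_ib_max() above (an illustration, not part of the commit): with NV50_DMA_IB_MAX as defined earlier in this diff, half the ring minus the one slot reserved for the job's HW fence works out to 510 pushes. A minimal standalone C sketch of the same computation, with a hypothetical main():

    #include <stdio.h>

    #define NV50_DMA_IB_MAX ((0x02000 / 8) - 1)	/* 1023, as defined above */

    /* Mirrors nouveau_exec_push_max_from_ib_max(): half the ring size,
     * minus one slot kept free for the job's HW fence. */
    static unsigned int push_max_from_ib_max(int ib_max)
    {
    	return ib_max > 1 ? ib_max / 2 - 1 : 0;
    }

    int main(void)
    {
    	/* 1023 IBs per ring -> at most 510 pushes per job */
    	printf("ib_max=%d -> push_max=%u\n",
    	       NV50_DMA_IB_MAX, push_max_from_ib_max(NV50_DMA_IB_MAX));
    	return 0;
    }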

View File

@ -118,7 +118,7 @@ void drm_kunit_helper_free_device(struct kunit *test, struct device *dev)
kunit_release_action(test,
kunit_action_platform_driver_unregister,
pdev);
&fake_platform_driver);
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device);

View File

@ -799,6 +799,8 @@ config HID_NVIDIA_SHIELD
tristate "NVIDIA SHIELD devices"
depends on USB_HID
depends on BT_HIDP
depends on LEDS_CLASS
select POWER_SUPPLY
help
Support for NVIDIA SHIELD accessories.

View File

@ -130,6 +130,10 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
return -ENODEV;
boot_hid = usb_get_intfdata(boot_interface);
if (list_empty(&boot_hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
boot_hid_input = list_first_entry(&boot_hid->inputs,
struct hid_input, list);

View File

@ -425,6 +425,7 @@
#define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100 0x29F5
#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1 0x2BED
#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2 0x2BEE
#define I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG 0x2D02
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061

View File

@ -409,6 +409,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG),
HID_BATTERY_QUIRK_IGNORE },
{}
};

View File

@ -4515,7 +4515,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto hid_hw_init_fail;
}
hidpp_connect_event(hidpp);
schedule_work(&hidpp->work);
flush_work(&hidpp->work);
if (will_restart) {
/* Reset the HID node state */
@ -4677,6 +4678,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
{ /* MX Master mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) },
{ /* M720 Triathlon mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb015) },
{ /* MX Ergo trackball over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) },

View File

@ -2144,6 +2144,10 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_MTP_STM)},
/* Synaptics devices */
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xcd7e) },
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xce08) },

View File

@ -2088,7 +2088,9 @@ static int joycon_read_info(struct joycon_ctlr *ctlr)
struct joycon_input_report *report;
req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO;
mutex_lock(&ctlr->output_mutex);
ret = joycon_send_subcmd(ctlr, &req, 0, HZ);
mutex_unlock(&ctlr->output_mutex);
if (ret) {
hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret);
return ret;
@ -2117,6 +2119,85 @@ static int joycon_read_info(struct joycon_ctlr *ctlr)
return 0;
}
static int joycon_init(struct hid_device *hdev)
{
struct joycon_ctlr *ctlr = hid_get_drvdata(hdev);
int ret = 0;
mutex_lock(&ctlr->output_mutex);
/* if handshake command fails, assume ble pro controller */
if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) &&
!joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) {
hid_dbg(hdev, "detected USB controller\n");
/* set baudrate for improved latency */
ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ);
if (ret) {
hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret);
goto out_unlock;
}
/* handshake */
ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ);
if (ret) {
hid_err(hdev, "Failed handshake; ret=%d\n", ret);
goto out_unlock;
}
/*
* Set no timeout (to keep controller in USB mode).
* This doesn't send a response, so ignore the timeout.
*/
joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10);
} else if (jc_type_is_chrggrip(ctlr)) {
hid_err(hdev, "Failed charging grip handshake\n");
ret = -ETIMEDOUT;
goto out_unlock;
}
/* get controller calibration data, and parse it */
ret = joycon_request_calibration(ctlr);
if (ret) {
/*
* We can function with default calibration, but it may be
* inaccurate. Provide a warning, and continue on.
*/
hid_warn(hdev, "Analog stick positions may be inaccurate\n");
}
/* get IMU calibration data, and parse it */
ret = joycon_request_imu_calibration(ctlr);
if (ret) {
/*
* We can function with default calibration, but it may be
* inaccurate. Provide a warning, and continue on.
*/
hid_warn(hdev, "Unable to read IMU calibration data\n");
}
/* Set the reporting mode to 0x30, which is the full report mode */
ret = joycon_set_report_mode(ctlr);
if (ret) {
hid_err(hdev, "Failed to set report mode; ret=%d\n", ret);
goto out_unlock;
}
/* Enable rumble */
ret = joycon_enable_rumble(ctlr);
if (ret) {
hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret);
goto out_unlock;
}
/* Enable the IMU */
ret = joycon_enable_imu(ctlr);
if (ret) {
hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret);
goto out_unlock;
}
out_unlock:
mutex_unlock(&ctlr->output_mutex);
return ret;
}
/* Common handler for parsing inputs */
static int joycon_ctlr_read_handler(struct joycon_ctlr *ctlr, u8 *data,
int size)
@ -2248,85 +2329,19 @@ static int nintendo_hid_probe(struct hid_device *hdev,
hid_device_io_start(hdev);
/* Initialize the controller */
mutex_lock(&ctlr->output_mutex);
/* if handshake command fails, assume ble pro controller */
if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) &&
!joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) {
hid_dbg(hdev, "detected USB controller\n");
/* set baudrate for improved latency */
ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ);
if (ret) {
hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret);
goto err_mutex;
}
/* handshake */
ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ);
if (ret) {
hid_err(hdev, "Failed handshake; ret=%d\n", ret);
goto err_mutex;
}
/*
* Set no timeout (to keep controller in USB mode).
* This doesn't send a response, so ignore the timeout.
*/
joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10);
} else if (jc_type_is_chrggrip(ctlr)) {
hid_err(hdev, "Failed charging grip handshake\n");
ret = -ETIMEDOUT;
goto err_mutex;
}
/* get controller calibration data, and parse it */
ret = joycon_request_calibration(ctlr);
ret = joycon_init(hdev);
if (ret) {
/*
* We can function with default calibration, but it may be
* inaccurate. Provide a warning, and continue on.
*/
hid_warn(hdev, "Analog stick positions may be inaccurate\n");
}
/* get IMU calibration data, and parse it */
ret = joycon_request_imu_calibration(ctlr);
if (ret) {
/*
* We can function with default calibration, but it may be
* inaccurate. Provide a warning, and continue on.
*/
hid_warn(hdev, "Unable to read IMU calibration data\n");
}
/* Set the reporting mode to 0x30, which is the full report mode */
ret = joycon_set_report_mode(ctlr);
if (ret) {
hid_err(hdev, "Failed to set report mode; ret=%d\n", ret);
goto err_mutex;
}
/* Enable rumble */
ret = joycon_enable_rumble(ctlr);
if (ret) {
hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret);
goto err_mutex;
}
/* Enable the IMU */
ret = joycon_enable_imu(ctlr);
if (ret) {
hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret);
goto err_mutex;
hid_err(hdev, "Failed to initialize controller; ret=%d\n", ret);
goto err_close;
}
ret = joycon_read_info(ctlr);
if (ret) {
hid_err(hdev, "Failed to retrieve controller info; ret=%d\n",
ret);
goto err_mutex;
goto err_close;
}
mutex_unlock(&ctlr->output_mutex);
/* Initialize the leds */
ret = joycon_leds_create(ctlr);
if (ret) {
@ -2352,8 +2367,6 @@ static int nintendo_hid_probe(struct hid_device *hdev,
hid_dbg(hdev, "probe - success\n");
return 0;
err_mutex:
mutex_unlock(&ctlr->output_mutex);
err_close:
hid_hw_close(hdev);
err_stop:
@ -2383,6 +2396,20 @@ static void nintendo_hid_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
#ifdef CONFIG_PM
static int nintendo_hid_resume(struct hid_device *hdev)
{
int ret = joycon_init(hdev);
if (ret)
hid_err(hdev, "Failed to restore controller after resume");
return ret;
}
#endif
static const struct hid_device_id nintendo_hid_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO,
USB_DEVICE_ID_NINTENDO_PROCON) },
@ -2404,6 +2431,10 @@ static struct hid_driver nintendo_hid_driver = {
.probe = nintendo_hid_probe,
.remove = nintendo_hid_remove,
.raw_event = nintendo_hid_event,
#ifdef CONFIG_PM
.resume = nintendo_hid_resume,
#endif
};
module_hid_driver(nintendo_hid_driver);

View File

@ -801,7 +801,7 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts)
led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
"thunderstrike%d:blue:led", ts->id);
led->max_brightness = 1;
led->flags = LED_CORE_SUSPENDRESUME;
led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN;
led->brightness_get = &thunderstrike_led_get_brightness;
led->brightness_set = &thunderstrike_led_set_brightness;
@ -1058,7 +1058,7 @@ static int shield_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_hw_start(hdev, HID_CONNECT_HIDINPUT);
if (ret) {
hid_err(hdev, "Failed to start HID device\n");
goto err_haptics;
goto err_ts_create;
}
ret = hid_hw_open(hdev);
@ -1073,9 +1073,12 @@ static int shield_probe(struct hid_device *hdev, const struct hid_device_id *id)
err_stop:
hid_hw_stop(hdev);
err_haptics:
err_ts_create:
power_supply_unregister(ts->base.battery_dev.psy);
if (ts->haptics_dev)
input_unregister_device(ts->haptics_dev);
led_classdev_unregister(&ts->led_dev);
ida_free(&thunderstrike_ida, ts->id);
return ret;
}

View File

@ -2155,6 +2155,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
err:
usb_free_urb(sc->ghl_urb);
hid_hw_stop(hdev);
return ret;
}

View File

@ -390,7 +390,7 @@ static int steelseries_headset_arctis_1_fetch_battery(struct hid_device *hdev)
ret = hid_hw_raw_request(hdev, arctis_1_battery_request[0],
write_buf, sizeof(arctis_1_battery_request),
HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
if (ret < sizeof(arctis_1_battery_request)) {
if (ret < (int)sizeof(arctis_1_battery_request)) {
hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret);
ret = -ENODATA;
}
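
For context on the (int) cast above (an illustration, not from the commit): hid_hw_raw_request() returns a negative errno on failure, and comparing that int against an unsigned sizeof converts the error value to a huge unsigned number, so the error branch never fires. A minimal sketch of the pitfall, using a hypothetical buffer and error value:

    #include <stdio.h>

    int main(void)
    {
    	unsigned char request[2];
    	int ret = -5;	/* hypothetical error return, e.g. a negative errno */

    	/* ret is promoted to an unsigned type here, so -5 becomes a huge
    	 * value and the error check is silently skipped. */
    	if (ret < sizeof(request))
    		printf("unsigned compare: error caught\n");	/* not reached */

    	/* Casting sizeof to int keeps the comparison signed, as in the fix. */
    	if (ret < (int)sizeof(request))
    		printf("signed compare: error caught\n");	/* reached */

    	return 0;
    }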

View File

@ -998,45 +998,29 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid)
return hid_driver_reset_resume(hid);
}
/**
* __do_i2c_hid_core_initial_power_up() - First time power up of the i2c-hid device.
* @ihid: The ihid object created during probe.
*
* This function is called at probe time.
*
* The initial power on is where we do some basic validation that the device
* exists, where we fetch the HID descriptor, and where we create the actual
* HID devices.
*
* Return: 0 or error code.
/*
* Check that the device exists and parse the HID descriptor.
*/
static int __do_i2c_hid_core_initial_power_up(struct i2c_hid *ihid)
static int __i2c_hid_core_probe(struct i2c_hid *ihid)
{
struct i2c_client *client = ihid->client;
struct hid_device *hid = ihid->hid;
int ret;
ret = i2c_hid_core_power_up(ihid);
if (ret)
return ret;
/* Make sure there is something at this address */
ret = i2c_smbus_read_byte(client);
if (ret < 0) {
i2c_hid_dbg(ihid, "nothing at this address: %d\n", ret);
ret = -ENXIO;
goto err;
return -ENXIO;
}
ret = i2c_hid_fetch_hid_descriptor(ihid);
if (ret < 0) {
dev_err(&client->dev,
"Failed to fetch the HID Descriptor\n");
goto err;
return ret;
}
enable_irq(client->irq);
hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
hid->product = le16_to_cpu(ihid->hdesc.wProductID);
@ -1050,17 +1034,49 @@ static int __do_i2c_hid_core_initial_power_up(struct i2c_hid *ihid)
ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
return 0;
}
static int i2c_hid_core_register_hid(struct i2c_hid *ihid)
{
struct i2c_client *client = ihid->client;
struct hid_device *hid = ihid->hid;
int ret;
enable_irq(client->irq);
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
hid_err(client, "can't add hid device: %d\n", ret);
goto err;
disable_irq(client->irq);
return ret;
}
return 0;
}
err:
static int i2c_hid_core_probe_panel_follower(struct i2c_hid *ihid)
{
int ret;
ret = i2c_hid_core_power_up(ihid);
if (ret)
return ret;
ret = __i2c_hid_core_probe(ihid);
if (ret)
goto err_power_down;
ret = i2c_hid_core_register_hid(ihid);
if (ret)
goto err_power_down;
return 0;
err_power_down:
i2c_hid_core_power_down(ihid);
return ret;
}
@ -1077,7 +1093,7 @@ static void ihid_core_panel_prepare_work(struct work_struct *work)
* steps.
*/
if (!hid->version)
ret = __do_i2c_hid_core_initial_power_up(ihid);
ret = i2c_hid_core_probe_panel_follower(ihid);
else
ret = i2c_hid_core_resume(ihid);
@ -1136,7 +1152,6 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
struct device *dev = &ihid->client->dev;
int ret;
ihid->is_panel_follower = true;
ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_funcs;
/*
@ -1156,30 +1171,6 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
return 0;
}
static int i2c_hid_core_initial_power_up(struct i2c_hid *ihid)
{
/*
* If we're a panel follower, we'll register and do our initial power
* up when the panel turns on; otherwise we do it right away.
*/
if (drm_is_panel_follower(&ihid->client->dev))
return i2c_hid_core_register_panel_follower(ihid);
else
return __do_i2c_hid_core_initial_power_up(ihid);
}
static void i2c_hid_core_final_power_down(struct i2c_hid *ihid)
{
/*
* If we're a follower, the act of unfollowing will cause us to be
* powered down. Otherwise we need to manually do it.
*/
if (ihid->is_panel_follower)
drm_panel_remove_follower(&ihid->panel_follower);
else
i2c_hid_core_suspend(ihid, true);
}
int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
u16 hid_descriptor_address, u32 quirks)
{
@ -1211,6 +1202,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
ihid->ops = ops;
ihid->client = client;
ihid->wHIDDescRegister = cpu_to_le16(hid_descriptor_address);
ihid->is_panel_follower = drm_is_panel_follower(&client->dev);
init_waitqueue_head(&ihid->wait);
mutex_init(&ihid->reset_lock);
@ -1224,14 +1216,10 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
return ret;
device_enable_async_suspend(&client->dev);
ret = i2c_hid_init_irq(client);
if (ret < 0)
goto err_buffers_allocated;
hid = hid_allocate_device();
if (IS_ERR(hid)) {
ret = PTR_ERR(hid);
goto err_irq;
goto err_free_buffers;
}
ihid->hid = hid;
@ -1242,19 +1230,42 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
hid->bus = BUS_I2C;
hid->initial_quirks = quirks;
ret = i2c_hid_core_initial_power_up(ihid);
/* Power on and probe unless device is a panel follower. */
if (!ihid->is_panel_follower) {
ret = i2c_hid_core_power_up(ihid);
if (ret < 0)
goto err_destroy_device;
ret = __i2c_hid_core_probe(ihid);
if (ret < 0)
goto err_power_down;
}
ret = i2c_hid_init_irq(client);
if (ret < 0)
goto err_power_down;
/*
* If we're a panel follower, we'll register when the panel turns on;
* otherwise we do it right away.
*/
if (ihid->is_panel_follower)
ret = i2c_hid_core_register_panel_follower(ihid);
else
ret = i2c_hid_core_register_hid(ihid);
if (ret)
goto err_mem_free;
goto err_free_irq;
return 0;
err_mem_free:
hid_destroy_device(hid);
err_irq:
err_free_irq:
free_irq(client->irq, ihid);
err_buffers_allocated:
err_power_down:
if (!ihid->is_panel_follower)
i2c_hid_core_power_down(ihid);
err_destroy_device:
hid_destroy_device(hid);
err_free_buffers:
i2c_hid_free_buffers(ihid);
return ret;
@ -1266,7 +1277,14 @@ void i2c_hid_core_remove(struct i2c_client *client)
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
i2c_hid_core_final_power_down(ihid);
/*
* If we're a follower, the act of unfollowing will cause us to be
* powered down. Otherwise we need to manually do it.
*/
if (ihid->is_panel_follower)
drm_panel_remove_follower(&ihid->panel_follower);
else
i2c_hid_core_suspend(ihid, true);
hid = ihid->hid;
hid_destroy_device(hid);

View File

@ -133,6 +133,14 @@ static int enable_gpe(struct device *dev)
}
wakeup = &adev->wakeup;
/*
* Call acpi_disable_gpe(), so that reference count
* gpe_event_info->runtime_count doesn't overflow.
* When gpe_event_info->runtime_count = 0, the call
* to acpi_disable_gpe() simply return.
*/
acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
if (ACPI_FAILURE(acpi_sts)) {
dev_err(dev, "enable ose_gpe failed\n");

View File

@ -4968,7 +4968,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
int err = 0;
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
struct net_device *ndev = NULL;
struct ib_sa_multicast ib;
struct ib_sa_multicast ib = {};
enum ib_gid_type gid_type;
bool send_only;

View File

@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
return -ENOMEM;
for (i = 0; i < ports_num; i++) {
char port_str[10];
char port_str[11];
ports[i].port_num = i + 1;
snprintf(port_str, sizeof(port_str), "%u", i + 1);
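
Regarding the port_str size bump above (illustration only, not part of the commit): a 32-bit value printed with "%u" can need ten digits plus the terminating NUL, so an 11-byte buffer avoids the truncation that a 10-byte buffer permits (and the format-truncation warning that goes with it). A small sketch assuming the worst-case value:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
    	char small[10], big[11];

    	/* UINT_MAX prints as "4294967295": ten digits + NUL needs 11 bytes. */
    	snprintf(small, sizeof(small), "%u", UINT_MAX);	/* truncated */
    	snprintf(big, sizeof(big), "%u", UINT_MAX);	/* fits */

    	printf("small=\"%s\" big=\"%s\"\n", small, big);
    	return 0;
    }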

View File

@ -2529,6 +2529,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
},
[RDMA_NLDEV_CMD_SYS_SET] = {
.doit = nldev_set_sys_set_doit,
.flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NLDEV_CMD_STAT_SET] = {
.doit = nldev_stat_set_doit,

View File

@ -546,7 +546,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
if (hdr->in_words * 4 != count)
return -EINVAL;
if (count < method_elm->req_size + sizeof(hdr)) {
if (count < method_elm->req_size + sizeof(*hdr)) {
/*
* rdma-core v18 and v19 have a bug where they send DESTROY_CQ
* with a 16 byte write instead of 24. Old kernels didn't

View File

@ -910,6 +910,10 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
list_del(&qp->list);
mutex_unlock(&rdev->qp_lock);
atomic_dec(&rdev->stats.res.qp_count);
if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
atomic_dec(&rdev->stats.res.rc_qp_count);
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
atomic_dec(&rdev->stats.res.ud_qp_count);
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);

View File

@ -665,7 +665,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
blocked = cookie & RCFW_CMD_IS_BLOCKING;
cookie &= RCFW_MAX_COOKIE_VALUE;
crsqe = &rcfw->crsqe_tbl[cookie];
crsqe->is_in_used = false;
if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
&rcfw->cmdq.flags),
@ -681,8 +680,14 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
atomic_dec(&rcfw->timeout_send);
if (crsqe->is_waiter_alive) {
if (crsqe->resp)
if (crsqe->resp) {
memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
/* Insert write memory barrier to ensure that
* response data is copied before clearing the
* flags
*/
smp_wmb();
}
if (!blocked)
wait_cmds++;
}
@ -694,6 +699,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
if (!is_waiter_alive)
crsqe->resp = NULL;
crsqe->is_in_used = false;
hwq->cons += req_size;
/* This is a case to handle below scenario -

View File

@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
if (!skb)
return -ENOMEM;
req = __skb_put_zero(skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));

View File

@ -133,8 +133,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
{
struct erdma_pd *pd = to_epd(mr->ibmr.pd);
u32 mtt_level = ERDMA_MR_MTT_0LEVEL;
struct erdma_cmdq_reg_mr_req req;
u32 mtt_level;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);
@ -147,10 +147,9 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
mtt_level = mr->mem.mtt->level;
}
} else {
} else if (mr->type != ERDMA_MR_TYPE_DMA) {
memcpy(req.phy_addr, mr->mem.mtt->buf,
MTT_SIZE(mr->mem.page_cnt));
mtt_level = ERDMA_MR_MTT_0LEVEL;
}
req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
@ -655,7 +654,7 @@ static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
if (!mtt)
return NULL;
return ERR_PTR(-ENOMEM);
mtt->size = ALIGN(size, PAGE_SIZE);
mtt->buf = vzalloc(mtt->size);

View File

@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{
int i;
char buff[11];
char buff[12];
struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ;
struct ib_port_attr attr;

View File

@ -2470,8 +2470,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
mlx5_steering_anchor_destroy_res(ft_prio);
put_flow_table:
put_flow_table(dev, ft_prio, true);
mutex_unlock(&dev->flow_db->lock);
free_obj:
mutex_unlock(&dev->flow_db->lock);
kfree(obj);
return err;

View File

@ -2084,7 +2084,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
case MLX5_IB_MMAP_DEVICE_MEM:
return "Device Memory";
default:
return NULL;
return "Unknown";
}
}

View File

@ -301,7 +301,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
{
set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,
ent->dev->umrc.pd);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
@ -1024,19 +1025,26 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
if (!dev->cache.wq)
return;
cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
mutex_lock(&dev->cache.rb_lock);
for (node = rb_first(root); node; node = rb_next(node)) {
ent = rb_entry(node, struct mlx5_cache_ent, node);
xa_lock_irq(&ent->mkeys);
ent->disabled = true;
xa_unlock_irq(&ent->mkeys);
cancel_delayed_work_sync(&ent->dwork);
}
mutex_unlock(&dev->cache.rb_lock);
/*
* After all entries are disabled and will not reschedule on WQ,
* flush it and all async commands.
*/
flush_workqueue(dev->cache.wq);
mlx5_mkey_cache_debugfs_cleanup(dev);
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
/* At this point all entries are disabled and have no concurrent work. */
mutex_lock(&dev->cache.rb_lock);
node = rb_first(root);
while (node) {
ent = rb_entry(node, struct mlx5_cache_ent, node);

View File

@ -976,6 +976,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_put(cep);
new_cep->listen_cep = NULL;
if (rv) {
siw_cancel_mpatimer(new_cep);
siw_cep_set_free(new_cep);
goto error;
}
@ -1100,9 +1101,12 @@ static void siw_cm_work_handler(struct work_struct *w)
/*
* Socket close before MPA request received.
*/
siw_dbg_cep(cep, "no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
if (cep->listen_cep) {
siw_dbg_cep(cep,
"no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
}
}
}
release_cep = 1;
@ -1227,7 +1231,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
if (!cep)
goto out;
siw_dbg_cep(cep, "state: %d\n", cep->state);
siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
cep->state, sk->sk_state);
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
switch (cep->state) {
case SIW_EPSTATE_RDMA_MODE:

View File

@ -2784,7 +2784,6 @@ static int srp_abort(struct scsi_cmnd *scmnd)
u32 tag;
u16 ch_idx;
struct srp_rdma_ch *ch;
int ret;
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
@ -2798,19 +2797,14 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host,
"Sending SRP abort for tag %#x\n", tag);
if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
SRP_TSK_ABORT_TASK, NULL) == 0)
ret = SUCCESS;
else if (target->rport->state == SRP_RPORT_LOST)
ret = FAST_IO_FAIL;
else
ret = FAILED;
if (ret == SUCCESS) {
SRP_TSK_ABORT_TASK, NULL) == 0) {
srp_free_req(ch, req, scmnd, 0);
scmnd->result = DID_ABORT << 16;
scsi_done(scmnd);
return SUCCESS;
}
if (target->rport->state == SRP_RPORT_LOST)
return FAST_IO_FAIL;
return ret;
return FAILED;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)

View File

@ -29,4 +29,8 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void gic_enable_of_quirks(const struct device_node *np,
const struct gic_quirk *quirks, void *data);
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2)
#endif /* _IRQ_GIC_COMMON_H */

View File

@ -44,10 +44,6 @@
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
#define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2)
#define RD_LOCAL_LPI_ENABLED BIT(0)
#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
#define RD_LOCAL_MEMRESERVE_DONE BIT(2)
@ -4754,6 +4750,14 @@ static bool __maybe_unused its_enable_rk3588001(void *data)
return true;
}
static bool its_set_non_coherent(void *data)
{
struct its_node *its = data;
its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
return true;
}
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@ -4808,6 +4812,11 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_rk3588001,
},
#endif
{
.desc = "ITS: non-coherent attribute",
.property = "dma-noncoherent",
.init = its_set_non_coherent,
},
{
}
};
@ -4817,6 +4826,10 @@ static void its_enable_quirks(struct its_node *its)
u32 iidr = readl_relaxed(its->base + GITS_IIDR);
gic_enable_quirks(iidr, its_quirks, its);
if (is_of_node(its->fwnode_handle))
gic_enable_of_quirks(to_of_node(its->fwnode_handle),
its_quirks, its);
}
static int its_save_disable(void)
@ -4952,7 +4965,7 @@ static void __init __iomem *its_map_one(struct resource *res, int *err)
return NULL;
}
static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
static int its_init_domain(struct its_node *its)
{
struct irq_domain *inner_domain;
struct msi_domain_info *info;
@ -4966,7 +4979,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
inner_domain = irq_domain_create_hierarchy(its_parent,
its->msi_domain_flags, 0,
handle, &its_domain_ops,
its->fwnode_handle, &its_domain_ops,
info);
if (!inner_domain) {
kfree(info);
@ -5017,8 +5030,7 @@ static int its_init_vpe_domain(void)
return 0;
}
static int __init its_compute_its_list_map(struct resource *res,
void __iomem *its_base)
static int __init its_compute_its_list_map(struct its_node *its)
{
int its_number;
u32 ctlr;
@ -5032,15 +5044,15 @@ static int __init its_compute_its_list_map(struct resource *res,
its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
if (its_number >= GICv4_ITS_LIST_MAX) {
pr_err("ITS@%pa: No ITSList entry available!\n",
&res->start);
&its->phys_base);
return -EINVAL;
}
ctlr = readl_relaxed(its_base + GITS_CTLR);
ctlr = readl_relaxed(its->base + GITS_CTLR);
ctlr &= ~GITS_CTLR_ITS_NUMBER;
ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
writel_relaxed(ctlr, its_base + GITS_CTLR);
ctlr = readl_relaxed(its_base + GITS_CTLR);
writel_relaxed(ctlr, its->base + GITS_CTLR);
ctlr = readl_relaxed(its->base + GITS_CTLR);
if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
its_number = ctlr & GITS_CTLR_ITS_NUMBER;
its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
@ -5048,75 +5060,50 @@ static int __init its_compute_its_list_map(struct resource *res,
if (test_and_set_bit(its_number, &its_list_map)) {
pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
&res->start, its_number);
&its->phys_base, its_number);
return -EINVAL;
}
return its_number;
}
static int __init its_probe_one(struct resource *res,
struct fwnode_handle *handle, int numa_node)
static int __init its_probe_one(struct its_node *its)
{
struct its_node *its;
void __iomem *its_base;
u64 baser, tmp, typer;
u64 baser, tmp;
struct page *page;
u32 ctlr;
int err;
its_base = its_map_one(res, &err);
if (!its_base)
return err;
pr_info("ITS %pR\n", res);
its = kzalloc(sizeof(*its), GFP_KERNEL);
if (!its) {
err = -ENOMEM;
goto out_unmap;
}
raw_spin_lock_init(&its->lock);
mutex_init(&its->dev_alloc_lock);
INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list);
typer = gic_read_typer(its_base + GITS_TYPER);
its->typer = typer;
its->base = its_base;
its->phys_base = res->start;
if (is_v4(its)) {
if (!(typer & GITS_TYPER_VMOVP)) {
err = its_compute_its_list_map(res, its_base);
if (!(its->typer & GITS_TYPER_VMOVP)) {
err = its_compute_its_list_map(its);
if (err < 0)
goto out_free_its;
goto out;
its->list_nr = err;
pr_info("ITS@%pa: Using ITS number %d\n",
&res->start, err);
&its->phys_base, err);
} else {
pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
}
if (is_v4_1(its)) {
u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
if (!its->sgir_base) {
err = -ENOMEM;
goto out_free_its;
goto out;
}
its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
its->mpidr = readl_relaxed(its->base + GITS_MPIDR);
pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
&res->start, its->mpidr, svpet);
&its->phys_base, its->mpidr, svpet);
}
}
its->numa_node = numa_node;
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
get_order(ITS_CMD_QUEUE_SZ));
if (!page) {
@ -5125,12 +5112,9 @@ static int __init its_probe_one(struct resource *res,
}
its->cmd_base = (void *)page_address(page);
its->cmd_write = its->cmd_base;
its->fwnode_handle = handle;
its->get_msi_base = its_irq_get_msi_base;
its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
its_enable_quirks(its);
err = its_alloc_tables(its);
if (err)
goto out_free_cmd;
@ -5174,7 +5158,7 @@ static int __init its_probe_one(struct resource *res,
ctlr |= GITS_CTLR_ImDe;
writel_relaxed(ctlr, its->base + GITS_CTLR);
err = its_init_domain(handle, its);
err = its_init_domain(its);
if (err)
goto out_free_tables;
@ -5191,11 +5175,8 @@ static int __init its_probe_one(struct resource *res,
out_unmap_sgir:
if (its->sgir_base)
iounmap(its->sgir_base);
out_free_its:
kfree(its);
out_unmap:
iounmap(its_base);
pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
out:
pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
return err;
}
@ -5356,10 +5337,53 @@ static const struct of_device_id its_device_id[] = {
{},
};
static struct its_node __init *its_node_init(struct resource *res,
struct fwnode_handle *handle, int numa_node)
{
void __iomem *its_base;
struct its_node *its;
int err;
its_base = its_map_one(res, &err);
if (!its_base)
return NULL;
pr_info("ITS %pR\n", res);
its = kzalloc(sizeof(*its), GFP_KERNEL);
if (!its)
goto out_unmap;
raw_spin_lock_init(&its->lock);
mutex_init(&its->dev_alloc_lock);
INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list);
its->typer = gic_read_typer(its_base + GITS_TYPER);
its->base = its_base;
its->phys_base = res->start;
its->numa_node = numa_node;
its->fwnode_handle = handle;
return its;
out_unmap:
iounmap(its_base);
return NULL;
}
static void its_node_destroy(struct its_node *its)
{
iounmap(its->base);
kfree(its);
}
static int __init its_of_probe(struct device_node *node)
{
struct device_node *np;
struct resource res;
int err;
/*
* Make sure *all* the ITS are reset before we probe any, as
@ -5369,8 +5393,6 @@ static int __init its_of_probe(struct device_node *node)
*/
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
int err;
if (!of_device_is_available(np) ||
!of_property_read_bool(np, "msi-controller") ||
of_address_to_resource(np, 0, &res))
@ -5383,6 +5405,8 @@ static int __init its_of_probe(struct device_node *node)
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
struct its_node *its;
if (!of_device_is_available(np))
continue;
if (!of_property_read_bool(np, "msi-controller")) {
@ -5396,7 +5420,17 @@ static int __init its_of_probe(struct device_node *node)
continue;
}
its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
if (!its)
return -ENOMEM;
its_enable_quirks(its);
err = its_probe_one(its);
if (err) {
its_node_destroy(its);
return err;
}
}
return 0;
}
@ -5508,6 +5542,7 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
{
struct acpi_madt_generic_translator *its_entry;
struct fwnode_handle *dom_handle;
struct its_node *its;
struct resource res;
int err;
@ -5532,11 +5567,18 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
goto dom_err;
}
err = its_probe_one(&res, dom_handle,
acpi_get_its_numa_node(its_entry->translation_id));
its = its_node_init(&res, dom_handle,
acpi_get_its_numa_node(its_entry->translation_id));
if (!its) {
err = -ENOMEM;
goto node_err;
}
err = its_probe_one(its);
if (!err)
return 0;
node_err:
iort_deregister_domain_token(its_entry->translation_id);
dom_err:
irq_domain_free_fwnode(dom_handle);

View File

@ -1857,6 +1857,14 @@ static bool gic_enable_quirk_arm64_2941627(void *data)
return true;
}
static bool rd_set_non_coherent(void *data)
{
struct gic_chip_data *d = data;
d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
return true;
}
static const struct gic_quirk gic_quirks[] = {
{
.desc = "GICv3: Qualcomm MSM8996 broken firmware",
@ -1923,6 +1931,11 @@ static const struct gic_quirk gic_quirks[] = {
.mask = 0xff0f0fff,
.init = gic_enable_quirk_arm64_2941627,
},
{
.desc = "GICv3: non-coherent attribute",
.property = "dma-noncoherent",
.init = rd_set_non_coherent,
},
{
}
};

View File

@ -118,7 +118,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
reg &= ~(TSSEL_MASK << tssr_offset);
reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
@ -130,8 +130,8 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
unsigned int hw_irq = irqd_to_hwirq(d);
if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d);
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
unsigned long tint = (uintptr_t)d->chip_data;
u32 offset = hw_irq - IRQC_TINT_START;
u32 tssr_offset = TSSR_OFFSET(offset);
u8 tssr_index = TSSR_INDEX(offset);

View File

@ -155,8 +155,16 @@ static int __init riscv_intc_init(struct device_node *node,
* for each INTC DT node. We only need to do INTC initialization
* for the INTC DT node belonging to boot CPU (or boot HART).
*/
if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
/*
* The INTC nodes of each CPU are suppliers for downstream
* interrupt controllers (such as PLIC, IMSIC and APLIC
* direct-mode) so we should mark an INTC node as initialized
* if we are not creating IRQ domain for it.
*/
fwnode_dev_initialized(of_fwnode_handle(node), true);
return 0;
}
return riscv_intc_init_common(of_node_to_fwnode(node));
}

View File

@ -460,6 +460,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = {
.map = irq_map_generic_chip,
.alloc = stm32_exti_alloc,
.free = stm32_exti_free,
.xlate = irq_domain_xlate_twocell,
};
static void stm32_irq_ack(struct irq_data *d)

View File

@ -22,9 +22,20 @@
#define PDC_MAX_GPIO_IRQS 256
/* Valid only on HW version < 3.2 */
#define IRQ_ENABLE_BANK 0x10
#define IRQ_i_CFG 0x110
/* Valid only on HW version >= 3.2 */
#define IRQ_i_CFG_IRQ_ENABLE 3
#define IRQ_i_CFG_TYPE_MASK GENMASK(2, 0)
#define PDC_VERSION_REG 0x1000
/* Notable PDC versions */
#define PDC_VERSION_3_2 0x30200
struct pdc_pin_region {
u32 pin_base;
u32 parent_base;
@ -37,6 +48,7 @@ static DEFINE_RAW_SPINLOCK(pdc_lock);
static void __iomem *pdc_base;
static struct pdc_pin_region *pdc_region;
static int pdc_region_cnt;
static unsigned int pdc_version;
static void pdc_reg_write(int reg, u32 i, u32 val)
{
@ -48,20 +60,32 @@ static u32 pdc_reg_read(int reg, u32 i)
return readl_relaxed(pdc_base + reg + i * sizeof(u32));
}
static void __pdc_enable_intr(int pin_out, bool on)
{
unsigned long enable;
if (pdc_version < PDC_VERSION_3_2) {
u32 index, mask;
index = pin_out / 32;
mask = pin_out % 32;
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
__assign_bit(mask, &enable, on);
pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
} else {
enable = pdc_reg_read(IRQ_i_CFG, pin_out);
__assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on);
pdc_reg_write(IRQ_i_CFG, pin_out, enable);
}
}
static void pdc_enable_intr(struct irq_data *d, bool on)
{
int pin_out = d->hwirq;
unsigned long enable;
unsigned long flags;
u32 index, mask;
index = pin_out / 32;
mask = pin_out % 32;
raw_spin_lock_irqsave(&pdc_lock, flags);
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
__assign_bit(mask, &enable, on);
pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
__pdc_enable_intr(d->hwirq, on);
raw_spin_unlock_irqrestore(&pdc_lock, flags);
}
@ -142,6 +166,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
}
old_pdc_type = pdc_reg_read(IRQ_i_CFG, d->hwirq);
pdc_type |= (old_pdc_type & ~IRQ_i_CFG_TYPE_MASK);
pdc_reg_write(IRQ_i_CFG, d->hwirq, pdc_type);
ret = irq_chip_set_type_parent(d, type);
@ -246,7 +271,6 @@ static const struct irq_domain_ops qcom_pdc_ops = {
static int pdc_setup_pin_mapping(struct device_node *np)
{
int ret, n, i;
u32 irq_index, reg_index, val;
n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32));
if (n <= 0 || n % 3)
@ -276,29 +300,38 @@ static int pdc_setup_pin_mapping(struct device_node *np)
if (ret)
return ret;
for (i = 0; i < pdc_region[n].cnt; i++) {
reg_index = (i + pdc_region[n].pin_base) >> 5;
irq_index = (i + pdc_region[n].pin_base) & 0x1f;
val = pdc_reg_read(IRQ_ENABLE_BANK, reg_index);
val &= ~BIT(irq_index);
pdc_reg_write(IRQ_ENABLE_BANK, reg_index, val);
}
for (i = 0; i < pdc_region[n].cnt; i++)
__pdc_enable_intr(i + pdc_region[n].pin_base, 0);
}
return 0;
}
#define QCOM_PDC_SIZE 0x30000
static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
{
struct irq_domain *parent_domain, *pdc_domain;
resource_size_t res_size;
struct resource res;
int ret;
pdc_base = of_iomap(node, 0);
/* compat with old sm8150 DT which had very small region for PDC */
if (of_address_to_resource(node, 0, &res))
return -EINVAL;
res_size = max_t(resource_size_t, resource_size(&res), QCOM_PDC_SIZE);
if (res_size > resource_size(&res))
pr_warn("%pOF: invalid reg size, please fix DT\n", node);
pdc_base = ioremap(res.start, res_size);
if (!pdc_base) {
pr_err("%pOF: unable to map PDC registers\n", node);
return -ENXIO;
}
pdc_version = pdc_reg_read(PDC_VERSION_REG, 0);
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("%pOF: unable to find PDC's parent domain\n", node);

View File

@ -753,7 +753,8 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
int err;
u8 *buf;
reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64));
reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm);
reqsize = ALIGN(reqsize, __alignof__(__le64));
req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
if (!req)
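
A note on the reqsize change above (illustrative only, not from the commit): the old computation aligned only the per-transform request area and left out the skcipher_request header itself, so the kmalloc() could be undersized. A small sketch with hypothetical sizes showing how the fixed formula accounts for the header before aligning and appending the IV:

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
    	/* Hypothetical sizes for illustration only. */
    	size_t sizeof_req  = 72;	/* struct skcipher_request header */
    	size_t tfm_reqsize = 24;	/* crypto_skcipher_reqsize(tfm)   */
    	size_t iv_size     = 16;	/* cc->iv_size                    */
    	size_t align       = __alignof__(uint64_t);

    	size_t old   = ALIGN(tfm_reqsize, align) + iv_size;	/* header missing */
    	size_t fixed = ALIGN(sizeof_req + tfm_reqsize, align) + iv_size;

    	printf("old allocation=%zu bytes, fixed allocation=%zu bytes\n", old, fixed);
    	return 0;
    }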

View File

@ -748,17 +748,16 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path,
/*
* Cleanup zoned device information.
*/
static void dmz_put_zoned_device(struct dm_target *ti)
static void dmz_put_zoned_devices(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
int i;
for (i = 0; i < dmz->nr_ddevs; i++) {
if (dmz->ddev[i]) {
for (i = 0; i < dmz->nr_ddevs; i++)
if (dmz->ddev[i])
dm_put_device(ti, dmz->ddev[i]);
dmz->ddev[i] = NULL;
}
}
kfree(dmz->ddev);
}
static int dmz_fixup_devices(struct dm_target *ti)
@ -948,7 +947,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
err_meta:
dmz_dtr_metadata(dmz->metadata);
err_dev:
dmz_put_zoned_device(ti);
dmz_put_zoned_devices(ti);
err:
kfree(dmz->dev);
kfree(dmz);
@ -978,7 +977,7 @@ static void dmz_dtr(struct dm_target *ti)
bioset_exit(&dmz->bio_set);
dmz_put_zoned_device(ti);
dmz_put_zoned_devices(ti);
mutex_destroy(&dmz->chunk_lock);

View File

@ -854,6 +854,13 @@ struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
r5l_wake_reclaim(conf->log, 0);
/* release batch_last before wait to avoid risk of deadlock */
if (ctx && ctx->batch_last) {
raid5_release_stripe(ctx->batch_last);
ctx->batch_last = NULL;
}
wait_event_lock_irq(conf->wait_for_stripe,
is_inactive_blocked(conf, hash),
*(conf->hash_locks + hash));

Some files were not shown because too many files have changed in this diff.