dmaengine updates for v6.3

New support:
  - TI AM62Ax controller support
  - Xilinx xdma driver
  - Qualcomm SM6125, SM8550, QDU1000/QRU1000 GPI controller
 
Updates:
  - Runtime pm support for at_xdmac driver
  - IMX sdma binding conversion to yaml and HDMI audio support
  - IMX mxs binding conversion to yaml
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmP4uMkACgkQfBQHDyUj
 g0fXvhAAstzwh2KirInbEleqmH7PnjMrWut2F8oTCeFJjPUXRKx3HtXW5JyDLeT4
 6iXhEknWnkvbbEQf1EwZNfK4xC+xNvEU7XvNdhm23kyXqYJBRwiq5cjqapLLr9Vf
 XDRRZ1CK/Zhxqhk0kuOnQIsGLciAVFcXhoswjzlnaGL4yGLkZcWB8bmO1JOzITi8
 GqQD9wx+dEixSg3004qbE8GceC4TGq375fFiHVTiNO878pKK+vQF9DxsEabchrIK
 8HPCF8jVZ0A+6gDYysJMjn81cPK6AdnbIhJnvfaF7N3fdl2QUDSweM9NbF/06B2B
 E5G/NOrWe5NrSWoJbXz9U5t7Cg3pZktreLjUlREM2/i+jmqyUC3FMICQYpUneAnk
 aUSr2hRBYlbIEJZJhmSMczUfUOqdPFt5mY2bUwvtSfCKf1we5wl9oiC72IvRthmL
 hSCs6RQxczM62u7ocjH/T9j2n3zy+vcxijCuIac2Y6FEHaURq/7wcfDypePg37zi
 Kf7awaRHE0WOiXj1kc+fsEhkEJ+xEjxiJnCMzFb9jG1A0RBgtsNq6HXCgmd2O/R7
 b2Q7hUt3rr9qr4uHTGz0Ure+nkVB7Zz0eCVfNQQ3ypw2DibGc1WhjzpGiEq0tlYK
 20wKtbWIFy/MFjIOiZiQTGL2vGnDgjq4DRZgtPKdz60uCGgrM6s=
 =xtfI
 -----END PGP SIGNATURE-----

Merge tag 'dmaengine-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "A new driver, couple of device support and binding conversion along
  with bunch of driver updates are the main features of this.

  New hardware support:

   - TI AM62Ax controller support

   - Xilinx xdma driver

   - Qualcomm SM6125, SM8550, QDU1000/QRU1000 GPI controller

  Updates:

   - Runtime pm support for at_xdmac driver

   - IMX sdma binding conversion to yaml and HDMI audio support

   - IMX mxs binding conversion to yaml"

* tag 'dmaengine-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (35 commits)
  dmaengine: idma64: Update bytes_transferred field
  dmaengine: imx-sdma: Set DMA channel to be private
  dmaengine: dw: Move check for paused channel to dwc_get_residue()
  dmaengine: ptdma: check for null desc before calling pt_cmd_callback
  dmaengine: dw-axi-dmac: Do not dereference NULL structure
  dmaengine: idxd: Fix default allowed read buffers value in group
  dmaengine: sf-pdma: pdma_desc memory leak fix
  dmaengine: Simplify dmaenginem_async_device_register() function
  dmaengine: use sysfs_emit() to instead of scnprintf()
  dmaengine: Make an order in struct dma_device definition
  dt-bindings: dma: cleanup examples - indentation, lowercase hex
  dt-bindings: dma: drop unneeded quotes
  dmaengine: xilinx: xdma: Add user logic interrupt support
  dmaengine: xilinx: xdma: Add xilinx xdma driver
  dmaengine: drivers: Use devm_platform_ioremap_resource()
  dmaengine: at_xdmac: remove empty line
  dmaengine: at_xdmac: add runtime pm support
  dmaengine: at_xdmac: align properly function members
  dmaengine: ppc4xx: Convert to use sysfs_emit()/sysfs_emit_at() APIs
  dmaengine: sun6i: Set the maximum segment size
  ...
Linus Torvalds 2023-02-24 17:18:54 -08:00
commit 9e6bfd42b1
88 changed files with 2120 additions and 446 deletions

@@ -11,7 +11,7 @@ maintainers:
- Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
"#dma-cells":

@@ -11,7 +11,7 @@ maintainers:
- Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
"#dma-cells":

@@ -11,7 +11,7 @@ maintainers:
- Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
"#dma-cells":

@@ -14,7 +14,7 @@ description: |
intellectual property (IP)
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -18,7 +18,7 @@ maintainers:
- Martin Povišer <povik+lin@cutebit.org>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -11,7 +11,7 @@ maintainers:
allOf:
- $ref: /schemas/arm/primecell.yaml#
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
# We need a select here so we don't match all nodes with 'arm,primecell'
select:

@@ -10,7 +10,7 @@ maintainers:
- Vinod Koul <vkoul@kernel.org>
allOf:
- $ref: "dma-common.yaml#"
- $ref: dma-common.yaml#
# Everything else is described in the common file
properties:

@@ -10,7 +10,7 @@ maintainers:
- Vinod Koul <vkoul@kernel.org>
allOf:
- $ref: "dma-common.yaml#"
- $ref: dma-common.yaml#
description:
DMA routers are transparent IP blocks used to route DMA request

@@ -64,7 +64,7 @@ required:
- dma-channels
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
- if:
properties:
compatible:

@@ -0,0 +1,149 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/fsl,imx-sdma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale Smart Direct Memory Access (SDMA) Controller for i.MX
maintainers:
- Joy Zou <joy.zou@nxp.com>
allOf:
- $ref: dma-controller.yaml#
properties:
compatible:
oneOf:
- items:
- enum:
- fsl,imx50-sdma
- fsl,imx51-sdma
- fsl,imx53-sdma
- fsl,imx6q-sdma
- fsl,imx7d-sdma
- const: fsl,imx35-sdma
- items:
- enum:
- fsl,imx6sx-sdma
- fsl,imx6sl-sdma
- const: fsl,imx6q-sdma
- items:
- const: fsl,imx6ul-sdma
- const: fsl,imx6q-sdma
- const: fsl,imx35-sdma
- items:
- const: fsl,imx6sll-sdma
- const: fsl,imx6ul-sdma
- items:
- const: fsl,imx8mq-sdma
- const: fsl,imx7d-sdma
- items:
- enum:
- fsl,imx8mp-sdma
- fsl,imx8mn-sdma
- fsl,imx8mm-sdma
- const: fsl,imx8mq-sdma
- items:
- enum:
- fsl,imx25-sdma
- fsl,imx31-sdma
- fsl,imx35-sdma
reg:
maxItems: 1
interrupts:
maxItems: 1
fsl,sdma-ram-script-name:
$ref: /schemas/types.yaml#/definitions/string
description: Should contain the full path of SDMA RAM scripts firmware.
"#dma-cells":
const: 3
description: |
The first cell: request/event ID
The second cell: peripheral types ID
enum:
- MCU domain SSI: 0
- Shared SSI: 1
- MMC: 2
- SDHC: 3
- MCU domain UART: 4
- Shared UART: 5
- FIRI: 6
- MCU domain CSPI: 7
- Shared CSPI: 8
- SIM: 9
- ATA: 10
- CCM: 11
- External peripheral: 12
- Memory Stick Host Controller: 13
- Shared Memory Stick Host Controller: 14
- DSP: 15
- Memory: 16
- FIFO type Memory: 17
- SPDIF: 18
- IPU Memory: 19
- ASRC: 20
- ESAI: 21
- SSI Dual FIFO: 22
description: needs firmware more than ver 2
- Shared ASRC: 23
- SAI: 24
- HDMI Audio: 25
The third cell: transfer priority ID
enum:
- High: 0
- Medium: 1
- Low: 2
gpr:
$ref: /schemas/types.yaml#/definitions/phandle
description: The phandle to the General Purpose Register (GPR) node
fsl,sdma-event-remap:
$ref: /schemas/types.yaml#/definitions/uint32-matrix
maxItems: 2
items:
items:
- description: GPR register offset
- description: GPR register shift
- description: GPR register value
description: |
Register bits of sdma event remap, the format is <reg shift val>.
The order is <RX>, <TX>.
clocks:
maxItems: 2
clock-names:
items:
- const: ipg
- const: ahb
iram:
$ref: /schemas/types.yaml#/definitions/phandle
description: The phandle to the On-chip RAM (OCRAM) node.
required:
- compatible
- reg
- interrupts
- fsl,sdma-ram-script-name
additionalProperties: false
examples:
- |
sdma: dma-controller@83fb0000 {
compatible = "fsl,imx51-sdma", "fsl,imx35-sdma";
reg = <0x83fb0000 0x4000>;
interrupts = <6>;
#dma-cells = <3>;
fsl,sdma-ram-script-name = "sdma-imx51.bin";
};
...
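
A note on consuming this binding: the three cells are decoded by the SDMA
controller driver itself, so a DMA client only requests a channel by name.
A minimal, hypothetical C sketch (the device node, event number and channel
name are illustrative assumptions, not part of this binding):

/*
 * Assumed client wiring, e.g. in an SSI node:
 *     dmas = <&sdma 29 22 0>;    - event 29, SSI Dual FIFO (22), high priority
 *     dma-names = "tx";
 */
#include <linux/dmaengine.h>

static struct dma_chan *demo_get_tx_chan(struct device *dev)
{
	/* the <&sdma ...> cells are parsed by the SDMA driver's xlate */
	return dma_request_chan(dev, "tx");
}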

@@ -0,0 +1,80 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/fsl,mxs-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale Direct Memory Access (DMA) Controller from i.MX23/i.MX28
maintainers:
- Marek Vasut <marex@denx.de>
allOf:
- $ref: dma-controller.yaml#
properties:
compatible:
oneOf:
- items:
- enum:
- fsl,imx6q-dma-apbh
- fsl,imx6sx-dma-apbh
- fsl,imx7d-dma-apbh
- const: fsl,imx28-dma-apbh
- enum:
- fsl,imx23-dma-apbh
- fsl,imx23-dma-apbx
- fsl,imx28-dma-apbh
- fsl,imx28-dma-apbx
reg:
maxItems: 1
clocks:
maxItems: 1
interrupts:
minItems: 4
maxItems: 16
"#dma-cells":
const: 1
dma-channels:
enum: [4, 8, 16]
required:
- compatible
- reg
- "#dma-cells"
- dma-channels
- interrupts
additionalProperties: false
examples:
- |
interrupt-parent = <&irqc>;
dma-controller@80004000 {
compatible = "fsl,imx28-dma-apbh";
reg = <0x80004000 0x2000>;
interrupts = <82 83 84 85
88 88 88 88
88 88 88 88
87 86 0 0>;
#dma-cells = <1>;
dma-channels = <16>;
};
dma-controller@80024000 {
compatible = "fsl,imx28-dma-apbx";
reg = <0x80024000 0x2000>;
interrupts = <78 79 66 0
80 81 68 69
70 71 72 73
74 75 76 77>;
#dma-cells = <1>;
dma-channels = <16>;
};
...

@@ -1,118 +0,0 @@
* Freescale Smart Direct Memory Access (SDMA) Controller for i.MX
Required properties:
- compatible : Should be one of
"fsl,imx25-sdma"
"fsl,imx31-sdma", "fsl,imx31-to1-sdma", "fsl,imx31-to2-sdma"
"fsl,imx35-sdma", "fsl,imx35-to1-sdma", "fsl,imx35-to2-sdma"
"fsl,imx51-sdma"
"fsl,imx53-sdma"
"fsl,imx6q-sdma"
"fsl,imx7d-sdma"
"fsl,imx6ul-sdma"
"fsl,imx8mq-sdma"
"fsl,imx8mm-sdma"
"fsl,imx8mn-sdma"
"fsl,imx8mp-sdma"
The -to variants should be preferred since they allow to determine the
correct ROM script addresses needed for the driver to work without additional
firmware.
- reg : Should contain SDMA registers location and length
- interrupts : Should contain SDMA interrupt
- #dma-cells : Must be <3>.
The first cell specifies the DMA request/event ID. See details below
about the second and third cell.
- fsl,sdma-ram-script-name : Should contain the full path of SDMA RAM
scripts firmware
The second cell of dma phandle specifies the peripheral type of DMA transfer.
The full ID of peripheral types can be found below.
ID transfer type
---------------------
0 MCU domain SSI
1 Shared SSI
2 MMC
3 SDHC
4 MCU domain UART
5 Shared UART
6 FIRI
7 MCU domain CSPI
8 Shared CSPI
9 SIM
10 ATA
11 CCM
12 External peripheral
13 Memory Stick Host Controller
14 Shared Memory Stick Host Controller
15 DSP
16 Memory
17 FIFO type Memory
18 SPDIF
19 IPU Memory
20 ASRC
21 ESAI
22 SSI Dual FIFO (needs firmware ver >= 2)
23 Shared ASRC
24 SAI
The third cell specifies the transfer priority as below.
ID transfer priority
-------------------------
0 High
1 Medium
2 Low
Optional properties:
- gpr : The phandle to the General Purpose Register (GPR) node.
- fsl,sdma-event-remap : Register bits of sdma event remap, the format is
<reg shift val>.
reg is the GPR register offset.
shift is the bit position inside the GPR register.
val is the value of the bit (0 or 1).
Examples:
sdma@83fb0000 {
compatible = "fsl,imx51-sdma", "fsl,imx35-sdma";
reg = <0x83fb0000 0x4000>;
interrupts = <6>;
#dma-cells = <3>;
fsl,sdma-ram-script-name = "sdma-imx51.bin";
};
DMA clients connected to the i.MX SDMA controller must use the format
described in the dma.txt file.
Examples:
ssi2: ssi@70014000 {
compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
reg = <0x70014000 0x4000>;
interrupts = <30>;
clocks = <&clks 49>;
dmas = <&sdma 24 1 0>,
<&sdma 25 1 0>;
dma-names = "rx", "tx";
fsl,fifo-depth = <15>;
};
Using the fsl,sdma-event-remap property:
If we want to use SDMA on the SAI1 port on a MX6SX:
&sdma {
gpr = <&gpr>;
/* SDMA events remap for SAI1_RX and SAI1_TX */
fsl,sdma-event-remap = <0 15 1>, <0 16 1>;
};
The fsl,sdma-event-remap property in this case has two values:
- <0 15 1> means that the offset is 0, so GPR0 is the register of the
SDMA remap. Bit 15 of GPR0 selects between UART4_RX and SAI1_RX.
Setting bit 15 to 1 selects SAI1_RX.
- <0 16 1> means that the offset is 0, so GPR0 is the register of the
SDMA remap. Bit 16 of GPR0 selects between UART4_TX and SAI1_TX.
Setting bit 16 to 1 selects SAI1_TX.

@@ -1,60 +0,0 @@
* Freescale MXS DMA
Required properties:
- compatible : Should be "fsl,<chip>-dma-apbh" or "fsl,<chip>-dma-apbx"
- reg : Should contain registers location and length
- interrupts : Should contain the interrupt numbers of DMA channels.
If a channel is empty/reserved, 0 should be filled in place.
- #dma-cells : Must be <1>. The number cell specifies the channel ID.
- dma-channels : Number of channels supported by the DMA controller
Optional properties:
- interrupt-names : Name of DMA channel interrupts
Supported chips:
imx23, imx28.
Examples:
dma_apbh: dma-apbh@80004000 {
compatible = "fsl,imx28-dma-apbh";
reg = <0x80004000 0x2000>;
interrupts = <82 83 84 85
88 88 88 88
88 88 88 88
87 86 0 0>;
interrupt-names = "ssp0", "ssp1", "ssp2", "ssp3",
"gpmi0", "gmpi1", "gpmi2", "gmpi3",
"gpmi4", "gmpi5", "gpmi6", "gmpi7",
"hsadc", "lcdif", "empty", "empty";
#dma-cells = <1>;
dma-channels = <16>;
};
dma_apbx: dma-apbx@80024000 {
compatible = "fsl,imx28-dma-apbx";
reg = <0x80024000 0x2000>;
interrupts = <78 79 66 0
80 81 68 69
70 71 72 73
74 75 76 77>;
interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
"saif0", "saif1", "i2c0", "i2c1",
"auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
"auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
#dma-cells = <1>;
dma-channels = <16>;
};
DMA clients connected to the MXS DMA controller must use the format
described in the dma.txt file.
Examples:
auart0: serial@8006a000 {
compatible = "fsl,imx28-auart", "fsl,imx23-auart";
reg = <0x8006a000 0x2000>;
interrupts = <112>;
dmas = <&dma_apbx 8>, <&dma_apbx 9>;
dma-names = "rx", "tx";
};

@@ -10,7 +10,7 @@ maintainers:
- Paul Cercueil <paul@crapouillou.net>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -11,7 +11,7 @@ maintainers:
- mallikarjunax.reddy@intel.com
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -14,7 +14,7 @@ description: |
for the UART peripheral bus.
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -16,7 +16,7 @@ maintainers:
- Rajesh Gumasta <rgumasta@nvidia.com>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -14,7 +14,7 @@ maintainers:
- Jon Hunter <jonathanh@nvidia.com>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -15,7 +15,7 @@ maintainers:
- Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -11,7 +11,7 @@ maintainers:
- Bjorn Andersson <andersson@kernel.org>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -14,7 +14,7 @@ description: |
peripheral buses such as I2C, UART, and SPI.
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:
@@ -24,15 +24,18 @@ properties:
- qcom,sm6350-gpi-dma
- items:
- enum:
- qcom,qdu1000-gpi-dma
- qcom,sc7280-gpi-dma
- qcom,sm6115-gpi-dma
- qcom,sm6375-gpi-dma
- qcom,sm8350-gpi-dma
- qcom,sm8450-gpi-dma
- qcom,sm8550-gpi-dma
- const: qcom,sm6350-gpi-dma
- items:
- enum:
- qcom,sdm670-gpi-dma
- qcom,sm6125-gpi-dma
- qcom,sm8150-gpi-dma
- qcom,sm8250-gpi-dma
- const: qcom,sdm845-gpi-dma

@@ -10,7 +10,7 @@ maintainers:
- Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -10,7 +10,7 @@ maintainers:
- Biju Das <biju.das.jz@bp.renesas.com>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -10,7 +10,7 @@ maintainers:
- Miquel Raynal <miquel.raynal@bootlin.com>
allOf:
- $ref: "dma-router.yaml#"
- $ref: dma-router.yaml#
properties:
compatible:

@@ -10,7 +10,7 @@ maintainers:
- Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -23,7 +23,7 @@ description: |
https://static.dev.sifive.com/FU540-C000-v1.0.pdf
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -11,7 +11,7 @@ maintainers:
- Andy Shevchenko <andriy.shevchenko@linux.intel.com>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -13,7 +13,7 @@ description:
Synopsys DesignWare AXI DMA Controller DT Binding
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:
@@ -113,21 +113,21 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
/* example with snps,dw-axi-dmac */
dmac: dma-controller@80000 {
compatible = "snps,axi-dma-1.01a";
reg = <0x80000 0x400>;
clocks = <&core_clk>, <&cfgr_clk>;
clock-names = "core-clk", "cfgr-clk";
interrupt-parent = <&intc>;
interrupts = <27>;
#dma-cells = <1>;
dma-channels = <4>;
snps,dma-masters = <2>;
snps,data-width = <3>;
snps,block-size = <4096 4096 4096 4096>;
snps,priority = <0 1 2 3>;
snps,axi-max-burst-len = <16>;
};
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
/* example with snps,dw-axi-dmac */
dma-controller@80000 {
compatible = "snps,axi-dma-1.01a";
reg = <0x80000 0x400>;
clocks = <&core_clk>, <&cfgr_clk>;
clock-names = "core-clk", "cfgr-clk";
interrupt-parent = <&intc>;
interrupts = <27>;
#dma-cells = <1>;
dma-channels = <4>;
snps,dma-masters = <2>;
snps,data-width = <3>;
snps,block-size = <4096 4096 4096 4096>;
snps,priority = <0 1 2 3>;
snps,axi-max-burst-len = <16>;
};

@@ -14,7 +14,7 @@ maintainers:
- Masahiro Yamada <yamada.masahiro@socionext.com>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -15,7 +15,7 @@ maintainers:
- Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
compatible:

@@ -53,7 +53,7 @@ maintainers:
- Amelie Delaunay <amelie.delaunay@foss.st.com>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
"#dma-cells":

@@ -10,7 +10,7 @@ maintainers:
- Amelie Delaunay <amelie.delaunay@foss.st.com>
allOf:
- $ref: "dma-router.yaml#"
- $ref: dma-router.yaml#
properties:
"#dma-cells":

@@ -53,7 +53,7 @@ maintainers:
- Amelie Delaunay <amelie.delaunay@foss.st.com>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
"#dma-cells":

@@ -10,7 +10,7 @@ maintainers:
- Linus Walleij <linus.walleij@linaro.org>
allOf:
- $ref: "dma-controller.yaml#"
- $ref: dma-controller.yaml#
properties:
"#dma-cells":
@@ -147,13 +147,13 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/mfd/dbx500-prcmu.h>
dma-controller@801C0000 {
compatible = "stericsson,db8500-dma40", "stericsson,dma40";
reg = <0x801C0000 0x1000>, <0x40010000 0x800>;
reg-names = "base", "lcpa";
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <3>;
memcpy-channels = <56 57 58 59 60>;
clocks = <&prcmu_clk PRCMU_DMACLK>;
dma-controller@801c0000 {
compatible = "stericsson,db8500-dma40", "stericsson,dma40";
reg = <0x801c0000 0x1000>, <0x40010000 0x800>;
reg-names = "base", "lcpa";
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <3>;
memcpy-channels = <56 57 58 59 60>;
clocks = <&prcmu_clk PRCMU_DMACLK>;
};
...

@@ -28,13 +28,19 @@ description: |
PDMAs can be configured via BCDMA split channel's peer registers to match with
the configuration of the legacy peripheral.
allOf:
- $ref: /schemas/dma/dma-controller.yaml#
- $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
properties:
compatible:
const: ti,am64-dmss-bcdma
enum:
- ti,am62a-dmss-bcdma-csirx
- ti,am64-dmss-bcdma
reg:
minItems: 3
maxItems: 5
reg-names:
minItems: 3
maxItems: 5
"#dma-cells":
const: 3
@@ -65,19 +71,13 @@
cell 3: ASEL value for the channel
reg:
maxItems: 5
reg-names:
items:
- const: gcfg
- const: bchanrt
- const: rchanrt
- const: tchanrt
- const: ringrt
msi-parent: true
power-domains:
description:
Power domain if available
maxItems: 1
ti,asel:
$ref: /schemas/types.yaml#/definitions/uint32
description: ASEL value for non slave channels
@@ -123,10 +123,51 @@ required:
- msi-parent
- ti,sci
- ti,sci-dev-id
- ti,sci-rm-range-bchan
- ti,sci-rm-range-tchan
- ti,sci-rm-range-rchan
allOf:
- $ref: /schemas/dma/dma-controller.yaml#
- $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
- if:
properties:
compatible:
contains:
const: ti,am62a-dmss-bcdma-csirx
then:
properties:
ti,sci-rm-range-bchan: false
ti,sci-rm-range-tchan: false
reg:
maxItems: 3
reg-names:
items:
- const: gcfg
- const: rchanrt
- const: ringrt
required:
- power-domains
else:
properties:
reg:
minItems: 5
reg-names:
items:
- const: gcfg
- const: bchanrt
- const: rchanrt
- const: tchanrt
- const: ringrt
required:
- ti,sci-rm-range-bchan
- ti,sci-rm-range-tchan
unevaluatedProperties: false
examples:

@@ -22946,6 +22946,17 @@ F: Documentation/devicetree/bindings/media/xilinx/
F: drivers/media/platform/xilinx/
F: include/uapi/linux/xilinx-v4l2-controls.h
XILINX XDMA DRIVER
M: Lizhi Hou <lizhi.hou@amd.com>
M: Brian Xu <brian.xu@amd.com>
M: Raj Kumar Rampelli <raj.kumar.rampelli@amd.com>
L: dmaengine@vger.kernel.org
S: Supported
F: drivers/dma/xilinx/xdma-regs.h
F: drivers/dma/xilinx/xdma.c
F: include/linux/dma/amd_xdma.h
F: include/linux/platform_data/amd_xdma.h
XILINX ZYNQMP DPDMA DRIVER
M: Hyun Kwon <hyun.kwon@xilinx.com>
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>

@@ -245,7 +245,7 @@ config FSL_RAID
config HISI_DMA
tristate "HiSilicon DMA Engine support"
depends on ARM64 || COMPILE_TEST
depends on ARCH_HISI || COMPILE_TEST
depends on PCI_MSI
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -716,6 +716,20 @@ config XILINX_DMA
the scatter gather interface with multiple channels independent
configuration support.
config XILINX_XDMA
tristate "Xilinx DMA/Bridge Subsystem DMA Engine"
depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select REGMAP_MMIO
help
Enable support for Xilinx DMA/Bridge Subsystem DMA engine. The DMA
provides high performance block data movement between Host memory
and the DMA subsystem. These direct memory transfers can be both in
the Host to Card (H2C) and Card to Host (C2H) transfers.
The core also provides up to 16 user interrupt wires that generate
interrupts to the host.
config XILINX_ZYNQMP_DMA
tristate "Xilinx ZynqMP DMA Engine"
depends on ARCH_ZYNQ || MICROBLAZE || ARM64 || COMPILE_TEST
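
A hedged illustration of the user interrupt wires mentioned in the XILINX_XDMA
help text above: a client driver could route one to the host through the
helper this series adds in include/linux/dma/amd_xdma.h (the helper name is an
assumption taken from that header, not quoted from the diff):

#include <linux/dma/amd_xdma.h>
#include <linux/platform_device.h>

/* Sketch: enable XDMA user interrupt wire 0 (assumed API). */
static int demo_enable_xdma_user_irq(struct platform_device *xdma_pdev)
{
	return xdma_enable_user_irq(xdma_pdev, 0);
}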

@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include "dmaengine.h"
@@ -240,6 +241,7 @@ struct at_xdmac_chan {
struct at_xdmac {
struct dma_device dma;
void __iomem *regs;
struct device *dev;
int irq;
struct clk *clk;
u32 save_gim;
@@ -361,13 +363,65 @@ MODULE_PARM_DESC(init_nr_desc_per_channel,
"initial descriptors per channel (default: 64)");
static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan)
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *desc, *_desc;
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
if (!desc->active_xfer)
continue;
pm_runtime_mark_last_busy(atxdmac->dev);
pm_runtime_put_autosuspend(atxdmac->dev);
}
}
static int at_xdmac_runtime_resume_descriptors(struct at_xdmac_chan *atchan)
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *desc, *_desc;
int ret;
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
if (!desc->active_xfer)
continue;
ret = pm_runtime_resume_and_get(atxdmac->dev);
if (ret < 0)
return ret;
}
return 0;
}
static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
int ret;
ret = pm_runtime_resume_and_get(atxdmac->dev);
if (ret < 0)
return false;
ret = !!(at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask);
pm_runtime_mark_last_busy(atxdmac->dev);
pm_runtime_put_autosuspend(atxdmac->dev);
return ret;
}
static void at_xdmac_off(struct at_xdmac *atxdmac)
{
struct dma_chan *chan, *_chan;
struct at_xdmac_chan *atchan;
int ret;
ret = pm_runtime_resume_and_get(atxdmac->dev);
if (ret < 0)
return;
at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
/* Wait that all chans are disabled. */
@@ -375,6 +429,18 @@ static void at_xdmac_off(struct at_xdmac *atxdmac)
cpu_relax();
at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
/* Decrement runtime PM ref counter for each active descriptor. */
if (!list_empty(&atxdmac->dma.channels)) {
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels,
device_node) {
atchan = to_at_xdmac_chan(chan);
at_xdmac_runtime_suspend_descriptors(atchan);
}
}
pm_runtime_mark_last_busy(atxdmac->dev);
pm_runtime_put_autosuspend(atxdmac->dev);
}
/* Call with lock hold. */
@@ -383,6 +449,11 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
u32 reg;
int ret;
ret = pm_runtime_resume_and_get(atxdmac->dev);
if (ret < 0)
return;
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
@@ -462,7 +533,6 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}
static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -1456,14 +1526,14 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
struct dma_tx_state *txstate)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *desc, *_desc, *iter;
struct list_head *descs_list;
enum dma_status ret;
int residue, retry;
int residue, retry, pm_status;
u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
@@ -1473,6 +1543,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
if (ret == DMA_COMPLETE || !txstate)
return ret;
pm_status = pm_runtime_resume_and_get(atxdmac->dev);
if (pm_status < 0)
return DMA_ERROR;
spin_lock_irqsave(&atchan->lock, flags);
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
@@ -1590,6 +1664,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
spin_unlock:
spin_unlock_irqrestore(&atchan->lock, flags);
pm_runtime_mark_last_busy(atxdmac->dev);
pm_runtime_put_autosuspend(atxdmac->dev);
return ret;
}
@@ -1636,6 +1712,11 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *bad_desc;
int ret;
ret = pm_runtime_resume_and_get(atxdmac->dev);
if (ret < 0)
return;
/*
* The descriptor currently at the head of the active list is
@@ -1665,12 +1746,16 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
bad_desc->lld.mbr_ubc);
pm_runtime_mark_last_busy(atxdmac->dev);
pm_runtime_put_autosuspend(atxdmac->dev);
/* Then continue with usual descriptor management */
}
static void at_xdmac_tasklet(struct tasklet_struct *t)
{
struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *desc;
struct dma_async_tx_descriptor *txd;
u32 error_mask;
@@ -1720,6 +1805,13 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
at_xdmac_advance_work(atchan);
spin_unlock_irq(&atchan->lock);
/*
* Decrement runtime PM ref counter incremented in
* at_xdmac_start_xfer().
*/
pm_runtime_mark_last_busy(atxdmac->dev);
pm_runtime_put_autosuspend(atxdmac->dev);
}
static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
@@ -1811,19 +1903,31 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
unsigned long flags;
int ret;
dev_dbg(chan2dev(chan), "%s\n", __func__);
if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
return 0;
ret = pm_runtime_resume_and_get(atxdmac->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&atchan->lock, flags);
at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
& (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
cpu_relax();
/* Decrement runtime PM ref counter for each active descriptor. */
at_xdmac_runtime_suspend_descriptors(atchan);
spin_unlock_irqrestore(&atchan->lock, flags);
pm_runtime_mark_last_busy(atxdmac->dev);
pm_runtime_put_autosuspend(atxdmac->dev);
return 0;
}
@@ -1832,20 +1936,32 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
unsigned long flags;
int ret;
dev_dbg(chan2dev(chan), "%s\n", __func__);
ret = pm_runtime_resume_and_get(atxdmac->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&atchan->lock, flags);
if (!at_xdmac_chan_is_paused(atchan)) {
spin_unlock_irqrestore(&atchan->lock, flags);
return 0;
}
if (!at_xdmac_chan_is_paused(atchan))
goto unlock;
/* Increment runtime PM ref counter for each active descriptor. */
ret = at_xdmac_runtime_resume_descriptors(atchan);
if (ret < 0)
goto unlock;
at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags);
return 0;
unlock:
spin_unlock_irqrestore(&atchan->lock, flags);
pm_runtime_mark_last_busy(atxdmac->dev);
pm_runtime_put_autosuspend(atxdmac->dev);
return ret;
}
static int at_xdmac_device_terminate_all(struct dma_chan *chan)
@@ -1854,9 +1970,14 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
unsigned long flags;
int ret;
dev_dbg(chan2dev(chan), "%s\n", __func__);
ret = pm_runtime_resume_and_get(atxdmac->dev);
if (ret < 0)
return ret;
spin_lock_irqsave(&atchan->lock, flags);
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
@@ -1867,12 +1988,24 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
list_del(&desc->xfer_node);
list_splice_tail_init(&desc->descs_list,
&atchan->free_descs_list);
/*
* We incremented the runtime PM reference count on
* at_xdmac_start_xfer() for this descriptor. Now it's time
* to release it.
*/
if (desc->active_xfer) {
pm_runtime_put_autosuspend(atxdmac->dev);
pm_runtime_mark_last_busy(atxdmac->dev);
}
}
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags);
pm_runtime_mark_last_busy(atxdmac->dev);
pm_runtime_put_autosuspend(atxdmac->dev);
return 0;
}
@@ -1974,6 +2107,11 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
{
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
struct dma_chan *chan, *_chan;
int ret;
ret = pm_runtime_resume_and_get(atxdmac->dev);
if (ret < 0)
return ret;
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
@@ -1986,12 +2124,13 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
}
at_xdmac_runtime_suspend_descriptors(atchan);
}
atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
at_xdmac_off(atxdmac);
clk_disable_unprepare(atxdmac->clk);
return 0;
return pm_runtime_force_suspend(atxdmac->dev);
}
static int __maybe_unused atmel_xdmac_resume(struct device *dev)
@@ -2003,8 +2142,8 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
int i;
int ret;
ret = clk_prepare_enable(atxdmac->clk);
if (ret)
ret = pm_runtime_force_resume(atxdmac->dev);
if (ret < 0)
return ret;
at_xdmac_axi_config(pdev);
@@ -2019,6 +2158,11 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
atchan = to_at_xdmac_chan(chan);
ret = at_xdmac_runtime_resume_descriptors(atchan);
if (ret < 0)
return ret;
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
if (at_xdmac_chan_is_cyclic(atchan)) {
if (at_xdmac_chan_is_paused(atchan))
@@ -2030,9 +2174,29 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
}
}
pm_runtime_mark_last_busy(atxdmac->dev);
pm_runtime_put_autosuspend(atxdmac->dev);
return 0;
}
static int __maybe_unused atmel_xdmac_runtime_suspend(struct device *dev)
{
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
clk_disable(atxdmac->clk);
return 0;
}
static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev)
{
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
return clk_enable(atxdmac->clk);
}
static int at_xdmac_probe(struct platform_device *pdev)
{
struct at_xdmac *atxdmac;
@@ -2071,6 +2235,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
atxdmac->regs = base;
atxdmac->irq = irq;
atxdmac->dev = &pdev->dev;
atxdmac->layout = of_device_get_match_data(&pdev->dev);
if (!atxdmac->layout)
@@ -2135,11 +2300,20 @@ static int at_xdmac_probe(struct platform_device *pdev)
atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
/* Disable all chans and interrupts. */
at_xdmac_off(atxdmac);
platform_set_drvdata(pdev, atxdmac);
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
/* Init channels. */
INIT_LIST_HEAD(&atxdmac->dma.channels);
/* Disable all chans and interrupts. */
at_xdmac_off(atxdmac);
for (i = 0; i < nr_channels; i++) {
struct at_xdmac_chan *atchan = &atxdmac->chan[i];
@@ -2159,12 +2333,11 @@ static int at_xdmac_probe(struct platform_device *pdev)
while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
cpu_relax();
}
platform_set_drvdata(pdev, atxdmac);
ret = dma_async_device_register(&atxdmac->dma);
if (ret) {
dev_err(&pdev->dev, "fail to register DMA engine device\n");
goto err_clk_disable;
goto err_pm_disable;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -2179,10 +2352,18 @@ static int at_xdmac_probe(struct platform_device *pdev)
at_xdmac_axi_config(pdev);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
err_dma_unregister:
dma_async_device_unregister(&atxdmac->dma);
err_pm_disable:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
err_clk_disable:
clk_disable_unprepare(atxdmac->clk);
err_free_irq:
@@ -2198,6 +2379,9 @@ static int at_xdmac_remove(struct platform_device *pdev)
at_xdmac_off(atxdmac);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&atxdmac->dma);
pm_runtime_disable(atxdmac->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
clk_disable_unprepare(atxdmac->clk);
free_irq(atxdmac->irq, atxdmac);
@@ -2215,6 +2399,8 @@ static int at_xdmac_remove(struct platform_device *pdev)
static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
.prepare = atmel_xdmac_prepare,
SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
SET_RUNTIME_PM_OPS(atmel_xdmac_runtime_suspend,
atmel_xdmac_runtime_resume, NULL)
};
static const struct of_device_id atmel_xdmac_dt_ids[] = {
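
The at_xdmac runtime PM changes above reduce to one recurring pattern around
every path that touches the controller's registers; as a generic sketch
(illustrative function, not taken from the patch):

#include <linux/pm_runtime.h>

static int demo_touch_registers(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* wake the controller */
	if (ret < 0)
		return ret;

	/* ... access controller registers here ... */

	pm_runtime_mark_last_busy(dev);		/* re-arm the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference */
	return 0;
}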

@@ -878,7 +878,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
static int bcm2835_dma_probe(struct platform_device *pdev)
{
struct bcm2835_dmadev *od;
struct resource *res;
void __iomem *base;
int rc;
int i, j;
@@ -902,8 +901,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
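
The same two-lines-to-one conversion repeats across many of the drivers below;
in isolation it looks like this (hypothetical probe for illustration):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* replaces platform_get_resource() + devm_ioremap_resource() */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}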

@@ -910,7 +910,6 @@ static int axi_dmac_probe(struct platform_device *pdev)
{
struct dma_device *dma_dev;
struct axi_dmac *dmac;
struct resource *res;
struct regmap *regmap;
unsigned int version;
int ret;
@@ -925,8 +924,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (dmac->irq == 0)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmac->base = devm_ioremap_resource(&pdev->dev, res);
dmac->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);

@@ -172,7 +172,7 @@ static ssize_t memcpy_count_show(struct device *dev,
if (chan) {
for_each_possible_cpu(i)
count += per_cpu_ptr(chan->local, i)->memcpy_count;
err = sprintf(buf, "%lu\n", count);
err = sysfs_emit(buf, "%lu\n", count);
} else
err = -ENODEV;
mutex_unlock(&dma_list_mutex);
@@ -194,7 +194,7 @@ static ssize_t bytes_transferred_show(struct device *dev,
if (chan) {
for_each_possible_cpu(i)
count += per_cpu_ptr(chan->local, i)->bytes_transferred;
err = sprintf(buf, "%lu\n", count);
err = sysfs_emit(buf, "%lu\n", count);
} else
err = -ENODEV;
mutex_unlock(&dma_list_mutex);
@@ -212,7 +212,7 @@ static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
mutex_lock(&dma_list_mutex);
chan = dev_to_dma_chan(dev);
if (chan)
err = sprintf(buf, "%d\n", chan->client_count);
err = sysfs_emit(buf, "%d\n", chan->client_count);
else
err = -ENODEV;
mutex_unlock(&dma_list_mutex);
@@ -1323,11 +1323,8 @@ void dma_async_device_unregister(struct dma_device *device)
}
EXPORT_SYMBOL(dma_async_device_unregister);
static void dmam_device_release(struct device *dev, void *res)
static void dmaenginem_async_device_unregister(void *device)
{
struct dma_device *device;
device = *(struct dma_device **)res;
dma_async_device_unregister(device);
}
@@ -1339,22 +1336,13 @@ static void dmam_device_release(struct device *dev, void *res)
*/
int dmaenginem_async_device_register(struct dma_device *device)
{
void *p;
int ret;
p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
if (!p)
return -ENOMEM;
ret = dma_async_device_register(device);
if (!ret) {
*(struct dma_device **)p = device;
devres_add(device->dev, p);
} else {
devres_free(p);
}
if (ret)
return ret;
return ret;
return devm_add_action(device->dev, dmaenginem_async_device_unregister, device);
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
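
After the simplification above, dmaenginem_async_device_register() schedules
the unregister step through devm_add_action(), so a provider's probe needs no
explicit cleanup. A sketch with illustrative names (not from this series):

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static int demo_provider_probe(struct platform_device *pdev)
{
	struct dma_device *ddev;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	ddev->dev = &pdev->dev;
	/* ... set capabilities, channel list and callbacks ... */

	/* dma_async_device_unregister() now runs automatically on unbind */
	return dmaenginem_async_device_register(ddev);
}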

@@ -325,8 +325,6 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
completed_length = completed_blocks * len;
bytes = length - completed_length;
} else {
bytes = vd_to_axi_desc(vdesc)->length;
}
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -1371,7 +1369,6 @@ static int dw_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct axi_dma_chip *chip;
struct resource *mem;
struct dw_axi_dma *dw;
struct dw_axi_dma_hcfg *hdata;
u32 i;
@@ -1397,8 +1394,7 @@ static int dw_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
chip->regs = devm_ioremap_resource(chip->dev, mem);
chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);

@@ -889,7 +889,8 @@ static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
return NULL;
}
static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie,
enum dma_status *status)
{
struct dw_desc *desc;
unsigned long flags;
@@ -903,6 +904,8 @@ static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
residue = desc->residue;
if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
residue -= dwc_get_sent(dwc);
if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
*status = DMA_PAUSED;
} else {
residue = desc->total_len;
}
@@ -932,11 +935,7 @@ dwc_tx_status(struct dma_chan *chan,
if (ret == DMA_COMPLETE)
return ret;
dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
return DMA_PAUSED;
dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret));
return ret;
}
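
The client-visible effect of folding the pause check into
dwc_get_residue_and_status(), as a sketch: a paused channel now reports
DMA_PAUSED together with a residue computed under the same lock.

#include <linux/dmaengine.h>
#include <linux/printk.h>

static void demo_report(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) == DMA_PAUSED)
		pr_info("transfer paused, %u bytes remaining\n", state.residue);
}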

@@ -272,7 +272,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
const struct fsl_edma_drvdata *drvdata = NULL;
struct fsl_edma_chan *fsl_chan;
struct edma_regs *regs;
struct resource *res;
int len, chans;
int ret, i;
@@ -298,8 +297,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
@@ -323,8 +321,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
/* on error: disable all previously enabled clks */
fsl_disable_clocks(fsl_edma, i);

@@ -1119,7 +1119,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
int ret, i;
int blk_num, blk_off;
u32 len, chans, queues;
struct resource *res;
struct fsl_qdma_chan *fsl_chan;
struct fsl_qdma_engine *fsl_qdma;
struct device_node *np = pdev->dev.of_node;
@@ -1183,18 +1182,15 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->status[i])
return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
fsl_qdma->ctrl_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fsl_qdma->ctrl_base))
return PTR_ERR(fsl_qdma->ctrl_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
fsl_qdma->status_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(fsl_qdma->status_base))
return PTR_ERR(fsl_qdma->status_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
fsl_qdma->block_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(fsl_qdma->block_base))
return PTR_ERR(fsl_qdma->block_base);
fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);

@@ -137,8 +137,11 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
u32 status_err, u32 status_xfer)
{
struct idma64_chan *idma64c = &idma64->chan[c];
struct dma_chan_percpu *stat;
struct idma64_desc *desc;
stat = this_cpu_ptr(idma64c->vchan.chan.local);
spin_lock(&idma64c->vchan.lock);
desc = idma64c->desc;
if (desc) {
@@ -149,6 +152,7 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
dma_writel(idma64, CLEAR(XFER), idma64c->mask);
desc->status = DMA_COMPLETE;
vchan_cookie_complete(&desc->vdesc);
stat->bytes_transferred += desc->length;
idma64_start_transfer(idma64c);
}
@@ -627,7 +631,6 @@ static int idma64_platform_probe(struct platform_device *pdev)
struct idma64_chip *chip;
struct device *dev = &pdev->dev;
struct device *sysdev = dev->parent;
struct resource *mem;
int ret;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
@@ -638,8 +641,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
chip->regs = devm_ioremap_resource(dev, mem);
chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);

@@ -699,9 +699,13 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
group->num_engines = 0;
group->num_wqs = 0;
group->use_rdbuf_limit = false;
group->rdbufs_allowed = 0;
/*
* The default value is the same as the value of
* total read buffers in GRPCAP.
*/
group->rdbufs_allowed = idxd->max_rdbufs;
group->rdbufs_reserved = 0;
if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
group->tc_a = 1;
group->tc_b = 1;
} else {
@@ -934,11 +938,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
group->grpcfg.flags.tc_b = group->tc_b;
group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
if (group->rdbufs_allowed)
group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
else
group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
}

@@ -63,12 +63,6 @@ static void op_flag_setup(unsigned long flags, u32 *desc_flags)
*desc_flags |= IDXD_OP_FLAG_RCI;
}
static inline void set_completion_address(struct idxd_desc *desc,
u64 *compl_addr)
{
*compl_addr = desc->compl_dma;
}
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
struct dsa_hw_desc *hw, char opcode,
u64 addr_f1, u64 addr_f2, u64 len,

@@ -295,13 +295,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
}
idxd->groups[i] = group;
if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
group->tc_a = 1;
group->tc_b = 1;
} else {
group->tc_a = -1;
group->tc_b = -1;
}
/*
* The default value is the same as the value of
* total read buffers in GRPCAP.
*/
group->rdbufs_allowed = idxd->max_rdbufs;
}
return 0;

@@ -387,7 +387,7 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
return -EPERM;
if (val < 0 || val > 7)
@@ -429,7 +429,7 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
return -EPERM;
if (val < 0 || val > 7)

@@ -886,7 +886,6 @@ static int img_mdc_runtime_resume(struct device *dev)
static int mdc_dma_probe(struct platform_device *pdev)
{
struct mdc_dma *mdma;
struct resource *res;
unsigned int i;
u32 val;
int ret;
@@ -898,8 +897,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->soc = of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdma->regs = devm_ioremap_resource(&pdev->dev, res);
mdma->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdma->regs))
return PTR_ERR(mdma->regs);

@@ -1038,7 +1038,6 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
static int __init imxdma_probe(struct platform_device *pdev)
{
struct imxdma_engine *imxdma;
struct resource *res;
int ret, i;
int irq, irq_err;
@@ -1049,8 +1048,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
imxdma->dev = &pdev->dev;
imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imxdma->base = devm_ioremap_resource(&pdev->dev, res);
imxdma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imxdma->base))
return PTR_ERR(imxdma->base);

@@ -954,7 +954,10 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
desc = sdmac->desc;
if (desc) {
if (sdmac->flags & IMX_DMA_SG_LOOP) {
sdma_update_channel_loop(sdmac);
if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
sdma_update_channel_loop(sdmac);
else
vchan_cyclic_callback(&desc->vd);
} else {
mxc_sdma_handle_channel_normal(sdmac);
vchan_cookie_complete(&desc->vd);
@@ -1074,6 +1077,10 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
break;
case IMX_DMATYPE_HDMI:
emi_2_per = sdma->script_addrs->hdmi_dma_addr;
sdmac->is_ram_script = true;
break;
default:
dev_err(sdma->dev, "Unsupported transfer type %d\n",
peripheral_type);
@@ -1125,11 +1132,16 @@ static int sdma_load_context(struct sdma_channel *sdmac)
/* Send by context the event mask,base address for peripheral
* and watermark level
*/
context->gReg[0] = sdmac->event_mask[1];
context->gReg[1] = sdmac->event_mask[0];
context->gReg[2] = sdmac->per_addr;
context->gReg[6] = sdmac->shp_addr;
context->gReg[7] = sdmac->watermark_level;
if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
context->gReg[4] = sdmac->per_addr;
context->gReg[6] = sdmac->shp_addr;
} else {
context->gReg[0] = sdmac->event_mask[1];
context->gReg[1] = sdmac->event_mask[0];
context->gReg[2] = sdmac->per_addr;
context->gReg[6] = sdmac->shp_addr;
context->gReg[7] = sdmac->watermark_level;
}
bd0->mode.command = C0_SETDM;
bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
@@ -1513,7 +1525,7 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
desc->sdmac = sdmac;
desc->num_bd = bds;
if (sdma_alloc_bd(desc))
if (bds && sdma_alloc_bd(desc))
goto err_desc_out;
/* No slave_config called in MEMCPY case, so do here */
@@ -1680,13 +1692,16 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
int num_periods = buf_len / period_len;
int num_periods = 0;
int channel = sdmac->channel;
int i = 0, buf = 0;
struct sdma_desc *desc;
dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
num_periods = buf_len / period_len;
sdma_config_write(chan, &sdmac->slave_config, direction);
desc = sdma_transfer_init(sdmac, direction, num_periods);
@@ -1703,6 +1718,9 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
goto err_bd_out;
}
if (sdmac->peripheral_type == IMX_DMATYPE_HDMI)
return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
while (buf < buf_len) {
struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
@@ -1763,6 +1781,10 @@ static int sdma_config_write(struct dma_chan *chan,
sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
SDMA_WATERMARK_LEVEL_HWML;
sdmac->word_size = dmaengine_cfg->dst_addr_width;
} else if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
sdmac->per_address = dmaengine_cfg->dst_addr;
sdmac->per_address2 = dmaengine_cfg->src_addr;
sdmac->watermark_level = 0;
} else {
sdmac->per_address = dmaengine_cfg->dst_addr;
sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
@@ -2169,7 +2191,6 @@ static int sdma_probe(struct platform_device *pdev)
const char *fw_name;
int ret;
int irq;
struct resource *iores;
struct resource spba_res;
int i;
struct sdma_engine *sdma;
@@ -2192,8 +2213,7 @@ static int sdma_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
sdma->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdma->regs))
return PTR_ERR(sdma->regs);
@@ -2234,6 +2254,7 @@ static int sdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
dma_cap_set(DMA_PRIVATE, sdma->dma_device.cap_mask);
INIT_LIST_HEAD(&sdma->dma_device.channels);
/* Initialize channel parameters */
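
A hedged sketch of an HDMI audio client on the support added above: for
IMX_DMATYPE_HDMI the driver takes both addresses from the slave config
(per_address/per_address2) and builds a single cyclic descriptor without
per-period buffer descriptors, so the period length goes unused. Channel
acquisition and the addresses below are assumptions, not from this patch.

#include <linux/dmaengine.h>

static int demo_hdmi_audio_start(struct dma_chan *chan, dma_addr_t buf,
				 size_t len, dma_addr_t hdmi_fifo)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr  = hdmi_fifo,	/* becomes sdmac->per_address */
		.src_addr  = buf,	/* becomes sdmac->per_address2 */
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* period length is ignored for the HDMI peripheral type */
	desc = dmaengine_prep_dma_cyclic(chan, buf, len, 0, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}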

@@ -182,7 +182,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
struct fsl_edma_engine *mcf_edma;
struct fsl_edma_chan *mcf_chan;
struct edma_regs *regs;
struct resource *res;
int ret, i, len, chans;
pdata = dev_get_platdata(&pdev->dev);
@@ -210,9 +209,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
mutex_init(&mcf_edma->fsl_edma_mutex);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcf_edma->membase))
return PTR_ERR(mcf_edma->membase);

@@ -896,7 +896,6 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
struct mtk_hsdma_device *hsdma;
struct mtk_hsdma_vchan *vc;
struct dma_device *dd;
struct resource *res;
int i, err;
hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
@@ -905,8 +904,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
dd = &hsdma->ddev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hsdma->base = devm_ioremap_resource(&pdev->dev, res);
hsdma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsdma->base))
return PTR_ERR(hsdma->base);

@@ -1022,7 +1022,6 @@ static int mmp_pdma_probe(struct platform_device *op)
struct mmp_pdma_device *pdev;
const struct of_device_id *of_id;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
struct resource *iores;
int i, ret, irq = 0;
int dma_channels = 0, irq_num = 0;
const enum dma_slave_buswidth widths =
@@ -1037,8 +1036,7 @@ static int mmp_pdma_probe(struct platform_device *op)
spin_lock_init(&pdev->phy_lock);
iores = platform_get_resource(op, IORESOURCE_MEM, 0);
pdev->base = devm_ioremap_resource(pdev->dev, iores);
pdev->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);

@@ -639,7 +639,6 @@ static int mmp_tdma_probe(struct platform_device *pdev)
enum mmp_tdma_type type;
const struct of_device_id *of_id;
struct mmp_tdma_device *tdev;
struct resource *iores;
int i, ret;
int irq = 0, irq_num = 0;
int chan_num = TDMA_CHANNEL_NUM;
@@ -663,8 +662,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
irq_num++;
}
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tdev->base = devm_ioremap_resource(&pdev->dev, iores);
tdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdev->base))
return PTR_ERR(tdev->base);

@@ -563,7 +563,6 @@ static int moxart_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct resource *res;
void __iomem *dma_base_addr;
int ret, i;
unsigned int irq;
@@ -580,8 +579,7 @@ static int moxart_probe(struct platform_device *pdev)
return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dma_base_addr = devm_ioremap_resource(dev, res);
dma_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dma_base_addr))
return PTR_ERR(dma_base_addr);

@@ -714,7 +714,6 @@ static int mv_xor_v2_resume(struct platform_device *dev)
static int mv_xor_v2_probe(struct platform_device *pdev)
{
struct mv_xor_v2_device *xor_dev;
struct resource *res;
int i, ret = 0;
struct dma_device *dma_dev;
struct mv_xor_v2_sw_desc *sw_desc;
@@ -726,13 +725,11 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (!xor_dev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
xor_dev->dma_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xor_dev->dma_base))
return PTR_ERR(xor_dev->dma_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
xor_dev->glob_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(xor_dev->glob_base))
return PTR_ERR(xor_dev->glob_base);

@@ -746,7 +746,6 @@ static int mxs_dma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
struct resource *iores;
int ret, i;
mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
@@ -763,8 +762,7 @@ static int mxs_dma_probe(struct platform_device *pdev)
mxs_dma->type = dma_type->type;
mxs_dma->dev_id = dma_type->id;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
mxs_dma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxs_dma->base))
return PTR_ERR(mxs_dma->base);

@@ -1294,7 +1294,6 @@ static int nbpf_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct nbpf_device *nbpf;
struct dma_device *dma_dev;
struct resource *iomem;
const struct nbpf_config *cfg;
int num_channels;
int ret, irq, eirq, i;
@ -1318,8 +1317,7 @@ static int nbpf_probe(struct platform_device *pdev)
dma_dev = &nbpf->dma_dev;
dma_dev->dev = dev;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nbpf->base = devm_ioremap_resource(dev, iomem);
nbpf->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nbpf->base))
return PTR_ERR(nbpf->base);

View file

@ -4299,9 +4299,8 @@ static ssize_t devices_show(struct device_driver *dev, char *buf)
for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
if (ppc440spe_adma_devices[i] == -1)
continue;
size += scnprintf(buf + size, PAGE_SIZE - size,
"PPC440SP(E)-ADMA.%d: %s\n", i,
ppc_adma_errors[ppc440spe_adma_devices[i]]);
size += sysfs_emit_at(buf, size, "PPC440SP(E)-ADMA.%d: %s\n",
i, ppc_adma_errors[ppc440spe_adma_devices[i]]);
}
return size;
}
@ -4309,9 +4308,8 @@ static DRIVER_ATTR_RO(devices);
static ssize_t enable_show(struct device_driver *dev, char *buf)
{
return snprintf(buf, PAGE_SIZE,
"PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
ppc440spe_r6_enabled ? "EN" : "DIS");
return sysfs_emit(buf, "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
ppc440spe_r6_enabled ? "EN" : "DIS");
}
static ssize_t enable_store(struct device_driver *dev, const char *buf,
@ -4362,7 +4360,7 @@ static ssize_t poly_show(struct device_driver *dev, char *buf)
reg &= 0xFF;
#endif
size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
size = sysfs_emit(buf, "PPC440SP(e) RAID-6 driver "
"uses 0x1%02x polynomial.\n", reg);
return size;
}
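
sysfs_emit() and sysfs_emit_at() encode the sysfs contract (buf is a page-sized, page-aligned buffer) and sanity-check the offset, so show() callbacks no longer need to thread PAGE_SIZE - size arithmetic through every scnprintf() call. A sketch of both helpers in a hypothetical driver-attribute callback (foo_status is invented for illustration):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device_driver *drv, char *buf)
{
	static const char * const foo_status[] = { "idle", "busy" };
	ssize_t size = 0;
	int i;

	/* sysfs_emit_at() clamps the output to PAGE_SIZE internally */
	for (i = 0; i < ARRAY_SIZE(foo_status); i++)
		size += sysfs_emit_at(buf, size, "engine%d: %s\n",
				      i, foo_status[i]);

	return size;
}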

View file

@ -254,7 +254,7 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
spin_unlock_irqrestore(&chan->vc.lock, flags);
/* If there was nothing active, start processing */
if (engine_is_idle)
if (engine_is_idle && desc)
pt_cmd_callback(desc, 0);
}

View file

@ -1346,7 +1346,6 @@ static int pxad_probe(struct platform_device *op)
const struct of_device_id *of_id;
const struct dma_slave_map *slave_map = NULL;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
struct resource *iores;
int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
@ -1358,8 +1357,7 @@ static int pxad_probe(struct platform_device *op)
spin_lock_init(&pdev->phy_lock);
iores = platform_get_resource(op, IORESOURCE_MEM, 0);
pdev->base = devm_ioremap_resource(&op->dev, iores);
pdev->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);

View file

@ -1237,7 +1237,6 @@ static int bam_dma_probe(struct platform_device *pdev)
{
struct bam_device *bdev;
const struct of_device_id *match;
struct resource *iores;
int ret, i;
bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
@ -1254,8 +1253,7 @@ static int bam_dma_probe(struct platform_device *pdev)
bdev->layout = match->data;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
bdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdev->regs))
return PTR_ERR(bdev->regs);

View file

@ -96,7 +96,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
if (!desc)
return NULL;
desc->in_use = true;
desc->dirn = DMA_MEM_TO_MEM;
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@ -290,7 +289,7 @@ static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
struct sf_pdma_desc *desc;
desc = to_sf_pdma_desc(vdesc);
desc->in_use = false;
kfree(desc);
}
static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
@ -494,7 +493,6 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev)
{
struct sf_pdma *pdma;
struct resource *res;
int ret, n_chans;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
@ -519,8 +517,7 @@ static int sf_pdma_probe(struct platform_device *pdev)
pdma->n_chans = n_chans;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdma->membase = devm_ioremap_resource(&pdev->dev, res);
pdma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdma->membase))
return PTR_ERR(pdma->membase);

View file

@ -78,7 +78,6 @@ struct sf_pdma_desc {
u64 src_addr;
struct virt_dma_desc vdesc;
struct sf_pdma_chan *chan;
bool in_use;
enum dma_transfer_direction dirn;
struct dma_async_tx_descriptor *async_tx;
};
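
The leak fixed above came from sf_pdma_free_desc() merely clearing the in_use flag: a descriptor is allocated for every transfer in the prep path, and the virt-dma core hands it back through the channel's desc_free callback, so anything not kfree()d there was lost. With the flag gone, the free path reduces to the usual virt-dma pairing, roughly (assuming the kzalloc() in the driver's descriptor-allocation path):

static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
{
	/* invoked by virt-dma via chan->vchan.desc_free once the
	 * descriptor is no longer referenced
	 */
	kfree(to_sf_pdma_desc(vdesc));
}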

View file

@ -768,7 +768,6 @@ static int usb_dmac_probe(struct platform_device *pdev)
const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
struct dma_device *engine;
struct usb_dmac *dmac;
struct resource *mem;
unsigned int i;
int ret;
@ -789,8 +788,7 @@ static int usb_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
/* Request resources. */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->iomem))
return PTR_ERR(dmac->iomem);

View file

@ -179,7 +179,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct device_node *dma_node;
struct stm32_dmamux_data *stm32_dmamux;
struct resource *res;
void __iomem *iomem;
struct reset_control *rst;
int i, count, ret;
@ -238,8 +237,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
}
pm_runtime_get_noresume(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iomem = devm_ioremap_resource(&pdev->dev, res);
iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iomem))
return PTR_ERR(iomem);

View file

@ -1580,7 +1580,6 @@ static int stm32_mdma_probe(struct platform_device *pdev)
struct stm32_mdma_device *dmadev;
struct dma_device *dd;
struct device_node *of_node;
struct resource *res;
struct reset_control *rst;
u32 nr_channels, nr_requests;
int i, count, ret;
@ -1622,8 +1621,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
count);
dmadev->nr_ahb_addr_masks = count;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmadev->base = devm_ioremap_resource(&pdev->dev, res);
dmadev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmadev->base))
return PTR_ERR(dmadev->base);

View file

@ -1144,15 +1144,13 @@ static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
static int sun4i_dma_probe(struct platform_device *pdev)
{
struct sun4i_dma_dev *priv;
struct resource *res;
int i, j, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);

View file

@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
@ -1283,7 +1284,6 @@ static int sun6i_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct sun6i_dma_dev *sdc;
struct resource *res;
int ret, i;
sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
@ -1294,8 +1294,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
if (!sdc->cfg)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sdc->base = devm_ioremap_resource(&pdev->dev, res);
sdc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdc->base))
return PTR_ERR(sdc->base);
@ -1334,6 +1333,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&sdc->pending);
spin_lock_init(&sdc->lock);
dma_set_max_seg_size(&pdev->dev, SZ_32M - 1);
dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
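
dma_set_max_seg_size() stores the controller limit in dev->dma_parms; without it, dma_get_max_seg_size() reports the 64K default and scatterlist producers may build segments the engine cannot express. The consumer side, sketched (dma_dev stands in for the sun6i device):

#include <linux/dma-mapping.h>

static unsigned int foo_seg_limit(struct device *dma_dev)
{
	/* returns SZ_32M - 1 once the driver above has registered it */
	return dma_get_max_seg_size(dma_dev);
}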

View file

@ -837,7 +837,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
{
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
struct resource *res;
int ret, i;
cdata = of_device_get_match_data(&pdev->dev);
@ -857,8 +856,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);

View file

@ -10,6 +10,7 @@ k3-psil-lib-objs := k3-psil.o \
k3-psil-j7200.o \
k3-psil-am64.o \
k3-psil-j721s2.o \
k3-psil-am62.o
k3-psil-am62.o \
k3-psil-am62a.o
obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o

View file

@ -1039,7 +1039,6 @@ static int cppi41_dma_probe(struct platform_device *pdev)
struct cppi41_dd *cdd;
struct device *dev = &pdev->dev;
const struct cppi_glue_infos *glue_info;
struct resource *mem;
int index;
int irq;
int ret;
@ -1072,18 +1071,15 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (index < 0)
return index;
mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
cdd->ctrl_mem = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(cdd->ctrl_mem))
return PTR_ERR(cdd->ctrl_mem);
mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
cdd->sched_mem = devm_ioremap_resource(dev, mem);
cdd->sched_mem = devm_platform_ioremap_resource(pdev, index + 1);
if (IS_ERR(cdd->sched_mem))
return PTR_ERR(cdd->sched_mem);
mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
cdd->qmgr_mem = devm_platform_ioremap_resource(pdev, index + 2);
if (IS_ERR(cdd->qmgr_mem))
return PTR_ERR(cdd->qmgr_mem);

View file

@ -0,0 +1,196 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
*/
#include <linux/kernel.h>
#include "k3-psil-priv.h"
#define PSIL_PDMA_XY_TR(x) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_PDMA_XY, \
.mapped_channel_id = -1, \
.default_flow_id = -1, \
}, \
}
#define PSIL_PDMA_XY_PKT(x) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_PDMA_XY, \
.mapped_channel_id = -1, \
.default_flow_id = -1, \
.pkt_mode = 1, \
}, \
}
#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_NATIVE, \
.pkt_mode = 1, \
.needs_epib = 1, \
.psd_size = 16, \
.mapped_channel_id = ch, \
.flow_start = flow_base, \
.flow_num = flow_cnt, \
.default_flow_id = flow_base, \
}, \
}
#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_NATIVE, \
.pkt_mode = 1, \
.needs_epib = 1, \
.psd_size = 64, \
.mapped_channel_id = ch, \
.flow_start = flow_base, \
.flow_num = flow_cnt, \
.default_flow_id = default_flow, \
.notdpkt = tx, \
}, \
}
#define PSIL_PDMA_MCASP(x) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_PDMA_XY, \
.pdma_acc32 = 1, \
.pdma_burst = 1, \
}, \
}
#define PSIL_CSI2RX(x) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_NATIVE, \
}, \
}
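/*
 * For illustration: the CPSW3G entry below, PSIL_ETHERNET(0x4600, 19, 19, 16),
 * expands to
 *
 *   { .thread_id = 0x4600,
 *     .ep_config = { .ep_type = PSIL_EP_NATIVE, .pkt_mode = 1,
 *                    .needs_epib = 1, .psd_size = 16,
 *                    .mapped_channel_id = 19, .flow_start = 19,
 *                    .flow_num = 16, .default_flow_id = 19 } }
 *
 * i.e. a native packet-mode endpoint mapped to channel 19 with 16 RX flows
 * starting at flow 19.
 */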
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep am62a_src_ep_map[] = {
/* SAUL */
PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
/* PDMA_MAIN0 - SPI0-3 */
PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303),
PSIL_PDMA_XY_PKT(0x4304),
PSIL_PDMA_XY_PKT(0x4305),
PSIL_PDMA_XY_PKT(0x4306),
PSIL_PDMA_XY_PKT(0x4307),
PSIL_PDMA_XY_PKT(0x4308),
PSIL_PDMA_XY_PKT(0x4309),
PSIL_PDMA_XY_PKT(0x430a),
PSIL_PDMA_XY_PKT(0x430b),
PSIL_PDMA_XY_PKT(0x430c),
PSIL_PDMA_XY_PKT(0x430d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0x4400),
PSIL_PDMA_XY_PKT(0x4401),
PSIL_PDMA_XY_PKT(0x4402),
PSIL_PDMA_XY_PKT(0x4403),
PSIL_PDMA_XY_PKT(0x4404),
PSIL_PDMA_XY_PKT(0x4405),
PSIL_PDMA_XY_PKT(0x4406),
/* PDMA_MAIN2 - MCASP0-2 */
PSIL_PDMA_MCASP(0x4500),
PSIL_PDMA_MCASP(0x4501),
PSIL_PDMA_MCASP(0x4502),
/* CPSW3G */
PSIL_ETHERNET(0x4600, 19, 19, 16),
/* CSI2RX */
PSIL_CSI2RX(0x5000),
PSIL_CSI2RX(0x5001),
PSIL_CSI2RX(0x5002),
PSIL_CSI2RX(0x5003),
PSIL_CSI2RX(0x5004),
PSIL_CSI2RX(0x5005),
PSIL_CSI2RX(0x5006),
PSIL_CSI2RX(0x5007),
PSIL_CSI2RX(0x5008),
PSIL_CSI2RX(0x5009),
PSIL_CSI2RX(0x500a),
PSIL_CSI2RX(0x500b),
PSIL_CSI2RX(0x500c),
PSIL_CSI2RX(0x500d),
PSIL_CSI2RX(0x500e),
PSIL_CSI2RX(0x500f),
PSIL_CSI2RX(0x5010),
PSIL_CSI2RX(0x5011),
PSIL_CSI2RX(0x5012),
PSIL_CSI2RX(0x5013),
PSIL_CSI2RX(0x5014),
PSIL_CSI2RX(0x5015),
PSIL_CSI2RX(0x5016),
PSIL_CSI2RX(0x5017),
PSIL_CSI2RX(0x5018),
PSIL_CSI2RX(0x5019),
PSIL_CSI2RX(0x501a),
PSIL_CSI2RX(0x501b),
PSIL_CSI2RX(0x501c),
PSIL_CSI2RX(0x501d),
PSIL_CSI2RX(0x501e),
PSIL_CSI2RX(0x501f),
};
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep am62a_dst_ep_map[] = {
/* SAUL */
PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
/* PDMA_MAIN0 - SPI0-3 */
PSIL_PDMA_XY_PKT(0xc302),
PSIL_PDMA_XY_PKT(0xc303),
PSIL_PDMA_XY_PKT(0xc304),
PSIL_PDMA_XY_PKT(0xc305),
PSIL_PDMA_XY_PKT(0xc306),
PSIL_PDMA_XY_PKT(0xc307),
PSIL_PDMA_XY_PKT(0xc308),
PSIL_PDMA_XY_PKT(0xc309),
PSIL_PDMA_XY_PKT(0xc30a),
PSIL_PDMA_XY_PKT(0xc30b),
PSIL_PDMA_XY_PKT(0xc30c),
PSIL_PDMA_XY_PKT(0xc30d),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0xc400),
PSIL_PDMA_XY_PKT(0xc401),
PSIL_PDMA_XY_PKT(0xc402),
PSIL_PDMA_XY_PKT(0xc403),
PSIL_PDMA_XY_PKT(0xc404),
PSIL_PDMA_XY_PKT(0xc405),
PSIL_PDMA_XY_PKT(0xc406),
/* PDMA_MAIN2 - MCASP0-2 */
PSIL_PDMA_MCASP(0xc500),
PSIL_PDMA_MCASP(0xc501),
PSIL_PDMA_MCASP(0xc502),
/* CPSW3G */
PSIL_ETHERNET(0xc600, 19, 19, 8),
PSIL_ETHERNET(0xc601, 20, 27, 8),
PSIL_ETHERNET(0xc602, 21, 35, 8),
PSIL_ETHERNET(0xc603, 22, 43, 8),
PSIL_ETHERNET(0xc604, 23, 51, 8),
PSIL_ETHERNET(0xc605, 24, 59, 8),
PSIL_ETHERNET(0xc606, 25, 67, 8),
PSIL_ETHERNET(0xc607, 26, 75, 8),
};
struct psil_ep_map am62a_ep_map = {
.name = "am62a",
.src = am62a_src_ep_map,
.src_count = ARRAY_SIZE(am62a_src_ep_map),
.dst = am62a_dst_ep_map,
.dst_count = ARRAY_SIZE(am62a_dst_ep_map),
};

View file

@ -43,5 +43,6 @@ extern struct psil_ep_map j7200_ep_map;
extern struct psil_ep_map am64_ep_map;
extern struct psil_ep_map j721s2_ep_map;
extern struct psil_ep_map am62_ep_map;
extern struct psil_ep_map am62a_ep_map;
#endif /* K3_PSIL_PRIV_H_ */

View file

@ -24,6 +24,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM64X", .data = &am64_ep_map },
{ .family = "J721S2", .data = &j721s2_ep_map },
{ .family = "AM62X", .data = &am62_ep_map },
{ .family = "AM62AX", .data = &am62a_ep_map },
{ /* sentinel */ }
};

View file

@ -135,6 +135,7 @@ struct udma_match_data {
u32 flags;
u32 statictr_z_mask;
u8 burst_size[3];
struct udma_soc_data *soc_data;
};
struct udma_soc_data {
@ -4296,6 +4297,25 @@ static struct udma_match_data j721e_mcu_data = {
},
};
static struct udma_soc_data am62a_dmss_csi_soc_data = {
.oes = {
.bcdma_rchan_data = 0xe00,
.bcdma_rchan_ring = 0x1000,
},
};
static struct udma_match_data am62a_bcdma_csirx_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x3100,
.enable_memcpy_support = false,
.burst_size = {
TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
0, /* No H Channels */
0, /* No UH Channels */
},
.soc_data = &am62a_dmss_csi_soc_data,
};
static struct udma_match_data am64_bcdma_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
@ -4345,6 +4365,10 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,am64-dmss-pktdma",
.data = &am64_pktdma_data,
},
{
.compatible = "ti,am62a-dmss-bcdma-csirx",
.data = &am62a_bcdma_csirx_data,
},
{ /* Sentinel */ },
};
@ -4387,6 +4411,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM64X", .data = &am64_soc_data },
{ .family = "J721S2", .data = &j721e_soc_data},
{ .family = "AM62X", .data = &am64_soc_data },
{ .family = "AM62AX", .data = &am64_soc_data },
{ /* sentinel */ }
};
@ -4775,7 +4800,10 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].num = rm_res->desc[i].num;
}
}
} else {
i = 0;
}
if (ud->tchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
@ -5271,12 +5299,15 @@ static int udma_probe(struct platform_device *pdev)
}
ud->match_data = match->data;
soc = soc_device_match(k3_soc_devices);
if (!soc) {
dev_err(dev, "No compatible SoC found\n");
return -ENODEV;
ud->soc_data = ud->match_data->soc_data;
if (!ud->soc_data) {
soc = soc_device_match(k3_soc_devices);
if (!soc) {
dev_err(dev, "No compatible SoC found\n");
return -ENODEV;
}
ud->soc_data = soc->data;
}
ud->soc_data = soc->data;
ret = udma_get_mmrs(pdev, ud);
if (ret)
@ -5345,7 +5376,6 @@ static int udma_probe(struct platform_device *pdev)
dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
if (!dev->msi.domain) {
dev_err(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
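
The probe hunk above interleaves removed and added lines; the net result is that a soc_data pointer supplied by the compatible's match data (as am62a_bcdma_csirx_data now does for the CSI BCDMA, whose oes offsets differ from the family default) takes precedence, with the SoC-family table as the fallback:

ud->soc_data = ud->match_data->soc_data;
if (!ud->soc_data) {
	soc = soc_device_match(k3_soc_devices);
	if (!soc) {
		dev_err(dev, "No compatible SoC found\n");
		return -ENODEV;
	}
	ud->soc_data = soc->data;
}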

View file

@ -1658,7 +1658,6 @@ static int omap_dma_probe(struct platform_device *pdev)
{
const struct omap_dma_config *conf;
struct omap_dmadev *od;
struct resource *res;
int rc, i, irq;
u32 val;
@ -1666,8 +1665,7 @@ static int omap_dma_probe(struct platform_device *pdev)
if (!od)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
od->base = devm_ioremap_resource(&pdev->dev, res);
od->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(od->base))
return PTR_ERR(od->base);

View file

@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
obj-$(CONFIG_XILINX_XDMA) += xdma.o
obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
obj-$(CONFIG_XILINX_ZYNQMP_DPDMA) += xilinx_dpdma.o

View file

@ -0,0 +1,166 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
* Copyright (C) 2022, Advanced Micro Devices, Inc.
*/
#ifndef __DMA_XDMA_REGS_H
#define __DMA_XDMA_REGS_H
/* The length of register space exposed to host */
#define XDMA_REG_SPACE_LEN 65536
/*
* maximum number of DMA channels for each direction:
* Host to Card (H2C) or Card to Host (C2H)
*/
#define XDMA_MAX_CHANNELS 4
/*
* macros defining the number of descriptor blocks that can be used in one
* DMA transfer request.
* The DMA engine uses a linked list of descriptor blocks that specify the
* source, destination, and length of the DMA transfers.
*/
#define XDMA_DESC_BLOCK_NUM BIT(7)
#define XDMA_DESC_BLOCK_MASK (XDMA_DESC_BLOCK_NUM - 1)
/* descriptor definitions */
#define XDMA_DESC_ADJACENT 32
#define XDMA_DESC_ADJACENT_MASK (XDMA_DESC_ADJACENT - 1)
#define XDMA_DESC_ADJACENT_BITS GENMASK(13, 8)
#define XDMA_DESC_MAGIC 0xad4bUL
#define XDMA_DESC_MAGIC_BITS GENMASK(31, 16)
#define XDMA_DESC_FLAGS_BITS GENMASK(7, 0)
#define XDMA_DESC_STOPPED BIT(0)
#define XDMA_DESC_COMPLETED BIT(1)
#define XDMA_DESC_BLEN_BITS 28
#define XDMA_DESC_BLEN_MAX (BIT(XDMA_DESC_BLEN_BITS) - PAGE_SIZE)
/* macros to construct the descriptor control word */
#define XDMA_DESC_CONTROL(adjacent, flag) \
(FIELD_PREP(XDMA_DESC_MAGIC_BITS, XDMA_DESC_MAGIC) | \
FIELD_PREP(XDMA_DESC_ADJACENT_BITS, (adjacent) - 1) | \
FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag)))
#define XDMA_DESC_CONTROL_LAST \
XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED)
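/*
 * For example, XDMA_DESC_CONTROL_LAST packs the magic 0xad4b into bits
 * 31:16, adjacent - 1 = 0 into bits 13:8 and STOPPED | COMPLETED = 0x3
 * into bits 7:0, giving a control word of 0xad4b0003.
 */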
/*
* Descriptor for a single contiguous memory block transfer.
*
* Multiple descriptors are linked by means of the next pointer. An additional
* 'adjacent' count gives the number of extra contiguous descriptors that follow.
*
* The descriptors are in root complex memory, and the bytes in the 32-bit
* words must be in little-endian byte ordering.
*/
struct xdma_hw_desc {
__le32 control;
__le32 bytes;
__le64 src_addr;
__le64 dst_addr;
__le64 next_desc;
};
#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
#define XDMA_DESC_BLOCK_ALIGN 4096
/*
* Channel registers
*/
#define XDMA_CHAN_IDENTIFIER 0x0
#define XDMA_CHAN_CONTROL 0x4
#define XDMA_CHAN_CONTROL_W1S 0x8
#define XDMA_CHAN_CONTROL_W1C 0xc
#define XDMA_CHAN_STATUS 0x40
#define XDMA_CHAN_COMPLETED_DESC 0x48
#define XDMA_CHAN_ALIGNMENTS 0x4c
#define XDMA_CHAN_INTR_ENABLE 0x90
#define XDMA_CHAN_INTR_ENABLE_W1S 0x94
#define XDMA_CHAN_INTR_ENABLE_W1C 0x9c
#define XDMA_CHAN_STRIDE 0x100
#define XDMA_CHAN_H2C_OFFSET 0x0
#define XDMA_CHAN_C2H_OFFSET 0x1000
#define XDMA_CHAN_H2C_TARGET 0x0
#define XDMA_CHAN_C2H_TARGET 0x1
/* macro to check if channel is available */
#define XDMA_CHAN_MAGIC 0x1fc0
#define XDMA_CHAN_CHECK_TARGET(id, target) \
(((u32)(id) >> 16) == XDMA_CHAN_MAGIC + (target))
/* bits of the channel control register */
#define CHAN_CTRL_RUN_STOP BIT(0)
#define CHAN_CTRL_IE_DESC_STOPPED BIT(1)
#define CHAN_CTRL_IE_DESC_COMPLETED BIT(2)
#define CHAN_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4)
#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6)
#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9)
#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19)
#define CHAN_CTRL_NON_INCR_ADDR BIT(25)
#define CHAN_CTRL_POLL_MODE_WB BIT(26)
#define CHAN_CTRL_START (CHAN_CTRL_RUN_STOP | \
CHAN_CTRL_IE_DESC_STOPPED | \
CHAN_CTRL_IE_DESC_COMPLETED | \
CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
CHAN_CTRL_IE_MAGIC_STOPPED | \
CHAN_CTRL_IE_READ_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
/* bits of the channel interrupt enable mask */
#define CHAN_IM_DESC_ERROR BIT(19)
#define CHAN_IM_READ_ERROR BIT(9)
#define CHAN_IM_IDLE_STOPPED BIT(6)
#define CHAN_IM_MAGIC_STOPPED BIT(4)
#define CHAN_IM_DESC_COMPLETED BIT(2)
#define CHAN_IM_DESC_STOPPED BIT(1)
#define CHAN_IM_ALL (CHAN_IM_DESC_ERROR | CHAN_IM_READ_ERROR | \
CHAN_IM_IDLE_STOPPED | CHAN_IM_MAGIC_STOPPED | \
CHAN_IM_DESC_COMPLETED | CHAN_IM_DESC_STOPPED)
/*
* Channel SGDMA registers
*/
#define XDMA_SGDMA_IDENTIFIER 0x4000
#define XDMA_SGDMA_DESC_LO 0x4080
#define XDMA_SGDMA_DESC_HI 0x4084
#define XDMA_SGDMA_DESC_ADJ 0x4088
#define XDMA_SGDMA_DESC_CREDIT 0x408c
/* bits of the SG DMA control register */
#define XDMA_CTRL_RUN_STOP BIT(0)
#define XDMA_CTRL_IE_DESC_STOPPED BIT(1)
#define XDMA_CTRL_IE_DESC_COMPLETED BIT(2)
#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
#define XDMA_CTRL_IE_MAGIC_STOPPED BIT(4)
#define XDMA_CTRL_IE_IDLE_STOPPED BIT(6)
#define XDMA_CTRL_IE_READ_ERROR GENMASK(13, 9)
#define XDMA_CTRL_IE_DESC_ERROR GENMASK(23, 19)
#define XDMA_CTRL_NON_INCR_ADDR BIT(25)
#define XDMA_CTRL_POLL_MODE_WB BIT(26)
/*
* interrupt registers
*/
#define XDMA_IRQ_IDENTIFIER 0x2000
#define XDMA_IRQ_USER_INT_EN 0x2004
#define XDMA_IRQ_USER_INT_EN_W1S 0x2008
#define XDMA_IRQ_USER_INT_EN_W1C 0x200c
#define XDMA_IRQ_CHAN_INT_EN 0x2010
#define XDMA_IRQ_CHAN_INT_EN_W1S 0x2014
#define XDMA_IRQ_CHAN_INT_EN_W1C 0x2018
#define XDMA_IRQ_USER_INT_REQ 0x2040
#define XDMA_IRQ_CHAN_INT_REQ 0x2044
#define XDMA_IRQ_USER_INT_PEND 0x2048
#define XDMA_IRQ_CHAN_INT_PEND 0x204c
#define XDMA_IRQ_USER_VEC_NUM 0x2080
#define XDMA_IRQ_CHAN_VEC_NUM 0x20a0
#define XDMA_IRQ_VEC_SHIFT 8
#endif /* __DMA_XDMA_REGS_H */

drivers/dma/xilinx/xdma.c (new file, 974 lines)
View file

@ -0,0 +1,974 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DMA driver for Xilinx DMA/Bridge Subsystem
*
* Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
* Copyright (C) 2022, Advanced Micro Devices, Inc.
*/
/*
* The DMA/Bridge Subsystem for PCI Express allows for the movement of data
* between Host memory and the DMA subsystem. It does this by operating on
* 'descriptors' that contain information about the source, destination and
* amount of data to transfer. These direct memory transfers can go in both
* the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
* configured to have a single AXI4 Master interface shared by all channels
* or one AXI4-Stream interface for each enabled channel. Memory transfers are
* specified on a per-channel basis in descriptor linked lists, which the DMA
* fetches from host memory and processes. Events such as descriptor completion
* and errors are signaled using interrupts. The core also provides up to 16
* user interrupt wires that generate interrupts to the host.
*/
#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/regmap.h>
#include <linux/dmaengine.h>
#include <linux/dma/amd_xdma.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_xdma.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "../virt-dma.h"
#include "xdma-regs.h"
/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = XDMA_REG_SPACE_LEN,
};
/**
* struct xdma_desc_block - Descriptor block
* @virt_addr: Virtual address of block start
* @dma_addr: DMA address of block start
*/
struct xdma_desc_block {
void *virt_addr;
dma_addr_t dma_addr;
};
/**
* struct xdma_chan - Driver specific DMA channel structure
* @vchan: Virtual channel
* @xdev_hdl: Pointer to DMA device structure
* @base: Offset of channel registers
* @desc_pool: Descriptor pool
* @busy: Busy flag of the channel
* @dir: Transfer direction of the channel
* @cfg: Transfer configuration of the channel
* @irq: IRQ assigned to the channel
*/
struct xdma_chan {
struct virt_dma_chan vchan;
void *xdev_hdl;
u32 base;
struct dma_pool *desc_pool;
bool busy;
enum dma_transfer_direction dir;
struct dma_slave_config cfg;
u32 irq;
};
/**
* struct xdma_desc - DMA desc structure
* @vdesc: Virtual DMA descriptor
* @chan: DMA channel pointer
* @dir: Transfer direction of the request
* @dev_addr: Physical address on DMA device side
* @desc_blocks: Hardware descriptor blocks
* @dblk_num: Number of hardware descriptor blocks
* @desc_num: Number of hardware descriptors
* @completed_desc_num: Completed hardware descriptors
*/
struct xdma_desc {
struct virt_dma_desc vdesc;
struct xdma_chan *chan;
enum dma_transfer_direction dir;
u64 dev_addr;
struct xdma_desc_block *desc_blocks;
u32 dblk_num;
u32 desc_num;
u32 completed_desc_num;
};
#define XDMA_DEV_STATUS_REG_DMA BIT(0)
#define XDMA_DEV_STATUS_INIT_MSIX BIT(1)
/**
* struct xdma_device - DMA device structure
* @pdev: Platform device pointer
* @dma_dev: DMA device structure
* @rmap: MMIO regmap for DMA registers
* @h2c_chans: Host to Card channels
* @c2h_chans: Card to Host channels
* @h2c_chan_num: Number of H2C channels
* @c2h_chan_num: Number of C2H channels
* @irq_start: Start IRQ assigned to device
* @irq_num: Number of IRQ assigned to device
* @status: Initialization status
*/
struct xdma_device {
struct platform_device *pdev;
struct dma_device dma_dev;
struct regmap *rmap;
struct xdma_chan *h2c_chans;
struct xdma_chan *c2h_chans;
u32 h2c_chan_num;
u32 c2h_chan_num;
u32 irq_start;
u32 irq_num;
u32 status;
};
#define xdma_err(xdev, fmt, args...) \
dev_err(&(xdev)->pdev->dev, fmt, ##args)
#define XDMA_CHAN_NUM(_xd) ({ \
typeof(_xd) (xd) = (_xd); \
((xd)->h2c_chan_num + (xd)->c2h_chan_num); })
/* Get the last desc in a desc block */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}
/**
* xdma_link_desc_blocks - Link descriptor blocks for DMA transfer
* @sw_desc: Tx descriptor pointer
*/
static void xdma_link_desc_blocks(struct xdma_desc *sw_desc)
{
struct xdma_desc_block *block;
u32 last_blk_desc, desc_control;
struct xdma_hw_desc *desc;
int i;
desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
for (i = 1; i < sw_desc->dblk_num; i++) {
block = &sw_desc->desc_blocks[i - 1];
desc = xdma_blk_last_desc(block);
if (!(i & XDMA_DESC_BLOCK_MASK)) {
desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
continue;
}
desc->control = cpu_to_le32(desc_control);
desc->next_desc = cpu_to_le64(block[1].dma_addr);
}
/* update the last block */
last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
desc = xdma_blk_last_desc(block);
desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
desc->control = cpu_to_le32(desc_control);
}
block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}
static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
return container_of(chan, struct xdma_chan, vchan.chan);
}
static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
return container_of(vdesc, struct xdma_desc, vdesc);
}
/**
* xdma_channel_init - Initialize DMA channel registers
* @chan: DMA channel pointer
*/
static int xdma_channel_init(struct xdma_chan *chan)
{
struct xdma_device *xdev = chan->xdev_hdl;
int ret;
ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
CHAN_CTRL_NON_INCR_ADDR);
if (ret)
return ret;
ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
CHAN_IM_ALL);
if (ret)
return ret;
return 0;
}
/**
* xdma_free_desc - Free descriptor
* @vdesc: Virtual DMA descriptor
*/
static void xdma_free_desc(struct virt_dma_desc *vdesc)
{
struct xdma_desc *sw_desc;
int i;
sw_desc = to_xdma_desc(vdesc);
for (i = 0; i < sw_desc->dblk_num; i++) {
if (!sw_desc->desc_blocks[i].virt_addr)
break;
dma_pool_free(sw_desc->chan->desc_pool,
sw_desc->desc_blocks[i].virt_addr,
sw_desc->desc_blocks[i].dma_addr);
}
kfree(sw_desc->desc_blocks);
kfree(sw_desc);
}
/**
* xdma_alloc_desc - Allocate descriptor
* @chan: DMA channel pointer
* @desc_num: Number of hardware descriptors
*/
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
{
struct xdma_desc *sw_desc;
struct xdma_hw_desc *desc;
dma_addr_t dma_addr;
u32 dblk_num;
void *addr;
int i, j;
sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
if (!sw_desc)
return NULL;
sw_desc->chan = chan;
sw_desc->desc_num = desc_num;
dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
GFP_NOWAIT);
if (!sw_desc->desc_blocks)
goto failed;
sw_desc->dblk_num = dblk_num;
for (i = 0; i < sw_desc->dblk_num; i++) {
addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
if (!addr)
goto failed;
sw_desc->desc_blocks[i].virt_addr = addr;
sw_desc->desc_blocks[i].dma_addr = dma_addr;
for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0));
}
xdma_link_desc_blocks(sw_desc);
return sw_desc;
failed:
xdma_free_desc(&sw_desc->vdesc);
return NULL;
}
/**
* xdma_xfer_start - Start DMA transfer
* @xchan: DMA channel pointer
*/
static int xdma_xfer_start(struct xdma_chan *xchan)
{
struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
struct xdma_device *xdev = xchan->xdev_hdl;
struct xdma_desc_block *block;
u32 val, completed_blocks;
struct xdma_desc *desc;
int ret;
/*
* Bail out if no descriptor has been submitted or the channel is busy.
* The vchan lock must be held by the caller.
*/
if (!vd || xchan->busy)
return -EINVAL;
/* clear run stop bit to get ready for transfer */
ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
CHAN_CTRL_RUN_STOP);
if (ret)
return ret;
desc = to_xdma_desc(vd);
if (desc->dir != xchan->dir) {
xdma_err(xdev, "incorrect request direction");
return -EINVAL;
}
/* set DMA engine to the first descriptor block */
completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
block = &desc->desc_blocks[completed_blocks];
val = lower_32_bits(block->dma_addr);
ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
if (ret)
return ret;
val = upper_32_bits(block->dma_addr);
ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
if (ret)
return ret;
if (completed_blocks + 1 == desc->dblk_num)
val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
else
val = XDMA_DESC_ADJACENT - 1;
ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
if (ret)
return ret;
/* kick off DMA transfer */
ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
CHAN_CTRL_START);
if (ret)
return ret;
xchan->busy = true;
return 0;
}
/**
* xdma_alloc_channels - Detect and allocate DMA channels
* @xdev: DMA device pointer
* @dir: Channel direction
*/
static int xdma_alloc_channels(struct xdma_device *xdev,
enum dma_transfer_direction dir)
{
struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
struct xdma_chan **chans, *xchan;
u32 base, identifier, target;
u32 *chan_num;
int i, j, ret;
if (dir == DMA_MEM_TO_DEV) {
base = XDMA_CHAN_H2C_OFFSET;
target = XDMA_CHAN_H2C_TARGET;
chans = &xdev->h2c_chans;
chan_num = &xdev->h2c_chan_num;
} else if (dir == DMA_DEV_TO_MEM) {
base = XDMA_CHAN_C2H_OFFSET;
target = XDMA_CHAN_C2H_TARGET;
chans = &xdev->c2h_chans;
chan_num = &xdev->c2h_chan_num;
} else {
xdma_err(xdev, "invalid direction specified");
return -EINVAL;
}
/* detect number of available DMA channels */
for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
&identifier);
if (ret)
return ret;
/* check whether this DMA channel is available */
if (XDMA_CHAN_CHECK_TARGET(identifier, target))
(*chan_num)++;
}
if (!*chan_num) {
xdma_err(xdev, "does not probe any channel");
return -EINVAL;
}
*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
GFP_KERNEL);
if (!*chans)
return -ENOMEM;
for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
&identifier);
if (ret)
return ret;
if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
continue;
if (j == *chan_num) {
xdma_err(xdev, "invalid channel number");
return -EIO;
}
/* init channel structure and hardware */
xchan = &(*chans)[j];
xchan->xdev_hdl = xdev;
xchan->base = base + i * XDMA_CHAN_STRIDE;
xchan->dir = dir;
ret = xdma_channel_init(xchan);
if (ret)
return ret;
xchan->vchan.desc_free = xdma_free_desc;
vchan_init(&xchan->vchan, &xdev->dma_dev);
j++;
}
dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
(dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");
return 0;
}
/**
* xdma_issue_pending - Issue pending transactions
* @chan: DMA channel pointer
*/
static void xdma_issue_pending(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
if (vchan_issue_pending(&xdma_chan->vchan))
xdma_xfer_start(xdma_chan);
spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}
/**
* xdma_prep_device_sg - prepare a descriptor for a DMA transaction
* @chan: DMA channel pointer
* @sgl: Transfer scatter gather list
* @sg_len: Length of scatter gather list
* @dir: Transfer direction
* @flags: transfer ack flags
* @context: APP words of the descriptor
*/
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags, void *context)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
struct dma_async_tx_descriptor *tx_desc;
u32 desc_num = 0, i, len, rest;
struct xdma_desc_block *dblk;
struct xdma_hw_desc *desc;
struct xdma_desc *sw_desc;
u64 dev_addr, *src, *dst;
struct scatterlist *sg;
u64 addr;
for_each_sg(sgl, sg, sg_len, i)
desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
sw_desc = xdma_alloc_desc(xdma_chan, desc_num);
if (!sw_desc)
return NULL;
sw_desc->dir = dir;
if (dir == DMA_MEM_TO_DEV) {
dev_addr = xdma_chan->cfg.dst_addr;
src = &addr;
dst = &dev_addr;
} else {
dev_addr = xdma_chan->cfg.src_addr;
src = &dev_addr;
dst = &addr;
}
dblk = sw_desc->desc_blocks;
desc = dblk->virt_addr;
desc_num = 1;
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
rest = sg_dma_len(sg);
do {
len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
/* set hardware descriptor */
desc->bytes = cpu_to_le32(len);
desc->src_addr = cpu_to_le64(*src);
desc->dst_addr = cpu_to_le64(*dst);
if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
dblk++;
desc = dblk->virt_addr;
} else {
desc++;
}
desc_num++;
dev_addr += len;
addr += len;
rest -= len;
} while (rest);
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
if (!tx_desc)
goto failed;
return tx_desc;
failed:
xdma_free_desc(&sw_desc->vdesc);
return NULL;
}
/**
* xdma_device_config - Configure the DMA channel
* @chan: DMA channel
* @cfg: channel configuration
*/
static int xdma_device_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));
return 0;
}
/**
* xdma_free_chan_resources - Free channel resources
* @chan: DMA channel
*/
static void xdma_free_chan_resources(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
vchan_free_chan_resources(&xdma_chan->vchan);
dma_pool_destroy(xdma_chan->desc_pool);
xdma_chan->desc_pool = NULL;
}
/**
* xdma_alloc_chan_resources - Allocate channel resources
* @chan: DMA channel
*/
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
struct xdma_device *xdev = xdma_chan->xdev_hdl;
struct device *dev = xdev->dma_dev.dev;
while (dev && !dev_is_pci(dev))
dev = dev->parent;
if (!dev) {
xdma_err(xdev, "unable to find pci device");
return -EINVAL;
}
xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
dev, XDMA_DESC_BLOCK_SIZE,
XDMA_DESC_BLOCK_ALIGN, 0);
if (!xdma_chan->desc_pool) {
xdma_err(xdev, "unable to allocate descriptor pool");
return -ENOMEM;
}
return 0;
}
/**
* xdma_channel_isr - XDMA channel interrupt handler
* @irq: IRQ number
* @dev_id: Pointer to the DMA channel structure
*/
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
struct xdma_chan *xchan = dev_id;
u32 complete_desc_num = 0;
struct xdma_device *xdev;
struct virt_dma_desc *vd;
struct xdma_desc *desc;
int ret;
spin_lock(&xchan->vchan.lock);
/* get submitted request */
vd = vchan_next_desc(&xchan->vchan);
if (!vd)
goto out;
xchan->busy = false;
desc = to_xdma_desc(vd);
xdev = xchan->xdev_hdl;
ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
&complete_desc_num);
if (ret)
goto out;
desc->completed_desc_num += complete_desc_num;
/*
* if all data blocks are transferred, remove and complete the request
*/
if (desc->completed_desc_num == desc->desc_num) {
list_del(&vd->node);
vchan_cookie_complete(vd);
goto out;
}
if (desc->completed_desc_num > desc->desc_num ||
complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
goto out;
/* transfer the rest of data */
xdma_xfer_start(xchan);
out:
spin_unlock(&xchan->vchan.lock);
return IRQ_HANDLED;
}
/**
* xdma_irq_fini - Uninitialize IRQ
* @xdev: DMA device pointer
*/
static void xdma_irq_fini(struct xdma_device *xdev)
{
int i;
/* disable interrupt */
regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);
/* free irq handler */
for (i = 0; i < xdev->h2c_chan_num; i++)
free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
for (i = 0; i < xdev->c2h_chan_num; i++)
free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}
/**
* xdma_set_vector_reg - configure hardware IRQ registers
* @xdev: DMA device pointer
* @vec_tbl_start: Start of IRQ registers
* @irq_start: Start of IRQ
* @irq_num: Number of IRQ
*/
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
u32 irq_start, u32 irq_num)
{
u32 shift, i, val = 0;
int ret;
/* Each IRQ register is 32 bit and contains 4 IRQs */
while (irq_num > 0) {
for (i = 0; i < 4; i++) {
shift = XDMA_IRQ_VEC_SHIFT * i;
val |= irq_start << shift;
irq_start++;
irq_num--;
/* stop early so the u32 counter cannot underflow when
 * irq_num is not a multiple of 4
 */
if (!irq_num)
break;
}
/* write IRQ register */
ret = regmap_write(xdev->rmap, vec_tbl_start, val);
if (ret)
return ret;
vec_tbl_start += sizeof(u32);
val = 0;
}
return 0;
}
/**
* xdma_irq_init - initialize IRQs
* @xdev: DMA device pointer
*/
static int xdma_irq_init(struct xdma_device *xdev)
{
u32 irq = xdev->irq_start;
u32 user_irq_start;
int i, j, ret;
/* return failure if there are not enough IRQs */
if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
xdma_err(xdev, "not enough irq");
return -EINVAL;
}
/* setup H2C interrupt handler */
for (i = 0; i < xdev->h2c_chan_num; i++) {
ret = request_irq(irq, xdma_channel_isr, 0,
"xdma-h2c-channel", &xdev->h2c_chans[i]);
if (ret) {
xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
i, irq, ret);
goto failed_init_h2c;
}
xdev->h2c_chans[i].irq = irq;
irq++;
}
/* setup C2H interrupt handler */
for (j = 0; j < xdev->c2h_chan_num; j++) {
ret = request_irq(irq, xdma_channel_isr, 0,
"xdma-c2h-channel", &xdev->c2h_chans[j]);
if (ret) {
xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
j, irq, ret);
goto failed_init_c2h;
}
xdev->c2h_chans[j].irq = irq;
irq++;
}
/* config hardware IRQ registers */
ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
XDMA_CHAN_NUM(xdev));
if (ret) {
xdma_err(xdev, "failed to set channel vectors: %d", ret);
goto failed_init_c2h;
}
/* config user IRQ registers if needed */
user_irq_start = XDMA_CHAN_NUM(xdev);
if (xdev->irq_num > user_irq_start) {
ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
user_irq_start,
xdev->irq_num - user_irq_start);
if (ret) {
xdma_err(xdev, "failed to set user vectors: %d", ret);
goto failed_init_c2h;
}
}
/* enable interrupt */
ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
if (ret)
goto failed_init_c2h;
return 0;
failed_init_c2h:
while (j--)
free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
while (i--)
free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
return ret;
}
static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
struct xdma_chan_info *chan_info = param;
return chan_info->dir == xdma_chan->dir;
}
/**
* xdma_disable_user_irq - Disable user interrupt
* @pdev: Pointer to the platform_device structure
* @irq_num: System IRQ number
*/
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
struct xdma_device *xdev = platform_get_drvdata(pdev);
u32 index;
index = irq_num - xdev->irq_start;
if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
xdma_err(xdev, "invalid user irq number");
return;
}
index -= XDMA_CHAN_NUM(xdev);
regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);
/**
* xdma_enable_user_irq - Enable user logic interrupt
* @pdev: Pointer to the platform_device structure
* @irq_num: System IRQ number
*/
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
{
struct xdma_device *xdev = platform_get_drvdata(pdev);
u32 index;
int ret;
index = irq_num - xdev->irq_start;
if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
xdma_err(xdev, "invalid user irq number");
return -EINVAL;
}
index -= XDMA_CHAN_NUM(xdev);
ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL(xdma_enable_user_irq);
/**
* xdma_get_user_irq - Get system IRQ number
* @pdev: Pointer to the platform_device structure
* @user_irq_index: User logic IRQ wire index
*
* Return: The system IRQ number allocated for the given wire index.
*/
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
struct xdma_device *xdev = platform_get_drvdata(pdev);
if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
xdma_err(xdev, "invalid user irq index");
return -EINVAL;
}
return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);
/**
* xdma_remove - Driver remove function
* @pdev: Pointer to the platform_device structure
*/
static int xdma_remove(struct platform_device *pdev)
{
struct xdma_device *xdev = platform_get_drvdata(pdev);
if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
xdma_irq_fini(xdev);
if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
dma_async_device_unregister(&xdev->dma_dev);
return 0;
}
/**
* xdma_probe - Driver probe function
* @pdev: Pointer to the platform_device structure
*/
static int xdma_probe(struct platform_device *pdev)
{
struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
struct xdma_device *xdev;
void __iomem *reg_base;
struct resource *res;
int ret = -ENODEV;
if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
dev_err(&pdev->dev, "invalid max dma channels %d",
pdata->max_dma_channels);
return -EINVAL;
}
xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
if (!xdev)
return -ENOMEM;
platform_set_drvdata(pdev, xdev);
xdev->pdev = pdev;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
xdma_err(xdev, "failed to get irq resource");
goto failed;
}
xdev->irq_start = res->start;
xdev->irq_num = res->end - res->start + 1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
xdma_err(xdev, "failed to get io resource");
goto failed;
}
reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(reg_base)) {
xdma_err(xdev, "ioremap failed");
goto failed;
}
xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
&xdma_regmap_config);
if (IS_ERR(xdev->rmap)) {
xdma_err(xdev, "config regmap failed: %d", ret);
goto failed;
}
INIT_LIST_HEAD(&xdev->dma_dev.channels);
ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
if (ret) {
xdma_err(xdev, "config H2C channels failed: %d", ret);
goto failed;
}
ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
if (ret) {
xdma_err(xdev, "config C2H channels failed: %d", ret);
goto failed;
}
dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
xdev->dma_dev.dev = &pdev->dev;
xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
xdev->dma_dev.device_tx_status = dma_cookie_status;
xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
xdev->dma_dev.device_config = xdma_device_config;
xdev->dma_dev.device_issue_pending = xdma_issue_pending;
xdev->dma_dev.filter.map = pdata->device_map;
xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
xdev->dma_dev.filter.fn = xdma_filter_fn;
ret = dma_async_device_register(&xdev->dma_dev);
if (ret) {
xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
goto failed;
}
xdev->status |= XDMA_DEV_STATUS_REG_DMA;
ret = xdma_irq_init(xdev);
if (ret) {
xdma_err(xdev, "failed to init msix: %d", ret);
goto failed;
}
xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;
return 0;
failed:
xdma_remove(pdev);
return ret;
}
static const struct platform_device_id xdma_id_table[] = {
{ "xdma", 0},
{ },
};
static struct platform_driver xdma_driver = {
.driver = {
.name = "xdma",
},
.id_table = xdma_id_table,
.probe = xdma_probe,
.remove = xdma_remove,
};
module_platform_driver(xdma_driver);
MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");

View file

@ -890,7 +890,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
struct platform_device *pdev)
{
struct zynqmp_dma_chan *chan;
struct resource *res;
struct device_node *node = pdev->dev.of_node;
int err;
@ -900,8 +899,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
chan->dev = zdev->dev;
chan->zdev = zdev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
chan->regs = devm_ioremap_resource(&pdev->dev, res);
chan->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chan->regs))
return PTR_ERR(chan->regs);

View file

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2022, Advanced Micro Devices, Inc.
*/
#ifndef _DMAENGINE_AMD_XDMA_H
#define _DMAENGINE_AMD_XDMA_H
#include <linux/interrupt.h>
#include <linux/platform_device.h>
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num);
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num);
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index);
#endif /* _DMAENGINE_AMD_XDMA_H */
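
These exports let the driver stacked on the XDMA platform device (typically the PCIe parent, or a sibling child of it) route the core's user-logic interrupt wires. A usage sketch; my_user_isr, my_ctx and wire index 0 are invented for illustration:

#include <linux/dma/amd_xdma.h>
#include <linux/interrupt.h>

static irqreturn_t my_user_isr(int irq, void *ctx);

static int my_setup_user_irq(struct platform_device *xdma_pdev, void *my_ctx)
{
	int irq, ret;

	/* translate user wire 0 into its system IRQ number */
	irq = xdma_get_user_irq(xdma_pdev, 0);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, my_user_isr, 0, "my-user-irq", my_ctx);
	if (ret)
		return ret;

	/* unmask the wire in the XDMA interrupt block */
	return xdma_enable_user_irq(xdma_pdev, irq);
}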

View file

@ -40,6 +40,7 @@ enum sdma_peripheral_type {
IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
IMX_DMATYPE_SAI, /* SAI */
IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
IMX_DMATYPE_HDMI, /* HDMI Audio */
};
enum imx_dma_prio {

View file

@ -773,6 +773,7 @@ struct dma_filter {
/**
* struct dma_device - info on the entity supplying DMA services
* @ref: reference is taken and put every time a channel is allocated or freed
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
* @channels: the list of struct dma_chan
@ -789,6 +790,7 @@ struct dma_filter {
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @owner: owner module (automatically set based on the provided dev)
* @chan_ida: unique channel ID
* @src_addr_widths: bit mask of src addr widths the device supports
* Width is specified in bytes, e.g. for a device supporting
* a width of 4 the mask should have BIT(4) set.
@ -802,6 +804,7 @@ struct dma_filter {
* @max_sg_burst: max number of SG list entries executed in a single burst
* DMA transaction with no software intervention for reinitialization.
* Zero value means unlimited number of entries.
* @descriptor_reuse: a submitted transfer can be resubmitted after completion
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
* @device_alloc_chan_resources: allocate resources and return the
@ -839,7 +842,6 @@ struct dma_filter {
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
* @descriptor_reuse: a submitted transfer can be resubmitted after completion
* @device_release: called sometime after dma_async_device_unregister() is
* called and there are no further references to this structure. This
* must be implemented to free resources however many existing drivers
@ -847,6 +849,7 @@ struct dma_filter {
* @dbg_summary_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra,
* controller specific information.
* @dbg_dev_root: the root folder in debugfs for this device
*/
struct dma_device {
struct kref ref;
@ -855,7 +858,7 @@ struct dma_device {
struct list_head channels;
struct list_head global_node;
struct dma_filter filter;
dma_cap_mask_t cap_mask;
dma_cap_mask_t cap_mask;
enum dma_desc_metadata_mode desc_metadata_modes;
unsigned short max_xor;
unsigned short max_pq;
@ -924,10 +927,8 @@ struct dma_device {
struct dma_chan *chan, dma_addr_t dst, u64 data,
unsigned long flags);
void (*device_caps)(struct dma_chan *chan,
struct dma_slave_caps *caps);
int (*device_config)(struct dma_chan *chan,
struct dma_slave_config *config);
void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
int (*device_pause)(struct dma_chan *chan);
int (*device_resume)(struct dma_chan *chan);
int (*device_terminate_all)(struct dma_chan *chan);

View file

@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2022, Advanced Micro Devices, Inc.
*/
#ifndef _PLATDATA_AMD_XDMA_H
#define _PLATDATA_AMD_XDMA_H
#include <linux/dmaengine.h>
/**
* struct xdma_chan_info - DMA channel information
* This information is used to match a channel when requesting a DMA channel
* @dir: Channel transfer direction
*/
struct xdma_chan_info {
enum dma_transfer_direction dir;
};
#define XDMA_FILTER_PARAM(chan_info) ((void *)(chan_info))
struct dma_slave_map;
/**
* struct xdma_platdata - platform specific data for XDMA engine
* @max_dma_channels: Maximum dma channels in each direction
* @device_map_cnt: Number of entries in @device_map
* @device_map: Array of slave-map entries used to match requested channels
*/
struct xdma_platdata {
u32 max_dma_channels;
u32 device_map_cnt;
struct dma_slave_map *device_map;
};
#endif /* _PLATDATA_AMD_XDMA_H */
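
To close the loop, a sketch of how a PCIe parent driver could fill in this platdata so clients can look up channels by name through the slave map ("my-client" and "h2c" are invented names, not from this pull):

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/platform_data/amd_xdma.h>

static struct xdma_chan_info h2c_info = { .dir = DMA_MEM_TO_DEV };

static struct dma_slave_map my_map[] = {
	{ "my-client", "h2c", XDMA_FILTER_PARAM(&h2c_info) },
};

static struct xdma_platdata my_pdata = {
	.max_dma_channels = 4,	/* bounded by XDMA_MAX_CHANNELS */
	.device_map	= my_map,
	.device_map_cnt	= ARRAY_SIZE(my_map),
};

The parent registers an "xdma" platform device with this data plus the BAR and IRQ resources (platform_device_register_resndata() fits), after which a client's dma_request_chan(dev, "h2c") resolves through the map and xdma_filter_fn() above to a channel of the matching direction.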