Merge tag 'drm-misc-next-2024-03-28' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

Two misc-next in one.

drm-misc-next for v6.10-rc1:

The deal of a lifetime! You get ALL of the previous
drm-misc-next-2024-03-21-1 tag!!

But WAIT, there's MORE!

Cross-subsystem Changes:
- Assorted DT binding updates.

Core Changes:
- Clarify how optional wait_hpd_asserted is.
- Shuffle Kconfig names around.

Driver Changes:
- Assorted build fixes for panthor, imagination.
- Add AUO B120XAN01.0 panels.
- Assorted small fixes to panthor, panfrost.

drm-misc-next for v6.10:
UAPI Changes:
- Move some nouveau magic constants to uapi.

Cross-subsystem Changes:
- Move drm-misc to gitlab and freedesktop hosting.
- Add entries for panfrost.

Core Changes:
- Improve placement for TTM bo's in idle/busy handling.
- Improve drm/bridge init ordering.
- Add CONFIG_DRM_WERROR, and use W=1 for drm.
- Assorted documentation updates.
- Make more (drm and driver) headers self-contained and add header
  guards.
- Grab reservation lock in pin/unpin callbacks.
- Fix reservation lock handling for vmap.
- Add edp and edid panel matching, use it to fix a nearly identical
  panel.

Driver Changes:
- Add drm/panthor driver and assorted fixes.
- Assorted small fixes to xlnx, panel-edp, tidss, ci, nouveau,
  panel and bridge drivers.
- Add Samsung s6e3fa7, BOE NT116WHM-N44, CMN N116BCA-EA1,
  CrystalClear CMT430B19N00, Startek KD050HDFIA020-C020A,
  powertip PH128800T006-ZHC01 panels.
- Fix console for omapdrm.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/bea310a6-6ff6-477e-9363-f9f053cfd12a@linux.intel.com
This commit is contained in:
Dave Airlie 2024-04-05 13:15:28 +10:00
commit fee54d08bc
152 changed files with 15901 additions and 824 deletions

View file

@ -0,0 +1,10 @@
What: /sys/bus/platform/drivers/panfrost/.../profiling
Date: February 2024
KernelVersion: 6.8.0
Contact: Adrian Larumbe <adrian.larumbe@collabora.com>
Description:
Get/set drm fdinfo's engine and cycles profiling status.
Valid values are:
0: Don't enable fdinfo job profiling sources.
1: Enable fdinfo job profiling sources, this enables both the GPU's
timestamp and cycle counter registers.

View file

@ -41,6 +41,7 @@ properties:
- enum:
- ti,ds90cf364a # For the DS90CF364A FPD-Link LVDS Receiver
- ti,ds90cf384a # For the DS90CF384A FPD-Link LVDS Receiver
- ti,sn65lvds94 # For the SN65LVDS94 LVDS serdes
- const: lvds-decoder # Generic LVDS decoders compatible fallback
- enum:
- thine,thc63lvdm83d # For the THC63LVDM83D LVDS serializer

View file

@ -19,6 +19,7 @@ properties:
- ampire,am8001280g
- bananapi,lhr050h41
- feixin,k101-im2byl02
- startek,kd050hdfia020
- tdo,tl050hdv35
- wanchanglong,w552946aba
- const: ilitek,ili9881c

View file

@ -19,7 +19,7 @@ description: |
either bilinear interpolation or pixel duplication.
allOf:
- $ref: panel-common.yaml#
- $ref: panel-common-dual.yaml#
properties:
compatible:
@ -59,6 +59,7 @@ required:
- avee-supply
- dvdd-supply
- vddio-supply
- ports
additionalProperties: false

View file

@ -14,9 +14,6 @@ description: |
panels. Support video mode panels from China Star Optoelectronics
Technology (CSOT) and BOE Technology.
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
oneOf:
@ -38,7 +35,6 @@ properties:
description: regulator that supplies the I/O voltage
reg: true
ports: true
rotation: true
backlight: true
@ -47,7 +43,26 @@ required:
- reg
- vddio-supply
- reset-gpios
- ports
allOf:
- $ref: panel-common-dual.yaml#
- if:
properties:
compatible:
contains:
enum:
- novatek,nt36523w
then:
properties:
ports:
properties:
port@1: false
else:
properties:
port: false
ports:
required:
- port@1
unevaluatedProperties: false

View file

@ -0,0 +1,47 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/panel-common-dual.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Common Properties for Dual-Link Display Panels
maintainers:
- Thierry Reding <thierry.reding@gmail.com>
- Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
description:
Properties common for panel ICs supporting dual-link panels. Devices might
also support single-link operation.
allOf:
- $ref: panel-common.yaml#
properties:
ports:
$ref: /schemas/graph.yaml#/properties/ports
additionalProperties: false
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description: First link
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: Second link
"#address-cells": true
"#size-cells": true
required:
- port@0
# Single-panel setups are still allowed.
oneOf:
- required:
- ports
- required:
- port
additionalProperties: true

View file

@ -50,6 +50,8 @@ properties:
- panasonic,vvx10f004b00
# Panasonic 10" WUXGA TFT LCD panel
- panasonic,vvx10f034n00
# Samsung s6e3fa7 1080x2220 based AMS559NK06 AMOLED panel
- samsung,s6e3fa7-ams559nk06
# Samsung s6e3fc2x01 1080x2340 AMOLED panel
- samsung,s6e3fc2x01
# Samsung sofef00 1080x2280 AMOLED panel

View file

@ -91,6 +91,8 @@ properties:
- boe,nv133fhm-n62
# BOE NV140FHM-N49 14.0" FHD a-Si FT panel
- boe,nv140fhmn49
# Crystal Clear Technology CMT430B19N00 4.3" 480x272 TFT-LCD panel
- cct,cmt430b19n00
# CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
- cdtech,s043wq26h-ct7
# CDTech(H.K.) Electronics Limited 7" WSVGA (1024x600) TFT LCD Panel
@ -272,6 +274,8 @@ properties:
- osddisplays,osd070t1718-19ts
# One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
- osddisplays,osd101t2045-53ts
# POWERTIP PH128800T006-ZHC01 10.1" WXGA TFT LCD panel
- powertip,ph128800t006-zhc01
# POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel
- powertip,ph800480t013-idf02
# QiaoDian XianShi Corporation 4.3" TFT LCD panel

View file

@ -23,6 +23,8 @@ properties:
reg: true
backlight: true
width-mm: true
height-mm: true
vddio-supply:
description: VDDIO 1.8V supply

View file

@ -0,0 +1,147 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/gpu/arm,mali-valhall-csf.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: ARM Mali Valhall GPU
maintainers:
- Liviu Dudau <liviu.dudau@arm.com>
- Boris Brezillon <boris.brezillon@collabora.com>
properties:
$nodename:
pattern: '^gpu@[a-f0-9]+$'
compatible:
oneOf:
- items:
- enum:
- rockchip,rk3588-mali
- const: arm,mali-valhall-csf # Mali Valhall GPU model/revision is fully discoverable
reg:
maxItems: 1
interrupts:
items:
- description: Job interrupt
- description: MMU interrupt
- description: GPU interrupt
interrupt-names:
items:
- const: job
- const: mmu
- const: gpu
clocks:
minItems: 1
maxItems: 3
clock-names:
minItems: 1
items:
- const: core
- const: coregroup
- const: stacks
mali-supply: true
operating-points-v2: true
opp-table:
type: object
power-domains:
minItems: 1
maxItems: 5
power-domain-names:
minItems: 1
maxItems: 5
sram-supply: true
"#cooling-cells":
const: 2
dynamic-power-coefficient:
$ref: /schemas/types.yaml#/definitions/uint32
description:
A u32 value that represents the running time dynamic
power coefficient in units of uW/MHz/V^2. The
coefficient can either be calculated from power
measurements or derived by analysis.
The dynamic power consumption of the GPU is
proportional to the square of the Voltage (V) and
the clock frequency (f). The coefficient is used to
calculate the dynamic power as below -
Pdyn = dynamic-power-coefficient * V^2 * f
where voltage is in V, frequency is in MHz.
dma-coherent: true
required:
- compatible
- reg
- interrupts
- interrupt-names
- clocks
- mali-supply
additionalProperties: false
allOf:
- if:
properties:
compatible:
contains:
const: rockchip,rk3588-mali
then:
properties:
clocks:
minItems: 3
power-domains:
maxItems: 1
power-domain-names: false
examples:
- |
#include <dt-bindings/clock/rockchip,rk3588-cru.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/rk3588-power.h>
gpu: gpu@fb000000 {
compatible = "rockchip,rk3588-mali", "arm,mali-valhall-csf";
reg = <0xfb000000 0x200000>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "job", "mmu", "gpu";
clock-names = "core", "coregroup", "stacks";
clocks = <&cru CLK_GPU>, <&cru CLK_GPU_COREGROUP>,
<&cru CLK_GPU_STACKS>;
power-domains = <&power RK3588_PD_GPU>;
operating-points-v2 = <&gpu_opp_table>;
mali-supply = <&vdd_gpu_s0>;
sram-supply = <&vdd_gpu_mem_s0>;
gpu_opp_table: opp-table {
compatible = "operating-points-v2";
opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
opp-microvolt = <675000 675000 850000>;
};
opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <675000 675000 850000>;
};
};
};
...

View file

@ -256,6 +256,8 @@ patternProperties:
description: Catalyst Semiconductor, Inc.
"^cavium,.*":
description: Cavium, Inc.
"^cct,.*":
description: Crystal Clear Technology Sdn. Bhd.
"^cdns,.*":
description: Cadence Design Systems Inc.
"^cdtech,.*":

View file

@ -18,6 +18,11 @@ VM_BIND / EXEC uAPI
.. kernel-doc:: include/uapi/drm/nouveau_drm.h
drm/panthor uAPI
================
.. kernel-doc:: include/uapi/drm/panthor_drm.h
drm/xe uAPI
===========

View file

@ -38,3 +38,12 @@ the currently possible format options:
Possible `drm-engine-` key names are: `fragment`, and `vertex-tiler`.
`drm-curfreq-` values convey the current operating frequency for that engine.
Users must bear in mind that engine and cycle sampling are disabled by default,
because of power saving concerns. `fdinfo` users and benchmark applications which
query the fdinfo file must make sure to toggle the job profiling status of the
driver by writing into the appropriate sysfs node::
echo <N> > /sys/bus/platform/drivers/panfrost/[a-f0-9]*.gpu/profiling
Where `N` is either `0` or `1`, depending on the desired enablement status.

View file

@ -1671,7 +1671,7 @@ F: drivers/soc/versatile/
ARM KOMEDA DRM-KMS DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/arm,komeda.yaml
F: Documentation/gpu/komeda-kms.rst
F: drivers/gpu/drm/arm/display/include/
@ -1683,15 +1683,26 @@ M: Rob Herring <robh@kernel.org>
R: Steven Price <steven.price@arm.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/panfrost.rst
F: drivers/gpu/drm/panfrost/
F: include/uapi/drm/panfrost_drm.h
ARM MALI PANTHOR DRM DRIVER
M: Boris Brezillon <boris.brezillon@collabora.com>
M: Steven Price <steven.price@arm.com>
M: Liviu Dudau <liviu.dudau@arm.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
F: drivers/gpu/drm/panthor/
F: include/uapi/drm/panthor_drm.h
ARM MALI-DP DRM DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/arm,malidp.yaml
F: Documentation/gpu/afbc.rst
F: drivers/gpu/drm/arm/
@ -6312,7 +6323,7 @@ L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/driver-api/dma-buf.rst
F: Documentation/userspace-api/dma-buf-alloc-exchange.rst
F: drivers/dma-buf/
@ -6366,7 +6377,7 @@ L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/dma-buf/dma-heap.c
F: drivers/dma-buf/heaps/*
F: include/linux/dma-heap.h
@ -6575,7 +6586,7 @@ M: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
M: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/accel/ivpu/
F: include/uapi/drm/ivpu_accel.h
@ -6595,18 +6606,18 @@ M: Chen-Yu Tsai <wens@csie.org>
R: Jernej Skrabec <jernej.skrabec@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/sun4i/sun8i*
DRM DRIVER FOR ARM PL111 CLCD
S: Orphan
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/pl111/
DRM DRIVER FOR ARM VERSATILE TFT PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
F: drivers/gpu/drm/panel/panel-arm-versatile.c
@ -6614,7 +6625,7 @@ DRM DRIVER FOR ASPEED BMC GFX
M: Joel Stanley <joel@jms.id.au>
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
F: drivers/gpu/drm/aspeed/
@ -6624,14 +6635,14 @@ R: Thomas Zimmermann <tzimmermann@suse.de>
R: Jocelyn Falempe <jfalempe@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/ast/
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux.dev
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tiny/bochs.c
DRM DRIVER FOR BOE HIMAX8279D PANELS
@ -6649,14 +6660,14 @@ F: drivers/gpu/drm/bridge/chipone-icn6211.c
DRM DRIVER FOR EBBG FT8719 PANEL
M: Joel Selvaraj <jo@jsfamily.in>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/ebbg,ft8719.yaml
F: drivers/gpu/drm/panel/panel-ebbg-ft8719.c
DRM DRIVER FOR FARADAY TVE200 TV ENCODER
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tve200/
DRM DRIVER FOR FEIXIN K101 IM2BA02 MIPI-DSI LCD PANELS
@ -6676,7 +6687,7 @@ M: Thomas Zimmermann <tzimmermann@suse.de>
M: Javier Martinez Canillas <javierm@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/drm_aperture.c
F: drivers/gpu/drm/tiny/ofdrm.c
F: drivers/gpu/drm/tiny/simpledrm.c
@ -6695,27 +6706,27 @@ DRM DRIVER FOR GENERIC USB DISPLAY
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
W: https://github.com/notro/gud/wiki
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/gud/
F: include/drm/gud.h
DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS
M: Hans de Goede <hdegoede@redhat.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tiny/gm12u320.c
DRM DRIVER FOR HIMAX HX8394 MIPI-DSI LCD panels
M: Ondrej Jirman <megi@xff.cz>
M: Javier Martinez Canillas <javierm@redhat.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
F: drivers/gpu/drm/panel/panel-himax-hx8394.c
DRM DRIVER FOR HX8357D PANELS
S: Orphan
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/himax,hx8357d.txt
F: drivers/gpu/drm/tiny/hx8357d.c
@ -6724,20 +6735,20 @@ M: Deepak Rawat <drawat.floss@gmail.com>
L: linux-hyperv@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/hyperv
DRM DRIVER FOR ILITEK ILI9225 PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt
F: drivers/gpu/drm/tiny/ili9225.c
DRM DRIVER FOR ILITEK ILI9486 PANELS
M: Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
F: drivers/gpu/drm/tiny/ili9486.c
@ -6756,14 +6767,14 @@ F: drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
DRM DRIVER FOR LOGICVC DISPLAY CONTROLLER
M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/logicvc/
DRM DRIVER FOR LVDS PANELS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/lvds.yaml
F: Documentation/devicetree/bindings/display/panel/panel-lvds.yaml
F: drivers/gpu/drm/panel/panel-lvds.c
@ -6781,13 +6792,13 @@ R: Thomas Zimmermann <tzimmermann@suse.de>
R: Jocelyn Falempe <jfalempe@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/mgag200/
DRM DRIVER FOR MI0283QT
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
F: drivers/gpu/drm/tiny/mi0283qt.c
@ -6795,7 +6806,7 @@ DRM DRIVER FOR MIPI DBI compatible panels
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
W: https://github.com/notro/panel-mipi-dbi/wiki
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
F: drivers/gpu/drm/tiny/panel-mipi-dbi.c
@ -6819,28 +6830,28 @@ F: include/uapi/drm/msm_drm.h
DRM DRIVER FOR NOVATEK NT35510 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35510.c
DRM DRIVER FOR NOVATEK NT35560 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35560.c
DRM DRIVER FOR NOVATEK NT36523 PANELS
M: Jianhua Lu <lujianhua000@gmail.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36523.c
DRM DRIVER FOR NOVATEK NT36672A PANELS
M: Sumit Semwal <sumit.semwal@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@ -6874,7 +6885,7 @@ F: drivers/gpu/drm/bridge/parade-ps8640.c
DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/repaper.txt
F: drivers/gpu/drm/tiny/repaper.c
@ -6884,7 +6895,7 @@ M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux.dev
S: Obsolete
W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/tiny/cirrus.c
DRM DRIVER FOR QXL VIRTUAL GPU
@ -6893,7 +6904,7 @@ M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux.dev
L: spice-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/qxl/
F: include/uapi/drm/qxl_drm.h
@ -6906,7 +6917,7 @@ F: drivers/gpu/drm/panel/panel-raydium-rm67191.c
DRM DRIVER FOR SAMSUNG DB7430 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
F: drivers/gpu/drm/panel/panel-samsung-db7430.c
@ -6915,7 +6926,7 @@ M: Inki Dae <inki.dae@samsung.com>
M: Jagan Teki <jagan@amarulasolutions.com>
M: Marek Szyprowski <m.szyprowski@samsung.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/bridge/samsung,mipi-dsim.yaml
F: drivers/gpu/drm/bridge/samsung-dsim.c
F: include/drm/bridge/samsung-dsim.h
@ -6935,7 +6946,7 @@ F: drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
DRM DRIVER FOR SITRONIX ST7586 PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/sitronix,st7586.txt
F: drivers/gpu/drm/tiny/st7586.c
@ -6956,14 +6967,14 @@ F: drivers/gpu/drm/panel/panel-sitronix-st7703.c
DRM DRIVER FOR SITRONIX ST7735R PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
F: drivers/gpu/drm/tiny/st7735r.c
DRM DRIVER FOR SOLOMON SSD130X OLED DISPLAYS
M: Javier Martinez Canillas <javierm@redhat.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/solomon,ssd-common.yaml
F: Documentation/devicetree/bindings/display/solomon,ssd13*.yaml
F: drivers/gpu/drm/solomon/ssd130x*
@ -6971,7 +6982,7 @@ F: drivers/gpu/drm/solomon/ssd130x*
DRM DRIVER FOR ST-ERICSSON MCDE
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ste,mcde.yaml
F: drivers/gpu/drm/mcde/
@ -6995,7 +7006,7 @@ F: drivers/gpu/drm/bridge/ti-sn65dsi86.c
DRM DRIVER FOR TPO TPG110 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/tpo,tpg110.yaml
F: drivers/gpu/drm/panel/panel-tpo-tpg110.c
@ -7005,7 +7016,7 @@ R: Sean Paul <sean@poorly.run>
R: Thomas Zimmermann <tzimmermann@suse.de>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/udl/
DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
@ -7016,7 +7027,7 @@ R: Haneen Mohammed <hamohammed.sa@gmail.com>
R: Daniel Vetter <daniel@ffwll.ch>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/vkms.rst
F: drivers/gpu/drm/vkms/
@ -7024,7 +7035,7 @@ DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
M: Hans de Goede <hdegoede@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/vboxvideo/
DRM DRIVER FOR VMWARE VIRTUAL GPU
@ -7032,14 +7043,14 @@ M: Zack Rusin <zack.rusin@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/vmwgfx/
F: include/uapi/drm/vmwgfx_drm.h
DRM DRIVER FOR WIDECHIPS WS2401 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
F: drivers/gpu/drm/panel/panel-widechips-ws2401.c
@ -7064,8 +7075,8 @@ M: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
M: Maxime Ripard <mripard@kernel.org>
M: Thomas Zimmermann <tzimmermann@suse.de>
S: Maintained
W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
T: git git://anongit.freedesktop.org/drm/drm-misc
W: https://drm.pages.freedesktop.org/maintainer-tools/drm-misc.html
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/
F: Documentation/devicetree/bindings/gpu/
F: Documentation/gpu/
@ -7092,7 +7103,7 @@ M: Maxime Ripard <mripard@kernel.org>
M: Chen-Yu Tsai <wens@csie.org>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/allwinner*
F: drivers/gpu/drm/sun4i/
@ -7102,7 +7113,7 @@ L: dri-devel@lists.freedesktop.org
L: linux-amlogic@lists.infradead.org
S: Supported
W: http://linux-meson.com/
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
F: Documentation/gpu/meson.rst
@ -7114,7 +7125,7 @@ M: Sam Ravnborg <sam@ravnborg.org>
M: Boris Brezillon <bbrezillon@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/atmel/
F: drivers/gpu/drm/atmel-hlcdc/
@ -7126,7 +7137,7 @@ R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
R: Jonas Karlman <jonas@kwiboo.se>
R: Jernej Skrabec <jernej.skrabec@gmail.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/bridge/
F: drivers/gpu/drm/bridge/
F: drivers/gpu/drm/drm_bridge.c
@ -7151,7 +7162,7 @@ M: Stefan Agner <stefan@agner.ch>
M: Alison Wang <alison.wang@nxp.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/fsl,dcu.txt
F: Documentation/devicetree/bindings/display/fsl,tcon.txt
F: drivers/gpu/drm/fsl-dcu/
@ -7160,7 +7171,7 @@ DRM DRIVERS FOR FREESCALE IMX 5/6
M: Philipp Zabel <p.zabel@pengutronix.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
T: git git://git.pengutronix.de/git/pza/linux
F: Documentation/devicetree/bindings/display/imx/
F: drivers/gpu/drm/imx/ipuv3/
@ -7180,7 +7191,7 @@ DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
M: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
@ -7192,7 +7203,7 @@ R: Yongqin Liu <yongqin.liu@linaro.org>
R: John Stultz <jstultz@google.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/hisilicon/
F: drivers/gpu/drm/hisilicon/
@ -7201,7 +7212,7 @@ M: Qiang Yu <yuq825@gmail.com>
L: dri-devel@lists.freedesktop.org
L: lima@lists.freedesktop.org (moderated for non-subscribers)
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/lima/
F: include/uapi/drm/lima_drm.h
@ -7209,7 +7220,7 @@ DRM DRIVERS FOR LOONGSON
M: Sui Jingfeng <suijingfeng@loongson.cn>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/loongson/
DRM DRIVERS FOR MEDIATEK
@ -7257,7 +7268,7 @@ M: Biju Das <biju.das.jz@bp.renesas.com>
L: dri-devel@lists.freedesktop.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
F: drivers/gpu/drm/renesas/rz-du/
@ -7267,7 +7278,7 @@ M: Geert Uytterhoeven <geert+renesas@glider.be>
L: dri-devel@lists.freedesktop.org
L: linux-renesas-soc@vger.kernel.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
F: drivers/gpu/drm/renesas/shmobile/
F: include/linux/platform_data/shmob_drm.h
@ -7278,7 +7289,7 @@ M: Heiko Stübner <heiko@sntech.de>
M: Andy Yan <andy.yan@rock-chips.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/rockchip/
F: drivers/gpu/drm/ci/xfails/rockchip*
F: drivers/gpu/drm/rockchip/
@ -7287,7 +7298,7 @@ DRM DRIVERS FOR STI
M: Alain Volmat <alain.volmat@foss.st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/st,stih4xx.txt
F: drivers/gpu/drm/sti
@ -7297,7 +7308,7 @@ M: Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
M: Philippe Cornu <philippe.cornu@foss.st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
F: drivers/gpu/drm/stm
@ -7306,7 +7317,7 @@ M: Jyri Sarha <jyri.sarha@iki.fi>
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
F: Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
F: Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
@ -7317,7 +7328,7 @@ M: Jyri Sarha <jyri.sarha@iki.fi>
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/tilcdc/
F: drivers/gpu/drm/tilcdc/
@ -7325,7 +7336,7 @@ DRM DRIVERS FOR TI OMAP
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/ti/
F: drivers/gpu/drm/omapdrm/
@ -7333,7 +7344,7 @@ DRM DRIVERS FOR V3D
M: Melissa Wen <mwen@igalia.com>
M: Maíra Canal <mcanal@igalia.com>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
F: drivers/gpu/drm/v3d/
F: include/uapi/drm/v3d_drm.h
@ -7342,7 +7353,7 @@ DRM DRIVERS FOR VC4
M: Maxime Ripard <mripard@kernel.org>
S: Supported
T: git git://github.com/anholt/linux
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/brcm,bcm2835-*.yaml
F: drivers/gpu/drm/vc4/
F: include/uapi/drm/vc4_drm.h
@ -7363,7 +7374,7 @@ M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
L: dri-devel@lists.freedesktop.org
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/xen-front.rst
F: drivers/gpu/drm/xen/
@ -7371,7 +7382,7 @@ DRM DRIVERS FOR XILINX
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/xlnx/
F: drivers/gpu/drm/xlnx/
@ -7380,7 +7391,7 @@ M: Luben Tuikov <ltuikov89@gmail.com>
M: Matthew Brost <matthew.brost@intel.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/scheduler/
F: include/drm/gpu_scheduler.h
@ -7390,7 +7401,7 @@ R: Jessica Zhang <quic_jesszhan@quicinc.com>
R: Sam Ravnborg <sam@ravnborg.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/
F: drivers/gpu/drm/drm_panel.c
F: drivers/gpu/drm/panel/
@ -7400,7 +7411,7 @@ DRM PRIVACY-SCREEN CLASS
M: Hans de Goede <hdegoede@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/drm_privacy_screen*
F: include/drm/drm_privacy_screen*
@ -7409,7 +7420,7 @@ M: Christian Koenig <christian.koenig@amd.com>
M: Huang Rui <ray.huang@amd.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/ttm/
F: include/drm/ttm/
@ -7417,7 +7428,7 @@ DRM AUTOMATED TESTING
M: Helen Koike <helen.koike@collabora.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/automated_testing.rst
F: drivers/gpu/drm/ci/
@ -8483,7 +8494,7 @@ F: arch/x86/math-emu/
FRAMEBUFFER CORE
M: Daniel Vetter <daniel@ffwll.ch>
S: Odd Fixes
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/video/fbdev/core/
FRAMEBUFFER LAYER
@ -10590,7 +10601,7 @@ IMGTEC POWERVR DRM DRIVER
M: Frank Binns <frank.binns@imgtec.com>
M: Matt Coster <matt.coster@imgtec.com>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
F: Documentation/devicetree/bindings/gpu/img,powervr-sgx.yaml
F: Documentation/gpu/imagination/
@ -11371,7 +11382,7 @@ IOSYS-MAP HELPERS
M: Thomas Zimmermann <tzimmermann@suse.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: include/linux/iosys-map.h
IO_URING
@ -11564,7 +11575,7 @@ ITE IT66121 HDMI BRIDGE DRIVER
M: Phong LE <ple@baylibre.com>
M: Neil Armstrong <neil.armstrong@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
F: drivers/gpu/drm/bridge/ite-it66121.c
@ -15154,7 +15165,7 @@ M: Marek Vasut <marex@denx.de>
M: Stefan Agner <stefan@agner.ch>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/fsl,lcdif.yaml
F: drivers/gpu/drm/mxsfb/
@ -15875,7 +15886,7 @@ M: Laurentiu Palcu <laurentiu.palcu@oss.nxp.com>
R: Lucas Stach <l.stach@pengutronix.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
F: drivers/gpu/drm/imx/dcss/
@ -18179,7 +18190,7 @@ R: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/accel/qaic/
F: drivers/accel/qaic/
F: include/uapi/drm/qaic_accel.h
@ -21331,7 +21342,7 @@ R: Gustavo Padovan <gustavo@padovan.org>
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/driver-api/sync_file.rst
F: drivers/dma-buf/dma-fence*
F: drivers/dma-buf/sw_sync.c
@ -23113,7 +23124,7 @@ USERSPACE DMA BUFFER DRIVER
M: Gerd Hoffmann <kraxel@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/dma-buf/udmabuf.c
F: include/uapi/linux/udmabuf.h
@ -23295,7 +23306,7 @@ F: drivers/vfio/pci/virtio
VGA_SWITCHEROO
R: Lukas Wunner <lukas@wunner.de>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/vga-switcheroo.rst
F: drivers/gpu/vga/vga_switcheroo.c
F: include/linux/vga_switcheroo.h
@ -23488,7 +23499,7 @@ R: Chia-I Wu <olvaffe@gmail.com>
L: dri-devel@lists.freedesktop.org
L: virtualization@lists.linux.dev
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/ci/xfails/virtio*
F: drivers/gpu/drm/virtio/
F: include/uapi/linux/virtio_gpu.h

View file

@ -2,6 +2,8 @@
#ifndef __M68K_PGTABLE_H
#define __M68K_PGTABLE_H
#include <asm/page.h>
#ifdef __uClinux__
#include <asm/pgtable_no.h>
#else

View file

@ -131,7 +131,7 @@ CONFIG_PPDEV=m
CONFIG_I2C=y
CONFIG_HWMON=m
CONFIG_DRM=m
CONFIG_DRM_DP_CEC=y
CONFIG_DRM_DISPLAY_DP_AUX_CEC=y
# CONFIG_DRM_I2C_CH7006 is not set
# CONFIG_DRM_I2C_SIL164 is not set
CONFIG_DRM_RADEON=m

View file

@ -74,10 +74,12 @@ config DRM_KUNIT_TEST_HELPERS
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
depends on DRM && KUNIT && MMU
depends on DRM
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on KUNIT
depends on MMU
select DRM_BUDDY
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
select DRM_GEM_SHMEM_HELPER
@ -371,6 +373,8 @@ source "drivers/gpu/drm/lima/Kconfig"
source "drivers/gpu/drm/panfrost/Kconfig"
source "drivers/gpu/drm/panthor/Kconfig"
source "drivers/gpu/drm/aspeed/Kconfig"
source "drivers/gpu/drm/mcde/Kconfig"
@ -414,3 +418,16 @@ config DRM_LIB_RANDOM
config DRM_PRIVACY_SCREEN
bool
default n
config DRM_WERROR
bool "Compile the drm subsystem with warnings as errors"
depends on DRM && EXPERT
default n
help
A kernel build should not cause any compiler warnings, and this
enables the '-Werror' flag to enforce that rule in the drm subsystem.
The drm subsystem enables more warnings than the kernel default, so
this config option is disabled by default.
If in doubt, say N.

View file

@ -5,6 +5,34 @@
CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
# Unconditionally enable W=1 warnings locally
# --- begin copy-paste W=1 warnings from scripts/Makefile.extrawarn
subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter
subdir-ccflags-y += $(call cc-option, -Wrestrict)
subdir-ccflags-y += -Wmissing-format-attribute
subdir-ccflags-y += -Wold-style-definition
subdir-ccflags-y += -Wmissing-include-dirs
subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
# FIXME: fix -Wformat-truncation warnings and uncomment
#subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
# The following turn off the warnings enabled by -Wextra
ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
subdir-ccflags-y += -Wno-missing-field-initializers
subdir-ccflags-y += -Wno-type-limits
subdir-ccflags-y += -Wno-shift-negative-value
endif
ifeq ($(findstring 3, $(KBUILD_EXTRA_WARN)),)
subdir-ccflags-y += -Wno-sign-compare
endif
# --- end copy-paste
# Enable -Werror in CI and development
subdir-ccflags-$(CONFIG_DRM_WERROR) += -Werror
drm-y := \
drm_aperture.o \
drm_atomic.o \
@ -179,6 +207,7 @@ obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_PANTHOR) += panthor/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/

View file

@ -2,13 +2,15 @@
config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
depends on DRM
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HDCP_HELPER
depends on DRM_DISPLAY_HDMI_HELPER
depends on DRM_DISPLAY_HELPER
depends on MMU
depends on PCI
depends on !UML
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_SCHED
select DRM_TTM

View file

@ -173,6 +173,12 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
AMDGPU_PL_PREEMPT : TTM_PL_TT;
places[c].flags = 0;
/*
* When GTT is just an alternative to VRAM make sure that we
* only use it as fallback and still try to fill up VRAM first.
*/
if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}

View file

@ -92,13 +92,12 @@ config DRM_FSL_LDB
config DRM_ITE_IT6505
tristate "ITE IT6505 DisplayPort bridge"
depends on DRM_DISPLAY_DP_AUX_BUS
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HDCP_HELPER
depends on DRM_DISPLAY_HELPER
depends on OF
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_DP_HELPER
select EXTCON
select CRYPTO
select CRYPTO_HASH
@ -226,10 +225,10 @@ config DRM_PARADE_PS8622
config DRM_PARADE_PS8640
tristate "Parade PS8640 MIPI DSI to eDP Converter"
depends on DRM_DISPLAY_DP_AUX_BUS
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on OF
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
@ -313,9 +312,9 @@ config DRM_TOSHIBA_TC358764
config DRM_TOSHIBA_TC358767
tristate "Toshiba TC358767 eDP bridge"
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on OF
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_MIPI_DSI
@ -336,9 +335,9 @@ config DRM_TOSHIBA_TC358768
config DRM_TOSHIBA_TC358775
tristate "Toshiba TC358775 DSI/LVDS bridge"
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on OF
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_PANEL
@ -381,15 +380,15 @@ config DRM_TI_SN65DSI83
config DRM_TI_SN65DSI86
tristate "TI SN65DSI86 DSI to eDP bridge"
depends on DRM_DISPLAY_DP_AUX_BUS
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on OF
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_PANEL
select DRM_MIPI_DSI
select AUXILIARY_BUS
select DRM_DP_AUX_BUS
help
Texas Instruments SN65DSI86 DSI to eDP Bridge driver

View file

@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_ANALOGIX_ANX6345
tristate "Analogix ANX6345 bridge"
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on OF
select DRM_ANALOGIX_DP
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
help
@ -15,9 +15,9 @@ config DRM_ANALOGIX_ANX6345
config DRM_ANALOGIX_ANX78XX
tristate "Analogix ANX78XX bridge"
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
select DRM_ANALOGIX_DP
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
help
@ -33,11 +33,11 @@ config DRM_ANALOGIX_DP
config DRM_ANALOGIX_ANX7625
tristate "Analogix Anx7625 MIPI to DP interface support"
depends on DRM
depends on DRM_DISPLAY_DP_AUX_BUS
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HDCP_HELPER
depends on DRM_DISPLAY_HELPER
depends on OF
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
select DRM_DP_AUX_BUS
select DRM_MIPI_DSI
help
ANX7625 is an ultra-low power 4K mobile HD transmitter

View file

@ -23,12 +23,12 @@ endif
config DRM_CDNS_MHDP8546
tristate "Cadence DPI/DP bridge"
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HDCP_HELPER
depends on DRM_DISPLAY_HELPER
depends on OF
select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
depends on OF
help
Support Cadence DPI to DP bridge. This is an internal
bridge and is meant to be directly embedded in a SoC.

View file

@ -5,9 +5,9 @@ config DRM_IMX_LDB_HELPER
config DRM_IMX8MP_DW_HDMI_BRIDGE
tristate "Freescale i.MX8MP HDMI-TX bridge support"
depends on OF
depends on COMMON_CLK
select DRM_DW_HDMI
depends on DRM_DW_HDMI
depends on OF
select DRM_IMX8MP_HDMI_PVI
select PHY_FSL_SAMSUNG_HDMI_PHY
help

View file

@ -1540,12 +1540,6 @@ static int it66121_probe(struct i2c_client *client)
return -EINVAL;
}
if (!of_device_is_available(ep)) {
of_node_put(ep);
dev_err(ctx->dev, "The remote device is disabled\n");
return -ENODEV;
}
ctx->next_bridge = of_drm_find_bridge(ep);
of_node_put(ep);
if (!ctx->next_bridge) {
@ -1586,13 +1580,18 @@ static int it66121_probe(struct i2c_client *client)
ctx->bridge.funcs = &it66121_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
if (client->irq > 0) {
ctx->bridge.ops |= DRM_BRIDGE_OP_HPD;
ret = devm_request_threaded_irq(dev, client->irq, NULL, it66121_irq_threaded_handler,
IRQF_ONESHOT, dev_name(dev), ctx);
if (ret < 0) {
dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
return ret;
ret = devm_request_threaded_irq(dev, client->irq, NULL,
it66121_irq_threaded_handler,
IRQF_ONESHOT, dev_name(dev),
ctx);
if (ret < 0) {
dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
return ret;
}
}
it66121_audio_codec_init(ctx, dev);

View file

@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_DW_HDMI
tristate
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
depends on DRM_DISPLAY_HDMI_HELPER
depends on DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_MMIO
select CEC_CORE if CEC_NOTIFIER

View file

@ -3291,40 +3291,17 @@ static void dw_hdmi_init_hw(struct dw_hdmi *hdmi)
static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi)
{
struct device_node *endpoint;
struct device_node *remote;
if (!hdmi->plat_data->output_port)
return 0;
endpoint = of_graph_get_endpoint_by_regs(hdmi->dev->of_node,
hdmi->plat_data->output_port,
-1);
if (!endpoint) {
/*
* On platforms whose bindings don't make the output port
* mandatory (such as Rockchip) the plat_data->output_port
* field isn't set, so it's safe to make this a fatal error.
*/
dev_err(hdmi->dev, "Missing endpoint in port@%u\n",
hdmi->plat_data->output_port);
return -ENODEV;
}
remote = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
if (!remote) {
dev_err(hdmi->dev, "Endpoint in port@%u unconnected\n",
hdmi->plat_data->output_port);
remote = of_graph_get_remote_node(hdmi->dev->of_node,
hdmi->plat_data->output_port,
-1);
if (!remote)
return -ENODEV;
}
if (!of_device_is_available(remote)) {
dev_err(hdmi->dev, "port@%u remote device is disabled\n",
hdmi->plat_data->output_port);
of_node_put(remote);
return -ENODEV;
}
hdmi->next_bridge = of_drm_find_bridge(remote);
of_node_put(remote);

View file

@ -123,29 +123,14 @@ static int thc63_parse_dt(struct thc63_dev *thc63)
struct device_node *endpoint;
struct device_node *remote;
endpoint = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
THC63_RGB_OUT0, -1);
if (!endpoint) {
dev_err(thc63->dev, "Missing endpoint in port@%u\n",
THC63_RGB_OUT0);
return -ENODEV;
}
remote = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
remote = of_graph_get_remote_node(thc63->dev->of_node,
THC63_RGB_OUT0, -1);
if (!remote) {
dev_err(thc63->dev, "Endpoint in port@%u unconnected\n",
dev_err(thc63->dev, "No remote endpoint for port@%u\n",
THC63_RGB_OUT0);
return -ENODEV;
}
if (!of_device_is_available(remote)) {
dev_err(thc63->dev, "port@%u remote endpoint is disabled\n",
THC63_RGB_OUT0);
of_node_put(remote);
return -ENODEV;
}
thc63->next = of_drm_find_bridge(remote);
of_node_put(remote);
if (!thc63->next)

View file

@ -252,11 +252,11 @@ i915:cml:
i915:tgl:
extends:
- .i915
parallel: 8
parallel: 5
variables:
DEVICE_TYPE: asus-cx9400-volteer
DEVICE_TYPE: acer-cp514-2h-1130g7-volteer
GPU_VERSION: tgl
RUNNER_TAG: mesa-ci-x86-64-lava-asus-cx9400-volteer
RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer
.amdgpu:
extends:

View file

@ -1,31 +1,57 @@
# SPDX-License-Identifier: MIT
config DRM_DP_AUX_BUS
tristate
depends on DRM
depends on OF || COMPILE_TEST
config DRM_DISPLAY_HELPER
tristate
tristate "DRM Display Helpers"
depends on DRM
default y
help
DRM helpers for display adapters.
config DRM_DISPLAY_DP_HELPER
bool
config DRM_DISPLAY_DP_AUX_BUS
tristate "DRM DisplayPort AUX bus support"
depends on DRM
depends on OF || COMPILE_TEST
default y
config DRM_DISPLAY_DP_AUX_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
depends on DRM
depends on DRM_DISPLAY_HELPER
depends on DRM_DISPLAY_DP_HELPER
select CEC_CORE
help
Choose this option if you want to enable HDMI CEC support for
DisplayPort/USB-C to HDMI adapters.
Note: not all adapters support this feature, and even for those
that do support this they often do not hook up the CEC pin.
config DRM_DISPLAY_DP_AUX_CHARDEV
bool "DRM DisplayPort AUX Interface"
depends on DRM
depends on DRM_DISPLAY_HELPER
depends on DRM_DISPLAY_DP_HELPER
help
Choose this option to enable a /dev/drm_dp_auxN node that allows to
read and write values to arbitrary DPCD registers on the DP aux
channel.
config DRM_DISPLAY_DP_HELPER
bool "DRM DisplayPort Helpers"
depends on DRM_DISPLAY_HELPER
default y
help
DRM display helpers for DisplayPort.
config DRM_DISPLAY_DP_TUNNEL
bool
select DRM_DISPLAY_DP_HELPER
bool "DRM DisplayPort tunnels support"
depends on DRM_DISPLAY_DP_HELPER
help
Enable support for DisplayPort tunnels. This allows drivers to use
DP tunnel features like the Bandwidth Allocation mode to maximize the
BW utilization for display streams on Thunderbolt links.
config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
bool "Enable debugging the DP tunnel state"
depends on REF_TRACKER
depends on DRM_DISPLAY_DP_TUNNEL
@ -39,34 +65,15 @@ config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
If in doubt, say "N".
config DRM_DISPLAY_HDCP_HELPER
bool
bool "DRM HDCP Helpers"
depends on DRM_DISPLAY_HELPER
default y
help
DRM display helpers for HDCP.
config DRM_DISPLAY_HDMI_HELPER
bool
bool "DRM HDMI Helpers"
depends on DRM_DISPLAY_HELPER
default y
help
DRM display helpers for HDMI.
config DRM_DP_AUX_CHARDEV
bool "DRM DP AUX Interface"
depends on DRM && DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
help
Choose this option to enable a /dev/drm_dp_auxN node that allows to
read and write values to arbitrary DPCD registers on the DP aux
channel.
config DRM_DP_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
depends on DRM && DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
select CEC_CORE
help
Choose this option if you want to enable HDMI CEC support for
DisplayPort/USB-C to HDMI adapters.
Note: not all adapters support this feature, and even for those
that do support this they often do not hook up the CEC pin.

View file

@ -1,6 +1,6 @@
# SPDX-License-Identifier: MIT
obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
obj-$(CONFIG_DRM_DISPLAY_DP_AUX_BUS) += drm_dp_aux_bus.o
drm_display_helper-y := drm_display_helper_mod.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
@ -14,7 +14,7 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
drm_display_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
drm_display_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_AUX_CEC) += drm_dp_cec.o
obj-$(CONFIG_DRM_DISPLAY_HELPER) += drm_display_helper.o

View file

@ -2113,7 +2113,7 @@ EXPORT_SYMBOL(drm_dp_aux_init);
* drm_dp_aux_register() in &drm_connector_funcs.late_register, and likewise to
* call drm_dp_aux_unregister() in &drm_connector_funcs.early_unregister.
* Functions which don't follow this will likely Oops when
* %CONFIG_DRM_DP_AUX_CHARDEV is enabled.
* %CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV is enabled.
*
* For devices where the AUX channel is a device that exists independently of
* the &drm_device that uses it, such as SoCs and bridge devices, it is

View file

@ -5,7 +5,7 @@
struct drm_dp_aux;
#ifdef CONFIG_DRM_DP_AUX_CHARDEV
#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV
int drm_dp_aux_dev_init(void);
void drm_dp_aux_dev_exit(void);
int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);

View file

@ -10,7 +10,9 @@
#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_
#define _DRM_DP_MST_HELPER_INTERNAL_H_
#include <drm/display/drm_dp_mst_helper.h>
struct drm_dp_sideband_msg_req_body;
struct drm_dp_sideband_msg_tx;
struct drm_printer;
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,

View file

@ -191,7 +191,7 @@ struct drm_dp_tunnel_mgr {
struct drm_dp_tunnel_group *groups;
wait_queue_head_t bw_req_queue;
#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
struct ref_tracker_dir ref_tracker;
#endif
};
@ -385,7 +385,7 @@ static void tunnel_put(struct drm_dp_tunnel *tunnel)
kref_put(&tunnel->kref, free_tunnel);
}
#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
struct ref_tracker **tracker)
{
@ -1603,7 +1603,7 @@ static void cleanup_group(struct drm_dp_tunnel_group *group)
drm_atomic_private_obj_fini(&group->base);
}
#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
{
const struct drm_dp_tunnel_state *tunnel_state;
@ -1881,7 +1881,7 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels));
}
#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
ref_tracker_dir_exit(&mgr->ref_tracker);
#endif
@ -1918,7 +1918,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
return NULL;
}
#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
#endif

View file

@ -657,6 +657,13 @@ static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
* bridge will be called before the previous one to reverse the @pre_enable
* calling direction.
*
* Example:
* Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
*
* With the pre_enable_prev_first flag enabled in Bridges B, D, E, the resulting
* @post_disable order would be,
* Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
@ -687,11 +694,17 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
*/
list_for_each_entry_from(next, &encoder->bridge_chain,
chain_node) {
if (next->pre_enable_prev_first) {
if (!next->pre_enable_prev_first) {
next = list_prev_entry(next, chain_node);
limit = next;
break;
}
if (list_is_last(&next->chain_node,
&encoder->bridge_chain)) {
limit = next;
break;
}
}
/* Call these bridges in reverse order */
@ -747,6 +760,13 @@ static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
* If a bridge sets @pre_enable_prev_first, then the pre_enable for the
* prev bridge will be called before pre_enable of this bridge.
*
* Example:
* Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
*
* With the pre_enable_prev_first flag enabled in Bridges B, D, E, the resulting
* @pre_enable order would be,
* Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
@ -774,7 +794,7 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
/* Found first bridge that does NOT
* request prev to be enabled first
*/
limit = list_prev_entry(next, chain_node);
limit = next;
break;
}
}

View file

@ -304,6 +304,66 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
return ERR_PTR(ret);
}
/**
* drm_client_buffer_vmap_local - Map DRM client buffer into address space
* @buffer: DRM client buffer
* @map_copy: Returns the mapped memory's address
*
* This function maps a client buffer into kernel address space. If the
* buffer is already mapped, it returns the existing mapping's address.
*
* Client buffer mappings are not ref'counted. Each call to
* drm_client_buffer_vmap_local() should be closely followed by a call to
* drm_client_buffer_vunmap_local(). See drm_client_buffer_vmap() for
* long-term mappings.
*
* The returned address is a copy of the internal value. In contrast to
* other vmap interfaces, you don't need it for the client's vunmap
* function. So you can modify it at will during blit and draw operations.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
				 struct iosys_map *map_copy)
{
	struct drm_gem_object *gem = buffer->gem;
	struct iosys_map *map = &buffer->map;
	int ret;

	/* Hold the reservation lock across the short-term mapping. */
	drm_gem_lock(gem);

	ret = drm_gem_vmap(gem, map);
	if (ret)
		goto err_drm_gem_vmap_unlocked;
	*map_copy = *map;

	return 0;

err_drm_gem_vmap_unlocked:
	drm_gem_unlock(gem);
	/*
	 * Propagate the drm_gem_vmap() error. Returning 0 here would
	 * report success to the caller while *map_copy was never set.
	 */
	return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap_local);
/**
* drm_client_buffer_vunmap_local - Unmap DRM client buffer
* @buffer: DRM client buffer
*
* This function removes a client buffer's memory mapping established
* with drm_client_buffer_vmap_local(). Calling this function is only
* required by clients that manage their buffer mappings by themselves.
*/
void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
{
	struct drm_gem_object *obj = buffer->gem;

	/* Drop the mapping first, then release the reservation lock. */
	drm_gem_vunmap(obj, &buffer->map);
	drm_gem_unlock(obj);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
/**
* drm_client_buffer_vmap - Map DRM client buffer into address space
* @buffer: DRM client buffer
@ -328,24 +388,30 @@ int
drm_client_buffer_vmap(struct drm_client_buffer *buffer,
struct iosys_map *map_copy)
{
struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
int ret;
/*
* FIXME: The dependency on GEM here isn't required, we could
* convert the driver handle to a dma-buf instead and use the
* backend-agnostic dma-buf vmap support instead. This would
* require that the handle2fd prime ioctl is reworked to pull the
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
ret = drm_gem_vmap_unlocked(buffer->gem, map);
drm_gem_lock(gem);
ret = drm_gem_pin_locked(gem);
if (ret)
return ret;
goto err_drm_gem_pin_locked;
ret = drm_gem_vmap(gem, map);
if (ret)
goto err_drm_gem_vmap;
drm_gem_unlock(gem);
*map_copy = *map;
return 0;
err_drm_gem_vmap:
drm_gem_unpin_locked(buffer->gem);
err_drm_gem_pin_locked:
drm_gem_unlock(gem);
return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap);
@ -359,9 +425,13 @@ EXPORT_SYMBOL(drm_client_buffer_vmap);
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
drm_gem_vunmap_unlocked(buffer->gem, map);
drm_gem_lock(gem);
drm_gem_vunmap(gem, map);
drm_gem_unpin_locked(gem);
drm_gem_unlock(gem);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);

View file

@ -26,10 +26,15 @@
* implementation details and are not exported to drivers.
*/
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#ifndef __DRM_CRTC_HELPER_INTERNAL_H__
#define __DRM_CRTC_HELPER_INTERNAL_H__
enum drm_mode_status;
struct drm_connector;
struct drm_crtc;
struct drm_display_mode;
struct drm_encoder;
struct drm_modeset_acquire_ctx;
/* drm_probe_helper.c */
enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
@ -44,3 +49,5 @@ drm_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *
drm_connector_get_single_encoder(struct drm_connector *connector);
#endif /* __DRM_CRTC_HELPER_INTERNAL_H__ */

View file

@ -32,6 +32,10 @@
* and are not exported to drivers.
*/
#ifndef __DRM_CRTC_INTERNAL_H__
#define __DRM_CRTC_INTERNAL_H__
#include <linux/err.h>
#include <linux/types.h>
enum drm_color_encoding;
@ -54,6 +58,7 @@ struct drm_mode_object;
struct drm_mode_set;
struct drm_plane;
struct drm_plane_state;
struct drm_printer;
struct drm_property;
struct edid;
struct fwnode_handle;
@ -303,3 +308,5 @@ drm_edid_load_firmware(struct drm_connector *connector)
return ERR_PTR(-ENOENT);
}
#endif
#endif /* __DRM_CRTC_INTERNAL_H__ */

View file

@ -102,6 +102,11 @@ struct detailed_mode_closure {
int modes;
};
struct drm_edid_match_closure {
const struct drm_edid_ident *ident;
bool matched;
};
#define LEVEL_DMT 0
#define LEVEL_GTF 1
#define LEVEL_GTF2 2
@ -109,13 +114,15 @@ struct detailed_mode_closure {
#define EDID_QUIRK(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _quirks) \
{ \
.panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
product_id), \
.ident = { \
.panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, \
vend_chr_2, product_id), \
}, \
.quirks = _quirks \
}
static const struct edid_quirk {
u32 panel_id;
const struct drm_edid_ident ident;
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
@ -2749,8 +2756,27 @@ const struct drm_edid *drm_edid_read(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_edid_read);
static u32 edid_extract_panel_id(const struct edid *edid)
/**
* drm_edid_get_panel_id - Get a panel's ID from EDID
* @drm_edid: EDID that contains panel ID.
*
* This function uses the first block of the EDID of a panel and (assuming
* that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value
* (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's
* supposed to be different for each different modem of panel.
*
* Return: A 32-bit ID that should be different for each make/model of panel.
* See the functions drm_edid_encode_panel_id() and
* drm_edid_decode_panel_id() for some details on the structure of this
* ID. Return 0 if the EDID size is less than a base block.
*/
u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid)
{
const struct edid *edid = drm_edid->edid;
if (drm_edid->size < EDID_LENGTH)
return 0;
/*
* We represent the ID as a 32-bit number so it can easily be compared
* with "==".
@ -2768,60 +2794,54 @@ static u32 edid_extract_panel_id(const struct edid *edid)
(u32)edid->mfg_id[1] << 16 |
(u32)EDID_PRODUCT_ID(edid);
}
EXPORT_SYMBOL(drm_edid_get_panel_id);
/**
* drm_edid_get_panel_id - Get a panel's ID through DDC
* drm_edid_read_base_block - Get a panel's EDID base block
* @adapter: I2C adapter to use for DDC
*
* This function reads the first block of the EDID of a panel and (assuming
* that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value
* (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's
* supposed to be different for each different modem of panel.
* This function returns the drm_edid containing the first block of the EDID of
* a panel.
*
* This function is intended to be used during early probing on devices where
* more than one panel might be present. Because of its intended use it must
* assume that the EDID of the panel is correct, at least as far as the ID
* is concerned (in other words, we don't process any overrides here).
* assume that the EDID of the panel is correct, at least as far as the base
* block is concerned (in other words, we don't process any overrides here).
*
* Caller should call drm_edid_free() after use.
*
* NOTE: it's expected that this function and drm_do_get_edid() will both
* be read the EDID, but there is no caching between them. Since we're only
* reading the first block, hopefully this extra overhead won't be too big.
*
* Return: A 32-bit ID that should be different for each make/model of panel.
* See the functions drm_edid_encode_panel_id() and
* drm_edid_decode_panel_id() for some details on the structure of this
* ID.
* WARNING: Only use this function when the connector is unknown. For example,
* during the early probe of panel. The EDID read from the function is temporary
* and should be replaced by the full EDID returned from other drm_edid_read.
*
* Return: Pointer to allocated EDID base block, or NULL on any failure.
*/
u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
const struct drm_edid *drm_edid_read_base_block(struct i2c_adapter *adapter)
{
enum edid_block_status status;
void *base_block;
u32 panel_id = 0;
/*
* There are no manufacturer IDs of 0, so if there is a problem reading
* the EDID then we'll just return 0.
*/
base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
if (!base_block)
return 0;
return NULL;
status = edid_block_read(base_block, 0, drm_do_probe_ddc_edid, adapter);
edid_block_status_print(status, base_block, 0);
if (edid_block_status_valid(status, edid_block_tag(base_block)))
panel_id = edid_extract_panel_id(base_block);
else
if (!edid_block_status_valid(status, edid_block_tag(base_block))) {
edid_block_dump(KERN_NOTICE, base_block, 0);
kfree(base_block);
return NULL;
}
kfree(base_block);
return panel_id;
return _drm_edid_alloc(base_block, EDID_LENGTH);
}
EXPORT_SYMBOL(drm_edid_get_panel_id);
EXPORT_SYMBOL(drm_edid_read_base_block);
/**
* drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
@ -2903,16 +2923,17 @@ EXPORT_SYMBOL(drm_edid_duplicate);
* @drm_edid: EDID to process
*
* This tells subsequent routines what fixes they need to apply.
*
* Return: A u32 represents the quirks to apply.
*/
static u32 edid_get_quirks(const struct drm_edid *drm_edid)
{
u32 panel_id = edid_extract_panel_id(drm_edid->edid);
const struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
if (quirk->panel_id == panel_id)
if (drm_edid_match(drm_edid, &quirk->ident))
return quirk->quirks;
}
@ -5442,6 +5463,66 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
connector->audio_latency[0], connector->audio_latency[1]);
}
static void
match_identity(const struct detailed_timing *timing, void *data)
{
struct drm_edid_match_closure *closure = data;
unsigned int i;
const char *name = closure->ident->name;
unsigned int name_len = strlen(name);
const char *desc = timing->data.other_data.data.str.str;
unsigned int desc_len = ARRAY_SIZE(timing->data.other_data.data.str.str);
if (name_len > desc_len ||
!(is_display_descriptor(timing, EDID_DETAIL_MONITOR_NAME) ||
is_display_descriptor(timing, EDID_DETAIL_MONITOR_STRING)))
return;
if (strncmp(name, desc, name_len))
return;
for (i = name_len; i < desc_len; i++) {
if (desc[i] == '\n')
break;
/* Allow white space before EDID string terminator. */
if (!isspace(desc[i]))
return;
}
closure->matched = true;
}
/**
* drm_edid_match - match drm_edid with given identity
* @drm_edid: EDID
* @ident: the EDID identity to match with
*
* Check if the EDID matches with the given identity.
*
* Return: True if the given identity matched with EDID, false otherwise.
*/
bool drm_edid_match(const struct drm_edid *drm_edid,
const struct drm_edid_ident *ident)
{
if (!drm_edid || drm_edid_get_panel_id(drm_edid) != ident->panel_id)
return false;
/* Match with name only if it's not NULL. */
if (ident->name) {
struct drm_edid_match_closure closure = {
.ident = ident,
.matched = false,
};
drm_for_each_detailed_block(drm_edid, match_identity, &closure);
return closure.matched;
}
return true;
}
EXPORT_SYMBOL(drm_edid_match);
static void
monitor_name(const struct detailed_timing *timing, void *data)
{

View file

@ -197,14 +197,14 @@ static int drm_fbdev_generic_damage_blit(struct drm_fb_helper *fb_helper,
*/
mutex_lock(&fb_helper->lock);
ret = drm_client_buffer_vmap(buffer, &map);
ret = drm_client_buffer_vmap_local(buffer, &map);
if (ret)
goto out;
dst = map;
drm_fbdev_generic_damage_blit_real(fb_helper, clip, &dst);
drm_client_buffer_vunmap(buffer);
drm_client_buffer_vunmap_local(buffer);
out:
mutex_unlock(&fb_helper->lock);

View file

@ -1161,7 +1161,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
obj->funcs->print_info(p, indent, obj);
}
int drm_gem_pin(struct drm_gem_object *obj)
int drm_gem_pin_locked(struct drm_gem_object *obj)
{
if (obj->funcs->pin)
return obj->funcs->pin(obj);
@ -1169,12 +1169,30 @@ int drm_gem_pin(struct drm_gem_object *obj)
return 0;
}
void drm_gem_unpin(struct drm_gem_object *obj)
void drm_gem_unpin_locked(struct drm_gem_object *obj)
{
if (obj->funcs->unpin)
obj->funcs->unpin(obj);
}
int drm_gem_pin(struct drm_gem_object *obj)
{
int ret;
dma_resv_lock(obj->resv, NULL);
ret = drm_gem_pin_locked(obj);
dma_resv_unlock(obj->resv);
return ret;
}
void drm_gem_unpin(struct drm_gem_object *obj)
{
dma_resv_lock(obj->resv, NULL);
drm_gem_unpin_locked(obj);
dma_resv_unlock(obj->resv);
}
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@ -1209,6 +1227,18 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
}
EXPORT_SYMBOL(drm_gem_vunmap);
void drm_gem_lock(struct drm_gem_object *obj)
{
dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);
void drm_gem_unlock(struct drm_gem_object *obj)
{
dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);
int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;

View file

@ -10,7 +10,6 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@ -228,7 +227,7 @@ void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
int ret;
@ -238,13 +237,15 @@ static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);
static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
dma_resv_assert_held(shmem->base.resv);
drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
/**
* drm_gem_shmem_pin - Pin backing pages for a shmem GEM object

View file

@ -282,6 +282,8 @@ static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
struct ttm_operation_ctx ctx = { false, false };
int ret;
dma_resv_assert_held(gbo->bo.base.resv);
if (gbo->bo.pin_count)
goto out;
@ -337,6 +339,8 @@ EXPORT_SYMBOL(drm_gem_vram_pin);
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
dma_resv_assert_held(gbo->bo.base.resv);
ttm_bo_unpin(&gbo->bo);
}
@ -363,54 +367,6 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
struct iosys_map *map)
{
int ret;
if (gbo->vmap_use_count > 0)
goto out;
/*
* VRAM helpers unmap the BO only on demand. So the previous
* page mapping might still be around. Only vmap if the there's
* no mapping present.
*/
if (iosys_map_is_null(&gbo->map)) {
ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
if (ret)
return ret;
}
out:
++gbo->vmap_use_count;
*map = gbo->map;
return 0;
}
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo,
struct iosys_map *map)
{
struct drm_device *dev = gbo->bo.base.dev;
if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count))
return;
if (drm_WARN_ON_ONCE(dev, !iosys_map_is_equal(&gbo->map, map)))
return; /* BUG: map not mapped from this BO */
if (--gbo->vmap_use_count > 0)
return;
/*
* Permanently mapping and unmapping buffers adds overhead from
* updating the page tables and creates debugging output. Therefore,
* we delay the actual unmap operation until the BO gets evicted
* from memory. See drm_gem_vram_bo_driver_move_notify().
*/
}
/**
* drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
* space
@ -433,18 +389,25 @@ int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map)
dma_resv_assert_held(gbo->bo.base.resv);
ret = drm_gem_vram_pin_locked(gbo, 0);
if (ret)
return ret;
ret = drm_gem_vram_kmap_locked(gbo, map);
if (ret)
goto err_drm_gem_vram_unpin_locked;
if (gbo->vmap_use_count > 0)
goto out;
/*
* VRAM helpers unmap the BO only on demand. So the previous
* page mapping might still be around. Only vmap if the there's
* no mapping present.
*/
if (iosys_map_is_null(&gbo->map)) {
ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
if (ret)
return ret;
}
out:
++gbo->vmap_use_count;
*map = gbo->map;
return 0;
err_drm_gem_vram_unpin_locked:
drm_gem_vram_unpin_locked(gbo);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_vmap);
@ -459,10 +422,25 @@ EXPORT_SYMBOL(drm_gem_vram_vmap);
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
struct iosys_map *map)
{
struct drm_device *dev = gbo->bo.base.dev;
dma_resv_assert_held(gbo->bo.base.resv);
drm_gem_vram_kunmap_locked(gbo, map);
drm_gem_vram_unpin_locked(gbo);
if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count))
return;
if (drm_WARN_ON_ONCE(dev, !iosys_map_is_equal(&gbo->map, map)))
return; /* BUG: map not mapped from this BO */
if (--gbo->vmap_use_count > 0)
return;
/*
* Permanently mapping and unmapping buffers adds overhead from
* updating the page tables and creates debugging output. Therefore,
* we delay the actual unmap operation until the BO gets evicted
* from memory. See drm_gem_vram_bo_driver_move_notify().
*/
}
EXPORT_SYMBOL(drm_gem_vram_vunmap);
@ -768,7 +746,8 @@ static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
/* Fbdev console emulation is the use case of these PRIME
/*
* Fbdev console emulation is the use case of these PRIME
* helpers. This may involve updating a hardware buffer from
* a shadow FB. We pin the buffer to it's current location
* (either video RAM or system memory) to prevent it from
@ -776,7 +755,7 @@ static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
* the buffer to be pinned to VRAM, implement a callback that
* sets the flags accordingly.
*/
return drm_gem_vram_pin(gbo, 0);
return drm_gem_vram_pin_locked(gbo, 0);
}
/**
@ -787,7 +766,7 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
drm_gem_vram_unpin(gbo);
drm_gem_vram_unpin_locked(gbo);
}
/**

View file

@ -21,6 +21,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __DRM_INTERNAL_H__
#define __DRM_INTERNAL_H__
#include <linux/kthread.h>
#include <linux/types.h>
@ -170,6 +173,8 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
int drm_gem_pin_locked(struct drm_gem_object *obj);
void drm_gem_unpin_locked(struct drm_gem_object *obj);
int drm_gem_pin(struct drm_gem_object *obj);
void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
@ -276,3 +281,5 @@ void drm_framebuffer_debugfs_init(struct drm_device *dev);
/* drm_edid.c */
void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad);
void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad);
#endif /* __DRM_INTERNAL_H__ */

View file

@ -4,7 +4,6 @@ config DRM_EXYNOS
depends on OF && DRM && COMMON_CLK
depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on MMU
select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
@ -68,8 +67,9 @@ config DRM_EXYNOS_DSI
config DRM_EXYNOS_DP
bool "Exynos specific extensions for Analogix DP driver"
depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
select DRM_ANALOGIX_DP
select DRM_DISPLAY_DP_HELPER
default DRM_EXYNOS
select DRM_PANEL
help

View file

@ -11,8 +11,6 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <asm/intel-mid.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>

View file

@ -2,6 +2,10 @@
config DRM_I915
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HDCP_HELPER
depends on DRM_DISPLAY_HDMI_HELPER
depends on DRM_DISPLAY_HELPER
depends on X86 && PCI
depends on !PREEMPT_RT
select INTEL_GTT if X86
@ -10,10 +14,6 @@ config DRM_I915
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI

View file

@ -27,8 +27,8 @@ config DRM_I915_DEBUG
select REF_TRACKER
select STACKDEPOT
select STACKTRACE
select DRM_DP_AUX_CHARDEV
select DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE if DRM_I915_DP_TUNNEL
select DRM_DISPLAY_DP_AUX_CHARDEV
select DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG if DRM_I915_DP_TUNNEL
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y

View file

@ -46,7 +46,7 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev)
if (!mips_data)
return -ENOMEM;
for (page_nr = 0; page_nr < ARRAY_SIZE(mips_data->pt_pages); page_nr++) {
for (page_nr = 0; page_nr < PVR_MIPS_PT_PAGE_COUNT; page_nr++) {
mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!mips_data->pt_pages[page_nr]) {
err = -ENOMEM;
@ -102,7 +102,7 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev)
int page_nr;
vunmap(mips_data->pt);
for (page_nr = ARRAY_SIZE(mips_data->pt_pages) - 1; page_nr >= 0; page_nr--) {
for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
dma_unmap_page(from_pvr_device(pvr_dev)->dev,
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);

View file

@ -35,7 +35,8 @@ config DRM_IMX_LDB
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
select DRM_DW_HDMI
depends on DRM_IMX && OF
depends on DRM_DW_HDMI
depends on DRM_IMX
depends on OF
help
Choose this if you want to use HDMI on i.MX6.

View file

@ -27,8 +27,8 @@ config DRM_INGENIC_IPU
config DRM_INGENIC_DW_HDMI
tristate "Ingenic specific support for Synopsys DW HDMI"
depends on DRM_DW_HDMI
depends on MACH_JZ4780
select DRM_DW_HDMI
help
Choose this option to enable Synopsys DesignWare HDMI based driver.
If you want to enable HDMI on Ingenic JZ4780 based SoC, you should

View file

@ -19,33 +19,24 @@ static int lsdc_gem_prime_pin(struct drm_gem_object *obj)
struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
int ret;
ret = lsdc_bo_reserve(lbo);
if (unlikely(ret))
return ret;
dma_resv_assert_held(obj->resv);
ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL);
if (likely(ret == 0))
lbo->sharing_count++;
lsdc_bo_unreserve(lbo);
return ret;
}
static void lsdc_gem_prime_unpin(struct drm_gem_object *obj)
{
struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
int ret;
ret = lsdc_bo_reserve(lbo);
if (unlikely(ret))
return;
dma_resv_assert_held(obj->resv);
lsdc_bo_unpin(lbo);
if (lbo->sharing_count)
lbo->sharing_count--;
lsdc_bo_unreserve(lbo);
}
static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)

View file

@ -22,11 +22,11 @@ config DRM_MEDIATEK
config DRM_MEDIATEK_DP
tristate "DRM DPTX Support for MediaTek SoCs"
depends on DRM_DISPLAY_DP_AUX_BUS
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on DRM_MEDIATEK
select PHY_MTK_DP
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
select DRM_DP_AUX_BUS
help
DRM/KMS Display Port driver for MediaTek SoCs.

View file

@ -13,9 +13,9 @@ config DRM_MESON
config DRM_MESON_DW_HDMI
tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
depends on DRM_DW_HDMI
depends on DRM_MESON
default y if DRM_MESON
select DRM_DW_HDMI
imply DRM_DW_HDMI_I2S_AUDIO
config DRM_MESON_DW_MIPI_DSI

View file

@ -2,9 +2,12 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on COMMON_CLK
depends on DRM
depends on DRM_DISPLAY_DP_AUX_BUS
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on IOMMU_SUPPORT
depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
depends on QCOM_OCMEM || QCOM_OCMEM=n
@ -14,9 +17,6 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_DP_AUX_BUS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_EXEC
select DRM_KMS_HELPER
select DRM_PANEL

View file

@ -219,7 +219,7 @@ static void put_pages(struct drm_gem_object *obj)
}
}
static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@ -257,24 +257,24 @@ static void pin_obj_locked(struct drm_gem_object *obj)
mutex_unlock(&priv->lru.lock);
}
struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct page **p;
msm_gem_lock(obj);
p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
msm_gem_assert_locked(obj);
p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (!IS_ERR(p))
pin_obj_locked(obj);
msm_gem_unlock(obj);
return p;
}
void msm_gem_unpin_pages(struct drm_gem_object *obj)
void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
{
msm_gem_lock(obj);
msm_gem_assert_locked(obj);
msm_gem_unpin_locked(obj);
msm_gem_unlock(obj);
}
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
@ -489,7 +489,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
msm_gem_assert_locked(obj);
pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
@ -703,7 +703,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
if (obj->import_attach)
return ERR_PTR(-ENODEV);
pages = msm_gem_pin_pages_locked(obj, madv);
pages = msm_gem_get_pages_locked(obj, madv);
if (IS_ERR(pages))
return ERR_CAST(pages);

View file

@ -140,8 +140,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
void msm_gem_unpin_pages(struct drm_gem_object *obj);
struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj);
void msm_gem_unpin_pages_locked(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,

View file

@ -47,13 +47,23 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
msm_gem_pin_pages(obj);
return 0;
struct page **pages;
int ret = 0;
if (obj->import_attach)
return 0;
pages = msm_gem_pin_pages_locked(obj);
if (IS_ERR(pages))
ret = PTR_ERR(pages);
return ret;
}
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
msm_gem_unpin_pages(obj);
if (obj->import_attach)
return;
msm_gem_unpin_pages_locked(obj);
}

View file

@ -343,6 +343,9 @@ static int __maybe_unused lcdif_suspend(struct device *dev)
if (ret)
return ret;
if (pm_runtime_suspended(dev))
return 0;
return lcdif_rpm_suspend(dev);
}
@ -350,7 +353,8 @@ static int __maybe_unused lcdif_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
lcdif_rpm_resume(dev);
if (!pm_runtime_suspended(dev))
lcdif_rpm_resume(dev);
return drm_mode_config_helper_resume(drm);
}

View file

@ -1,12 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_NOUVEAU
tristate "Nouveau (NVIDIA) cards"
depends on DRM && PCI && MMU
depends on DRM
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HDMI_HELPER
depends on DRM_DISPLAY_HELPER
depends on PCI
depends on MMU
select IOMMU_API
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER

View file

@ -312,11 +312,21 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
if (init->fb_ctxdma_handle == ~0) {
switch (init->tt_ctxdma_handle) {
case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR ; break;
case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE ; break;
case NOUVEAU_FIFO_ENGINE_GR:
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
break;
case NOUVEAU_FIFO_ENGINE_VP:
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC;
break;
case NOUVEAU_FIFO_ENGINE_PPP:
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP;
break;
case NOUVEAU_FIFO_ENGINE_BSP:
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD;
break;
case NOUVEAU_FIFO_ENGINE_CE:
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE;
break;
default:
return nouveau_abi16_put(abi16, -ENOSYS);
}

View file

@ -50,18 +50,6 @@ struct drm_nouveau_grobj_alloc {
int class;
};
struct drm_nouveau_notifierobj_alloc {
uint32_t channel;
uint32_t handle;
uint32_t size;
uint32_t offset;
};
struct drm_nouveau_gpuobj_free {
int channel;
uint32_t handle;
};
struct drm_nouveau_setparam {
uint64_t param;
uint64_t value;

View file

@ -467,17 +467,14 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
set_placement_range(nvbo, domain);
}
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
bool force = false, evict = false;
int ret;
int ret = 0;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret)
return ret;
dma_resv_assert_held(bo->base.resv);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
@ -540,20 +537,15 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
out:
if (force && ret)
nvbo->contig = false;
ttm_bo_unreserve(bo);
return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret)
return ret;
dma_resv_assert_held(bo->base.resv);
ttm_bo_unpin(&nvbo->bo);
if (!nvbo->bo.pin_count) {
@ -568,8 +560,33 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
break;
}
}
}
int nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret)
return ret;
ret = nouveau_bo_pin_locked(nvbo, domain, contig);
ttm_bo_unreserve(bo);
return ret;
}
int nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret)
return ret;
nouveau_bo_unpin_locked(nvbo);
ttm_bo_unreserve(bo);
return 0;
}

View file

@ -85,6 +85,8 @@ int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct dma_resv *robj,
struct nouveau_bo **);
int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig);
void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo);
int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
int nouveau_bo_unpin(struct nouveau_bo *);
int nouveau_bo_map(struct nouveau_bo *);

View file

@ -89,18 +89,18 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
int ret;
/* pin buffer into GTT */
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
ret = nouveau_bo_pin_locked(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
if (ret)
return -EINVAL;
ret = -EINVAL;
return 0;
return ret;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
nouveau_bo_unpin(nvbo);
nouveau_bo_unpin_locked(nvbo);
}
struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,

View file

@ -1080,7 +1080,7 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
if (ret) {
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return PTR_ERR(ctrl);
return ret;
}
memcpy(data, ctrl->data, size);

View file

@ -4,7 +4,7 @@ config DRM_OMAP
depends on DRM && OF
depends on ARCH_OMAP2PLUS
select DRM_KMS_HELPER
select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select VIDEOMODE_HELPERS
select HDMI
default n

View file

@ -51,6 +51,10 @@ static void pan_worker(struct work_struct *work)
omap_gem_roll(bo, fbi->var.yoffset * npages);
}
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area)
static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
@ -78,11 +82,9 @@ static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *helper = info->par;
struct drm_framebuffer *fb = helper->fb;
struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma);
return fb_deferred_io_mmap(info, vma);
}
static void omap_fbdev_fb_destroy(struct fb_info *info)
@ -94,6 +96,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
DBG();
fb_deferred_io_cleanup(info);
drm_fb_helper_fini(helper);
omap_gem_unpin(bo);
@ -104,15 +107,19 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
kfree(fbdev);
}
/*
* For now, we cannot use FB_DEFAULT_DEFERRED_OPS and fb_deferred_io_mmap()
* because we use write-combine.
*/
static const struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_DMAMEM_OPS_RDWR,
__FB_DEFAULT_DEFERRED_OPS_RDWR(omap_fbdev),
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = omap_fbdev_pan_display,
__FB_DEFAULT_DMAMEM_OPS_DRAW,
__FB_DEFAULT_DEFERRED_OPS_DRAW(omap_fbdev),
.fb_ioctl = drm_fb_helper_ioctl,
.fb_mmap = omap_fbdev_fb_mmap,
.fb_destroy = omap_fbdev_fb_destroy,
@ -213,6 +220,15 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbi->fix.smem_start = dma_addr;
fbi->fix.smem_len = bo->size;
/* deferred I/O */
helper->fbdefio.delay = HZ / 20;
helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
fbi->fbdefio = &helper->fbdefio;
ret = fb_deferred_io_init(fbi);
if (ret)
goto fail;
/* if we have DMM, then we can use it for scrolling by just
* shuffling pages around in DMM rather than doing sw blit.
*/
@ -238,8 +254,20 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
return ret;
}
static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
{
if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
return 0;
if (helper->fb->funcs->dirty)
return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
return 0;
}
static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.fb_probe = omap_fbdev_create,
.fb_dirty = omap_fbdev_dirty,
};
static struct drm_fb_helper *get_fb(struct fb_info *fbi)

View file

@ -533,11 +533,11 @@ config DRM_PANEL_RAYDIUM_RM68200
config DRM_PANEL_RAYDIUM_RM692E5
tristate "Raydium RM692E5-based DSI panel"
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on DRM_MIPI_DSI
depends on OF
help
Say Y here if you want to enable support for Raydium RM692E5-based
display panels, such as the one found in the Fairphone 5 smartphone.
@ -559,12 +559,12 @@ config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
config DRM_PANEL_SAMSUNG_ATNA33XC20
tristate "Samsung ATNA33XC20 eDP panel"
depends on OF
depends on BACKLIGHT_CLASS_DEVICE
depends on DRM_DISPLAY_DP_AUX_BUS
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on OF
depends on PM
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_DP_AUX_BUS
help
DRM panel driver for the Samsung ATNA33XC20 panel. This panel can't
be handled by the DRM_PANEL_SIMPLE driver because its power
@ -586,6 +586,15 @@ config DRM_PANEL_SAMSUNG_LD9040
depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
config DRM_PANEL_SAMSUNG_S6E3FA7
tristate "Samsung S6E3FA7 panel driver"
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for the Samsung S6E3FA7
1920x2220 panel.
config DRM_PANEL_SAMSUNG_S6D16D0
tristate "Samsung S6D16D0 DSI video mode panel"
depends on OF
@ -790,13 +799,13 @@ config DRM_PANEL_STARTEK_KD070FHFID015
config DRM_PANEL_EDP
tristate "support for simple Embedded DisplayPort panels"
depends on OF
depends on BACKLIGHT_CLASS_DEVICE
depends on DRM_DISPLAY_DP_AUX_BUS
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on OF
depends on PM
select VIDEOMODE_HELPERS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
help
DRM panel driver for dumb eDP panels that need at most a regulator and
@ -870,11 +879,11 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
config DRM_PANEL_VISIONOX_R66451
tristate "Visionox R66451"
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
depends on DRM_DISPLAY_DP_HELPER
depends on DRM_DISPLAY_HELPER
depends on DRM_MIPI_DSI
depends on OF
help
Say Y here if you want to enable support for Visionox
R66451 1080x2340 AMOLED DSI panel.

View file

@ -62,6 +62,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0) += panel-samsung-s6d7aa0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FA7) += panel-samsung-s6e3fa7.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o

View file

@ -210,15 +210,12 @@ struct panel_desc {
* struct edp_panel_entry - Maps panel ID to delay / panel name.
*/
struct edp_panel_entry {
/** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */
u32 panel_id;
/** @ident: edid identity used for panel matching. */
const struct drm_edid_ident ident;
/** @delay: The power sequencing delays needed for this panel. */
const struct panel_delay *delay;
/** @name: Name of this panel (for printing to logs). */
const char *name;
/** @override_edid_mode: Override the mode obtained by edid. */
const struct drm_display_mode *override_edid_mode;
};
@ -691,7 +688,7 @@ static int detected_panel_show(struct seq_file *s, void *data)
else if (!p->detected_panel)
seq_puts(s, "HARDCODED\n");
else
seq_printf(s, "%s\n", p->detected_panel->name);
seq_printf(s, "%s\n", p->detected_panel->ident.name);
return 0;
}
@ -761,11 +758,12 @@ static void panel_edp_parse_panel_timing_node(struct device *dev,
dev_err(dev, "Reject override mode: No display_timing found\n");
}
static const struct edp_panel_entry *find_edp_panel(u32 panel_id);
static const struct edp_panel_entry *find_edp_panel(u32 panel_id, const struct drm_edid *edid);
static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
{
struct panel_desc *desc;
const struct drm_edid *base_block;
u32 panel_id;
char vend[4];
u16 product_id;
@ -795,15 +793,19 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
goto exit;
}
panel_id = drm_edid_get_panel_id(panel->ddc);
if (!panel_id) {
base_block = drm_edid_read_base_block(panel->ddc);
if (base_block) {
panel_id = drm_edid_get_panel_id(base_block);
} else {
dev_err(dev, "Couldn't identify panel via EDID\n");
ret = -EIO;
goto exit;
}
drm_edid_decode_panel_id(panel_id, vend, &product_id);
panel->detected_panel = find_edp_panel(panel_id);
panel->detected_panel = find_edp_panel(panel_id, base_block);
drm_edid_free(base_block);
/*
* We're using non-optimized timings and want it really obvious that
@ -836,7 +838,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
panel->detected_panel = ERR_PTR(-EINVAL);
} else {
dev_info(dev, "Detected %s %s (%#06x)\n",
vend, panel->detected_panel->name, product_id);
vend, panel->detected_panel->ident.name, product_id);
/* Update the delay; everything else comes from EDID */
desc->delay = *panel->detected_panel->delay;
@ -1005,6 +1007,19 @@ static const struct panel_desc auo_b101ean01 = {
},
};
/*
 * Override mode for the AUO B116XA3 family: 1366x768, negative
 * h/v sync polarity (used via EDP_PANEL_ENTRY2 to replace the
 * EDID-provided mode).
 */
static const struct drm_display_mode auo_b116xa3_mode = {
	.clock = 70589,
	.hdisplay = 1366,
	.hsync_start = 1366 + 40,
	.hsync_end = 1366 + 40 + 40,
	.htotal = 1366 + 40 + 40 + 32,
	.vdisplay = 768,
	.vsync_start = 768 + 10,
	.vsync_end = 768 + 10 + 12,
	.vtotal = 768 + 10 + 12 + 6,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct drm_display_mode auo_b116xak01_mode = {
.clock = 69300,
.hdisplay = 1366,
@ -1865,6 +1880,13 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
/*
 * Power-sequencing delays (ms): 200 HPD-absent, 500 unprepare, 50 enable,
 * plus a 200 ms prepare-to-enable window.
 */
static const struct panel_delay delay_200_500_e50_p2e200 = {
	.hpd_absent = 200,
	.unprepare = 500,
	.enable = 50,
	.prepare_to_enable = 200,
};
static const struct panel_delay delay_200_500_e80 = {
.hpd_absent = 200,
.unprepare = 500,
@ -1919,17 +1941,21 @@ static const struct panel_delay delay_200_500_e50_po2e200 = {
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.name = _name, \
.panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
product_id), \
.ident = { \
.name = _name, \
.panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
product_id), \
}, \
.delay = _delay \
}
#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \
{ \
.name = _name, \
.panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
product_id), \
.ident = { \
.name = _name, \
.panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
product_id), \
}, \
.delay = _delay, \
.override_edid_mode = _mode \
}
@ -1953,7 +1979,9 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAN04.0"),
EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
&auo_b116xa3_mode),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
@ -1961,6 +1989,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0607, &delay_200_500_e200, "Unknown"),
@ -2010,6 +2039,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@ -2025,6 +2055,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1156, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
@ -2034,7 +2065,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50, "MNC207QS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"),
@ -2076,15 +2107,25 @@ static const struct edp_panel_entry edp_panels[] = {
{ /* sentinal */ }
};
static const struct edp_panel_entry *find_edp_panel(u32 panel_id)
static const struct edp_panel_entry *find_edp_panel(u32 panel_id, const struct drm_edid *edid)
{
const struct edp_panel_entry *panel;
if (!panel_id)
return NULL;
for (panel = edp_panels; panel->panel_id; panel++)
if (panel->panel_id == panel_id)
/*
* Match with identity first. This allows handling the case where
* vendors incorrectly reused the same panel ID for multiple panels that
* need different settings. If there's no match, try again with panel
* ID, which should be unique.
*/
for (panel = edp_panels; panel->ident.panel_id; panel++)
if (drm_edid_match(edid, &panel->ident))
return panel;
for (panel = edp_panels; panel->ident.panel_id; panel++)
if (panel->ident.panel_id == panel_id)
return panel;
return NULL;

View file

@ -455,6 +455,202 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */
};
/*
 * Vendor init sequence for the Startek KD050HDFIA020 panel, replayed
 * verbatim by ili9881c_prepare().  Register meanings come from the
 * vendor's opaque ILI9881C command pages; do not reorder.
 */
static const struct ili9881c_instr kd050hdfia020_init[] = {
	/* Page 3: GIP / timing setup. */
	ILI9881C_SWITCH_PAGE_INSTR(3),
	ILI9881C_COMMAND_INSTR(0x01, 0x00),
	ILI9881C_COMMAND_INSTR(0x02, 0x00),
	ILI9881C_COMMAND_INSTR(0x03, 0x72),
	ILI9881C_COMMAND_INSTR(0x04, 0x00),
	ILI9881C_COMMAND_INSTR(0x05, 0x00),
	ILI9881C_COMMAND_INSTR(0x06, 0x09),
	ILI9881C_COMMAND_INSTR(0x07, 0x00),
	ILI9881C_COMMAND_INSTR(0x08, 0x00),
	ILI9881C_COMMAND_INSTR(0x09, 0x01),
	ILI9881C_COMMAND_INSTR(0x0a, 0x00),
	ILI9881C_COMMAND_INSTR(0x0b, 0x00),
	ILI9881C_COMMAND_INSTR(0x0c, 0x01),
	ILI9881C_COMMAND_INSTR(0x0d, 0x00),
	ILI9881C_COMMAND_INSTR(0x0e, 0x00),
	ILI9881C_COMMAND_INSTR(0x0f, 0x00),
	ILI9881C_COMMAND_INSTR(0x10, 0x00),
	ILI9881C_COMMAND_INSTR(0x11, 0x00),
	ILI9881C_COMMAND_INSTR(0x12, 0x00),
	ILI9881C_COMMAND_INSTR(0x13, 0x00),
	ILI9881C_COMMAND_INSTR(0x14, 0x00),
	ILI9881C_COMMAND_INSTR(0x15, 0x00),
	ILI9881C_COMMAND_INSTR(0x16, 0x00),
	ILI9881C_COMMAND_INSTR(0x17, 0x00),
	ILI9881C_COMMAND_INSTR(0x18, 0x00),
	ILI9881C_COMMAND_INSTR(0x19, 0x00),
	ILI9881C_COMMAND_INSTR(0x1a, 0x00),
	ILI9881C_COMMAND_INSTR(0x1b, 0x00),
	ILI9881C_COMMAND_INSTR(0x1c, 0x00),
	ILI9881C_COMMAND_INSTR(0x1d, 0x00),
	ILI9881C_COMMAND_INSTR(0x1e, 0x40),
	ILI9881C_COMMAND_INSTR(0x1f, 0x80),
	ILI9881C_COMMAND_INSTR(0x20, 0x05),
	/* NOTE(review): register 0x20 is written twice with the same value
	 * in the vendor sequence — presumably harmless; confirm. */
	ILI9881C_COMMAND_INSTR(0x20, 0x05),
	ILI9881C_COMMAND_INSTR(0x21, 0x02),
	ILI9881C_COMMAND_INSTR(0x22, 0x00),
	ILI9881C_COMMAND_INSTR(0x23, 0x00),
	ILI9881C_COMMAND_INSTR(0x24, 0x00),
	ILI9881C_COMMAND_INSTR(0x25, 0x00),
	ILI9881C_COMMAND_INSTR(0x26, 0x00),
	ILI9881C_COMMAND_INSTR(0x27, 0x00),
	ILI9881C_COMMAND_INSTR(0x28, 0x33),
	ILI9881C_COMMAND_INSTR(0x29, 0x02),
	ILI9881C_COMMAND_INSTR(0x2a, 0x00),
	ILI9881C_COMMAND_INSTR(0x2b, 0x00),
	ILI9881C_COMMAND_INSTR(0x2c, 0x00),
	ILI9881C_COMMAND_INSTR(0x2d, 0x00),
	ILI9881C_COMMAND_INSTR(0x2e, 0x00),
	ILI9881C_COMMAND_INSTR(0x2f, 0x00),
	ILI9881C_COMMAND_INSTR(0x30, 0x00),
	ILI9881C_COMMAND_INSTR(0x31, 0x00),
	ILI9881C_COMMAND_INSTR(0x32, 0x00),
	/* NOTE(review): register 0x32 also written twice — as in vendor dump. */
	ILI9881C_COMMAND_INSTR(0x32, 0x00),
	ILI9881C_COMMAND_INSTR(0x33, 0x00),
	ILI9881C_COMMAND_INSTR(0x34, 0x04),
	ILI9881C_COMMAND_INSTR(0x35, 0x00),
	ILI9881C_COMMAND_INSTR(0x36, 0x00),
	ILI9881C_COMMAND_INSTR(0x37, 0x00),
	ILI9881C_COMMAND_INSTR(0x38, 0x3C),
	ILI9881C_COMMAND_INSTR(0x39, 0x00),
	ILI9881C_COMMAND_INSTR(0x3a, 0x40),
	ILI9881C_COMMAND_INSTR(0x3b, 0x40),
	ILI9881C_COMMAND_INSTR(0x3c, 0x00),
	ILI9881C_COMMAND_INSTR(0x3d, 0x00),
	ILI9881C_COMMAND_INSTR(0x3e, 0x00),
	ILI9881C_COMMAND_INSTR(0x3f, 0x00),
	ILI9881C_COMMAND_INSTR(0x40, 0x00),
	ILI9881C_COMMAND_INSTR(0x41, 0x00),
	ILI9881C_COMMAND_INSTR(0x42, 0x00),
	ILI9881C_COMMAND_INSTR(0x43, 0x00),
	ILI9881C_COMMAND_INSTR(0x44, 0x00),
	ILI9881C_COMMAND_INSTR(0x50, 0x01),
	ILI9881C_COMMAND_INSTR(0x51, 0x23),
	ILI9881C_COMMAND_INSTR(0x52, 0x45),
	ILI9881C_COMMAND_INSTR(0x53, 0x67),
	ILI9881C_COMMAND_INSTR(0x54, 0x89),
	ILI9881C_COMMAND_INSTR(0x55, 0xab),
	ILI9881C_COMMAND_INSTR(0x56, 0x01),
	ILI9881C_COMMAND_INSTR(0x57, 0x23),
	ILI9881C_COMMAND_INSTR(0x58, 0x45),
	ILI9881C_COMMAND_INSTR(0x59, 0x67),
	ILI9881C_COMMAND_INSTR(0x5a, 0x89),
	ILI9881C_COMMAND_INSTR(0x5b, 0xab),
	ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
	ILI9881C_COMMAND_INSTR(0x5d, 0xef),
	ILI9881C_COMMAND_INSTR(0x5e, 0x11),
	ILI9881C_COMMAND_INSTR(0x5f, 0x01),
	ILI9881C_COMMAND_INSTR(0x60, 0x00),
	ILI9881C_COMMAND_INSTR(0x61, 0x15),
	ILI9881C_COMMAND_INSTR(0x62, 0x14),
	ILI9881C_COMMAND_INSTR(0x63, 0x0E),
	ILI9881C_COMMAND_INSTR(0x64, 0x0F),
	ILI9881C_COMMAND_INSTR(0x65, 0x0C),
	ILI9881C_COMMAND_INSTR(0x66, 0x0D),
	ILI9881C_COMMAND_INSTR(0x67, 0x06),
	ILI9881C_COMMAND_INSTR(0x68, 0x02),
	ILI9881C_COMMAND_INSTR(0x69, 0x07),
	ILI9881C_COMMAND_INSTR(0x6a, 0x02),
	ILI9881C_COMMAND_INSTR(0x6b, 0x02),
	ILI9881C_COMMAND_INSTR(0x6c, 0x02),
	ILI9881C_COMMAND_INSTR(0x6d, 0x02),
	ILI9881C_COMMAND_INSTR(0x6e, 0x02),
	ILI9881C_COMMAND_INSTR(0x6f, 0x02),
	ILI9881C_COMMAND_INSTR(0x70, 0x02),
	ILI9881C_COMMAND_INSTR(0x71, 0x02),
	ILI9881C_COMMAND_INSTR(0x72, 0x02),
	ILI9881C_COMMAND_INSTR(0x73, 0x02),
	ILI9881C_COMMAND_INSTR(0x74, 0x02),
	ILI9881C_COMMAND_INSTR(0x75, 0x01),
	ILI9881C_COMMAND_INSTR(0x76, 0x00),
	ILI9881C_COMMAND_INSTR(0x77, 0x14),
	ILI9881C_COMMAND_INSTR(0x78, 0x15),
	ILI9881C_COMMAND_INSTR(0x79, 0x0E),
	ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
	ILI9881C_COMMAND_INSTR(0x7b, 0x0C),
	ILI9881C_COMMAND_INSTR(0x7c, 0x0D),
	ILI9881C_COMMAND_INSTR(0x7d, 0x06),
	ILI9881C_COMMAND_INSTR(0x7e, 0x02),
	ILI9881C_COMMAND_INSTR(0x7f, 0x07),
	ILI9881C_COMMAND_INSTR(0x80, 0x02),
	ILI9881C_COMMAND_INSTR(0x81, 0x02),
	/* NOTE(review): register 0x82 is skipped here — matches vendor dump. */
	ILI9881C_COMMAND_INSTR(0x83, 0x02),
	ILI9881C_COMMAND_INSTR(0x84, 0x02),
	ILI9881C_COMMAND_INSTR(0x85, 0x02),
	ILI9881C_COMMAND_INSTR(0x86, 0x02),
	ILI9881C_COMMAND_INSTR(0x87, 0x02),
	ILI9881C_COMMAND_INSTR(0x88, 0x02),
	ILI9881C_COMMAND_INSTR(0x89, 0x02),
	ILI9881C_COMMAND_INSTR(0x8A, 0x02),
	/* Page 4: analog/power settings. */
	ILI9881C_SWITCH_PAGE_INSTR(0x4),
	ILI9881C_COMMAND_INSTR(0x6C, 0x15),
	ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
	ILI9881C_COMMAND_INSTR(0x6F, 0x33),
	ILI9881C_COMMAND_INSTR(0x3A, 0x94),
	ILI9881C_COMMAND_INSTR(0x8D, 0x15),
	ILI9881C_COMMAND_INSTR(0x87, 0xBA),
	ILI9881C_COMMAND_INSTR(0x26, 0x76),
	ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
	ILI9881C_COMMAND_INSTR(0xB5, 0x06),
	/* Page 1: voltages and gamma tables (0xA0.. positive, 0xC0.. negative). */
	ILI9881C_SWITCH_PAGE_INSTR(0x1),
	ILI9881C_COMMAND_INSTR(0x22, 0x0A),
	ILI9881C_COMMAND_INSTR(0x31, 0x00),
	ILI9881C_COMMAND_INSTR(0x53, 0x90),
	ILI9881C_COMMAND_INSTR(0x55, 0xA2),
	ILI9881C_COMMAND_INSTR(0x50, 0xB7),
	ILI9881C_COMMAND_INSTR(0x51, 0xB7),
	ILI9881C_COMMAND_INSTR(0x60, 0x22),
	ILI9881C_COMMAND_INSTR(0x61, 0x00),
	ILI9881C_COMMAND_INSTR(0x62, 0x19),
	ILI9881C_COMMAND_INSTR(0x63, 0x10),
	ILI9881C_COMMAND_INSTR(0xA0, 0x08),
	ILI9881C_COMMAND_INSTR(0xA1, 0x1A),
	ILI9881C_COMMAND_INSTR(0xA2, 0x27),
	ILI9881C_COMMAND_INSTR(0xA3, 0x15),
	ILI9881C_COMMAND_INSTR(0xA4, 0x17),
	ILI9881C_COMMAND_INSTR(0xA5, 0x2A),
	ILI9881C_COMMAND_INSTR(0xA6, 0x1E),
	ILI9881C_COMMAND_INSTR(0xA7, 0x1F),
	ILI9881C_COMMAND_INSTR(0xA8, 0x8B),
	ILI9881C_COMMAND_INSTR(0xA9, 0x1B),
	ILI9881C_COMMAND_INSTR(0xAA, 0x27),
	ILI9881C_COMMAND_INSTR(0xAB, 0x78),
	ILI9881C_COMMAND_INSTR(0xAC, 0x18),
	ILI9881C_COMMAND_INSTR(0xAD, 0x18),
	ILI9881C_COMMAND_INSTR(0xAE, 0x4C),
	ILI9881C_COMMAND_INSTR(0xAF, 0x21),
	ILI9881C_COMMAND_INSTR(0xB0, 0x27),
	ILI9881C_COMMAND_INSTR(0xB1, 0x54),
	ILI9881C_COMMAND_INSTR(0xB2, 0x67),
	ILI9881C_COMMAND_INSTR(0xB3, 0x39),
	ILI9881C_COMMAND_INSTR(0xC0, 0x08),
	ILI9881C_COMMAND_INSTR(0xC1, 0x1A),
	ILI9881C_COMMAND_INSTR(0xC2, 0x27),
	ILI9881C_COMMAND_INSTR(0xC3, 0x15),
	ILI9881C_COMMAND_INSTR(0xC4, 0x17),
	ILI9881C_COMMAND_INSTR(0xC5, 0x2A),
	ILI9881C_COMMAND_INSTR(0xC6, 0x1E),
	ILI9881C_COMMAND_INSTR(0xC7, 0x1F),
	ILI9881C_COMMAND_INSTR(0xC8, 0x8B),
	ILI9881C_COMMAND_INSTR(0xC9, 0x1B),
	ILI9881C_COMMAND_INSTR(0xCA, 0x27),
	ILI9881C_COMMAND_INSTR(0xCB, 0x78),
	ILI9881C_COMMAND_INSTR(0xCC, 0x18),
	ILI9881C_COMMAND_INSTR(0xCD, 0x18),
	ILI9881C_COMMAND_INSTR(0xCE, 0x4C),
	ILI9881C_COMMAND_INSTR(0xCF, 0x21),
	ILI9881C_COMMAND_INSTR(0xD0, 0x27),
	ILI9881C_COMMAND_INSTR(0xD1, 0x54),
	ILI9881C_COMMAND_INSTR(0xD2, 0x67),
	ILI9881C_COMMAND_INSTR(0xD3, 0x39),
	/* Page 0: standard DCS space — tearing effect off, pixel format. */
	ILI9881C_SWITCH_PAGE_INSTR(0),
	ILI9881C_COMMAND_INSTR(0x35, 0x00),
	ILI9881C_COMMAND_INSTR(0x3A, 0x7),
};
static const struct ili9881c_instr tl050hdv35_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(3),
ILI9881C_COMMAND_INSTR(0x01, 0x00),
@ -1080,10 +1276,10 @@ static int ili9881c_prepare(struct drm_panel *panel)
msleep(5);
/* And reset it */
gpiod_set_value(ctx->reset, 1);
gpiod_set_value_cansleep(ctx->reset, 1);
msleep(20);
gpiod_set_value(ctx->reset, 0);
gpiod_set_value_cansleep(ctx->reset, 0);
msleep(20);
for (i = 0; i < ctx->desc->init_length; i++) {
@ -1138,7 +1334,7 @@ static int ili9881c_unprepare(struct drm_panel *panel)
mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
regulator_disable(ctx->power);
gpiod_set_value(ctx->reset, 1);
gpiod_set_value_cansleep(ctx->reset, 1);
return 0;
}
@ -1177,6 +1373,23 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.height_mm = 217,
};
/* 720x1280 portrait mode for the Startek KD050HDFIA020 (62x110 mm). */
static const struct drm_display_mode kd050hdfia020_default_mode = {
	.clock		= 62000,

	.hdisplay	= 720,
	.hsync_start	= 720 + 10,
	.hsync_end	= 720 + 10 + 20,
	.htotal		= 720 + 10 + 20 + 30,

	.vdisplay	= 1280,
	.vsync_start	= 1280 + 10,
	.vsync_end	= 1280 + 10 + 10,
	.vtotal		= 1280 + 10 + 10 + 20,

	.width_mm	= 62,
	.height_mm	= 110,
};
static const struct drm_display_mode tl050hdv35_default_mode = {
.clock = 59400,
@ -1345,6 +1558,14 @@ static const struct ili9881c_desc k101_im2byl02_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
};
/* Panel description tying the KD050HDFIA020 init sequence to its mode. */
static const struct ili9881c_desc kd050hdfia020_desc = {
	.init = kd050hdfia020_init,
	.init_length = ARRAY_SIZE(kd050hdfia020_init),
	.mode = &kd050hdfia020_default_mode,
	/* Video mode with sync pulses; commands sent in low-power mode. */
	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
		      MIPI_DSI_MODE_LPM,
};
static const struct ili9881c_desc tl050hdv35_desc = {
.init = tl050hdv35_init,
.init_length = ARRAY_SIZE(tl050hdv35_init),
@ -1372,6 +1593,7 @@ static const struct ili9881c_desc am8001280g_desc = {
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
{ .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc },
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },

View file

@ -295,8 +295,6 @@ static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx)
mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef);
mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02);
mipi_dsi_dcs_write_seq(dsi, 0x11);
mipi_dsi_dcs_write_seq(dsi, 0x29);
ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
if (ret < 0) {
@ -326,7 +324,8 @@ static const struct drm_display_mode ltk050h3148w_mode = {
static const struct ltk050h3146w_desc ltk050h3148w_data = {
.mode = &ltk050h3148w_mode,
.init = ltk050h3148w_init_sequence,
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_VIDEO_BURST,
};
static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)

View file

@ -109,19 +109,17 @@ static int atana33xc20_resume(struct device *dev)
if (hpd_asserted < 0)
ret = hpd_asserted;
if (ret)
if (ret) {
dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret);
return ret;
}
if (p->aux->wait_hpd_asserted) {
goto error;
}
} else if (p->aux->wait_hpd_asserted) {
ret = p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US);
if (ret)
if (ret) {
dev_warn(dev, "Controller error waiting for HPD: %d\n", ret);
return ret;
goto error;
}
}
/*
@ -133,6 +131,12 @@ static int atana33xc20_resume(struct device *dev)
* right times.
*/
return 0;
error:
drm_dp_dpcd_set_powered(p->aux, false);
regulator_disable(p->supply);
return ret;
}
static int atana33xc20_disable(struct drm_panel *panel)

View file

@ -0,0 +1,285 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the Samsung S6E3FA7 panel.
*
* Copyright (c) 2022-2024, The Linux Foundation. All rights reserved.
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
/* Per-device state for one Samsung S6E3FA7 panel. */
struct s6e3fa7_panel {
	struct drm_panel panel;		/* embedded DRM panel */
	struct mipi_dsi_device *dsi;	/* backing DSI peripheral */
	struct gpio_desc *reset_gpio;	/* reset line; value 1 holds the panel in reset */
};
/* Recover the driver state from the embedded drm_panel. */
static inline struct s6e3fa7_panel *to_s6e3fa7_panel(struct drm_panel *panel)
{
	return container_of(panel, struct s6e3fa7_panel, panel);
}
/*
 * Pulse the reset line: assert for 1-2 ms, then release and wait
 * 10-11 ms before talking to the panel.  Delay values come from the
 * vendor sequence; do not shorten.
 */
static void s6e3fa7_panel_reset(struct s6e3fa7_panel *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(10000, 11000);
}
/*
 * Run the panel's power-on DCS sequence.
 *
 * Returns 0 on success or a negative errno from the failing command.
 * The raw 0xf0/0xf4 writes are from the vendor init dump; 0xf0 0x5a,0x5a
 * and 0xf0 0xa5,0xa5 presumably unlock/relock a vendor command page —
 * TODO confirm against the panel datasheet.
 */
static int s6e3fa7_panel_on(struct s6e3fa7_panel *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	/* Settle time after sleep-out before sending further commands. */
	msleep(120);

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0) {
		dev_err(dev, "Failed to set tear on: %d\n", ret);
		return ret;
	}

	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
	mipi_dsi_dcs_write_seq(dsi, 0xf4,
			       0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0,
			       0x00, 0xb4, 0x37, 0x70, 0x79, 0x69);
	mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
	/* 0x20 enables backlight control via the display controller. */
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}

	return 0;
}
/*
 * drm_panel .prepare hook: release the panel from reset and run the
 * power-on command sequence.  On failure the panel is put back into
 * reset so a later prepare starts from a known state.
 */
static int s6e3fa7_panel_prepare(struct drm_panel *panel)
{
	struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
	int err;

	s6e3fa7_panel_reset(ctx);

	err = s6e3fa7_panel_on(ctx);
	if (err >= 0)
		return 0;

	dev_err(&ctx->dsi->dev, "Failed to initialize panel: %d\n", err);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	return err;
}
static int s6e3fa7_panel_unprepare(struct drm_panel *panel)
{
struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return 0;
}
/*
 * drm_panel .disable hook: blank the display and put the controller to
 * sleep, then wait 120 ms before power may be cut.
 *
 * Returns 0 or a negative errno from the failing DCS command.
 */
static int s6e3fa7_panel_disable(struct drm_panel *panel)
{
	struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}
	/* Required settle time after sleep-in. */
	msleep(120);

	return 0;
}
/*
 * Fixed 1080x2220 mode; the pixel clock is derived from the blanking
 * totals so the refresh rate comes out at 60 Hz.  Physical size 62x127 mm.
 */
static const struct drm_display_mode s6e3fa7_panel_mode = {
	.clock = (1080 + 32 + 32 + 78) * (2220 + 32 + 4 + 78) * 60 / 1000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 32,
	.hsync_end = 1080 + 32 + 32,
	.htotal = 1080 + 32 + 32 + 78,
	.vdisplay = 2220,
	.vsync_start = 2220 + 32,
	.vsync_end = 2220 + 32 + 4,
	.vtotal = 2220 + 32 + 4 + 78,
	.width_mm = 62,
	.height_mm = 127,
};
/*
 * drm_panel .get_modes hook: expose the single fixed panel mode and the
 * physical dimensions.  Returns the number of modes added (1), or
 * -ENOMEM if the mode could not be duplicated.
 */
static int s6e3fa7_panel_get_modes(struct drm_panel *panel,
				   struct drm_connector *connector)
{
	struct drm_display_mode *dup;

	dup = drm_mode_duplicate(connector->dev, &s6e3fa7_panel_mode);
	if (!dup)
		return -ENOMEM;

	dup->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(dup);
	drm_mode_probed_add(connector, dup);

	connector->display_info.width_mm = dup->width_mm;
	connector->display_info.height_mm = dup->height_mm;

	return 1;
}
/* drm_panel operations; no .enable hook — everything happens in .prepare. */
static const struct drm_panel_funcs s6e3fa7_panel_funcs = {
	.prepare = s6e3fa7_panel_prepare,
	.unprepare = s6e3fa7_panel_unprepare,
	.disable = s6e3fa7_panel_disable,
	.get_modes = s6e3fa7_panel_get_modes,
};
/*
 * Backlight .update_status hook: push the requested brightness to the
 * panel over DSI (16-bit DCS brightness).  Returns 0 or a negative errno.
 */
static int s6e3fa7_panel_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 level = backlight_get_brightness(bl);
	int err;

	err = mipi_dsi_dcs_set_display_brightness_large(dsi, level);

	return err < 0 ? err : 0;
}
/*
 * Backlight .get_brightness hook: read the current brightness back from
 * the panel.  Returns the level (0..1023) or a negative errno.
 */
static int s6e3fa7_panel_bl_get_brightness(struct backlight_device *bl)
{
	u16 level;
	int err;

	err = mipi_dsi_dcs_get_display_brightness_large(bl_get_data(bl), &level);

	return err < 0 ? err : level;
}
/* Backlight operations backed by DCS brightness commands. */
static const struct backlight_ops s6e3fa7_panel_bl_ops = {
	.update_status = s6e3fa7_panel_bl_update_status,
	.get_brightness = s6e3fa7_panel_bl_get_brightness,
};
/*
 * Register a device-managed backlight for the panel.  Initial and
 * maximum brightness are both 1023 (10-bit range used by the DCS
 * brightness commands).  Returns the device or an ERR_PTR.
 */
static struct backlight_device *
s6e3fa7_panel_create_backlight(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	const struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.brightness = 1023,
		.max_brightness = 1023,
	};

	return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
					      &s6e3fa7_panel_bl_ops, &props);
}
/*
 * DSI probe: acquire the reset GPIO (asserted at probe via
 * GPIOD_OUT_HIGH), configure the DSI link, register the panel and its
 * backlight, then attach to the DSI host.  The panel is removed again
 * if the attach fails.  Returns 0 or a negative errno.
 */
static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct s6e3fa7_panel *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset-gpios\n");

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;

	drm_panel_init(&ctx->panel, dev, &s6e3fa7_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	/* Panel must be powered before the DSI host starts the link. */
	ctx->panel.prepare_prev_first = true;

	ctx->panel.backlight = s6e3fa7_panel_create_backlight(dsi);
	if (IS_ERR(ctx->panel.backlight))
		return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
				     "Failed to create backlight\n");

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
		/* Undo drm_panel_add() before bailing out. */
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}
/*
 * DSI remove: detach from the host (best effort — only logged on
 * failure) and unregister the panel.
 */
static void s6e3fa7_panel_remove(struct mipi_dsi_device *dsi)
{
	struct s6e3fa7_panel *ctx = mipi_dsi_get_drvdata(dsi);
	int err;

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", err);

	drm_panel_remove(&ctx->panel);
}
/* Devicetree match table. */
static const struct of_device_id s6e3fa7_panel_of_match[] = {
	{ .compatible = "samsung,s6e3fa7-ams559nk06" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, s6e3fa7_panel_of_match);
/* MIPI-DSI driver glue and module metadata. */
static struct mipi_dsi_driver s6e3fa7_panel_driver = {
	.probe = s6e3fa7_panel_probe,
	.remove = s6e3fa7_panel_remove,
	.driver = {
		.name = "panel-samsung-s6e3fa7",
		.of_match_table = s6e3fa7_panel_of_match,
	},
};
module_mipi_dsi_driver(s6e3fa7_panel_driver);

MODULE_AUTHOR("Richard Acayan <mailingradian@gmail.com>");
MODULE_DESCRIPTION("DRM driver for Samsung S6E3FA7 command mode DSI panel");
MODULE_LICENSE("GPL");

View file

@ -1457,6 +1457,32 @@ static const struct panel_desc boe_hv070wsa = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
/*
 * CrystalClear CMT430B19N00: 480x272 timing ranges (min/typ/max),
 * active-low sync pulses.
 */
static const struct display_timing cct_cmt430b19n00_timing = {
	.pixelclock = { 8000000, 9000000, 12000000 },
	.hactive = { 480, 480, 480 },
	.hfront_porch = { 2, 8, 75 },
	.hback_porch = { 3, 43, 43 },
	.hsync_len = { 2, 4, 75 },
	.vactive = { 272, 272, 272 },
	.vfront_porch = { 2, 8, 37 },
	.vback_porch = { 2, 12, 12 },
	.vsync_len = { 2, 4, 37 },
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW
};
/* CrystalClear CMT430B19N00: 4.3" 480x272 DPI panel, 95x53 mm, RGB888. */
static const struct panel_desc cct_cmt430b19n00 = {
	.timings = &cct_cmt430b19n00_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 95,
		.height = 53,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	/* Data latched on falling pixel-clock edge, DE active high. */
	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
.clock = 9000,
.hdisplay = 480,
@ -3465,6 +3491,32 @@ static const struct panel_desc pda_91_00156_a0 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/* Powertip PH128800T006-ZHC01: 1280x800, positive sync polarity. */
static const struct drm_display_mode powertip_ph128800t006_zhc01_mode = {
	.clock = 66500,
	.hdisplay = 1280,
	.hsync_start = 1280 + 12,
	.hsync_end = 1280 + 12 + 20,
	.htotal = 1280 + 12 + 20 + 56,
	.vdisplay = 800,
	.vsync_start = 800 + 1,
	.vsync_end = 800 + 1 + 3,
	.vtotal = 800 + 1 + 3 + 20,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
/* Powertip PH128800T006-ZHC01: 1280x800 LVDS panel, 216x135 mm. */
static const struct panel_desc powertip_ph128800t006_zhc01 = {
	.modes = &powertip_ph128800t006_zhc01_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 216,
		.height = 135,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
.clock = 24750,
.hdisplay = 800,
@ -4402,6 +4454,9 @@ static const struct of_device_id platform_of_match[] = {
}, {
.compatible = "boe,hv070wsa-100",
.data = &boe_hv070wsa
}, {
.compatible = "cct,cmt430b19n00",
.data = &cct_cmt430b19n00,
}, {
.compatible = "cdtech,s043wq26h-ct7",
.data = &cdtech_s043wq26h_ct7,
@ -4639,6 +4694,9 @@ static const struct of_device_id platform_of_match[] = {
}, {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
.compatible = "powertip,ph128800t006-zhc01",
.data = &powertip_ph128800t006_zhc01,
}, {
.compatible = "powertip,ph800480t013-idf02",
.data = &powertip_ph800480t013_idf02,

View file

@ -12,6 +12,4 @@ panfrost-y := \
panfrost_perfcnt.o \
panfrost_dump.o
panfrost-$(CONFIG_DEBUG_FS) += panfrost_debugfs.o
obj-$(CONFIG_DRM_PANFROST) += panfrost.o

View file

@ -1,21 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2023 Collabora ltd. */
/* Copyright 2023 Amazon.com, Inc. or its affiliates. */
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gpu.h"
#include "panfrost_debugfs.h"
/*
 * Create the per-minor debugfs entries for panfrost: a single "profile"
 * file backed directly by the device's atomic profile_mode flag.
 */
void panfrost_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	/* The platform device's drvdata is the panfrost_device. */
	struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev->dev));

	debugfs_create_atomic_t("profile", 0600, minor->debugfs_root, &pfdev->profile_mode);
}

View file

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2023 Collabora ltd.
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 */
#ifndef PANFROST_DEBUGFS_H
#define PANFROST_DEBUGFS_H

/* Only available when debugfs is built in; callers guard likewise. */
#ifdef CONFIG_DEBUG_FS
void panfrost_debugfs_init(struct drm_minor *minor);
#endif

#endif /* PANFROST_DEBUGFS_H */

View file

@ -130,7 +130,7 @@ struct panfrost_device {
struct list_head scheduled_jobs;
struct panfrost_perfcnt *perfcnt;
atomic_t profile_mode;
bool profile_mode;
struct mutex sched_lock;

View file

@ -20,7 +20,6 @@
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
#include "panfrost_debugfs.h"
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);
@ -551,10 +550,12 @@ static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS);
for (i = 0; i < NUM_JOB_SLOTS - 1; i++) {
drm_printf(p, "drm-engine-%s:\t%llu ns\n",
engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]);
drm_printf(p, "drm-cycles-%s:\t%llu\n",
engine_names[i], panfrost_priv->engine_usage.cycles[i]);
if (pfdev->profile_mode) {
drm_printf(p, "drm-engine-%s:\t%llu ns\n",
engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]);
drm_printf(p, "drm-cycles-%s:\t%llu\n",
engine_names[i], panfrost_priv->engine_usage.cycles[i]);
}
drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n",
engine_names[i], pfdev->pfdevfreq.fast_rate);
drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n",
@ -600,10 +601,6 @@ static const struct drm_driver panfrost_drm_driver = {
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = panfrost_debugfs_init,
#endif
};
static int panfrost_probe(struct platform_device *pdev)
@ -692,6 +689,40 @@ static void panfrost_remove(struct platform_device *pdev)
drm_dev_put(ddev);
}
/* Show the current profiling state (0/1) through
 * /sys/bus/platform/drivers/panfrost/.../profiling.
 */
static ssize_t profiling_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", pfdev->profile_mode);
}

/* Parse a boolean from userspace and toggle cycle-counter profiling.
 * Returns @len on success, or the kstrtobool() error code on bad input.
 */
static ssize_t profiling_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);
	bool value;
	int err;

	err = kstrtobool(buf, &value);
	if (err)
		return err;

	pfdev->profile_mode = value;

	return len;
}

static DEVICE_ATTR_RW(profiling);

static struct attribute *panfrost_attrs[] = {
	&dev_attr_profiling.attr,
	NULL,
};

/* Generates panfrost_groups, wired to driver.dev_groups below. */
ATTRIBUTE_GROUPS(panfrost);
/*
* The OPP core wants the supply names to be NULL terminated, but we need the
* correct num_supplies value for regulator core. Hence, we NULL terminate here
@ -789,6 +820,7 @@ static struct platform_driver panfrost_driver = {
.name = "panfrost",
.pm = pm_ptr(&panfrost_pm_ops),
.of_match_table = dt_match,
.dev_groups = panfrost_groups,
},
};
module_platform_driver(panfrost_driver);

View file

@ -243,7 +243,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
subslot = panfrost_enqueue_job(pfdev, js, job);
/* Don't queue the job if a reset is in progress */
if (!atomic_read(&pfdev->reset.pending)) {
if (atomic_read(&pfdev->profile_mode)) {
if (pfdev->profile_mode) {
panfrost_cycle_counter_get(pfdev);
job->is_profiled = true;
job->start_time = ktime_get();

View file

@ -0,0 +1,23 @@
# SPDX-License-Identifier: GPL-2.0 or MIT
config DRM_PANTHOR
tristate "Panthor (DRM support for ARM Mali CSF-based GPUs)"
depends on DRM
depends on ARM || ARM64 || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
depends on MMU
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select DRM_EXEC
select DRM_GEM_SHMEM_HELPER
select DRM_GPUVM
select DRM_SCHED
select IOMMU_IO_PGTABLE_LPAE
select IOMMU_SUPPORT
select PM_DEVFREQ
help
DRM driver for ARM Mali CSF-based GPUs.
This driver is for Mali (or Immortalis) Valhall Gxxx GPUs.
Note that the Mali-G68 and Mali-G78, while Valhall architecture, will
be supported with the panfrost driver as they are not CSF GPUs.

View file

@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0 or MIT
panthor-y := \
panthor_devfreq.o \
panthor_device.o \
panthor_drv.o \
panthor_fw.o \
panthor_gem.o \
panthor_gpu.o \
panthor_heap.o \
panthor_mmu.o \
panthor_sched.o
obj-$(CONFIG_DRM_PANTHOR) += panthor.o

View file

@ -0,0 +1,283 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2019 Collabora ltd. */
#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <drm/drm_managed.h>
#include "panthor_devfreq.h"
#include "panthor_device.h"
/**
 * struct panthor_devfreq - Device frequency management
 *
 * Holds the devfreq handle plus the busy/idle accounting consumed by the
 * simple_ondemand governor through panthor_devfreq_get_dev_status().
 */
struct panthor_devfreq {
	/** @devfreq: devfreq device. */
	struct devfreq *devfreq;

	/** @gov_data: Governor data. */
	struct devfreq_simple_ondemand_data gov_data;

	/** @busy_time: Busy time. */
	ktime_t busy_time;

	/** @idle_time: Idle time. */
	ktime_t idle_time;

	/** @time_last_update: Last update time. */
	ktime_t time_last_update;

	/** @last_busy_state: True if the GPU was busy last time we updated the state. */
	bool last_busy_state;

	/**
	 * @lock: Lock used to protect busy_time, idle_time, time_last_update and
	 * last_busy_state.
	 *
	 * These fields can be accessed concurrently by panthor_devfreq_get_dev_status()
	 * and panthor_devfreq_record_{busy,idle}().
	 */
	spinlock_t lock;
};
/* Fold the time elapsed since the last update into the busy or idle
 * counter, depending on the state the GPU was in over that period.
 *
 * Caller must hold pdevfreq->lock.
 */
static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq)
{
	ktime_t now, last;

	now = ktime_get();
	last = pdevfreq->time_last_update;

	if (pdevfreq->last_busy_state)
		pdevfreq->busy_time += ktime_sub(now, last);
	else
		pdevfreq->idle_time += ktime_sub(now, last);

	pdevfreq->time_last_update = now;
}
/* devfreq ->target hook: round @freq to the closest supported OPP, then
 * let the OPP core program the clock rate.
 */
static int panthor_devfreq_target(struct device *dev, unsigned long *freq,
				  u32 flags)
{
	struct dev_pm_opp *opp = devfreq_recommended_opp(dev, freq, flags);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* The lookup was only needed to round @freq; drop the OPP reference
	 * before applying the rate.
	 */
	dev_pm_opp_put(opp);

	return dev_pm_opp_set_rate(dev, *freq);
}
/* Clear busy/idle accounting and restart the measurement window.
 *
 * Caller must hold pdevfreq->lock, except during init where no concurrent
 * users exist yet.
 */
static void panthor_devfreq_reset(struct panthor_devfreq *pdevfreq)
{
	pdevfreq->busy_time = 0;
	pdevfreq->idle_time = 0;
	pdevfreq->time_last_update = ktime_get();
}
/* devfreq ->get_dev_status hook: report busy/total time since the last
 * call, then reset the accounting window.
 */
static int panthor_devfreq_get_dev_status(struct device *dev,
					  struct devfreq_dev_status *status)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
	unsigned long irqflags;

	status->current_frequency = clk_get_rate(ptdev->clks.core);

	spin_lock_irqsave(&pdevfreq->lock, irqflags);

	panthor_devfreq_update_utilization(pdevfreq);

	status->total_time = ktime_to_ns(ktime_add(pdevfreq->busy_time,
						   pdevfreq->idle_time));

	status->busy_time = ktime_to_ns(pdevfreq->busy_time);

	panthor_devfreq_reset(pdevfreq);

	spin_unlock_irqrestore(&pdevfreq->lock, irqflags);

	/* Guard the percentage computation: total_time / 100 is zero when
	 * less than 100ns elapsed since the last poll, which would divide
	 * by zero.
	 */
	drm_dbg(&ptdev->base, "busy %lu total %lu %lu %% freq %lu MHz\n",
		status->busy_time, status->total_time,
		status->total_time >= 100 ?
			status->busy_time / (status->total_time / 100) : 0,
		status->current_frequency / 1000 / 1000);

	return 0;
}
/* devfreq profile; .initial_freq is filled in by panthor_devfreq_init()
 * from the current core clock rate.
 */
static struct devfreq_dev_profile panthor_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50, /* ~3 frames */
	.target = panthor_devfreq_target,
	.get_dev_status = panthor_devfreq_get_dev_status,
};
int panthor_devfreq_init(struct panthor_device *ptdev)
{
/* There's actually 2 regulators (mali and sram), but the OPP core only
* supports one.
*
* We assume the sram regulator is coupled with the mali one and let
* the coupling logic deal with voltage updates.
*/
static const char * const reg_names[] = { "mali", NULL };
struct thermal_cooling_device *cooling;
struct device *dev = ptdev->base.dev;
struct panthor_devfreq *pdevfreq;
struct dev_pm_opp *opp;
unsigned long cur_freq;
int ret;
pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL);
if (!pdevfreq)
return -ENOMEM;
ptdev->devfreq = pdevfreq;
ret = devm_pm_opp_set_regulators(dev, reg_names);
if (ret) {
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
return ret;
}
ret = devm_pm_opp_of_add_table(dev);
if (ret)
return ret;
spin_lock_init(&pdevfreq->lock);
panthor_devfreq_reset(pdevfreq);
cur_freq = clk_get_rate(ptdev->clks.core);
opp = devfreq_recommended_opp(dev, &cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
panthor_devfreq_profile.initial_freq = cur_freq;
/* Regulator coupling only takes care of synchronizing/balancing voltage
* updates, but the coupled regulator needs to be enabled manually.
*
* We use devm_regulator_get_enable_optional() and keep the sram supply
* enabled until the device is removed, just like we do for the mali
* supply, which is enabled when dev_pm_opp_set_opp(dev, opp) is called,
* and disabled when the opp_table is torn down, using the devm action.
*
* If we really care about disabling regulators on suspend, we should:
* - use devm_regulator_get_optional() here
* - call dev_pm_opp_set_opp(dev, NULL) before leaving this function
* (this disables the regulator passed to the OPP layer)
* - call dev_pm_opp_set_opp(dev, NULL) and
* regulator_disable(ptdev->regulators.sram) in
* panthor_devfreq_suspend()
* - call dev_pm_opp_set_opp(dev, default_opp) and
* regulator_enable(ptdev->regulators.sram) in
* panthor_devfreq_resume()
*
* But without knowing if it's beneficial or not (in term of power
* consumption), or how much it slows down the suspend/resume steps,
* let's just keep regulators enabled for the device lifetime.
*/
ret = devm_regulator_get_enable_optional(dev, "sram");
if (ret && ret != -ENODEV) {
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "Couldn't retrieve/enable sram supply\n");
return ret;
}
/*
* Set the recommend OPP this will enable and configure the regulator
* if any and will avoid a switch off by regulator_late_cleanup()
*/
ret = dev_pm_opp_set_opp(dev, opp);
if (ret) {
DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
return ret;
}
dev_pm_opp_put(opp);
/*
* Setup default thresholds for the simple_ondemand governor.
* The values are chosen based on experiments.
*/
pdevfreq->gov_data.upthreshold = 45;
pdevfreq->gov_data.downdifferential = 5;
pdevfreq->devfreq = devm_devfreq_add_device(dev, &panthor_devfreq_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
&pdevfreq->gov_data);
if (IS_ERR(pdevfreq->devfreq)) {
DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
ret = PTR_ERR(pdevfreq->devfreq);
pdevfreq->devfreq = NULL;
return ret;
}
cooling = devfreq_cooling_em_register(pdevfreq->devfreq, NULL);
if (IS_ERR(cooling))
DRM_DEV_INFO(dev, "Failed to register cooling device\n");
return 0;
}
/* Restart devfreq accounting and polling after a device resume.
 * No-op (returns 0) if devfreq initialization didn't complete.
 */
int panthor_devfreq_resume(struct panthor_device *ptdev)
{
	struct panthor_devfreq *pdevfreq = ptdev->devfreq;

	if (!pdevfreq->devfreq)
		return 0;

	/* Start a fresh measurement window: time spent suspended must not
	 * count as idle time.
	 */
	panthor_devfreq_reset(pdevfreq);

	return devfreq_resume_device(pdevfreq->devfreq);
}
/* Stop devfreq polling while the device is suspended.
 * No-op (returns 0) if devfreq initialization didn't complete.
 */
int panthor_devfreq_suspend(struct panthor_device *ptdev)
{
	struct panthor_devfreq *pdevfreq = ptdev->devfreq;

	if (!pdevfreq->devfreq)
		return 0;

	return devfreq_suspend_device(pdevfreq->devfreq);
}
/* Mark the start of a busy period for utilization accounting: credit the
 * elapsed interval to the previous state, then flag the GPU as busy.
 * No-op if devfreq initialization didn't complete.
 */
void panthor_devfreq_record_busy(struct panthor_device *ptdev)
{
	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
	unsigned long irqflags;

	if (!pdevfreq->devfreq)
		return;

	spin_lock_irqsave(&pdevfreq->lock, irqflags);

	panthor_devfreq_update_utilization(pdevfreq);
	pdevfreq->last_busy_state = true;

	spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
}
/* Mark the start of an idle period for utilization accounting: credit the
 * elapsed interval to the previous state, then flag the GPU as idle.
 * No-op if devfreq initialization didn't complete.
 */
void panthor_devfreq_record_idle(struct panthor_device *ptdev)
{
	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
	unsigned long irqflags;

	if (!pdevfreq->devfreq)
		return;

	spin_lock_irqsave(&pdevfreq->lock, irqflags);

	panthor_devfreq_update_utilization(pdevfreq);
	pdevfreq->last_busy_state = false;

	spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
}

View file

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2019 Collabora ltd. */
#ifndef __PANTHOR_DEVFREQ_H__
#define __PANTHOR_DEVFREQ_H__
struct devfreq;
struct thermal_cooling_device;
struct panthor_device;
struct panthor_devfreq;
int panthor_devfreq_init(struct panthor_device *ptdev);
int panthor_devfreq_resume(struct panthor_device *ptdev);
int panthor_devfreq_suspend(struct panthor_device *ptdev);
void panthor_devfreq_record_busy(struct panthor_device *ptdev);
void panthor_devfreq_record_idle(struct panthor_device *ptdev);
#endif /* __PANTHOR_DEVFREQ_H__ */

View file

@ -0,0 +1,561 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
/* Acquire the GPU clocks: "core" is mandatory, "stacks" and "coregroup"
 * are optional (devm_clk_get_optional() returns NULL when absent).
 * All clocks are devm-managed; errors are reported via dev_err_probe()
 * so -EPROBE_DEFER stays silent.
 */
static int panthor_clk_init(struct panthor_device *ptdev)
{
	ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
	if (IS_ERR(ptdev->clks.core))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.core),
				     "get 'core' clock failed");

	ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
	if (IS_ERR(ptdev->clks.stacks))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.stacks),
				     "get 'stacks' clock failed");

	ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
	if (IS_ERR(ptdev->clks.coregroup))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.coregroup),
				     "get 'coregroup' clock failed");

	drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
	return 0;
}
/* Unplug the device and cleanly shut the GPU down. Safe against concurrent
 * callers: only the first one performs the teardown, the others block on
 * unplug.done until it's finished.
 */
void panthor_device_unplug(struct panthor_device *ptdev)
{
	/* This function can be called from two different path: the reset work
	 * and the platform device remove callback. drm_dev_unplug() doesn't
	 * deal with concurrent callers, so we have to protect drm_dev_unplug()
	 * calls with our own lock, and bail out if the device is already
	 * unplugged.
	 */
	mutex_lock(&ptdev->unplug.lock);
	if (drm_dev_is_unplugged(&ptdev->base)) {
		/* Someone beat us, release the lock and wait for the unplug
		 * operation to be reported as done.
		 */
		mutex_unlock(&ptdev->unplug.lock);
		wait_for_completion(&ptdev->unplug.done);
		return;
	}

	/* Call drm_dev_unplug() so any access to HW blocks happening after
	 * that point get rejected.
	 */
	drm_dev_unplug(&ptdev->base);

	/* We do the rest of the unplug with the unplug lock released,
	 * future callers will wait on ptdev->unplug.done anyway.
	 */
	mutex_unlock(&ptdev->unplug.lock);

	/* Keep the device powered while the HW blocks are torn down. */
	drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);

	/* Now, try to cleanly shutdown the GPU before the device resources
	 * get reclaimed.
	 */
	panthor_sched_unplug(ptdev);
	panthor_fw_unplug(ptdev);
	panthor_mmu_unplug(ptdev);
	panthor_gpu_unplug(ptdev);

	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	pm_runtime_put_sync_suspend(ptdev->base.dev);

	/* If PM is disabled, we need to call the suspend handler manually. */
	if (!IS_ENABLED(CONFIG_PM))
		panthor_device_suspend(ptdev->base.dev);

	/* Report the unplug operation as done to unblock concurrent
	 * panthor_device_unplug() callers.
	 */
	complete_all(&ptdev->unplug.done);
}
/* drmm action: make sure no reset work is pending or running before
 * destroying the reset workqueue.
 */
static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);

	cancel_work_sync(&ptdev->reset.work);
	destroy_workqueue(ptdev->reset.wq);
}
/* Reset worker: quiesce the scheduler/FW/MMU, soft-reset the GPU, then
 * bring everything back up. If the MCU can't be rebooted the device is
 * unplugged, making it unusable.
 */
static void panthor_device_reset_work(struct work_struct *work)
{
	struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
	int ret = 0, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) {
		/*
		 * No need for a reset as the device has been (or will be)
		 * powered down
		 */
		atomic_set(&ptdev->reset.pending, 0);
		return;
	}

	/* Bail out if the device was unplugged while the work was queued. */
	if (!drm_dev_enter(&ptdev->base, &cookie))
		return;

	panthor_sched_pre_reset(ptdev);
	panthor_fw_pre_reset(ptdev, true);
	panthor_mmu_pre_reset(ptdev);
	panthor_gpu_soft_reset(ptdev);
	panthor_gpu_l2_power_on(ptdev);
	panthor_mmu_post_reset(ptdev);
	ret = panthor_fw_post_reset(ptdev);
	if (ret)
		goto out_dev_exit;

	/* Clear the pending flag before resuming the scheduler so a new
	 * reset request raised from now on gets re-queued.
	 */
	atomic_set(&ptdev->reset.pending, 0);
	panthor_sched_post_reset(ptdev);

out_dev_exit:
	drm_dev_exit(cookie);

	if (ret) {
		panthor_device_unplug(ptdev);
		drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
	}
}
static bool panthor_device_is_initialized(struct panthor_device *ptdev)
{
return !!ptdev->scheduler;
}
/* drmm action: release the dummy LATEST_FLUSH page allocated at init. */
static void panthor_device_free_page(struct drm_device *ddev, void *data)
{
	__free_page(data);
}
/* One-time device initialization: PM/unplug/reset infrastructure, the dummy
 * LATEST_FLUSH page, clocks, devfreq and the MMIO mapping, then GPU, MMU,
 * FW and scheduler bring-up before registering the DRM device. Failures in
 * the bring-up sequence unwind through the err_* labels in reverse order.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int panthor_device_init(struct panthor_device *ptdev)
{
	u32 *dummy_page_virt;
	struct resource *res;
	struct page *p;
	int ret;

	ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;

	init_completion(&ptdev->unplug.done);
	ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
	if (ret)
		return ret;

	ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
	if (ret)
		return ret;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	p = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	ptdev->pm.dummy_latest_flush = p;
	dummy_page_virt = page_address(p);
	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
				       ptdev->pm.dummy_latest_flush);
	if (ret)
		return ret;

	/*
	 * Set the dummy page holding the latest flush to 1. This will cause the
	 * flush to be avoided as we know it isn't necessary if the submission
	 * happens while the dummy page is mapped. Zero cannot be used because
	 * that means 'always flush'.
	 */
	*dummy_page_virt = 1;

	INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
	ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
	if (!ptdev->reset.wq)
		return -ENOMEM;

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
	if (ret)
		return ret;

	ret = panthor_clk_init(ptdev);
	if (ret)
		return ret;

	ret = panthor_devfreq_init(ptdev);
	if (ret)
		return ret;

	ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
							      0, &res);
	if (IS_ERR(ptdev->iomem))
		return PTR_ERR(ptdev->iomem);

	/* Kept around for the USER MMIO fault handler, which maps the real
	 * LATEST_FLUSH_ID register by physical address.
	 */
	ptdev->phys_addr = res->start;

	ret = devm_pm_runtime_enable(ptdev->base.dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (ret)
		return ret;

	/* If PM is disabled, we need to call panthor_device_resume() manually. */
	if (!IS_ENABLED(CONFIG_PM)) {
		ret = panthor_device_resume(ptdev->base.dev);
		if (ret)
			return ret;
	}

	ret = panthor_gpu_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_mmu_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_fw_init(ptdev);
	if (ret)
		goto err_unplug_mmu;

	ret = panthor_sched_init(ptdev);
	if (ret)
		goto err_unplug_fw;

	/* ~3 frames */
	pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
	pm_runtime_use_autosuspend(ptdev->base.dev);

	ret = drm_dev_register(&ptdev->base, 0);
	if (ret)
		goto err_disable_autosuspend;

	pm_runtime_put_autosuspend(ptdev->base.dev);
	return 0;

err_disable_autosuspend:
	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	panthor_sched_unplug(ptdev);

err_unplug_fw:
	panthor_fw_unplug(ptdev);

err_unplug_mmu:
	panthor_mmu_unplug(ptdev);

err_unplug_gpu:
	panthor_gpu_unplug(ptdev);

err_rpm_put:
	pm_runtime_put_sync_suspend(ptdev->base.dev);
	return ret;
}
/* Generate a name-table entry for a DRM_PANTHOR_EXCEPTION_* code. */
#define PANTHOR_EXCEPTION(id) \
	[DRM_PANTHOR_EXCEPTION_ ## id] = { \
		.name = #id, \
	}

struct panthor_exception_info {
	/** @name: Human-readable exception name. */
	const char *name;
};

/* Sparse table indexed by exception code; unhandled codes have a NULL
 * name and are reported as "Unknown exception type".
 */
static const struct panthor_exception_info panthor_exception_infos[] = {
	PANTHOR_EXCEPTION(OK),
	PANTHOR_EXCEPTION(TERMINATED),
	PANTHOR_EXCEPTION(KABOOM),
	PANTHOR_EXCEPTION(EUREKA),
	PANTHOR_EXCEPTION(ACTIVE),
	PANTHOR_EXCEPTION(CS_RES_TERM),
	PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
	PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
	PANTHOR_EXCEPTION(CS_BUS_FAULT),
	PANTHOR_EXCEPTION(CS_INSTR_INVALID),
	PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
	PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
	PANTHOR_EXCEPTION(INSTR_INVALID_PC),
	PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
	PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
	PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
	PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
	PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
	PANTHOR_EXCEPTION(IMPRECISE_FAULT),
	PANTHOR_EXCEPTION(OOM),
	PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
	PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
	PANTHOR_EXCEPTION(GPU_BUS_FAULT),
	PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
	PANTHOR_EXCEPTION(PERM_FAULT_0),
	PANTHOR_EXCEPTION(PERM_FAULT_1),
	PANTHOR_EXCEPTION(PERM_FAULT_2),
	PANTHOR_EXCEPTION(PERM_FAULT_3),
	PANTHOR_EXCEPTION(ACCESS_FLAG_1),
	PANTHOR_EXCEPTION(ACCESS_FLAG_2),
	PANTHOR_EXCEPTION(ACCESS_FLAG_3),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
};
/* Translate a GPU exception code into a human-readable name, falling back
 * to "Unknown exception type" for out-of-range or unnamed codes.
 */
const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
{
	if (exception_code < ARRAY_SIZE(panthor_exception_infos) &&
	    panthor_exception_infos[exception_code].name)
		return panthor_exception_infos[exception_code].name;

	return "Unknown exception type";
}
/* Fault handler for the USER MMIO region: inserts the PFN of the real
 * LATEST_FLUSH_ID register when the device is active, or of the dummy
 * page when it's suspended. The mmio_lock serializes against the
 * suspend/resume paths, which invalidate these mappings with
 * unmap_mapping_range().
 */
static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct panthor_device *ptdev = vma->vm_private_data;
	u64 id = (u64)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long pfn;
	pgprot_t pgprot;
	vm_fault_t ret;
	bool active;
	int cookie;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return VM_FAULT_SIGBUS;

	mutex_lock(&ptdev->pm.mmio_lock);
	active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;

	switch (panthor_device_mmio_offset(id)) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (active)
			pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
		else
			pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
		break;

	default:
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* Real register access must be uncached; the dummy page is regular
	 * RAM and keeps the vma's default protection.
	 */
	pgprot = vma->vm_page_prot;
	if (active)
		pgprot = pgprot_noncached(pgprot);

	ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);

out_unlock:
	mutex_unlock(&ptdev->pm.mmio_lock);
	drm_dev_exit(cookie);
	return ret;
}

static const struct vm_operations_struct panthor_mmio_vm_ops = {
	.fault = panthor_mmio_vm_fault,
};
/* Validate and set up a userspace mapping of the USER MMIO region.
 * Only the read-only, single-page LATEST_FLUSH_ID offset is supported;
 * PFN insertion is deferred to the fault handler so suspend/resume can
 * switch between the real register and the dummy page.
 */
int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
{
	u64 id = (u64)vma->vm_pgoff << PAGE_SHIFT;

	switch (panthor_device_mmio_offset(id)) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
		    (vma->vm_flags & (VM_WRITE | VM_EXEC)))
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	/* Defer actual mapping to the fault handler. */
	vma->vm_private_data = ptdev;
	vma->vm_ops = &panthor_mmio_vm_ops;
	vm_flags_set(vma,
		     VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
		     VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);

	return 0;
}
/* PM resume handler: re-enable clocks and devfreq, resume the HW blocks,
 * then invalidate the dummy MMIO mappings under pm.mmio_lock before moving
 * the PM state to ACTIVE. On failure everything is unwound and the state
 * returns to SUSPENDED.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int panthor_device_resume(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int ret, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
		return -EINVAL;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);

	ret = clk_prepare_enable(ptdev->clks.core);
	if (ret)
		goto err_set_suspended;

	ret = clk_prepare_enable(ptdev->clks.stacks);
	if (ret)
		goto err_disable_core_clk;

	ret = clk_prepare_enable(ptdev->clks.coregroup);
	if (ret)
		goto err_disable_stacks_clk;

	ret = panthor_devfreq_resume(ptdev);
	if (ret)
		goto err_disable_coregroup_clk;

	/* HW blocks only exist once panthor_device_init() completed; skip
	 * them during the initial probe-time resume.
	 */
	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		panthor_gpu_resume(ptdev);
		panthor_mmu_resume(ptdev);
		ret = drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
		if (!ret) {
			panthor_sched_resume(ptdev);
		} else {
			panthor_mmu_suspend(ptdev);
			panthor_gpu_suspend(ptdev);
		}

		drm_dev_exit(cookie);

		if (ret)
			goto err_suspend_devfreq;
	}

	/* Process any reset request that was raised while suspended. */
	if (atomic_read(&ptdev->reset.pending))
		queue_work(ptdev->reset.wq, &ptdev->reset.work);

	/* Clear all IOMEM mappings pointing to this device after we've
	 * resumed. This way the fake mappings pointing to the dummy pages
	 * are removed and the real iomem mapping will be restored on next
	 * access.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
	mutex_unlock(&ptdev->pm.mmio_lock);
	return 0;

err_suspend_devfreq:
	panthor_devfreq_suspend(ptdev);

err_disable_coregroup_clk:
	clk_disable_unprepare(ptdev->clks.coregroup);

err_disable_stacks_clk:
	clk_disable_unprepare(ptdev->clks.stacks);

err_disable_core_clk:
	clk_disable_unprepare(ptdev->clks.core);

err_set_suspended:
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	return ret;
}
/* PM suspend handler: invalidate userspace MMIO mappings first (so accesses
 * hit the dummy page instead of causing external aborts), quiesce the HW
 * blocks, then stop devfreq and gate the clocks. If devfreq suspend fails,
 * the HW blocks are resumed and the device rolled back to ACTIVE.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int panthor_device_suspend(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int ret, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return -EINVAL;

	/* Clear all IOMEM mappings pointing to this device before we
	 * shutdown the power-domain and clocks. Failing to do that results
	 * in external aborts when the process accesses the iomem region.
	 * We change the state and call unmap_mapping_range() with the
	 * mmio_lock held to make sure the vm_fault handler won't set up
	 * invalid mappings.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	mutex_unlock(&ptdev->pm.mmio_lock);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		cancel_work_sync(&ptdev->reset.work);

		/* We prepare everything as if we were resetting the GPU.
		 * The end of the reset will happen in the resume path though.
		 */
		panthor_sched_suspend(ptdev);
		panthor_fw_suspend(ptdev);
		panthor_mmu_suspend(ptdev);
		panthor_gpu_suspend(ptdev);
		drm_dev_exit(cookie);
	}

	ret = panthor_devfreq_suspend(ptdev);
	if (ret) {
		/* Devfreq refused to stop: bring the HW blocks back up and
		 * report the failure instead of leaving a half-suspended GPU.
		 */
		if (panthor_device_is_initialized(ptdev) &&
		    drm_dev_enter(&ptdev->base, &cookie)) {
			panthor_gpu_resume(ptdev);
			panthor_mmu_resume(ptdev);
			drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
			panthor_sched_resume(ptdev);
			drm_dev_exit(cookie);
		}

		goto err_set_active;
	}

	clk_disable_unprepare(ptdev->clks.coregroup);
	clk_disable_unprepare(ptdev->clks.stacks);
	clk_disable_unprepare(ptdev->clks.core);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	return 0;

err_set_active:
	/* If something failed and we have to revert back to an
	 * active state, we also need to clear the MMIO userspace
	 * mappings, so any dumb pages that were mapped while we
	 * were trying to suspend gets invalidated.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	mutex_unlock(&ptdev->pm.mmio_lock);
	return ret;
}

View file

@ -0,0 +1,394 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */
#ifndef __PANTHOR_DEVICE_H__
#define __PANTHOR_DEVICE_H__
#include <linux/atomic.h>
#include <linux/io-pgtable.h>
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>
struct panthor_csf;
struct panthor_csf_ctx;
struct panthor_device;
struct panthor_gpu;
struct panthor_group_pool;
struct panthor_heap_pool;
struct panthor_job;
struct panthor_mmu;
struct panthor_fw;
struct panthor_perfcnt;
struct panthor_vm;
struct panthor_vm_pool;
/**
* enum panthor_device_pm_state - PM state
*/
enum panthor_device_pm_state {
/** @PANTHOR_DEVICE_PM_STATE_SUSPENDED: Device is suspended. */
PANTHOR_DEVICE_PM_STATE_SUSPENDED = 0,
/** @PANTHOR_DEVICE_PM_STATE_RESUMING: Device is being resumed. */
PANTHOR_DEVICE_PM_STATE_RESUMING,
/** @PANTHOR_DEVICE_PM_STATE_ACTIVE: Device is active. */
PANTHOR_DEVICE_PM_STATE_ACTIVE,
/** @PANTHOR_DEVICE_PM_STATE_SUSPENDING: Device is being suspended. */
PANTHOR_DEVICE_PM_STATE_SUSPENDING,
};
/**
* struct panthor_irq - IRQ data
*
* Used to automate IRQ handling for the 3 different IRQs we have in this driver.
*/
struct panthor_irq {
/** @ptdev: Panthor device */
struct panthor_device *ptdev;
/** @irq: IRQ number. */
int irq;
/** @mask: Current mask being applied to xxx_INT_MASK. */
u32 mask;
/** @suspended: Set to true when the IRQ is suspended. */
atomic_t suspended;
};
/**
 * struct panthor_device - Panthor device
 */
struct panthor_device {
	/** @base: Base drm_device. */
	struct drm_device base;

	/** @phys_addr: Physical address of the iomem region. */
	phys_addr_t phys_addr;

	/** @iomem: CPU mapping of the IOMEM region. */
	void __iomem *iomem;

	/** @clks: GPU clocks. */
	struct {
		/** @core: Core clock. */
		struct clk *core;

		/** @stacks: Stacks clock. This clock is optional. */
		struct clk *stacks;

		/** @coregroup: Core group clock. This clock is optional. */
		struct clk *coregroup;
	} clks;

	/** @coherent: True if the CPU/GPU are memory coherent. */
	bool coherent;

	/** @gpu_info: GPU information. */
	struct drm_panthor_gpu_info gpu_info;

	/** @csif_info: Command stream interface information. */
	struct drm_panthor_csif_info csif_info;

	/** @gpu: GPU management data. */
	struct panthor_gpu *gpu;

	/** @fw: FW management data. */
	struct panthor_fw *fw;

	/** @mmu: MMU management data. */
	struct panthor_mmu *mmu;

	/** @scheduler: Scheduler management data. */
	struct panthor_scheduler *scheduler;

	/** @devfreq: Device frequency scaling management data. */
	struct panthor_devfreq *devfreq;

	/** @unplug: Device unplug related fields. */
	struct {
		/** @lock: Lock used to serialize unplug operations. */
		struct mutex lock;

		/**
		 * @done: Completion object signaled when the unplug
		 * operation is done.
		 */
		struct completion done;
	} unplug;

	/** @reset: Reset related fields. */
	struct {
		/** @wq: Ordered workqueue used to schedule reset operations. */
		struct workqueue_struct *wq;

		/** @work: Reset work. */
		struct work_struct work;

		/** @pending: Set to true if a reset is pending. */
		atomic_t pending;
	} reset;

	/** @pm: Power management related data. */
	struct {
		/** @state: Power state. */
		atomic_t state;

		/**
		 * @mmio_lock: Lock protecting MMIO userspace CPU mappings.
		 *
		 * This is needed to ensure we map the dummy IO pages when
		 * the device is being suspended, and the real IO pages when
		 * the device is being resumed. We can't just do with the
		 * state atomicity to deal with this race.
		 */
		struct mutex mmio_lock;

		/**
		 * @dummy_latest_flush: Dummy LATEST_FLUSH page.
		 *
		 * Used to replace the real LATEST_FLUSH page when the GPU
		 * is suspended.
		 */
		struct page *dummy_latest_flush;
	} pm;
};
/**
* struct panthor_file - Panthor file
*/
struct panthor_file {
/** @ptdev: Device attached to this file. */
struct panthor_device *ptdev;
/** @vms: VM pool attached to this file. */
struct panthor_vm_pool *vms;
/** @groups: Scheduling group pool attached to this file. */
struct panthor_group_pool *groups;
};
int panthor_device_init(struct panthor_device *ptdev);
void panthor_device_unplug(struct panthor_device *ptdev);
/**
 * panthor_device_schedule_reset() - Schedules a reset operation
 *
 * The atomic_cmpxchg() guarantees that only the caller transitioning
 * reset.pending from 0 to 1 queues the work. The PM state check avoids
 * queuing while the device isn't active; a pending flag raised during
 * suspend is picked up by panthor_device_resume().
 */
static inline void panthor_device_schedule_reset(struct panthor_device *ptdev)
{
	if (!atomic_cmpxchg(&ptdev->reset.pending, 0, 1) &&
	    atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE)
		queue_work(ptdev->reset.wq, &ptdev->reset.work);
}
/**
* panthor_device_reset_is_pending() - Checks if a reset is pending.
*
* Return: true if a reset is pending, false otherwise.
*/
static inline bool panthor_device_reset_is_pending(struct panthor_device *ptdev)
{
return atomic_read(&ptdev->reset.pending) != 0;
}
int panthor_device_mmap_io(struct panthor_device *ptdev,
struct vm_area_struct *vma);
int panthor_device_resume(struct device *dev);
int panthor_device_suspend(struct device *dev);
/**
 * enum drm_panthor_exception_type - Exception codes.
 *
 * Codes above DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT are considered faults
 * (see panthor_exception_is_fault()).
 */
enum drm_panthor_exception_type {
	DRM_PANTHOR_EXCEPTION_OK = 0x00,
	DRM_PANTHOR_EXCEPTION_TERMINATED = 0x04,
	DRM_PANTHOR_EXCEPTION_KABOOM = 0x05,
	DRM_PANTHOR_EXCEPTION_EUREKA = 0x06,
	DRM_PANTHOR_EXCEPTION_ACTIVE = 0x08,
	DRM_PANTHOR_EXCEPTION_CS_RES_TERM = 0x0f,
	DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT = 0x3f,
	DRM_PANTHOR_EXCEPTION_CS_CONFIG_FAULT = 0x40,
	DRM_PANTHOR_EXCEPTION_CS_ENDPOINT_FAULT = 0x44,
	DRM_PANTHOR_EXCEPTION_CS_BUS_FAULT = 0x48,
	DRM_PANTHOR_EXCEPTION_CS_INSTR_INVALID = 0x49,
	DRM_PANTHOR_EXCEPTION_CS_CALL_STACK_OVERFLOW = 0x4a,
	DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT = 0x4b,
	DRM_PANTHOR_EXCEPTION_INSTR_INVALID_PC = 0x50,
	DRM_PANTHOR_EXCEPTION_INSTR_INVALID_ENC = 0x51,
	DRM_PANTHOR_EXCEPTION_INSTR_BARRIER_FAULT = 0x55,
	DRM_PANTHOR_EXCEPTION_DATA_INVALID_FAULT = 0x58,
	DRM_PANTHOR_EXCEPTION_TILE_RANGE_FAULT = 0x59,
	DRM_PANTHOR_EXCEPTION_ADDR_RANGE_FAULT = 0x5a,
	DRM_PANTHOR_EXCEPTION_IMPRECISE_FAULT = 0x5b,
	DRM_PANTHOR_EXCEPTION_OOM = 0x60,
	DRM_PANTHOR_EXCEPTION_CSF_FW_INTERNAL_ERROR = 0x68,
	DRM_PANTHOR_EXCEPTION_CSF_RES_EVICTION_TIMEOUT = 0x69,
	DRM_PANTHOR_EXCEPTION_GPU_BUS_FAULT = 0x80,
	DRM_PANTHOR_EXCEPTION_GPU_SHAREABILITY_FAULT = 0x88,
	DRM_PANTHOR_EXCEPTION_SYS_SHAREABILITY_FAULT = 0x89,
	DRM_PANTHOR_EXCEPTION_GPU_CACHEABILITY_FAULT = 0x8a,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_0 = 0xc0,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_1 = 0xc1,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_2 = 0xc2,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_3 = 0xc3,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_4 = 0xc4,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_0 = 0xc8,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_1 = 0xc9,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_2 = 0xca,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_3 = 0xcb,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_1 = 0xd9,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_2 = 0xda,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_3 = 0xdb,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_IN = 0xe0,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT0 = 0xe4,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT1 = 0xe5,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT2 = 0xe6,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT3 = 0xe7,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_0 = 0xe8,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_1 = 0xe9,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_2 = 0xea,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_3 = 0xeb,
};
/**
 * panthor_exception_is_fault() - Checks if an exception is a fault.
 * @exception_code: Exception code, one of drm_panthor_exception_type.
 *
 * Return: true if the exception is a fault, false otherwise.
 */
static inline bool
panthor_exception_is_fault(u32 exception_code)
{
	return exception_code > DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT;
}

const char *panthor_exception_name(struct panthor_device *ptdev,
				   u32 exception_code);
/**
 * PANTHOR_IRQ_HANDLER() - Define interrupt handlers and the interrupt
 * registration function.
 * @__name: Short name used to derive the generated function names
 * (panthor_<name>_irq_{raw_handler,threaded_handler,suspend,resume} and
 * panthor_request_<name>_irq()).
 * @__reg_prefix: Register name prefix; <prefix>_INT_{STAT,RAWSTAT,CLEAR,MASK}
 * registers must exist.
 * @__handler: Handler called from the threaded context with the masked
 * raw interrupt status.
 *
 * The boiler-plate to gracefully deal with shared interrupts is
 * auto-generated. All you have to do is call PANTHOR_IRQ_HANDLER()
 * just after the actual handler. The handler prototype is:
 *
 * void (*handler)(struct panthor_device *, u32 status);
 */
#define PANTHOR_IRQ_HANDLER(__name, __reg_prefix, __handler) \
static irqreturn_t panthor_ ## __name ## _irq_raw_handler(int irq, void *data) \
{ \
	struct panthor_irq *pirq = data; \
	struct panthor_device *ptdev = pirq->ptdev; \
 \
	if (atomic_read(&pirq->suspended)) \
		return IRQ_NONE; \
	if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT)) \
		return IRQ_NONE; \
 \
	gpu_write(ptdev, __reg_prefix ## _INT_MASK, 0); \
	return IRQ_WAKE_THREAD; \
} \
 \
static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *data) \
{ \
	struct panthor_irq *pirq = data; \
	struct panthor_device *ptdev = pirq->ptdev; \
	irqreturn_t ret = IRQ_NONE; \
 \
	while (true) { \
		u32 status = gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask; \
 \
		if (!status) \
			break; \
 \
		gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status); \
 \
		__handler(ptdev, status); \
		ret = IRQ_HANDLED; \
	} \
 \
	if (!atomic_read(&pirq->suspended)) \
		gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask); \
 \
	return ret; \
} \
 \
static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq) \
{ \
	int cookie; \
 \
	atomic_set(&pirq->suspended, true); \
 \
	if (drm_dev_enter(&pirq->ptdev->base, &cookie)) { \
		gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0); \
		synchronize_irq(pirq->irq); \
		drm_dev_exit(cookie); \
	} \
 \
	pirq->mask = 0; \
} \
 \
static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask) \
{ \
	int cookie; \
 \
	atomic_set(&pirq->suspended, false); \
	pirq->mask = mask; \
 \
	if (drm_dev_enter(&pirq->ptdev->base, &cookie)) { \
		gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask); \
		gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, mask); \
		drm_dev_exit(cookie); \
	} \
} \
 \
static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
					      struct panthor_irq *pirq, \
					      int irq, u32 mask) \
{ \
	pirq->ptdev = ptdev; \
	pirq->irq = irq; \
	panthor_ ## __name ## _irq_resume(pirq, mask); \
 \
	return devm_request_threaded_irq(ptdev->base.dev, irq, \
					 panthor_ ## __name ## _irq_raw_handler, \
					 panthor_ ## __name ## _irq_threaded_handler, \
					 IRQF_SHARED, KBUILD_MODNAME "-" # __name, \
					 pirq); \
}
/**
 * panthor_device_mmio_offset() - Turn a user MMIO offset into a kernel one
 * @offset: Offset to convert.
 *
 * mmap2's pgoffset field is limited to 32-bit on 32-bit userspace, so the
 * user-visible MMIO offset is architecture specific. This helper translates
 * the offset passed by userspace into the one the kernel driver expects.
 *
 * A matching kernel/userspace bitness leaves the offset untouched; a 32-bit
 * process on a 64-bit kernel gets its offset rebased into the 64-bit range.
 * A 32-bit kernel with 64-bit userspace cannot happen.
 *
 * Return: Adjusted offset.
 */
static inline u64 panthor_device_mmio_offset(u64 offset)
{
#ifdef CONFIG_ARM64
	if (test_tsk_thread_flag(current, TIF_32BIT))
		return offset + (DRM_PANTHOR_USER_MMIO_OFFSET_64BIT -
				 DRM_PANTHOR_USER_MMIO_OFFSET_32BIT);
#endif

	return offset;
}
extern struct workqueue_struct *panthor_cleanup_wq;
#endif

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,503 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2023 Collabora ltd. */
#ifndef __PANTHOR_MCU_H__
#define __PANTHOR_MCU_H__
#include <linux/types.h>
struct panthor_device;
struct panthor_kernel_bo;
/* Maximum number of command stream group slots / streams per group exposed
 * by the FW interface.
 */
#define MAX_CSGS				31
#define MAX_CS_PER_CSG				32

/**
 * struct panthor_fw_ringbuf_input_iface - Host-writable side of a FW ring
 * buffer interface.
 */
struct panthor_fw_ringbuf_input_iface {
	/** @insert: Ring buffer insert point. */
	u64 insert;
	/** @extract: Ring buffer extract point. */
	u64 extract;
};

/**
 * struct panthor_fw_ringbuf_output_iface - FW-written side of a ring buffer
 * interface.
 */
struct panthor_fw_ringbuf_output_iface {
	/** @extract: Ring buffer extract point. */
	u64 extract;
	/** @active: Ring buffer activity status. */
	u32 active;
};

/**
 * struct panthor_fw_cs_control_iface - Command stream slot control interface.
 *
 * Exposes slot capabilities (decode @features with the CS_FEATURES_*
 * helpers) and the addresses of the input/output sections.
 */
struct panthor_fw_cs_control_iface {
#define CS_FEATURES_WORK_REGS(x)	(((x) & GENMASK(7, 0)) + 1)
#define CS_FEATURES_SCOREBOARDS(x)	(((x) & GENMASK(15, 8)) >> 8)
#define CS_FEATURES_COMPUTE		BIT(16)
#define CS_FEATURES_FRAGMENT		BIT(17)
#define CS_FEATURES_TILER		BIT(18)
	u32 features;
	u32 input_va;
	u32 output_va;
};
/**
 * struct panthor_fw_cs_input_iface - Command stream slot input interface.
 *
 * Host-writable section of a command stream slot. The CS_* defines
 * describe bits/fields of the register they precede.
 */
struct panthor_fw_cs_input_iface {
#define CS_STATE_MASK			GENMASK(2, 0)
#define CS_STATE_STOP			0
#define CS_STATE_START			1
#define CS_EXTRACT_EVENT		BIT(4)
#define CS_IDLE_SYNC_WAIT		BIT(8)
#define CS_IDLE_PROTM_PENDING		BIT(9)
#define CS_IDLE_EMPTY			BIT(10)
#define CS_IDLE_RESOURCE_REQ		BIT(11)
#define CS_TILER_OOM			BIT(26)
#define CS_PROTM_PENDING		BIT(27)
#define CS_FATAL			BIT(30)
#define CS_FAULT			BIT(31)
#define CS_REQ_MASK			(CS_STATE_MASK | \
					 CS_EXTRACT_EVENT | \
					 CS_IDLE_SYNC_WAIT | \
					 CS_IDLE_PROTM_PENDING | \
					 CS_IDLE_EMPTY | \
					 CS_IDLE_RESOURCE_REQ)
#define CS_EVT_MASK			(CS_TILER_OOM | \
					 CS_PROTM_PENDING | \
					 CS_FATAL | \
					 CS_FAULT)
	/** @req: Request register, updated with panthor_fw_{toggle,update}_reqs(). */
	u32 req;

#define CS_CONFIG_PRIORITY(x)		((x) & GENMASK(3, 0))
#define CS_CONFIG_DOORBELL(x)		(((x) << 8) & GENMASK(15, 8))
	u32 config;
	u32 reserved1;
	u32 ack_irq_mask;
	u64 ringbuf_base;
	u32 ringbuf_size;
	u32 reserved2;
	u64 heap_start;
	u64 heap_end;
	u64 ringbuf_input;
	u64 ringbuf_output;
	u32 instr_config;
	u32 instrbuf_size;
	u64 instrbuf_base;
	u64 instrbuf_offset_ptr;
};
/**
 * struct panthor_fw_cs_output_iface - Command stream slot output interface.
 *
 * FW-written section of a command stream slot; the host treats it as
 * read-only (see the const pointer in struct panthor_fw_cs_iface).
 */
struct panthor_fw_cs_output_iface {
	/** @ack: Acknowledge register, mirrors the input section's req. */
	u32 ack;
	u32 reserved1[15];
	u64 status_cmd_ptr;

#define CS_STATUS_WAIT_SB_MASK		GENMASK(15, 0)
#define CS_STATUS_WAIT_SB_SRC_MASK	GENMASK(19, 16)
#define CS_STATUS_WAIT_SB_SRC_NONE	(0 << 16)
#define CS_STATUS_WAIT_SB_SRC_WAIT	(8 << 16)
#define CS_STATUS_WAIT_SYNC_COND_LE	(0 << 24)
#define CS_STATUS_WAIT_SYNC_COND_GT	(1 << 24)
#define CS_STATUS_WAIT_SYNC_COND_MASK	GENMASK(27, 24)
#define CS_STATUS_WAIT_PROGRESS	BIT(28)
#define CS_STATUS_WAIT_PROTM		BIT(29)
#define CS_STATUS_WAIT_SYNC_64B	BIT(30)
#define CS_STATUS_WAIT_SYNC		BIT(31)
	u32 status_wait;
	u32 status_req_resource;
	u64 status_wait_sync_ptr;
	u32 status_wait_sync_value;
	u32 status_scoreboards;

#define CS_STATUS_BLOCKED_REASON_UNBLOCKED	0
#define CS_STATUS_BLOCKED_REASON_SB_WAIT	1
#define CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT	2
#define CS_STATUS_BLOCKED_REASON_SYNC_WAIT	3
#define CS_STATUS_BLOCKED_REASON_DEFERRED	5
#define CS_STATUS_BLOCKED_REASON_RES		6
#define CS_STATUS_BLOCKED_REASON_FLUSH		7
#define CS_STATUS_BLOCKED_REASON_MASK		GENMASK(3, 0)
	u32 status_blocked_reason;
	u32 status_wait_sync_value_hi;
	u32 reserved2[6];

#define CS_EXCEPTION_TYPE(x)		((x) & GENMASK(7, 0))
#define CS_EXCEPTION_DATA(x)		(((x) >> 8) & GENMASK(23, 0))
	u32 fault;
	u32 fatal;
	u64 fault_info;
	u64 fatal_info;
	u32 reserved3[10];
	u32 heap_vt_start;
	u32 heap_vt_end;
	u32 reserved4;
	u32 heap_frag_end;
	u64 heap_address;
};
/**
 * struct panthor_fw_csg_control_iface - Command stream group slot control
 * interface, exposing slot properties.
 */
struct panthor_fw_csg_control_iface {
	u32 features;
	u32 input_va;
	u32 output_va;
	u32 suspend_size;
	u32 protm_suspend_size;
	u32 stream_num;
	u32 stream_stride;
};

/**
 * struct panthor_fw_csg_input_iface - Command stream group slot input
 * interface (host-writable).
 */
struct panthor_fw_csg_input_iface {
#define CSG_STATE_MASK			GENMASK(2, 0)
#define CSG_STATE_TERMINATE		0
#define CSG_STATE_START		1
#define CSG_STATE_SUSPEND		2
#define CSG_STATE_RESUME		3
#define CSG_ENDPOINT_CONFIG		BIT(4)
#define CSG_STATUS_UPDATE		BIT(5)
#define CSG_SYNC_UPDATE		BIT(28)
#define CSG_IDLE			BIT(29)
#define CSG_DOORBELL			BIT(30)
#define CSG_PROGRESS_TIMER_EVENT	BIT(31)
#define CSG_REQ_MASK			(CSG_STATE_MASK | \
					 CSG_ENDPOINT_CONFIG | \
					 CSG_STATUS_UPDATE)
#define CSG_EVT_MASK			(CSG_SYNC_UPDATE | \
					 CSG_IDLE | \
					 CSG_PROGRESS_TIMER_EVENT)
	/** @req: Request register, updated with panthor_fw_{toggle,update}_reqs(). */
	u32 req;
	u32 ack_irq_mask;
	u32 doorbell_req;
	u32 cs_irq_ack;
	u32 reserved1[4];
	u64 allow_compute;
	u64 allow_fragment;
	u32 allow_other;

#define CSG_EP_REQ_COMPUTE(x)		((x) & GENMASK(7, 0))
#define CSG_EP_REQ_FRAGMENT(x)		(((x) << 8) & GENMASK(15, 8))
#define CSG_EP_REQ_TILER(x)		(((x) << 16) & GENMASK(19, 16))
#define CSG_EP_REQ_EXCL_COMPUTE	BIT(20)
#define CSG_EP_REQ_EXCL_FRAGMENT	BIT(21)
#define CSG_EP_REQ_PRIORITY(x)		(((x) << 28) & GENMASK(31, 28))
#define CSG_EP_REQ_PRIORITY_MASK	GENMASK(31, 28)
	u32 endpoint_req;
	u32 reserved2[2];
	u64 suspend_buf;
	u64 protm_suspend_buf;
	u32 config;
	u32 iter_trace_config;
};

/**
 * struct panthor_fw_csg_output_iface - Command stream group slot output
 * interface (FW-written, read-only for the host).
 */
struct panthor_fw_csg_output_iface {
	/** @ack: Acknowledge register, mirrors the input section's req. */
	u32 ack;
	u32 reserved1;
	u32 doorbell_ack;
	u32 cs_irq_req;
	u32 status_endpoint_current;
	u32 status_endpoint_req;

#define CSG_STATUS_STATE_IS_IDLE	BIT(0)
	u32 status_state;
	u32 resource_dep;
};
/**
 * struct panthor_fw_global_control_iface - Global control interface,
 * exposing FW-wide properties.
 */
struct panthor_fw_global_control_iface {
	u32 version;
	u32 features;
	u32 input_va;
	u32 output_va;
	u32 group_num;
	u32 group_stride;
	u32 perfcnt_size;
	u32 instr_features;
};

/**
 * struct panthor_fw_global_input_iface - Global input interface
 * (host-writable).
 */
struct panthor_fw_global_input_iface {
#define GLB_HALT			BIT(0)
#define GLB_CFG_PROGRESS_TIMER		BIT(1)
#define GLB_CFG_ALLOC_EN		BIT(2)
#define GLB_CFG_POWEROFF_TIMER		BIT(3)
#define GLB_PROTM_ENTER		BIT(4)
#define GLB_PERFCNT_EN			BIT(5)
#define GLB_PERFCNT_SAMPLE		BIT(6)
#define GLB_COUNTER_EN			BIT(7)
#define GLB_PING			BIT(8)
#define GLB_FWCFG_UPDATE		BIT(9)
#define GLB_IDLE_EN			BIT(10)
#define GLB_SLEEP			BIT(12)
#define GLB_INACTIVE_COMPUTE		BIT(20)
#define GLB_INACTIVE_FRAGMENT		BIT(21)
#define GLB_INACTIVE_TILER		BIT(22)
#define GLB_PROTM_EXIT			BIT(23)
#define GLB_PERFCNT_THRESHOLD		BIT(24)
#define GLB_PERFCNT_OVERFLOW		BIT(25)
#define GLB_IDLE			BIT(26)
#define GLB_DBG_CSF			BIT(30)
#define GLB_DBG_HOST			BIT(31)
#define GLB_REQ_MASK			GENMASK(10, 0)
#define GLB_EVT_MASK			GENMASK(26, 20)
	/** @req: Request register, updated with panthor_fw_{toggle,update}_reqs(). */
	u32 req;
	u32 ack_irq_mask;
	u32 doorbell_req;
	u32 reserved1;
	u32 progress_timer;

#define GLB_TIMER_VAL(x)		((x) & GENMASK(30, 0))
#define GLB_TIMER_SOURCE_GPU_COUNTER	BIT(31)
	u32 poweroff_timer;
	u64 core_en_mask;
	u32 reserved2;
	u32 perfcnt_as;
	u64 perfcnt_base;
	u32 perfcnt_extract;
	u32 reserved3[3];
	u32 perfcnt_config;
	u32 perfcnt_csg_select;
	u32 perfcnt_fw_enable;
	u32 perfcnt_csg_enable;
	u32 perfcnt_csf_enable;
	u32 perfcnt_shader_enable;
	u32 perfcnt_tiler_enable;
	u32 perfcnt_mmu_l2_enable;
	u32 reserved4[8];
	u32 idle_timer;
};

/**
 * enum panthor_fw_halt_status - Halt statuses reported in
 * panthor_fw_global_output_iface::halt_status.
 */
enum panthor_fw_halt_status {
	PANTHOR_FW_HALT_OK = 0,
	PANTHOR_FW_HALT_ON_PANIC = 0x4e,
	PANTHOR_FW_HALT_ON_WATCHDOG_EXPIRATION = 0x4f,
};

/**
 * struct panthor_fw_global_output_iface - Global output interface
 * (FW-written, read-only for the host).
 */
struct panthor_fw_global_output_iface {
	/** @ack: Acknowledge register, mirrors the input section's req. */
	u32 ack;
	u32 reserved1;
	u32 doorbell_ack;
	u32 reserved2;
	/** @halt_status: One of enum panthor_fw_halt_status. */
	u32 halt_status;
	u32 perfcnt_status;
	u32 perfcnt_insert;
};
/**
 * struct panthor_fw_cs_iface - Firmware command stream slot interface
 */
struct panthor_fw_cs_iface {
	/**
	 * @lock: Lock protecting access to the panthor_fw_cs_input_iface::req
	 * field.
	 *
	 * Needed so we can update the req field concurrently from the interrupt
	 * handler and the scheduler logic.
	 *
	 * TODO: Ideally we'd want to use a cmpxchg() to update the req, but FW
	 * interface sections are mapped uncached/write-combined right now, and
	 * using cmpxchg() on such mappings leads to SError faults. Revisit when
	 * we have 'SHARED' GPU mappings hooked up.
	 */
	spinlock_t lock;

	/**
	 * @control: Command stream slot control interface.
	 *
	 * Used to expose command stream slot properties.
	 *
	 * This interface is read-only.
	 */
	struct panthor_fw_cs_control_iface *control;

	/**
	 * @input: Command stream slot input interface.
	 *
	 * Used for host updates/events.
	 */
	struct panthor_fw_cs_input_iface *input;

	/**
	 * @output: Command stream slot output interface.
	 *
	 * Used for FW updates/events.
	 *
	 * This interface is read-only.
	 */
	const struct panthor_fw_cs_output_iface *output;
};

/**
 * struct panthor_fw_csg_iface - Firmware command stream group slot interface
 */
struct panthor_fw_csg_iface {
	/**
	 * @lock: Lock protecting access to the panthor_fw_csg_input_iface::req
	 * field.
	 *
	 * Needed so we can update the req field concurrently from the interrupt
	 * handler and the scheduler logic.
	 *
	 * TODO: Ideally we'd want to use a cmpxchg() to update the req, but FW
	 * interface sections are mapped uncached/write-combined right now, and
	 * using cmpxchg() on such mappings leads to SError faults. Revisit when
	 * we have 'SHARED' GPU mappings hooked up.
	 */
	spinlock_t lock;

	/**
	 * @control: Command stream group slot control interface.
	 *
	 * Used to expose command stream group slot properties.
	 *
	 * This interface is read-only.
	 */
	const struct panthor_fw_csg_control_iface *control;

	/**
	 * @input: Command stream group slot input interface.
	 *
	 * Used for host updates/events.
	 */
	struct panthor_fw_csg_input_iface *input;

	/**
	 * @output: Command stream group slot output interface.
	 *
	 * Used for FW updates/events.
	 *
	 * This interface is read-only.
	 */
	const struct panthor_fw_csg_output_iface *output;
};

/**
 * struct panthor_fw_global_iface - Firmware global interface
 */
struct panthor_fw_global_iface {
	/**
	 * @lock: Lock protecting access to the panthor_fw_global_input_iface::req
	 * field.
	 *
	 * Needed so we can update the req field concurrently from the interrupt
	 * handler and the scheduler/FW management logic.
	 *
	 * TODO: Ideally we'd want to use a cmpxchg() to update the req, but FW
	 * interface sections are mapped uncached/write-combined right now, and
	 * using cmpxchg() on such mappings leads to SError faults. Revisit when
	 * we have 'SHARED' GPU mappings hooked up.
	 */
	spinlock_t lock;

	/**
	 * @control: Global control interface.
	 *
	 * Used to expose global FW properties.
	 *
	 * This interface is read-only.
	 */
	const struct panthor_fw_global_control_iface *control;

	/**
	 * @input: Global input interface.
	 *
	 * Used for host updates/events.
	 */
	struct panthor_fw_global_input_iface *input;

	/**
	 * @output: Global output interface.
	 *
	 * Used for FW updates/events.
	 *
	 * This interface is read-only.
	 */
	const struct panthor_fw_global_output_iface *output;
};
/**
 * panthor_fw_toggle_reqs() - Toggle acknowledge bits to send an event to the FW
 * @__iface: The interface to operate on.
 * @__in_reg: Name of the register to update in the input section of the interface.
 * @__out_reg: Name of the register to take as a reference in the output section of the
 * interface.
 * @__mask: Mask to apply to the update.
 *
 * The Host -> FW event/message passing was designed to be lockless, with each side of
 * the channel having its writeable section. Events are signaled as a difference between
 * the host and FW side in the req/ack registers (when a bit differs, there's an event
 * pending, when they are the same, nothing needs attention).
 *
 * This helper allows one to update the req register based on the current value of the
 * ack register managed by the FW. Toggling a specific bit will flag an event. In order
 * for events to be re-evaluated, the interface doorbell needs to be rung.
 *
 * Concurrent accesses to the same req register are covered.
 *
 * Anything requiring atomic updates to multiple registers requires a dedicated lock.
 */
#define panthor_fw_toggle_reqs(__iface, __in_reg, __out_reg, __mask) \
	do { \
		u32 __cur_val, __new_val, __out_val; \
		spin_lock(&(__iface)->lock); \
		__cur_val = READ_ONCE((__iface)->input->__in_reg); \
		__out_val = READ_ONCE((__iface)->output->__out_reg); \
		__new_val = ((__out_val ^ (__mask)) & (__mask)) | (__cur_val & ~(__mask)); \
		WRITE_ONCE((__iface)->input->__in_reg, __new_val); \
		spin_unlock(&(__iface)->lock); \
	} while (0)

/**
 * panthor_fw_update_reqs() - Update bits to reflect a configuration change
 * @__iface: The interface to operate on.
 * @__in_reg: Name of the register to update in the input section of the interface.
 * @__val: Value to set.
 * @__mask: Mask to apply to the update.
 *
 * Some configuration get passed through req registers that are also used to
 * send events to the FW. Those req registers being updated from the interrupt
 * handler, they require special helpers to update the configuration part as well.
 *
 * Concurrent accesses to the same req register are covered.
 *
 * Anything requiring atomic updates to multiple registers requires a dedicated lock.
 */
#define panthor_fw_update_reqs(__iface, __in_reg, __val, __mask) \
	do { \
		u32 __cur_val, __new_val; \
		spin_lock(&(__iface)->lock); \
		__cur_val = READ_ONCE((__iface)->input->__in_reg); \
		__new_val = (__cur_val & ~(__mask)) | ((__val) & (__mask)); \
		WRITE_ONCE((__iface)->input->__in_reg, __new_val); \
		spin_unlock(&(__iface)->lock); \
	} while (0)
/* Interface section accessors. */
struct panthor_fw_global_iface *
panthor_fw_get_glb_iface(struct panthor_device *ptdev);

struct panthor_fw_csg_iface *
panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot);

struct panthor_fw_cs_iface *
panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot);

/* Wait for the FW to acknowledge the bits in req_mask. */
int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_id, u32 req_mask,
			     u32 *acked, u32 timeout_ms);

int panthor_fw_glb_wait_acks(struct panthor_device *ptdev, u32 req_mask, u32 *acked,
			     u32 timeout_ms);

void panthor_fw_ring_csg_doorbells(struct panthor_device *ptdev, u32 csg_slot);

/* Queue interface / suspend buffer allocation helpers. */
struct panthor_kernel_bo *
panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
				 struct panthor_fw_ringbuf_input_iface **input,
				 const struct panthor_fw_ringbuf_output_iface **output,
				 u32 *input_fw_va, u32 *output_fw_va);
struct panthor_kernel_bo *
panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size);

struct panthor_vm *panthor_fw_vm(struct panthor_device *ptdev);

void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang);
int panthor_fw_post_reset(struct panthor_device *ptdev);
/**
 * panthor_fw_suspend() - Suspend the FW.
 * @ptdev: Device.
 *
 * Implemented on top of panthor_fw_pre_reset() with on_hang = false.
 */
static inline void panthor_fw_suspend(struct panthor_device *ptdev)
{
	panthor_fw_pre_reset(ptdev, false);
}

/**
 * panthor_fw_resume() - Resume the FW.
 * @ptdev: Device.
 *
 * Return: The panthor_fw_post_reset() return value.
 */
static inline int panthor_fw_resume(struct panthor_device *ptdev)
{
	return panthor_fw_post_reset(ptdev);
}

int panthor_fw_init(struct panthor_device *ptdev);
void panthor_fw_unplug(struct panthor_device *ptdev);
#endif

View file

@ -0,0 +1,230 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <drm/panthor_drm.h>
#include "panthor_device.h"
#include "panthor_gem.h"
#include "panthor_mmu.h"
/* GEM object release callback.
 *
 * The exclusive VM root GEM pointer is sampled before drm_gem_shmem_free()
 * tears down @bo, so the final reference drop happens on a still-valid
 * pointer. exclusive_vm_root_gem may be NULL for shared GEMs;
 * drm_gem_object_put() accepts NULL.
 */
static void panthor_gem_free_object(struct drm_gem_object *obj)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;

	drm_gem_free_mmap_offset(&bo->base.base);
	mutex_destroy(&bo->gpuva_list_lock);
	drm_gem_shmem_free(&bo->base);
	drm_gem_object_put(vm_root_gem);
}
/**
 * panthor_kernel_bo_destroy() - Destroy a kernel buffer object
 * @vm: The VM this BO was mapped to.
 * @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction
 * is skipped.
 */
void panthor_kernel_bo_destroy(struct panthor_vm *vm,
			       struct panthor_kernel_bo *bo)
{
	int ret;

	if (IS_ERR_OR_NULL(bo))
		return;

	panthor_kernel_bo_vunmap(bo);

	/* A kernel BO must be destroyed against the VM it was created for.
	 * On a mismatch, only the wrapper is freed: the VA range and GEM
	 * reference are left alone rather than being released on the wrong VM.
	 */
	if (drm_WARN_ON(bo->obj->dev,
			to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
		goto out_free_bo;

	/* If the unmap fails, the VA and GEM object are intentionally kept
	 * alive — presumably to avoid a use-after-free through a still-live
	 * GPU mapping.
	 */
	ret = panthor_vm_unmap_range(vm, bo->va_node.start,
				     panthor_kernel_bo_size(bo));
	if (ret)
		goto out_free_bo;

	panthor_vm_free_va(vm, &bo->va_node);
	drm_gem_object_put(bo->obj);

out_free_bo:
	kfree(bo);
}
/**
 * panthor_kernel_bo_create() - Create and map a GEM object to a VM
 * @ptdev: Device.
 * @vm: VM to map the GEM to. If NULL, the kernel object is not GPU mapped.
 * @size: Size of the buffer object.
 * @bo_flags: Combination of drm_panthor_bo_flags flags.
 * @vm_map_flags: Combination of drm_panthor_vm_bind_op_flags (only those
 * that are related to map operations).
 * @gpu_va: GPU address assigned when mapping to the VM.
 * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
 * automatically allocated.
 *
 * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
 */
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
			 size_t size, u32 bo_flags, u32 vm_map_flags,
			 u64 gpu_va)
{
	struct drm_gem_shmem_object *obj;
	struct panthor_kernel_bo *kbo;
	struct panthor_gem_object *bo;
	int ret;

	if (drm_WARN_ON(&ptdev->base, !vm))
		return ERR_PTR(-EINVAL);

	kbo = kzalloc(sizeof(*kbo), GFP_KERNEL);
	if (!kbo)
		return ERR_PTR(-ENOMEM);

	obj = drm_gem_shmem_create(&ptdev->base, size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_free_bo;
	}

	bo = to_panthor_bo(&obj->base);
	/* Use the effective backing size reported by the GEM object for the
	 * VA allocation/mapping below.
	 */
	size = obj->base.size;
	kbo->obj = &obj->base;
	bo->flags = bo_flags;

	ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
	if (ret)
		goto err_put_obj;

	ret = panthor_vm_map_bo_range(vm, bo, 0, size, kbo->va_node.start, vm_map_flags);
	if (ret)
		goto err_free_va;

	/* Tie the BO to the VM's root GEM so it can't migrate to another VM,
	 * and share the root GEM's reservation object.
	 */
	bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
	drm_gem_object_get(bo->exclusive_vm_root_gem);
	bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
	return kbo;

err_free_va:
	panthor_vm_free_va(vm, &kbo->va_node);

err_put_obj:
	drm_gem_object_put(&obj->base);

err_free_bo:
	kfree(kbo);
	return ERR_PTR(ret);
}
static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct panthor_gem_object *bo = to_panthor_bo(obj);
/* Don't allow mmap on objects that have the NO_MMAP flag set. */
if (bo->flags & DRM_PANTHOR_BO_NO_MMAP)
return -EINVAL;
return drm_gem_shmem_object_mmap(obj, vma);
}
static struct dma_buf *
panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
{
/* We can't export GEMs that have an exclusive VM. */
if (to_panthor_bo(obj)->exclusive_vm_root_gem)
return ERR_PTR(-EINVAL);
return drm_gem_prime_export(obj, flags);
}
/* GEM object vtable: shmem helpers everywhere, except for free/mmap/export
 * which need driver-specific behavior (see the callbacks above).
 */
static const struct drm_gem_object_funcs panthor_gem_funcs = {
	.free = panthor_gem_free_object,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = panthor_gem_mmap,
	.export = panthor_gem_prime_export,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
/**
 * panthor_gem_create_object - Implementation of driver->gem_create_object.
 * @ddev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 *
 * Return: A GEM object on success, an ERR_PTR(-ENOMEM) otherwise.
 */
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->base.base.funcs = &panthor_gem_funcs;
	/* Map write-combined unless the device is IO-coherent. */
	obj->base.map_wc = !ptdev->coherent;
	mutex_init(&obj->gpuva_list_lock);
	drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);

	return &obj->base.base;
}
/**
 * panthor_gem_create_with_handle() - Create a GEM object and attach it to a handle.
 * @file: DRM file.
 * @ddev: DRM device.
 * @exclusive_vm: Exclusive VM. Not NULL if the GEM object can't be shared.
 * @size: Size of the GEM object to allocate. Updated with the effective
 * backing size on success.
 * @flags: Combination of drm_panthor_bo_flags flags.
 * @handle: Pointer holding the handle pointing to the new GEM object.
 *
 * Return: Zero on success, a negative error code otherwise.
 */
int
panthor_gem_create_with_handle(struct drm_file *file,
			       struct drm_device *ddev,
			       struct panthor_vm *exclusive_vm,
			       u64 *size, u32 flags, u32 *handle)
{
	int ret;
	struct drm_gem_shmem_object *shmem;
	struct panthor_gem_object *bo;

	shmem = drm_gem_shmem_create(ddev, *size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	bo = to_panthor_bo(&shmem->base);
	bo->flags = flags;

	if (exclusive_vm) {
		/* Tie the GEM to its exclusive VM's root GEM and share the
		 * root GEM's reservation object.
		 */
		bo->exclusive_vm_root_gem = panthor_vm_root_gem(exclusive_vm);
		drm_gem_object_get(bo->exclusive_vm_root_gem);
		bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
	}

	/*
	 * Register the object in the file's handle table. The returned
	 * handle is what userspace uses to refer to this GEM object.
	 */
	ret = drm_gem_handle_create(file, &shmem->base, handle);
	if (!ret)
		*size = bo->base.base.size;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	return ret;
}

View file

@ -0,0 +1,142 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */
#ifndef __PANTHOR_GEM_H__
#define __PANTHOR_GEM_H__
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>
#include <linux/iosys-map.h>
#include <linux/rwsem.h>
struct panthor_vm;
/**
 * struct panthor_gem_object - Driver specific GEM object.
 */
struct panthor_gem_object {
	/** @base: Inherit from drm_gem_shmem_object. */
	struct drm_gem_shmem_object base;

	/**
	 * @exclusive_vm_root_gem: Root GEM of the exclusive VM this GEM object
	 * is attached to.
	 *
	 * If @exclusive_vm_root_gem != NULL, any attempt to bind the GEM to a
	 * different VM will fail, and the GEM can't be exported as a dma-buf.
	 *
	 * All FW memory objects have this field set to the root GEM of the MCU
	 * VM.
	 */
	struct drm_gem_object *exclusive_vm_root_gem;

	/**
	 * @gpuva_list_lock: Custom GPUVA lock.
	 *
	 * Used to protect insertion of drm_gpuva elements to the
	 * drm_gem_object.gpuva.list list.
	 *
	 * We can't use the GEM resv for that, because drm_gpuva_link() is
	 * called in a dma-signaling path, where we're not allowed to take
	 * resv locks.
	 */
	struct mutex gpuva_list_lock;

	/** @flags: Combination of drm_panthor_bo_flags flags. */
	u32 flags;
};
/**
 * struct panthor_kernel_bo - Kernel buffer object.
 *
 * These objects are only manipulated by the kernel driver and not
 * directly exposed to the userspace. The GPU address of a kernel
 * BO might be passed to userspace though.
 */
struct panthor_kernel_bo {
	/**
	 * @obj: The GEM object backing this kernel buffer object.
	 */
	struct drm_gem_object *obj;

	/**
	 * @va_node: VA space allocated to this GEM.
	 */
	struct drm_mm_node va_node;

	/**
	 * @kmap: Kernel CPU mapping of @gem. NULL when not mapped.
	 */
	void *kmap;
};

/* Downcast a GEM object to its containing panthor_gem_object. */
static inline
struct panthor_gem_object *to_panthor_bo(struct drm_gem_object *obj)
{
	return container_of(to_drm_gem_shmem_obj(obj), struct panthor_gem_object, base);
}
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size);

struct drm_gem_object *
panthor_gem_prime_import_sg_table(struct drm_device *ddev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt);

/* Use the kernel-internal u32 type for @handle, matching the definition
 * (the declaration previously said uint32_t, which is the same type but
 * inconsistent with the rest of the driver).
 */
int
panthor_gem_create_with_handle(struct drm_file *file,
			       struct drm_device *ddev,
			       struct panthor_vm *exclusive_vm,
			       u64 *size, u32 flags, u32 *handle);
/**
 * panthor_kernel_bo_gpuva() - GPU virtual address of a kernel BO.
 * @bo: Kernel BO.
 *
 * Return: Start of the VA range reserved for @bo.
 */
static inline u64
panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
{
	return bo->va_node.start;
}

/**
 * panthor_kernel_bo_size() - Size of a kernel BO.
 * @bo: Kernel BO.
 *
 * Return: Size of the underlying GEM object.
 */
static inline size_t
panthor_kernel_bo_size(struct panthor_kernel_bo *bo)
{
	return bo->obj->size;
}
/**
 * panthor_kernel_bo_vmap() - Map a kernel BO in the kernel address space.
 * @bo: Kernel BO to map.
 *
 * The mapping is cached in @bo->kmap, so calling this on an already-mapped
 * BO is a no-op.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static inline int
panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
{
	struct iosys_map map;
	int ret;

	if (bo->kmap)
		return 0;

	ret = drm_gem_vmap_unlocked(bo->obj, &map);
	if (ret)
		return ret;

	bo->kmap = map.vaddr;
	return 0;
}

/**
 * panthor_kernel_bo_vunmap() - Release the CPU mapping created by
 * panthor_kernel_bo_vmap().
 * @bo: Kernel BO to unmap. No-op if the BO is not currently mapped.
 */
static inline void
panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
{
	if (bo->kmap) {
		struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);

		drm_gem_vunmap_unlocked(bo->obj, &map);
		bo->kmap = NULL;
	}
}
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
u64 gpu_va);
void panthor_kernel_bo_destroy(struct panthor_vm *vm,
struct panthor_kernel_bo *bo);
#endif /* __PANTHOR_GEM_H__ */

View file

@ -0,0 +1,482 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include "panthor_device.h"
#include "panthor_gpu.h"
#include "panthor_regs.h"
/**
 * struct panthor_gpu - GPU block management data.
 */
struct panthor_gpu {
	/** @irq: GPU irq. */
	struct panthor_irq irq;

	/** @reqs_lock: Lock protecting access to pending_reqs. */
	spinlock_t reqs_lock;

	/** @pending_reqs: Pending GPU requests. */
	u32 pending_reqs;

	/** @reqs_acked: GPU request wait queue, woken when requests complete. */
	wait_queue_head_t reqs_acked;
};

/**
 * struct panthor_model - GPU model description
 */
struct panthor_model {
	/** @name: Model name. */
	const char *name;

	/** @arch_major: Major version number of architecture. */
	u8 arch_major;

	/** @product_major: Major version number of product. */
	u8 product_major;
};
/**
 * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
 * by a combination of the major architecture version and the major product
 * version.
 * @_name: Name for the GPU model.
 * @_arch_major: Architecture major.
 * @_product_major: Product major.
 */
#define GPU_MODEL(_name, _arch_major, _product_major) \
{\
	.name = __stringify(_name), \
	.arch_major = _arch_major, \
	.product_major = _product_major, \
}

/* Known GPU models; the empty entry terminates the lookup loop in
 * panthor_gpu_init_info().
 */
static const struct panthor_model gpu_models[] = {
	GPU_MODEL(g610, 10, 7),
	{},
};

/* Interrupt sources handled by the GPU block. */
#define GPU_INTERRUPTS_MASK	\
	(GPU_IRQ_FAULT | \
	 GPU_IRQ_PROTM_FAULT | \
	 GPU_IRQ_RESET_COMPLETED | \
	 GPU_IRQ_CLEAN_CACHES_COMPLETED)
/*
 * panthor_gpu_init_info() - Read and cache the GPU configuration registers.
 * @ptdev: Device.
 *
 * Fills ptdev->gpu_info from the hardware ID/feature registers, then logs a
 * summary (model name, version, feature masks) of the detected GPU.
 */
static void panthor_gpu_init_info(struct panthor_device *ptdev)
{
	const struct panthor_model *model;
	u32 arch_major, product_major;
	u32 major, minor, status;
	unsigned int i;

	ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
	ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
	ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
	ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
	ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
	ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
	ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
	ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
	ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
	ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
	ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
	ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
	ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
	for (i = 0; i < 4; i++)
		ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));

	ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);

	/* Presence masks are 64-bit values split across LO/HI registers. */
	ptdev->gpu_info.shader_present = gpu_read(ptdev, GPU_SHADER_PRESENT_LO);
	ptdev->gpu_info.shader_present |= (u64)gpu_read(ptdev, GPU_SHADER_PRESENT_HI) << 32;

	ptdev->gpu_info.tiler_present = gpu_read(ptdev, GPU_TILER_PRESENT_LO);
	ptdev->gpu_info.tiler_present |= (u64)gpu_read(ptdev, GPU_TILER_PRESENT_HI) << 32;

	ptdev->gpu_info.l2_present = gpu_read(ptdev, GPU_L2_PRESENT_LO);
	ptdev->gpu_info.l2_present |= (u64)gpu_read(ptdev, GPU_L2_PRESENT_HI) << 32;

	/* Decode GPU_ID into architecture/product/version fields. */
	arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
	product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
	major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
	minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
	status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);

	/* Look up a friendly model name; the table's NULL-name sentinel ends
	 * the search, in which case "unknown" is printed below.
	 */
	for (model = gpu_models; model->name; model++) {
		if (model->arch_major == arch_major &&
		    model->product_major == product_major)
			break;
	}

	drm_info(&ptdev->base,
		 "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
		 model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16,
		 major, minor, status);

	drm_info(&ptdev->base,
		 "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
		 ptdev->gpu_info.l2_features,
		 ptdev->gpu_info.tiler_features,
		 ptdev->gpu_info.mem_features,
		 ptdev->gpu_info.mmu_features,
		 ptdev->gpu_info.as_present);

	drm_info(&ptdev->base,
		 "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
		 ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
		 ptdev->gpu_info.tiler_present);
}
/*
 * panthor_gpu_irq_handler() - GPU interrupt handler.
 * @ptdev: Device.
 * @status: GPU interrupt status bits to service.
 *
 * Logs GPU faults, and acknowledges any pending request (reset, cache clean)
 * whose completion bit is set in @status by waking up the waiters.
 */
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
	if (status & GPU_IRQ_FAULT) {
		u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
		/* Fault address is split across two 32-bit registers. */
		u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
			      gpu_read(ptdev, GPU_FAULT_ADDR_LO);

		drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
			 address);
	}
	if (status & GPU_IRQ_PROTM_FAULT)
		drm_warn(&ptdev->base, "GPU Fault in protected mode\n");

	spin_lock(&ptdev->gpu->reqs_lock);
	if (status & ptdev->gpu->pending_reqs) {
		/* Clear the completed request bits and wake up the waiters
		 * blocked in panthor_gpu_flush_caches()/panthor_gpu_soft_reset().
		 */
		ptdev->gpu->pending_reqs &= ~status;
		wake_up_all(&ptdev->gpu->reqs_acked);
	}
	spin_unlock(&ptdev->gpu->reqs_lock);
}

/* Instantiates the gpu IRQ helpers (panthor_gpu_irq_suspend/resume,
 * panthor_request_gpu_irq) used throughout this file.
 */
PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);
/**
 * panthor_gpu_unplug() - Called when the GPU is unplugged.
 * @ptdev: Device to unplug.
 */
void panthor_gpu_unplug(struct panthor_device *ptdev)
{
	unsigned long flags;

	/* Make sure the IRQ handler is not running after that point. */
	panthor_gpu_irq_suspend(&ptdev->gpu->irq);

	/* Wake-up all waiters. Since the IRQ handler is suspended, no one
	 * else will complete the pending requests, so drop them all.
	 */
	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	ptdev->gpu->pending_reqs = 0;
	wake_up_all(&ptdev->gpu->reqs_acked);
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
}
/**
* panthor_gpu_init() - Initialize the GPU block
* @ptdev: Device.
*
* Return: 0 on success, a negative error code otherwise.
*/
int panthor_gpu_init(struct panthor_device *ptdev)
{
struct panthor_gpu *gpu;
u32 pa_bits;
int ret, irq;
gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);
if (!gpu)
return -ENOMEM;
spin_lock_init(&gpu->reqs_lock);
init_waitqueue_head(&gpu->reqs_acked);
ptdev->gpu = gpu;
panthor_gpu_init_info(ptdev);
dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
ret = dma_set_mask_and_coherent(ptdev->base.dev, DMA_BIT_MASK(pa_bits));
if (ret)
return ret;
irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
if (irq <= 0)
return ret;
ret = panthor_request_gpu_irq(ptdev, &ptdev->gpu->irq, irq, GPU_INTERRUPTS_MASK);
if (ret)
return ret;
return 0;
}
/**
 * panthor_gpu_block_power_off() - Power-off a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwroff_reg: Power-off register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @mask: Sub-elements to power-off.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_off(struct panthor_device *ptdev,
				const char *blk_name,
				u32 pwroff_reg, u32 pwrtrans_reg,
				u64 mask, u32 timeout_us)
{
	u32 val, i;
	int ret;

	/* Wait for any in-flight power transition on the targeted cores to
	 * settle before issuing a new power-off request. The 64-bit mask is
	 * handled as two 32-bit halves (LO register at +0, HI at +4).
	 */
	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
						 val, !(mask32 & val),
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
				blk_name, mask);
			return ret;
		}
	}

	/* Issue the power-off request, low half then high half. */
	if (mask & GENMASK(31, 0))
		gpu_write(ptdev, pwroff_reg, mask);

	if (mask >> 32)
		gpu_write(ptdev, pwroff_reg + 4, mask >> 32);

	/* Wait for the power transition we just triggered to complete. */
	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
						 val, !(mask32 & val),
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
				blk_name, mask);
			return ret;
		}
	}

	return 0;
}
/**
 * panthor_gpu_block_power_on() - Power-on a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwron_reg: Power-on register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @rdy_reg: Power transition ready register.
 * @mask: Sub-elements to power-on.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_on(struct panthor_device *ptdev,
			       const char *blk_name,
			       u32 pwron_reg, u32 pwrtrans_reg,
			       u32 rdy_reg, u64 mask, u32 timeout_us)
{
	u32 val, i;
	int ret;

	/* Wait for any in-flight power transition on the targeted cores to
	 * settle before issuing a new power-on request. The 64-bit mask is
	 * handled as two 32-bit halves (LO register at +0, HI at +4).
	 */
	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
						 val, !(mask32 & val),
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
				blk_name, mask);
			return ret;
		}
	}

	/* Issue the power-on request, low half then high half. */
	if (mask & GENMASK(31, 0))
		gpu_write(ptdev, pwron_reg, mask);

	if (mask >> 32)
		gpu_write(ptdev, pwron_reg + 4, mask >> 32);

	/* Wait until every requested sub-element reports ready. */
	for (i = 0; i < 2; i++) {
		u32 mask32 = mask >> (i * 32);

		if (!mask32)
			continue;

		ret = readl_relaxed_poll_timeout(ptdev->iomem + rdy_reg + (i * 4),
						 val, (mask32 & val) == mask32,
						 100, timeout_us);
		if (ret) {
			drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
				blk_name, mask);
			return ret;
		}
	}

	return 0;
}
/**
 * panthor_gpu_l2_power_on() - Power-on the L2-cache
 * @ptdev: Device.
 *
 * Only one core group (one L2 slice) is supported for now; when the GPU
 * reports more than one, log (once) how many cores we are restricted to.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
{
	if (ptdev->gpu_info.l2_present != 1) {
		/*
		 * Only support one core group now.
		 * ~(l2_present - 1) unsets all bits in l2_present except
		 * the bottom bit. (l2_present - 2) has all the bits in
		 * the first core group set. AND them together to generate
		 * a mask of cores in the first core group.
		 */
		u64 core_mask = ~(ptdev->gpu_info.l2_present - 1) &
				(ptdev->gpu_info.l2_present - 2);

		/* hweight64() returns an unsigned int, so the matching printk
		 * specifier is %u — the previous %lu was a format/argument
		 * type mismatch.
		 */
		drm_info_once(&ptdev->base, "using only 1st core group (%u cores from %u)\n",
			      hweight64(core_mask),
			      hweight64(ptdev->gpu_info.shader_present));
	}

	return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}
/**
 * panthor_gpu_flush_caches() - Flush caches
 * @ptdev: Device.
 * @l2: L2 flush type.
 * @lsc: LSC flush type.
 * @other: Other flush type.
 *
 * Issues a GPU_FLUSH_CACHES command and waits (up to 100ms) for the
 * CLEAN_CACHES_COMPLETED interrupt to acknowledge it.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
			     u32 l2, u32 lsc, u32 other)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	/* A cache clean should never already be pending at this point; WARN
	 * and skip issuing the command if it is.
	 */
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
		gpu_write(ptdev, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED),
				msecs_to_jiffies(100))) {
		/* The wait timed out, but the completion IRQ may have been
		 * raised without the handler having run yet: re-check the raw
		 * interrupt status and acknowledge the request ourselves
		 * instead of declaring a timeout in that case.
		 */
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Flush caches timeout");
		return -ETIMEDOUT;
	}

	return 0;
}
/**
 * panthor_gpu_soft_reset() - Issue a soft-reset
 * @ptdev: Device.
 *
 * Issues a GPU_SOFT_RESET command and waits (up to 100ms) for the
 * RESET_COMPLETED interrupt to acknowledge it.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_soft_reset(struct panthor_device *ptdev)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	/* A reset should never already be pending at this point; WARN and
	 * skip issuing the command if it is. The completion bit is cleared
	 * before the command so a stale bit can't be mistaken for our ack.
	 */
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
		gpu_write(ptdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
		gpu_write(ptdev, GPU_CMD, GPU_SOFT_RESET);
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED),
				msecs_to_jiffies(100))) {
		/* The wait timed out, but the completion IRQ may have been
		 * raised without the handler having run yet: re-check the raw
		 * interrupt status and acknowledge the request ourselves
		 * instead of declaring a timeout in that case.
		 */
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Soft reset timeout");
		return -ETIMEDOUT;
	}

	return 0;
}
/**
 * panthor_gpu_suspend() - Suspend the GPU block.
 * @ptdev: Device.
 *
 * Suspend the GPU irq. This should be called last in the suspend procedure,
 * after all other blocks have been suspended.
 */
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
	/*
	 * It may be preferable to simply power down the L2, but for now just
	 * soft-reset which will leave the L2 powered down.
	 */
	panthor_gpu_soft_reset(ptdev);
	panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}
/**
 * panthor_gpu_resume() - Resume the GPU block.
 * @ptdev: Device.
 *
 * Resume the IRQ handler and power-on the L2-cache.
 * The FW takes care of powering the other blocks.
 */
void panthor_gpu_resume(struct panthor_device *ptdev)
{
	panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
	panthor_gpu_l2_power_on(ptdev);
}

View file

@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Collabora ltd. */

#ifndef __PANTHOR_GPU_H__
#define __PANTHOR_GPU_H__

struct panthor_device;

/* GPU block init/teardown and power-management entry points. */
int panthor_gpu_init(struct panthor_device *ptdev);
void panthor_gpu_unplug(struct panthor_device *ptdev);
void panthor_gpu_suspend(struct panthor_device *ptdev);
void panthor_gpu_resume(struct panthor_device *ptdev);

int panthor_gpu_block_power_on(struct panthor_device *ptdev,
			       const char *blk_name,
			       u32 pwron_reg, u32 pwrtrans_reg,
			       u32 rdy_reg, u64 mask, u32 timeout_us);
int panthor_gpu_block_power_off(struct panthor_device *ptdev,
				const char *blk_name,
				u32 pwroff_reg, u32 pwrtrans_reg,
				u64 mask, u32 timeout_us);

/**
 * panthor_gpu_power_on() - Power on the GPU block.
 * @ptdev: Device.
 * @type: Block type token (e.g. L2); used to derive the <type>_PWRON_LO,
 * <type>_PWRTRANS_LO and <type>_READY_LO register names and the debug string.
 * @mask: Sub-elements to power on.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
#define panthor_gpu_power_on(ptdev, type, mask, timeout_us) \
	panthor_gpu_block_power_on(ptdev, #type, \
				   type ## _PWRON_LO, \
				   type ## _PWRTRANS_LO, \
				   type ## _READY_LO, \
				   mask, timeout_us)

/**
 * panthor_gpu_power_off() - Power off the GPU block.
 * @ptdev: Device.
 * @type: Block type token; used to derive the <type>_PWROFF_LO and
 * <type>_PWRTRANS_LO register names and the debug string.
 * @mask: Sub-elements to power off.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
#define panthor_gpu_power_off(ptdev, type, mask, timeout_us) \
	panthor_gpu_block_power_off(ptdev, #type, \
				    type ## _PWROFF_LO, \
				    type ## _PWRTRANS_LO, \
				    mask, timeout_us)

int panthor_gpu_l2_power_on(struct panthor_device *ptdev);
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
			     u32 l2, u32 lsc, u32 other);
int panthor_gpu_soft_reset(struct panthor_device *ptdev);

#endif

View file

@ -0,0 +1,597 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2023 Collabora ltd. */
#include <linux/iosys-map.h>
#include <linux/rwsem.h>
#include <drm/panthor_drm.h>
#include "panthor_device.h"
#include "panthor_gem.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
/*
 * The GPU heap context is an opaque structure used by the GPU to track the
 * heap allocations. The driver should only touch it to initialize it (zero all
 * fields). Because the CPU and GPU can both access this structure it is
 * required to be GPU cache line aligned.
 */
#define HEAP_CONTEXT_SIZE	32

/**
 * struct panthor_heap_chunk_header - Heap chunk header
 */
struct panthor_heap_chunk_header {
	/**
	 * @next: Next heap chunk in the list.
	 *
	 * This is a GPU VA.
	 */
	u64 next;

	/** @unknown: MBZ. */
	u32 unknown[14];
};

/**
 * struct panthor_heap_chunk - Structure used to keep track of allocated heap chunks.
 */
struct panthor_heap_chunk {
	/** @node: Used to insert the heap chunk in panthor_heap::chunks. */
	struct list_head node;

	/** @bo: Buffer object backing the heap chunk. */
	struct panthor_kernel_bo *bo;
};

/**
 * struct panthor_heap - Structure used to manage tiler heap contexts.
 */
struct panthor_heap {
	/** @chunks: List containing all heap chunks allocated so far. */
	struct list_head chunks;

	/** @lock: Lock protecting insertion in the chunks list. */
	struct mutex lock;

	/** @chunk_size: Size of each chunk. */
	u32 chunk_size;

	/** @max_chunks: Maximum number of chunks. */
	u32 max_chunks;

	/**
	 * @target_in_flight: Number of in-flight render passes after which
	 * we'd let the FW wait for fragment job to finish instead of allocating new chunks.
	 */
	u32 target_in_flight;

	/** @chunk_count: Number of heap chunks currently allocated. */
	u32 chunk_count;
};

/* Maximum number of heap contexts a single pool can hold. */
#define MAX_HEAPS_PER_POOL 128

/**
 * struct panthor_heap_pool - Pool of heap contexts
 *
 * The pool is attached to a panthor_file and can't be shared across processes.
 */
struct panthor_heap_pool {
	/** @refcount: Reference count. */
	struct kref refcount;

	/** @ptdev: Device. */
	struct panthor_device *ptdev;

	/** @vm: VM this pool is bound to. Set to NULL when the pool is destroyed. */
	struct panthor_vm *vm;

	/** @lock: Lock protecting access to @xa. */
	struct rw_semaphore lock;

	/** @xa: Array storing panthor_heap objects, indexed by heap handle. */
	struct xarray xa;

	/** @gpu_contexts: Buffer object containing the GPU heap contexts. */
	struct panthor_kernel_bo *gpu_contexts;
};
/* Size of one heap context slot: HEAP_CONTEXT_SIZE rounded up to the GPU L2
 * cache line size, since the GPU requires cache-line-aligned contexts.
 */
static int panthor_heap_ctx_stride(struct panthor_device *ptdev)
{
	return ALIGN(HEAP_CONTEXT_SIZE,
		     GPU_L2_FEATURES_LINE_SIZE(ptdev->gpu_info.l2_features));
}
/* Byte offset of heap context @id inside the pool's GPU context buffer. */
static int panthor_get_heap_ctx_offset(struct panthor_heap_pool *pool, int id)
{
	int stride = panthor_heap_ctx_stride(pool->ptdev);

	return id * stride;
}
/* CPU pointer to heap context @id (pool->gpu_contexts is vmapped at pool
 * creation time).
 */
static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
{
	int offset = panthor_get_heap_ctx_offset(pool, id);

	return pool->gpu_contexts->kmap + offset;
}
/* Unlink @chunk from @heap and free it along with its backing BO.
 * heap->lock only protects the list/counter update; the BO destruction
 * happens outside the lock.
 */
static void panthor_free_heap_chunk(struct panthor_vm *vm,
				    struct panthor_heap *heap,
				    struct panthor_heap_chunk *chunk)
{
	mutex_lock(&heap->lock);
	list_del(&chunk->node);
	heap->chunk_count--;
	mutex_unlock(&heap->lock);

	panthor_kernel_bo_destroy(vm, chunk->bo);
	kfree(chunk);
}
/* Allocate one heap chunk and insert it at the head of @heap->chunks.
 * @initial_chunk: True when called at heap creation time. Initial chunks are
 * chained together: the new chunk's header points at the previously allocated
 * chunk (high bits: GPU VA, low 12 bits: chunk size in 4k units). Chunks
 * allocated later (growth) are left unlinked — presumably the FW links them;
 * TODO confirm against the FW interface.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
				    struct panthor_vm *vm,
				    struct panthor_heap *heap,
				    bool initial_chunk)
{
	struct panthor_heap_chunk *chunk;
	struct panthor_heap_chunk_header *hdr;
	int ret;

	chunk = kmalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return -ENOMEM;

	chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
					     DRM_PANTHOR_BO_NO_MMAP,
					     DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
					     PANTHOR_VM_KERNEL_AUTO_VA);
	if (IS_ERR(chunk->bo)) {
		ret = PTR_ERR(chunk->bo);
		goto err_free_chunk;
	}

	/* Map the chunk to zero its header (the rest is MBZ per the header
	 * layout), then unmap it again: the CPU no longer needs access.
	 */
	ret = panthor_kernel_bo_vmap(chunk->bo);
	if (ret)
		goto err_destroy_bo;

	hdr = chunk->bo->kmap;
	memset(hdr, 0, sizeof(*hdr));

	if (initial_chunk && !list_empty(&heap->chunks)) {
		struct panthor_heap_chunk *prev_chunk;
		u64 prev_gpuva;

		prev_chunk = list_first_entry(&heap->chunks,
					      struct panthor_heap_chunk,
					      node);

		/* next = previous chunk's 4k-aligned GPU VA | chunk size in
		 * 4k units.
		 */
		prev_gpuva = panthor_kernel_bo_gpuva(prev_chunk->bo);
		hdr->next = (prev_gpuva & GENMASK_ULL(63, 12)) |
			    (heap->chunk_size >> 12);
	}

	panthor_kernel_bo_vunmap(chunk->bo);

	mutex_lock(&heap->lock);
	list_add(&chunk->node, &heap->chunks);
	heap->chunk_count++;
	mutex_unlock(&heap->lock);

	return 0;

err_destroy_bo:
	panthor_kernel_bo_destroy(vm, chunk->bo);

err_free_chunk:
	kfree(chunk);

	return ret;
}
/* Release every chunk attached to @heap. Used on heap teardown. */
static void panthor_free_heap_chunks(struct panthor_vm *vm,
				     struct panthor_heap *heap)
{
	struct panthor_heap_chunk *pos, *next;

	list_for_each_entry_safe(pos, next, &heap->chunks, node)
		panthor_free_heap_chunk(vm, heap, pos);
}
/* Allocate @chunk_count initial chunks for @heap, bailing out on the first
 * failure (already-allocated chunks are left on the list for the caller to
 * clean up).
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_alloc_heap_chunks(struct panthor_device *ptdev,
				     struct panthor_vm *vm,
				     struct panthor_heap *heap,
				     u32 chunk_count)
{
	u32 remaining = chunk_count;
	int err;

	while (remaining--) {
		err = panthor_alloc_heap_chunk(ptdev, vm, heap, true);
		if (err)
			return err;
	}

	return 0;
}
/* Remove heap @handle from the pool's xarray and release all its resources.
 * Callers hold pool->lock in write mode (see panthor_heap_destroy() and
 * panthor_heap_pool_destroy()).
 *
 * Return: 0 on success, -EINVAL if @handle doesn't match any heap.
 */
static int
panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle)
{
	struct panthor_heap *heap;

	heap = xa_erase(&pool->xa, handle);
	if (!heap)
		return -EINVAL;

	panthor_free_heap_chunks(pool->vm, heap);
	mutex_destroy(&heap->lock);
	kfree(heap);
	return 0;
}
/**
 * panthor_heap_destroy() - Destroy a heap context
 * @pool: Pool this context belongs to.
 * @handle: Handle returned by panthor_heap_create().
 *
 * Return: 0 on success, -EINVAL if @handle doesn't match any heap.
 */
int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle)
{
	int ret;

	down_write(&pool->lock);
	ret = panthor_heap_destroy_locked(pool, handle);
	up_write(&pool->lock);

	return ret;
}
/**
 * panthor_heap_create() - Create a heap context
 * @pool: Pool to instantiate the heap context from.
 * @initial_chunk_count: Number of chunk allocated at initialization time.
 * Must be at least 1.
 * @chunk_size: The size of each chunk. Must be a power of two between 256k
 * and 2M.
 * @max_chunks: Maximum number of chunks that can be allocated.
 * @target_in_flight: Maximum number of in-flight render passes.
 * @heap_ctx_gpu_va: Pointer holding the GPU address of the allocated heap
 * context.
 * @first_chunk_gpu_va: Pointer holding the GPU address of the first chunk
 * assigned to the heap context.
 *
 * Return: a positive handle on success, a negative error otherwise.
 */
int panthor_heap_create(struct panthor_heap_pool *pool,
			u32 initial_chunk_count,
			u32 chunk_size,
			u32 max_chunks,
			u32 target_in_flight,
			u64 *heap_ctx_gpu_va,
			u64 *first_chunk_gpu_va)
{
	struct panthor_heap *heap;
	struct panthor_heap_chunk *first_chunk;
	struct panthor_vm *vm;
	int ret = 0;
	u32 id;

	if (initial_chunk_count == 0)
		return -EINVAL;

	/* Chunk size must be a power of two in the [256k:2M] range. */
	if (hweight32(chunk_size) != 1 ||
	    chunk_size < SZ_256K || chunk_size > SZ_2M)
		return -EINVAL;

	down_read(&pool->lock);
	vm = panthor_vm_get(pool->vm);
	up_read(&pool->lock);

	/* The pool has been destroyed, we can't create a new heap. */
	if (!vm)
		return -EINVAL;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap) {
		ret = -ENOMEM;
		goto err_put_vm;
	}

	mutex_init(&heap->lock);
	INIT_LIST_HEAD(&heap->chunks);
	heap->chunk_size = chunk_size;
	heap->max_chunks = max_chunks;
	heap->target_in_flight = target_in_flight;

	ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap,
					initial_chunk_count);
	if (ret)
		goto err_free_heap;

	first_chunk = list_first_entry(&heap->chunks,
				       struct panthor_heap_chunk,
				       node);
	*first_chunk_gpu_va = panthor_kernel_bo_gpuva(first_chunk->bo);

	down_write(&pool->lock);
	/* The pool has been destroyed, we can't create a new heap. */
	if (!pool->vm) {
		ret = -EINVAL;
	} else {
		/* Heap IDs start at 1 (0 means "no heap"), and an ID directly
		 * indexes a slot in the gpu_contexts buffer, which only holds
		 * MAX_HEAPS_PER_POOL contexts (slots 0 to
		 * MAX_HEAPS_PER_POOL - 1). The upper bound must therefore be
		 * MAX_HEAPS_PER_POOL - 1: with an upper bound of
		 * MAX_HEAPS_PER_POOL, the last ID would address a context one
		 * stride past the end of the buffer (and would also be
		 * rejected by the heap_id range checks in
		 * panthor_heap_grow()/panthor_heap_return_chunk()).
		 */
		ret = xa_alloc(&pool->xa, &id, heap,
			       XA_LIMIT(1, MAX_HEAPS_PER_POOL - 1), GFP_KERNEL);
		if (!ret) {
			void *gpu_ctx = panthor_get_heap_ctx(pool, id);

			/* The GPU context must be zero-initialized. */
			memset(gpu_ctx, 0, panthor_heap_ctx_stride(pool->ptdev));
			*heap_ctx_gpu_va = panthor_kernel_bo_gpuva(pool->gpu_contexts) +
					   panthor_get_heap_ctx_offset(pool, id);
		}
	}
	up_write(&pool->lock);

	if (ret)
		goto err_free_heap;

	panthor_vm_put(vm);
	return id;

err_free_heap:
	/* NOTE(review): pool->vm may have been NULLed by a concurrent pool
	 * destruction at this point; confirm panthor_free_heap_chunks() copes
	 * with a NULL vm.
	 */
	panthor_free_heap_chunks(pool->vm, heap);
	mutex_destroy(&heap->lock);
	kfree(heap);

err_put_vm:
	panthor_vm_put(vm);
	return ret;
}
/**
 * panthor_heap_return_chunk() - Return an unused heap chunk
 * @pool: The pool this heap belongs to.
 * @heap_gpu_va: The GPU address of the heap context.
 * @chunk_gpu_va: The chunk VA to return.
 *
 * This function is used when a chunk allocated with panthor_heap_grow()
 * couldn't be linked to the heap context through the FW interface because
 * the group requesting the allocation was scheduled out in the meantime.
 *
 * Return: 0 on success, -EINVAL if @heap_gpu_va or @chunk_gpu_va doesn't
 * match a known heap/chunk.
 */
int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
			      u64 heap_gpu_va,
			      u64 chunk_gpu_va)
{
	/* Recover the heap ID from the context's offset inside the
	 * gpu_contexts buffer.
	 */
	u64 offset = heap_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
	u32 heap_id = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
	struct panthor_heap_chunk *chunk, *tmp, *removed = NULL;
	struct panthor_heap *heap;
	int ret;

	if (offset > U32_MAX || heap_id >= MAX_HEAPS_PER_POOL)
		return -EINVAL;

	down_read(&pool->lock);
	heap = xa_load(&pool->xa, heap_id);
	if (!heap) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* The low 12 bits of the chunk VA encode the chunk size, not address
	 * bits (see panthor_heap_grow()); mask them off before comparing.
	 */
	chunk_gpu_va &= GENMASK_ULL(63, 12);

	mutex_lock(&heap->lock);
	list_for_each_entry_safe(chunk, tmp, &heap->chunks, node) {
		if (panthor_kernel_bo_gpuva(chunk->bo) == chunk_gpu_va) {
			removed = chunk;
			list_del(&chunk->node);
			heap->chunk_count--;
			break;
		}
	}
	mutex_unlock(&heap->lock);

	if (removed) {
		/* chunk == removed here (loop exited via break). */
		panthor_kernel_bo_destroy(pool->vm, chunk->bo);
		kfree(chunk);
		ret = 0;
	} else {
		ret = -EINVAL;
	}

out_unlock:
	up_read(&pool->lock);
	return ret;
}
/**
 * panthor_heap_grow() - Make a heap context grow.
 * @pool: The pool this heap belongs to.
 * @heap_gpu_va: The GPU address of the heap context.
 * @renderpasses_in_flight: Number of render passes currently in-flight.
 * @pending_frag_count: Number of fragment jobs waiting for execution/completion.
 * @new_chunk_gpu_va: Pointer used to return the chunk VA.
 *
 * Return: 0 on success, -EINVAL if @heap_gpu_va doesn't match a known heap,
 * -EBUSY when the FW should wait for in-flight render passes instead,
 * -ENOMEM when the chunk limit is reached or allocation failed.
 */
int panthor_heap_grow(struct panthor_heap_pool *pool,
		      u64 heap_gpu_va,
		      u32 renderpasses_in_flight,
		      u32 pending_frag_count,
		      u64 *new_chunk_gpu_va)
{
	/* Recover the heap ID from the context's offset inside the
	 * gpu_contexts buffer.
	 */
	u64 offset = heap_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
	u32 heap_id = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
	struct panthor_heap_chunk *chunk;
	struct panthor_heap *heap;
	int ret;

	if (offset > U32_MAX || heap_id >= MAX_HEAPS_PER_POOL)
		return -EINVAL;

	down_read(&pool->lock);
	heap = xa_load(&pool->xa, heap_id);
	if (!heap) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* If we reached the target in-flight render passes, or if we
	 * reached the maximum number of chunks, let the FW figure another way to
	 * find some memory (wait for render passes to finish, or call the exception
	 * handler provided by the userspace driver, if any).
	 */
	if (renderpasses_in_flight > heap->target_in_flight ||
	    (pending_frag_count > 0 && heap->chunk_count >= heap->max_chunks)) {
		ret = -EBUSY;
		goto out_unlock;
	} else if (heap->chunk_count >= heap->max_chunks) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* FIXME: panthor_alloc_heap_chunk() triggers a kernel BO creation,
	 * which goes through the blocking allocation path. Ultimately, we
	 * want a non-blocking allocation, so we can immediately report to the
	 * FW when the system is running out of memory. In that case, the FW
	 * can call a user-provided exception handler, which might try to free
	 * some tiler memory by issuing an intermediate fragment job. If the
	 * exception handler can't do anything, it will flag the queue as
	 * faulty so the job that triggered this tiler chunk allocation and all
	 * further jobs in this queue fail immediately instead of having to
	 * wait for the job timeout.
	 */
	ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false);
	if (ret)
		goto out_unlock;

	/* The returned VA encodes the chunk's 4k-aligned GPU address in the
	 * high bits and the chunk size in 4k units in the low 12 bits, same
	 * format as panthor_heap_chunk_header::next.
	 */
	chunk = list_first_entry(&heap->chunks,
				 struct panthor_heap_chunk,
				 node);
	*new_chunk_gpu_va = (panthor_kernel_bo_gpuva(chunk->bo) & GENMASK_ULL(63, 12)) |
			    (heap->chunk_size >> 12);
	ret = 0;

out_unlock:
	up_read(&pool->lock);
	return ret;
}
/* Last reference dropped: tear down the xarray and free the pool object.
 * Heaps and the gpu_contexts BO were already released by
 * panthor_heap_pool_destroy().
 */
static void panthor_heap_pool_release(struct kref *refcount)
{
	struct panthor_heap_pool *pool;

	pool = container_of(refcount, struct panthor_heap_pool, refcount);
	xa_destroy(&pool->xa);
	kfree(pool);
}
/**
 * panthor_heap_pool_put() - Release a heap pool reference
 * @pool: Pool to release the reference on. Can be NULL.
 */
void panthor_heap_pool_put(struct panthor_heap_pool *pool)
{
	if (!pool)
		return;

	kref_put(&pool->refcount, panthor_heap_pool_release);
}
/**
* panthor_heap_pool_get() - Get a heap pool reference
* @pool: Pool to get the reference on. Can be NULL.
*
* Return: @pool.
*/
struct panthor_heap_pool *
panthor_heap_pool_get(struct panthor_heap_pool *pool)
{
if (pool)
kref_get(&pool->refcount);
return pool;
}
/**
 * panthor_heap_pool_create() - Create a heap pool
 * @ptdev: Device.
 * @vm: The VM this heap pool will be attached to.
 *
 * Heap pools might contain up to 128 heap contexts, and are per-VM.
 *
 * Return: A valid pointer on success, a negative error code otherwise.
 */
struct panthor_heap_pool *
panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
{
	/* One context slot per possible heap, page-aligned. */
	size_t bosize = ALIGN(MAX_HEAPS_PER_POOL *
			      panthor_heap_ctx_stride(ptdev),
			      4096);
	struct panthor_heap_pool *pool;
	int ret = 0;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	/* We want a weak ref here: the heap pool belongs to the VM, so we're
	 * sure that, as long as the heap pool exists, the VM exists too.
	 */
	pool->vm = vm;
	pool->ptdev = ptdev;
	init_rwsem(&pool->lock);
	/* XA_FLAGS_ALLOC1: heap IDs start at 1, so 0 can mean "no heap". */
	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC1);
	kref_init(&pool->refcount);

	pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
						      DRM_PANTHOR_BO_NO_MMAP,
						      DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
						      PANTHOR_VM_KERNEL_AUTO_VA);
	if (IS_ERR(pool->gpu_contexts)) {
		ret = PTR_ERR(pool->gpu_contexts);
		goto err_destroy_pool;
	}

	/* Keep the context buffer permanently vmapped: contexts are zeroed
	 * from the CPU each time a heap is created.
	 */
	ret = panthor_kernel_bo_vmap(pool->gpu_contexts);
	if (ret)
		goto err_destroy_pool;

	return pool;

err_destroy_pool:
	panthor_heap_pool_destroy(pool);
	return ERR_PTR(ret);
}
/**
 * panthor_heap_pool_destroy() - Destroy a heap pool.
 * @pool: Pool to destroy.
 *
 * This function destroys all heap contexts and their resources. Thus
 * preventing any use of the heap context or the chunk attached to them
 * after that point.
 *
 * If the GPU still has access to some heap contexts, a fault should be
 * triggered, which should flag the command stream groups using these
 * context as faulty.
 *
 * The heap pool object is only released when all references to this pool
 * are released.
 */
void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
{
	struct panthor_heap *heap;
	unsigned long i;

	if (!pool)
		return;

	down_write(&pool->lock);
	/* Tear down every remaining heap; a failure here means the xarray and
	 * our bookkeeping disagree, hence the WARN.
	 */
	xa_for_each(&pool->xa, i, heap)
		drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));

	/* gpu_contexts may be an ERR_PTR/NULL when called from the
	 * panthor_heap_pool_create() error path.
	 */
	if (!IS_ERR_OR_NULL(pool->gpu_contexts))
		panthor_kernel_bo_destroy(pool->vm, pool->gpu_contexts);

	/* Reflects the fact the pool has been destroyed. */
	pool->vm = NULL;
	up_write(&pool->lock);

	panthor_heap_pool_put(pool);
}

View file

@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2023 Collabora ltd. */

#ifndef __PANTHOR_HEAP_H__
#define __PANTHOR_HEAP_H__

#include <linux/types.h>

struct panthor_device;
struct panthor_heap_pool;
struct panthor_vm;

/* Heap context management within a pool. */
int panthor_heap_create(struct panthor_heap_pool *pool,
			u32 initial_chunk_count,
			u32 chunk_size,
			u32 max_chunks,
			u32 target_in_flight,
			u64 *heap_ctx_gpu_va,
			u64 *first_chunk_gpu_va);
int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle);

/* Pool lifetime: create/destroy plus refcounted get/put. */
struct panthor_heap_pool *
panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm);
void panthor_heap_pool_destroy(struct panthor_heap_pool *pool);

struct panthor_heap_pool *
panthor_heap_pool_get(struct panthor_heap_pool *pool);
void panthor_heap_pool_put(struct panthor_heap_pool *pool);

/* Chunk allocation/return, driven by FW tiler-OOM requests. */
int panthor_heap_grow(struct panthor_heap_pool *pool,
		      u64 heap_gpu_va,
		      u32 renderpasses_in_flight,
		      u32 pending_frag_count,
		      u64 *new_chunk_gpu_va);
int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
			      u64 heap_gpu_va,
			      u64 chunk_gpu_va);

#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,102 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

/* MMU/VM management interface of the panthor driver. */

#ifndef __PANTHOR_MMU_H__
#define __PANTHOR_MMU_H__

#include <linux/dma-resv.h>

struct drm_exec;
struct drm_sched_job;
struct panthor_gem_object;
struct panthor_heap_pool;
struct panthor_vm;
struct panthor_vma;
struct panthor_mmu;

/* MMU block lifecycle and reset/PM hooks. */
int panthor_mmu_init(struct panthor_device *ptdev);
void panthor_mmu_unplug(struct panthor_device *ptdev);
void panthor_mmu_pre_reset(struct panthor_device *ptdev);
void panthor_mmu_post_reset(struct panthor_device *ptdev);
void panthor_mmu_suspend(struct panthor_device *ptdev);
void panthor_mmu_resume(struct panthor_device *ptdev);

/* Synchronous map/unmap of a GEM object range in a VM. */
int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
			    u64 offset, u64 size, u64 va, u32 flags);
int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size);
struct panthor_gem_object *
panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);

/* VM activation (address space assignment) and refcounting. */
int panthor_vm_active(struct panthor_vm *vm);
void panthor_vm_idle(struct panthor_vm *vm);
int panthor_vm_as(struct panthor_vm *vm);

struct panthor_heap_pool *
panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);

struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
void panthor_vm_put(struct panthor_vm *vm);
struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
				     u64 kernel_va_start, u64 kernel_va_size,
				     u64 kernel_auto_va_start,
				     u64 kernel_auto_va_size);

/* Reservation-object helpers for jobs touching VM-mapped BOs. */
int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec,
					struct panthor_vm *vm,
					u32 slot_count);
int panthor_vm_add_bos_resvs_deps_to_job(struct panthor_vm *vm,
					 struct drm_sched_job *job);
void panthor_vm_add_job_fence_to_bos_resvs(struct panthor_vm *vm,
					   struct drm_sched_job *job);

struct dma_resv *panthor_vm_resv(struct panthor_vm *vm);
struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm);

/* Per-file VM pools and handle-based VM lookup. */
void panthor_vm_pool_destroy(struct panthor_file *pfile);
int panthor_vm_pool_create(struct panthor_file *pfile);
int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
			      struct panthor_vm_pool *pool,
			      struct drm_panthor_vm_create *args);
int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle);
struct panthor_vm *panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle);

bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm);
bool panthor_vm_is_unusable(struct panthor_vm *vm);

/*
 * PANTHOR_VM_KERNEL_AUTO_VA: Use this magic address when you want the GEM
 * logic to auto-allocate the virtual address in the reserved kernel VA range.
 */
#define PANTHOR_VM_KERNEL_AUTO_VA	~0ull

int panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
			struct drm_mm_node *va_node);
void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node);

/* VM_BIND ioctl support: synchronous ops and scheduler-job based ops. */
int panthor_vm_bind_exec_sync_op(struct drm_file *file,
				 struct panthor_vm *vm,
				 struct drm_panthor_vm_bind_op *op);

struct drm_sched_job *
panthor_vm_bind_job_create(struct drm_file *file,
			   struct panthor_vm *vm,
			   const struct drm_panthor_vm_bind_op *op);
void panthor_vm_bind_job_put(struct drm_sched_job *job);
int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
				      struct drm_sched_job *job);
void panthor_vm_bind_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);

void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
			     struct dma_fence *fence,
			     enum dma_resv_usage private_usage,
			     enum dma_resv_usage extobj_usage);

/* Page-table slab cache, shared by all VMs. */
int panthor_mmu_pt_cache_init(void);
void panthor_mmu_pt_cache_fini(void);

#ifdef CONFIG_DEBUG_FS
void panthor_mmu_debugfs_init(struct drm_minor *minor);
#endif

#endif

Some files were not shown because too many files have changed in this diff Show more