Linux 6.4-rc5

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmR80iseHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGhxkH/2+2NEZjO5SCj9z6
 FGrJunmXMOqoryoC3oL953Zhp3oeB/gB7GDdFQLg8tv0QprD6W0L9uorIgFi3lWU
 doGWKSjFVEdu6RoUV09bTwm1DislZKJF3NCwTyeb44c3HnTzUvd/zFrY29YNRi9C
 j5KXQIq91dke3qQi/3uCLFRKvmr2ss/lXapScwXFhQjaM2VmAMc51xIxkuRz3H01
 EmcbJx3Rj9zPxx3Nc7ONMvtHE5+xuVsMdq3dLFCS9Xc/f+qbCCQdRIy9AjaRuR4c
 F5nRjwDjq0iv2diF0gK4WtD8fvxVuLaqS0RAi0jsOKVfLphLwkqkPQbbMd5K8qwJ
 xTNAiW0=
 =FwWR
 -----END PGP SIGNATURE-----

Merge tag 'v6.4-rc5' into v4l_for_linus

Linux 6.4-rc5

* tag 'v6.4-rc5': (303 commits)
  Linux 6.4-rc5
  leds: qcom-lpg: Fix PWM period limits
  selftests/ftrace: Choose target function for filter test from samples
  KVM: selftests: Add test for race in kvm_recalculate_apic_map()
  KVM: x86: Bail from kvm_recalculate_phys_map() if x2APIC ID is out-of-bounds
  KVM: x86: Account fastpath-only VM-Exits in vCPU stats
  KVM: SVM: vNMI pending bit is V_NMI_PENDING_MASK not V_NMI_BLOCKING_MASK
  KVM: x86/mmu: Grab memslot for correct address space in NX recovery worker
  tpm, tpm_tis: correct tpm_tis_flags enumeration values
  Revert "ext4: remove ac->ac_found > sbi->s_mb_min_to_scan dead check in ext4_mb_check_limits"
  riscv: Implement missing huge_ptep_get
  riscv: Fix huge_ptep_set_wrprotect when PTE is a NAPOT
  module/decompress: Fix error checking on zstd decompression
  fork, vhost: Use CLONE_THREAD to fix freezer/ps regression
  dt-bindings: serial: 8250_omap: add rs485-rts-active-high
  selinux: don't use make's grouped targets feature yet
  riscv: perf: Fix callchain parse error with kernel tracepoint events
  mptcp: fix active subflow finalization
  mptcp: add annotations around sk->sk_shutdown accesses
  mptcp: fix data race around msk->first access
  ...
Commit: 4b0a5014ee
Committer: Mauro Carvalho Chehab
Date: 2023-06-09 09:24:21 +01:00

 324 files changed, 2916 insertions(+), 1626 deletions(-)

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Lattice Slave SPI sysCONFIG FPGA manager
maintainers:
- Ivan Bornyakov <i.bornyakov@metrotek.ru>
- Vladimir Georgiev <v.georgiev@metrotek.ru>
description: |
Lattice sysCONFIG port, which is used for FPGA configuration, among others,


@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Microchip Polarfire FPGA manager.
maintainers:
- Ivan Bornyakov <i.bornyakov@metrotek.ru>
- Vladimir Georgiev <v.georgiev@metrotek.ru>
description:
Device Tree Bindings for Microchip Polarfire FPGA Manager using slave SPI to


@@ -39,6 +39,12 @@ properties:
power-domains:
maxItems: 1
vref-supply:
description: |
External ADC reference voltage supply on VREFH pad. If VERID[MVI] is
set, there are additional, internal reference voltages selectable.
VREFH1 is always from VREFH pad.
"#io-channel-cells":
const: 1
@@ -72,6 +78,7 @@ examples:
assigned-clocks = <&clk IMX_SC_R_ADC_0>;
assigned-clock-rates = <24000000>;
power-domains = <&pd IMX_SC_R_ADC_0>;
vref-supply = <&reg_1v8>;
#io-channel-cells = <1>;
};
};


@@ -90,7 +90,7 @@ patternProperties:
of the MAX chips to the GyroADC, while MISO line of each Maxim
ADC connects to a shared input pin of the GyroADC.
enum:
- adi,7476
- adi,ad7476
- fujitsu,mb88101a
- maxim,max1162
- maxim,max11100


@@ -70,6 +70,7 @@ properties:
dsr-gpios: true
rng-gpios: true
dcd-gpios: true
rs485-rts-active-high: true
rts-gpio: true
power-domains: true
clock-frequency: true


@@ -287,7 +287,7 @@ properties:
description:
High-Speed PHY interface selection between UTMI+ and ULPI when the
DWC_USB3_HSPHY_INTERFACE has value 3.
$ref: /schemas/types.yaml#/definitions/uint8
$ref: /schemas/types.yaml#/definitions/string
enum: [utmi, ulpi]
snps,quirk-frame-length-adjustment:


@@ -52,3 +52,22 @@ Build kernel with:
Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page
table support without extra kernel parameter.
Implementation notes
====================
We specifically decided not to use VMA information in order to avoid relying on
MM states (except for limited "struct page" info). The page table check is a
state machine, separate from Linux-MM, that verifies that user-accessible
pages are not falsely shared.
PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without
EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory
regions into the userspace via /dev/mem. At the same time, pages may change
their properties (e.g., from anonymous pages to named pages) while they are
still being mapped in the userspace, leading to "corruption" detected by the
page table check.
Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may still be allowed to be mapped via
/dev/mem. However, these pages are always considered named pages, so they
won't break the logic used in the page table check.
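
As a rough userspace model of the invariant described above (a simplified
sketch; the real checker in mm/page_table_check.c keys its counters off
"struct page" and tracks more state than shown here)::

  #include <stdbool.h>
  #include <stdio.h>

  /* Per-physical-page counters, as the checker conceptually keeps them. */
  struct page_check {
          int anon_map_count;   /* user mappings as anonymous memory */
          int file_map_count;   /* user mappings as named (file) pages */
  };

  /* Called on every PTE update: a page must not be anon and named at once. */
  static void check_user_map(struct page_check *p, bool anon)
  {
          if (anon)
                  p->anon_map_count++;
          else
                  p->file_map_count++;
          if (p->anon_map_count && p->file_map_count)
                  fprintf(stderr, "page table check: page falsely shared\n");
  }

  int main(void)
  {
          struct page_check page = { 0, 0 };

          check_user_map(&page, true);   /* anonymous mapping: fine */
          check_user_map(&page, false);  /* page changed kind while still
                                            mapped: the "corruption" case */
          return 0;
  }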


@@ -60,22 +60,6 @@ attribute-sets:
type: nest
nested-attributes: bitset-bits
-
name: u64-array
attributes:
-
name: u64
type: nest
multi-attr: true
nested-attributes: u64
-
name: s32-array
attributes:
-
name: s32
type: nest
multi-attr: true
nested-attributes: s32
-
name: string
attributes:
@@ -705,16 +689,16 @@ attribute-sets:
type: u8
-
name: corrected
type: nest
nested-attributes: u64-array
type: binary
sub-type: u64
-
name: uncorr
type: nest
nested-attributes: u64-array
type: binary
sub-type: u64
-
name: corr-bits
type: nest
nested-attributes: u64-array
type: binary
sub-type: u64
-
name: fec
attributes:
@@ -827,8 +811,8 @@ attribute-sets:
type: u32
-
name: index
type: nest
nested-attributes: s32-array
type: binary
sub-type: s32
-
name: module
attributes:


@@ -40,6 +40,7 @@ flow_steering_mode: Device flow steering mode
---------------------------------------------
The flow steering mode parameter controls the flow steering mode of the driver.
Two modes are supported:
1. 'dmfs' - Device managed flow steering.
2. 'smfs' - Software/Driver managed flow steering.
@@ -99,6 +100,7 @@ between representors and stacked devices.
By default metadata is enabled on the supported devices in E-switch.
Metadata is applicable only for E-switch in switchdev mode and
users may disable it when NONE of the below use cases will be in use:
1. HCA is in Dual/multi-port RoCE mode.
2. VF/SF representor bonding (Usually used for Live migration)
3. Stacked devices
@@ -180,7 +182,8 @@ User commands examples:
$ devlink health diagnose pci/0000:82:00.0 reporter tx
NOTE: This command has valid output only when interface is up, otherwise the command has empty output.
.. note::
This command has valid output only when interface is up, otherwise the command has empty output.
- Show number of tx errors indicated, number of recover flows ended successfully,
is autorecover enabled and graceful period from last recover::
@@ -232,8 +235,9 @@ User commands examples:
$ devlink health dump show pci/0000:82:00.0 reporter fw
NOTE: This command can run only on the PF which has fw tracer ownership,
running it on other PF or any VF will return "Operation not permitted".
.. note::
This command can run only on the PF which has fw tracer ownership,
running it on other PF or any VF will return "Operation not permitted".
fw fatal reporter
-----------------
@@ -256,7 +260,8 @@ User commands examples:
$ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
NOTE: This command can run only on PF.
.. note::
This command can run only on PF.
vnic reporter
-------------
@@ -265,28 +270,37 @@ It is responsible for querying the vnic diagnostic counters from fw and displayi
them in realtime.
Description of the vnic counters:
total_q_under_processor_handle: number of queues in an error state due to
an async error or errored command.
send_queue_priority_update_flow: number of QP/SQ priority/SL update
events.
cq_overrun: number of times CQ entered an error state due to an
overflow.
async_eq_overrun: number of times an EQ mapped to async events was
overrun.
comp_eq_overrun: number of times an EQ mapped to completion events was
overrun.
quota_exceeded_command: number of commands issued and failed due to quota
exceeded.
invalid_command: number of commands issued and failed due to any reason
other than quota exceeded.
nic_receive_steering_discard: number of packets that completed RX flow
steering but were discarded due to a mismatch in flow table.
- total_q_under_processor_handle
number of queues in an error state due to
an async error or errored command.
- send_queue_priority_update_flow
number of QP/SQ priority/SL update events.
- cq_overrun
number of times CQ entered an error state due to an overflow.
- async_eq_overrun
number of times an EQ mapped to async events was overrun.
- comp_eq_overrun
number of times an EQ mapped to completion events was overrun.
- quota_exceeded_command
number of commands issued and failed due to quota exceeded.
- invalid_command
number of commands issued and failed due to any reason other than quota
exceeded.
- nic_receive_steering_discard
number of packets that completed RX flow
steering but were discarded due to a mismatch in flow table.
User commands examples:
- Diagnose PF/VF vnic counters
- Diagnose PF/VF vnic counters::
$ devlink health diagnose pci/0000:82:00.1 reporter vnic
- Diagnose representor vnic counters (performed by supplying devlink port of the
representor, which can be obtained via devlink port command)
representor, which can be obtained via devlink port command)::
$ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic
NOTE: This command can run over all interfaces such as PF/VF and representor ports.
.. note::
This command can run over all interfaces such as PF/VF and representor ports.


@@ -35,7 +35,7 @@ Documentation written by Tom Zanussi
in place of an explicit value field - this is simply a count of
event hits. If 'values' isn't specified, an implicit 'hitcount'
value will be automatically created and used as the only value.
Keys can be any field, or the special string 'stacktrace', which
Keys can be any field, or the special string 'common_stacktrace', which
will use the event's kernel stacktrace as the key. The keywords
'keys' or 'key' can be used to specify keys, and the keywords
'values', 'vals', or 'val' can be used to specify values. Compound
@@ -54,7 +54,7 @@ Documentation written by Tom Zanussi
'compatible' if the fields named in the trigger share the same
number and type of fields and those fields also have the same names.
Note that any two events always share the compatible 'hitcount' and
'stacktrace' fields and can therefore be combined using those
'common_stacktrace' fields and can therefore be combined using those
fields, however pointless that may be.
'hist' triggers add a 'hist' file to each event's subdirectory.
@@ -547,9 +547,9 @@ Extended error information
the hist trigger display symbolic call_sites, we can have the hist
trigger additionally display the complete set of kernel stack traces
that led to each call_site. To do that, we simply use the special
value 'stacktrace' for the key parameter::
value 'common_stacktrace' for the key parameter::
# echo 'hist:keys=stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
# echo 'hist:keys=common_stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
/sys/kernel/tracing/events/kmem/kmalloc/trigger
The above trigger will use the kernel stack trace in effect when an
@@ -561,9 +561,9 @@ Extended error information
every callpath to a kmalloc for a kernel compile)::
# cat /sys/kernel/tracing/events/kmem/kmalloc/hist
# trigger info: hist:keys=stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]
# trigger info: hist:keys=common_stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]
{ stacktrace:
{ common_stacktrace:
__kmalloc_track_caller+0x10b/0x1a0
kmemdup+0x20/0x50
hidraw_report_event+0x8a/0x120 [hid]
@@ -581,7 +581,7 @@ Extended error information
cpu_startup_entry+0x315/0x3e0
rest_init+0x7c/0x80
} hitcount: 3 bytes_req: 21 bytes_alloc: 24
{ stacktrace:
{ common_stacktrace:
__kmalloc_track_caller+0x10b/0x1a0
kmemdup+0x20/0x50
hidraw_report_event+0x8a/0x120 [hid]
@@ -596,7 +596,7 @@ Extended error information
do_IRQ+0x5a/0xf0
ret_from_intr+0x0/0x30
} hitcount: 3 bytes_req: 21 bytes_alloc: 24
{ stacktrace:
{ common_stacktrace:
kmem_cache_alloc_trace+0xeb/0x150
aa_alloc_task_context+0x27/0x40
apparmor_cred_prepare+0x1f/0x50
@@ -608,7 +608,7 @@ Extended error information
.
.
.
{ stacktrace:
{ common_stacktrace:
__kmalloc+0x11b/0x1b0
i915_gem_execbuffer2+0x6c/0x2c0 [i915]
drm_ioctl+0x349/0x670 [drm]
@@ -616,7 +616,7 @@ Extended error information
SyS_ioctl+0x81/0xa0
system_call_fastpath+0x12/0x6a
} hitcount: 17726 bytes_req: 13944120 bytes_alloc: 19593808
{ stacktrace:
{ common_stacktrace:
__kmalloc+0x11b/0x1b0
load_elf_phdrs+0x76/0xa0
load_elf_binary+0x102/0x1650
@@ -625,7 +625,7 @@ Extended error information
SyS_execve+0x3a/0x50
return_from_execve+0x0/0x23
} hitcount: 33348 bytes_req: 17152128 bytes_alloc: 20226048
{ stacktrace:
{ common_stacktrace:
kmem_cache_alloc_trace+0xeb/0x150
apparmor_file_alloc_security+0x27/0x40
security_file_alloc+0x16/0x20
@@ -636,7 +636,7 @@ Extended error information
SyS_open+0x1e/0x20
system_call_fastpath+0x12/0x6a
} hitcount: 4766422 bytes_req: 9532844 bytes_alloc: 38131376
{ stacktrace:
{ common_stacktrace:
__kmalloc+0x11b/0x1b0
seq_buf_alloc+0x1b/0x50
seq_read+0x2cc/0x370
@@ -1026,7 +1026,7 @@ Extended error information
First we set up an initially paused stacktrace trigger on the
netif_receive_skb event::
# echo 'hist:key=stacktrace:vals=len:pause' > \
# echo 'hist:key=common_stacktrace:vals=len:pause' > \
/sys/kernel/tracing/events/net/netif_receive_skb/trigger
Next, we set up an 'enable_hist' trigger on the sched_process_exec
@@ -1060,9 +1060,9 @@ Extended error information
$ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz
# cat /sys/kernel/tracing/events/net/netif_receive_skb/hist
# trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
# trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused]
{ stacktrace:
{ common_stacktrace:
__netif_receive_skb_core+0x46d/0x990
__netif_receive_skb+0x18/0x60
netif_receive_skb_internal+0x23/0x90
@@ -1079,7 +1079,7 @@ Extended error information
kthread+0xd2/0xf0
ret_from_fork+0x42/0x70
} hitcount: 85 len: 28884
{ stacktrace:
{ common_stacktrace:
__netif_receive_skb_core+0x46d/0x990
__netif_receive_skb+0x18/0x60
netif_receive_skb_internal+0x23/0x90
@@ -1097,7 +1097,7 @@ Extended error information
irq_thread+0x11f/0x150
kthread+0xd2/0xf0
} hitcount: 98 len: 664329
{ stacktrace:
{ common_stacktrace:
__netif_receive_skb_core+0x46d/0x990
__netif_receive_skb+0x18/0x60
process_backlog+0xa8/0x150
@@ -1115,7 +1115,7 @@ Extended error information
inet_sendmsg+0x64/0xa0
sock_sendmsg+0x3d/0x50
} hitcount: 115 len: 13030
{ stacktrace:
{ common_stacktrace:
__netif_receive_skb_core+0x46d/0x990
__netif_receive_skb+0x18/0x60
netif_receive_skb_internal+0x23/0x90
@@ -1142,14 +1142,14 @@ Extended error information
into the histogram. In order to avoid having to set everything up
again, we can just clear the histogram first::
# echo 'hist:key=stacktrace:vals=len:clear' >> \
# echo 'hist:key=common_stacktrace:vals=len:clear' >> \
/sys/kernel/tracing/events/net/netif_receive_skb/trigger
Just to verify that it is in fact cleared, here's what we now see in
the hist file::
# cat /sys/kernel/tracing/events/net/netif_receive_skb/hist
# trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
# trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused]
Totals:
Hits: 0
@@ -1485,12 +1485,12 @@ Extended error information
And here's an example that shows how to combine histogram data from
any two events even if they don't share any 'compatible' fields
other than 'hitcount' and 'stacktrace'. These commands create a
other than 'hitcount' and 'common_stacktrace'. These commands create a
couple of triggers named 'bar' using those fields::
# echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
# echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \
/sys/kernel/tracing/events/sched/sched_process_fork/trigger
# echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
# echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \
/sys/kernel/tracing/events/net/netif_rx/trigger
And displaying the output of either shows some interesting if
@@ -1501,16 +1501,16 @@ Extended error information
# event histogram
#
# trigger info: hist:name=bar:keys=stacktrace:vals=hitcount:sort=hitcount:size=2048 [active]
# trigger info: hist:name=bar:keys=common_stacktrace:vals=hitcount:sort=hitcount:size=2048 [active]
#
{ stacktrace:
{ common_stacktrace:
kernel_clone+0x18e/0x330
kernel_thread+0x29/0x30
kthreadd+0x154/0x1b0
ret_from_fork+0x3f/0x70
} hitcount: 1
{ stacktrace:
{ common_stacktrace:
netif_rx_internal+0xb2/0xd0
netif_rx_ni+0x20/0x70
dev_loopback_xmit+0xaa/0xd0
@@ -1528,7 +1528,7 @@ Extended error information
call_cpuidle+0x3b/0x60
cpu_startup_entry+0x22d/0x310
} hitcount: 1
{ stacktrace:
{ common_stacktrace:
netif_rx_internal+0xb2/0xd0
netif_rx_ni+0x20/0x70
dev_loopback_xmit+0xaa/0xd0
@@ -1543,7 +1543,7 @@ Extended error information
SyS_sendto+0xe/0x10
entry_SYSCALL_64_fastpath+0x12/0x6a
} hitcount: 2
{ stacktrace:
{ common_stacktrace:
netif_rx_internal+0xb2/0xd0
netif_rx+0x1c/0x60
loopback_xmit+0x6c/0xb0
@@ -1561,7 +1561,7 @@ Extended error information
sock_sendmsg+0x38/0x50
___sys_sendmsg+0x14e/0x270
} hitcount: 76
{ stacktrace:
{ common_stacktrace:
netif_rx_internal+0xb2/0xd0
netif_rx+0x1c/0x60
loopback_xmit+0x6c/0xb0
@@ -1579,7 +1579,7 @@ Extended error information
sock_sendmsg+0x38/0x50
___sys_sendmsg+0x269/0x270
} hitcount: 77
{ stacktrace:
{ common_stacktrace:
netif_rx_internal+0xb2/0xd0
netif_rx+0x1c/0x60
loopback_xmit+0x6c/0xb0
@@ -1597,7 +1597,7 @@ Extended error information
sock_sendmsg+0x38/0x50
SYSC_sendto+0xef/0x170
} hitcount: 88
{ stacktrace:
{ common_stacktrace:
kernel_clone+0x18e/0x330
SyS_clone+0x19/0x20
entry_SYSCALL_64_fastpath+0x12/0x6a
@@ -1949,7 +1949,7 @@ uninterruptible state::
# cd /sys/kernel/tracing
# echo 's:block_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events
# echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace if prev_state == 2' >> events/sched/sched_switch/trigger
# echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=common_stacktrace if prev_state == 2' >> events/sched/sched_switch/trigger
# echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(block_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger
# echo 1 > events/synthetic/block_lat/enable
# cat trace


@@ -956,7 +956,8 @@ F: Documentation/networking/device_drivers/ethernet/amazon/ena.rst
F: drivers/net/ethernet/amazon/
AMAZON RDMA EFA DRIVER
M: Gal Pressman <galpress@amazon.com>
M: Michael Margolin <mrgolin@amazon.com>
R: Gal Pressman <gal.pressman@linux.dev>
R: Yossi Leybovich <sleybo@amazon.com>
L: linux-rdma@vger.kernel.org
S: Supported
@@ -1600,7 +1601,7 @@ F: drivers/media/i2c/ar0521.c
ARASAN NAND CONTROLLER DRIVER
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Naga Sureshkumar Relli <nagasure@xilinx.com>
R: Michal Simek <michal.simek@amd.com>
L: linux-mtd@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
@@ -1763,7 +1764,7 @@ F: include/linux/amba/mmci.h
ARM PRIMECELL PL35X NAND CONTROLLER DRIVER
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Naga Sureshkumar Relli <nagasure@xilinx.com>
R: Michal Simek <michal.simek@amd.com>
L: linux-mtd@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml
@@ -1771,7 +1772,7 @@ F: drivers/mtd/nand/raw/pl35x-nand-controller.c
ARM PRIMECELL PL35X SMC DRIVER
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Naga Sureshkumar Relli <nagasure@xilinx.com>
R: Michal Simek <michal.simek@amd.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/memory-controllers/arm,pl35x-smc.yaml
@@ -5138,7 +5139,7 @@ X: drivers/clk/clkdev.c
COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
M: Steve French <sfrench@samba.org>
R: Paulo Alcantara <pc@cjr.nz> (DFS, global name space)
R: Paulo Alcantara <pc@manguebit.com> (DFS, global name space)
R: Ronnie Sahlberg <lsahlber@redhat.com> (directory leases, sparse files)
R: Shyam Prasad N <sprasad@microsoft.com> (multichannel)
R: Tom Talpey <tom@talpey.com> (RDMA, smbdirect)
@@ -9343,7 +9344,7 @@ F: include/linux/hisi_acc_qm.h
HISILICON ROCE DRIVER
M: Haoyue Xu <xuhaoyue1@hisilicon.com>
M: Wenpeng Liang <liangwenpeng@huawei.com>
M: Junxian Huang <huangjunxian6@hisilicon.com>
L: linux-rdma@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
@@ -10114,7 +10115,7 @@ S: Maintained
F: Documentation/process/kernel-docs.rst
INDUSTRY PACK SUBSYSTEM (IPACK)
M: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
M: Vaibhav Gupta <vaibhavgupta40@gmail.com>
M: Jens Taprogge <jens.taprogge@taprogge.org>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: industrypack-devel@lists.sourceforge.net
@@ -13836,7 +13837,7 @@ F: drivers/tty/serial/8250/8250_pci1xxxx.c
MICROCHIP POLARFIRE FPGA DRIVERS
M: Conor Dooley <conor.dooley@microchip.com>
R: Ivan Bornyakov <i.bornyakov@metrotek.ru>
R: Vladimir Georgiev <v.georgiev@metrotek.ru>
L: linux-fpga@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml


@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@@ -632,9 +632,9 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
*
* The walker will walk the page-table entries corresponding to the input
* address range specified, visiting entries according to the walker flags.
* Invalid entries are treated as leaf entries. Leaf entries are reloaded
* after invoking the walker callback, allowing the walker to descend into
* a newly installed table.
* Invalid entries are treated as leaf entries. The visited page table entry is
* reloaded after invoking the walker callback, allowing the walker to descend
* into a newly installed table.
*
* Returning a negative error code from the walker callback function will
* terminate the walk immediately with the same error code.


@@ -115,8 +115,14 @@
#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6)
#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4)
#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6)
#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4)
#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6)
/*
* Automatically generated definitions for system registers, the


@@ -412,17 +412,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (!__populate_fault_info(vcpu))
return true;
return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (!__populate_fault_info(vcpu))
if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
return true;
if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {


@@ -575,7 +575,7 @@ struct pkvm_mem_donation {
struct check_walk_data {
enum pkvm_page_state desired;
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr);
};
static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
@@ -583,10 +583,7 @@ static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
{
struct check_walk_data *d = ctx->arg;
if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old)))
return -EINVAL;
return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM;
return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM;
}
static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
@@ -601,8 +598,11 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
{
if (!addr_is_allowed_memory(addr))
return PKVM_NOPAGE;
if (!kvm_pte_valid(pte) && pte)
return PKVM_NOPAGE;
@@ -709,7 +709,7 @@ static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx
return host_stage2_set_owner_locked(addr, size, host_id);
}
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
{
if (!kvm_pte_valid(pte))
return PKVM_NOPAGE;


@@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
};
@@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
};


@@ -209,14 +209,26 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
.flags = flags,
};
int ret = 0;
bool reload = false;
kvm_pteref_t childp;
bool table = kvm_pte_table(ctx.old, level);
if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
reload = true;
}
if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
reload = true;
}
/*
* Reload the page table after invoking the walker callback for leaf
* entries or after pre-order traversal, to allow the walker to descend
* into a newly installed or replaced table.
*/
if (reload) {
ctx.old = READ_ONCE(*ptep);
table = kvm_pte_table(ctx.old, level);
}
@@ -1320,4 +1332,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
};
WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
WARN_ON(mm_ops->page_count(pgtable) != 1);
mm_ops->put_page(pgtable);
}


@@ -110,6 +110,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
};


@@ -694,45 +694,23 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
struct perf_event_attr attr = { };
struct perf_event *event;
struct arm_pmu *pmu = NULL;
struct arm_pmu *tmp, *pmu = NULL;
struct arm_pmu_entry *entry;
int cpu;
/*
* Create a dummy event that only counts user cycles. As we'll never
* leave this function with the event being live, it will never
* count anything. But it allows us to probe some of the PMU
* details. Yes, this is terrible.
*/
attr.type = PERF_TYPE_RAW;
attr.size = sizeof(attr);
attr.pinned = 1;
attr.disabled = 0;
attr.exclude_user = 0;
attr.exclude_kernel = 1;
attr.exclude_hv = 1;
attr.exclude_host = 1;
attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
attr.sample_period = GENMASK(63, 0);
mutex_lock(&arm_pmus_lock);
event = perf_event_create_kernel_counter(&attr, -1, current,
kvm_pmu_perf_overflow, &attr);
cpu = smp_processor_id();
list_for_each_entry(entry, &arm_pmus, entry) {
tmp = entry->arm_pmu;
if (IS_ERR(event)) {
pr_err_once("kvm: pmu event creation failed %ld\n",
PTR_ERR(event));
return NULL;
if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
pmu = tmp;
break;
}
}
if (event->pmu) {
pmu = to_arm_pmu(event->pmu);
if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
pmu = NULL;
}
perf_event_disable(event);
perf_event_release_kernel(event);
mutex_unlock(&arm_pmus_lock);
return pmu;
}
@@ -912,7 +890,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return -EBUSY;
if (!kvm->arch.arm_pmu) {
/* No PMU set, get the default one */
/*
* No PMU set, get the default one.
*
* The observant among you will notice that the supported_cpus
* mask does not get updated for the default PMU even though it
* is quite possible the selected instance supports only a
* subset of cores in the system. This is intentional, and
* upholds the preexisting behavior on heterogeneous systems
* where vCPUs can be scheduled on any core but the guest
* counters could stop working.
*/
kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
if (!kvm->arch.arm_pmu)
return -ENODEV;


@@ -211,6 +211,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
return true;
}
static bool access_dcgsw(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (!kvm_has_mte(vcpu->kvm)) {
kvm_inject_undefined(vcpu);
return false;
}
/* Treat MTE S/W ops as we treat the classic ones: with contempt */
return access_dcsw(vcpu, p, r);
}
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
switch (r->aarch32_map) {
@@ -1756,8 +1769,14 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
*/
static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_DC_ISW), access_dcsw },
{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
{ SYS_DESC(SYS_DC_CSW), access_dcsw },
{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
{ SYS_DESC(SYS_DC_CISW), access_dcsw },
{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
DBG_BCR_BVR_WCR_WVR_EL1(0),
DBG_BCR_BVR_WCR_WVR_EL1(1),


@@ -235,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
* KVM io device for the redistributor that belongs to this VCPU.
*/
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
mutex_lock(&vcpu->kvm->arch.config_lock);
mutex_lock(&vcpu->kvm->slots_lock);
ret = vgic_register_redist_iodev(vcpu);
mutex_unlock(&vcpu->kvm->arch.config_lock);
mutex_unlock(&vcpu->kvm->slots_lock);
}
return ret;
}
@@ -406,7 +406,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
/**
* vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
* is a GICv2. A GICv3 must be explicitly initialized by the guest using the
* is a GICv2. A GICv3 must be explicitly initialized by userspace using the
* KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
* @kvm: kvm struct pointer
*/
@@ -446,11 +446,13 @@ int vgic_lazy_init(struct kvm *kvm)
int kvm_vgic_map_resources(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
gpa_t dist_base;
int ret = 0;
if (likely(vgic_ready(kvm)))
return 0;
mutex_lock(&kvm->slots_lock);
mutex_lock(&kvm->arch.config_lock);
if (vgic_ready(kvm))
goto out;
@@ -463,13 +465,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
else
ret = vgic_v3_map_resources(kvm);
if (ret)
if (ret) {
__kvm_vgic_destroy(kvm);
else
dist->ready = true;
goto out;
}
dist->ready = true;
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);
ret = vgic_register_dist_iodev(kvm, dist_base,
kvm_vgic_global_state.type);
if (ret) {
kvm_err("Unable to register VGIC dist MMIO regions\n");
kvm_vgic_destroy(kvm);
}
mutex_unlock(&kvm->slots_lock);
return ret;
out:
mutex_unlock(&kvm->arch.config_lock);
mutex_unlock(&kvm->slots_lock);
return ret;
}
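
Taken together, the hunks above pin down a single lock order: slots_lock is
the outer lock, config_lock nests inside it, and iodev registration runs
under slots_lock alone. A minimal pthread sketch of that convention
(hypothetical names, not KVM code):

  #include <pthread.h>

  static pthread_mutex_t slots_lock  = PTHREAD_MUTEX_INITIALIZER; /* outer */
  static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER; /* inner */

  /* Requires slots_lock held and config_lock NOT held: it registers MMIO. */
  static void register_dist_iodev(void)
  {
          /* ... kvm_io_bus_register_dev()-like work ... */
  }

  static void map_resources(void)
  {
          pthread_mutex_lock(&slots_lock);
          pthread_mutex_lock(&config_lock);
          /* compute dist_base and mark the distributor ready */
          pthread_mutex_unlock(&config_lock);

          register_dist_iodev();          /* slots_lock only, as required */
          pthread_mutex_unlock(&slots_lock);
  }

  int main(void)
  {
          map_resources();
          return 0;
  }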


@@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
int ret;
struct vgic_its *its;
if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
@@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
if (!its)
return -ENOMEM;
mutex_lock(&dev->kvm->arch.config_lock);
if (vgic_initialized(dev->kvm)) {
int ret = vgic_v4_init(dev->kvm);
ret = vgic_v4_init(dev->kvm);
if (ret < 0) {
mutex_unlock(&dev->kvm->arch.config_lock);
kfree(its);
return ret;
}
@@ -1960,12 +1964,10 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
/* Yep, even more trickery for lock ordering... */
#ifdef CONFIG_LOCKDEP
mutex_lock(&dev->kvm->arch.config_lock);
mutex_lock(&its->cmd_lock);
mutex_lock(&its->its_lock);
mutex_unlock(&its->its_lock);
mutex_unlock(&its->cmd_lock);
mutex_unlock(&dev->kvm->arch.config_lock);
#endif
its->vgic_its_base = VGIC_ADDR_UNDEF;
@@ -1986,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
dev->private = its;
return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);
mutex_unlock(&dev->kvm->arch.config_lock);
return ret;
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)


@@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
if (get_user(addr, uaddr))
return -EFAULT;
mutex_lock(&kvm->arch.config_lock);
/*
* Since we can't hold config_lock while registering the redistributor
* iodevs, take the slots_lock immediately.
*/
mutex_lock(&kvm->slots_lock);
switch (attr->attr) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
if (r)
goto out;
mutex_lock(&kvm->arch.config_lock);
if (write) {
r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
if (!r)
@@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
} else {
addr = *addr_ptr;
}
mutex_unlock(&kvm->arch.config_lock);
out:
mutex_unlock(&kvm->arch.config_lock);
mutex_unlock(&kvm->slots_lock);
if (!r && !write)
r = put_user(addr, uaddr);


@@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
struct vgic_redist_region *rdreg;
gpa_t rd_base;
int ret;
int ret = 0;
lockdep_assert_held(&kvm->slots_lock);
mutex_lock(&kvm->arch.config_lock);
if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
return 0;
goto out_unlock;
/*
* We may be creating VCPUs before having set the base address for the
@@ -782,10 +785,12 @@ */
*/
rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
if (!rdreg)
return 0;
goto out_unlock;
if (!vgic_v3_check_base(kvm))
return -EINVAL;
if (!vgic_v3_check_base(kvm)) {
ret = -EINVAL;
goto out_unlock;
}
vgic_cpu->rdreg = rdreg;
vgic_cpu->rdreg_index = rdreg->free_index;
@@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
rd_dev->redist_vcpu = vcpu;
mutex_lock(&kvm->slots_lock);
mutex_unlock(&kvm->arch.config_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
2 * SZ_64K, &rd_dev->dev);
mutex_unlock(&kvm->slots_lock);
if (ret)
return ret;
/* Protected by slots_lock */
rdreg->free_index++;
return 0;
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
return ret;
}
static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
@@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
/* The current c failed, so iterate over the previous ones. */
int i;
mutex_lock(&kvm->slots_lock);
for (i = 0; i < c; i++) {
vcpu = kvm_get_vcpu(kvm, i);
vgic_unregister_redist_iodev(vcpu);
}
mutex_unlock(&kvm->slots_lock);
}
return ret;
@@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
int ret;
mutex_lock(&kvm->arch.config_lock);
ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
mutex_unlock(&kvm->arch.config_lock);
if (ret)
return ret;
@@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
if (ret) {
struct vgic_redist_region *rdreg;
mutex_lock(&kvm->arch.config_lock);
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
vgic_v3_free_redist_region(rdreg);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}


@@ -1096,7 +1096,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
enum vgic_type type)
{
struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
int ret = 0;
unsigned int len;
switch (type) {
@@ -1114,10 +1113,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
io_device->iodev_type = IODEV_DIST;
io_device->redist_vcpu = NULL;
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
len, &io_device->dev);
mutex_unlock(&kvm->slots_lock);
return ret;
return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
len, &io_device->dev);
}


@@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
return ret;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
if (ret) {
kvm_err("Unable to register VGIC MMIO regions\n");
return ret;
}
if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
kvm_vgic_global_state.vcpu_base,


@@ -539,7 +539,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
int ret = 0;
unsigned long c;
kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -569,12 +568,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
return -EBUSY;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
if (ret) {
kvm_err("Unable to register VGICv3 dist MMIO regions\n");
return ret;
}
if (kvm_vgic_global_state.has_gicv4_1)
vgic_v4_configure_vsgis(kvm);


@@ -184,13 +184,14 @@ static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
}
}
/* Must be called with the kvm lock held */
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
unsigned long i;
lockdep_assert_held(&kvm->arch.config_lock);
kvm_arm_halt_guest(kvm);
kvm_for_each_vcpu(i, vcpu, kvm) {


@@ -22,15 +22,15 @@ sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@
targets += aesp8-ppc.S ghashp8-ppc.S
targets += aesp10-ppc.S ghashp10-ppc.S
$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
$(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
$(call if_changed,perl)
OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y
OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y


@@ -30,15 +30,15 @@ MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");
asmlinkage int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits,
void *key);
asmlinkage void aes_p8_encrypt(const u8 *in, u8 *out, const void *key);
asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key);
asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
void *rkey, u8 *iv, void *Xi);
asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
void *rkey, u8 *iv, void *Xi);
asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
asmlinkage void gcm_ghash_p8(unsigned char *Xi, unsigned char *Htable,
asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
unsigned char *aad, unsigned int alen);
struct aes_key {
@@ -93,7 +93,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
gctx->aadLen = alen;
i = alen & ~0xf;
if (i) {
gcm_ghash_p8(nXi, hash->Htable+32, aad, i);
gcm_ghash_p10(nXi, hash->Htable+32, aad, i);
aad += i;
alen -= i;
}
@@ -102,7 +102,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
nXi[i] ^= aad[i];
memset(gctx->aad_hash, 0, 16);
gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16);
gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16);
} else {
memcpy(gctx->aad_hash, nXi, 16);
}
@@ -115,7 +115,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
{
__be32 counter = cpu_to_be32(1);
aes_p8_encrypt(hash->H, hash->H, rdkey);
aes_p10_encrypt(hash->H, hash->H, rdkey);
set_subkey(hash->H);
gcm_init_htable(hash->Htable+32, hash->H);
@@ -126,7 +126,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
/*
* Encrypt counter vector as iv tag and increment counter.
*/
aes_p8_encrypt(iv, gctx->ivtag, rdkey);
aes_p10_encrypt(iv, gctx->ivtag, rdkey);
counter = cpu_to_be32(2);
*((__be32 *)(iv+12)) = counter;
@@ -160,7 +160,7 @@ static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
/*
* hash (AAD len and len)
*/
gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16);
gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16);
for (i = 0; i < 16; i++)
hash->Htable[i] ^= gctx->ivtag[i];
@@ -192,7 +192,7 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
int ret;
vsx_begin();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
vsx_end();
return ret ? -EINVAL : 0;


@@ -110,7 +110,7 @@ die "can't locate ppc-xlate.pl";
open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
$FRAME=8*$SIZE_T;
$prefix="aes_p8";
$prefix="aes_p10";
$sp="r1";
$vrsave="r12";


@@ -64,7 +64,7 @@ $code=<<___;
.text
.globl .gcm_init_p8
.globl .gcm_init_p10
lis r0,0xfff0
li r8,0x10
mfspr $vrsave,256
@@ -110,7 +110,7 @@ $code=<<___;
.long 0
.byte 0,12,0x14,0,0,0,2,0
.long 0
.size .gcm_init_p8,.-.gcm_init_p8
.size .gcm_init_p10,.-.gcm_init_p10
.globl .gcm_init_htable
lis r0,0xfff0
@@ -237,7 +237,7 @@ $code=<<___;
.long 0
.size .gcm_init_htable,.-.gcm_init_htable
.globl .gcm_gmult_p8
.globl .gcm_gmult_p10
lis r0,0xfff8
li r8,0x10
mfspr $vrsave,256
@@ -283,9 +283,9 @@ $code=<<___;
.long 0
.byte 0,12,0x14,0,0,0,2,0
.long 0
.size .gcm_gmult_p8,.-.gcm_gmult_p8
.size .gcm_gmult_p10,.-.gcm_gmult_p10
.globl .gcm_ghash_p8
.globl .gcm_ghash_p10
lis r0,0xfff8
li r8,0x10
mfspr $vrsave,256
@@ -350,7 +350,7 @@ Loop:
.long 0
.byte 0,12,0x14,0,0,0,4,0
.long 0
.size .gcm_ghash_p8,.-.gcm_ghash_p8
.size .gcm_ghash_p10,.-.gcm_ghash_p10
.asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
.align 2


@@ -317,13 +317,22 @@ static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
u64 rc;
long rpages = npages;
unsigned long limit;
if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
return tce_free_pSeriesLP(tbl->it_index, tcenum,
tbl->it_page_shift, npages);
rc = plpar_tce_stuff((u64)tbl->it_index,
(u64)tcenum << tbl->it_page_shift, 0, npages);
do {
limit = min_t(unsigned long, rpages, 512);
rc = plpar_tce_stuff((u64)tbl->it_index,
(u64)tcenum << tbl->it_page_shift, 0, limit);
rpages -= limit;
tcenum += limit;
} while (rpages > 0 && !rc);
if (rc && printk_ratelimit()) {
printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");


@@ -88,7 +88,7 @@ static unsigned long ndump = 64;
static unsigned long nidump = 16;
static unsigned long ncsum = 4096;
static int termch;
static char tmpstr[128];
static char tmpstr[KSYM_NAME_LEN];
static int tracing_enabled;
static long bus_error_jmp[JMP_BUF_LEN];


@@ -799,8 +799,11 @@ menu "Power management options"
source "kernel/power/Kconfig"
# Hibernation is only possible on systems where the SBI implementation has
# marked its reserved memory as not accessible from, or does not run
# from the same memory as, Linux
config ARCH_HIBERNATION_POSSIBLE
def_bool y
def_bool NONPORTABLE
config ARCH_HIBERNATION_HEADER
def_bool HIBERNATION


@@ -1,2 +1,6 @@
ifdef CONFIG_RELOCATABLE
KBUILD_CFLAGS += -fno-pie
endif
obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
obj-$(CONFIG_ERRATA_THEAD) += thead/


@@ -36,6 +36,9 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET
pte_t huge_ptep_get(pte_t *ptep);
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte


@@ -10,4 +10,11 @@
#include <linux/perf_event.h>
#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->epc = (__ip); \
(regs)->s0 = (unsigned long) __builtin_frame_address(0); \
(regs)->sp = current_stack_pointer; \
(regs)->status = SR_PP; \
}
#endif /* _ASM_RISCV_PERF_EVENT_H */


@@ -23,6 +23,10 @@ ifdef CONFIG_FTRACE
CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
endif
ifdef CONFIG_RELOCATABLE
CFLAGS_alternative.o += -fno-pie
CFLAGS_cpufeature.o += -fno-pie
endif
ifdef CONFIG_KASAN
KASAN_SANITIZE_alternative.o := n
KASAN_SANITIZE_cpufeature.o := n


@@ -3,6 +3,30 @@
#include <linux/err.h>
#ifdef CONFIG_RISCV_ISA_SVNAPOT
pte_t huge_ptep_get(pte_t *ptep)
{
unsigned long pte_num;
int i;
pte_t orig_pte = ptep_get(ptep);
if (!pte_present(orig_pte) || !pte_napot(orig_pte))
return orig_pte;
pte_num = napot_pte_num(napot_cont_order(orig_pte));
for (i = 0; i < pte_num; i++, ptep++) {
pte_t pte = ptep_get(ptep);
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
}
return orig_pte;
}
pte_t *huge_pte_alloc(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr,
@@ -218,6 +242,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
{
pte_t pte = ptep_get(ptep);
unsigned long order;
pte_t orig_pte;
int i, pte_num;
if (!pte_napot(pte)) {
@@ -228,9 +253,12 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
order = napot_cont_order(pte);
pte_num = napot_pte_num(order);
ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num);
orig_pte = pte_wrprotect(orig_pte);
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
ptep_set_wrprotect(mm, addr, ptep);
set_pte_at(mm, addr, ptep, orig_pte);
}
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,


@@ -922,9 +922,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
uintptr_t dtb_pa)
{
#ifndef CONFIG_BUILTIN_DTB
uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
#ifndef CONFIG_BUILTIN_DTB
/* Make sure the fdt fixmap address is always aligned on PMD size */
BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));


@@ -773,8 +773,6 @@
.octa 0x3F893781E95FE1576CDA64D2BA0CB204
#ifdef CONFIG_AS_GFNI
.section .rodata.cst8, "aM", @progbits, 8
.align 8
/* AES affine: */
#define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0)
.Ltf_aff_bitmatrix:


@@ -39,7 +39,7 @@ extern void fpu_flush_thread(void);
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
if (cpu_feature_enabled(X86_FEATURE_FPU) &&
!(current->flags & (PF_KTHREAD | PF_IO_WORKER))) {
!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) {
save_fpregs_to_fpstate(old_fpu);
/*
* The save operation preserved register state, so the


@@ -57,7 +57,7 @@ static inline void fpregs_restore_userregs(void)
struct fpu *fpu = &current->thread.fpu;
int cpu = smp_processor_id();
if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_IO_WORKER)))
if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER)))
return;
if (!fpregs_state_valid(fpu, cpu)) {


@@ -426,7 +426,7 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask)
this_cpu_write(in_kernel_fpu, true);
if (!(current->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) &&
!test_thread_flag(TIF_NEED_FPU_LOAD)) {
set_thread_flag(TIF_NEED_FPU_LOAD);
save_fpregs_to_fpstate(&current->thread.fpu);


@@ -228,6 +228,23 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
u32 xapic_id = kvm_xapic_id(apic);
u32 physical_id;
/*
* For simplicity, KVM always allocates enough space for all possible
* xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on
* without the optimized map.
*/
if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
return -EINVAL;
/*
* Bail if a vCPU was added and/or enabled its APIC between allocating
* the map and doing the actual calculations for the map. Note, KVM
* hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
* the compiler decides to reload x2apic_id after this check.
*/
if (x2apic_id > new->max_apic_id)
return -E2BIG;
/*
* Deliberately truncate the vCPU ID when detecting a mismatched APIC
* ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
@@ -253,8 +270,7 @@ */
*/
if (vcpu->kvm->arch.x2apic_format) {
/* See also kvm_apic_match_physical_addr(). */
if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
x2apic_id <= new->max_apic_id)
if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
new->phys_map[x2apic_id] = apic;
if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])


@@ -7091,7 +7091,10 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
*/
slot = NULL;
if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
slot = gfn_to_memslot(kvm, sp->gfn);
struct kvm_memslots *slots;
slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, sp->gfn);
WARN_ON_ONCE(!slot);
}


@@ -3510,7 +3510,7 @@ static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
if (!is_vnmi_enabled(svm))
return false;
return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK);
return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
}
static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)


@@ -10758,6 +10758,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
break;
}
/* Note, VM-Exits that go down the "slow" path are accounted below. */
++vcpu->stat.exits;
}
/*


@@ -915,6 +915,7 @@ static bool disk_has_partitions(struct gendisk *disk)
void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
struct request_queue *q = disk->queue;
unsigned int old_model = q->limits.zoned;
switch (model) {
case BLK_ZONED_HM:
@@ -952,7 +953,7 @@ void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
*/
blk_queue_zone_write_granularity(q,
queue_logical_block_size(q));
} else {
} else if (old_model != BLK_ZONED_NONE) {
disk_clear_zone_settings(disk);
}
}


@@ -7,7 +7,6 @@
#ifndef APEI_INTERNAL_H
#define APEI_INTERNAL_H
#include <linux/cper.h>
#include <linux/acpi.h>
struct apei_exec_context;
@@ -130,10 +129,5 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
return sizeof(*estatus) + estatus->data_length;
}
void cper_estatus_print(const char *pfx,
const struct acpi_hest_generic_status *estatus);
int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
int apei_osc_setup(void);
#endif


@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cper.h>
#include <linux/io.h>
#include "apei-internal.h"


@@ -2694,18 +2694,36 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
return 0;
}
static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno)
{
if (!sata_pmp_attached(ap)) {
if (likely(devno >= 0 &&
devno < ata_link_max_devices(&ap->link)))
/*
* For the non-PMP case, ata_link_max_devices() returns 1 (SATA case),
* or 2 (IDE master + slave case). However, the former case includes
* libsas hosted devices which are numbered per scsi host, leading
* to devno potentially being larger than 0 but with each struct
* ata_device having its own struct ata_port and struct ata_link.
* To accommodate these, ignore devno and always use device number 0.
*/
if (likely(!sata_pmp_attached(ap))) {
int link_max_devices = ata_link_max_devices(&ap->link);
if (link_max_devices == 1)
return &ap->link.device[0];
if (devno < link_max_devices)
return &ap->link.device[devno];
} else {
if (likely(devno >= 0 &&
devno < ap->nr_pmp_links))
return &ap->pmp_link[devno].device[0];
return NULL;
}
/*
* For PMP-attached devices, the device number corresponds to C
* (channel) of SCSI [H:C:I:L], indicating the port pmp link
* for the device.
*/
if (devno < ap->nr_pmp_links)
return &ap->pmp_link[devno].device[0];
return NULL;
}


@@ -388,6 +388,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
/*
* Comparing cache IDs only makes sense if the leaves
* belong to the same cache level of same type. Skip
* the check if level and type do not match.
*/
if (sib_leaf->level != this_leaf->level ||
sib_leaf->type != this_leaf->type)
continue;
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
@@ -400,11 +410,14 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
coherency_max_size = this_leaf->coherency_line_size;
}
/* shared_cpu_map is now populated for the cpu */
this_cpu_ci->cpu_map_populated = true;
return 0;
}
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int sibling, index, sib_index;
@@ -419,6 +432,16 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
/*
* Comparing cache IDs only makes sense if the leaves
* belong to the same cache level of same type. Skip
* the check if level and type do not match.
*/
if (sib_leaf->level != this_leaf->level ||
sib_leaf->type != this_leaf->type)
continue;
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -427,6 +450,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
}
}
}
/* cpu is no longer populated in the shared map */
this_cpu_ci->cpu_map_populated = false;
}
static void free_cache_attributes(unsigned int cpu)


@@ -812,7 +812,7 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, st
char *outbuf;
alg = crypto_alloc_shash("sha256", 0, 0);
if (!alg)
if (IS_ERR(alg))
return;
sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);


@@ -4,16 +4,23 @@
# subsystems should select the appropriate symbols.
config REGMAP
bool "Register Map support" if KUNIT_ALL_TESTS
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
select IRQ_DOMAIN if REGMAP_IRQ
select MDIO_BUS if REGMAP_MDIO
bool
help
Enable support for the Register Map (regmap) access API.
Usually, this option is automatically selected when needed.
However, you may want to enable it manually for running the regmap
KUnit tests.
If unsure, say N.
config REGMAP_KUNIT
tristate "KUnit tests for regmap"
depends on KUNIT
depends on KUNIT && REGMAP
default KUNIT_ALL_TESTS
select REGMAP
select REGMAP_RAM
config REGMAP_AC97


@@ -203,15 +203,18 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
mas_for_each(&mas, entry, max) {
for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
mas_pause(&mas);
rcu_read_unlock();
ret = regcache_sync_val(map, r, entry[r - mas.index]);
if (ret != 0)
goto out;
rcu_read_lock();
}
}
out:
rcu_read_unlock();
out:
map->cache_bypass = false;
return ret;
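
Note: regcache_sync_val() may sleep, so the RCU read lock is dropped around it; an error exit therefore arrives at the label with the lock already released and must not unlock again. A pthread sketch of the same shape (names are stand-ins, not the regmap API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int sync_val(int v) { return v < 0 ? -1 : 0; } /* may block, may fail */

static int sync_range(const int *vals, int n)
{
        int ret = 0, i;

        pthread_mutex_lock(&lock);
        for (i = 0; i < n; i++) {
                pthread_mutex_unlock(&lock); /* drop lock around blocking work */
                ret = sync_val(vals[i]);
                if (ret)
                        goto out;            /* lock is NOT held here */
                pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);         /* normal path still holds it */
out:
        return ret;
}

int main(void)
{
        int ok[] = { 1, 2 }, bad[] = { 1, -1 };

        printf("%d %d\n", sync_range(ok, 2), sync_range(bad, 2)); /* 0 -1 */
        return 0;
}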

View file

@ -59,6 +59,10 @@ static int regmap_sdw_config_check(const struct regmap_config *config)
if (config->pad_bits != 0)
return -ENOTSUPP;
/* Only bulk writes are supported not multi-register writes */
if (config->can_multi_write)
return -ENOTSUPP;
return 0;
}

View file

@ -2082,6 +2082,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
size_t val_count = val_len / val_bytes;
size_t chunk_count, chunk_bytes;
size_t chunk_regs = val_count;
size_t max_data = map->max_raw_write - map->format.reg_bytes -
map->format.pad_bytes;
int ret, i;
if (!val_count)
@ -2089,8 +2091,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
if (map->use_single_write)
chunk_regs = 1;
else if (map->max_raw_write && val_len > map->max_raw_write)
chunk_regs = map->max_raw_write / val_bytes;
else if (map->max_raw_write && val_len > max_data)
chunk_regs = max_data / val_bytes;
chunk_count = val_count / chunk_regs;
chunk_bytes = chunk_regs * val_bytes;
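
Note: every raw write carries the register address and padding in the same message, so the usable payload per chunk is max_raw_write minus that header. A small arithmetic sketch with made-up sizes:

#include <stdio.h>

int main(void)
{
        unsigned int max_raw_write = 256; /* bus limit, header included */
        unsigned int reg_bytes = 2, pad_bytes = 1, val_bytes = 4;

        /* Before the fix: the payload alone was sized to the whole limit. */
        unsigned int old_chunk_regs = max_raw_write / val_bytes;
        /* After: reserve the per-message header first. */
        unsigned int max_data = max_raw_write - reg_bytes - pad_bytes;
        unsigned int new_chunk_regs = max_data / val_bytes;

        printf("old: %u regs -> %u bytes on the wire (limit %u)\n",
               old_chunk_regs,
               reg_bytes + pad_bytes + old_chunk_regs * val_bytes,
               max_raw_write); /* 64 regs -> 259 bytes, over the limit */
        printf("new: %u regs -> %u bytes on the wire\n",
               new_chunk_regs,
               reg_bytes + pad_bytes + new_chunk_regs * val_bytes); /* 255 */
        return 0;
}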

View file

@ -84,10 +84,10 @@ enum tis_defaults {
#define ILB_REMAP_SIZE 0x100
enum tpm_tis_flags {
TPM_TIS_ITPM_WORKAROUND = BIT(0),
TPM_TIS_INVALID_STATUS = BIT(1),
TPM_TIS_DEFAULT_CANCELLATION = BIT(2),
TPM_TIS_IRQ_TESTED = BIT(3),
TPM_TIS_ITPM_WORKAROUND = 0,
TPM_TIS_INVALID_STATUS = 1,
TPM_TIS_DEFAULT_CANCELLATION = 2,
TPM_TIS_IRQ_TESTED = 3,
};
struct tpm_tis_data {
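
Note: these flags are consumed by set_bit()/test_bit(), which take a bit *number*, not a mask; with BIT(n) values, test_bit(TPM_TIS_DEFAULT_CANCELLATION, ...) was probing bit 4 instead of bit 2. A userspace illustration:

#include <stdio.h>

#define BIT(n) (1UL << (n))

static int test_bit(unsigned long nr, const unsigned long *addr)
{
        return (*addr >> nr) & 1;
}

int main(void)
{
        unsigned long flags = 0;

        flags |= BIT(2); /* flag number 2 is set */

        /* Buggy: a mask passed where a bit number is expected. */
        printf("test_bit(BIT(2)): %d\n", test_bit(BIT(2), &flags)); /* bit 4: 0 */
        /* Fixed: a plain enumeration value as the bit number. */
        printf("test_bit(2):      %d\n", test_bit(2, &flags));      /* 1 */
        return 0;
}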

View file

@ -132,7 +132,7 @@
#define ATC_DST_PIP BIT(12) /* Destination Picture-in-Picture enabled */
#define ATC_SRC_DSCR_DIS BIT(16) /* Src Descriptor fetch disable */
#define ATC_DST_DSCR_DIS BIT(20) /* Dst Descriptor fetch disable */
#define ATC_FC GENMASK(22, 21) /* Choose Flow Controller */
#define ATC_FC GENMASK(23, 21) /* Choose Flow Controller */
#define ATC_FC_MEM2MEM 0x0 /* Mem-to-Mem (DMA) */
#define ATC_FC_MEM2PER 0x1 /* Mem-to-Periph (DMA) */
#define ATC_FC_PER2MEM 0x2 /* Periph-to-Mem (DMA) */
@ -153,8 +153,6 @@
#define ATC_AUTO BIT(31) /* Auto multiple buffer tx enable */
/* Bitfields in CFG */
#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */
#define ATC_SRC_PER GENMASK(3, 0) /* Channel src rq associated with periph handshaking ifc h */
#define ATC_DST_PER GENMASK(7, 4) /* Channel dst rq associated with periph handshaking ifc h */
#define ATC_SRC_REP BIT(8) /* Source Replay Mod */
@ -181,10 +179,15 @@
#define ATC_DPIP_HOLE GENMASK(15, 0)
#define ATC_DPIP_BOUNDARY GENMASK(25, 16)
#define ATC_SRC_PER_ID(id) (FIELD_PREP(ATC_SRC_PER_MSB, (id)) | \
FIELD_PREP(ATC_SRC_PER, (id)))
#define ATC_DST_PER_ID(id) (FIELD_PREP(ATC_DST_PER_MSB, (id)) | \
FIELD_PREP(ATC_DST_PER, (id)))
#define ATC_PER_MSB GENMASK(5, 4) /* Extract MSBs of a handshaking identifier */
#define ATC_SRC_PER_ID(id) \
({ typeof(id) _id = (id); \
FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \
FIELD_PREP(ATC_SRC_PER, _id); })
#define ATC_DST_PER_ID(id) \
({ typeof(id) _id = (id); \
FIELD_PREP(ATC_DST_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \
FIELD_PREP(ATC_DST_PER, _id); })
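
Note: the rewritten macros split a handshaking ID into its low nibble and its two MSBs, guarding against double evaluation with a typeof() temporary. A compilable sketch with simplified bitfield helpers; the MSB register position below is illustrative, not taken from at_hdmac.h:

#include <stdio.h>

/* Simplified forms of the linux/bits.h and linux/bitfield.h helpers. */
#define GENMASK(h, l)        (((1UL << ((h) - (l) + 1)) - 1) << (l))
#define __bf_shf(x)          (__builtin_ctzl(x))
#define FIELD_PREP(mask, v)  (((unsigned long)(v) << __bf_shf(mask)) & (mask))
#define FIELD_GET(mask, r)   (((unsigned long)(r) & (mask)) >> __bf_shf(mask))

#define ATC_SRC_PER          GENMASK(3, 0)   /* low nibble of the id */
#define ATC_SRC_PER_MSB      GENMASK(11, 10) /* assumed position of the MSBs */
#define ATC_PER_MSB          GENMASK(5, 4)   /* MSBs within the 6-bit id */

#define ATC_SRC_PER_ID(id) \
        ({ typeof(id) _id = (id); \
           FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \
           FIELD_PREP(ATC_SRC_PER, _id); })

int main(void)
{
        unsigned long id = 0x2B; /* MSBs = 0b10, low nibble = 0xB */

        printf("cfg = 0x%lx\n", ATC_SRC_PER_ID(id)); /* 0x80b */
        return 0;
}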

View file

@ -1102,6 +1102,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
NULL,
src_addr, dst_addr,
xt, xt->sgl);
if (!first)
return NULL;
/* Length of the block is (BLEN+1) microblocks. */
for (i = 0; i < xt->numf - 1; i++)
@ -1132,8 +1134,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
src_addr, dst_addr,
xt, chunk);
if (!desc) {
list_splice_tail_init(&first->descs_list,
&atchan->free_descs_list);
if (first)
list_splice_tail_init(&first->descs_list,
&atchan->free_descs_list);
return NULL;
}

View file

@ -277,7 +277,6 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
if (wq_dedicated(wq)) {
rc = idxd_wq_set_pasid(wq, pasid);
if (rc < 0) {
iommu_sva_unbind_device(sva);
dev_err(dev, "wq set pasid failed: %d\n", rc);
goto failed_set_pasid;
}

View file

@ -1050,7 +1050,7 @@ static bool _trigger(struct pl330_thread *thrd)
return true;
}
static bool _start(struct pl330_thread *thrd)
static bool pl330_start_thread(struct pl330_thread *thrd)
{
switch (_state(thrd)) {
case PL330_STATE_FAULT_COMPLETING:
@ -1702,7 +1702,7 @@ static int pl330_update(struct pl330_dmac *pl330)
thrd->req_running = -1;
/* Get going again ASAP */
_start(thrd);
pl330_start_thread(thrd);
/* For now, just make a list of callbacks to be done */
list_add_tail(&descdone->rqd, &pl330->req_done);
@ -2089,7 +2089,7 @@ static void pl330_tasklet(struct tasklet_struct *t)
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
_start(pch->thread);
pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
}
@ -2107,7 +2107,7 @@ static void pl330_tasklet(struct tasklet_struct *t)
if (power_down) {
pch->active = true;
spin_lock(&pch->thread->dmac->lock);
_start(pch->thread);
pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = false;
}

View file

@ -5527,7 +5527,7 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
static int udma_pm_suspend(struct device *dev)
static int __maybe_unused udma_pm_suspend(struct device *dev)
{
struct udma_dev *ud = dev_get_drvdata(dev);
struct dma_device *dma_dev = &ud->ddev;
@ -5549,7 +5549,7 @@ static int udma_pm_suspend(struct device *dev)
return 0;
}
static int udma_pm_resume(struct device *dev)
static int __maybe_unused udma_pm_resume(struct device *dev)
{
struct udma_dev *ud = dev_get_drvdata(dev);
struct dma_device *dma_dev = &ud->ddev;
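
Note: __maybe_unused expands to __attribute__((__unused__)); it keeps the build quiet when the PM callbacks are compiled but never referenced, presumably because the pm_ops wiring drops them in non-PM configs. A tiny sketch:

#include <stdio.h>

#define CONFIG_PM 0 /* stand-in for a config option */

static __attribute__((unused)) int pm_suspend_cb(void)
{
        return 0; /* referenced only when CONFIG_PM is set */
}

int main(void)
{
#if CONFIG_PM
        printf("suspend: %d\n", pm_suspend_cb());
#else
        printf("PM off: callback unreferenced, yet no -Wunused-function\n");
#endif
        return 0;
}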

View file

@ -32,7 +32,8 @@ zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
$(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
$(call if_changed,$(zboot-method-y))
OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \
# avoid eager evaluation to prevent references to non-existent build artifacts
OBJCOPYFLAGS_vmlinuz.o = -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \
--rename-section .data=.gzdata,load,alloc,readonly,contents
$(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE
$(call if_changed,objcopy)

View file

@ -1133,4 +1133,7 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
void efi_remap_image(unsigned long image_base, unsigned alloc_size,
unsigned long code_size);
asmlinkage efi_status_t __efiapi
efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab);
#endif

View file

@ -593,6 +593,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(9, 3, 0):
/* GC 10.3.7 */
case IP_VERSION(10, 3, 7):
/* GC 11.0.1 */
case IP_VERSION(11, 0, 1):
if (amdgpu_tmz == 0) {
adev->gmc.tmz_enabled = false;
dev_info(adev->dev,
@ -616,7 +618,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
/* YELLOW_CARP*/
case IP_VERSION(10, 3, 3):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
/* Don't enable it by default yet.
*/

View file

@ -241,6 +241,31 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r, i;
r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
return r;
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
if (r)
goto late_fini;
}
}
return 0;
late_fini:
amdgpu_ras_block_late_fini(adev, ras_block);
return r;
}
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
{
int err;
@ -262,7 +287,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
adev->jpeg.ras_if = &ras->ras_block.ras_comm;
if (!ras->ras_block.ras_late_init)
ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init;
return 0;
}

View file

@ -38,6 +38,7 @@ struct amdgpu_jpeg_reg{
struct amdgpu_jpeg_inst {
struct amdgpu_ring ring_dec;
struct amdgpu_irq_src irq;
struct amdgpu_irq_src ras_poison_irq;
struct amdgpu_jpeg_reg external;
};
@ -72,6 +73,8 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
struct ras_common_if *ras_block);
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/

View file

@ -1181,6 +1181,31 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r, i;
r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
return r;
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
if (r)
goto late_fini;
}
}
return 0;
late_fini:
amdgpu_ras_block_late_fini(adev, ras_block);
return r;
}
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
int err;
@ -1202,7 +1227,7 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
adev->vcn.ras_if = &ras->ras_block.ras_comm;
if (!ras->ras_block.ras_late_init)
ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
return 0;
}

View file

@ -234,6 +234,7 @@ struct amdgpu_vcn_inst {
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
atomic_t sched_score;
struct amdgpu_irq_src irq;
struct amdgpu_irq_src ras_poison_irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
struct dpg_pause_state pause_state;
@ -400,6 +401,8 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
struct ras_common_if *ras_block);
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
#endif

View file

@ -102,13 +102,13 @@ static int jpeg_v2_5_sw_init(void *handle)
/* JPEG DJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
if (r)
return r;
/* JPEG EJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
if (r)
return r;
}
@ -221,6 +221,9 @@ static int jpeg_v2_5_hw_fini(void *handle)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
}
return 0;
@ -569,6 +572,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@ -593,10 +604,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
case VCN_2_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
break;
case VCN_2_6__SRCID_DJPEG0_POISON:
case VCN_2_6__SRCID_EJPEG0_POISON:
amdgpu_jpeg_process_poison_irq(adev, source, entry);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@ -725,6 +732,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
.process = jpeg_v2_5_process_interrupt,
};
static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = {
.set = jpeg_v2_6_set_ras_interrupt_state,
.process = amdgpu_jpeg_process_poison_irq,
};
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@ -735,6 +747,9 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst[i].irq.num_types = 1;
adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
adev->jpeg.inst[i].ras_poison_irq.num_types = 1;
adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs;
}
}
@ -800,6 +815,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
.ras_block = {
.hw_ops = &jpeg_v2_6_ras_hw_ops,
.ras_late_init = amdgpu_jpeg_ras_late_init,
},
};

View file

@ -87,13 +87,13 @@ static int jpeg_v4_0_sw_init(void *handle)
/* JPEG DJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
/* JPEG EJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
@ -202,7 +202,8 @@ static int jpeg_v4_0_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return 0;
}
@ -670,6 +671,14 @@ static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@ -680,10 +689,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
case VCN_4_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
break;
case VCN_4_0__SRCID_DJPEG0_POISON:
case VCN_4_0__SRCID_EJPEG0_POISON:
amdgpu_jpeg_process_poison_irq(adev, source, entry);
break;
default:
DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@ -753,10 +758,18 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
.process = jpeg_v4_0_process_interrupt,
};
static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = {
.set = jpeg_v4_0_set_ras_interrupt_state,
.process = amdgpu_jpeg_process_poison_irq,
};
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.num_types = 1;
adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs;
adev->jpeg.inst->ras_poison_irq.num_types = 1;
adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
@ -811,6 +824,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
.ras_block = {
.hw_ops = &jpeg_v4_0_ras_hw_ops,
.ras_late_init = amdgpu_jpeg_ras_late_init,
},
};

View file

@ -143,7 +143,7 @@ static int vcn_v2_5_sw_init(void *handle)
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq);
VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
if (r)
return r;
}
@ -354,6 +354,9 @@ static int vcn_v2_5_hw_fini(void *handle)
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
}
return 0;
@ -1807,6 +1810,14 @@ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@ -1837,9 +1848,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
break;
case VCN_2_6__SRCID_UVD_POISON:
amdgpu_vcn_process_poison_irq(adev, source, entry);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@ -1854,6 +1862,11 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
.process = vcn_v2_5_process_interrupt,
};
static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
.set = vcn_v2_6_set_ras_interrupt_state,
.process = amdgpu_vcn_process_poison_irq,
};
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@ -1863,6 +1876,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
continue;
adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
}
}
@ -1965,6 +1981,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
static struct amdgpu_vcn_ras vcn_v2_6_ras = {
.ras_block = {
.hw_ops = &vcn_v2_6_ras_hw_ops,
.ras_late_init = amdgpu_vcn_ras_late_init,
},
};

View file

@ -139,7 +139,7 @@ static int vcn_v4_0_sw_init(void *handle)
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);
if (r)
return r;
@ -305,8 +305,8 @@ static int vcn_v4_0_hw_fini(void *handle)
vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
}
amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
}
return 0;
@ -1975,6 +1975,24 @@ static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgp
return 0;
}
/**
* vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
*
* @adev: amdgpu_device pointer
* @source: interrupt sources
* @type: interrupt types
* @state: interrupt states
*
* Set VCN block RAS interrupt state
*/
static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
/**
* vcn_v4_0_process_interrupt - process VCN block interrupt
*
@ -2007,9 +2025,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
break;
case VCN_4_0__SRCID_UVD_POISON:
amdgpu_vcn_process_poison_irq(adev, source, entry);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@ -2024,6 +2039,11 @@ static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
.process = vcn_v4_0_process_interrupt,
};
static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = {
.set = vcn_v4_0_set_ras_interrupt_state,
.process = amdgpu_vcn_process_poison_irq,
};
/**
* vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
*
@ -2041,6 +2061,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
}
}
@ -2114,6 +2137,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
static struct amdgpu_vcn_ras vcn_v4_0_ras = {
.ras_block = {
.hw_ops = &vcn_v4_0_ras_hw_ops,
.ras_late_init = amdgpu_vcn_ras_late_init,
},
};

View file

@ -2113,15 +2113,6 @@ void dcn20_optimize_bandwidth(
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
dc_dmub_srv_p_state_delegate(dc,
true, context);
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
dc->clk_mgr->clks.fw_based_mclk_switching = true;
} else {
dc->clk_mgr->clks.fw_based_mclk_switching = false;
}
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,

View file

@ -983,36 +983,13 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
}
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context)
struct dc_state *context)
{
bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
/* Any transition into an FPO config should disable MCLK switching first to avoid
* driver and FW P-State synchronization issues.
*/
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
dc->optimized_required = true;
context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
}
if (dc->clk_mgr->dc_mode_softmax_enabled)
if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
/*
* enabled -> enabled: do not disable
* enabled -> disabled: disable
* disabled -> enabled: don't care
* disabled -> disabled: don't care
*/
if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
dc_dmub_srv_p_state_delegate(dc, false, context);
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
/* After disabling P-State, restore the original value to ensure we get the correct P-State
* on the next optimize. */
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
}
}

View file

@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
static int si_set_temperature_range(struct amdgpu_device *adev)
{
int ret;
ret = si_thermal_enable_alert(adev, false);
if (ret)
return ret;
ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret)
return ret;
ret = si_thermal_enable_alert(adev, true);
if (ret)
return ret;
return ret;
}
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->pm.dpm_enabled)
return 0;
ret = si_set_temperature_range(adev);
if (ret)
return ret;
#if 0 //TODO ?
si_dpm_powergate_uvd(adev, true);
#endif
return 0;
}

View file

@ -582,7 +582,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_legacy_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int i, size = 0, ret = 0;
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@ -656,7 +656,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
case SMU_MCLK:
case SMU_FCLK:
for (i = 0; i < count; i++) {
ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
if (ret)
return ret;
if (!value)
@ -683,7 +684,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int i, size = 0, ret = 0;
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
uint32_t min, max;
@ -765,7 +766,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
case SMU_MCLK:
case SMU_FCLK:
for (i = 0; i < count; i++) {
ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
if (ret)
return ret;
if (!value)
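
Note: this reversed index recurs across several SMU drivers below; the firmware's FCLK/MCLK level tables are apparently ordered highest frequency first, so the print loop flips the index to list levels in ascending order. Minimal sketch with invented values:

#include <stdio.h>

int main(void)
{
        /* firmware DPM table, highest frequency first (illustrative) */
        const unsigned int fclk_mhz[] = { 1600, 1200, 800, 400 };
        const int count = 4;

        for (int i = 0; i < count; i++) {
                int idx = count - i - 1; /* walk the table backwards */
                printf("%d: %uMhz\n", i, fclk_mhz[idx]);
        }
        return 0;
}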

View file

@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
static int renoir_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
int i, size = 0, ret = 0;
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
case SMU_VCLK:
case SMU_DCLK:
for (i = 0; i < count; i++) {
ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value);
if (ret)
return ret;
if (!value)

View file

@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
int i, size = 0, ret = 0;
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
break;
for (i = 0; i < count; i++) {
ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value);
idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
break;

View file

@ -866,7 +866,7 @@ static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu,
static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
int i, size = 0, ret = 0;
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min = 0, max = 0;
@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
goto print_clk_out;
for (i = 0; i < count; i++) {
ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value);
idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
goto print_clk_out;

View file

@ -1000,7 +1000,7 @@ static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu,
static int yellow_carp_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
int i, size = 0, ret = 0;
int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
goto print_clk_out;
for (i = 0; i < count; i++) {
ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
goto print_clk_out;

View file

@ -877,12 +877,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
stream->oa_buffer.last_ctx_id = ctx_id;
}
/*
* Clear out the report id and timestamp as a means to detect unlanded
* reports.
*/
oa_report_id_clear(stream, report32);
oa_timestamp_clear(stream, report32);
if (is_power_of_2(report_size)) {
/*
* Clear out the report id and timestamp as a means
* to detect unlanded reports.
*/
oa_report_id_clear(stream, report32);
oa_timestamp_clear(stream, report32);
} else {
/* Zero out the entire report */
memset(report32, 0, report_size);
}
}
if (start_offset != *offset) {
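
Note: a power-of-two report size always divides the power-of-two OA buffer, so after a wrap each report lands at the same offsets and clearing just the header fields is enough to spot unlanded data; any other size can straddle old reports, hence the full memset. A quick check of the divisibility argument:

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
        const unsigned long ring = 4096; /* power-of-two OA buffer */
        const unsigned long sizes[] = { 256, 576 };

        for (int i = 0; i < 2; i++) {
                unsigned long sz = sizes[i];
                /* offsets repeat across wraps iff sz divides the ring,
                 * which every power-of-two size <= ring does */
                printf("size %4lu: power_of_2=%d divides_ring=%d\n",
                       sz, is_power_of_2(sz), ring % sz == 0);
        }
        return 0;
}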

View file

@ -586,6 +586,8 @@ static const struct hid_device_id hammer_devices[] = {
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_JEWEL) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,

View file

@ -529,6 +529,7 @@
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
#define USB_DEVICE_ID_GOOGLE_DON 0x5050
#define USB_DEVICE_ID_GOOGLE_EEL 0x5057
#define USB_DEVICE_ID_GOOGLE_JEWEL 0x5061
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f

View file

@ -314,6 +314,7 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
dbg_hid("%s:timeout waiting for response\n", __func__);
memset(response, 0, sizeof(struct hidpp_report));
ret = -ETIMEDOUT;
goto exit;
}
if (response->report_id == REPORT_ID_HIDPP_SHORT &&

View file

@ -2224,7 +2224,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
} else if (strstr(product_name, "Wacom") ||
strstr(product_name, "wacom") ||
strstr(product_name, "WACOM")) {
strscpy(name, product_name, sizeof(name));
if (strscpy(name, product_name, sizeof(name)) < 0) {
hid_warn(wacom->hdev, "String overflow while assembling device name");
}
} else {
snprintf(name, sizeof(name), "Wacom %s", product_name);
}
@ -2242,7 +2244,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
if (name[strlen(name)-1] == ' ')
name[strlen(name)-1] = '\0';
} else {
strscpy(name, features->name, sizeof(name));
if (strscpy(name, features->name, sizeof(name)) < 0) {
hid_warn(wacom->hdev, "String overflow while assembling device name");
}
}
snprintf(wacom_wac->name, sizeof(wacom_wac->name), "%s%s",
@ -2410,8 +2414,13 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
goto fail_quirks;
}
if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) {
error = hid_hw_open(hdev);
if (error) {
hid_err(hdev, "hw open failed\n");
goto fail_quirks;
}
}
wacom_set_shared_values(wacom_wac);
devres_close_group(&hdev->dev, wacom);
@ -2500,8 +2509,10 @@ static void wacom_wireless_work(struct work_struct *work)
goto fail;
}
strscpy(wacom_wac->name, wacom_wac1->name,
sizeof(wacom_wac->name));
if (strscpy(wacom_wac->name, wacom_wac1->name,
sizeof(wacom_wac->name)) < 0) {
hid_warn(wacom->hdev, "String overflow while assembling device name");
}
}
return;
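
Note: strscpy() returns -E2BIG when the source did not fit, so the added checks log the truncation instead of silently shipping a clipped device name. A simplified model of that contract:

#include <stdio.h>
#include <string.h>

#define E2BIG 7

/* Simplified strscpy(): truncating copy, always NUL-terminated,
 * returns the copied length or -E2BIG on truncation. */
static long my_strscpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (!size)
                return -E2BIG;
        if (len >= size) {
                memcpy(dst, src, size - 1);
                dst[size - 1] = '\0';
                return -E2BIG;
        }
        memcpy(dst, src, len + 1);
        return (long)len;
}

int main(void)
{
        char name[8];

        if (my_strscpy(name, "Wacom Intuos Pro L", sizeof(name)) < 0)
                fprintf(stderr, "String overflow while assembling device name\n");
        printf("name=\"%s\"\n", name); /* "Wacom I", truncated but terminated */
        return 0;
}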

View file

@ -831,7 +831,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Enter report */
if ((data[1] & 0xfc) == 0xc0) {
/* serial number of the tool */
wacom->serial[idx] = ((data[3] & 0x0f) << 28) +
wacom->serial[idx] = ((__u64)(data[3] & 0x0f) << 28) +
(data[4] << 20) + (data[5] << 12) +
(data[6] << 4) + (data[7] >> 4);
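
Note: data[3] is promoted to int, so (data[3] & 0x0f) << 28 can shift into the sign bit (undefined behavior, and in practice a negative value that gets sign-extended into the 64-bit serial); widening to __u64 before the shift avoids both. Demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned char data3 = 0x0f; /* top nibble of the serial */
        uint64_t bad, good;

        /* int arithmetic: 15 << 28 overflows int (UB); typically yields
         * 0xf0000000 as a negative int, sign-extended on widening */
        bad = (data3 & 0x0f) << 28;
        /* widen first, then shift */
        good = ((uint64_t)(data3 & 0x0f)) << 28;

        printf("bad  = 0x%016llx\n", (unsigned long long)bad);
        printf("good = 0x%016llx\n", (unsigned long long)good);
        return 0;
}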

View file

@ -1048,7 +1048,7 @@ int kx022a_probe_internal(struct device *dev)
data->ien_reg = KX022A_REG_INC4;
} else {
irq = fwnode_irq_get_byname(fwnode, "INT2");
if (irq <= 0)
if (irq < 0)
return dev_err_probe(dev, irq, "No suitable IRQ\n");
data->inc_reg = KX022A_REG_INC5;

View file

@ -1291,12 +1291,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
adev = ACPI_COMPANION(indio_dev->dev.parent);
if (!adev)
return 0;
return -ENXIO;
/* Read _ONT data, which should be a package of 6 integers. */
status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer);
if (status == AE_NOT_FOUND) {
return 0;
return -ENXIO;
} else if (ACPI_FAILURE(status)) {
dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n",
status);

View file

@ -1817,6 +1817,11 @@ static const struct clk_ops ad4130_int_clk_ops = {
.unprepare = ad4130_int_clk_unprepare,
};
static void ad4130_clk_del_provider(void *of_node)
{
of_clk_del_provider(of_node);
}
static int ad4130_setup_int_clk(struct ad4130_state *st)
{
struct device *dev = &st->spi->dev;
@ -1824,6 +1829,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
struct clk_init_data init;
const char *clk_name;
struct clk *clk;
int ret;
if (st->int_pin_sel == AD4130_INT_PIN_CLK ||
st->mclk_sel != AD4130_MCLK_76_8KHZ)
@ -1843,7 +1849,11 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
if (IS_ERR(clk))
return PTR_ERR(clk);
return of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
if (ret)
return ret;
return devm_add_action_or_reset(dev, ad4130_clk_del_provider, of_node);
}
static int ad4130_setup(struct iio_dev *indio_dev)

View file

@ -897,10 +897,6 @@ static const struct iio_info ad7195_info = {
__AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \
BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \
__AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \
BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
#define AD719x_TEMP_CHANNEL(_si, _address) \
__AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL)
@ -908,7 +904,7 @@ static const struct iio_chan_spec ad7192_channels[] = {
AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M),
AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M),
AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP),
AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M),
AD719x_DIFF_CHANNEL(3, 2, 2, AD7192_CH_AIN2P_AIN2M),
AD719x_CHANNEL(4, 1, AD7192_CH_AIN1),
AD719x_CHANNEL(5, 2, AD7192_CH_AIN2),
AD719x_CHANNEL(6, 3, AD7192_CH_AIN3),
@ -922,7 +918,7 @@ static const struct iio_chan_spec ad7193_channels[] = {
AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M),
AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M),
AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP),
AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M),
AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M),
AD719x_CHANNEL(6, 1, AD7193_CH_AIN1),
AD719x_CHANNEL(7, 2, AD7193_CH_AIN2),
AD719x_CHANNEL(8, 3, AD7193_CH_AIN3),

View file

@ -584,6 +584,10 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
init_completion(&sigma_delta->completion);
sigma_delta->irq_dis = true;
/* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */
irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, sigma_delta->spi->irq,
ad_sd_data_rdy_trig_poll,
sigma_delta->info->irq_flags | IRQF_NO_AUTOEN,

View file

@ -236,8 +236,7 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
{
struct imx93_adc *adc = iio_priv(indio_dev);
struct device *dev = adc->dev;
long ret;
u32 vref_uv;
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@ -253,10 +252,10 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
ret = vref_uv = regulator_get_voltage(adc->vref);
ret = regulator_get_voltage(adc->vref);
if (ret < 0)
return ret;
*val = vref_uv / 1000;
*val = ret / 1000;
*val2 = 12;
return IIO_VAL_FRACTIONAL_LOG2;
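
Note: the old chained assignment laundered a negative errno through a u32 before ret saw it, so on 64-bit the ret < 0 test never fired. Sketch (regulator call stubbed out):

#include <stdio.h>

static int regulator_get_voltage_stub(void) { return -22; /* -EINVAL */ }

int main(void)
{
        long ret_old;
        unsigned int vref_uv;
        int ret_new;

        /* Before: error -> u32 -> long; positive on a 64-bit long. */
        ret_old = vref_uv = regulator_get_voltage_stub();
        /* After: keep the int return code intact. */
        ret_new = regulator_get_voltage_stub();

        printf("old: ret=%ld (<0? %d)\n", ret_old, ret_old < 0); /* 4294967274, no */
        printf("new: ret=%d (<0? %d)\n", ret_new, ret_new < 0);  /* -22, yes */
        return 0;
}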

View file

@ -19,6 +19,7 @@
#include <dt-bindings/iio/adc/mediatek,mt6370_adc.h>
#define MT6370_REG_DEV_INFO 0x100
#define MT6370_REG_CHG_CTRL3 0x113
#define MT6370_REG_CHG_CTRL7 0x117
#define MT6370_REG_CHG_ADC 0x121
@ -27,6 +28,7 @@
#define MT6370_ADC_START_MASK BIT(0)
#define MT6370_ADC_IN_SEL_MASK GENMASK(7, 4)
#define MT6370_AICR_ICHG_MASK GENMASK(7, 2)
#define MT6370_VENID_MASK GENMASK(7, 4)
#define MT6370_AICR_100_mA 0x0
#define MT6370_AICR_150_mA 0x1
@ -47,6 +49,10 @@
#define ADC_CONV_TIME_MS 35
#define ADC_CONV_POLLING_TIME_US 1000
#define MT6370_VID_RT5081 0x8
#define MT6370_VID_RT5081A 0xA
#define MT6370_VID_MT6370 0xE
struct mt6370_adc_data {
struct device *dev;
struct regmap *regmap;
@ -55,6 +61,7 @@ struct mt6370_adc_data {
* from being read at the same time.
*/
struct mutex adc_lock;
unsigned int vid;
};
static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan,
@ -98,6 +105,30 @@ static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan,
return ret;
}
static int mt6370_adc_get_ibus_scale(struct mt6370_adc_data *priv)
{
switch (priv->vid) {
case MT6370_VID_RT5081:
case MT6370_VID_RT5081A:
case MT6370_VID_MT6370:
return 3350;
default:
return 3875;
}
}
static int mt6370_adc_get_ibat_scale(struct mt6370_adc_data *priv)
{
switch (priv->vid) {
case MT6370_VID_RT5081:
case MT6370_VID_RT5081A:
case MT6370_VID_MT6370:
return 2680;
default:
return 3870;
}
}
static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
int chan, int *val1, int *val2)
{
@ -123,7 +154,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
case MT6370_AICR_250_mA:
case MT6370_AICR_300_mA:
case MT6370_AICR_350_mA:
*val1 = 3350;
*val1 = mt6370_adc_get_ibus_scale(priv);
break;
default:
*val1 = 5000;
@ -150,7 +181,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
case MT6370_ICHG_600_mA:
case MT6370_ICHG_700_mA:
case MT6370_ICHG_800_mA:
*val1 = 2680;
*val1 = mt6370_adc_get_ibat_scale(priv);
break;
default:
*val1 = 5000;
@ -251,6 +282,20 @@ static const struct iio_chan_spec mt6370_adc_channels[] = {
MT6370_ADC_CHAN(TEMP_JC, IIO_TEMP, 12, BIT(IIO_CHAN_INFO_OFFSET)),
};
static int mt6370_get_vendor_info(struct mt6370_adc_data *priv)
{
unsigned int dev_info;
int ret;
ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info);
if (ret)
return ret;
priv->vid = FIELD_GET(MT6370_VENID_MASK, dev_info);
return 0;
}
static int mt6370_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@ -272,6 +317,10 @@ static int mt6370_adc_probe(struct platform_device *pdev)
priv->regmap = regmap;
mutex_init(&priv->adc_lock);
ret = mt6370_get_vendor_info(priv);
if (ret)
return dev_err_probe(dev, ret, "Failed to get vid\n");
ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, 0);
if (ret)
return dev_err_probe(dev, ret, "Failed to reset ADC\n");

View file

@ -757,13 +757,13 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
ret = mxs_lradc_adc_trigger_init(iio);
if (ret)
goto err_trig;
return ret;
ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
&mxs_lradc_adc_trigger_handler,
&mxs_lradc_adc_buffer_ops);
if (ret)
return ret;
goto err_trig;
adc->vref_mv = mxs_lradc_adc_vref_mv[lradc->soc];
@ -801,9 +801,9 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
err_dev:
mxs_lradc_adc_hw_stop(adc);
mxs_lradc_adc_trigger_remove(iio);
err_trig:
iio_triggered_buffer_cleanup(iio);
err_trig:
mxs_lradc_adc_trigger_remove(iio);
return ret;
}
@ -814,8 +814,8 @@ static int mxs_lradc_adc_remove(struct platform_device *pdev)
iio_device_unregister(iio);
mxs_lradc_adc_hw_stop(adc);
mxs_lradc_adc_trigger_remove(iio);
iio_triggered_buffer_cleanup(iio);
mxs_lradc_adc_trigger_remove(iio);
return 0;
}
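
Note: the reordered labels restore the rule that error paths unwind in exact reverse order of setup, so buffer cleanup pairs with buffer setup and trigger removal pairs with trigger init. A generic sketch of the pattern:

#include <stdio.h>

static int step(const char *name, int fail)
{
        printf("init %s\n", name);
        return fail ? -1 : 0;
}

static void undo(const char *name) { printf("undo %s\n", name); }

static int probe(int fail_at)
{
        int ret;

        ret = step("trigger", fail_at == 1);
        if (ret)
                return ret;     /* nothing to undo yet */
        ret = step("buffer", fail_at == 2);
        if (ret)
                goto err_trig;
        ret = step("device", fail_at == 3);
        if (ret)
                goto err_buf;
        return 0;

err_buf:
        undo("buffer");         /* reverse order: last init undone first */
err_trig:
        undo("trigger");
        return ret;
}

int main(void)
{
        return probe(3) ? 1 : 0; /* init x3, then undo buffer, undo trigger */
}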

View file

@ -547,7 +547,7 @@ static int palmas_gpadc_read_raw(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret = 0;
if (adc_chan > PALMAS_ADC_CH_MAX)
if (adc_chan >= PALMAS_ADC_CH_MAX)
return -EINVAL;
mutex_lock(&adc->lock);
@ -595,7 +595,7 @@ static int palmas_gpadc_read_event_config(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret = 0;
if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
@ -684,7 +684,7 @@ static int palmas_gpadc_write_event_config(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret;
if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
@ -710,7 +710,7 @@ static int palmas_gpadc_read_event_value(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret;
if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
@ -744,7 +744,7 @@ static int palmas_gpadc_write_event_value(struct iio_dev *indio_dev,
int old;
int ret;
if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
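
Note: PALMAS_ADC_CH_MAX is a channel count, so valid indices run 0..MAX-1; the old '>' comparison let adc_chan == MAX through and indexed one slot past the per-channel tables. Sketch:

#include <stdio.h>

#define ADC_CH_MAX 16 /* number of channels, not the last valid index */

static int ch_info[ADC_CH_MAX];

static int read_raw(int adc_chan)
{
        if (adc_chan >= ADC_CH_MAX) /* old code used '>' */
                return -22;         /* -EINVAL */
        return ch_info[adc_chan];
}

int main(void)
{
        printf("chan 15: %d\n", read_raw(15)); /* last valid channel */
        printf("chan 16: %d\n", read_raw(16)); /* rejected, no OOB read */
        return 0;
}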

Some files were not shown because too many files have changed in this diff