Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
  commit 077cdda764 ("net/mlx5e: TC, Fix memory leak with rules with internal port")
  commit 31108d142f ("net/mlx5: Fix some error handling paths in 'mlx5e_tc_add_fdb_flow()'")
  commit 4390c6edc0 ("net/mlx5: Fix some error handling paths in 'mlx5e_tc_add_fdb_flow()'")
  https://lore.kernel.org/all/20211229065352.30178-1-saeed@kernel.org/

net/smc/smc_wr.c
  commit 49dc9013e3 ("net/smc: Use the bitmap API when applicable")
  commit 349d43127d ("net/smc: fix kernel panic caused by race of smc_sock")
  bitmap_zero()/memset() is removed by the fix
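
  For context, a hedged sketch (not part of the merge; the helper name is
  hypothetical, other names as in net/smc/smc_wr.c): the two idioms below
  clear the same wait bitmap, which is why dropping the converted line is
  the whole resolution.

	/* Both calls clear lnk->wr_tx_mask: 49dc9013e3 rewrote the
	 * open-coded memset() as bitmap_zero(), and 349d43127d then
	 * removed the clearing entirely.
	 */
	static void smc_wr_clear_mask_sketch(struct smc_link *lnk)
	{
		bitmap_zero(lnk->wr_tx_mask, SMC_WR_BUF_CNT);
		memset(lnk->wr_tx_mask, 0,
		       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(unsigned long));
	}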

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit aec53e60e0 (Jakub Kicinski, 2021-12-30 12:12:12 -08:00)
265 changed files with 2237 additions and 1192 deletions

@@ -1689,6 +1689,8 @@
architectures force reset to be always executed
i8042.unlock [HW] Unlock (ignore) the keylock
i8042.kbdreset [HW] Reset device connected to KBD port
i8042.probe_defer
[HW] Allow deferred probing upon i8042 probe errors
i810= [HW,DRM]
@@ -2413,8 +2415,12 @@
Default is 1 (enabled)
kvm-intel.emulate_invalid_guest_state=
[KVM,Intel] Enable emulation of invalid guest states
Default is 0 (disabled)
[KVM,Intel] Disable emulation of invalid guest state.
Ignored if kvm-intel.enable_unrestricted_guest=1, as
guest state is never invalid for unrestricted guests.
This param doesn't apply to nested guests (L2), as KVM
never emulates invalid L2 guest state.
Default is 1 (enabled)
kvm-intel.flexpriority=
[KVM,Intel] Disable FlexPriority feature (TPR shadow).

@@ -51,6 +51,19 @@ patternProperties:
description:
Properties for single BUCK regulator.
properties:
op_mode:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2, 3]
default: 1
description: |
Describes the different operating modes of the regulator with power
mode change in SOC. The different possible values are:
0 - always off mode
1 - on in normal mode
2 - low power mode
3 - suspend mode
required:
- regulator-name
@@ -63,6 +76,18 @@ patternProperties:
Properties for single BUCK regulator.
properties:
op_mode:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2, 3]
default: 1
description: |
Describes the different operating modes of the regulator with power
mode change in SOC. The different possible values are:
0 - always off mode
1 - on in normal mode
2 - low power mode
3 - suspend mode
s5m8767,pmic-ext-control-gpios:
maxItems: 1
description: |

@@ -25,7 +25,8 @@ ip_default_ttl - INTEGER
ip_no_pmtu_disc - INTEGER
Disable Path MTU Discovery. If enabled in mode 1 and a
fragmentation-required ICMP is received, the PMTU to this
destination will be set to min_pmtu (see below). You will need
destination will be set to the smallest of the old MTU to
this destination and min_pmtu (see below). You will need
to raise min_pmtu to the smallest interface MTU on your system
manually if you want to avoid locally generated fragments.
@@ -49,7 +50,8 @@ ip_no_pmtu_disc - INTEGER
Default: FALSE
min_pmtu - INTEGER
default 552 - minimum discovered Path MTU
default 552 - minimum Path MTU. Unless this is changed manually,
each cached pmtu will never be lower than this setting.
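
Worked example (illustrative numbers): with min_pmtu at its default of
552, a fragmentation-required ICMP for a destination previously cached
at 1500 sets the cached PMTU to min(1500, 552) = 552.
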
ip_forward_use_pmtu - BOOLEAN
By default we don't trust protocol path MTUs while forwarding

@@ -326,6 +326,8 @@ usi-headset
Headset support on USI machines
dual-codecs
Lenovo laptops with dual codecs
alc285-hp-amp-init
HP laptops which require speaker amplifier initialization (ALC285)
ALC680
======

@@ -14852,7 +14852,7 @@ PCIE DRIVER FOR MEDIATEK
M: Ryder Lee <ryder.lee@mediatek.com>
M: Jianjun Wang <jianjun.wang@mediatek.com>
L: linux-pci@vger.kernel.org
L: linux-mediatek@lists.infradead.org
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/pci/mediatek*
F: drivers/pci/controller/*mediatek*
@@ -17438,7 +17438,7 @@ F: drivers/video/fbdev/sm712*
SILVACO I3C DUAL-ROLE MASTER
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Conor Culhane <conor.culhane@silvaco.com>
L: linux-i3c@lists.infradead.org
L: linux-i3c@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
F: drivers/i3c/master/svc-i3c-master.c
@@ -21074,7 +21074,7 @@ S: Maintained
F: arch/x86/kernel/cpu/zhaoxin.c
ZONEFS FILESYSTEM
M: Damien Le Moal <damien.lemoal@wdc.com>
M: Damien Le Moal <damien.lemoal@opensource.wdc.com>
M: Naohiro Aota <naohiro.aota@wdc.com>
R: Johannes Thumshirn <jth@kernel.org>
L: linux-fsdevel@vger.kernel.org

@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Gobble Gobble
# *DOCUMENTATION*

@@ -309,6 +309,7 @@ mdio {
ethphy: ethernet-phy@1 {
reg = <1>;
qca,clk-out-frequency = <125000000>;
};
};
};

@@ -17,7 +17,6 @@
#ifdef CONFIG_EFI
void efi_init(void);
extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);

@@ -596,11 +596,9 @@ call_fpe:
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
reteq lr
and r8, r0, #0x00000f00 @ mask out CP number
THUMB( lsr r8, r8, #8 )
mov r7, #1
add r6, r10, #TI_USED_CP
ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
add r6, r10, r8, lsr #8 @ add used_cp[] array offset first
strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
@@ -609,7 +607,7 @@ call_fpe:
bcs iwmmxt_task_enable
#endif
ARM( add pc, pc, r8, lsr #6 )
THUMB( lsl r8, r8, #2 )
THUMB( lsr r8, r8, #6 )
THUMB( add pc, r8 )
nop

@@ -114,6 +114,7 @@ ENTRY(secondary_startup)
add r12, r12, r10
ret r12
1: bl __after_proc_init
ldr r7, __secondary_data @ reload r7
ldr sp, [r7, #12] @ set up the stack pointer
ldr r0, [r7, #16] @ set up task pointer
mov fp, #0

@@ -69,7 +69,7 @@ &emac {
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
status = "okay";
};

@@ -719,7 +719,7 @@ i2c0: i2c@2000000 {
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
scl-gpio = <&gpio2 15 GPIO_ACTIVE_HIGH>;
scl-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
@@ -768,7 +768,7 @@ i2c4: i2c@2040000 {
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
scl-gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>;
scl-gpios = <&gpio2 16 GPIO_ACTIVE_HIGH>;
status = "disabled";
};

@@ -14,7 +14,6 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
#else
#define efi_init()
#endif

@@ -6,5 +6,7 @@
#define PCI_IOSIZE SZ_64K
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
#define pci_remap_iospace pci_remap_iospace
#include <asm/mach-generic/spaces.h>
#endif

@@ -20,10 +20,6 @@
#include <linux/list.h>
#include <linux/of.h>
#ifdef CONFIG_PCI_DRIVERS_GENERIC
#define pci_remap_iospace pci_remap_iospace
#endif
#ifdef CONFIG_PCI_DRIVERS_LEGACY
/*

@@ -47,6 +47,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
pci_read_bridge_bases(bus);
}
#ifdef pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
unsigned long vaddr;
@@ -60,3 +61,4 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
set_io_port_base(vaddr);
return 0;
}
#endif

@@ -85,11 +85,6 @@ config MMU
config STACK_GROWSUP
def_bool y
config ARCH_DEFCONFIG
string
default "arch/parisc/configs/generic-32bit_defconfig" if !64BIT
default "arch/parisc/configs/generic-64bit_defconfig" if 64BIT
config GENERIC_LOCKBREAK
bool
default y

@@ -14,7 +14,7 @@ static inline void
_futex_spin_lock(u32 __user *uaddr)
{
extern u32 lws_lock_start[];
long index = ((long)uaddr & 0x3f8) >> 1;
long index = ((long)uaddr & 0x7f8) >> 1;
arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
preempt_disable();
arch_spin_lock(s);
@@ -24,7 +24,7 @@ static inline void
_futex_spin_unlock(u32 __user *uaddr)
{
extern u32 lws_lock_start[];
long index = ((long)uaddr & 0x3f8) >> 1;
long index = ((long)uaddr & 0x7f8) >> 1;
arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
arch_spin_unlock(s);
preempt_enable();
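
Aside, a hedged sketch of the hash above (not part of the diff; the
helper name is hypothetical): the widened 0x7f8 mask takes address bits
3..10, selecting one of 256 locks spaced 16 bytes apart in
lws_lock_start[], where the old 0x3f8 mask only ever reached the first
128 of them.

	static arch_spinlock_t *futex_spinlock_sketch(u32 __user *uaddr)
	{
		extern u32 lws_lock_start[];
		long index = ((long)uaddr & 0x7f8) >> 1;	/* 0, 4, 8, ..., 1020 */

		return (arch_spinlock_t *)&lws_lock_start[index];
	}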

@@ -472,7 +472,7 @@ lws_start:
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
* the bottom of sp temporarily */
or,ev %r1,%r30,%r30
or,od %r1,%r30,%r30
/* Clip LWS number to a 32-bit value for 32-bit processes */
depdi 0, 31, 32, %r20

@@ -730,6 +730,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}
mmap_read_unlock(current->mm);
}
/* CPU could not fetch instruction, so clear stale IIR value. */
regs->iir = 0xbaadf00d;
fallthrough;
case 27:
/* Data memory protection ID trap */

@@ -422,11 +422,17 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
const char *name)
{
long reladdr;
func_desc_t desc;
int i;
if (is_mprofile_ftrace_call(name))
return create_ftrace_stub(entry, addr, me);
memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
for (i = 0; i < sizeof(ppc64_stub_insns) / sizeof(u32); i++) {
if (patch_instruction(&entry->jump[i],
ppc_inst(ppc64_stub_insns[i])))
return 0;
}
/* Stub uses address relative to r2. */
reladdr = (unsigned long)entry - my_r2(sechdrs, me);
@@ -437,10 +443,24 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
}
pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
entry->jump[0] |= PPC_HA(reladdr);
entry->jump[1] |= PPC_LO(reladdr);
entry->funcdata = func_desc(addr);
entry->magic = STUB_MAGIC;
if (patch_instruction(&entry->jump[0],
ppc_inst(entry->jump[0] | PPC_HA(reladdr))))
return 0;
if (patch_instruction(&entry->jump[1],
ppc_inst(entry->jump[1] | PPC_LO(reladdr))))
return 0;
// func_desc_t is 8 bytes if ABIv2, else 16 bytes
desc = func_desc(addr);
for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) {
if (patch_instruction(((u32 *)&entry->funcdata) + i,
ppc_inst(((u32 *)(&desc))[i])))
return 0;
}
if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC)))
return 0;
return 1;
}
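
Aside, not part of the diff: the "8 bytes if ABIv2, else 16 bytes"
comment refers to the two func_desc_t layouts module_64.c chooses
between, sketched below from memory, so treat it as illustrative.

	#ifdef PPC64_ELF_ABI_v2
	typedef unsigned long func_desc_t;		/* 8 bytes: the entry address */
	#else
	typedef struct ppc64_opd_entry func_desc_t;	/* 16 bytes: funcaddr plus r2 */
	#endif
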
@@ -495,8 +515,11 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
me->name, *instruction, instruction);
return 0;
}
/* ld r2,R2_STACK_OFFSET(r1) */
*instruction = PPC_INST_LD_TOC;
if (patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC)))
return 0;
return 1;
}
@@ -636,9 +659,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
/* Only replace bits 2 through 26 */
*(uint32_t *)location
= (*(uint32_t *)location & ~0x03fffffc)
value = (*(uint32_t *)location & ~0x03fffffc)
| (value & 0x03fffffc);
if (patch_instruction((u32 *)location, ppc_inst(value)))
return -EFAULT;
break;
case R_PPC64_REL64:

@@ -183,7 +183,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
pte_t pte = __pte(st->current_flags);
if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx)
return;
if (!pte_write(pte) || !pte_exec(pte))

@@ -220,7 +220,7 @@ static int smp_85xx_start_cpu(int cpu)
local_irq_save(flags);
hard_irq_disable();
if (qoriq_pm_ops)
if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
qoriq_pm_ops->cpu_up_prepare(cpu);
/* if cpu is not spinning, reset it */
@@ -292,7 +292,7 @@ static int smp_85xx_kick_cpu(int nr)
booting_thread_hwid = cpu_thread_in_core(nr);
primary = cpu_first_thread_sibling(nr);
if (qoriq_pm_ops)
if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
qoriq_pm_ops->cpu_up_prepare(nr);
/*

@@ -76,6 +76,7 @@ mmc@0 {
spi-max-frequency = <20000000>;
voltage-ranges = <3300 3300>;
disable-wp;
gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
};
};

@@ -2,6 +2,7 @@
/* Copyright (c) 2020 SiFive, Inc */
#include "fu740-c000.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
/* Clock frequency (in Hz) of the PCB crystal for rtcclk */
@@ -54,10 +55,21 @@ &i2c0 {
temperature-sensor@4c {
compatible = "ti,tmp451";
reg = <0x4c>;
vcc-supply = <&vdd_bpro>;
interrupt-parent = <&gpio>;
interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
};
eeprom@54 {
compatible = "microchip,24c02", "atmel,24c02";
reg = <0x54>;
vcc-supply = <&vdd_bpro>;
label = "board-id";
pagesize = <16>;
read-only;
size = <256>;
};
pmic@58 {
compatible = "dlg,da9063";
reg = <0x58>;
@@ -65,48 +77,44 @@ pmic@58 {
interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
regulators {
vdd_bcore1: bcore1 {
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <900000>;
regulator-min-microamp = <5000000>;
regulator-max-microamp = <5000000>;
regulator-always-on;
};
onkey {
compatible = "dlg,da9063-onkey";
};
vdd_bcore2: bcore2 {
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <900000>;
regulator-min-microamp = <5000000>;
regulator-max-microamp = <5000000>;
rtc {
compatible = "dlg,da9063-rtc";
};
wdt {
compatible = "dlg,da9063-watchdog";
};
regulators {
vdd_bcore: bcores-merged {
regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1050000>;
regulator-min-microamp = <4800000>;
regulator-max-microamp = <4800000>;
regulator-always-on;
};
vdd_bpro: bpro {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-min-microamp = <2500000>;
regulator-max-microamp = <2500000>;
regulator-min-microamp = <2400000>;
regulator-max-microamp = <2400000>;
regulator-always-on;
};
vdd_bperi: bperi {
regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1050000>;
regulator-min-microvolt = <1060000>;
regulator-max-microvolt = <1060000>;
regulator-min-microamp = <1500000>;
regulator-max-microamp = <1500000>;
regulator-always-on;
};
vdd_bmem: bmem {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-min-microamp = <3000000>;
regulator-max-microamp = <3000000>;
regulator-always-on;
};
vdd_bio: bio {
vdd_bmem_bio: bmem-bio-merged {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-min-microamp = <3000000>;
@@ -117,86 +125,66 @@ vdd_bio: bio {
vdd_ldo1: ldo1 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-min-microamp = <100000>;
regulator-max-microamp = <100000>;
regulator-always-on;
};
vdd_ldo2: ldo2 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-min-microamp = <200000>;
regulator-max-microamp = <200000>;
regulator-always-on;
};
vdd_ldo3: ldo3 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-min-microamp = <200000>;
regulator-max-microamp = <200000>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ldo4: ldo4 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-min-microamp = <200000>;
regulator-max-microamp = <200000>;
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
regulator-always-on;
};
vdd_ldo5: ldo5 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-min-microamp = <100000>;
regulator-max-microamp = <100000>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ldo6: ldo6 {
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-min-microamp = <200000>;
regulator-max-microamp = <200000>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
};
vdd_ldo7: ldo7 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-min-microamp = <200000>;
regulator-max-microamp = <200000>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ldo8: ldo8 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-min-microamp = <200000>;
regulator-max-microamp = <200000>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ld09: ldo9 {
regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1050000>;
regulator-min-microamp = <200000>;
regulator-max-microamp = <200000>;
regulator-always-on;
};
vdd_ldo10: ldo10 {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
regulator-min-microamp = <300000>;
regulator-max-microamp = <300000>;
regulator-always-on;
};
vdd_ldo11: ldo11 {
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
regulator-min-microamp = <300000>;
regulator-max-microamp = <300000>;
regulator-always-on;
};
};
@@ -223,6 +211,7 @@ mmc@0 {
spi-max-frequency = <20000000>;
voltage-ranges = <3300 3300>;
disable-wp;
gpios = <&gpio 15 GPIO_ACTIVE_LOW>;
};
};
@@ -245,4 +234,8 @@ &pwm1 {
&gpio {
status = "okay";
gpio-line-names = "J29.1", "PMICNTB", "PMICSHDN", "J8.1", "J8.3",
"PCIe_PWREN", "THERM", "UBRDG_RSTN", "PCIe_PERSTN",
"ULPI_RSTN", "J8.2", "UHUB_RSTN", "GEMGXL_RST", "J8.4",
"EN_VDD_SD", "SD_CD";
};

@@ -13,7 +13,6 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
#else
#define efi_init()
#endif

@@ -197,8 +197,6 @@ static inline bool efi_runtime_supported(void)
extern void parse_efi_setup(u64 phys_addr, u32 data_len);
extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
unsigned long descriptor_size,

@@ -47,6 +47,7 @@ KVM_X86_OP(set_dr7)
KVM_X86_OP(cache_reg)
KVM_X86_OP(get_rflags)
KVM_X86_OP(set_rflags)
KVM_X86_OP(get_if_flag)
KVM_X86_OP(tlb_flush_all)
KVM_X86_OP(tlb_flush_current)
KVM_X86_OP_NULL(tlb_remote_flush)

@@ -1349,6 +1349,7 @@ struct kvm_x86_ops {
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
bool (*get_if_flag)(struct kvm_vcpu *vcpu);
void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
void (*tlb_flush_current)(struct kvm_vcpu *vcpu);

@@ -4,8 +4,8 @@
#include <asm/cpufeature.h>
#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_AD_BIT 0x1u
#define PKRU_WD_BIT 0x2u
#define PKRU_BITS_PER_PKEY 2
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
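
Aside, a hedged illustration of what the "u" suffix fixes (the helper is
hypothetical): for pkey 15 the write-disable bit is bit 31, and
left-shifting the old signed 0x2 constant into the sign bit is undefined
behaviour in C, while 0x2u makes it a well-defined unsigned shift.

	static inline u32 pkru_wd_mask_sketch(int pkey)
	{
		/* pkey 15: shift by 30 puts PKRU_WD_BIT at bit 31 */
		return PKRU_WD_BIT << (pkey * PKRU_BITS_PER_PKEY);
	}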

@@ -713,9 +713,6 @@ static void __init early_reserve_memory(void)
early_reserve_initrd();
if (efi_enabled(EFI_BOOT))
efi_memblock_x86_reserve_range();
memblock_x86_reserve_range_setup_data();
reserve_ibft_region();
@@ -742,28 +739,6 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
return 0;
}
static char * __init prepare_command_line(void)
{
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
if (builtin_cmdline[0]) {
/* append boot loader cmdline to builtin */
strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
}
#endif
#endif
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
parse_early_param();
return command_line;
}
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -852,23 +827,6 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
/*
* x86_configure_nx() is called before parse_early_param() (called by
* prepare_command_line()) to detect whether hardware doesn't support
* NX (so that the early EHCI debug console setup can safely call
* set_fixmap()). It may then be called again from within noexec_setup()
* during parsing early parameters to honor the respective command line
* option.
*/
x86_configure_nx();
/*
* This parses early params and it needs to run before
* early_reserve_memory() because latter relies on such settings
* supplied as early params.
*/
*cmdline_p = prepare_command_line();
/*
* Do some memory reservations *before* memory is added to memblock, so
* memblock allocations won't overwrite it.
@@ -902,6 +860,36 @@ void __init setup_arch(char **cmdline_p)
bss_resource.start = __pa_symbol(__bss_start);
bss_resource.end = __pa_symbol(__bss_stop)-1;
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
if (builtin_cmdline[0]) {
/* append boot loader cmdline to builtin */
strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
}
#endif
#endif
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
/*
* x86_configure_nx() is called before parse_early_param() to detect
* whether hardware doesn't support NX (so that the early EHCI debug
* console setup can safely call set_fixmap()). It may then be called
* again from within noexec_setup() during parsing early parameters
* to honor the respective command line option.
*/
x86_configure_nx();
parse_early_param();
if (efi_enabled(EFI_BOOT))
efi_memblock_x86_reserve_range();
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory used by the kernel cannot be hot-removed because Linux

@@ -3987,7 +3987,21 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault, int mmu_seq)
{
if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root_hpa);
/* Special roots, e.g. pae_root, are not backed by shadow pages. */
if (sp && is_obsolete_sp(vcpu->kvm, sp))
return true;
/*
* Roots without an associated shadow page are considered invalid if
* there is a pending request to free obsolete roots. The request is
* only a hint that the current root _may_ be obsolete and needs to be
* reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
* previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
* to reload even if no vCPU is actively using the root.
*/
if (!sp && kvm_test_request(KVM_REQ_MMU_RELOAD, vcpu))
return true;
return fault->slot &&

@@ -26,6 +26,7 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
*/
void tdp_iter_restart(struct tdp_iter *iter)
{
iter->yielded = false;
iter->yielded_gfn = iter->next_last_level_gfn;
iter->level = iter->root_level;
@@ -160,6 +161,11 @@ static bool try_step_up(struct tdp_iter *iter)
*/
void tdp_iter_next(struct tdp_iter *iter)
{
if (iter->yielded) {
tdp_iter_restart(iter);
return;
}
if (try_step_down(iter))
return;

@@ -45,6 +45,12 @@ struct tdp_iter {
* iterator walks off the end of the paging structure.
*/
bool valid;
/*
* True if KVM dropped mmu_lock and yielded in the middle of a walk, in
* which case tdp_iter_next() needs to restart the walk at the root
* level instead of advancing to the next entry.
*/
bool yielded;
};
/*

@@ -502,6 +502,8 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
struct tdp_iter *iter,
u64 new_spte)
{
WARN_ON_ONCE(iter->yielded);
lockdep_assert_held_read(&kvm->mmu_lock);
/*
@@ -575,6 +577,8 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
u64 new_spte, bool record_acc_track,
bool record_dirty_log)
{
WARN_ON_ONCE(iter->yielded);
lockdep_assert_held_write(&kvm->mmu_lock);
/*
@@ -640,18 +644,19 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
* If this function should yield and flush is set, it will perform a remote
* TLB flush before yielding.
*
* If this function yields, it will also reset the tdp_iter's walk over the
* paging structure and the calling function should skip to the next
* iteration to allow the iterator to continue its traversal from the
* paging structure root.
* If this function yields, iter->yielded is set and the caller must skip to
* the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
* over the paging structures to allow the iterator to continue its traversal
* from the paging structure root.
*
* Return true if this function yielded and the iterator's traversal was reset.
* Return false if a yield was not needed.
* Returns true if this function yielded.
*/
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
struct tdp_iter *iter, bool flush,
bool shared)
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
struct tdp_iter *iter,
bool flush, bool shared)
{
WARN_ON(iter->yielded);
/* Ensure forward progress has been made before yielding. */
if (iter->next_last_level_gfn == iter->yielded_gfn)
return false;
@@ -671,12 +676,10 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
WARN_ON(iter->gfn > iter->next_last_level_gfn);
tdp_iter_restart(iter);
return true;
iter->yielded = true;
}
return false;
return iter->yielded;
}
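
A hedged sketch of the caller contract described above; the loop macro
and flags mirror how the tdp_mmu.c call sites look, simplified:

	tdp_root_for_each_pte(iter, root, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared))
			continue;	/* iter.yielded set; tdp_iter_next() restarts */

		/* ... process the SPTE at iter.sptep ... */
	}
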
/*

@@ -1585,6 +1585,15 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
to_svm(vcpu)->vmcb->save.rflags = rflags;
}
static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
{
struct vmcb *vmcb = to_svm(vcpu)->vmcb;
return sev_es_guest(vcpu->kvm)
? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
: kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
}
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
switch (reg) {
@@ -3568,14 +3577,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
if (!gif_set(svm))
return true;
if (sev_es_guest(vcpu->kvm)) {
/*
* SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask
* bit to determine the state of the IF flag.
*/
if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK))
return true;
} else if (is_guest_mode(vcpu)) {
if (is_guest_mode(vcpu)) {
/* As long as interrupts are being delivered... */
if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
@@ -3586,7 +3588,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
if (nested_exit_on_intr(svm))
return false;
} else {
if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
if (!svm_get_if_flag(vcpu))
return true;
}
@@ -4621,6 +4623,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
.get_if_flag = svm_get_if_flag,
.tlb_flush_all = svm_flush_tlb,
.tlb_flush_current = svm_flush_tlb,

@@ -1363,6 +1363,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vmx->emulation_required = vmx_emulation_required(vcpu);
}
static bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
{
return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
}
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@@ -3959,8 +3964,7 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
if (pi_test_and_set_on(&vmx->pi_desc))
return 0;
if (vcpu != kvm_get_running_vcpu() &&
!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
kvm_vcpu_kick(vcpu);
return 0;
@@ -5877,18 +5881,14 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
vmx_flush_pml_buffer(vcpu);
/*
* We should never reach this point with a pending nested VM-Enter, and
* more specifically emulation of L2 due to invalid guest state (see
* below) should never happen as that means we incorrectly allowed a
* nested VM-Enter with an invalid vmcs12.
* KVM should never reach this point with a pending nested VM-Enter.
* More specifically, short-circuiting VM-Entry to emulate L2 due to
* invalid guest state should never happen as that means KVM knowingly
* allowed a nested VM-Enter with an invalid vmcs12. More below.
*/
if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
return -EIO;
/* If guest state is invalid, start emulating */
if (vmx->emulation_required)
return handle_invalid_guest_state(vcpu);
if (is_guest_mode(vcpu)) {
/*
* PML is never enabled when running L2, bail immediately if a
@@ -5910,10 +5910,30 @@
*/
nested_mark_vmcs12_pages_dirty(vcpu);
/*
* Synthesize a triple fault if L2 state is invalid. In normal
* operation, nested VM-Enter rejects any attempt to enter L2
* with invalid state. However, those checks are skipped if
* state is being stuffed via RSM or KVM_SET_NESTED_STATE. If
* L2 state is invalid, it means either L1 modified SMRAM state
* or userspace provided bad state. Synthesize TRIPLE_FAULT as
* doing so is architecturally allowed in the RSM case, and is
* the least awful solution for the userspace case without
* risking false positives.
*/
if (vmx->emulation_required) {
nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
return 1;
}
if (nested_vmx_reflect_vmexit(vcpu))
return 1;
}
/* If guest state is invalid, start emulating. L2 is handled above. */
if (vmx->emulation_required)
return handle_invalid_guest_state(vcpu);
if (exit_reason.failed_vmentry) {
dump_vmcs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -6608,9 +6628,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
* consistency check VM-Exit due to invalid guest state and bail.
*/
if (unlikely(vmx->emulation_required)) {
/* We don't emulate invalid state of a nested guest */
vmx->fail = is_guest_mode(vcpu);
vmx->fail = 0;
vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
vmx->exit_reason.failed_vmentry = 1;
@@ -7579,6 +7597,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.cache_reg = vmx_cache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
.get_if_flag = vmx_get_if_flag,
.tlb_flush_all = vmx_flush_tlb_all,
.tlb_flush_current = vmx_flush_tlb_current,

@@ -1331,7 +1331,7 @@ static const u32 msrs_to_save_all[] = {
MSR_IA32_UMWAIT_CONTROL,
MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
@@ -3413,7 +3413,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated)
return 1;
if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) && kvm_get_msr_feature(&msr_ent))
if (kvm_get_msr_feature(&msr_ent))
return 1;
if (data & ~msr_ent.data)
return 1;
@@ -9001,14 +9001,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
/*
* if_flag is obsolete and useless, so do not bother
* setting it for SEV-ES guests. Userspace can just
* use kvm_run->ready_for_interrupt_injection.
*/
kvm_run->if_flag = !vcpu->arch.guest_state_protected
&& (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu);
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);

@@ -2311,7 +2311,14 @@ static void ioc_timer_fn(struct timer_list *timer)
hwm = current_hweight_max(iocg);
new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
usage, &now);
if (new_hwi < hwm) {
/*
* Donation calculation assumes hweight_after_donation
* to be positive, a condition that a donor w/ hwa < 2
* can't meet. Don't bother with donation if hwa is
* below 2. It's not gonna make a meaningful difference
* anyway.
*/
if (new_hwi < hwm && hwa >= 2) {
iocg->hweight_donating = hwa;
iocg->hweight_after_donation = new_hwi;
list_add(&iocg->surplus_list, &surpluses);

@@ -671,7 +671,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",

@@ -2859,8 +2859,19 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
goto invalid_fld;
}
if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0)
tf->protocol = ATA_PROT_NCQ_NODATA;
if ((cdb[2 + cdb_offset] & 0x3) == 0) {
/*
* When T_LENGTH is zero (No data is transferred), dir should
* be DMA_NONE.
*/
if (scmd->sc_data_direction != DMA_NONE) {
fp = 2 + cdb_offset;
goto invalid_fld;
}
if (ata_is_ncq(tf->protocol))
tf->protocol = ATA_PROT_NCQ_NODATA;
}
/* enable LBA */
tf->flags |= ATA_TFLAG_LBA;

@@ -37,7 +37,7 @@ struct charlcd_priv {
bool must_clear;
/* contains the LCD config state */
unsigned long int flags;
unsigned long flags;
/* Current escape sequence and its length, or -1 if outside */
struct {
@@ -578,6 +578,9 @@ static int charlcd_init(struct charlcd *lcd)
* Since charlcd_init_display() needs to write data, we have to
* enable mark the LCD initialized just before.
*/
if (WARN_ON(!lcd->ops->init_display))
return -EINVAL;
ret = lcd->ops->init_display(lcd);
if (ret)
return ret;

@@ -1902,7 +1902,7 @@ int dpm_prepare(pm_message_t state)
device_block_probing();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_list)) {
while (!list_empty(&dpm_list) && !error) {
struct device *dev = to_device(dpm_list.next);
get_device(dev);

@@ -1512,9 +1512,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
unsigned long flags;
struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
struct blkfront_info *info = rinfo->dev_info;
unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
return IRQ_HANDLED;
}
spin_lock_irqsave(&rinfo->ring_lock, flags);
again:
@@ -1530,6 +1533,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
unsigned long id;
unsigned int op;
eoiflag = 0;
RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
id = bret.id;
@@ -1646,6 +1651,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
xen_irq_lateeoi(irq, eoiflag);
return IRQ_HANDLED;
err:
@@ -1653,6 +1660,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
/* No EOI in order to avoid further interrupts. */
pr_alert("%s disabled for further use\n", info->gd->disk_name);
return IRQ_HANDLED;
}
@@ -1692,8 +1701,8 @@ static int setup_blkring(struct xenbus_device *dev,
if (err)
goto fail;
err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
"blkif", rinfo);
err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
0, "blkif", rinfo);
if (err <= 0) {
xenbus_dev_fatal(dev, err,
"bind_evtchn_to_irqhandler failed");

@@ -687,11 +687,11 @@ static int sunxi_rsb_hw_init(struct sunxi_rsb *rsb)
static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
{
/* Keep the clock and PM reference counts consistent. */
if (pm_runtime_status_suspended(rsb->dev))
pm_runtime_resume(rsb->dev);
reset_control_assert(rsb->rstc);
clk_disable_unprepare(rsb->clk);
/* Keep the clock and PM reference counts consistent. */
if (!pm_runtime_status_suspended(rsb->dev))
clk_disable_unprepare(rsb->clk);
}
static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)

@@ -3031,7 +3031,7 @@ cleanup_bmc_device(struct kref *ref)
* with removing the device attributes while reading a device
* attribute.
*/
schedule_work(&bmc->remove_work);
queue_work(remove_work_wq, &bmc->remove_work);
}
/*
@@ -5392,22 +5392,27 @@ static int ipmi_init_msghandler(void)
if (initialized)
goto out;
init_srcu_struct(&ipmi_interfaces_srcu);
rv = init_srcu_struct(&ipmi_interfaces_srcu);
if (rv)
goto out;
remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
if (!remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
goto out_wq;
}
timer_setup(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
if (!remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
goto out;
}
initialized = true;
out_wq:
if (rv)
cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;

@@ -1659,6 +1659,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
ssif_info->client = client;
i2c_set_clientdata(client, ssif_info);
rv = ssif_check_and_remove(client, ssif_info);
/* If rv is 0 and addr source is not SI_ACPI, continue probing */
if (!rv && ssif_info->addr_source == SI_ACPI) {
@@ -1679,9 +1682,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
ssif_info->client = client;
i2c_set_clientdata(client, ssif_info);
/* Now check for system interface capabilities */
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
@@ -1881,6 +1881,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_err(&ssif_info->client->dev,
"Unable to start IPMI SSIF: %d\n", rv);
i2c_set_clientdata(client, NULL);
kfree(ssif_info);
}
kfree(resp);

@@ -211,6 +211,12 @@ static u32 uof_get_ae_mask(u32 obj_num)
return adf_4xxx_fw_config[obj_num].ae_mask;
}
static u32 get_vf2pf_sources(void __iomem *pmisc_addr)
{
/* For the moment do not report vf2pf sources */
return 0;
}
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &adf_4xxx_class;
@@ -254,6 +260,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->enable_pfvf_comms = pfvf_comms_disabled;
hw_data->get_vf2pf_sources = get_vf2pf_sources;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;

@@ -373,7 +373,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
struct axi_dma_desc *first)
{
u32 priority = chan->chip->dw->hdata->priority[chan->id];
struct axi_dma_chan_config config;
struct axi_dma_chan_config config = {};
u32 irq_mask;
u8 lms = 0; /* Select AXI0 master for LLI fetching */
@@ -391,7 +391,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
config.prior = priority;
config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
switch (chan->direction) {
case DMA_MEM_TO_DEV:
dw_axi_dma_set_byte_halfword(chan, true);

@@ -187,17 +187,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* DMA configuration */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
if (err) {
pci_err(pdev, "DMA mask 64 set failed\n");
return err;
} else {
pci_err(pdev, "DMA mask 64 set failed\n");
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
pci_err(pdev, "DMA mask 32 set failed\n");
return err;
}
}
/* Data structure allocation */

@@ -137,10 +137,10 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
INIT_WORK(&idxd->work, idxd_device_reinit);
queue_work(idxd->wq, &idxd->work);
} else {
spin_lock(&idxd->dev_lock);
idxd->state = IDXD_DEV_HALTED;
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",

@@ -106,6 +106,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
{
struct idxd_desc *d, *t, *found = NULL;
struct llist_node *head;
LIST_HEAD(flist);
desc->completion->status = IDXD_COMP_DESC_ABORT;
/*
@@ -120,7 +121,11 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
found = desc;
continue;
}
list_add_tail(&desc->list, &ie->work_list);
if (d->completion->status)
list_add_tail(&d->list, &flist);
else
list_add_tail(&d->list, &ie->work_list);
}
}
@@ -130,6 +135,17 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
if (found)
complete_desc(found, IDXD_COMPLETE_ABORT);
/*
* complete_desc() will return desc to allocator and the desc can be
* acquired by a different process and the desc->list can be modified.
* Delete desc from list so the list traversing does not get corrupted
* by the other process.
*/
list_for_each_entry_safe(d, t, &flist, list) {
list_del_init(&d->list);
complete_desc(d, IDXD_COMPLETE_NORMAL);
}
}
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)

@@ -874,4 +874,4 @@ MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_ALIAS("platform: " DRIVER_NAME);
MODULE_ALIAS("platform:" DRIVER_NAME);

@@ -4534,45 +4534,60 @@ static int udma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
irq_res.sets = 1;
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tchan_map,
&rm_res->desc[i], "tchan");
irq_res.sets = rm_res->sets;
}
irq_res.sets = rm_res->sets;
/* rchan and matching default flow ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
irq_res.sets++;
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rchan_map,
&rm_res->desc[i], "rchan");
irq_res.sets += rm_res->sets;
}
irq_res.sets += rm_res->sets;
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
if (!irq_res.desc)
return -ENOMEM;
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start;
irq_res.desc[i].num = rm_res->desc[i].num;
irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
if (IS_ERR(rm_res)) {
irq_res.desc[0].start = 0;
irq_res.desc[0].num = ud->tchan_cnt;
i = 1;
} else {
for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start;
irq_res.desc[i].num = rm_res->desc[i].num;
irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
}
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
for (j = 0; j < rm_res->sets; j++, i++) {
if (rm_res->desc[j].num) {
irq_res.desc[i].start = rm_res->desc[j].start +
ud->soc_data->oes.udma_rchan;
irq_res.desc[i].num = rm_res->desc[j].num;
}
if (rm_res->desc[j].num_sec) {
irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
ud->soc_data->oes.udma_rchan;
irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
if (IS_ERR(rm_res)) {
irq_res.desc[i].start = 0;
irq_res.desc[i].num = ud->rchan_cnt;
} else {
for (j = 0; j < rm_res->sets; j++, i++) {
if (rm_res->desc[j].num) {
irq_res.desc[i].start = rm_res->desc[j].start +
ud->soc_data->oes.udma_rchan;
irq_res.desc[i].num = rm_res->desc[j].num;
}
if (rm_res->desc[j].num_sec) {
irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
ud->soc_data->oes.udma_rchan;
irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
}
}
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -4690,14 +4705,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->bchan_map, ud->bchan_cnt);
irq_res.sets++;
} else {
bitmap_fill(ud->bchan_map, ud->bchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->bchan_map,
&rm_res->desc[i],
"bchan");
irq_res.sets += rm_res->sets;
}
irq_res.sets += rm_res->sets;
}
/* tchan ranges */
@@ -4705,14 +4721,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
irq_res.sets += 2;
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tchan_map,
&rm_res->desc[i],
"tchan");
irq_res.sets += rm_res->sets * 2;
}
irq_res.sets += rm_res->sets * 2;
}
/* rchan ranges */
@@ -4720,47 +4737,72 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
irq_res.sets += 2;
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rchan_map,
&rm_res->desc[i],
"rchan");
irq_res.sets += rm_res->sets * 2;
}
irq_res.sets += rm_res->sets * 2;
}
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
if (!irq_res.desc)
return -ENOMEM;
if (ud->bchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start +
oes->bcdma_bchan_ring;
irq_res.desc[i].num = rm_res->desc[i].num;
if (IS_ERR(rm_res)) {
irq_res.desc[0].start = oes->bcdma_bchan_ring;
irq_res.desc[0].num = ud->bchan_cnt;
i = 1;
} else {
for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start +
oes->bcdma_bchan_ring;
irq_res.desc[i].num = rm_res->desc[i].num;
}
}
}
if (ud->tchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
for (j = 0; j < rm_res->sets; j++, i += 2) {
irq_res.desc[i].start = rm_res->desc[j].start +
oes->bcdma_tchan_data;
irq_res.desc[i].num = rm_res->desc[j].num;
if (IS_ERR(rm_res)) {
irq_res.desc[i].start = oes->bcdma_tchan_data;
irq_res.desc[i].num = ud->tchan_cnt;
irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
irq_res.desc[i + 1].num = ud->tchan_cnt;
i += 2;
} else {
for (j = 0; j < rm_res->sets; j++, i += 2) {
irq_res.desc[i].start = rm_res->desc[j].start +
oes->bcdma_tchan_data;
irq_res.desc[i].num = rm_res->desc[j].num;
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_tchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_tchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
}
}
}
if (ud->rchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
for (j = 0; j < rm_res->sets; j++, i += 2) {
irq_res.desc[i].start = rm_res->desc[j].start +
oes->bcdma_rchan_data;
irq_res.desc[i].num = rm_res->desc[j].num;
if (IS_ERR(rm_res)) {
irq_res.desc[i].start = oes->bcdma_rchan_data;
irq_res.desc[i].num = ud->rchan_cnt;
irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
irq_res.desc[i + 1].num = ud->rchan_cnt;
i += 2;
} else {
for (j = 0; j < rm_res->sets; j++, i += 2) {
irq_res.desc[i].start = rm_res->desc[j].start +
oes->bcdma_rchan_data;
irq_res.desc[i].num = rm_res->desc[j].num;
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_rchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_rchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
}
}
}
@@ -4858,39 +4900,54 @@ static int pktdma_setup_resources(struct udma_dev *ud)
if (IS_ERR(rm_res)) {
/* all rflows are assigned exclusively to Linux */
bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
irq_res.sets = 1;
} else {
bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rflow_in_use,
&rm_res->desc[i], "rflow");
irq_res.sets = rm_res->sets;
}
irq_res.sets = rm_res->sets;
/* tflow ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
if (IS_ERR(rm_res)) {
/* all tflows are assigned exclusively to Linux */
bitmap_zero(ud->tflow_map, ud->tflow_cnt);
irq_res.sets++;
} else {
bitmap_fill(ud->tflow_map, ud->tflow_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tflow_map,
&rm_res->desc[i], "tflow");
irq_res.sets += rm_res->sets;
}
irq_res.sets += rm_res->sets;
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
if (!irq_res.desc)
return -ENOMEM;
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start +
oes->pktdma_tchan_flow;
irq_res.desc[i].num = rm_res->desc[i].num;
if (IS_ERR(rm_res)) {
irq_res.desc[0].start = oes->pktdma_tchan_flow;
irq_res.desc[0].num = ud->tflow_cnt;
i = 1;
} else {
for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start +
oes->pktdma_tchan_flow;
irq_res.desc[i].num = rm_res->desc[i].num;
}
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
for (j = 0; j < rm_res->sets; j++, i++) {
irq_res.desc[i].start = rm_res->desc[j].start +
oes->pktdma_rchan_flow;
irq_res.desc[i].num = rm_res->desc[j].num;
if (IS_ERR(rm_res)) {
irq_res.desc[i].start = oes->pktdma_rchan_flow;
irq_res.desc[i].num = ud->rflow_cnt;
} else {
for (j = 0; j < rm_res->sets; j++, i++) {
irq_res.desc[i].start = rm_res->desc[j].start +
oes->pktdma_rchan_flow;
irq_res.desc[i].num = rm_res->desc[j].num;
}
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
kfree(irq_res.desc);

@@ -46,6 +46,7 @@
struct dln2_gpio {
struct platform_device *pdev;
struct gpio_chip gpio;
struct irq_chip irqchip;
/*
* Cache pin direction to save us one transfer, since the hardware has
@@ -383,15 +384,6 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd)
mutex_unlock(&dln2->irq_lock);
}
static struct irq_chip dln2_gpio_irqchip = {
.name = "dln2-irq",
.irq_mask = dln2_irq_mask,
.irq_unmask = dln2_irq_unmask,
.irq_set_type = dln2_irq_set_type,
.irq_bus_lock = dln2_irq_bus_lock,
.irq_bus_sync_unlock = dln2_irq_bus_unlock,
};
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
const void *data, int len)
{
@@ -473,8 +465,15 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.direction_output = dln2_gpio_direction_output;
dln2->gpio.set_config = dln2_gpio_set_config;
dln2->irqchip.name = "dln2-irq",
dln2->irqchip.irq_mask = dln2_irq_mask,
dln2->irqchip.irq_unmask = dln2_irq_unmask,
dln2->irqchip.irq_set_type = dln2_irq_set_type,
dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock,
dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock,
girq = &dln2->gpio.irq;
girq->chip = &dln2_gpio_irqchip;
girq->chip = &dln2->irqchip;
/* The event comes from the outside so no parent handler */
girq->parent_handler = NULL;
girq->num_parents = 0;
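
A hedged note on the change above: gpiolib writes to the irq_chip it is
handed, so sharing one static struct irq_chip between several dln2
instances trips the "detected irqchip that is shared with multiple
gpiochips" warning; giving each instance its own copy inside struct
dln2_gpio is the usual fix.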

@@ -100,11 +100,7 @@ static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
virtqueue_kick(vgpio->request_vq);
mutex_unlock(&vgpio->lock);
if (!wait_for_completion_timeout(&line->completion, HZ)) {
dev_err(dev, "GPIO operation timed out\n");
ret = -ETIMEDOUT;
goto out;
}
wait_for_completion(&line->completion);
if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) {
dev_err(dev, "GPIO request failed: %d\n", gpio);

@@ -3070,8 +3070,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS)) {
WREG32(mmRLC_JUMP_TABLE_RESTORE,
adev->gfx.rlc.cp_table_gpu_addr >> 8);
WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v9_0_init_gfx_power_gating(adev);
}
}

@@ -162,7 +162,6 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

@@ -196,7 +196,6 @@ static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */

@@ -197,7 +197,6 @@ static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */

@@ -1808,6 +1808,14 @@ static int gmc_v9_0_hw_fini(void *handle)
return 0;
}
/*
* Pair the operations done in gmc_v9_0_hw_init and thus maintain
* a correct cached state for GMC. Otherwise, the "gate" again
* operation on S3 resume will fail due to the wrong cached state.
*/
if (adev->mmhub.funcs->update_power_gating)
adev->mmhub.funcs->update_power_gating(adev, false);
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

@@ -145,7 +145,6 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
@@ -302,10 +301,10 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return;
if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
}
if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GMC,
enable);
}
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)

@@ -165,7 +165,6 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

@@ -267,7 +267,6 @@ static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */

@@ -194,7 +194,6 @@ static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */

@@ -189,8 +189,6 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,

@@ -1051,6 +1051,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
}
/* Reset DMCUB if it was previously running - before we overwrite its memory. */
status = dmub_srv_hw_reset(dmub_srv);
if (status != DMUB_STATUS_OK)
DRM_WARN("Error resetting DMUB HW: %d\n", status);
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
fw_inst_const = dmub_fw->data +

@@ -101,6 +101,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};

@@ -1328,7 +1328,12 @@ static int pp_set_powergating_by_smu(void *handle,
pp_dpm_powergate_vce(handle, gate);
break;
case AMD_IP_BLOCK_TYPE_GMC:
pp_dpm_powergate_mmhub(handle);
/*
* For now, this is only used on PICASSO.
* And only "gate" operation is supported.
*/
if (gate)
pp_dpm_powergate_mmhub(handle);
break;
case AMD_IP_BLOCK_TYPE_GFX:
ret = pp_dpm_powergate_gfx(handle, gate);
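Together with the mmhub_v1_0_update_power_gating() hunk above, this forms one fix: the mmhub side used to call into the SMU only when enable was true, so an ungate request was never forwarded at all; now the request is always forwarded and the powerplay side itself drops the unsupported ungate on PICASSO. A minimal standalone model of the corrected split (stand-in names and behavior, not the real driver code):

#include <stdbool.h>
#include <stdio.h>

#define PG_SUPPORT_MMHUB 1

/* Stand-in for the SMU call; only gating is honoured, as in
 * pp_set_powergating_by_smu() for PICASSO. */
static void set_powergating_by_smu(bool gate)
{
	if (gate)
		printf("mmhub gated\n");
	/* ungate: silently ignored by the callee */
}

static void update_power_gating(unsigned int pg_flags, bool enable)
{
	/* Old code read "if (enable && (pg_flags & PG_SUPPORT_MMHUB))",
	 * which never forwarded an ungate request at all. */
	if (pg_flags & PG_SUPPORT_MMHUB)
		set_powergating_by_smu(enable);
}

int main(void)
{
	update_power_gating(PG_SUPPORT_MMHUB, true);	/* gates */
	update_power_gating(PG_SUPPORT_MMHUB, false);	/* forwarded, ignored */
	return 0;
}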

View file

@ -191,6 +191,9 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
kfree(smu_table->gpu_metrics_table);
smu_table->gpu_metrics_table = NULL;
return 0;
}

View file

@ -198,6 +198,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
int smu_v13_0_check_fw_version(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
uint8_t smu_minor, smu_debug;
@ -210,6 +211,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_major = (smu_version >> 16) & 0xffff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
if (smu->is_apu)
adev->pm.fw_version = smu_version;
switch (smu->adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):

View file

@ -1121,7 +1121,10 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
if (ast_state)
__drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
else
__drm_atomic_helper_crtc_reset(crtc, NULL);
}
static struct drm_crtc_state *

View file

@ -1743,7 +1743,13 @@ void drm_fb_helper_fill_info(struct fb_info *info,
sizes->fb_width, sizes->fb_height);
info->par = fb_helper;
snprintf(info->fix.id, sizeof(info->fix.id), "%s",
/*
* The DRM drivers' fbdev emulation device name can be confusing if the
* driver name also has a "drm" suffix on it, leading to names such as
* "simpledrmdrmfb" in /proc/fb. Unfortunately, it's an uAPI and can't
* be changed due to user-space tools (e.g. pm-utils) matching against it.
*/
snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
fb_helper->dev->driver->name);
}

View file

@ -596,7 +596,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
continue;
offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
if (fw->size - offset < 0) {
if (offset > fw->size) {
drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
continue;
}
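The replaced check was an unsigned-arithmetic bug: fw->size and offset are unsigned, so fw->size - offset can never be negative; when offset exceeds fw->size the subtraction wraps to a huge positive value and the error path never runs. A minimal standalone demonstration:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t fw_size = 100, offset = 120;

	/* Wraps around: prints a huge value, and (fw_size - offset < 0)
	 * is always false because the result is unsigned. */
	printf("%zu\n", fw_size - offset);

	/* The fixed form compares without subtracting. */
	if (offset > fw_size)
		printf("reading beyond the fw size\n");
	return 0;
}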

View file

@ -1662,11 +1662,11 @@ static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
GEM_BUG_ON(intel_context_is_parent(cn));
list_del_init(&cn->guc_id.link);
ce->guc_id = cn->guc_id;
ce->guc_id.id = cn->guc_id.id;
spin_lock(&ce->guc_state.lock);
spin_lock(&cn->guc_state.lock);
clr_context_registered(cn);
spin_unlock(&ce->guc_state.lock);
spin_unlock(&cn->guc_state.lock);
set_context_guc_id_invalid(cn);

View file

@ -1224,12 +1224,14 @@ static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_BAD;
}
if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
return MODE_BAD;
if (hdmi->conf) {
if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
return MODE_BAD;
if (hdmi->conf->max_mode_clock &&
mode->clock > hdmi->conf->max_mode_clock)
return MODE_CLOCK_HIGH;
if (hdmi->conf->max_mode_clock &&
mode->clock > hdmi->conf->max_mode_clock)
return MODE_CLOCK_HIGH;
}
if (mode->clock < 27000)
return MODE_CLOCK_LOW;

View file

@ -458,7 +458,7 @@ static struct drm_display_mode simpledrm_mode(unsigned int width,
{
struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) };
mode.clock = 60 /* Hz */ * mode.hdisplay * mode.vdisplay;
mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000 /* kHz */;
drm_mode_set_name(&mode);
return mode;
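mode.clock is expressed in kHz, so the old expression was off by a factor of 1000: it computed a pixel rate in Hz. Worked through for an illustrative 1920x1080 mode in a standalone check:

#include <stdio.h>

int main(void)
{
	int hdisplay = 1920, vdisplay = 1080;	/* illustrative mode */

	/* Old: a pixel rate in Hz, stored in a field that expects kHz. */
	long wrong = 60L * hdisplay * vdisplay;			/* 124416000 */

	/* New: divide by 1000 to get kHz. */
	long clock_khz = (long)hdisplay * vdisplay * 60 / 1000;	/* 124416 */

	printf("%ld Hz vs %ld kHz\n", wrong, clock_khz);
	return 0;
}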

View file

@ -65,8 +65,23 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static int holtek_mouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
int ret;
if (!hid_is_usb(hdev))
return -EINVAL;
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "hid parse failed: %d\n", ret);
return ret;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed: %d\n", ret);
return ret;
}
return 0;
}

View file

@ -57,6 +57,9 @@ static int vivaldi_probe(struct hid_device *hdev,
int ret;
drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
hid_set_drvdata(hdev, drvdata);
ret = hid_parse(hdev);

View file

@ -35,13 +35,14 @@
* explicitly as max6659, or if its address is not 0x4c.
* These chips lack the remote temperature offset feature.
*
* This driver also supports the MAX6654 chip made by Maxim. This chip can
* be at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is
* otherwise similar to MAX6657/MAX6658/MAX6659. Extended range is available
* by setting the configuration register accordingly, and is done during
* initialization. Extended precision is only available at conversion rates
* of 1 Hz and slower. Note that extended precision is not enabled by
* default, as this driver initializes all chips to 2 Hz by design.
* This driver also supports the MAX6654 chip made by Maxim. This chip can be
* at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is similar
* to MAX6657/MAX6658/MAX6659, but does not support critical temperature
* limits. Extended range is available by setting the configuration register
* accordingly, and is done during initialization. Extended precision is only
* available at conversion rates of 1 Hz and slower. Note that extended
* precision is not enabled by default, as this driver initializes all chips
* to 2 Hz by design.
*
* This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
* MAX6692 chips made by Maxim. These are again similar to the LM86,
@ -188,6 +189,8 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
#define LM90_HAVE_EXTENDED_TEMP (1 << 8) /* extended temperature support*/
#define LM90_PAUSE_FOR_CONFIG (1 << 9) /* Pause conversion for config */
#define LM90_HAVE_CRIT (1 << 10)/* Chip supports CRIT/OVERT register */
#define LM90_HAVE_CRIT_ALRM_SWP (1 << 11)/* critical alarm bits swapped */
/* LM90 status */
#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */
@ -197,6 +200,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_STATUS_RHIGH (1 << 4) /* remote high temp limit tripped */
#define LM90_STATUS_LLOW (1 << 5) /* local low temp limit tripped */
#define LM90_STATUS_LHIGH (1 << 6) /* local high temp limit tripped */
#define LM90_STATUS_BUSY (1 << 7) /* conversion is ongoing */
#define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */
#define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */
@ -354,38 +358,43 @@ struct lm90_params {
static const struct lm90_params lm90_params[] = {
[adm1032] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_BROKEN_ALERT,
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 10,
},
[adt7461] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP
| LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 10,
},
[g781] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_BROKEN_ALERT,
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
},
[lm86] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
},
[lm90] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
},
[lm99] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
},
[max6646] = {
.flags = LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@ -396,50 +405,51 @@ static const struct lm90_params lm90_params[] = {
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6657] = {
.flags = LM90_PAUSE_FOR_CONFIG,
.flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6659] = {
.flags = LM90_HAVE_EMERGENCY,
.flags = LM90_HAVE_EMERGENCY | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6680] = {
.flags = LM90_HAVE_OFFSET,
.flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
| LM90_HAVE_CRIT_ALRM_SWP,
.alert_alarms = 0x7c,
.max_convrate = 7,
},
[max6696] = {
.flags = LM90_HAVE_EMERGENCY
| LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
| LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT,
.alert_alarms = 0x1c7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[w83l771] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
},
[sa56004] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
.reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
},
[tmp451] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 9,
.reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
},
[tmp461] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 9,
.reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
@ -668,20 +678,22 @@ static int lm90_update_limits(struct device *dev)
struct i2c_client *client = data->client;
int val;
val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
if (val < 0)
return val;
data->temp8[LOCAL_CRIT] = val;
if (data->flags & LM90_HAVE_CRIT) {
val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
if (val < 0)
return val;
data->temp8[LOCAL_CRIT] = val;
val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
if (val < 0)
return val;
data->temp8[REMOTE_CRIT] = val;
val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
if (val < 0)
return val;
data->temp8[REMOTE_CRIT] = val;
val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
if (val < 0)
return val;
data->temp_hyst = val;
val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
if (val < 0)
return val;
data->temp_hyst = val;
}
val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
if (val < 0)
@ -809,7 +821,7 @@ static int lm90_update_device(struct device *dev)
val = lm90_read_reg(client, LM90_REG_R_STATUS);
if (val < 0)
return val;
data->alarms = val; /* lower 8 bit of alarms */
data->alarms = val & ~LM90_STATUS_BUSY;
if (data->kind == max6696) {
val = lm90_select_remote_channel(data, 1);
@ -1160,8 +1172,8 @@ static int lm90_set_temphyst(struct lm90_data *data, long val)
else
temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
/* prevent integer underflow */
val = max(val, -128000l);
/* prevent integer overflow/underflow */
val = clamp_val(val, -128000l, 255000l);
data->temp_hyst = hyst_to_reg(temp - val);
err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
@ -1192,6 +1204,7 @@ static const u8 lm90_temp_emerg_index[3] = {
static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
static const u8 lm90_crit_alarm_bits_swapped[3] = { 1, 0, 9 };
static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
@ -1217,7 +1230,10 @@ static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val)
*val = (data->alarms >> lm90_max_alarm_bits[channel]) & 1;
break;
case hwmon_temp_crit_alarm:
*val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
if (data->flags & LM90_HAVE_CRIT_ALRM_SWP)
*val = (data->alarms >> lm90_crit_alarm_bits_swapped[channel]) & 1;
else
*val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
break;
case hwmon_temp_emergency_alarm:
*val = (data->alarms >> lm90_emergency_alarm_bits[channel]) & 1;
@ -1465,12 +1481,11 @@ static int lm90_detect(struct i2c_client *client,
if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0)
return -ENODEV;
if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) {
config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2);
if (config2 < 0)
return -ENODEV;
} else
config2 = 0; /* Make compiler happy */
}
if ((address == 0x4C || address == 0x4D)
&& man_id == 0x01) { /* National Semiconductor */
@ -1903,11 +1918,14 @@ static int lm90_probe(struct i2c_client *client)
info->config = data->channel_config;
data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM;
HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM;
data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT;
HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_FAULT;
if (data->flags & LM90_HAVE_CRIT) {
data->channel_config[0] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
data->channel_config[1] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
}
if (data->flags & LM90_HAVE_OFFSET)
data->channel_config[1] |= HWMON_T_OFFSET;
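The effect of the new flag is that critical-limit attributes stop being unconditional: for a chip whose entry lacks LM90_HAVE_CRIT (the updated header comment above singles out the MAX6654), HWMON_T_CRIT, HWMON_T_CRIT_ALARM and HWMON_T_CRIT_HYST are never ORed into the channel config, so the matching sysfs attributes are never created. A reduced standalone sketch of the pattern, with illustrative bit values rather than the real hwmon ones:

#include <stdio.h>

/* Illustrative stand-ins for the hwmon channel bits and chip flags. */
#define HWMON_T_INPUT		(1u << 0)
#define HWMON_T_CRIT		(1u << 1)
#define HWMON_T_CRIT_ALARM	(1u << 2)
#define HWMON_T_CRIT_HYST	(1u << 3)
#define LM90_HAVE_CRIT		(1u << 10)

int main(void)
{
	unsigned int flags = 0;			/* e.g. a MAX6654-like chip */
	unsigned int config = HWMON_T_INPUT;	/* attributes every chip has */

	if (flags & LM90_HAVE_CRIT)
		config |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;

	printf("config = 0x%x\n", config);	/* 0x1: no crit attributes */
	return 0;
}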

View file

@ -1594,11 +1594,17 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
u32 clock_cycles_of_1us;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
false);
hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
else
clock_cycles_of_1us = HNS_ROCE_1US_CFG;
hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
return hns_roce_cmq_send(hr_dev, &desc, 1);
@ -4802,6 +4808,30 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
return ret;
}
static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
{
#define QP_ACK_TIMEOUT_MAX_HIP08 20
#define QP_ACK_TIMEOUT_OFFSET 10
#define QP_ACK_TIMEOUT_MAX 31
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
ibdev_warn(&hr_dev->ib_dev,
"Local ACK timeout shall be 0 to 20.\n");
return false;
}
*timeout += QP_ACK_TIMEOUT_OFFSET;
} else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
if (*timeout > QP_ACK_TIMEOUT_MAX) {
ibdev_warn(&hr_dev->ib_dev,
"Local ACK timeout shall be 0 to 31.\n");
return false;
}
}
return true;
}
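check_qp_timeout_cfg_range() narrows the accepted range to 0..20 on HIP08 and biases the value by QP_ACK_TIMEOUT_OFFSET (10) before it reaches QPC_AT, while later revisions take the full 5-bit 0..31 unchanged. A standalone sketch of the mapping, with the kernel warning replaced by an error return:

#include <stdbool.h>
#include <stdio.h>

#define QP_ACK_TIMEOUT_MAX_HIP08 20
#define QP_ACK_TIMEOUT_OFFSET    10
#define QP_ACK_TIMEOUT_MAX       31

/* Returns the register value, or -1 when out of range. */
static int qpc_at_value(bool is_hip08, int timeout)
{
	if (is_hip08) {
		if (timeout > QP_ACK_TIMEOUT_MAX_HIP08)
			return -1;
		return timeout + QP_ACK_TIMEOUT_OFFSET;
	}
	if (timeout > QP_ACK_TIMEOUT_MAX)
		return -1;
	return timeout;
}

int main(void)
{
	printf("%d\n", qpc_at_value(true, 14));		/* 24 */
	printf("%d\n", qpc_at_value(true, 21));		/* -1: rejected */
	printf("%d\n", qpc_at_value(false, 14));	/* 14 */
	return 0;
}

So a requested timeout of 14 is written as 24 on HIP08 but as 14 on newer hardware, and out-of-range values leave QPC_AT untouched.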
static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@ -4811,6 +4841,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int ret = 0;
u8 timeout;
if (attr_mask & IB_QP_AV) {
ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
@ -4820,12 +4851,10 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_TIMEOUT) {
if (attr->timeout < 31) {
hr_reg_write(context, QPC_AT, attr->timeout);
timeout = attr->timeout;
if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
hr_reg_write(context, QPC_AT, timeout);
hr_reg_clear(qpc_mask, QPC_AT);
} else {
ibdev_warn(&hr_dev->ib_dev,
"Local ACK timeout shall be 0 to 30.\n");
}
}
@ -4882,7 +4911,9 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer);
hr_reg_write(context, QPC_MIN_RNR_TIME,
hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
}
@ -5499,6 +5530,16 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
dev_info(hr_dev->dev,
"cq_period(%u) reached the upper limit, adjusted to 65.\n",
cq_period);
cq_period = HNS_ROCE_MAX_CQ_PERIOD;
}
cq_period *= HNS_ROCE_CLOCK_ADJUST;
}
hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
@ -5894,6 +5935,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
eq->eq_period);
eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
}
eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
}
hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
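Both clamps (CQ and EQ) follow from the same arithmetic: on HIP08 the configured period is multiplied by HNS_ROCE_CLOCK_ADJUST (1000) before being written to what the USHRT_MAX comparison implies is a 16-bit field, so 65 is the largest period that still fits:

	65 * 1000 = 65000 <= USHRT_MAX (65535)  -> fits
	66 * 1000 = 66000 >  USHRT_MAX          -> would overflow, clamped to 65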

View file

@ -1444,6 +1444,14 @@ struct hns_roce_dip {
struct list_head node; /* all dips are on a list */
};
/* only for the RNR timeout issue on HIP08 */
#define HNS_ROCE_CLOCK_ADJUST 1000
#define HNS_ROCE_MAX_CQ_PERIOD 65
#define HNS_ROCE_MAX_EQ_PERIOD 65
#define HNS_ROCE_RNR_TIMER_10NS 1
#define HNS_ROCE_1US_CFG 999
#define HNS_ROCE_1NS_CFG 0
#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0

View file

@ -259,7 +259,7 @@ static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
static void free_srq_wrid(struct hns_roce_srq *srq)
{
kfree(srq->wrid);
kvfree(srq->wrid);
srq->wrid = NULL;
}

View file

@ -941,7 +941,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
&addrlimit) ||
addrlimit > type_max(typeof(pkt->addrlimit))) {
ret = -EINVAL;
goto free_pbc;
goto free_pkt;
}
pkt->addrlimit = addrlimit;
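The one-line change retargets the error exit to match the kernel's reverse-order unwind convention: each label undoes everything set up after the point it corresponds to, and here the old jump went to a label that skipped freeing the just-allocated pkt, leaking it. A generic standalone sketch of the convention (names illustrative):

#include <stdlib.h>

static int setup(void)
{
	void *a, *b;
	int ret = -1;

	a = malloc(64);
	if (!a)
		return ret;

	b = malloc(64);
	if (!b)
		goto free_a;

	if (1 /* some later validation fails */) {
		ret = -22;
		goto free_b;	/* must be free_b: jumping to free_a
				 * here would leak b, the class of bug
				 * fixed above */
	}

	return 0;

free_b:
	free(b);
free_a:
	free(a);
	return ret;
}

int main(void) { return setup() == -22 ? 0 : 1; }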

View file

@ -456,9 +456,10 @@ struct iqs626_private {
unsigned int suspend_mode;
};
static int iqs626_parse_events(struct iqs626_private *iqs626,
const struct fwnode_handle *ch_node,
enum iqs626_ch_id ch_id)
static noinline_for_stack int
iqs626_parse_events(struct iqs626_private *iqs626,
const struct fwnode_handle *ch_node,
enum iqs626_ch_id ch_id)
{
struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
struct i2c_client *client = iqs626->client;
@ -604,9 +605,10 @@ static int iqs626_parse_events(struct iqs626_private *iqs626,
return 0;
}
static int iqs626_parse_ati_target(struct iqs626_private *iqs626,
const struct fwnode_handle *ch_node,
enum iqs626_ch_id ch_id)
static noinline_for_stack int
iqs626_parse_ati_target(struct iqs626_private *iqs626,
const struct fwnode_handle *ch_node,
enum iqs626_ch_id ch_id)
{
struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
struct i2c_client *client = iqs626->client;
@ -885,9 +887,10 @@ static int iqs626_parse_trackpad(struct iqs626_private *iqs626,
return 0;
}
static int iqs626_parse_channel(struct iqs626_private *iqs626,
const struct fwnode_handle *ch_node,
enum iqs626_ch_id ch_id)
static noinline_for_stack int
iqs626_parse_channel(struct iqs626_private *iqs626,
const struct fwnode_handle *ch_node,
enum iqs626_ch_id ch_id)
{
struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
struct i2c_client *client = iqs626->client;
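noinline_for_stack (defined in the kernel as plain noinline) addresses frame accumulation: when the compiler inlines several large-frame parsers into one caller, their local buffers occupy the caller's stack frame simultaneously and can trip the kernel's frame-size warnings; kept out of line, each frame is live only for the duration of its call. A reduced standalone illustration (buffer sizes illustrative):

#include <stdio.h>

/* Userspace stand-in for the kernel's definition. */
#define noinline_for_stack __attribute__((noinline))

static noinline_for_stack int parse_events(void)
{
	char buf[512] = { 0 };	/* large frame, live only inside this call */
	return buf[0];
}

static noinline_for_stack int parse_ati_target(void)
{
	char buf[512] = { 0 };
	return buf[0];
}

int main(void)
{
	/* Without the annotation, inlining could put both 512-byte
	 * buffers in this function's frame at the same time. */
	printf("%d\n", parse_events() + parse_ati_target());
	return 0;
}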

View file

@ -1588,7 +1588,13 @@ static const struct dmi_system_id no_hw_res_dmi_table[] = {
*/
static int elantech_change_report_id(struct psmouse *psmouse)
{
unsigned char param[2] = { 0x10, 0x03 };
/*
* NOTE: the code expects to receive param[] as an array of 3
* items (see __ps2_command()), even though only 2 are actually
* needed here. Make sure the array size is 3 to avoid potential
* stack out-of-bounds accesses.
*/
unsigned char param[3] = { 0x10, 0x03 };
if (elantech_write_reg_params(psmouse, 0x7, param) ||
elantech_read_reg_params(psmouse, 0x7, param) ||
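The hazard the enlarged array avoids: the PS/2 helpers fill in as many response bytes as the command calls for, so passing a 2-byte buffer where 3 bytes come back (as the comment notes for __ps2_command()) writes past the end of the stack array. A minimal standalone model of the overflow:

#include <string.h>
#include <stdio.h>

/* Stand-in for a callee that always fills three bytes, the way
 * __ps2_command() can for this command. */
static void read_three_bytes(unsigned char *param)
{
	unsigned char response[3] = { 0xaa, 0xbb, 0xcc };
	memcpy(param, response, 3);
}

int main(void)
{
	unsigned char bad[2] = { 0x10, 0x03 };	/* one byte short: UB */
	unsigned char good[3] = { 0x10, 0x03 };	/* what the fix uses */

	(void)bad;	/* read_three_bytes(bad) would write out of bounds */
	read_three_bytes(good);
	printf("%02x %02x %02x\n", good[0], good[1], good[2]);
	return 0;
}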

View file

@ -995,6 +995,24 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
{ }
};
static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
{
/* ASUS ZenBook UX425UA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
},
},
{
/* ASUS ZenBook UM325UA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
},
},
{ }
};
#endif /* CONFIG_X86 */
#ifdef CONFIG_PNP
@ -1315,6 +1333,9 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_kbdreset_table))
i8042_kbdreset = true;
if (dmi_check_system(i8042_dmi_probe_defer_table))
i8042_probe_defer = true;
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to

View file

@ -45,6 +45,10 @@ static bool i8042_unlock;
module_param_named(unlock, i8042_unlock, bool, 0);
MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
static bool i8042_probe_defer;
module_param_named(probe_defer, i8042_probe_defer, bool, 0);
MODULE_PARM_DESC(probe_defer, "Allow deferred probing.");
enum i8042_controller_reset_mode {
I8042_RESET_NEVER,
I8042_RESET_ALWAYS,
@ -711,7 +715,7 @@ static int i8042_set_mux_mode(bool multiplex, unsigned char *mux_version)
* LCS/Telegraphics.
*/
static int __init i8042_check_mux(void)
static int i8042_check_mux(void)
{
unsigned char mux_version;
@ -740,10 +744,10 @@ static int __init i8042_check_mux(void)
/*
* The following is used to test AUX IRQ delivery.
*/
static struct completion i8042_aux_irq_delivered __initdata;
static bool i8042_irq_being_tested __initdata;
static struct completion i8042_aux_irq_delivered;
static bool i8042_irq_being_tested;
static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
static irqreturn_t i8042_aux_test_irq(int irq, void *dev_id)
{
unsigned long flags;
unsigned char str, data;
@ -770,7 +774,7 @@ static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
* verifies success by reading CTR. Used when testing for presence of AUX
* port.
*/
static int __init i8042_toggle_aux(bool on)
static int i8042_toggle_aux(bool on)
{
unsigned char param;
int i;
@ -798,7 +802,7 @@ static int __init i8042_toggle_aux(bool on)
* the presence of an AUX interface.
*/
static int __init i8042_check_aux(void)
static int i8042_check_aux(void)
{
int retval = -1;
bool irq_registered = false;
@ -1005,7 +1009,7 @@ static int i8042_controller_init(void)
if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) {
pr_err("Can't read CTR while initializing i8042\n");
return -EIO;
return i8042_probe_defer ? -EPROBE_DEFER : -EIO;
}
} while (n < 2 || ctr[0] != ctr[1]);
@ -1320,7 +1324,7 @@ static void i8042_shutdown(struct platform_device *dev)
i8042_controller_reset(false);
}
static int __init i8042_create_kbd_port(void)
static int i8042_create_kbd_port(void)
{
struct serio *serio;
struct i8042_port *port = &i8042_ports[I8042_KBD_PORT_NO];
@ -1349,7 +1353,7 @@ static int __init i8042_create_kbd_port(void)
return 0;
}
static int __init i8042_create_aux_port(int idx)
static int i8042_create_aux_port(int idx)
{
struct serio *serio;
int port_no = idx < 0 ? I8042_AUX_PORT_NO : I8042_MUX_PORT_NO + idx;
@ -1386,13 +1390,13 @@ static int __init i8042_create_aux_port(int idx)
return 0;
}
static void __init i8042_free_kbd_port(void)
static void i8042_free_kbd_port(void)
{
kfree(i8042_ports[I8042_KBD_PORT_NO].serio);
i8042_ports[I8042_KBD_PORT_NO].serio = NULL;
}
static void __init i8042_free_aux_ports(void)
static void i8042_free_aux_ports(void)
{
int i;
@ -1402,7 +1406,7 @@ static void __init i8042_free_aux_ports(void)
}
}
static void __init i8042_register_ports(void)
static void i8042_register_ports(void)
{
int i;
@ -1443,7 +1447,7 @@ static void i8042_free_irqs(void)
i8042_aux_irq_registered = i8042_kbd_irq_registered = false;
}
static int __init i8042_setup_aux(void)
static int i8042_setup_aux(void)
{
int (*aux_enable)(void);
int error;
@ -1485,7 +1489,7 @@ static int __init i8042_setup_aux(void)
return error;
}
static int __init i8042_setup_kbd(void)
static int i8042_setup_kbd(void)
{
int error;
@ -1535,7 +1539,7 @@ static int i8042_kbd_bind_notifier(struct notifier_block *nb,
return 0;
}
static int __init i8042_probe(struct platform_device *dev)
static int i8042_probe(struct platform_device *dev)
{
int error;
@ -1600,6 +1604,7 @@ static struct platform_driver i8042_driver = {
.pm = &i8042_pm_ops,
#endif
},
.probe = i8042_probe,
.remove = i8042_remove,
.shutdown = i8042_shutdown,
};
@ -1610,7 +1615,6 @@ static struct notifier_block i8042_kbd_bind_notifier_block = {
static int __init i8042_init(void)
{
struct platform_device *pdev;
int err;
dbg_init();
@ -1626,17 +1630,29 @@ static int __init i8042_init(void)
/* Set this before creating the dev to allow i8042_command to work right away */
i8042_present = true;
pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
if (IS_ERR(pdev)) {
err = PTR_ERR(pdev);
err = platform_driver_register(&i8042_driver);
if (err)
goto err_platform_exit;
i8042_platform_device = platform_device_alloc("i8042", -1);
if (!i8042_platform_device) {
err = -ENOMEM;
goto err_unregister_driver;
}
err = platform_device_add(i8042_platform_device);
if (err)
goto err_free_device;
bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
panic_blink = i8042_panic_blink;
return 0;
err_free_device:
platform_device_put(i8042_platform_device);
err_unregister_driver:
platform_driver_unregister(&i8042_driver);
err_platform_exit:
i8042_platform_exit();
return err;
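The bundle helper being replaced builds on platform_driver_probe(), which probes exactly once and cannot honor -EPROBE_DEFER; registering the driver and device separately keeps .probe registered so the driver core can retry it later. This is also why the helpers above lose their __init/__initdata markings: a deferred probe can run after init memory has been discarded. A reduced sketch of the registration pattern and its unwind order ("demo" names illustrative; note platform_device_put(), not _del(), after a failed platform_device_add()):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;	/* may return -EPROBE_DEFER to be retried */
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = { .name = "demo" },
};

static struct platform_device *demo_pdev;

static int __init demo_init(void)
{
	int err;

	err = platform_driver_register(&demo_driver);	/* probe stays registered */
	if (err)
		return err;

	demo_pdev = platform_device_alloc("demo", -1);
	if (!demo_pdev) {
		err = -ENOMEM;
		goto err_unregister;
	}

	err = platform_device_add(demo_pdev);
	if (err)
		goto err_put;		/* add failed: put, do not del */

	return 0;

err_put:
	platform_device_put(demo_pdev);
err_unregister:
	platform_driver_unregister(&demo_driver);
	return err;
}
module_init(demo_init);
MODULE_LICENSE("GPL");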

View file

@ -1882,7 +1882,7 @@ static int mxt_read_info_block(struct mxt_data *data)
if (error) {
dev_err(&client->dev, "Error %d parsing object table\n", error);
mxt_free_object_table(data);
goto err_free_mem;
return error;
}
data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);

View file

@ -117,6 +117,19 @@
#define ELAN_POWERON_DELAY_USEC 500
#define ELAN_RESET_DELAY_MSEC 20
/* FW boot code version */
#define BC_VER_H_BYTE_FOR_EKTH3900x1_I2C 0x72
#define BC_VER_H_BYTE_FOR_EKTH3900x2_I2C 0x82
#define BC_VER_H_BYTE_FOR_EKTH3900x3_I2C 0x92
#define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C 0x6D
#define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C 0x6E
#define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C 0x77
#define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C 0x78
#define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB 0x67
#define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB 0x68
#define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB 0x74
#define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB 0x75
enum elants_chip_id {
EKTH3500,
EKTF3624,
@ -736,6 +749,37 @@ static int elants_i2c_validate_remark_id(struct elants_data *ts,
return 0;
}
static bool elants_i2c_should_check_remark_id(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
const u8 bootcode_version = ts->iap_version;
bool check;
/* The I2C eKTH3900 and eKTH5312 do NOT support Remark ID */
if ((bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x1_I2C) ||
(bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x2_I2C) ||
(bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x3_I2C) ||
(bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C) ||
(bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C) ||
(bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C) ||
(bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C) ||
(bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB) ||
(bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB) ||
(bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB) ||
(bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB)) {
dev_dbg(&client->dev,
"eKTH3900/eKTH5312(0x%02x) do not support remark id\n",
bootcode_version);
check = false;
} else if (bootcode_version >= 0x60) {
check = true;
} else {
check = false;
}
return check;
}
static int elants_i2c_do_update_firmware(struct i2c_client *client,
const struct firmware *fw,
bool force)
@ -749,7 +793,7 @@ static int elants_i2c_do_update_firmware(struct i2c_client *client,
u16 send_id;
int page, n_fw_pages;
int error;
bool check_remark_id = ts->iap_version >= 0x60;
bool check_remark_id = elants_i2c_should_check_remark_id(ts);
/* Recovery mode detection! */
if (force) {

View file

@ -102,6 +102,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "911", .data = &gt911_chip_data },
{ .id = "9271", .data = &gt911_chip_data },
{ .id = "9110", .data = &gt911_chip_data },
{ .id = "9111", .data = &gt911_chip_data },
{ .id = "927", .data = &gt911_chip_data },
{ .id = "928", .data = &gt911_chip_data },
@ -650,10 +651,16 @@ int goodix_reset_no_int_sync(struct goodix_ts_data *ts)
usleep_range(6000, 10000); /* T4: > 5ms */
/* end select I2C slave addr */
error = gpiod_direction_input(ts->gpiod_rst);
if (error)
goto error;
/*
* Put the reset pin back into input / high-impedance mode to save
* power. Only do this in the non-ACPI case since some ACPI boards
* don't have a pull-up, so there the reset pin must stay active-high.
*/
if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_GPIO) {
error = gpiod_direction_input(ts->gpiod_rst);
if (error)
goto error;
}
return 0;
@ -787,6 +794,14 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
return -EINVAL;
}
/*
* Normally we put the reset pin in input / high-impedance mode to save
* power. But some x86/ACPI boards don't have a pull-up, so for the ACPI
* case, leave the pin as is. This results in the pin not being touched
* at all on x86/ACPI boards, except when needed for error recovery.
*/
ts->gpiod_rst_flags = GPIOD_ASIS;
return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping);
}
#else
@ -812,6 +827,12 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
return -EINVAL;
dev = &ts->client->dev;
/*
* By default we request the reset pin as input, leaving it in
* high-impedance when not resetting the controller to save power.
*/
ts->gpiod_rst_flags = GPIOD_IN;
ts->avdd28 = devm_regulator_get(dev, "AVDD28");
if (IS_ERR(ts->avdd28)) {
error = PTR_ERR(ts->avdd28);
@ -849,7 +870,7 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
ts->gpiod_int = gpiod;
/* Get the reset line GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_IN);
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags);
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)

View file

@ -87,6 +87,7 @@ struct goodix_ts_data {
struct gpio_desc *gpiod_rst;
int gpio_count;
int gpio_int_idx;
enum gpiod_flags gpiod_rst_flags;
char id[GOODIX_ID_MAX_LEN + 1];
char cfg_name[64];
u16 version;

View file

@ -207,7 +207,7 @@ static int goodix_firmware_upload(struct goodix_ts_data *ts)
error = goodix_reset_no_int_sync(ts);
if (error)
return error;
goto release;
error = goodix_enter_upload_mode(ts->client);
if (error)

View file

@ -381,7 +381,7 @@ mISDNInit(void)
err = mISDN_inittimer(&debug);
if (err)
goto error2;
err = l1_init(&debug);
err = Isdnl1_Init(&debug);
if (err)
goto error3;
err = Isdnl2_Init(&debug);
@ -395,7 +395,7 @@ mISDNInit(void)
error5:
Isdnl2_cleanup();
error4:
l1_cleanup();
Isdnl1_cleanup();
error3:
mISDN_timer_cleanup();
error2:
@ -408,7 +408,7 @@ static void mISDN_cleanup(void)
{
misdn_sock_cleanup();
Isdnl2_cleanup();
l1_cleanup();
Isdnl1_cleanup();
mISDN_timer_cleanup();
class_unregister(&mISDN_class);

View file

@ -60,8 +60,8 @@ struct Bprotocol *get_Bprotocol4id(u_int);
extern int mISDN_inittimer(u_int *);
extern void mISDN_timer_cleanup(void);
extern int l1_init(u_int *);
extern void l1_cleanup(void);
extern int Isdnl1_Init(u_int *);
extern void Isdnl1_cleanup(void);
extern int Isdnl2_Init(u_int *);
extern void Isdnl2_cleanup(void);

View file

@ -398,7 +398,7 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
EXPORT_SYMBOL(create_l1);
int
l1_init(u_int *deb)
Isdnl1_Init(u_int *deb)
{
debug = deb;
l1fsm_s.state_count = L1S_STATE_COUNT;
@ -409,7 +409,7 @@ l1_init(u_int *deb)
}
void
l1_cleanup(void)
Isdnl1_cleanup(void)
{
mISDN_FsmFree(&l1fsm_s);
}

View file

@ -1139,6 +1139,7 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
static void cached_dev_detach_finish(struct work_struct *w)
{
struct cached_dev *dc = container_of(w, struct cached_dev, detach);
struct cache_set *c = dc->disk.c;
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
BUG_ON(refcount_read(&dc->count));
@ -1156,7 +1157,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
calc_cached_dev_sectors(dc->disk.c);
calc_cached_dev_sectors(c);
clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
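The saved pointer matters because bcache_device_detach() clears the device's cache-set pointer as part of detaching, so the old calc_cached_dev_sectors(dc->disk.c) call dereferenced a pointer that had just been NULLed. Capturing it before the detach is the standard pattern; a minimal standalone model:

#include <stdio.h>

struct cache_set { long sectors; };
struct disk { struct cache_set *c; };

/* Stand-in for bcache_device_detach(): clears the back-pointer. */
static void device_detach(struct disk *d) { d->c = NULL; }

static void recalc_sectors(struct cache_set *c) { c->sectors = 0; }

static void detach_finish(struct disk *d)
{
	struct cache_set *c = d->c;	/* capture before detach clears it */

	device_detach(d);
	recalc_sectors(c);	/* recalc_sectors(d->c) would deref NULL */
}

int main(void)
{
	struct cache_set cs = { 42 };
	struct disk dk = { &cs };

	detach_finish(&dk);
	printf("%ld\n", cs.sectors);	/* 0 */
	return 0;
}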

View file

@ -2264,7 +2264,7 @@ void mmc_start_host(struct mmc_host *host)
_mmc_detect_change(host, 0, false);
}
void mmc_stop_host(struct mmc_host *host)
void __mmc_stop_host(struct mmc_host *host)
{
if (host->slot.cd_irq >= 0) {
mmc_gpio_set_cd_wake(host, false);
@ -2273,6 +2273,11 @@ void mmc_stop_host(struct mmc_host *host)
host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
}
void mmc_stop_host(struct mmc_host *host)
{
__mmc_stop_host(host);
/* clear pm flags now and let card drivers set them as needed */
host->pm_flags = 0;

View file

@ -70,6 +70,7 @@ static inline void mmc_delay(unsigned int ms)
void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void __mmc_stop_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
void _mmc_detect_change(struct mmc_host *host, unsigned long delay,

View file

@ -80,9 +80,18 @@ static void mmc_host_classdev_release(struct device *dev)
kfree(host);
}
static int mmc_host_classdev_shutdown(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
__mmc_stop_host(host);
return 0;
}
static struct class mmc_host_class = {
.name = "mmc_host",
.dev_release = mmc_host_classdev_release,
.shutdown_pre = mmc_host_classdev_shutdown,
.pm = MMC_HOST_CLASS_DEV_PM_OPS,
};
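The split gives the class-level shutdown_pre hook, which the driver core runs early in device_shutdown() before bus and driver shutdown callbacks, a way to quiesce the host without the pm-flag reset that the full mmc_stop_host() performs on removal. A reduced standalone model of the core-plus-wrapper shape (fields and behavior illustrative):

#include <stdio.h>

struct host { int rescan_disable; int pm_flags; };

/* Shared quiesce step, usable from both shutdown and removal. */
static void stop_host_core(struct host *h)
{
	h->rescan_disable = 1;		/* stop detecting new cards */
}

/* Removal path: quiesce, then also reset the pm flags. */
static void stop_host(struct host *h)
{
	stop_host_core(h);
	h->pm_flags = 0;
}

/* Shutdown path: quiesce only, leaving pm state untouched. */
static int classdev_shutdown(struct host *h)
{
	stop_host_core(h);
	return 0;
}

int main(void)
{
	struct host h = { 0, 3 };

	classdev_shutdown(&h);
	printf("%d %d\n", h.rescan_disable, h.pm_flags);	/* 1 3 */
	stop_host(&h);
	printf("%d\n", h.pm_flags);				/* 0 */
	return 0;
}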

Some files were not shown because too many files have changed in this diff.