powerpc/pseries: Move some PAPR paravirt functions to their own file

These functions will be used by the queued spinlock implementation,
and may be useful elsewhere too, so move them out of spinlock.h.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200724131423.1362108-2-npiggin@gmail.com
Nicholas Piggin, 2020-07-24 23:14:18 +10:00, committed by Michael Ellerman
commit 20d444d06f (parent dbce456280)
3 changed files with 66 additions and 29 deletions
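
For orientation, a sketch of the pattern these helpers enable (mine, not part of the patch): a lock slow path samples the holder's yield count and, if the low bit says the holder's vCPU is preempted, confers its time slice to the holder via H_CONFER. The function spin_or_confer below is hypothetical and assumes the post-patch asm/paravirt.h API in kernel context.

	/* Hypothetical caller of the moved helpers (illustration only). */
	static void spin_or_confer(int holder_cpu)
	{
		u32 yield_count = yield_count_of(holder_cpu);

		if (yield_count & 1)	/* odd: holder's vCPU is preempted */
			yield_to_preempted(holder_cpu, yield_count);
		/* else: holder is running, keep spinning */
	}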

diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
new file mode 100644
--- /dev/null
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_PARAVIRT_H
+#define _ASM_POWERPC_PARAVIRT_H
+
+#include <linux/jump_label.h>
+#include <asm/smp.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/hvcall.h>
+#endif
+
+#ifdef CONFIG_PPC_SPLPAR
+DECLARE_STATIC_KEY_FALSE(shared_processor);
+
+static inline bool is_shared_processor(void)
+{
+	return static_branch_unlikely(&shared_processor);
+}
+
+/* If bit 0 is set, the cpu has been preempted */
+static inline u32 yield_count_of(int cpu)
+{
+	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
+	return be32_to_cpu(yield_count);
+}
+
+static inline void yield_to_preempted(int cpu, u32 yield_count)
+{
+	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+}
+#else
+static inline bool is_shared_processor(void)
+{
+	return false;
+}
+
+static inline u32 yield_count_of(int cpu)
+{
+	return 0;
+}
+
+extern void ___bad_yield_to_preempted(void);
+static inline void yield_to_preempted(int cpu, u32 yield_count)
+{
+	___bad_yield_to_preempted(); /* This would be a bug */
+}
+#endif
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+	if (!is_shared_processor())
+		return false;
+	if (yield_count_of(cpu) & 1)
+		return true;
+	return false;
+}
+
+#endif /* _ASM_POWERPC_PARAVIRT_H */
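
A gloss on the protocol above (my reading of PAPR's H_CONFER semantics, not stated in the patch): yield_count is a generation counter bumped on every preempt/dispatch transition, so the sampled value doubles as a staleness token.

	/*
	 * Illustrative timeline (assumed hypervisor behaviour):
	 *
	 *	u32 yc = yield_count_of(cpu);	// odd: vCPU is preempted
	 *	...				// vCPU may be dispatched here,
	 *					// bumping its yield_count
	 *	yield_to_preempted(cpu, yc);	// hypervisor checks yc against
	 *					// the current count and skips
	 *					// the yield if they differ
	 */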

diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -15,11 +15,10 @@
  *
  * (the type definitions are in asm/spinlock_types.h)
  */
-#include <linux/jump_label.h>
 #include <linux/irqflags.h>
+#include <asm/paravirt.h>
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
-#include <asm/hvcall.h>
 #endif
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
@@ -35,18 +34,6 @@
 #define LOCK_TOKEN	1
 #endif
 
-#ifdef CONFIG_PPC_PSERIES
-DECLARE_STATIC_KEY_FALSE(shared_processor);
-
-#define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(int cpu)
-{
-	if (!static_branch_unlikely(&shared_processor))
-		return false;
-	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
-}
-#endif
-
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
 	return lock.slock == 0;
@@ -110,15 +97,6 @@ static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
 static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
 #endif
 
-static inline bool is_shared_processor(void)
-{
-#ifdef CONFIG_PPC_SPLPAR
-	return static_branch_unlikely(&shared_processor);
-#else
-	return false;
-#endif
-}
-
 static inline void spin_yield(arch_spinlock_t *lock)
 {
 	if (is_shared_processor())

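The hunk above is cut off by diff context. For reference, a sketch of the shape spin_yield() keeps after this change, assuming splpar_spin_yield() remains the shared-processor slow path (reconstructed, not quoted from the patch):

	static inline void spin_yield(arch_spinlock_t *lock)
	{
		if (is_shared_processor())
			splpar_spin_yield(lock);	/* confer to the holder */
		else
			barrier();			/* plain busy-wait */
	}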
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -27,14 +27,14 @@ void splpar_spin_yield(arch_spinlock_t *lock)
 		return;
 	holder_cpu = lock_value & 0xffff;
 	BUG_ON(holder_cpu >= NR_CPUS);
-	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
+
+	yield_count = yield_count_of(holder_cpu);
 	if ((yield_count & 1) == 0)
 		return;		/* virtual cpu is currently running */
 	rmb();
 	if (lock->slock != lock_value)
 		return;		/* something has changed */
-	plpar_hcall_norets(H_CONFER,
-		get_hard_smp_processor_id(holder_cpu), yield_count);
+	yield_to_preempted(holder_cpu, yield_count);
 }
 EXPORT_SYMBOL_GPL(splpar_spin_yield);
 
@@ -53,13 +53,13 @@ void splpar_rw_yield(arch_rwlock_t *rw)
 		return;		/* no write lock at present */
 	holder_cpu = lock_value & 0xffff;
 	BUG_ON(holder_cpu >= NR_CPUS);
-	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
+
+	yield_count = yield_count_of(holder_cpu);
 	if ((yield_count & 1) == 0)
 		return;		/* virtual cpu is currently running */
 	rmb();
 	if (rw->lock != lock_value)
 		return;		/* something has changed */
-	plpar_hcall_norets(H_CONFER,
-		get_hard_smp_processor_id(holder_cpu), yield_count);
+	yield_to_preempted(holder_cpu, yield_count);
 }
 #endif
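
A final note (my gloss, not stated in the patch): the rmb() in both functions orders the yield_count read before the re-read of the lock word, so the confer only happens when the sampled count still describes the current lock holder.

	/*
	 * Ordering sketch (illustration only):
	 *
	 *	yield_count = yield_count_of(holder_cpu);	// read A
	 *	rmb();				// A must complete before B
	 *	if (lock->slock != lock_value)	// read B: same holder still?
	 *		return;			// no: do not confer
	 *	yield_to_preempted(holder_cpu, yield_count);
	 */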