linux/arch/powerpc/kvm/book3s_32_sr.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRS = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */

#define XCHG_SR(n)	lwz	r9, (SVCPU_SR+(n*4))(r3);  \
			mtsr	n, r9
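
	/* For illustration, one instance of the macro above (assuming
	 * SVCPU_SR is the asm-offsets-generated byte offset of the shadow
	 * vcpu's segment register array) expands to:
	 *
	 *	XCHG_SR(3)  ->	lwz	r9, (SVCPU_SR + 12)(r3)
	 *			mtsr	3, r9
	 */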
	XCHG_SR(0)
	XCHG_SR(1)
	XCHG_SR(2)
	XCHG_SR(3)
	XCHG_SR(4)
	XCHG_SR(5)
	XCHG_SR(6)
	XCHG_SR(7)
	XCHG_SR(8)
	XCHG_SR(9)
	XCHG_SR(10)
	XCHG_SR(11)
	XCHG_SR(12)
	XCHG_SR(13)
	XCHG_SR(14)
	XCHG_SR(15)

	/* Clear BATs. */
#define KVM_KILL_BAT(n, reg) \
	mtspr	SPRN_IBAT##n##U,reg;	\
	mtspr	SPRN_IBAT##n##L,reg;	\
	mtspr	SPRN_DBAT##n##U,reg;	\
	mtspr	SPRN_DBAT##n##L,reg

	li	r9, 0
	KVM_KILL_BAT(0, r9)
	KVM_KILL_BAT(1, r9)
	KVM_KILL_BAT(2, r9)
	KVM_KILL_BAT(3, r9)
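
	/* Worked example: KVM_KILL_BAT(0, r9) with r9 == 0 writes zero to
	 * SPRN_IBAT0U/L and SPRN_DBAT0U/L.  Zeroing the upper word clears
	 * the Vs/Vp valid bits (assuming the classic 6xx BAT layout), so
	 * the whole BAT mapping is disabled while the guest runs.
	 */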
.endm

/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Restore BATs */

	/* We only overwrite the upper part, so we only restore
	   the upper part. */
#define KVM_LOAD_BAT(n, reg, RA, RB) \
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB

	lis	r9, BATS@ha
	addi	r9, r9, BATS@l
	tophys(r9, r9)
	KVM_LOAD_BAT(0, r9, r10, r11)
	KVM_LOAD_BAT(1, r9, r10, r11)
	KVM_LOAD_BAT(2, r9, r10, r11)
	KVM_LOAD_BAT(3, r9, r10, r11)
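
	/* For illustration: the macro implies BATS is a table of 16-byte
	 * records, one per BAT index n, laid out as IBATnU at +0, IBATnL
	 * at +4, DBATnU at +8 and DBATnL at +12; KVM_LOAD_BAT(1, ...) thus
	 * reads bytes 16..31 of the table.  tophys() converts the table's
	 * virtual address because this runs with translation off.
	 */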

	/* Restore Segment Registers */

	/* 0xc - 0xf */
	li	r0, 4
	mtctr	r0
	LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
	lis	r4, 0xc000
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
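
	/* Worked example of the loop above (assuming the classic 32-bit
	 * segment register layout: 0x20000000 is the Kp key bit and the
	 * low 24 bits are the VSID).  0x111 * 0xc == 0xccc, so the four
	 * iterations perform:
	 *
	 *	mtsrin	0x20000ccc, 0xc0000000	(SR 12)
	 *	mtsrin	0x20000ddd, 0xd0000000	(SR 13)
	 *	mtsrin	0x20000eee, 0xe0000000	(SR 14)
	 *	mtsrin	0x20000fff, 0xf0000000	(SR 15)
	 */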

	/* 0x0 - 0xb */

	/* 'current->mm' needs to be in r4 */
	tophys(r4, r2)
	lwz	r4, MM(r4)
	tophys(r4, r4)
	/* This only clobbers r0, r3, r4 and r5 */
	bl	switch_mmu_context
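
	/* Note (assumption, not stated in this file): the 32-bit
	 * switch_mmu_context is expected to reload segment registers
	 * 0x0 - 0xb from the mm context passed in r4, completing the
	 * host segment restore started above.
	 */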
.endm