author    Dean Sanner <dsanner@us.ibm.com>          2017-12-05 15:10:45 -0600
committer Daniel M. Crowell <dcrowell@us.ibm.com>   2018-09-24 12:17:16 -0500
commit    fcfd722a6abb01f780c10de0f5d801a9c4210ebd (patch)
tree      49cfafdc273b1aa7989d851d7c17fbb74dd54b0b /src/kernel
parent    69cc45d8f059a113f6bad12e1fdd82123497893a (diff)
download  talos-hostboot-fcfd722a6abb01f780c10de0f5d801a9c4210ebd.tar.gz
          talos-hostboot-fcfd722a6abb01f780c10de0f5d801a9c4210ebd.zip
Support HB running in SMF

Support SMF for P9N/P9C. Lots of minor tweaks to make this work, but the
biggest is to run userspace in problem state. This is needed because for
SMF, Hostboot will need to run in S=1, HV=0, PR=1 (and the kernel in S=1,
HV=1, PR=0). This commit makes P9 HB userspace run in HV=0, PR=1 and the
kernel in HV=1, PR=0.

Change-Id: Ia4771df5e8858c6b7ae54b0746e62b283afb4bc4
RTC: 197243
Reviewed-on: http://rchgit01.rchland.ibm.com/gerrit1/50530
Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP HW <op-hw-jenkins+hostboot@us.ibm.com>
Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com>
Reviewed-by: Dean Sanner <dsanner@us.ibm.com>
Reviewed-by: Nicholas E. Bofferding <bofferdn@us.ibm.com>
Reviewed-by: Christian R. Geddes <crgeddes@us.ibm.com>
Reviewed-by: Daniel M. Crowell <dcrowell@us.ibm.com>
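The privilege combinations named above map onto individual MSR bits. As a
rough C++ illustration of the states involved (bit positions per Power ISA
3.0, counted from the MSB; the mask names are assumptions, not code from
this patch):

// Illustrative masks only -- not part of the patch.
const uint64_t MSR_S  = 0x0000000000400000; // bit 41: Secure (SMF)
const uint64_t MSR_HV = 0x1000000000000000; // bit 3:  Hypervisor
const uint64_t MSR_PR = 0x0000000000004000; // bit 49: Problem state

const uint64_t HB_USER_MSR   = MSR_S | MSR_PR; // userspace: S=1, HV=0, PR=1
const uint64_t HB_KERNEL_MSR = MSR_S | MSR_HV; // kernel:    S=1, HV=1, PR=0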
Diffstat (limited to 'src/kernel')
-rw-r--r--  src/kernel/cpumgr.C          7
-rw-r--r--  src/kernel/exception.C      21
-rw-r--r--  src/kernel/intmsghandler.C  82
-rw-r--r--  src/kernel/misc.C           18
-rw-r--r--  src/kernel/shutdown.S       38
-rw-r--r--  src/kernel/start.S         214
-rw-r--r--  src/kernel/syscall.C        33
7 files changed, 359 insertions, 54 deletions
diff --git a/src/kernel/cpumgr.C b/src/kernel/cpumgr.C
index c0c1be333..a2dff9415 100644
--- a/src/kernel/cpumgr.C
+++ b/src/kernel/cpumgr.C
@@ -55,6 +55,11 @@ size_t CpuManager::cv_cpuSeq = 0;
uint8_t CpuManager::cv_forcedMemPeriodic = 0;
InteractiveDebug CpuManager::cv_interactive_debug;
+const uint64_t WAKEUP_MSR_VALUE = 0x9000000000001000;
+const uint64_t WAKEUP_LPCR_VALUE = 0x000000000000F00A;
+const uint64_t WAKEUP_RPR_VALUE = 0x0001032021223F;
+const uint64_t MSR_SMF_MASK = 0x0000000000400000;
+
CpuManager::CpuManager() : iv_lastStartTimebase(0)
{
for (int i = 0; i < KERNEL_MAX_SUPPORTED_NODES; i++)
@@ -331,6 +336,8 @@ void CpuManager::activateCPU(cpu_t * i_cpu)
uint64_t msr = getMSR();
msr |= 0x1000; // MSR[ME] is not saved on initial wakeup, but we set it on
// entering userspace, so ignore this bit in assert.
+ msr &= ~MSR_SMF_MASK; //Don't check SMF as it is variable,
+ //i.e. keep HB code agnostic
kassert(WAKEUP_MSR_VALUE == msr);
setLPCR(WAKEUP_LPCR_VALUE);
setRPR(WAKEUP_RPR_VALUE);
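Pulled out of context, the adjusted wakeup check behaves as follows; a
minimal standalone sketch using the constants added above (the simulated
MSR value is an assumption):

#include <cstdint>
#include <cassert>

int main()
{
    const uint64_t WAKEUP_MSR_VALUE = 0x9000000000001000; // SF | HV | ME
    const uint64_t MSR_SMF_MASK     = 0x0000000000400000; // MSR bit 41 (S)

    uint64_t msr = 0x9000000000401000; // assumed: core woke up with SMF on

    msr |= 0x1000;          // force ME, which is not saved on initial wakeup
    msr &= ~MSR_SMF_MASK;   // strip SMF so the check is platform-agnostic
    assert(WAKEUP_MSR_VALUE == msr);
    return 0;
}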
diff --git a/src/kernel/exception.C b/src/kernel/exception.C
index d12b6da7d..d3a1dbe6b 100644
--- a/src/kernel/exception.C
+++ b/src/kernel/exception.C
@@ -38,6 +38,8 @@
#include <kernel/hbterminatetypes.H>
#include <kernel/kernel_reasoncodes.H>
#include <kernel/misc.H>
+#include <kernel/cpumgr.H>
+#include <kernel/scheduler.H>
namespace ExceptionHandles
@@ -409,6 +411,25 @@ void kernel_execute_external()
}
extern "C"
+void kernel_execute_hyp_external()
+{
+ // SRR0 set to the effective addr the thread
+ // would have attempted to execute next
+ // SRR1 [33:36,42:47] set to zero
+ // all others copied from MSR
+
+ // Mustn't switch tasks on an external interrupt because
+ // external interrupts come in as HYP exceptions while all
+ // others come in as regular exceptions. If a HYP exception
+ // arrives on top of a regular one, the existing task must
+ // be left as is. Task switching is performed as part of
+ // the custom sendMessage in InterruptMsgHdlr.
+
+ //Do work
+ InterruptMsgHdlr::handleInterrupt();
+}
+
+extern "C"
void kernel_execute_unhandled_exception()
{
task_t* t = TaskManager::getCurrentTask();
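The invariant described in that comment can be stated compactly; a
hypothetical guard (not in the patch) that mirrors the kassert added to the
doorbell handler in syscall.C:

// Hypothetical guard illustrating the no-task-switch invariant:
task_t* pre = TaskManager::getCurrentTask();
InterruptMsgHdlr::handleInterrupt();
// Any task switch must go through InterruptMsgHdlr::sendMessage, so the
// task seen by this HYP exception must be unchanged on return.
kassert(pre == TaskManager::getCurrentTask());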
diff --git a/src/kernel/intmsghandler.C b/src/kernel/intmsghandler.C
index e40c16291..f1e483750 100644
--- a/src/kernel/intmsghandler.C
+++ b/src/kernel/intmsghandler.C
@@ -158,6 +158,88 @@ void InterruptMsgHdlr::handleInterrupt()
}
}
+void InterruptMsgHdlr::sendMessage(msg_sys_types_t i_type, void* i_key,
+ void* i_data, task_t* i_task)
+{
+ // Task to switch to due to waiter being ready to handle message.
+ task_t* ready_task = nullptr;
+
+ // Save pending info for when we get the response.
+ MessageHandler_Pending* mhp = new MessageHandler_Pending();
+ mhp->key = i_key;
+ mhp->task = i_task;
+
+ // Update block status for task.
+ if (nullptr != i_task)
+ {
+ i_task->state = TASK_STATE_BLOCK_USRSPACE;
+ i_task->state_info = i_key;
+ }
+
+ // Send userspace message if one hasn't been sent for this key.
+ if (!iv_pending.find(i_key))
+ {
+ // Create message.
+ msg_t* m = new msg_t();
+ m->type = i_type;
+ m->data[0] = reinterpret_cast<uint64_t>(i_key);
+ m->data[1] = reinterpret_cast<uint64_t>(i_data);
+ m->extra_data = nullptr;
+ m->__reserved__async = 1;
+
+ // Create pending response object.
+ MessagePending* mp = new MessagePending();
+ mp->key = m;
+ mp->task = reinterpret_cast<task_t*>(this);
+
+ // Send to userspace...
+ iv_msgq->lock.lock();
+ task_t* waiter = iv_msgq->waiting.remove();
+ if (nullptr == waiter) // No waiting task, queue for msg_wait call.
+ {
+ iv_msgq->messages.insert(mp);
+ }
+ else // Waiting task, set msg as return and release.
+ {
+ TASK_SETRTN(waiter, (uint64_t) m);
+ iv_msgq->responses.insert(mp);
+ ready_task = waiter;
+ }
+ iv_msgq->lock.unlock();
+ }
+
+ // Defer task while waiting for message response.
+ if (nullptr != i_task)
+ {
+ if (i_task == TaskManager::getCurrentTask())
+ {
+ // Switch to ready waiter, or pick a new task off the scheduler.
+ if (ready_task)
+ {
+ TaskManager::setCurrentTask(ready_task);
+ ready_task = nullptr;
+ }
+ else
+ {
+ // Select next task off scheduler.
+ i_task->cpu->scheduler->setNextRunnable();
+ }
+ }
+ }
+
+ // Add ready waiter to the task queue
+ if (nullptr != ready_task)
+ {
+ task_t* current = TaskManager::getCurrentTask();
+ current->cpu->scheduler->addTask(ready_task);
+ ready_task = nullptr;
+ }
+
+ // Insert pending info into our queue until response is recv'd.
+ iv_pending.insert(mhp);
+}
+
+
void InterruptMsgHdlr::addCpuCore(uint64_t i_pir)
{
task_t* t = TaskManager::getCurrentTask();
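One property worth noting: sendMessage() only builds a msg_t the first time
a key is seen; later calls for the same key just record another pending
waiter. A rough usage sketch (the handler pointer, message type, and key
derivation are all assumptions):

// Hypothetical caller on an interrupt path -- names are assumed:
void* key = reinterpret_cast<void*>(l_intSource);
task_t* blocked = TaskManager::getCurrentTask();

// First call for this key: allocates a msg_t and either wakes a task
// blocked in msg_wait or queues the message for a later msg_wait.
l_handler->sendMessage(l_msgType, key, l_intData, blocked);

// A second call with the same key before the userspace response arrives
// skips the msg_t entirely and only queues a MessageHandler_Pending.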
diff --git a/src/kernel/misc.C b/src/kernel/misc.C
index ed964da79..b91aeb79f 100644
--- a/src/kernel/misc.C
+++ b/src/kernel/misc.C
@@ -42,10 +42,11 @@
#include <kernel/timemgr.H>
#include <util/singleton.H>
#include <kernel/doorbell.H>
+#include <arch/pvrformat.H>
extern "C"
void kernel_shutdown(size_t, uint64_t, uint64_t, uint64_t,
- uint64_t, uint64_t) NO_RETURN;
+ uint64_t, uint64_t, uint64_t) NO_RETURN;
extern HB_Descriptor kernel_hbDescriptor;
@@ -118,6 +119,14 @@ namespace KernelMisc
}
else
{
+ //Determine if P9N/P9C and apply URMOR hack
+ uint64_t l_urmor_hack = 0x0;
+ PVR_t l_pvr(getPVR());
+ if((l_pvr.chipFamily == PVR_t::P9_ALL))
+ {
+ l_urmor_hack = 1;
+ }
+
static Barrier* l_barrier = new Barrier(CpuManager::getCpuCount());
static uint64_t l_lowestPIR = 0xfffffffffffffffful;
@@ -216,7 +225,8 @@ namespace KernelMisc
g_payload_entry,
g_payload_data,
local_master_pir, //master PIR if local master
- start_payload_data_area_address);
+ start_payload_data_area_address,
+ l_urmor_hack);
}
}
else
@@ -319,7 +329,7 @@ namespace KernelMisc
// Create kernel save area and store ptr in bottom of kernel stack.
task_t* saveArea = new task_t();
- saveArea->context.msr_mask = 0xD030; // EE, ME, PR, IR, DR.
+ saveArea->context.msr_mask = 0x100000000000D030; //HV,EE,ME,PR,IR,DR.
*(reinterpret_cast<task_t**>(cpu->kernel_stack_bottom)) = saveArea;
// Set register to indicate we want a 'stop 15' to occur (state loss)
@@ -420,7 +430,7 @@ namespace KernelMisc
// Create kernel save area and store ptr in bottom of kernel stack.
task_t* saveArea = new task_t();
- saveArea->context.msr_mask = 0xD030; // EE, ME, PR, IR, DR.
+ saveArea->context.msr_mask = 0x100000000000D030; //HV,EE,ME,PR,IR,DR.
*(reinterpret_cast<task_t**>(cpu->kernel_stack_bottom)) = saveArea;
// Set register to indicate we want a 'stop 15' to occur (state loss)
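Decoding the widened mask shows exactly which MSR bits are now covered; a
small illustration (constant names assumed, bit numbers per Power ISA
counting from the MSB):

// Decomposition of 0x100000000000D030 -- illustrative only:
const uint64_t MASK_HV = 0x1000000000000000; // bit 3
const uint64_t MASK_EE = 0x0000000000008000; // bit 48
const uint64_t MASK_PR = 0x0000000000004000; // bit 49
const uint64_t MASK_ME = 0x0000000000001000; // bit 51
const uint64_t MASK_IR = 0x0000000000000020; // bit 58
const uint64_t MASK_DR = 0x0000000000000010; // bit 59
static_assert((MASK_HV | MASK_EE | MASK_PR | MASK_ME | MASK_IR | MASK_DR)
              == 0x100000000000D030, "msr_mask decomposition");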
diff --git a/src/kernel/shutdown.S b/src/kernel/shutdown.S
index 0c42a19cf..e6b38dce7 100644
--- a/src/kernel/shutdown.S
+++ b/src/kernel/shutdown.S
@@ -5,7 +5,7 @@
#
# OpenPOWER HostBoot Project
#
-# Contributors Listed Below - COPYRIGHT 2012,2017
+# Contributors Listed Below - COPYRIGHT 2012,2018
# [+] International Business Machines Corp.
#
#
@@ -24,6 +24,9 @@
# IBM_PROLOG_END_TAG
.include "kernel/ppcconsts.S"
+.set P9_URMOR_OPAL_HACK, 0x7c997ba6
+
+
#define KERNEL_BARRIER(addr, count, temp) \
/* Increment thread count. */ \
1: \
@@ -56,7 +59,7 @@
;// <sync barrier 1>
;// All nodes have reported cpu_count
;// <sync barrier 2>
- ;// Thread0 on each core updates HRMOR.
+ ;// Thread0 on each core updates HRMOR & URMOR
;// <sync barrier 3>
;// All threads execute - isync ; slbia ; isync
;// <sync barrier 4>
@@ -72,6 +75,7 @@
;// @param[in] r6 - Payload Data
;// @param[in] r7 - PIR of local master cpu - only set by local master
;// @param[in] r8 - System address of start_payload_data_area
+ ;// @param[in] r9 - Perform URMOR Hack
;//
.global kernel_shutdown
kernel_shutdown:
@@ -81,12 +85,12 @@ kernel_shutdown:
;// Retrieve existing HRMOR.
mfspr r0, HRMOR
;// Determine physical address of EA[0]=1 mode instruction.
- lis r9, kernel_shutdown_ea0_1_mode@h
- ori r9, r9, kernel_shutdown_ea0_1_mode@l
- or r9, r9, r0 ;// Apply HRMOR.
- or r9, r9, r10 ;// Apply EA[0] = 1.
+ lis r12, kernel_shutdown_ea0_1_mode@h
+ ori r12, r12, kernel_shutdown_ea0_1_mode@l
+ or r12, r12, r0 ;// Apply HRMOR.
+ or r12, r12, r10 ;// Apply EA[0] = 1.
;// Jump to enter EA[0] = 1
- mtlr r9
+ mtlr r12
blr
kernel_shutdown_ea0_1_mode:
@@ -113,6 +117,26 @@ kernel_shutdown_ea0_1_mode:
;// threads update HRMOR so we don't have to know about
;// fused/normal core differences
mtspr HRMOR, r4
+
+ ;// Check to see if the SMF bit is off... if so, skip the
+ ;// URMOR set since we don't have permission to write it
+ mfmsr r10
+ andis. r10, r10, 64 ;// Check if bit 41 (SMF) is on
+ beq skip_urmor ;// AND result of zero sets CR0[EQ]: SMF off, skip
+
+ ;// See if we need to do the URMOR hack
+ ;// Due to bug in P9, need to subtract op-code
+
+ cmpwi cr0, r9, 0x1 ;// Hack requested == 0x1
+ bne cr0, skip_urmor_hack
+
+ lis r10, P9_URMOR_OPAL_HACK@h
+ ori r10, r10, P9_URMOR_OPAL_HACK@l
+ sub r4,r4,r10
+skip_urmor_hack:
+ mtspr URMOR, r4
+skip_urmor:
+
1:
;// Perform barrier - 3
addi r8, r8, 8
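For reference, 0x7c997ba6 is the instruction image of mtspr URMOR,r4
(SPR 505). A hedged C++ model of the workaround, taking the comment's
description of the erratum at face value:

// Model of the URMOR write path -- illustration, not patch code.
const uint64_t P9_URMOR_OPAL_HACK = 0x7c997ba6; // 'mtspr URMOR, r4' image

uint64_t urmorValueToWrite(uint64_t desired, bool applyHack)
{
    // On affected P9 parts the hardware effectively adds the mtspr
    // instruction image during the write, so pre-subtract it.
    return applyHack ? desired - P9_URMOR_OPAL_HACK : desired;
}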
diff --git a/src/kernel/start.S b/src/kernel/start.S
index 1e2b53cd6..0eda5b003 100644
--- a/src/kernel/start.S
+++ b/src/kernel/start.S
@@ -171,6 +171,37 @@ finished_relocate:
nop; \
b kernel_dispatch_task; /* Return to task */
+#define HYP_INTERRUPT(name, address) \
+ .org _start + address; \
+ HYP_INTERRUPT_NOADDR(name)
+
+#define HYP_INTERRUPT_STUB(name, address) \
+ .org _start + address; \
+ intvect_stub_hyp_##name: \
+ b intvect_hyp_##name;
+
+#define HYP_INTERRUPT_NOADDR(name) \
+ intvect_hyp_##name: \
+ or 2,2,2; /* Ensure thread priority is high. */ \
+ mtsprg1 r1; /* Save GPR1 */ \
+ ;/* Retrieve processing address for interrupt. */ \
+ lis r1, intvect_hyp_##name##_finish_save@h; \
+ ori r1, r1, intvect_hyp_##name##_finish_save@l; \
+ ;/* Save interrupt address in SPRG0 */ \
+ mtsprg0 r1; \
+ mfsprg1 r1; /* Restore GPR1 */ \
+ b kernel_save_task ; /* Save current task. */ \
+ intvect_hyp_##name##_finish_save: \
+ ; /* Get TOC entry for kernel C function */ \
+ lis r2, kernel_execute_hyp_##name##@h; \
+ ori r2, r2, kernel_execute_hyp_##name##@l; \
+ ld r0, 0(r2); /* Load call address */ \
+ mtlr r0; \
+ ld r2, 8(r2); /* Load TOC base. */ \
+ blrl; /* Call kernel function */ \
+ nop; \
+ b hyp_dispatch_task; /* Return to task */
+
STD_INTERRUPT_STUB(system_reset, 0x100)
.org _start + 0x180
@@ -207,20 +238,36 @@ intvect_system_call_fast:
STD_INTERRUPT(system_call, 0xC08)
UNIMPL_INTERRUPT_STUB(trace, 0xD00)
+
+.org _start + 0xD80
+intvect_inst_trampoline:
+ mtspr HSPRG0, r2 ;// Free up a temporary register.
+
+ ;// this is the case where we need to jump to a different exception
+ ;// vector: route to the correct one using hrfid. HSRR0 is already set
+ ;// correctly; just need to update HSRR1 with the current (escalated
+ ;// privilege) MSR
+ mfmsr r2
+ mtspr HSRR1, r2
+
+ ;// cleanup our register usage
+ mfspr r2, HSPRG0 ;// Restore original value of R2
+ hrfid ;// handle the real exception
+
UNIMPL_INTERRUPT_STUB(hype_data_storage, 0xE00)
-UNIMPL_INTERRUPT_STUB(hype_inst_storage, 0xE20)
+;// Hypervisor Instruction Storage Exception Vector
+;// SMF prevents HB userspace from running in UV/HV/PR = 0b111
+;// Thus userspace runs in 0b101 -- however that means all
+;// exceptions first jump to exception vectors with 0b100
+;// and then ALWAYS take a hype inst_storage exception. The desired
+;// exception vector is always in HSRR0
+.org _start + 0xE20
+intvect_syscall_hype_inst_storage:
+ b intvect_inst_trampoline
+
STD_INTERRUPT_STUB(hype_emu_assist, 0xE40)
UNIMPL_INTERRUPT_STUB(hype_maint, 0xE60)
-
-;// Hypervisor Doorbell Exception Vector
-;//
-;// There isn't enough room here for a "normal" stub, so jump out to 'stub2'
-;// where there is some space.
-.org _start + 0xE80
-intvect_syscall_hype_doorbell_stub:
- b intvect_syscall_hype_doorbell_stub2
-
-STD_INTERRUPT_STUB(hypervisor_external, 0xEA0)
+HYP_INTERRUPT_STUB(doorbell_stub, 0xE80)
+HYP_INTERRUPT_STUB(external_stub, 0xEA0)
UNIMPL_INTERRUPT_STUB(perf_monitor, 0xF00)
UNIMPL_INTERRUPT_STUB(vector_unavail, 0xF20)
@@ -234,11 +281,11 @@ UNIMPL_INTERRUPT_STUB(fac_unavail, 0xF60)
;// get this exception.
.org _start + 0xF80
hype_fac_unavail:
- mtspr HSPRG0, r0 ;// Free up a temporary register.
+ mtspr HSPRG1, r0 ;// Free up a temporary register.
mfspr r0,HFSCR
ori r0, r0, 1 ;// Set FP=1 (bit 63).
mtspr HFSCR, r0
- mfspr r0, HSPRG0 ;// Restore temporary
+ mfspr r0, HSPRG1 ;// Restore temporary
hrfid
;// Softpatch Exception Vector
@@ -259,11 +306,11 @@ hype_fac_unavail:
;// P7 Book IV.
.org _start + 0x1500
softpatch_stub:
- mtsprg1 r1 ;// Save of R1 temporarily.
+ mtspr HSPRG1, r1 ;// Free up a temporary register (R1)
mfspr r1, HSRR0 ;// Move HSRR0 -> SRR0.
subi r1, r1, 4 ;// Roll back SRR0 1 instruction to one taking except.
mtsrr0 r1
- mfsprg1 r1 ;// Restore R1 and use normal interrupt code.
+ mfspr r1, HSPRG1 ;// Restore temporary (R1)
STD_INTERRUPT_NOADDR(softpatch)
.section .text.kernelasm
@@ -483,6 +530,8 @@ kernel_dispatch_task:
ori r2,r2, 0xC030 ;// Enable MSR[EE,PR,IR,DR].
rldicl r2,r2,50,1 ;// Clear ...
rotldi r2,r2,14 ;// MSR[FP]
+ rldicl r2,r2,3,1 ;// Clear HV bit...
+ rotldi r2,r2,61 ;// MSR[HV]
ld r3, TASK_MSR_MASK(r1) ;// Load MSR mask.
xor r2, r2, r3 ;// Apply MSR mask (XOR).
mtsrr1 r2 ;// Set task MSR (SRR1)
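The rldicl/rotldi pair added here clears MSR[HV] (IBM bit 3) with the same
rotate-clear-rotate trick used for MSR[FP] just above it; the C++ equivalent
(illustrative only):

// Equivalent of: rldicl r2,r2,3,1 ; rotldi r2,r2,61
// Rotate left 3 so IBM bit 3 lands in the MSB, clear it, rotate back.
uint64_t clearHV(uint64_t msr)
{
    return msr & ~0x1000000000000000ull; // MSR[HV], IBM bit 3
}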
@@ -589,6 +638,121 @@ kernel_dispatch_task:
b 2b
+ ;// @fn hyp_dispatch_task
+ ;// Loads context from task structure and performs hrfid.
+ ;//
+ ;// Requires:
+ ;// * SPRG3 -> Task Structure.
+ ;// * Current contents of registers are not needed.
+hyp_dispatch_task:
+.global hyp_dispatch_task
+ mfsprg3 r1 ;// Load task structure to r1.
+
+ ldarx r0, TASK_CPUPTR, r1 ;// Clear the reservation by loading / storing
+ stdcx. r0, TASK_CPUPTR, r1 ;// the CPU pointer in the task.
+
+ ;// Check if FP enabled, load context.
+ ld r2, TASK_FP_CONTEXT(r1)
+ cmpwi cr0, r2, 0
+ bne- 1f
+2:
+ ;// Restore GPRs from context.
+ ld r0, TASK_GPR_0(r1) ;// GPR0
+ ld r2, TASK_GPR_2(r1) ;// GPR2
+ ld r3, TASK_GPR_3(r1) ;// GPR3
+ ld r4, TASK_GPR_4(r1) ;// GPR4
+ ld r5, TASK_GPR_5(r1) ;// GPR5
+ ld r6, TASK_GPR_6(r1) ;// GPR6
+ ld r7, TASK_GPR_7(r1) ;// GPR7
+ ld r8, TASK_GPR_8(r1) ;// GPR8
+ ld r9, TASK_GPR_9(r1) ;// GPR9
+ ld r10, TASK_GPR_10(r1) ;// GPR10
+ ld r11, TASK_GPR_11(r1) ;// GPR11
+ ld r12, TASK_GPR_12(r1) ;// GPR12
+ ld r13, TASK_GPR_13(r1) ;// GPR13
+ ld r14, TASK_GPR_14(r1) ;// GPR14
+ ld r15, TASK_GPR_15(r1) ;// GPR15
+ ld r16, TASK_GPR_16(r1) ;// GPR16
+ ld r17, TASK_GPR_17(r1) ;// GPR17
+ ld r18, TASK_GPR_18(r1) ;// GPR18
+ ld r19, TASK_GPR_19(r1) ;// GPR19
+ ld r20, TASK_GPR_20(r1) ;// GPR20
+ ld r21, TASK_GPR_21(r1) ;// GPR21
+ ld r22, TASK_GPR_22(r1) ;// GPR22
+ ld r23, TASK_GPR_23(r1) ;// GPR23
+ ld r24, TASK_GPR_24(r1) ;// GPR24
+ ld r25, TASK_GPR_25(r1) ;// GPR25
+ ld r26, TASK_GPR_26(r1) ;// GPR26
+ ld r27, TASK_GPR_27(r1) ;// GPR27
+
+ ld r28, TASK_LR(r1) ;// Load from context: LR, CR, CTR, XER
+ ld r29, TASK_CR(r1)
+ ld r30, TASK_CTR(r1)
+ ld r31, TASK_XER(r1)
+ mtlr r28 ;// Restore LR
+ mtcr r29 ;// Restore CR
+ mtctr r30 ;// Restore CTR
+ mtxer r31 ;// Restore XER
+
+ ld r28, TASK_GPR_28(r1) ;// GPR28
+ ld r29, TASK_GPR_29(r1) ;// GPR29
+ ld r30, TASK_GPR_30(r1) ;// GPR30
+ ld r31, TASK_GPR_31(r1) ;// GPR31
+ ld r1, TASK_GPR_1(r1) ;// GPR1
+
+ ;//On HYP exceptions we don't task
+ ;//switch -- just jump back to where
+ ;//we came from in HSRR0/HSRR1
+ hrfid ;// Execute task.
+
+ ;// Load FP context.
+1:
+ ;// Set MSR[FP] and also in SRR1.
+ mfmsr r3
+ ori r3,r3,0x2000
+ mtmsrd r3
+ mfsrr1 r3
+ ori r3,r3,0x2000
+ mtsrr1 r3
+ ;// Restore FPSCR
+ lfd f0, TASK_FPSCR(r2)
+ mtfsf f0,f0,1,1
+ ;// Restore FPRs
+ lfd f0, TASK_FPR_0(r2)
+ lfd f1, TASK_FPR_1(r2)
+ lfd f2, TASK_FPR_2(r2)
+ lfd f3, TASK_FPR_3(r2)
+ lfd f4, TASK_FPR_4(r2)
+ lfd f5, TASK_FPR_5(r2)
+ lfd f6, TASK_FPR_6(r2)
+ lfd f7, TASK_FPR_7(r2)
+ lfd f8, TASK_FPR_8(r2)
+ lfd f9, TASK_FPR_9(r2)
+ lfd f10, TASK_FPR_10(r2)
+ lfd f11, TASK_FPR_11(r2)
+ lfd f12, TASK_FPR_12(r2)
+ lfd f13, TASK_FPR_13(r2)
+ lfd f14, TASK_FPR_14(r2)
+ lfd f15, TASK_FPR_15(r2)
+ lfd f16, TASK_FPR_16(r2)
+ lfd f17, TASK_FPR_17(r2)
+ lfd f18, TASK_FPR_18(r2)
+ lfd f19, TASK_FPR_19(r2)
+ lfd f20, TASK_FPR_20(r2)
+ lfd f21, TASK_FPR_21(r2)
+ lfd f22, TASK_FPR_22(r2)
+ lfd f23, TASK_FPR_23(r2)
+ lfd f24, TASK_FPR_24(r2)
+ lfd f25, TASK_FPR_25(r2)
+ lfd f26, TASK_FPR_26(r2)
+ lfd f27, TASK_FPR_27(r2)
+ lfd f28, TASK_FPR_28(r2)
+ lfd f29, TASK_FPR_29(r2)
+ lfd f30, TASK_FPR_30(r2)
+ lfd f31, TASK_FPR_31(r2)
+
+ b 2b
+
intvect_system_reset:
;// Need to identify reason for SRESET and then perform appropriate
;// action.
@@ -725,14 +889,8 @@ intvect_system_reset_external:
;// This function moves the hypervisor external interrupt regs
;// into the external interrupt regs and then branches to the
;// external interrupt handler
-intvect_hypervisor_external:
- mtsprg1 r1 ;// Save off R1 temporarily.
- mfspr r1, HSRR0 ;// Move HSRR0 -> SRR0.
- mtsrr0 r1
- mfspr r1, HSRR1 ;// Move HSRR1 -> SRR1.
- mtsrr1 r1
- mfsprg1 r1 ;// Restore R1 and use external interrupt handler
- b intvect_external
+intvect_hyp_external_stub:
+HYP_INTERRUPT_NOADDR(external)
;// @fn system_call_fast_path
;// Handle fast path system calls.
@@ -905,14 +1063,8 @@ UNIMPL_INTERRUPT_NOADDR(hype_maint, 0xE60)
;// exceptions, instead of the SRR[01] registers that the normal exception
;// code deals with. Copy the contents of HSRR[01] -> SRR[01] first.
;//
-intvect_syscall_hype_doorbell_stub2:
- mtsprg0 r1
- mfspr r1,HSRR0
- mtsrr0 r1
- mfspr r1,HSRR1
- mtsrr1 r1
- mfsprg0 r1
-STD_INTERRUPT_NOADDR(hype_doorbell)
+intvect_hyp_doorbell_stub:
+HYP_INTERRUPT_NOADDR(doorbell)
UNIMPL_INTERRUPT_NOADDR(perf_monitor, 0xF00)
UNIMPL_INTERRUPT_NOADDR(vector_unavail, 0xF20)
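The ld r0,0(r2) / ld r2,8(r2) pair in HYP_INTERRUPT_NOADDR above walks a
ppc64 ELFv1 function descriptor. In C++ terms (a sketch; the struct name is
assumed):

// ppc64 ELFv1 function descriptor as dereferenced by HYP_INTERRUPT_NOADDR:
struct FuncDescriptor
{
    uint64_t entry; // ld r0, 0(r2): code address, moved to LR for blrl
    uint64_t toc;   // ld r2, 8(r2): TOC base the callee expects in r2
};
// The macro's mtlr/blrl sequence is then roughly:
//   r2 = desc->toc;  ((void (*)())desc->entry)();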
diff --git a/src/kernel/syscall.C b/src/kernel/syscall.C
index 13f6288fd..8b7f02243 100644
--- a/src/kernel/syscall.C
+++ b/src/kernel/syscall.C
@@ -50,14 +50,15 @@
extern "C"
-void kernel_execute_hype_doorbell()
+void kernel_execute_hyp_doorbell()
{
task_t* t = TaskManager::getCurrentTask();
+ task_t* l_task_post = nullptr;
doorbell_clear();
//Execute all work items on doorbell_actions stack
KernelWorkItem *l_work = t->cpu->doorbell_actions.pop();
- while(l_work != NULL)
+ while(l_work != nullptr)
{
//Execute Work Item and then delete it
(*l_work)();
@@ -77,13 +78,18 @@ void kernel_execute_hype_doorbell()
InterruptMsgHdlr::sendIpcMsg(pir);
}
- if (t->cpu->idle_task == t)
- {
- t->cpu->scheduler->returnRunnable();
- t->cpu->scheduler->setNextRunnable();
- }
-
DeferredQueue::execute();
+
+ // Mustn't switch tasks on an external interrupt because
+ // external interrupts come in as HYP exceptions while all
+ // others come in as regular exceptions. If a HYP exception
+ // arrives on top of a regular one, the existing task must
+ // be left as is. The custom implementation of sendMessage
+ // in InterruptMsgHdlr takes care of task switching safely.
+
+ //check to see if work switched the task
+ l_task_post = TaskManager::getCurrentTask();
+ kassert(t == l_task_post);
}
extern "C"
@@ -738,15 +744,18 @@ namespace Systemcalls
void CpuSprValue(task_t *t)
{
uint64_t spr = TASK_GETARG0(t);
+ uint64_t l_smf_bit = 0x0;
switch (spr)
{
case CPU_SPR_MSR:
- TASK_SETRTN(t, CpuManager::WAKEUP_MSR_VALUE);
+ //Set SMF bit based on current setting (HB never turns it off)
+ l_smf_bit = getMSR() & MSR_SMF_MASK;
+ TASK_SETRTN(t, WAKEUP_MSR_VALUE | l_smf_bit);
break;
case CPU_SPR_LPCR:
- TASK_SETRTN(t, CpuManager::WAKEUP_LPCR_VALUE);
+ TASK_SETRTN(t, WAKEUP_LPCR_VALUE);
break;
case CPU_SPR_HRMOR:
@@ -805,9 +814,9 @@ namespace Systemcalls
if (STOP_INSTRUCTION == (*instruction)) // Verify 'nap' instruction,
// otherwise just return.
{
- // Disable EE, PR, IR, DR so 'nap' can be executed.
+ // Disable HV, EE, ME, PR, IR, DR so 'nap' can be executed.
// (which means to stay in HV state)
- t->context.msr_mask = 0xC030;
+ t->context.msr_mask = 0x100000000000D030;
}
};
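Because Hostboot never drops out of SMF once it is running secure,
CpuSprValue() can simply fold the live SMF bit into the canned wakeup
value; a standalone sketch of that computation:

#include <cstdint>

const uint64_t WAKEUP_MSR_VALUE = 0x9000000000001000; // from the patch
const uint64_t MSR_SMF_MASK     = 0x0000000000400000; // MSR bit 41 (S)

// Report SMF exactly as the platform is currently running it.
uint64_t wakeupMsrForTask(uint64_t currentMsr)
{
    return WAKEUP_MSR_VALUE | (currentMsr & MSR_SMF_MASK);
}
// wakeupMsrForTask(0x9000000000401000) -> 0x9000000000401000 (SMF on)
// wakeupMsrForTask(0x9000000000001000) -> 0x9000000000001000 (SMF off)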