summaryrefslogtreecommitdiffstats
path: root/src/include
diff options
context:
space:
mode:
Diffstat (limited to 'src/include')
-rw-r--r--src/include/kernel/cpu.H18
-rw-r--r--src/include/kernel/cpumgr.H8
-rw-r--r--src/include/kernel/scheduler.H3
-rw-r--r--src/include/kernel/syscalls.H50
-rw-r--r--src/include/kernel/types.h4
-rw-r--r--src/include/sys/task.h15
-rw-r--r--src/include/util/lockfree/atomic_construct.H62
7 files changed, 126 insertions, 34 deletions
diff --git a/src/include/kernel/cpu.H b/src/include/kernel/cpu.H
index 420ec8827..ee24bdd64 100644
--- a/src/include/kernel/cpu.H
+++ b/src/include/kernel/cpu.H
@@ -22,7 +22,7 @@
// IBM_PROLOG_END
/** @file cpu.H
* @brief Defines kernel information and functions about CPUs.
- *
+ *
* In this kernel the term CPU refers to a hardware thread (SMT), not core.
*/
#ifndef __KERNEL_CPU_H
@@ -40,6 +40,12 @@ class Scheduler;
/** @struct cpu_t
* @brief Stores per-CPU kernel information.
+ *
+ * @note kernel_stack and master need to be at fixed locations in this
+ * structure due to usages in start.S.
+ *
+ * - kernel_stack is a double-word at cpu_t[0 bytes].
+ * - master is a byte at cpu_t[12 bytes].
*/
struct cpu_t
{
@@ -49,8 +55,14 @@ struct cpu_t
/** ID of the CPU (PIR value) */
cpuid_t cpu;
- /** If the CPU is the master */
- bool master;
+ struct
+ {
+ /** If the CPU is the master */
+ bool master;
+
+ /** Ensure alignment of master attribute for asm code. */
+ uint64_t __reserved_master:24;
+ } PACKED;
/** Pointer to the scheduler for this CPU (may not be unique) */
Scheduler* scheduler;
diff --git a/src/include/kernel/cpumgr.H b/src/include/kernel/cpumgr.H
index e6fb33a77..31759564b 100644
--- a/src/include/kernel/cpumgr.H
+++ b/src/include/kernel/cpumgr.H
@@ -30,14 +30,18 @@ class CpuManager
{
public:
enum { MAXCPUS = KERNEL_MAX_SUPPORTED_CPUS };
-
- /** @fn getCurrentCPU
+
+ /** @fn getCurrentCPU
* Returns a pointer to the current CPU structure by using the
* task structure in SPRG3.
*/
static cpu_t* getCurrentCPU();
static cpu_t* getCpu(size_t i) { return cv_cpus[i]; }
+ /** @brief Return pointer to master CPU object.
+ */
+ static cpu_t* getMasterCPU();
+
static void init();
static void init_slave_smp(cpu_t*);
diff --git a/src/include/kernel/scheduler.H b/src/include/kernel/scheduler.H
index c7a991c45..16861b6e6 100644
--- a/src/include/kernel/scheduler.H
+++ b/src/include/kernel/scheduler.H
@@ -35,12 +35,13 @@ class Scheduler
friend class CpuManager;
void addTask(task_t*);
+ void addTaskMasterCPU(task_t*);
void returnRunnable();
void setNextRunnable();
protected:
- Scheduler() :
+ Scheduler() :
iv_taskList() {};
~Scheduler() {};
diff --git a/src/include/kernel/syscalls.H b/src/include/kernel/syscalls.H
index 47295ecc2..332273a07 100644
--- a/src/include/kernel/syscalls.H
+++ b/src/include/kernel/syscalls.H
@@ -36,62 +36,66 @@ namespace Systemcalls
* These are passed by userspace code via r3 when the sc instruction is
* executed. The kernel performs a case statement to switch to the
* appropriate system call handler.
+ *
+ * @note TASK_MIGRATE_TO_MASTER value must be kept in sync with start.S.
*/
enum SysCalls
{
/** task_yield() */
- TASK_YIELD = 0,
+ TASK_YIELD = 0,
/** task_create() */
- TASK_START,
+ TASK_START = 1,
/** task_end() */
- TASK_END,
+ TASK_END = 2,
+ /** task_affinity_migrate_to_master() */
+ TASK_MIGRATE_TO_MASTER = 3,
/** msgq_create() */
- MSGQ_CREATE,
+ MSGQ_CREATE,
/** msgq_destroy() */
- MSGQ_DESTROY,
+ MSGQ_DESTROY,
/** VFS internal */
- MSGQ_REGISTER_ROOT,
+ MSGQ_REGISTER_ROOT,
/** VFS internal */
- MSGQ_RESOLVE_ROOT,
+ MSGQ_RESOLVE_ROOT,
/** msg_send() */
- MSG_SEND,
+ MSG_SEND,
/** msg_sendrecv() */
- MSG_SENDRECV,
+ MSG_SENDRECV,
/** msg_respond() */
- MSG_RESPOND,
+ MSG_RESPOND,
/** msg_wait() */
- MSG_WAIT,
+ MSG_WAIT,
/** mmio_map() */
- MMIO_MAP,
+ MMIO_MAP,
/** mmio_unmap() */
- MMIO_UNMAP,
+ MMIO_UNMAP,
/** dev_map() */
- DEV_MAP,
+ DEV_MAP,
/** dev_unmap() */
- DEV_UNMAP,
+ DEV_UNMAP,
/** nanosleep() */
- TIME_NANOSLEEP,
+ TIME_NANOSLEEP,
/** futex_wait() */
- FUTEX_WAIT,
+ FUTEX_WAIT,
/** futex_wake() */
- FUTEX_WAKE,
+ FUTEX_WAKE,
/** shutdown() */
- MISC_SHUTDOWN,
+ MISC_SHUTDOWN,
/** cpu_core_type() */
- MISC_CPUCORETYPE,
+ MISC_CPUCORETYPE,
/** cpu_dd_level() */
- MISC_CPUDDLEVEL,
+ MISC_CPUDDLEVEL,
/** mm_alloc_block() */
- MM_ALLOC_BLOCK,
+ MM_ALLOC_BLOCK,
- SYSCALL_MAX
+ SYSCALL_MAX
};
/** @enum SysCalls_FastPath
diff --git a/src/include/kernel/types.h b/src/include/kernel/types.h
index a53dd79e9..7b201b0de 100644
--- a/src/include/kernel/types.h
+++ b/src/include/kernel/types.h
@@ -25,11 +25,11 @@
#include <stdint.h>
-typedef uint16_t tid_t; // This is 16-bit for the VMM mapping of
+typedef uint16_t tid_t; // This is 16-bit for the VMM mapping of
// stacks. See VmmManager.
struct task_t;
-typedef uint64_t cpuid_t;
+typedef uint32_t cpuid_t;
struct cpu_t;
#endif
diff --git a/src/include/sys/task.h b/src/include/sys/task.h
index fc1604607..9f43996c4 100644
--- a/src/include/sys/task.h
+++ b/src/include/sys/task.h
@@ -30,7 +30,7 @@
#include <kernel/types.h>
#ifdef __cplusplus
-extern "C"
+extern "C"
{
#endif
@@ -102,8 +102,8 @@ tid_t task_exec(const char* path, void* arg);
/** @fn task_affinity_pin
* @brief Pins a task onto the CPU it is currently executing on.
*
- * This function may be called any number of times and each should be paired
- * with a task_affinity_unpin call. This is so that callers do not need to
+ * This function may be called any number of times and each should be paired
+ * with a task_affinity_unpin call. This is so that callers do not need to
* be concerned with affinity pinning desires of functions above and below in
* a call stack.
*
@@ -121,6 +121,15 @@ void task_affinity_pin();
*/
void task_affinity_unpin();
+/** @fn task_affinity_migrate_to_master
+ * @brief Moves a task from the CPU it is on to the master core/thread.
+ *
+ * Unless the affinity is pinned, the task could be migrated to another
+ * core at any point in time. It is suggested to call task_affinity_pin
+ * prior to this call.
+ */
+void task_affinity_migrate_to_master();
+
#ifdef __cplusplus
}
#endif
diff --git a/src/include/util/lockfree/atomic_construct.H b/src/include/util/lockfree/atomic_construct.H
new file mode 100644
index 000000000..6946a1c60
--- /dev/null
+++ b/src/include/util/lockfree/atomic_construct.H
@@ -0,0 +1,62 @@
+// IBM_PROLOG_BEGIN_TAG
+// This is an automatically generated prolog.
+//
+// $Source: src/include/util/lockfree/atomic_construct.H $
+//
+// IBM CONFIDENTIAL
+//
+// COPYRIGHT International Business Machines Corp. 2011
+//
+// p1
+//
+// Object Code Only (OCO) source materials
+// Licensed Internal Code Source Materials
+// IBM HostBoot Licensed Internal Code
+//
+// The source code for this program is not published or other-
+// wise divested of its trade secrets, irrespective of what has
+// been deposited with the U.S. Copyright Office.
+//
+// Origin: 30
+//
+// IBM_PROLOG_END
+#ifndef __UTIL_LOCKFREE_ATOMIC_CONSTRUCT_H
+#define __UTIL_LOCKFREE_ATOMIC_CONSTRUCT_H
+
+namespace Util
+{
+ namespace Lockfree
+ {
+
+ /** @brief Atomically construct an object and assign it to a pointer.
+ *
+ * This function will check if a pointer is still NULL and
+ * construct an object, atomically, if needed. If the pointer is
+ * not NULL, the current instance will be left alone.
+ *
+     * @param[in] ptr - Pointer to the pointer that will hold the object.
+ *
+ * Example:
+ * Foo* iv_foo; //<--- instance variable in class.
+ * atomic_construct(&iv_foo);
+ *
+     * @note It is possible for multiple instances to exist temporarily
+     *       if this code is called from multiple CPUs, but only one
+     *       instance will survive beyond the lifetime of this function.
+ */
+ template<typename _T>
+ void atomic_construct(_T** ptr)
+ {
+ if (__sync_bool_compare_and_swap(ptr, NULL, NULL))
+ {
+ _T* instance = new _T();
+ if (!__sync_bool_compare_and_swap(ptr, NULL, instance))
+ {
+ delete instance;
+ }
+ }
+ }
+ };
+};
+
+#endif
OpenPOWER on IntegriCloud