From 5f63cb3eb8f54827a80424dd346620fbf279ef6f Mon Sep 17 00:00:00 2001
From: daney
Date: Wed, 22 Oct 2003 16:35:17 +0000
Subject: 2003-10-22  David Daney

 */
+
+/*
+   Although this in theory could be 'C' instead of C++, saying that it
+   is C++ and including jvm.h makes it easier to ensure that the proper
+   compiler options are used.  There must be unwind tables for
+   backtrace because it is on the stack when _Unwind_Backtrace is
+   called.  Compiling as C++ ensures this.
+
+*/
+
+#include <config.h>
+
+#include <jvm.h>
+
+#include <unwind.h>
+
+
+extern "C"
+{
+  int backtrace (void **, int);
+}
+
+struct backtrace_state
+{
+  int skip_count;
+  int current_level;
+  int max_level;
+  void **locations;
+};
+
+static _Unwind_Reason_Code
+my_trace_fn (struct _Unwind_Context *uc, void *arg)
+{
+
+  struct backtrace_state *bs = (struct backtrace_state *) arg;
+
+  if (bs->skip_count)
+    {
+      bs->skip_count--;
+      return _URC_NO_REASON;
+    }
+
+  _Unwind_Ptr loc = _Unwind_GetIP (uc);
+
+  if (bs->current_level < bs->max_level)
+    bs->locations[bs->current_level++] = (void *) loc;
+
+  if (bs->current_level >= bs->max_level)
+    return _URC_END_OF_STACK;
+  else
+    return _URC_NO_REASON;
+}
+
+/*
+ * backtrace is defined in (some versions of) libc.  This definition
+ * must match so that it can replace the libc version at link time.
+ *
+ * Fill the locations array with at most len back trace locations.
+ *
+ * Returns the number of locations actually filled in.
+ *
+ */
+int
+backtrace (void **locations, int len)
+{
+  struct backtrace_state bs;
+  bs.skip_count = 1;	/* Don't log the call to backtrace itself.  */
+  bs.current_level = 0;
+  bs.max_level = len;
+  bs.locations = locations;
+
+  _Unwind_Backtrace (my_trace_fn, &bs);
+  return bs.current_level;
+}
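
For reference, since this definition matches the libc backtrace entry point, callers
use it exactly as they would the glibc version. A minimal sketch (not part of the
patch, and assuming the definition above is linked in; the print_trace name and the
32-entry buffer are illustrative):

#include <cstdio>

// Matches the declaration in the file above; glibc declares the same
// entry point in <execinfo.h>.
extern "C" int backtrace (void **, int);

static void
print_trace ()
{
  void *locations[32];                     // illustrative fixed-size buffer
  int depth = backtrace (locations, 32);   // fills at most 32 entries

  // depth is the number of entries actually filled in.
  for (int i = 0; i < depth; i++)
    std::printf ("#%d  %p\n", i, locations[i]);
}
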
diff --git a/libjava/sysdep/mips/locks.h b/libjava/sysdep/mips/locks.h
new file mode 100644
index 00000000000..80509cadbbd
--- /dev/null
+++ b/libjava/sysdep/mips/locks.h
@@ -0,0 +1,109 @@
+// locks.h - Thread synchronization primitives. MIPS implementation.
+
+/* Copyright (C) 2003  Free Software Foundation
+
+   This file is part of libgcj.
+
+This software is copyrighted work licensed under the terms of the
+Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
+details.  */
+
+#ifndef __SYSDEP_LOCKS_H__
+#define __SYSDEP_LOCKS_H__
+
+/* Integer type big enough for object address.  */
+typedef unsigned obj_addr_t __attribute__((__mode__(__pointer__)));
+
+
+// Atomically replace *addr by new_val if it was initially equal to old.
+// Return true if the comparison succeeded.
+// Assumed to have acquire semantics, i.e. later memory operations
+// cannot execute before the compare_and_swap finishes.
+inline static bool
+compare_and_swap(volatile obj_addr_t *addr,
+                 obj_addr_t old,
+                 obj_addr_t new_val)
+{
+  long result;
+  __asm__ __volatile__(".set\tpush\n\t"
+                       ".set\tnoreorder\n\t"
+                       ".set\tnomacro\n\t"
+                       "1:\n\t"
+#if _MIPS_SIM == _ABIO32
+                       ".set\tmips2\n\t"
+#endif
+                       "ll\t%[result],0(%[addr])\n\t"
+                       "bne\t%[result],%[old],2f\n\t"
+                       "move\t%[result],$0\n\t" // delay slot
+                       "move\t%[result],%[new_val]\n\t"
+                       "sc\t%[result],0(%[addr])\n\t"
+                       "beq\t%[result],$0,1b\n\t"
+                       "nop\n\t" // delay slot
+                       "2:\n\t"
+                       ".set\tpop"
+                       : [result] "=&r" (result)
+                       : [addr] "r" (addr), [new_val] "r" (new_val),
+                         [old] "r" (old)
+                       : "memory");
+  return (bool) result;
+}
+
+// Set *addr to new_val with release semantics, i.e. making sure
+// that prior loads and stores complete before this
+// assignment.
+inline static void
+release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
+{
+  __asm__ __volatile__(".set\tpush\n\t"
+#if _MIPS_SIM == _ABIO32
+                       ".set\tmips2\n\t"
+#endif
+                       "sync\n\t"
+                       ".set\tpop" : : : "memory");
+  *(addr) = new_val;
+}
+
+// Compare_and_swap with release semantics instead of acquire semantics.
+// On many architectures, the operation makes both guarantees, so the
+// implementation can be the same.
+inline static bool
+compare_and_swap_release(volatile obj_addr_t *addr,
+                         obj_addr_t old,
+                         obj_addr_t new_val)
+{
+  __asm__ __volatile__(".set\tpush\n\t"
+#if _MIPS_SIM == _ABIO32
+                       ".set\tmips2\n\t"
+#endif
+                       "sync\n\t"
+                       ".set\tpop" : : : "memory");
+  return compare_and_swap(addr, old, new_val);
+}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+// On MIPS this requires an explicit sync.
+inline static void
+read_barrier()
+{
+  __asm__ __volatile__(".set\tpush\n\t"
+#if _MIPS_SIM == _ABIO32
+                       ".set\tmips2\n\t"
+#endif
+                       "sync\n\t"
+                       ".set\tpop" : : : "memory");
+}
+
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+  __asm__ __volatile__(".set\tpush\n\t"
+#if _MIPS_SIM == _ABIO32
+                       ".set\tmips2\n\t"
+#endif
+                       "sync\n\t"
+                       ".set\tpop" : : : "memory");
+}
+
+#endif // __SYSDEP_LOCKS_H__
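
To make the acquire/release pairing concrete, here is a minimal sketch
(illustrative only, not part of the change) of a spin lock built on the
primitives above. The spin_lock/spin_unlock names and the global lock word are
hypothetical, and this only builds for a MIPS target because the primitives use
MIPS inline assembly:

#include "locks.h"   // the header added by this patch

static volatile obj_addr_t lock_word = 0;   // 0 = free, 1 = held

static void
spin_lock ()
{
  // compare_and_swap has acquire semantics, so memory operations in
  // the critical section cannot move above a successful swap.
  while (!compare_and_swap (&lock_word, 0, 1))
    ;  // spin until the word changes from 0 to 1
}

static void
spin_unlock ()
{
  // release_set issues a sync first, so stores made while the lock
  // was held complete before the word is cleared.
  release_set (&lock_word, 0);
}
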