/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * This source file is part of SableVM.                              *
 *                                                                   *
 * See the file "LICENSE" for the copyright information and for      *
 * the terms and conditions for copying, distribution and            *
 * modification of this source file.                                 *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* on Linux/Intel, we'll need to put the FPU into double precision mode */
#if defined(__i386__) && defined(__linux__)
#include <fpu_control.h>
#endif

#if defined (HAS_SYSTEM_CLEAR_CACHE)
/* A clever hack to avoid the time-consuming writing of an optimized
 * cache flush instruction for every architecture in the world.
 * Unfortunately it won't work without GCC. (GBP) */
extern void __clear_cache (char *beg, char *end);
#endif /* defined (HAS_SYSTEM_CLEAR_CACHE) */

/* Beware: Architecture-specific pieces are in *alphabetical* order */

#if (( defined (__sparc__) || defined (__ia64__) || defined (__alpha__) \
    || defined (__i386__) || defined (__powerpc__) || defined (__s390__) \
    || defined (__hppa__) || defined (__arm__) || defined (__m68k__) \
    || defined (__mc68000__) || defined (__mips__) || defined (__mipsel__) \
    || defined (__x86_64__) || defined (_POWER) \
    ) && defined (__GNUC__))

/* ----------------------------------------------------------------------
   _svmh_compare_and_swap
   ---------------------------------------------------------------------- */

/* this function ATOMICALLY does the following:

   if (*pword == old_value)
     {
       *pword = new_value;
       return 1;
     }
   else
     {
       return 0;
     }
*/

inline jboolean
_svmh_compare_and_swap (volatile _svmt_word *pword,
                        _svmt_word old_value, _svmt_word new_value)
{
  /* Yes, some inline assembly source code...  Unfortunately, this
     cannot be expressed in C. */

#if defined (__alpha__)

  register _svmt_word result, tmp;

  /* *INDENT-OFF* */
  __asm__ __volatile__ ("1: mb\n\t"            /* make sure */
                        " ldq_l %1,%4\n\t"     /* load *pword into tmp (reg,<= mem) */
                        " cmpeq %1,%5,%0\n\t"  /* result = (*pword == old_value) */
                        " beq %0,3f\n\t"       /* nothing to do if they differ (0) - jump away */
                        " mov %3,%1\n\t"       /* copy tmp <= new_value so that we don't lose it */
                        " stq_c %1,%4\n\t"     /* *pword = new_value (reg,=> mem) */
                        " beq %1,2f\n\t"       /* the store could fail! (%1 overwritten!) */
                        " mb\n\t"              /* make sure */
                        " br 3f\n\t"           /* we're done */
                        "2: br 1b\n\t"         /* goto "again" */
                        "3: nop"
                        :"=&r" (result), "=&r" (tmp), "=m" (*pword)
                        :"r" (new_value), "m" (*pword), "r" (old_value));
  /* *INDENT-ON* */

#elif defined (__hppa__)

  /* The only atomic instruction available on hppa is "load and clear";
   * locks are initialized to 1 on hppa.  In this simple implementation
   * we use a global lock to make compare-and-swap atomic. */

#define __ldcw(a) ({ \
  unsigned int __ret; \
  __asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
  __ret; \
})

  static struct
  {
    volatile unsigned int __attribute__ ((aligned (16))) lock;
  } lock;
  static int lock_initialized = 0;
  int result;

  if (!lock_initialized)
    {
      lock.lock = 1;
      lock_initialized = 1;     /* don't reset the lock on later calls */
    }

  /* spinlock */
  while (__ldcw (&lock.lock) == 0);

  if (*pword == old_value)
    {
      *pword = new_value;
      result = 1;
    }
  else
    {
      result = 0;
    }

  /* unlock */
  lock.lock = 1;

  /* Prevent reordering */
  __asm__ __volatile__ ("":::"memory");

#elif defined (__i386__)

  /* On the ia32, cmpxchgl has a side effect.  When swapping fails,
     the following variable contains the value that is currently in
     *pword (presumably different from old_value).
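
     Roughly, the C-level semantics of the "lock; cmpxchgl" sequence
     below are (an illustrative sketch only, not code that executes):

       if (*pword == old_value)
         { *pword = new_value;      ZF = 1; }
       else
         { current_value = *pword;  ZF = 0; }

     where ZF is the processor's zero flag; the "sete" instruction
     then copies ZF into 'result'.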
   */

  _svmt_word current_value;
  _svmt_u8 result;

  /* *INDENT-OFF* */
  __asm__ __volatile__ ("lock\n\t"
                        "cmpxchgl %3, %1\n\t"
                        "sete %0"
                        :"=q" (result), "=m" (*pword), "=a" (current_value)
                        :"r" (new_value), "m" (*pword), "a" (old_value)
                        :"memory");
  /* *INDENT-ON* */

#elif defined (__ia64__)

#include <ia64intrin.h>

  jboolean result;

  result = __sync_bool_compare_and_swap (pword, old_value, new_value);

#elif defined (__x86_64__)

  _svmt_word current_value;
  _svmt_u8 result;

  /* *INDENT-OFF* */
  __asm__ __volatile__ ("lock\n\t"
                        "cmpxchgq %3, %1\n\t"
                        "sete %0"
                        :"=q" (result), "=m" (*pword), "=a" (current_value)
                        :"r" (new_value), "m" (*pword), "a" (old_value)
                        :"memory");
  /* *INDENT-ON* */

#else

#error "SableVM is not prepared to run on this kind of a system; no atomic compare&swap defined."

#endif /* defined (__alpha__) */

  return result ? JNI_TRUE : JNI_FALSE;
}

#endif /* (( defined (__sparc__) || ...) && defined (__GNUC__)) */

/* ----------------------------------------------------------------------
   _svmf_set_fpu_double_precision_mode
   ---------------------------------------------------------------------- */

void
_svmf_set_fpu_double_precision_mode (void)
{
  /* on Linux/Intel, put the FPU into double precision mode */
#if defined(__i386__) && defined(__linux__)

  /* This puts the x86 FPU in 64-bit precision mode, as opposed to the
     default 80-bit mode used by Linux.  For more explanations see:
     http://www.srware.com/linux_numerics.txt

     Originally this was done in lib_init.c, but apparently some code
     executed later resets the flag, so we set it to the "right" value
     on each interpreter invocation.
     TODO: find out where/what exactly resets the flag. */
  {
    fpu_control_t fpu_control;

    _FPU_GETCW (fpu_control);

    fpu_control &= ~_FPU_EXTENDED;
    fpu_control |= _FPU_DOUBLE;

    _FPU_SETCW (fpu_control);
  }
#endif /* i386 && linux */
}

/* ----------------------------------------------------------------------
   _svmf_order_d64_bits
   ---------------------------------------------------------------------- */

_svmt_u64
_svmf_order_d64_bits (_svmt_u64 value)
{
#if defined(__arm__)
  /* On ARM, 'double' values have their two 32-bit halves stored in
     reversed order, so swap the halves of the incoming value. */
  _svmt_u64 result, result1, result2;

  result1 = value & 0xffffffff;
  result2 = value >> 32;
  result = (result1 << 32) | result2;

  return result;
#else
  return value;
#endif
}

/* ----------------------------------------------------------------------
   _svmf_aligned_size_t
   ---------------------------------------------------------------------- */

/* rounds a size up to the next multiple of SVM_ALIGNMENT, e.g. with
   SVM_ALIGNMENT == 8, sizes 1..8 map to 8 and 9..16 map to 16 */
inline size_t
_svmf_aligned_size_t (size_t size)
{
  return (size + (SVM_ALIGNMENT - 1)) & ~((size_t) (SVM_ALIGNMENT - 1));
}
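
/* A minimal, hedged sketch of how a caller could use
   _svmh_compare_and_swap (defined above) to build a simple spin lock.
   The names spin_lock_acquire/spin_lock_release and the busy-wait
   policy are illustrative assumptions, not part of SableVM's actual
   locking code, so the sketch is kept compiled out. */
#if 0

static void
spin_lock_acquire (volatile _svmt_word *plock)
{
  /* 0 = free, 1 = held; retry until the swap from 0 to 1 succeeds */
  while (_svmh_compare_and_swap (plock, 0, 1) == JNI_FALSE)
    ;
}

static void
spin_lock_release (volatile _svmt_word *plock)
{
  /* a real implementation would also need a memory barrier here on
     weakly ordered architectures */
  *plock = 0;
}

#endif /* usage sketch */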