/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * This source file is part of SableVM.                              *
 *                                                                   *
 * See the file "LICENSE" for the copyright information and for     *
 * the terms and conditions for copying, distribution and           *
 * modification of this source file.                                *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* Thread specific data */

static pthread_key_t _svmv_env_key;     /* key to access thread specific environment data */

/* stack_offset */

static const size_t _svmv_stack_offset =
  (sizeof (_svmt_stack_frame) + (SVM_ALIGNMENT - 1)) &
  ~((size_t) (SVM_ALIGNMENT - 1));

/* ----------------------------------------------------------------------
   _svmf_thread_init
   ---------------------------------------------------------------------- */

svm_static jint
_svmf_thread_init (void)
{
  /* create key for thread specific data */
  if (pthread_key_create (&_svmv_env_key, NULL) != 0)
    {
      goto error;
    }

  return JNI_OK;

error:

  return JNI_ERR;
}

/* ----------------------------------------------------------------------
   _svmf_get_current_env
   ---------------------------------------------------------------------- */

static _svmt_JNIEnv *
_svmf_get_current_env (void)
{
  return (_svmt_JNIEnv *) pthread_getspecific (_svmv_env_key);
}

/* ----------------------------------------------------------------------
   _svmf_set_current_env
   ---------------------------------------------------------------------- */

static void
_svmf_set_current_env (_svmt_JNIEnv *env)
{
#ifndef NDEBUG
  int error =
#endif
    pthread_setspecific (_svmv_env_key, env);

  assert (!error);
}

/* ----------------------------------------------------------------------
   _svmf_stack_init_defaults
   ---------------------------------------------------------------------- */

svm_static void
_svmf_stack_init_defaults (_svmt_JavaVM *vm)
{
  vm->stack_min_size = SVM_STACK_DEFAULT_MIN_SIZE;
  vm->stack_max_size = SVM_STACK_DEFAULT_MAX_SIZE;
  vm->stack_allocation_increment = SVM_STACK_DEFAULT_ALLOCATION_INCREMENT;
}

/* ----------------------------------------------------------------------
   _svmf_stack_init
   ---------------------------------------------------------------------- */

svm_static jint
_svmf_stack_init (_svmt_JNIEnv *env)
{
  _svmt_JavaVM *vm = env->vm;
  size_t lrefs_offset = _svmf_aligned_size_t (sizeof (_svmt_stack_frame));
  jint lrefs_count = SVM_FRAME_NATIVE_REFS_MIN;
  size_t lrefs_size =
    _svmf_aligned_size_t ((lrefs_count + 2) *
                          sizeof (_svmt_stack_native_reference));
  size_t frame_size = lrefs_offset + lrefs_size;
  size_t alloc_size;

  if (frame_size <= vm->stack_min_size)
    {
      alloc_size = vm->stack_min_size;
    }
  else if (vm->stack_max_size == 0 || frame_size <= vm->stack_max_size)
    {
      alloc_size = vm->stack_min_size +
        _svmf_aligned_to_increment (frame_size - vm->stack_min_size,
                                    vm->stack_allocation_increment);
    }
  else
    {
      _svmf_error_OutOfMemoryError (env);
      return JNI_ERR;
    }

  env->stack.start = _svmf_malloc (alloc_size);

  if (env->stack.start == NULL)
    {
      _svmf_error_OutOfMemoryError (env);
      return JNI_ERR;
    }

  env->stack.current_frame = env->stack.start;
  env->stack.end = ((char *) env->stack.start) + alloc_size;

  env->stack.current_frame->previous_offset = 0;
  env->stack.current_frame->end_offset = frame_size;
  env->stack.current_frame->method = &vm->stack_bottom_method;
  env->stack.current_frame->stack_trace_element = NULL;
  env->stack.current_frame->lock_count = 0;
  env->stack.current_frame->this = NULL;
  env->stack.current_frame->pc = vm->stack_bottom_method.frame_info->code;
  env->stack.current_frame->stack_size = 0;

  {
    _svmt_stack_native_reference *lrefs =
      (_svmt_stack_native_reference *) (((char *) env->stack.current_frame) +
                                        env->stack.current_frame->end_offset);
    jint i;

    lrefs[-1].jint = lrefs_count;
    lrefs[-2].size_t = lrefs_size;

    lrefs = (_svmt_stack_native_reference *) (((char *) lrefs) - lrefs_size);

    for (i = 0; i < lrefs_count; i++)
      {
        if (_svmm_new_native_local (env, lrefs[i].jobject) != JNI_OK)
          {
            return JNI_ERR;
          }
      }
  }

  return JNI_OK;
}
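/* Illustration (not VM code): assuming _svmf_aligned_to_increment rounds
 * its first argument up to the next multiple of its second, the growing
 * branch above behaves like:
 *
 *     size_t
 *     aligned_to_increment (size_t size, size_t increment)
 *     {
 *       return ((size + increment - 1) / increment) * increment;
 *     }
 *
 * e.g. with stack_min_size = 64 KB, stack_allocation_increment = 16 KB
 * and frame_size = 80 KB, alloc_size = 64 KB + 16 KB = 80 KB. */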
/* ----------------------------------------------------------------------
   _svmf_ensure_stack_capacity
   ---------------------------------------------------------------------- */

svm_static jint
_svmf_ensure_stack_capacity (_svmt_JNIEnv *env, size_t frame_size)
{
  void *current_frame = env->stack.current_frame;
  void *current_frame_end =
    ((char *) current_frame) + env->stack.current_frame->end_offset;
  void *stack_end = env->stack.end;
  size_t available = ((char *) stack_end) - ((char *) current_frame_end);

  assert (stack_end >= current_frame_end);

#ifdef STATISTICS
  {
    _svmt_JavaVM *vm = env->vm;
    void *stack_start = env->stack.start;
    _svmt_stack_frame *frame = (_svmt_stack_frame *) current_frame;

    if (((char *) frame) - ((char *) stack_start) > vm->max_stack_size)
      {
        _svmt_method_info *method = env->stack.current_frame->method;

        vm->max_stack_size = ((char *) frame) - ((char *) stack_start);
        vm->stack_local_count = 0;
        vm->stack_local_split_count = 0;

        while (method != &vm->stack_bottom_method)
          {
            if (!_svmf_is_set_flag (method->access_flags, SVM_ACC_INTERNAL))
              {
                vm->stack_local_count += method->frame_info->local_count;
                vm->stack_local_split_count +=
                  method->frame_info->local_split_count;
              }

            frame = (_svmt_stack_frame *) (((char *) frame) -
                                           frame->previous_offset);
            method = frame->method;
          }
      }
  }
#endif

  if (frame_size > available)
    {
      _svmt_JavaVM *vm = env->vm;
      size_t stack_increment = vm->stack_allocation_increment;

      if (stack_increment == 0)
        {
          _svmf_error_OutOfMemoryError (env);
          return JNI_ERR;
        }
      else
        {
          size_t min_growth = frame_size - available;
          void *stack_start = env->stack.start;
          size_t current_frame_offset =
            ((char *) current_frame) - ((char *) stack_start);

          /* growth should be a multiple of stack_increment large enough
             to hold min_growth */
          size_t growth =
            _svmf_aligned_to_increment (min_growth, stack_increment);

          size_t current_size =
            ((char *) stack_end) - ((char *) stack_start);
          size_t new_size = current_size + growth;
          void *new_stack;

          /* detect overflows */
          if ((vm->stack_max_size != 0 && new_size > vm->stack_max_size)
              || new_size <= current_size)
            {
              _svmf_error_OutOfMemoryError (env);
              return JNI_ERR;
            }

          new_stack = _svmf_realloc (stack_start, new_size);

          /* out of memory */
          if (new_stack == NULL)
            {
              _svmf_error_OutOfMemoryError (env);
              return JNI_ERR;
            }

          env->stack.start = new_stack;
          env->stack.end = ((char *) new_stack) + new_size;
          env->stack.current_frame =
            (_svmt_stack_frame *) (((char *) new_stack) +
                                   current_frame_offset);
        }
    }

  return JNI_OK;
}
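/* The order of operations above matters: _svmf_realloc may move the
 * whole stack, so the current frame's position is saved as a byte offset
 * and the pointer is rebuilt from the new base.  A minimal sketch of the
 * same pattern in plain C (save positions as offsets, realloc, then
 * rebuild the pointers):
 *
 *     char *base = malloc (old_size);
 *     size_t offset = (size_t) (cursor - base);
 *     char *moved = realloc (base, new_size);
 *
 *     if (moved != NULL)
 *       {
 *         cursor = moved + offset;
 *       }
 */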
/* ----------------------------------------------------------------------
   _svmf_halt_if_requested
   ---------------------------------------------------------------------- */

/* IMPORTANT: The calling thread should hold the lock on vm->global_mutex
   when calling this function. */

svm_static void
_svmf_halt_if_requested (_svmt_JNIEnv *env)
{
  _svmt_JavaVM *vm = env->vm;

  /* is stopping the world requested by another thread? */
  while (env->thread_status == SVM_THREAD_STATUS_HALT_REQUESTED)
    {
      env->thread_status = SVM_THREAD_STATUS_HALTED;
      vm->pending_halt_thread_count--;

      /* is any other thread still pending halt? */
      if (vm->pending_halt_thread_count == 0)
        {
          /* no, so signal the requesting thread */
          _svmm_cond_signal (vm->requesting_thread_cond);
        }

      /* halt this thread */
      do
        {
          _svmm_cond_wait (vm->halted_threads_cond, vm->global_mutex);
        }
      while (env->thread_status == SVM_THREAD_STATUS_HALTED);
    }

  assert (env->thread_status == SVM_THREAD_STATUS_RUNNING_JAVA);
}

/* ----------------------------------------------------------------------
   _svmf_periodic_check_halt_requested
   ---------------------------------------------------------------------- */

svm_static void
_svmf_periodic_check_halt_requested (_svmt_JNIEnv *env)
{
  _svmt_JavaVM *vm = env->vm;

  _svmm_mutex_lock (vm->global_mutex);
  _svmf_halt_if_requested (env);
  _svmm_mutex_unlock ();
}

/* ----------------------------------------------------------------------
   _svmf_periodic_check
   ---------------------------------------------------------------------- */

inline svm_static void
_svmf_periodic_check (_svmt_JNIEnv *env)
{
#ifndef NDEBUG
  jint status = env->thread_status;

  assert (status == SVM_THREAD_STATUS_RUNNING_JAVA
          || status == SVM_THREAD_STATUS_HALT_REQUESTED);
#endif

  /* is stopping the world requested by another thread? */
  if (env->thread_status == SVM_THREAD_STATUS_HALT_REQUESTED)
    {
      _svmt_JavaVM *vm = env->vm;

      _svmm_mutex_lock (vm->global_mutex);
      _svmf_halt_if_requested (env);
      _svmm_mutex_unlock ();
    }
}

/* ----------------------------------------------------------------------
   _svmf_stop_the_world
   ---------------------------------------------------------------------- */

/* IMPORTANT: The calling thread should hold the lock on vm->global_mutex
   when calling this function.  Once the world is stopped, the calling
   thread should release the lock until just prior to the call to
   resume_the_world. */

svm_static void
_svmf_stop_the_world (_svmt_JNIEnv *env)
{
  _svmt_JavaVM *vm = env->vm;
  jint i;

  /* if another thread beat us to it, halt */
  _svmf_halt_if_requested (env);

  /* ok, now we can proceed */
  assert (env->thread_status == SVM_THREAD_STATUS_RUNNING_JAVA);
  assert (vm->pending_halt_thread_count == 0);

  for (i = 0; i < 2; i++)
    {
      _svmt_JNIEnv *current;

      /* visit all threads */
      for (current = ((i == 0) ? vm->threads.user : vm->threads.system);
           current != NULL; current = current->next)
        {
          jboolean succeeded;

          /* skip the running thread */
          if (current == env)
            {
              continue;
            }

          /* request halt */
          do
            {
              switch (current->thread_status)
                {
                case SVM_THREAD_STATUS_RUNNING_JAVA:
                  {
                    succeeded =
                      _svmm_compare_and_swap
                      (current->thread_status,
                       SVM_THREAD_STATUS_RUNNING_JAVA,
                       SVM_THREAD_STATUS_HALT_REQUESTED);

                    if (succeeded)
                      {
                        vm->pending_halt_thread_count++;
                      }
                  }
                  break;

                case SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_ALLOWED:
                  {
                    succeeded =
                      _svmm_compare_and_swap
                      (current->thread_status,
                       SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_ALLOWED,
                       SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_DISALLOWED);
                  }
                  break;

                default:
                  {
                    succeeded = JNI_FALSE;
                    _svmm_fatal_error ("impossible control flow");
                  }
                  break;
                }
            }
          while (!succeeded);
        }
    }

  /* wait for other threads to halt */
  while (vm->pending_halt_thread_count != 0)
    {
      _svmm_cond_wait (vm->requesting_thread_cond, vm->global_mutex);
    }

  /* the world is stopped!  the caller can now proceed... */
  return;
}
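/* Taken together, the protocol above is used in this shape by a thread
 * that needs a stopped world (a sketch derived from the IMPORTANT notes;
 * the actual GC driver lives elsewhere in SableVM):
 *
 *     _svmm_mutex_lock (vm->global_mutex);
 *     _svmf_stop_the_world (env);
 *     _svmm_mutex_unlock ();
 *
 *     ... work that requires a stopped world, e.g. garbage collection ...
 *
 *     _svmm_mutex_lock (vm->global_mutex);
 *     _svmf_resume_the_world (env);
 *     _svmm_mutex_unlock ();
 */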
/* ----------------------------------------------------------------------
   _svmf_resume_the_world
   ---------------------------------------------------------------------- */

/* IMPORTANT: The calling thread should acquire the lock on
   vm->global_mutex before calling this function.  Of course, this
   function should not be invoked unless the world has already been
   stopped by the calling thread. */

svm_static void
_svmf_resume_the_world (_svmt_JNIEnv *env)
{
  _svmt_JavaVM *vm = env->vm;
  jint i;

  assert (env->thread_status == SVM_THREAD_STATUS_RUNNING_JAVA);
  assert (vm->pending_halt_thread_count == 0);

  for (i = 0; i < 2; i++)
    {
      _svmt_JNIEnv *current;

      /* visit all threads */
      for (current = ((i == 0) ? vm->threads.user : vm->threads.system);
           current != NULL; current = current->next)
        {
          /* skip the running thread */
          if (current == env)
            {
              continue;
            }

          /* disable halt */
          switch (current->thread_status)
            {
            case SVM_THREAD_STATUS_HALTED:
              {
                current->thread_status = SVM_THREAD_STATUS_RUNNING_JAVA;
              }
              break;

            case SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_DISALLOWED:
              {
                current->thread_status =
                  SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_ALLOWED;
              }
              break;

            default:
              {
                _svmm_fatal_error ("impossible control flow");
              }
              break;
            }
        }
    }

  /* wake all halted threads */
  _svmm_cond_broadcast (vm->halted_threads_cond);

  /* the world will resume execution as soon as the calling thread
     releases the lock... */
  return;
}

/* ----------------------------------------------------------------------
   _svmf_stopping_java
   ---------------------------------------------------------------------- */

svm_static void
_svmf_stopping_java (_svmt_JNIEnv *env)
{
  _svmt_JavaVM *vm = env->vm;

#ifndef NDEBUG
  jint status = env->thread_status;

  assert (status == SVM_THREAD_STATUS_RUNNING_JAVA
          || status == SVM_THREAD_STATUS_HALT_REQUESTED);
#endif

  if (!_svmm_compare_and_swap
      (env->thread_status, SVM_THREAD_STATUS_RUNNING_JAVA,
       SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_ALLOWED))
    {
      _svmm_mutex_lock (vm->global_mutex);

      _svmf_halt_if_requested (env);

      assert (env->thread_status == SVM_THREAD_STATUS_RUNNING_JAVA);
      env->thread_status =
        SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_ALLOWED;

      _svmm_mutex_unlock ();
    }
}

/* ----------------------------------------------------------------------
   _svmf_resuming_java
   ---------------------------------------------------------------------- */

svm_static void
_svmf_resuming_java (_svmt_JNIEnv *env)
{
  _svmt_JavaVM *vm = env->vm;

#ifndef NDEBUG
  jint status = env->thread_status;

  assert (status == SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_ALLOWED
          || status == SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_DISALLOWED);
#endif

  if (!_svmm_compare_and_swap
      (env->thread_status,
       SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_ALLOWED,
       SVM_THREAD_STATUS_RUNNING_JAVA))
    {
      _svmm_mutex_lock (vm->global_mutex);

      while (env->thread_status ==
             SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_DISALLOWED)
        {
          _svmm_cond_wait (vm->halted_threads_cond, vm->global_mutex);
        }

      assert (env->thread_status ==
              SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_ALLOWED);
      env->thread_status = SVM_THREAD_STATUS_RUNNING_JAVA;

      _svmm_mutex_unlock ();
    }
}
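/* Both functions above share one idea: the common, uncontended state
 * transition is a single compare-and-swap on env->thread_status, and
 * only a failed swap (meaning another thread concurrently requested a
 * halt or disallowed resuming) falls back to the mutex-protected slow
 * path.  Schematically:
 *
 *     if (!compare_and_swap (status, EXPECTED_STATE, DESIRED_STATE))
 *       {
 *         lock (global_mutex);
 *         ... wait until the transition becomes legal ...
 *         status = DESIRED_STATE;
 *         unlock (global_mutex);
 *       }
 */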
/* ----------------------------------------------------------------------
   _svmf_recursive_counter_(create|delete|increment|decrement)
   ---------------------------------------------------------------------- */

/* Note: the caller must hold one of these two locks:
 * env->fatlock.mutex, env->contention.owner.mutex */

svm_static jint
_svmf_recursive_counter_create (_svmt_JNIEnv *env,
                                _svmt_object_instance *instance,
                                jint recursive_count)
{
  _svmt_recursive_counter_node *counter = env->free_recursive_counters;

  /* try to reuse an existing counter first */
  if (counter != NULL)
    {
      env->free_recursive_counters = counter->next;
    }
  else
    {
      if (_svmm_gzalloc_recusive_counter (env, counter) != JNI_OK)
        {
          return JNI_ERR;
        }

      if (_svmm_new_native_global (env, counter->jobject) != JNI_OK)
        {
          _svmm_gzfree_recusive_counter (counter);
          return JNI_ERR;
        }
    }

  assert (counter->jobject != NULL);

  *counter->jobject = instance;
  counter->value = recursive_count;

  _svmm_tree_insert_recursive_counter (env->overflown_recursive_counters,
                                       counter);

  /* a JNI_ERR return above means out of memory */
  return JNI_OK;
}

svm_static jint
_svmf_recursive_counter_delete (_svmt_JNIEnv *env,
                                _svmt_object_instance *instance)
{
  _svmt_recursive_counter_node *counter, wanted_counter;
  jint recursive_counter;

  wanted_counter.instance = instance;
  wanted_counter.jobject = NULL;

  counter =
    _svmm_tree_find_recursive_counter (env->overflown_recursive_counters,
                                       &wanted_counter);
  assert (counter != NULL);

  recursive_counter = counter->value;

  _svmm_tree_remove_recursive_counter (env->overflown_recursive_counters,
                                       counter);

  counter->next = env->free_recursive_counters;
  env->free_recursive_counters = counter;

  /* the last value of the deleted counter */
  return recursive_counter;
}

svm_static jint
_svmf_recursive_counter_increment (_svmt_JNIEnv *env,
                                   _svmt_object_instance *instance)
{
  _svmt_recursive_counter_node *counter, wanted_counter;

  wanted_counter.instance = instance;
  wanted_counter.jobject = NULL;

  counter =
    _svmm_tree_find_recursive_counter (env->overflown_recursive_counters,
                                       &wanted_counter);
  assert (counter != NULL);

  /* new counter value */
  return ++counter->value;
}

svm_static jint
_svmf_recursive_counter_decrement (_svmt_JNIEnv *env,
                                   _svmt_object_instance *instance)
{
  _svmt_recursive_counter_node *counter, wanted_counter;

  wanted_counter.instance = instance;
  wanted_counter.jobject = NULL;

  counter =
    _svmm_tree_find_recursive_counter (env->overflown_recursive_counters,
                                       &wanted_counter);
  assert (counter != NULL);

  /* new counter value */
  return --counter->value;
}
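/* How these counters are used (a sketch mirroring the thin-lock re-entry
 * path of _svmf_enter_object_monitor below): a thin lock keeps a small
 * recursive count inline in the lockword; when a re-entry pushes it to
 * SVM_THINLOCK_MAX_RECURSIVE_COUNT, the true count moves into the
 * owner's overflown_recursive_counters tree and the inline count
 * saturates:
 *
 *     count = _svmf_lockword_get_thinlock_recursive_count (lockword) + 1;
 *
 *     if (count <= SVM_THINLOCK_MAX_RECURSIVE_COUNT)
 *       ... store count back into the lockword ...
 *
 *     if (count == SVM_THINLOCK_MAX_RECURSIVE_COUNT)
 *       _svmf_recursive_counter_create (env, instance, count);
 *     else if (count > SVM_THINLOCK_MAX_RECURSIVE_COUNT)
 *       _svmf_recursive_counter_increment (env, instance);
 */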
/* ----------------------------------------------------------------------
   _svmf_prepare_fatlocks_for_gc
   ---------------------------------------------------------------------- */

svm_static void
_svmf_prepare_fatlocks_for_gc (_svmt_JNIEnv *env)
{
  _svmt_JavaVM *vm = env->vm;
  jint i;
  _svmt_word j, max_id = vm->fat_locks.next_fat_lock_id;
  _svmt_fat_lock **fat_lock = vm->fat_locks.array;

  /* The world is stopped, but threads can spuriously wake up and look up
   * the trees, so we have to prevent this. */

  for (i = 1; i < vm->threads.next_thread_id; i++)
    {
      _svmt_JNIEnv *current_env = vm->threads.array[i];

      _svmm_mutex_lock_raw (current_env->contention.owner.mutex);
    }

  for (j = 0; j < max_id; j++)
    {
      if (fat_lock[j] != NULL)
        {
          _svmm_mutex_lock_raw (fat_lock[j]->mutex);
        }
    }
}

/* ----------------------------------------------------------------------
   _svmf_reconstruct_counter_tree
   ---------------------------------------------------------------------- */

svm_static void
_svmf_reconstruct_counter_tree (_svmt_recursive_counter_node **new_root,
                                _svmt_recursive_counter_node *old_root)
{
  /* We could have used an in-order walker here, but recursion looks so
   * much cleaner... */

  if (old_root == NULL)
    {
      return;
    }

  if (old_root->left != NULL)
    {
      _svmf_reconstruct_counter_tree (new_root, old_root->left);
    }

  if (old_root->right != NULL)
    {
      _svmf_reconstruct_counter_tree (new_root, old_root->right);
    }

  _svmh_tree_insert_recursive_counter (new_root, old_root);
}

/* ----------------------------------------------------------------------
   _svmf_fixup_fatlocks_after_gc
   ---------------------------------------------------------------------- */

svm_static void
_svmf_fixup_fatlocks_after_gc (_svmt_JNIEnv *env)
{
  jint i;
  _svmt_word j;
  _svmt_fat_lock **fat_lock = env->vm->fat_locks.array;
  _svmt_word max_id = env->vm->fat_locks.next_fat_lock_id;

  for (i = 1; i < env->vm->threads.next_thread_id; i++)
    {
      _svmt_JNIEnv *current_env = env->vm->threads.array[i];
      _svmt_recursive_counter_node *old_counter_tree;

      /* GC moved objects around, so rebuild the counter tree using the
         new object addresses as keys */
      old_counter_tree = current_env->overflown_recursive_counters;
      current_env->overflown_recursive_counters = NULL;
      _svmf_reconstruct_counter_tree
        (&current_env->overflown_recursive_counters, old_counter_tree);

      _svmm_mutex_unlock_raw (current_env->contention.owner.mutex);
    }

  for (j = 0; j < max_id; j++)
    {
      if (fat_lock[j] != NULL)
        {
          _svmm_mutex_unlock_raw (fat_lock[j]->mutex);
        }
    }
}
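/* Why the walk above is post-order rather than in-order: the tree keys
 * (object addresses) changed during GC, and handing a node to the insert
 * routine reuses the node itself, rewriting its left and right links.
 * Both subtrees therefore have to be traversed before their parent is
 * reinserted.  Shape of the rebuild:
 *
 *     rebuild (new_root, n->left);
 *     rebuild (new_root, n->right);
 *     insert (new_root, n);
 */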
/* ----------------------------------------------------------------------
   _svmf_get_fatlock_fast
   ---------------------------------------------------------------------- */

svm_static inline _svmt_fat_lock *
_svmf_get_fatlock_fast (_svmt_JNIEnv *env)
{
  _svmt_fat_lock *fat_lock;

#ifdef STATISTICS
  env->vm->total_fatlocks_get_count++;
#endif

  if ((fat_lock = env->fat_lock_cached) != NULL)
    {
      env->fat_lock_cached = NULL;

#ifdef STATISTICS
      env->vm->total_fatlocks_get_cached_count++;
#endif

      _svmm_debug_synchronization (env, "FATLOCK %d GET by %d\n",
                                   fat_lock->id, env->thread.id, NULL,
                                   NULL);

      return fat_lock;
    }

  if ((fat_lock = _svmf_fatlock_get (env)) != NULL)
    {
      _svmm_debug_synchronization (env, "FATLOCK %d GET by %d\n",
                                   fat_lock->id, env->thread.id, NULL,
                                   NULL);

      return fat_lock;
    }

  /* we need to create a new fat lock */
  {
    _svmt_word id;

    if (_svmm_gzalloc_fat_lock_no_exception (fat_lock) != JNI_OK)
      {
        goto error;
      }

    /* increment the global ID counter */
    do
      {
        id = env->vm->fat_locks.next_fat_lock_id;
      }
    while (!_svmm_compare_and_swap (env->vm->fat_locks.next_fat_lock_id,
                                    id, id + 1));

    if (id > SVM_MAX_FATLOCK_ID)
      {
        _svmm_gzfree_fat_lock_no_exception (fat_lock);
        goto error;
      }

    /* initialize */
    if (_svmm_new_native_local (env, fat_lock->jobject) != JNI_OK)
      {
        _svmm_gzfree_fat_lock_no_exception (fat_lock);
        goto error;
      }

    _svmm_mutex_init (fat_lock->mutex);
    _svmm_cond_init (fat_lock->cond);
    _svmm_cond_init (fat_lock->notification_cond);
    fat_lock->id = id;

    /* register only after the new lock is usable (GC-time code walks
       this array) */
    env->vm->fat_locks.array[id] = fat_lock;

#ifdef STATISTICS
    env->vm->total_fatlocks_created_count++;
#endif

    _svmm_debug_synchronization (env, "FATLOCK %d GET by %d\n",
                                 fat_lock->id, env->thread.id, NULL, NULL);

    return fat_lock;
  }

error:

  return NULL;
}

/* ----------------------------------------------------------------------
   _svmf_put_fatlock_fast
   ---------------------------------------------------------------------- */

svm_static inline void
_svmf_put_fatlock_fast (_svmt_JNIEnv *env, _svmt_fat_lock *fat_lock)
{
  _svmm_debug_synchronization (env, "FATLOCK %d PUT by %d\n",
                               fat_lock->id, env->thread.id, NULL, NULL);

#ifdef STATISTICS
  env->vm->total_fatlocks_put_count++;
#endif

  if (env->fat_lock_cached == NULL)
    {
#ifdef STATISTICS
      env->vm->total_fatlocks_put_cached_count++;
#endif

      env->fat_lock_cached = fat_lock;
    }
  else
    {
      _svmf_fatlock_put (env, fat_lock);
    }
}

/* ----------------------------------------------------------------------
   _svmf_inflate_lock_no_exception
   ---------------------------------------------------------------------- */

svm_static jint
_svmf_inflate_lock_no_exception (_svmt_JNIEnv *env,
                                 _svmt_object_instance *instance,
                                 int threads_to_come_no)
{
  jint recursive_count;
  _svmt_fat_lock *fat_lock;

  if ((fat_lock = _svmf_get_fatlock_fast (env)) == NULL)
    {
      return JNI_ERR;           /* OOM */
    }

  _svmm_mutex_lock (fat_lock->mutex);

  fat_lock->threads_to_come_no = threads_to_come_no;
  *fat_lock->jobject = instance;

  _svmm_debug_synchronization (env,
                               "inflating thinlock (%d) owned by (%d) to fatlock %d",
                               _svmf_lockword_get_thinlock_id
                               (instance->lockword), env->thread.id,
                               fat_lock->id, NULL);

#ifdef STATISTICS
  env->vm->total_lock_inflations_count++;
#endif

  assert (_svmf_lockword_get_thinlock_id (instance->lockword) ==
          env->thread.thinlock_id);
  assert (fat_lock->recursive_count == 0);
  assert (fat_lock->owner == NULL);

  /* Note that we are NOT incrementing the recursive count in the
   * following code.  A thin lock recursive count of 0 is equivalent to a
   * fat lock recursive count of 1. */

  recursive_count =
    _svmf_lockword_get_thinlock_recursive_count (instance->lockword);

  if (recursive_count == SVM_THINLOCK_MAX_RECURSIVE_COUNT)
    {
      fat_lock->recursive_count =
        _svmf_recursive_counter_delete (env, instance);
    }
  else
    {
      fat_lock->recursive_count = recursive_count + 1;
    }

  /* change the lockword to fat */
  fat_lock->owner = env;
  instance->lockword =
    _svmf_lockword_fatlock (fat_lock->id,
                            _svmf_lockword_get_extra_bits
                            (instance->lockword));

  _svmm_mutex_unlock ();

  return JNI_OK;
}
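/* Worked example of the off-by-one note above: a thread that has entered
 * the monitor three times holds a thin lock with a recursive count of 2;
 * after inflation the fat lock carries recursive_count == 3 (2 + 1), and
 * _svmf_deflate_lock_no_exception below maps 3 back to 2. */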
/* ----------------------------------------------------------------------
   _svmf_deflate_lock_no_exception
   ---------------------------------------------------------------------- */

svm_static void
_svmf_deflate_lock_no_exception (_svmt_JNIEnv *env, _svmt_fat_lock *fat_lock,
                                 _svmt_object_instance *instance)
{
  /* Note that we are NOT decrementing the recursive count in the
   * following code.  A thin lock recursive count of 0 is equivalent to a
   * fat lock recursive count of 1. */

  jint recursive_count = fat_lock->recursive_count - 1;

  assert (recursive_count >= -1);

#ifdef STATISTICS
  env->vm->total_lock_deflations_count++;
#endif

  /* put recursive_count back into the thinlock */
  if (recursive_count >= SVM_THINLOCK_MAX_RECURSIVE_COUNT)
    {
      /* Overflow!  Put the counter into the tree of counters. */
      _svmf_recursive_counter_create (env, instance, recursive_count);
      recursive_count = SVM_THINLOCK_MAX_RECURSIVE_COUNT;
    }

  /* remove the one remaining blocked thread (if it exists) */
  assert ((fat_lock->blocked_tree == NULL)
          || ((fat_lock->blocked_tree->left == NULL)
              && (fat_lock->blocked_tree->right == NULL)));

  fat_lock->blocked_tree = NULL;
  fat_lock->threads_blocked_no = 0;
  fat_lock->threads_to_come_no = 0;

  /* _svmm_debug_synchronization (env,
     "Deflating lockword(%d): thinlock_id(%d), rec_count(%d), extra(%d)",
     instance->lockword, env->thread.thinlock_id, recursive_count,
     _svmf_lockword_get_extra_bits (instance->lockword)); */

  if (recursive_count == -1)
    {
      /* we're releasing the lock */
      instance->lockword =
        _svmf_lockword_get_extra_bits (instance->lockword);
    }
  else
    {
      instance->lockword =
        _svmf_lockword_thinlock (env->thread.thinlock_id, recursive_count,
                                 _svmf_lockword_get_extra_bits
                                 (instance->lockword));
    }

  _svmm_debug_synchronization (env,
                               "Deflated fatlock (%d) to thinlock (%d) of object (%d)",
                               fat_lock->id, instance->lockword, instance,
                               NULL);

  fat_lock->recursive_count = 0;
  fat_lock->owner = NULL;
  *fat_lock->jobject = NULL;

  /* NOTE: the caller must put the fatlock back after releasing the
   * fatlock's mutex */
}
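/* The contract in the NOTE above, as followed by _svmf_exit_object_monitor
 * below (sketch):
 *
 *     _svmm_mutex_lock (fat_lock->mutex);
 *     ...
 *     _svmf_deflate_lock_no_exception (env, fat_lock, instance);
 *     _svmm_mutex_unlock ();
 *     _svmf_put_fatlock_fast (env, fat_lock);
 */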
/* ----------------------------------------------------------------------
   _svmf_handle_contention
   ---------------------------------------------------------------------- */

svm_static jint
_svmf_handle_contention (_svmt_JNIEnv *env, _svmt_object_instance *instance)
{
  _svmt_JNIEnv *current;
  _svmt_JavaVM *vm = env->vm;
  _svmt_fat_lock **fat_lock_array = vm->fat_locks.array;

  current = env->contention.owner.wait_list;

  /* We wake up all of the contending threads:
   * 1. all those who waited to lock the object we've just released,
   * 2. those who waited on an object's lock that got/is inflated,
   * 3. for all others, we first inflate the lock of the object they are
   *    waiting to lock, then we signal them too.
   */

#ifndef NDEBUG
  if (_svmf_lockword_is_thin (instance->lockword))
    {
      _svmm_debug_synchronization (env,
                                   "handling contention on thinlock (%d) of obj (%d) I've freed",
                                   _svmf_lockword_get_thinlock_id
                                   (instance->lockword), instance, NULL,
                                   NULL);
    }
  else
    {
      _svmm_debug_synchronization (env,
                                   "handling contention on fatlock (%d) owned by this thread",
                                   _svmf_lockword_get_fatlock_index
                                   (instance->lockword), NULL, NULL, NULL);
    }
#endif

#ifdef STATISTICS
  vm->total_lock_handle_contentions_count++;
#endif

  while (current != NULL)
    {
      _svmm_debug_synchronization (env,
                                   "handling contention: current is thread %d",
                                   current->thread.id, NULL, NULL, NULL);

      if ((*(current->contention.requester.jobject)) != instance)
        {
          _svmt_word lockword =
            (*(current->contention.requester.jobject))->lockword;

          if (_svmf_lockword_is_thin (lockword))
            {
              /* we must own the contended object */
              assert (_svmf_lockword_get_thinlock_id (lockword) ==
                      env->thread.thinlock_id);

              if (JNI_OK !=
                  _svmf_inflate_lock_no_exception
                  (env, *(current->contention.requester.jobject), 1))
                {
                  return JNI_ERR;       /* OOM */
                }
            }
          else
            {
              jint fat_id = _svmf_lockword_get_fatlock_index (lockword);
              _svmt_fat_lock *fat_lock = fat_lock_array[fat_id];

              /* we must own the fat lock */
              assert (fat_lock->owner == env);

              fat_lock->threads_to_come_no++;
            }
        }

      _svmm_cond_signal (current->contention.requester.cond);
      current = current->contention.requester.wait_list_next;
    }

  env->contention.owner.wait_list = NULL;
  env->contention.owner.flag = 0;

  return JNI_OK;
}
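/* For context, the other half of this handshake lives in
 * _svmf_enter_object_monitor below: a requester that finds a thin lock
 * owned by thread T sets T->contention.owner.flag, links itself onto
 * T->contention.owner.wait_list and sleeps on its own condition
 * variable; when T exits a monitor with the flag set, it calls this
 * function, which inflates any still-thin contended locks and signals
 * every queued requester. */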
/* ----------------------------------------------------------------------
   _svmf_enter_object_monitor
   ---------------------------------------------------------------------- */

inline svm_static jint
_svmf_enter_object_monitor (_svmt_JNIEnv *env,
                            _svmt_object_instance *instance)
{
  _svmt_JavaVM *vm = env->vm;
  _svmt_word old_lockword;

#ifndef NDEBUG
  const char *type_name = instance->vtable->type->name;
#endif

  assert (instance != NULL);

retry:

  old_lockword = instance->lockword;

  assert (env->thread_status !=
          SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_ALLOWED);
  assert (env->thread_status !=
          SVM_THREAD_STATUS_NOT_RUNNING_JAVA_RESUMING_DISALLOWED);

  {
    _svmt_word new_lockword =
      env->thread.thinlock_id | _svmf_lockword_get_extra_bits (old_lockword);

    if (_svmm_compare_and_swap (instance->lockword,
                                _svmf_lockword_get_extra_bits (old_lockword),
                                new_lockword))
      {
#ifdef STATISTICS
        env->vm->total_lock_thin_acquisitions_count++;
#endif

        /* thin lock acquired */
        return JNI_OK;
      }
  }

  /* Either the object is already locked, or the lock is inflated. */

  if (_svmf_lockword_is_thin (old_lockword))
    {
      /* it is a thin lock */
      if (_svmf_lockword_get_thinlock_id (old_lockword) ==
          env->thread.thinlock_id)
        {
          /* the thinlock is already owned by the current thread */
          jint recursive_count =
            _svmf_lockword_get_thinlock_recursive_count (old_lockword);

          assert (recursive_count <= SVM_THINLOCK_MAX_RECURSIVE_COUNT);

          if (++recursive_count <= SVM_THINLOCK_MAX_RECURSIVE_COUNT)
            {
              instance->lockword =
                _svmf_lockword_thinlock (env->thread.thinlock_id,
                                         recursive_count,
                                         _svmf_lockword_get_extra_bits
                                         (old_lockword));
            }

          if (recursive_count == SVM_THINLOCK_MAX_RECURSIVE_COUNT)
            {
              jint status;

              /* Overflow!  Put the counter into the tree of counters. */
              _svmm_mutex_lock (env->contention.owner.mutex);
              status = _svmf_recursive_counter_create (env, instance,
                                                       recursive_count);
              _svmm_mutex_unlock ();

              if (status != JNI_OK)
                {
                  /* out of memory exception pending */
                  return JNI_ERR;
                }
            }
          else if (recursive_count > SVM_THINLOCK_MAX_RECURSIVE_COUNT)
            {
              jint new_recursive_count;

              /* The counter is already in the tree of counters. */
              _svmm_mutex_lock (env->contention.owner.mutex);
              new_recursive_count =
                _svmf_recursive_counter_increment (env, instance);
              _svmm_mutex_unlock ();

              if (new_recursive_count < 0)
                {
                  _svmf_error_OutOfMemoryError (env);
                  return JNI_ERR;
                }
            }

          /* thin lock re-entered */
          return JNI_OK;
        }
      else
        {
          /* contention: the thinlock is owned by another thread */
          jint owner_id = _svmf_lockword_get_thread_id (old_lockword);
          _svmt_JNIEnv *owner = vm->threads.array[owner_id];
          _svmt_word old_flag;
          jboolean notified;

          /* If we read "old_lockword" before the thinlock was actually
             acquired by another thread, retry. */
          if (owner == NULL)
            {
              goto retry;
            }

          /* notify the owning thread that inflation is requested */
          _svmm_mutex_lock (owner->contention.owner.mutex);

          old_flag = owner->contention.owner.flag;
          owner->contention.owner.flag = 1;

          if (_svmf_lockword_is_thin (instance->lockword)
              && (_svmf_lockword_get_thinlock_id (instance->lockword) ==
                  owner->thread.thinlock_id))
            {
              /* the lock is thin, and the thinlock owner is still the
                 same and has been notified */
              notified = JNI_TRUE;

              /* add this thread to the lock owner's wait_list */
              env->contention.requester.wait_list_next =
                owner->contention.owner.wait_list;
              owner->contention.owner.wait_list = env;
              *(env->contention.requester.jobject) = instance;
            }
          else
            {
              /* the owner has changed, or the thinlock got inflated, so
                 restore the contention flag to its original value */
              notified = JNI_FALSE;
              owner->contention.owner.flag = old_flag;
            }

          _svmm_mutex_unlock ();

          /* if the owner has changed, retry from scratch */
          if (!notified)
            {
              /* retry locking */
              goto retry;
            }

          /* This one is tricky!  To avoid deadlocks, the transition to
             stopping java must be done while the owner's contention
             mutex is released. */
          _svmf_stopping_java (env);

          /* IMPORTANT: From now on, we MUST NOT read or write any value
             which can be changed by GC or any other "stop the world"
             dependent operation. */

          /* transition made; reacquire the lock */
          _svmm_mutex_lock (owner->contention.owner.mutex);

          /* while this thread is still on the owner's wait list, wait */
          _svmm_debug_synchronization (env,
                                       "thinlock of obj (%d) owned by another thread (%d)",
                                       instance, owner->thread.id, NULL,
                                       NULL);

          do
            {
              _svmt_JNIEnv *current = owner->contention.owner.wait_list;

              /* search the wait_list for ourselves */
              while (current != NULL && current != env)
                {
                  current = current->contention.requester.wait_list_next;
                }

              if (current == NULL)
                {
                  break;
                }

              _svmm_debug_synchronization (env,
                                           "will sleep on thinlock contention with (%d) on object (%d)",
                                           owner->thread.id, instance, NULL,
                                           NULL);

              _svmm_cond_wait (env->contention.requester.cond,
                               owner->contention.owner.mutex);
            }
          while (JNI_TRUE);
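          /* Note on the loop above: _svmm_cond_wait can wake up
             spuriously, so the requester rescans the owner's wait_list
             and sleeps again until _svmf_handle_contention has actually
             unlinked it. */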
          _svmm_debug_synchronization (env,
                                       "thinlock (%d) contention resolved",
                                       _svmf_lockword_get_thinlock_id
                                       (instance->lockword), NULL, NULL,
                                       NULL);

          _svmm_mutex_unlock ();

          _svmf_resuming_java (env);

          /* GC could have moved things around, so reload the instance
             from the native reference slot... */
          instance = *(env->contention.requester.jobject);
          *(env->contention.requester.jobject) = NULL;

          /* retry locking */
          goto retry;
        }
    }
  else
    {
      /* it is a fat lock */
      jint fat_id = _svmf_lockword_get_fatlock_index (old_lockword);
      _svmt_fat_lock *fat_lock = vm->fat_locks.array[fat_id];
      jboolean registered = JNI_FALSE, deflated, incremented = JNI_FALSE;
      jint status = JNI_OK;

      _svmm_debug_synchronization (env,
                                   "entering fatlock id (%d), fat_lock (%d)",
                                   fat_id, fat_lock, NULL, NULL);

      /* grab the mutex and proceed */
      _svmm_mutex_lock (fat_lock->mutex);

      /* are we already the owner? */
      if (fat_lock->owner == env)
        {
          fat_lock->recursive_count++;
          incremented = JNI_TRUE;

          /* overflow? */
          if (fat_lock->recursive_count < 0)
            {
              /* restore the recursive count; the out of memory error is
                 reported after releasing the mutex below */
              fat_lock->recursive_count--;
              status = JNI_ERR;
            }
        }
      else
        {
          /* We do not own this fatlock - check whether it is still
           * associated with our object (it *could* have changed, e.g.
           * because of deflation). */
          if (*(fat_lock->jobject) == instance)
            {
              /* yes: register in the blocked_tree, etc. */
              _svmm_tree_insert_thread_id (fat_lock->blocked_tree,
                                           env->contention.thread_node);

              if (fat_lock->threads_to_come_no > 0)
                {
                  fat_lock->threads_to_come_no--;
                }

              fat_lock->threads_blocked_no++;
              registered = JNI_TRUE;
            }
        }

      _svmm_mutex_unlock ();

      if (status != JNI_OK)
        {
          _svmf_error_OutOfMemoryError (env);
          return JNI_ERR;
        }

      if (incremented)
        {
          return JNI_OK;
        }

      if (!registered)
        {
          /* retry locking */
          goto retry;
        }

      _svmm_debug_synchronization (env,
                                   "Contention entering fatlock (%d) on object (%d)",
                                   fat_id, instance, NULL, NULL);

      /* save the reference in case GC happens */
      *env->contention.requester.jobject = instance;

      _svmf_stopping_java (env);

      /* IMPORTANT: From now on, we MUST NOT read or write any value
         which can be changed by GC or any other "stop the world"
         dependent operation. */

      _svmm_mutex_lock (fat_lock->mutex);

      /* wait until no other thread owns the lock, or it gets deflated */
      while (fat_lock->recursive_count != 0
             && _svmm_tree_find_thread_id (fat_lock->blocked_tree,
                                           env->contention.thread_node) !=
             NULL)
        {
          _svmm_cond_wait (fat_lock->cond, fat_lock->mutex);
        }

      /* IDEA/TODO: instead of having a whole tree to search through, we
       * could simply have a counter that is incremented when the lock is
       * deflated.  We would then remember the value of this counter on
       * entry and wait until we can acquire the lock, or until the
       * counter value is no longer the one we remembered. */

      if (_svmm_tree_find_thread_id (fat_lock->blocked_tree,
                                     env->contention.thread_node) != NULL)
        {
          /* we can acquire the fatlock */
          assert (fat_lock->recursive_count == 0);
          assert (fat_lock->owner == NULL);

#ifdef STATISTICS
          env->vm->total_lock_fat_acquisitions_count++;
#endif

          fat_lock->recursive_count = 1;
          fat_lock->owner = env;

          /* remove us from the blocked_tree, etc. */
          _svmm_tree_remove_thread_id (fat_lock->blocked_tree,
                                       env->contention.thread_node);
          fat_lock->threads_blocked_no--;

          deflated = JNI_FALSE;
        }
      else
        {
          /* the fatlock we've been waiting on got deflated */
          deflated = JNI_TRUE;
        }

      _svmm_mutex_unlock ();

      _svmf_resuming_java (env);

      /* restore the reference in case GC happened */
      instance = *env->contention.requester.jobject;

      if (deflated)
        {
          /* retry locking */
          goto retry;
        }
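      /* A sketch of the IDEA/TODO above (hypothetical; deflation_epoch
       * is not an existing field): each fat lock would carry a counter
       * bumped on every deflation, letting a blocked thread detect
       * deflation without searching the tree:
       *
       *     _svmt_word epoch = fat_lock->deflation_epoch;
       *
       *     while (fat_lock->recursive_count != 0
       *            && fat_lock->deflation_epoch == epoch)
       *       {
       *         _svmm_cond_wait (fat_lock->cond, fat_lock->mutex);
       *       }
       *
       * On wake-up, an unchanged epoch would mean the lock is free for
       * us; a changed one would mean it got deflated, and we would retry
       * on the thin lockword. */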
      /* We are sure that we acquired the fatlock of the object we
       * wanted, even though we did not recheck the instance, because we
       * could have been in only ONE blocked_tree. */

      return JNI_OK;
    }
}

/* ----------------------------------------------------------------------
   _svmf_exit_object_monitor
   ---------------------------------------------------------------------- */

inline svm_static jint
_svmf_exit_object_monitor (_svmt_JNIEnv *env,
                           _svmt_object_instance *instance)
{
  _svmt_JavaVM *vm = env->vm;
  _svmt_word old_lockword;

  assert (instance != NULL);

  old_lockword = instance->lockword;

  if (_svmf_lockword_is_thin (old_lockword))
    {
      /* it is a thin lock */
      jint recursive_count;

      if (_svmf_lockword_get_thinlock_id (old_lockword) !=
          env->thread.thinlock_id)
        {
          _svmf_error_IllegalMonitorStateException (env);
          return JNI_ERR;
        }

      recursive_count =
        _svmf_lockword_get_thinlock_recursive_count (old_lockword);

      if (recursive_count == 0)
        {
          /* we're releasing the thin lock */
          instance->lockword =
            _svmf_lockword_get_extra_bits (old_lockword);

          goto handle_contention;
        }
      else
        {
          /* Note: once we put recursive_count in the tree of counters,
             it stays there until the lock is released (or inflated) */
          if (recursive_count == SVM_THINLOCK_MAX_RECURSIVE_COUNT)
            {
              jboolean released = JNI_FALSE;

              _svmm_mutex_lock (env->contention.owner.mutex);

              /* the counter is in the tree of counters */
              if (_svmf_recursive_counter_decrement (env, instance) == 0)
                {
                  /* we're releasing the thin lock */
                  _svmf_recursive_counter_delete (env, instance);
                  instance->lockword =
                    _svmf_lockword_get_extra_bits (old_lockword);
                  released = JNI_TRUE;
                }

              _svmm_mutex_unlock ();

              if (released)
                {
                  goto handle_contention;
                }
            }
          else
            {
              /* normal counter in the thin lock */
              instance->lockword =
                _svmf_lockword_thinlock (env->thread.thinlock_id,
                                         recursive_count - 1,
                                         _svmf_lockword_get_extra_bits
                                         (old_lockword));
            }

          /* we're done */
          return JNI_OK;
        }
    }
  else
    {
      /* it is a fat lock */
      jint fat_id = _svmf_lockword_get_fatlock_index (old_lockword);
      _svmt_fat_lock *fat_lock = vm->fat_locks.array[fat_id];
      jint status = JNI_OK;
      jboolean released = JNI_FALSE, deflated = JNI_FALSE;

      _svmm_mutex_lock (fat_lock->mutex);

      assert (fat_lock->recursive_count >= 0);

      if (fat_lock->recursive_count == 0 || fat_lock->owner != env)
        {
          status = JNI_ERR;
        }
      /* are we releasing the fatlock? */
      else if (--(fat_lock->recursive_count) == 0)
        {
          _svmm_debug_synchronization (env,
                                       "Exiting/releasing fatlock (%d) on object (%d)",
                                       fat_id, instance, NULL, NULL);

          released = JNI_TRUE;
          fat_lock->owner = NULL;
          _svmm_cond_broadcast (fat_lock->cond);

          /* We might need to deflate before unlocking. */
          if ((fat_lock->threads_waiting_no == 0)
              && (fat_lock->threads_blocked_no +
                  fat_lock->threads_to_come_no <= 1))
            {
              _svmf_deflate_lock_no_exception (env, fat_lock, instance);
              deflated = JNI_TRUE;
            }
        }

      _svmm_mutex_unlock ();

      if (deflated == JNI_TRUE)
        {
          /* give the fatlock back to the free list */
          _svmf_put_fatlock_fast (env, fat_lock);
        }

      if (status != JNI_OK)
        {
          _svmf_error_IllegalMonitorStateException (env);
          return JNI_ERR;
        }

      if (released)
        {
          goto handle_contention;
        }

      /* we're done */
      return JNI_OK;
    }

handle_contention:

  if (env->contention.owner.flag)
    {
      jint status;

      _svmm_debug_synchronization (env,
                                   "contention flag set (%d), will handle it",
                                   env->contention.owner.flag, NULL, NULL,
                                   NULL);

      _svmm_mutex_lock (env->contention.owner.mutex);
      status = _svmf_handle_contention (env, instance);
      _svmm_mutex_unlock ();

      if (status != JNI_OK)
        {
          _svmf_error_OutOfMemoryError (env);
          return JNI_ERR;
        }
    }

  /* we're done */
  return JNI_OK;
}
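/* The deflation test in _svmf_exit_object_monitor above, spelled out as
 * a predicate (illustrative only; the VM does not define this function):
 *
 *     static jboolean
 *     can_deflate (const _svmt_fat_lock *fat_lock)
 *     {
 *       return fat_lock->threads_waiting_no == 0
 *         && (fat_lock->threads_blocked_no
 *             + fat_lock->threads_to_come_no) <= 1;
 *     }
 *
 * No thread is parked in Object.wait, and at most one thread is blocked
 * on (or headed for) the monitor; that thread, if present, wakes up,
 * sees the deflation and retries on the thin lockword. */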
/* ----------------------------------------------------------------------
   _svmf_thread_start
   ---------------------------------------------------------------------- */

svm_static void *
_svmf_thread_start (void *_env)
{
  _svmt_JNIEnv *env = (_svmt_JNIEnv *) _env;
  _svmt_JavaVM *vm = env->vm;

  assert (env->is_alive == JNI_TRUE);

#ifdef STATISTICS
  env->vm->total_threads_created_count++;
#endif

  env->thread.pthread = pthread_self ();
  _svmf_set_current_env (env);

  _svmm_invoke_static_virtualmachine_runthread (env);

  _svmm_mutex_lock (vm->global_mutex);

  _svmf_halt_if_requested (env);

  env->is_alive = JNI_FALSE;

  /* unlink this thread from its (user or system) thread list */
  if (env->previous != NULL)
    {
      env->previous->next = env->next;
    }
  else
    {
      if (env->thread.is_daemon)
        {
          vm->threads.system = env->next;
        }
      else
        {
          vm->threads.user = env->next;
        }
    }

  if (env->next != NULL)
    {
      env->next->previous = env->previous;
    }

  _svmm_cond_signal (vm->threads.vm_destruction_cond);

  /* leak the env for now... */

  _svmm_mutex_unlock ();

  return NULL;
}
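/* _svmf_thread_start is the pthread entry point.  Thread creation (done
 * elsewhere in SableVM) presumably hands it the new thread's env along
 * these lines (sketch):
 *
 *     pthread_t pthread;
 *
 *     if (pthread_create (&pthread, NULL, _svmf_thread_start, env) != 0)
 *       {
 *         ... handle the error ...
 *       }
 *
 * Note that env->thread.pthread is also set by the new thread itself,
 * via pthread_self () above. */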