/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 *                                                                    *
 * This source file is part of SableVM.                               *
 *                                                                    *
 * See the file "LICENSE" for the copyright information and for       *
 * the terms and conditions for copying, distribution and             *
 * modification of this source file.                                  *
 *                                                                    *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if defined (_SABLEVM_GENERATIONAL_COPYING_GC)

#define _SABLEVM_GC_WRITE_BARRIER_REFERENCE 1

#define SVM_DEFAULT_FRAME_SIZE        _svmf_aligned_size_t (8192)
#define SVM_DEFAULT_GENERATION_COUNT  3
#define SVM_DEFAULT_GENERATION_SIZE   (SVM_DEFAULT_FRAME_SIZE * 512)
#define SVM_DEFAULT_MIN_SIZE          (SVM_DEFAULT_GENERATION_SIZE * 3)
#define SVM_DEFAULT_MAX_SIZE          (SVM_DEFAULT_GENERATION_SIZE * 100)
#define SVM_DEFAULT_INCREMENT_SIZE    (SVM_DEFAULT_GENERATION_SIZE * 3)
#define SVM_DEFAULT_LARGE_OBJECT_SIZE 1024

#define SVM_GC_MINOR 0
#define SVM_GC_MAJOR 1
#define SVM_GC_FULL  2

static _svmt_frame myframe;

#if defined (_SABLEVM_GC_STATISTICS)

svm_static void
_svmf_print_gc_stat (_svmt_JNIEnv *env)
{
  _svmt_heap *heap = &(env->vm->heap);

  _svmf_printf (env, stderr, "\n\n\nCOLLECTOR STATISTICS\n");
  _svmf_printf (env, stderr, "\ntotal time : %ld s", heap->gc_total_secs);
  _svmf_printf (env, stderr, " %ld us", heap->gc_total_usecs);
  _svmf_printf (env, stderr, "\ncollections : %d", heap->gc_collections);

  if (heap->gc_collections > 0)
    {
      _svmf_printf (env, stderr, "\n minor : %d", heap->gc_minor);
      _svmf_printf (env, stderr, "\n major : %d", heap->gc_major);
      _svmf_printf (env, stderr, "\n full : %d", heap->gc_full);
      _svmf_printf (env, stderr, "\ntotal time / collections : %.2f us",
                    (float) (heap->gc_total_secs * 1000000
                             + heap->gc_total_usecs)
                    / (float) heap->gc_collections);
      _svmf_printf (env, stderr, "\nbytes copied : %ld",
                    heap->gc_bytes_copied);
      _svmf_printf (env, stderr, "\nobjects copied : %ld",
                    heap->gc_objects_copied);
      _svmf_printf (env, stderr, "\nbytes / objects : %.2f",
                    (float) heap->gc_bytes_copied
                    / (float) heap->gc_objects_copied);
    }

  _svmf_printf (env, stderr, "\n\n\nALLOCATOR STATISTICS\n");
  _svmf_printf (env, stderr, "\nallocated bytes : %ld",
                heap->al_bytes_allocated);
  _svmf_printf (env, stderr, "\nallocated objects : %ld",
                heap->al_objects_allocated);
  _svmf_printf (env, stderr, "\nbytes / objects : %.2f",
                (float) heap->al_bytes_allocated
                / (float) heap->al_objects_allocated);

  _svmf_printf (env, stderr, "\n\n\nWRITE BARRIER STATISTICS\n");
  _svmf_printf (env, stderr, "\ntotal time : %ld s", heap->wb_total_secs);
  _svmf_printf (env, stderr, " %ld us", heap->wb_total_usecs);
  _svmf_printf (env, stderr, "\ncalls : %ld", heap->wb_calls);

  if (heap->wb_calls > 0)
    {
      if (heap->wb_slow_paths > 0)
        {
          _svmf_printf (env, stderr, "\n slow path : %ld",
                        heap->wb_slow_paths);
          _svmf_printf (env, stderr, "\n y2o ptr : %ld", heap->wb_y2o_ptrs);
          _svmf_printf (env, stderr, "\n o2y ptr : %ld", heap->wb_o2y_ptrs);

          if (heap->wb_o2y_ptrs > 0)
            {
              _svmf_printf (env, stderr, "\n y2o / o2y : %.2f",
                            (float) heap->wb_y2o_ptrs
                            / (float) heap->wb_o2y_ptrs);
            }
        }

      _svmf_printf (env, stderr, "\ntotal time / calls : %.2f us",
                    (float) (heap->wb_total_secs * 1000000
                             + heap->wb_total_usecs)
                    / (float) heap->wb_calls);
    }

  _svmf_printf (env, stderr, "\n\n\nHEAP SETUP\n");
  _svmf_printf (env, stderr, "\nstart size : %ld",
                heap->size - heap->increment_size * heap->gc_full);
  _svmf_printf (env, stderr, "\nheap size : %ld", heap->size);
  _svmf_printf (env, stderr, "\n nursery size : %ld",
                heap->generations[0].size);
  _svmf_printf (env, stderr, "\n middle size : %ld",
                heap->generations[1].size);
  _svmf_printf (env, stderr, "\n oldest size : %ld",
                heap->generations[2].size);
  _svmf_printf (env, stderr, "\nincrement size : %ld",
                heap->increment_size);
  _svmf_printf (env, stderr, "\n\n\n");

  fflush (NULL);
}

#endif

/*
--------------------------------------------------------------------------------
_svmf_write_barrier
--------------------------------------------------------------------------------
*/
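/* Records old-to-young pointer stores in the frame-based remembered
   set.  When the stored reference (*pslot) points into a younger
   generation and the slot itself (pslot) lies in an older generation,
   the frame covering the slot widens its [start, end) interval so that
   the next minor or major collection rescans that range for roots.
   The barrier does nothing while a full collection is pending, since a
   full collection scans the whole heap anyway. */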
inline svm_static void
_svmf_write_barrier (_svmt_JNIEnv *env, _svmt_object_instance **pslot)
{
  _svmt_heap *heap = &(env->vm->heap);

#if defined (_SABLEVM_GC_STATISTICS)
  struct timeval start_time;
  struct timeval end_time;
  long secs;
  long usecs;

  gettimeofday (&start_time, NULL);
#endif

  /* the full collection uses a copying algorithm. */
  if (heap->next_gc != SVM_GC_FULL)
    {
      _svmt_generation *gen = heap->generations;

      if (((void *) *pslot) >= gen[0].space.start
          && ((void *) *pslot) < gen[1].alloc.start)
        {
          if (((void *) pslot) >= gen[1].space.start
              && ((void *) pslot) < gen[2].alloc.start)
            {
              _svmt_frame *frame =
                heap->frames + ((((char *) pslot)
                                 - ((char *) gen[1].space.start))
                                / heap->frame_size);

              if (((void *) pslot) < frame->start)
                {
                  frame->start = (void *) pslot;
                }

              pslot = (_svmt_object_instance **) ((_svmt_word *) pslot) + 1;

              if (((void *) pslot) > frame->end)
                {
                  frame->end = (void *) pslot;
                }

#if defined (_SABLEVM_GC_STATISTICS)
              heap->wb_slow_paths++;
#endif
            }
        }
    }

#if defined (_SABLEVM_GC_STATISTICS)
  gettimeofday (&end_time, NULL);

  secs = end_time.tv_sec - start_time.tv_sec;
  usecs = end_time.tv_usec - start_time.tv_usec;

  if (usecs < 0)
    {
      usecs += 1000000;
      secs -= 1;
    }

  heap->wb_total_secs += secs;
  heap->wb_total_usecs += usecs;

  if (heap->wb_total_usecs > 999999)
    {
      heap->wb_total_usecs -= 1000000;
      heap->wb_total_secs += 1;
    }

  heap->wb_calls++;

  {
    jint i, j;

    for (i = 0; i < 3; i++)
      {
        if (((void *) pslot) >= heap->generations[i].space.start
            && ((void *) pslot) < heap->generations[i].alloc.start)
          {
            break;
          }
      }

    for (j = 0; j < 3; j++)
      {
        if (((void *) *pslot) >= heap->generations[j].space.start
            && ((void *) *pslot) < heap->generations[j].alloc.start)
          {
            break;
          }
      }

    if (i != 3 && j != 3)
      {
        if (i < j)
          {
            heap->wb_y2o_ptrs++;
          }
        else if (i > j)
          {
            heap->wb_o2y_ptrs++;
          }
      }
  }
#endif
}

/*
--------------------------------------------------------------------------------
_svmf_get_available_space
--------------------------------------------------------------------------------
*/

inline svm_static size_t
_svmf_get_available_space (_svmt_generation *gen)
{
  return (size_t) (((char *) gen->alloc.end) - ((char *) gen->alloc.start));
}

/*
--------------------------------------------------------------------------------
_svmf_is_free
--------------------------------------------------------------------------------
*/

inline svm_static jint
_svmf_is_free (_svmt_generation *gen, size_t size)
{
  return (jint) (size <= _svmf_get_available_space (gen));
}

/*
--------------------------------------------------------------------------------
_svmf_get_free_memory
--------------------------------------------------------------------------------
*/

inline svm_static size_t
_svmf_get_free_memory (_svmt_heap *heap)
{
  jint g;
  size_t result = 0;

  for (g = 0; g < SVM_DEFAULT_GENERATION_COUNT; g++)
    {
      result += _svmf_get_available_space (&(heap->generations[g]));
    }

  return result;
}

/*
--------------------------------------------------------------------------------
_svmf_get_total_space
--------------------------------------------------------------------------------
*/

inline svm_static size_t
_svmf_get_total_space (_svmt_heap *heap)
{
  return heap->size;
}

/*
--------------------------------------------------------------------------------
_svmf_get_max_size
--------------------------------------------------------------------------------
*/

inline svm_static size_t
_svmf_get_max_size (_svmt_heap *heap)
{
  return heap->max_size;
}

/*
--------------------------------------------------------------------------------
_svmf_get_hashcode_of_space
--------------------------------------------------------------------------------
*/

inline svm_static jint
_svmf_get_hashcode_of_space (_svmt_heap *heap, void *space)
{
  return heap->hashcode_base + (size_t) space;
}

/*
--------------------------------------------------------------------------------
_svmf_heap_init_defaults
--------------------------------------------------------------------------------
*/
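/* Fills the heap descriptor with the default configuration: three
   generations sized in a 1:2:4 ratio of SVM_DEFAULT_GENERATION_SIZE
   (nursery, middle-aged, oldest) for a total of seven units, plus the
   default increment, maximum, frame and large-object sizes.  The
   statistics counters are zeroed when _SABLEVM_GC_STATISTICS is
   enabled. */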
inline svm_static void
_svmf_heap_init_defaults (_svmt_heap *heap)
{
  heap->generations =
    _svmf_malloc (SVM_DEFAULT_GENERATION_COUNT * sizeof (_svmt_generation));

  if (heap->generations == NULL)
    {
      return;
    }

  heap->generations[0].size = SVM_DEFAULT_GENERATION_SIZE;
  heap->generations[1].size = SVM_DEFAULT_GENERATION_SIZE * 2;
  heap->generations[2].size = SVM_DEFAULT_GENERATION_SIZE * 4;

  heap->size = SVM_DEFAULT_GENERATION_SIZE * 7;
  heap->increment_size = SVM_DEFAULT_INCREMENT_SIZE;
  heap->max_size = SVM_DEFAULT_MAX_SIZE;
  heap->frame_size = SVM_DEFAULT_FRAME_SIZE;
  heap->large_object_size = SVM_DEFAULT_LARGE_OBJECT_SIZE;
  heap->next_gc = SVM_GC_MINOR;

#if defined (_SABLEVM_GC_STATISTICS)
  /* collector */
  heap->gc_total_secs = 0;
  heap->gc_total_usecs = 0;
  heap->gc_collections = 0;
  heap->gc_minor = 0;
  heap->gc_major = 0;
  heap->gc_full = 0;
  heap->gc_bytes_copied = 0;
  heap->gc_objects_copied = 0;

  /* allocator */
  heap->al_bytes_allocated = 0;
  heap->al_objects_allocated = 0;

  /* write barrier */
  heap->wb_total_secs = 0;
  heap->wb_total_usecs = 0;
  heap->wb_calls = 0;
  heap->wb_slow_paths = 0;
  heap->wb_y2o_ptrs = 0;
  heap->wb_o2y_ptrs = 0;
#endif
}

/*
--------------------------------------------------------------------------------
_svmf_heap_init
--------------------------------------------------------------------------------
*/
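/* Allocates the heap as one contiguous block and carves it into the
   three generations back to back, each with its alloc window initially
   covering its whole space.  The remembered-set frames cover everything
   past the nursery; each frame starts out empty, with start above end
   (UINT_MAX / 0 sentinels), so it is skipped until the write barrier
   records a slot in it. */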
inline svm_static jint
_svmf_heap_init (_svmt_heap *heap)
{
  if (heap->generations == NULL)
    {
      return JNI_ERR;
    }

/*
  if (heap->max_size < heap->size)
    {
      return JNI_ERR;
    }
*/

  /* initialize the heap. */
  heap->space.start = _svmf_malloc (heap->size);

  if (heap->space.start == NULL)
    {
      return JNI_ERR;
    }

  heap->space.end = ((char *) heap->space.start) + heap->size;

  /* initialize the frames. */
  heap->frame_count =
    _svmf_aligned_size_t (heap->size - heap->generations[0].size)
    / heap->frame_size;

  heap->frames =
    (_svmt_frame *) _svmf_malloc (heap->frame_count * sizeof (_svmt_frame));

  if (heap->frames == NULL)
    {
      return JNI_ERR;
    }
  else
    {
      jint f;

      for (f = 0; f < heap->frame_count; f++)
        {
          heap->frames[f].start = (void *) UINT_MAX;
          heap->frames[f].end = (void *) 0;
        }
    }

  /* initialize the generations. */
  {
    _svmt_generation *gen = heap->generations;
    void *start = heap->space.start;
    int i;

    for (i = 0; i < SVM_DEFAULT_GENERATION_COUNT; i++)
      {
        gen[i].alloc.start = gen[i].space.start = start;
        start = ((char *) start) + gen[i].size;
        gen[i].alloc.end = gen[i].space.end = start;
      }
  }

  return JNI_OK;
}

svm_static void
_svmf_heap_free (_svmt_heap *heap)
{
/*
  _svmf_free (heap->frames);
  _svmf_free (heap);
*/
}

/*
--------------------------------------------------------------------------------
_svmf_get_start_offset
--------------------------------------------------------------------------------
*/

inline svm_static size_t
_svmf_get_start_offset (_svmt_object_instance *obj)
{
#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

  size_t start_offset = 0;

  /* it's an array. */
  if (_svmf_lockword_is_array (obj->lockword))
    {
      assert (obj->vtable->type->is_array);

      if (_svmf_lockword_get_array_type (obj->lockword) == SVM_TYPE_REFERENCE)
        {
          start_offset =
            _svmf_aligned_size_t (((size_t)
                                   ((_svmt_array_instance *) obj)->size)
                                  * sizeof (void *));
        }

      return start_offset;
    }

  /* it's a normal object. */
  assert (!obj->vtable->type->is_array);

  return obj->vtable->start_offset;

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

  return 0;

#endif
}

/*
--------------------------------------------------------------------------------
_svmf_get_end_offset
--------------------------------------------------------------------------------
*/

inline svm_static size_t
_svmf_get_end_offset (_svmt_object_instance *obj)
{
  _svmt_word lockword = obj->lockword;

  /* it's an array. */
  if (_svmf_lockword_is_array (lockword))
    {
      size_t end_offset = 0;
      size_t size;

      assert (obj->vtable->type->is_array);

      size = (size_t) ((_svmt_array_instance *) obj)->size;
      end_offset = _svmf_aligned_size_t (sizeof (_svmt_array_instance));

      switch (_svmf_lockword_get_array_type (lockword))
        {
        case SVM_TYPE_BOOLEAN:
          {
            end_offset += (size + 7) / 8;
          }
          break;

        case SVM_TYPE_BYTE:
          {
            end_offset += size;
          }
          break;

        case SVM_TYPE_SHORT:
          {
            end_offset += size * 2;
          }
          break;

        case SVM_TYPE_CHAR:
          {
            end_offset += size * 2;
          }
          break;

        case SVM_TYPE_INT:
          {
            end_offset += size * 4;
          }
          break;

        case SVM_TYPE_LONG:
          {
            end_offset += size * 8;
          }
          break;

        case SVM_TYPE_FLOAT:
          {
            end_offset += size * 4;
          }
          break;

        case SVM_TYPE_DOUBLE:
          {
            end_offset += size * 8;
          }
          break;

        case SVM_TYPE_REFERENCE:
          {
#if defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)
            end_offset += size * sizeof (void *);
#endif
          }
          break;

        default:
          {
            _svmm_fatal_error ("impossible control flow");
          }
          break;
        }

      return _svmf_aligned_size_t (end_offset);
    }

  /* it's a normal object. */
  assert (!obj->vtable->type->is_array);

  return obj->vtable->next_offset_no_hashcode;
}

/*
--------------------------------------------------------------------------------
_svmf_do_copy_object
--------------------------------------------------------------------------------
*/

inline svm_static _svmt_object_instance *
_svmf_do_copy_object (_svmt_generation *gen, _svmt_object_instance *obj,
                      size_t start_offset, size_t obj_size)
{
  void *to_space = gen->alloc.start;

#if defined (MAGIC)
  obj->magic[0] = 0;
#endif

  memcpy (to_space, (void *) (((char *) obj) - start_offset), obj_size);
  gen->alloc.start = ((char *) to_space) + obj_size;

  /* set and return forward reference. */
  return (*(_svmt_object_instance **) obj) =
    (_svmt_object_instance *) (((char *) to_space) + start_offset);
}

/*
--------------------------------------------------------------------------------
_svmf_copy_object
--------------------------------------------------------------------------------
*/
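/* Evacuates one object into the current to-generation.  NULL references
   and references outside the from-space are returned unchanged; an
   object whose lockword already holds a forwarding pointer yields that
   pointer.  Otherwise the object (including any bidirectional-layout
   reference area in front of the header) is copied by
   _svmf_do_copy_object and its new address is recorded in the old
   header. */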
svm_static _svmt_object_instance *
_svmf_copy_object (_svmt_JNIEnv *env, _svmt_object_instance *obj)
{
  _svmt_heap *heap = &(env->vm->heap);

  if (obj == NULL)
    {
      return NULL;
    }

  /* obj is out of from space. */
  {
    if (((void *) obj) < heap->from_generation->space.start
        || ((void *) obj) >= heap->from_generation->space.end)
      {
        return obj;
      }
  }

  /* lockword is a forward reference. */
  if (_svmf_lockword_is_forward_reference (obj->lockword))
    {
      assert (((void *) obj->lockword) >= heap->to_generation->space.start
              && ((void *) obj->lockword) < heap->to_generation->alloc.start);

      return (_svmt_object_instance *) obj->lockword;
    }

#if defined (MAGIC)
  assert (strcmp (obj->magic, "SableVM") == 0);
#endif

  {
    size_t end_offset = _svmf_get_end_offset (obj);
    size_t start_offset = _svmf_get_start_offset (obj);
    size_t obj_size = start_offset + end_offset;

#if defined (_SABLEVM_GC_STATISTICS)
    heap->gc_bytes_copied += obj_size;
    heap->gc_objects_copied++;
#endif

    return _svmf_do_copy_object (heap->to_generation, obj, start_offset,
                                 obj_size);
  }
}

/*
--------------------------------------------------------------------------------
_svmf_trace_native_ref_list
--------------------------------------------------------------------------------
*/

inline svm_static void
_svmf_trace_native_ref_list (_svmt_JNIEnv *env, _svmt_native_ref *list)
{
  while (list != NULL)
    {
      list->ref = _svmf_copy_object (env, list->ref);
      list = list->next;
    }
}

/*
--------------------------------------------------------------------------------
_svmf_trace_stack
--------------------------------------------------------------------------------
*/

inline svm_static void
_svmf_trace_stack (_svmt_JNIEnv *env, _svmt_JNIEnv *thread)
{
  _svmt_JavaVM *vm = env->vm;
  _svmt_stack_frame *frame = thread->stack.current_frame;
  _svmt_method_info *method = frame->method;

  while (method != &vm->stack_bottom_method)
    {
      if (!_svmf_is_set_flag (method->access_flags, SVM_ACC_INTERNAL))
        {
          _svmt_stack_value *locals = (_svmt_stack_value *) (void *)
            (((char *) frame) - method->frame_info->start_offset);
          _svmt_gc_map_node *pgm = method->parameters_gc_map;
          jint nprlc = method->frame_info->non_parameter_ref_locals_count;
          _svmt_stack_value *stack = (_svmt_stack_value *) (void *)
            (((char *) frame) + _svmv_stack_offset);
          jint stack_size = frame->stack_size;
          _svmt_gc_map_node *stack_gc_map =
            (stack_size == 0) ? NULL : (frame->pc - 1)->stack_gc_map;
          jint i, start = method->java_args_count;

          frame->this = _svmf_copy_object (env, frame->this);
          frame->stack_trace_element =
            _svmf_copy_object (env, frame->stack_trace_element);

          /* Trace method formal parameters. */
          for (i = 0; i < pgm->size; i++)
            {
              if (_svmf_get_bit (pgm->bits, i))
                {
                  locals[i].reference =
                    _svmf_copy_object (env, locals[i].reference);
                }
            }

          /* Trace other local references. */
          for (i = 0; i < nprlc; i++)
            {
              locals[start + i].reference =
                _svmf_copy_object (env, locals[start + i].reference);
            }

          /* Trace the stack. */
          if (stack_size > 0)
            {
              jint max = _svmf_min_jint (stack_size, stack_gc_map->size);

              for (i = 0; i < max; i++)
                {
                  if (_svmf_get_bit (stack_gc_map->bits, i))
                    {
                      stack[i].reference =
                        _svmf_copy_object (env, stack[i].reference);
                    }
                }
            }
        }

      frame = (_svmt_stack_frame *) (void *)
        (((char *) frame) - frame->previous_offset);
      method = frame->method;
    }
}

/*
--------------------------------------------------------------------------------
_svmf_trace_frame
--------------------------------------------------------------------------------
*/
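/* Scans one remembered-set frame.  Every word between frame->start and
   frame->end that looks like a reference is passed to
   _svmf_copy_object; otherwise the position is treated as an object
   header and the whole object is skipped.  The frame bounds are rebuilt
   while scanning: during a minor collection, slots that live in the
   oldest generation are re-remembered so they survive for the next
   minor collection. */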
inline svm_static void
_svmf_trace_frame (_svmt_JNIEnv *env, _svmt_frame *frame)
{
  if (frame->start < frame->end)
    {
      _svmt_word *current = (_svmt_word *) frame->start;
      _svmt_word *end_offset = (_svmt_word *) frame->end;

      frame->start = (void *) UINT_MAX;
      frame->end = (void *) 0;

      while (current < end_offset)
        {
          if (_svmf_word_is_reference (*current))
            {
              *current = (_svmt_word) ((void *)
                _svmf_copy_object (env, (_svmt_object_instance *) *current));

              /* add the reference in the remembered set. */
              if (env->vm->heap.next_gc == SVM_GC_MINOR
                  && ((void *) current)
                     >= env->vm->heap.generations[2].space.start)
                {
                  if (frame->end < frame->start)
                    {
                      frame->start = current;
                    }

                  frame->end = ++current;
                }
              else
                {
                  ++current;
                }
            }
          else
            {
              current = (_svmt_word *)
                (((char *) current)
                 + _svmf_get_end_offset ((_svmt_object_instance *) current));
            }
        }
    }
}

/*
--------------------------------------------------------------------------------
_svmf_trace_space
--------------------------------------------------------------------------------
*/

inline svm_static void
_svmf_trace_space (_svmt_JNIEnv *env, _svmt_frame *frame)
{
  _svmt_word *current = (_svmt_word *) frame->start;
  _svmt_word *end_offset = (_svmt_word *) frame->end;

  while (current < end_offset)
    {
      if (_svmf_word_is_reference (*current))
        {
          *current = (_svmt_word) ((void *)
            _svmf_copy_object (env, (_svmt_object_instance *) *current));
          ++current;
        }
      else
        {
          current = (_svmt_word *)
            (((char *) current)
             + _svmf_get_end_offset ((_svmt_object_instance *) current));
        }
    }
}

/*
--------------------------------------------------------------------------------
_svmf_trace_heap
--------------------------------------------------------------------------------
*/

inline svm_static void
_svmf_trace_heap (_svmt_JNIEnv *env)
{
  _svmt_generation *to = env->vm->heap.to_generation;
  _svmt_heap *heap = &(env->vm->heap);

  /* trace the remembered set. */
  if (heap->next_gc == SVM_GC_MINOR)
    {
      jint i;

      for (i = 0; i < heap->frame_count; i++)
        {
          _svmf_trace_frame (env, &(heap->frames[i]));
        }
    }
  else if (heap->next_gc == SVM_GC_MAJOR)
    {
      jint frame_count = heap->generations[1].size / heap->frame_size;
      jint i;

      for (i = 0; i < frame_count; i++)
        {
          heap->frames[i].start = (void *) UINT_MAX;
          heap->frames[i].end = (void *) 0;
        }

      for (; i < heap->frame_count; i++)
        {
          _svmf_trace_frame (env, heap->frames + i);
        }
    }

  /* trace the moved objects. */
  myframe.end = to->alloc.start;

  while (myframe.start < myframe.end && myframe.end < to->space.end)
    {
      _svmf_trace_space (env, &myframe);
      myframe.start = myframe.end;
      myframe.end = to->alloc.start;
    }
}

/*
--------------------------------------------------------------------------------
_svmf_trace_native_references
--------------------------------------------------------------------------------
*/

inline svm_static void
_svmf_trace_native_references (_svmt_JNIEnv *env)
{
  _svmt_JavaVM *vm = env->vm;
  _svmt_JNIEnv *thread;

  /* Trace native global references. */
  _svmf_trace_native_ref_list (env, vm->native_globals.list);

  /* Trace stack and native local references. */
  for (thread = vm->threads.user; thread != NULL; thread = thread->next)
    {
      _svmf_trace_native_ref_list (env, thread->native_locals.list);
      _svmf_trace_stack (env, thread);
    }

  for (thread = vm->threads.system; thread != NULL; thread = thread->next)
    {
      _svmf_trace_native_ref_list (env, thread->native_locals.list);
      _svmf_trace_stack (env, thread);
    }
}

/*
--------------------------------------------------------------------------------
_svmf_generational_copying_gc_internal
--------------------------------------------------------------------------------
*/
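/* Core stop-the-world collection: all other threads are halted, the
   global mutex is released during tracing and re-acquired before the
   world is resumed.  Native and stack roots are copied first, then the
   heap itself is traced (remembered-set frames plus the objects just
   copied into the to-generation, scanned through myframe). */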
inline svm_static void
_svmf_generational_copying_gc_internal (_svmt_JNIEnv *env)
{
  _svmf_stop_the_world (env);

  pthread_mutex_unlock (&env->vm->global_mutex);

  myframe.start = env->vm->heap.to_generation->alloc.start;

  _svmf_trace_native_references (env);
  _svmf_trace_heap (env);

  pthread_mutex_lock (&env->vm->global_mutex);

  _svmf_resume_the_world (env);
}

/*
--------------------------------------------------------------------------------
_svmf_collect_garbage
--------------------------------------------------------------------------------
*/
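/* Dispatches on heap->next_gc: a minor collection copies the nursery
   (generation 0) into the middle-aged generation, a major collection
   copies generations 0 and 1 into the oldest one, and a full collection
   grows the oldest generation by increment_size, allocates a fresh heap
   through _svmf_heap_init, copies every live object into it, and frees
   the old block. */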
inline svm_static void
_svmf_collect_garbage (_svmt_JNIEnv *env)
{
  _svmt_heap *heap = &(env->vm->heap);

  if (heap->next_gc == SVM_GC_MINOR)
    {
#if defined (_SABLEVM_GC_STATISTICS)
      heap->gc_minor++;
#endif

      heap->from_generation = &(heap->generations[0]);
      heap->to_generation = &(heap->generations[1]);

      _svmf_generational_copying_gc_internal (env);

      heap->generations[0].alloc = heap->generations[0].space;
    }
  else if (heap->next_gc == SVM_GC_MAJOR)
    {
      _svmt_generation gen;

#if defined (_SABLEVM_GC_STATISTICS)
      heap->gc_major++;
#endif

      gen.space.start = heap->generations[0].space.start;
      gen.space.end = heap->generations[1].space.end;

      heap->from_generation = &gen;
      heap->to_generation = &(heap->generations[2]);

      _svmf_generational_copying_gc_internal (env);

      heap->generations[0].alloc = heap->generations[0].space;
      heap->generations[1].alloc = heap->generations[1].space;
    }
  else if (heap->next_gc == SVM_GC_FULL)
    {
      _svmt_generation gen;
      void *old_heap = heap->space.start;

#if defined (_SABLEVM_GC_STATISTICS)
      heap->gc_full++;
#endif

      heap->generations[2].size += heap->increment_size;
      heap->size += heap->increment_size;

      gen.space.start = heap->space.start;
      gen.space.end = heap->space.end;

      heap->from_generation = &gen;

      _svmf_free (heap->frames);

      if (_svmf_heap_init (heap) == JNI_OK)
        {
          heap->to_generation = &(heap->generations[2]);

          _svmf_generational_copying_gc_internal (env);

          _svmf_free (old_heap);
        }
    }
  else
    {
      _svmm_fatal_error ("impossible control flow");
    }
}

/*
--------------------------------------------------------------------------------
_svmf_select_next_gc
--------------------------------------------------------------------------------
*/

svm_static void
_svmf_select_next_gc (_svmt_JNIEnv *env)
{
  _svmt_heap *heap = &(env->vm->heap);
  _svmt_generation *gen = heap->generations;

  /* when the next collection is a full one, we allocate in each
     generation until an object can not be allocated.  Then the full
     collection scavenges the heap using a basic copying algorithm. */

  /* Do a full collection until enough space is recycled in the oldest
     generation. */
  if (heap->next_gc == SVM_GC_FULL
      && ((float) _svmf_get_available_space (&(gen[2]))
          / (float) gen[2].size) < 0.30)
    {
      return;
    }
  else
    {
      jlong free_space = _svmf_get_available_space (&(gen[1]));

      if (free_space > gen[0].size)
        {
          heap->next_gc = SVM_GC_MINOR;
        }
      else if (free_space > gen[0].size / 2)
        {
          heap->next_gc = SVM_GC_MINOR;

          /* the middle-aged generation must be able to contain the
             nursery. */
          gen[0].alloc.end = ((char *) gen[0].space.start) + free_space;
        }
      else
        {
          free_space = _svmf_get_available_space (&(gen[2]));

          if (free_space > gen[1].size)
            {
              heap->next_gc = SVM_GC_MAJOR;

              /* the oldest generation must be able to contain the
                 nursery and the middle-aged generation. */
              free_space -= (jlong) (((char *) gen[1].alloc.start)
                                     - ((char *) gen[1].space.start));
              free_space = (free_space > gen[0].size)
                ? gen[0].size : free_space;

              gen[0].alloc.end = ((char *) gen[0].space.start) + free_space;
            }
          else
            {
              heap->next_gc = SVM_GC_FULL;
            }
        }
    }
}

/*
--------------------------------------------------------------------------------
_svmf_gc_request_space
--------------------------------------------------------------------------------
*/

/* IMPORTANT: The calling thread should hold the lock on
   vm->global_mutex when calling this function. */

inline svm_static jint
_svmf_gc_request_space (_svmt_JNIEnv *env, size_t requested_size)
{
  _svmt_heap *heap = &(env->vm->heap);
  _svmt_generation *gen = heap->generations;

#if defined (_SABLEVM_GC_STATISTICS)
  struct timeval start_time;
  struct timeval end_time;
  long secs;
  long usecs;
#endif

  /* requested_size = 0 means full gc. */

  /* large object goes in the oldest generation. */
  if (requested_size != 0)
    {
      if (heap->next_gc == SVM_GC_FULL)
        {
          int i;

          for (i = 0; i < 3; i++)
            {
              if (_svmf_is_free (&(gen[i]), requested_size))
                {
                  heap->to_generation = &(gen[i]);
                  return JNI_OK;
                }
            }
        }
/*
      else if (requested_size >= heap->large_object_size
               && _svmf_is_free (&(gen[2]), requested_size))
        {
          heap->to_generation = &(gen[2]);
          return JNI_OK;
        }
*/
      else if (_svmf_is_free (&(gen[0]), requested_size))
        {
          heap->to_generation = &(gen[0]);
          return JNI_OK;
        }
/*
      else if (requested_size < gen[2].size)
        {
          heap->next_gc = SVM_GC_FULL;
        }
      else
        {
          return JNI_ERR;
        }
*/
    }
  else
    {
      heap->next_gc = SVM_GC_FULL;
    }

#if defined (_SABLEVM_GC_STATISTICS)
  gettimeofday (&start_time, NULL);
#endif

  /* do gc call. */
  _svmf_collect_garbage (env);

#if defined (_SABLEVM_GC_STATISTICS)
  heap->gc_collections++;

  gettimeofday (&end_time, NULL);

  secs = end_time.tv_sec - start_time.tv_sec;
  usecs = end_time.tv_usec - start_time.tv_usec;

  if (usecs < 0)
    {
      usecs += 1000000;
      secs -= 1;
    }

  heap->gc_total_secs += secs;
  heap->gc_total_usecs += usecs;

  if (heap->gc_total_usecs > 999999)
    {
      heap->gc_total_usecs -= 1000000;
      heap->gc_total_secs += 1;
    }
#endif

  if (_svmf_is_free (heap->to_generation, requested_size))
    {
      _svmf_select_next_gc (env);
      return JNI_OK;
    }

  return JNI_ERR;
}

/*
--------------------------------------------------------------------------------
_svmf_gc_new_instance
--------------------------------------------------------------------------------
*/

svm_static jint
_svmf_gc_new_instance (_svmt_JNIEnv *env, size_t instance_size,
                       void **ppinstance)
{
  jint status;

  _svmm_mutex_lock (env->vm->global_mutex);

  _svmf_halt_if_requested (env);

  status = _svmf_gc_request_space (env, instance_size);

  if (status == JNI_OK)
    {
      /* Reset the instance space. */
      void **ppspace = &(env->vm->heap.to_generation->alloc.start);

      memset (*ppspace, 0, instance_size);
      *ppinstance = *ppspace;

      /* Update the free-space pointer. */
      *ppspace = ((char *) *ppspace) + instance_size;

#if defined (_SABLEVM_GC_STATISTICS)
      env->vm->heap.al_bytes_allocated += instance_size;
      env->vm->heap.al_objects_allocated++;
#endif
    }
  else
    {
      _svmf_error_OutOfMemoryError (env);
    }

  _svmm_mutex_unlock ();

  return status;
}

#endif /* #if defined (_SABLEVM_GENERATIONAL_COPYING_GC) */