/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * This source file is part of SableVM.                          *
 *                                                               *
 * See the file "LICENSE" for the copyright information and for  *
 * the terms and conditions for copying, distribution and        *
 * modification of this source file.                             *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* NOTE(review): this ENTIRE file is compiled out via "#ifdef COMMENT"
   (COMMENT is presumably never defined) — it is disabled work-in-progress
   code for a generational copying garbage collector with card marking.
   None of it is live until the #ifdef below is removed. */
#ifdef COMMENT

/* THIS CODE NEEDS TO BE FIXED!!! */

/* ----------------------------------------------------------------------
   _svmf_heap_init_defaults

   Fills in the default heap sizing parameters on the VM structure.
   ---------------------------------------------------------------------- */

static void
_svmf_heap_init_defaults (_svmt_JavaVM *vm)
{
  vm->heap.young_size = SVM_HEAP_DEFAULT_YOUNG_SIZE;
  vm->heap.card_size = SVM_HEAP_DEFAULT_CARD_SIZE;
  vm->heap.old_min_size = SVM_HEAP_DEFAULT_OLD_MIN_SIZE;
  vm->heap.old_max_size = SVM_HEAP_DEFAULT_OLD_MAX_SIZE;
  vm->heap.old_allocation_increment = SVM_HEAP_DEFAULT_OLD_ALLOCATION_INCREMENT;
}

/* Forward declarations for the collector entry point and its helpers. */
static jint copy_gc (_svmt_JNIEnv *env, size_t needed);

static jboolean create_new_heap (_svmt_JavaVM *vm, size_t old_generation_size);

static jint check_garbage_space (void **head, void *tail,
				 _svmt_heap *heap_info, jboolean complete);

/* ----------------------------------------------------------------------
   _svmf_heap_manager_init

   Initializes the heap manager: picks (currently hard-coded) space sizes,
   initializes the heap mutex, allocates the initial heap via
   create_new_heap(), and sets the initial collection state.
   Returns JNI_OK on success, JNI_ERR on failure.

   NOTE(review): the sizes 64000/64000/1048576/128 are magic numbers and
   ignore the defaults set by _svmf_heap_init_defaults — presumably
   temporary development values.  Requires aging space >= creation space
   (the creation space must fit entirely into an aging semi-space).
   ---------------------------------------------------------------------- */

jint
_svmf_heap_manager_init (_svmt_JavaVM *vm)
{
  vm->heap_creation_space_size = 64000;
  vm->heap_aging_space_size = 64000;
  vm->heap_old_generation_init_size = 1048576;
  vm->heap_card_count = 128;

  _svmm_mutex_init (vm->heap.mutex);

  vm->heap_creation_space_size = ALIGNED_SIZE (vm->heap_creation_space_size);
  vm->heap_aging_space_size = ALIGNED_SIZE (vm->heap_aging_space_size);

  /* NULL so create_new_heap()'s free() of the old card table is a no-op
     on this first call */
  vm->heap.card_table_base1 = NULL;

  if (vm->heap_aging_space_size < vm->heap_creation_space_size)
    {
      return JNI_ERR;
    }

  if (create_new_heap (vm, vm->heap_old_generation_init_size) == 0)
    {
      return JNI_ERR;
    }

  vm->heap.major_collection = 0;
  vm->heap.move_bar = vm->heap.end;

  return JNI_OK;
}

/* This function allocates a new heap and its associated card table

   NOTE:
   (1) The following members of vm->heap are not initialized/touched:
       mutex, move_bar, major_collection
       All other members are set accordingly.
   (2) The old heap is NOT freed in this function

   Layout of the single malloc'ed region (low to high addresses):
     [creation space][aging from-space][aging to-space][old generation]
   where the old generation runs from old_start to old_end, with
   old_watershed one aging-space-size below old_end (minor collections
   that push old_alloc past the watershed trigger a major collection).

   Returns 1 on success, 0 on allocation failure. */

static jboolean create_new_heap (_svmt_JavaVM *vm, size_t old_generation_size);

static jboolean
create_new_heap (_svmt_JavaVM *vm, size_t old_generation_size)
{
  size_t heap_size;
  double card_size;
  size_t i;

  old_generation_size = ALIGNED_SIZE (old_generation_size);

  /* creation + 2 aging semi-spaces + old generation + one extra
     aging-space-size of old-generation headroom (old_end - old_watershed) */
  heap_size =
    vm->heap_creation_space_size + vm->heap_aging_space_size * 3 +
    old_generation_size;

  vm->heap.start = malloc (heap_size);

  if (vm->heap.start == NULL)
    {
      return 0;
    }

  vm->heap.alloc = vm->heap.start;
  vm->heap.end = ((char *) vm->heap.start) + vm->heap_creation_space_size;

  vm->heap.aging_from_start = vm->heap.aging_from_alloc = vm->heap.end;
  vm->heap.aging_from_end =
    ((char *) vm->heap.aging_from_start) + vm->heap_aging_space_size;

  vm->heap.aging_to_start = vm->heap.aging_to_alloc = vm->heap.aging_from_end;
  vm->heap.aging_to_end =
    ((char *) vm->heap.aging_to_start) + vm->heap_aging_space_size;

  vm->heap.old_start = vm->heap.old_alloc = vm->heap.aging_to_end;
  vm->heap.old_watershed =
    ((char *) vm->heap.old_start) + old_generation_size;
  vm->heap.old_end =
    ((char *) vm->heap.old_watershed) + vm->heap_aging_space_size;

  /* Create the new Card Table: one byte of dirty flag per card
     (card_table_base1) followed by one size_t of object-start offset per
     card (card_table_base2), in a single allocation */
  free ((void *) vm->heap.card_table_base1);

  vm->heap.card_table_base1 =
    (char *) malloc (vm->heap_card_count +
		     vm->heap_card_count * sizeof (size_t));

  if (vm->heap.card_table_base1 == NULL)
    {
      return 0;
    }

  vm->heap.card_table_base2 =
    (size_t *) (vm->heap.card_table_base1 + vm->heap_card_count);

  /* Initialize the new Card Table */
  for (i = 0; i < vm->heap_card_count; i++)
    {
      vm->heap.card_table_base1[i] = 0;
      vm->heap.card_table_base2[i] = 0;
    }

  /* Set up book-keeping data for the Card Table (for speed):
     free_bits is log2 of the (rounded-up) card size, so that
     address >> free_bits yields a card ID.
     NOTE(review): "- 2" presumably reserves slack cards so the heap
     always fits in heap_card_count cards — confirm against CARD_ID /
     CARD_TABLE_INDEX definitions. */
  card_size =
    ((size_t) ((char *) vm->heap.old_end - (char *) vm->heap.start)) /
    (double) (vm->heap_card_count - 2);
  vm->heap.free_bits = (int) (log (card_size) / log (2.0)) + 1;
  vm->heap.heap_base_id = CARD_ID (vm->heap.start, vm->heap);

  return 1;
}

/* ----------------------------------------------------------------------
   array_get_offsets

   Computes the byte sizes of an array instance around its header:
   *before_header_offset = bytes before the header (reference elements
   are stored before the header), *after_header_offset = header plus
   element data for primitive arrays.  Both results are alignment-padded.
   Aborts on an unknown base type.
   ---------------------------------------------------------------------- */

static void array_get_offsets (jint size, jint base_type,
			       size_t *before_header_offset,
			       size_t *after_header_offset);

static void
array_get_offsets (jint size, jint base_type, size_t *before_header_offset,
		   size_t *after_header_offset)
{
  *after_header_offset = ARRAY_HEADER_SIZE;
  *before_header_offset = 0;

  switch (base_type)
    {
    case SVM_TYPE_BOOLEAN:
      {
	/* booleans are bit-packed, 8 per byte (rounded up) */
	*after_header_offset += (((size_t) size) + 7) / 8;
      }
      break;

    case SVM_TYPE_BYTE:
      {
	*after_header_offset += ((size_t) size);
      }
      break;

    case SVM_TYPE_SHORT:
      {
	*after_header_offset += ((size_t) size) * 2;
      }
      break;

    case SVM_TYPE_CHAR:
      {
	*after_header_offset += ((size_t) size) * 2;
      }
      break;

    case SVM_TYPE_INT:
      {
	*after_header_offset += ((size_t) size) * 4;
      }
      break;

    case SVM_TYPE_LONG:
      {
	*after_header_offset += ((size_t) size) * 8;
      }
      break;

    case SVM_TYPE_FLOAT:
      {
	*after_header_offset += ((size_t) size) * 4;
      }
      break;

    case SVM_TYPE_DOUBLE:
      {
	*after_header_offset += ((size_t) size) * 8;
      }
      break;

    case SVM_TYPE_REFERENCE:
      {
	/* reference elements live BEFORE the header so the GC can scan
	   them as a contiguous run of refs */
	*before_header_offset += ((size_t) size) * sizeof (void *);
      }
      break;

    default:
      {
	abort ();
      }
      break;
    }

  *after_header_offset = ALIGNED_SIZE (*after_header_offset);
  *before_header_offset = ALIGNED_SIZE (*before_header_offset);
}

/* ----------------------------------------------------------------------
   regular_obj_get_offsets

   Computes the byte sizes of a non-array object around its header, from
   the start/end fields packed into the lockword (in SVM_ALIGNMENT
   units).  When a packed field holds the overflow marker
   SVM_LOCKW_OVRFLW, the real size is fetched from the vtable instead.
   obj must point at the object's lockword.
   ---------------------------------------------------------------------- */

static void regular_obj_get_offsets (void *obj, size_t *before_header_offset,
				     size_t *after_header_offset);

static void
regular_obj_get_offsets (void *obj, size_t *before_header_offset,
			 size_t *after_header_offset)
{
  void *word = *((void **) obj);
  size_t end = _svmm_lockw_end (word);
  size_t start = _svmm_lockw_start (word);

  *after_header_offset = end * SVM_ALIGNMENT;
  *before_header_offset = start * SVM_ALIGNMENT;

  if (end == SVM_LOCKW_OVRFLW)
    {
      *after_header_offset = ((_svmt_object_instance *) obj)->vtable->next_offset;
    }

  if (start == SVM_LOCKW_OVRFLW)
    {
      *before_header_offset =
	((_svmt_object_instance *) obj)->vtable->start_offset;
    }
}

/* ----------------------------------------------------------------------
   is_intergen_ptr

   Returns non-zero if obj points into the young generation (creation
   space + the live aging semi-space).  The young generation is computed
   relative to move_bar: creation_size bytes below it and aging_size
   bytes above it, so the test stays valid whichever aging semi-space is
   currently in use.
   ---------------------------------------------------------------------- */

static jboolean is_intergen_ptr (void *obj, _svmt_heap *heap_info);

static jboolean
is_intergen_ptr (void *obj, _svmt_heap *heap_info)
{
  size_t creation_size =
    ((char *) heap_info->end) - ((char *) heap_info->start);
  size_t aging_size =
    ((char *) heap_info->old_start) - ((char *) heap_info->end);
  void *young_gen_start = ((char *) heap_info->move_bar) - creation_size;
  void *young_gen_end = ((char *) heap_info->move_bar) + aging_size;

  return (obj >= young_gen_start && obj < young_gen_end);
}

/* -----------------------------------------------------------------------
   old_alloc_overhead - This is the overhead required by every object
   copied to/created in the old generation space (to make the card
   marking mechanism work)

   Records, for every card boundary the object spans, the offset back to
   the object's start byte (so card scanning can find the first object
   header).  For objects copied (not newly created, new_obj == 0) it also
   dirty-marks the cards of any fields holding inter-generational
   pointers.
   ----------------------------------------------------------------------- */

static void old_alloc_overhead (void *obj, size_t size,
				_svmt_heap *heap_info, jboolean new_obj);

static void
old_alloc_overhead (void *obj, size_t size, _svmt_heap *heap_info,
		    jboolean new_obj)
{
  void **obj_ptr;

  /* For each card ID overflow_id, where start_id < overflow_id <= end_id,
     set the corresponding offset for the card (from the object start
     byte) */
  size_t overflow_id = CARD_ID (obj, *heap_info) + 1;
  size_t end_id = CARD_ID (((char *) obj) + size, *heap_info);

  for (; overflow_id <= end_id; overflow_id++)
    {
      heap_info->card_table_base2[overflow_id - heap_info->heap_base_id] =
	(overflow_id << heap_info->free_bits) - (size_t) obj;
    }

  if (new_obj)
    return;

  /* If this object has inter-generational pointers, set the flag as well.
     NOTE(review): this loop walks words from obj while they look like
     refs (or NULL) — presumably obj points at the first reference slot
     of the copied object and the run is terminated by a non-ref header
     word; confirm against the object layout. */
  for (obj_ptr = (void **) obj;
       *obj_ptr == NULL || _svmm_lockw_is_ref (*obj_ptr); obj_ptr++)
    {
      if (*obj_ptr != NULL && is_intergen_ptr (*obj_ptr, heap_info))
	{
	  heap_info->
	    card_table_base1[CARD_TABLE_INDEX (obj_ptr, *heap_info)] = 1;
	}
    }
}

/* ----------------------------------------------------------------------
   copy_gc
   ---------------------------------------------------------------------- */

/* copy_object: copies one object to its destination space and returns
   the new address, installing a forwarding pointer in the old lockword.
   - NULL is returned unchanged.
   - In a minor collection, objects already in the old space stay put.
   - A lockword that is already a forwarding pointer is returned as-is.
   Objects below move_bar are evacuated to the aging to-space; objects at
   or above it are promoted to the old space (with card bookkeeping). */

static void *copy_object (void *obj, _svmt_heap *heap_info);

static void *
copy_object (void *obj, _svmt_heap *heap_info)
{
  void *word;
  size_t instance_size;
  size_t start_offset;
  size_t next_offset;
  void *result;
  void **ptail;

  /* NULL */
  if (obj == NULL)
    {
      return NULL;
    }

  /* Check eligibility: in a minor collection, old-space objects are not
     moved */
  if (!heap_info->major_collection && obj >= heap_info->old_start)
    return obj;

  word = *((void **) obj);

  /* Forward pointer - already copied.  Do nothing */
  if (_svmm_lockw_is_ref (word))
    {
      return word;
    }

  /* !!we must copy the object!! */

  /* Resolve destination */
  if (obj < heap_info->move_bar)
    {
      ptail = &(heap_info->aging_to_alloc);
    }
  else
    {
      ptail = &(heap_info->old_alloc);
    }

  /* Calculate the size of the object */
  if (_svmm_lockw_is_array (word))
    {
      /* it's actually an array */
      array_get_offsets (((_svmt_array_instance *) obj)->size,
			 _svmm_lockw_type (word), &start_offset,
			 &next_offset);
    }
  else
    {
      /* it's a normal object */
      regular_obj_get_offsets (obj, &start_offset, &next_offset);
    }

  instance_size = start_offset + next_offset;

  if (obj < heap_info->move_bar)
    {
      assert ((void *) (((char *) *ptail) + instance_size) <=
	      heap_info->aging_to_end);
    }
  else
    {
      assert ((void *) (((char *) *ptail) + instance_size) <=
	      heap_info->old_end);
    }

  /* Copy the object to the space pointed by ptail (obj points at the
     header; the copy starts start_offset bytes before it) */
  memcpy (*ptail, ((char *) obj) - start_offset, instance_size);

  /* Fix card boundary issues for destinations in the old space */
  if (*ptail >= heap_info->old_start)
    {
      old_alloc_overhead (*ptail, instance_size, heap_info, 0);
    }

  /* The pointer to the (lockword of) the copied object goes to:
     - the result of this function
     - the lockword of its original place (to act as a forwarding
       pointer) */
  *((void **) obj) = result = ((char *) *ptail) + start_offset;

  /* Correct the ptail pointer to next available byte */
  *ptail = ((char *) *ptail) + instance_size;

  return result;
}

/* trace_native_refs: copies every object reachable from a list of native
   (JNI) references, updating each list entry to the new address. */

static void trace_native_refs (_svmt_native_ref *native_list,
			       _svmt_heap *heap_info);

static void
trace_native_refs (_svmt_native_ref *native_list, _svmt_heap *heap_info)
{
  while (native_list != NULL)
    {
      native_list->ref = copy_object (native_list->ref, heap_info);
      /* if (native_list->ref != NULL) { void *word = *((void **)
         native_list->ref); assert (!_svmm_lockw_is_ref(word)); } */
      native_list = native_list->next;

      /* debug-only consistency checks of the destination spaces */
      assert (check_garbage_space
	      (heap_info->aging_to_start, heap_info->aging_to_alloc,
	       heap_info, 0));
      assert (check_garbage_space
	      (heap_info->old_start, heap_info->old_alloc, heap_info, 0));
    }
}

/* check_native_refs: debug helper — asserts that no native reference
   still holds a forwarding pointer (i.e. all have been updated). */

static void check_native_refs (_svmt_native_ref *native_list);

static void
check_native_refs (_svmt_native_ref *native_list)
{
  while (native_list != NULL)
    {
      if (native_list->ref != NULL)
	{
	  assert (!_svmm_lockw_is_ref (native_list->ref->lockword));
	  /* printf (" gc: %s\n", native_list->ref->vtable->type->name); */
	}
      native_list = native_list->next;
    }
}

/* check_all_native_refs: runs check_native_refs over the global native
   reference list and over every user and system thread's local list. */

static void check_all_native_refs (_svmt_JNIEnv *env);

static void
check_all_native_refs (_svmt_JNIEnv *env)
{
  _svmt_JavaVM *vm = env->vm;

  /* Trace native refs */
  check_native_refs (vm->native_globals.native_global_list);

  /* Trace stack */
  {
    _svmt_JNIEnv *thread;

    for (thread = vm->threads.user; thread != NULL; thread = thread->next)
      {
	check_native_refs (thread->native_locals.native_local_list);
      }

    for (thread = vm->threads.system; thread != NULL; thread = thread->next)
      {
	check_native_refs (thread->native_locals.native_local_list);
      }
  }
}

/* get_gc_map: linear search of a method's GC-map list for the entry
   matching pc.  Aborts if no map exists for that pc (every GC point is
   expected to have one). */

static _svmt_gc_map *get_gc_map (_svmt_code *pc, _svmt_gc_map * gc_maps);

static _svmt_gc_map *
get_gc_map (_svmt_code *pc, _svmt_gc_map * gc_maps)
{
  while (gc_maps != NULL)
    {
      if (gc_maps->pc == pc)
	{
	  return gc_maps;
	}
      gc_maps = gc_maps->next;
    }

  abort ();
  return NULL;
}

/* trace_stack: walks a thread's Java stack frame by frame, copying every
   live reference in locals (per the method's locals_map) and on the
   operand stack (per the GC map for the frame's pc).  The topmost frame
   is handled according to the thread's gc_status:
   - GC_POINT: trace it like any other frame;
   - METHOD_START / JNI: skip it (no live refs of its own yet);
   - EXCEPTION: trace locals plus only stack slot 0 (the exception);
   anything else aborts. */

static void trace_stack (_svmt_JNIEnv *thread, _svmt_heap *heap_info);

static void
trace_stack (_svmt_JNIEnv *thread, _svmt_heap *heap_info)
{
  _svmt_stack_frame *frame = thread->stack.current_frame;
  _svmt_method_info *method;
  _svmt_stack_value *locals;
  _svmt_stack_value *stack;
  _svmt_gc_map *gc_map;
  jint locals_count;
  size_t stack_size;

  /* handle the top frame specially */
  switch (thread->gc_status)
    {
    case SVM_GC_STATUS_GC_POINT:
      {
	/* handle normally */
      }
      break;

    case SVM_GC_STATUS_METHOD_START:
    case SVM_GC_STATUS_JNI:
      {
	if (frame->previous_offset == 0)
	  {
	    return;
	  }

	frame =
	  (_svmt_stack_frame *) (((char *) frame) - frame->previous_offset);
      }
      break;

    case SVM_GC_STATUS_EXCEPTION:
      {
	jint local;

	method = frame->method;
	locals = (_svmt_stack_value *) (((char *) frame) - method->locals_size);
	locals_count = method->code_attribute->locals_count;
	stack = (_svmt_stack_value *) (((char *) frame) + method->stack_offset);

	for (local = 0; local < locals_count; local++)
	  {
	    if (GET_BIT (method->code_attribute->locals_map, local))
	      {
		locals[local].reference =
		  copy_object (locals[local].reference, heap_info);
	      }
	  }

	/* only the pending exception is live on the operand stack */
	stack[0].reference = copy_object (stack[0].reference, heap_info);

	if (frame->previous_offset == 0)
	  {
	    return;
	  }

	frame =
	  (_svmt_stack_frame *) (((char *) frame) - frame->previous_offset);
      }
      break;

    default:
      {
	abort ();
      }
      break;
    }

  /* normal frames */
  while (1)
    {
      method = frame->method;

      /* dummy and native frames hold no scannable Java locals/stack */
      if (!(IS_SET (method->access_flags, SVM_ACC_DUMMY) ||
	    IS_SET (method->access_flags, SVM_ACC_NATIVE)))
	{
	  jint local;
	  size_t s;

	  method = frame->method;
	  locals = (_svmt_stack_value *) (((char *) frame) - method->locals_size);
	  locals_count = method->code_attribute->locals_count;
	  stack = (_svmt_stack_value *) (((char *) frame) + method->stack_offset);
	  stack_size = frame->stack_size;

	  /* NOTE(review): gc_map is only fetched when stack_size > 0, and
	     only read inside the stack loop below, so it is never read
	     uninitialized */
	  if (stack_size > 0)
	    {
	      gc_map = get_gc_map (frame->pc, method->code_attribute->gc_maps);
	    }

	  for (local = 0; local < locals_count; local++)
	    {
	      if (GET_BIT (method->code_attribute->locals_map, local))
		{
		  locals[local].reference =
		    copy_object (locals[local].reference, heap_info);
		}
	    }

	  for (s = 0; s < stack_size; s++)
	    {
	      if (GET_BIT (gc_map->stack_map, s))
		{
		  stack[s].reference =
		    copy_object (stack[s].reference, heap_info);
		}
	    }
	}

      if (frame->previous_offset == 0)
	{
	  return;
	}

      frame =
	(_svmt_stack_frame *) (((char *) frame) - frame->previous_offset);
    }
}

/* trace_garbage_space: Cheney-style scan of [head, *ptail).  Each word is
   either a reference field (traced and updated in place) or an object
   header (skipped to the next object via its size).  *ptail is re-read
   each iteration, so objects appended to the space during the scan are
   traced too. */

static void trace_garbage_space (void **head, void **ptail,
				 _svmt_heap *heap_info);

static void
trace_garbage_space (void **head, void **ptail, _svmt_heap *heap_info)
{
  while (((void *) head) < *ptail)
    {
      void *word = *head;

      if (_svmm_lockw_is_ref (word))
	{
	  /* ref field: trace it */
	  *(head++) = copy_object (word, heap_info);
	}
      else
	{
	  /* obj header: skip to next obj */
	  size_t start_offset, next_offset;

	  if (_svmm_lockw_is_array (word))
	    {
	      /* it's actually an array */
	      array_get_offsets (((_svmt_array_instance *) head)->size,
				 _svmm_lockw_type (word), &start_offset,
				 &next_offset);
	    }
	  else
	    {
	      /* it's a normal object */
	      regular_obj_get_offsets (head, &start_offset, &next_offset);
	    }

	  head = (void **) (((char *) head) + next_offset);
	}
    }
}

/* check_garbage_space: debug-only invariant walk of [head, tail).
   Asserts that every non-NULL reference points into a legal space
   ("complete" mode: aging-to or old space only; otherwise, during a
   minor collection: any allocated region), and that inter-generational
   pointers stored in the old space have their card dirty-marked.
   Always returns 1 (so it can be wrapped in assert()). */

static jint check_garbage_space (void **head, void *tail,
				 _svmt_heap *heap_info, jboolean complete);

static jint
check_garbage_space (void **head, void *tail, _svmt_heap *heap_info,
		     jboolean complete)
{
  int count = 0;		/* objects seen (debugger aid) */
  int count_ref = 0;		/* refs seen since last header (debugger aid) */
  jboolean wrong = 0;
  _svmt_object_instance *hehe;	/* debugger aid: last header seen */

  assert (heap_info->old_alloc <= heap_info->old_end);

  while (((void *) head) < tail)
    {
      void *word = *((void **) head);

      if (_svmm_lockw_is_ref (word))
	{
	  count_ref++;

	  if (word != NULL)
	    {
	      if (complete)
		{
		  /* Everything should be in either the aging-to-space or
		     the old-space */
		  if (word < heap_info->aging_to_start ||
		      (word >= heap_info->aging_to_alloc &&
		       word < heap_info->old_start) ||
		      word >= heap_info->old_alloc)
		    {
		      wrong = 1;
		      assert (0);
		    }
		}
	      else if (!heap_info->major_collection)
		{
		  if (!((word >= heap_info->start && word < heap_info->alloc)
			|| (word >= heap_info->aging_to_start &&
			    word < heap_info->aging_to_alloc)
			|| (word >= heap_info->aging_from_start &&
			    word < heap_info->aging_from_alloc)
			|| (word >= heap_info->old_start &&
			    word < heap_info->old_alloc)))
		    {
		      wrong = 1;
		      assert (0);
		    }
		}

	      /* Check if intergenerational pointers are marked */
	      if (((void *) head) >= heap_info->old_start &&
		  is_intergen_ptr (word, heap_info))
		{
		  assert (heap_info->
			  card_table_base1[CARD_TABLE_INDEX
					   (head, (*heap_info))] == 1);
		}
	    }

	  head++;
	}
      else
	{
	  /* obj header: skip to next obj */
	  size_t start_offset, next_offset;

	  hehe = (_svmt_object_instance *) head;
	  assert (wrong == 0);
	  count++;
	  count_ref = 0;

	  if (_svmm_lockw_is_array (word))
	    {
	      /* it's actually an array */
	      array_get_offsets (((_svmt_array_instance *) head)->size,
				 _svmm_lockw_type (word), &start_offset,
				 &next_offset);
	    }
	  else
	    {
	      /* it's a normal object */
	      regular_obj_get_offsets (head, &start_offset, &next_offset);
	    }

	  head = (void **) (((char *) head) + next_offset);
	}
    }

  assert (head == tail);

  return 1;
}

/* trace_card_get_start: given a card index i, returns the address of the
   first reference or object header lying within that card, using the
   card_table_base2 back-offset recorded by old_alloc_overhead and then
   walking object-by-object up to the card's start address. */

static void *trace_card_get_start (int i, _svmt_heap *heap_info);

static void *
trace_card_get_start (int i, _svmt_heap *heap_info)
{
  /* Get the boundaries of the card */
  void *start_place =
    (void *) ((heap_info->heap_base_id + i) << heap_info->free_bits);
  void *start_obj =
    (void *) (((heap_info->heap_base_id + i) << heap_info->free_bits) -
	      heap_info->card_table_base2[i]);

  /* Preventive measure */
  if (start_obj < heap_info->old_start)
    start_obj = heap_info->old_start;

  /* Get the first reference/lockword within the card */
  while (start_obj < start_place)
    {
      void *word = *((void **) start_obj);

      if (_svmm_lockw_is_ref (word))
	{
	  start_obj = ((void **) start_obj) + 1;
	}
      else
	{
	  size_t start_offset, next_offset;

	  if (_svmm_lockw_is_array (word))
	    {
	      array_get_offsets (((_svmt_array_instance *) start_obj)->size,
				 _svmm_lockw_type (word), &start_offset,
				 &next_offset);
	    }
	  else
	    {
	      regular_obj_get_offsets (start_obj, &start_offset,
				       &next_offset);
	    }

	  start_obj = ((char *) start_obj) + next_offset;
	}
    }

  return start_obj;
}

/* trace_old_generation: minor-collection scan of the old generation —
   traces only the dirty-marked cards, plus everything in and after the
   card containing old_alloc (which grows as objects are promoted). */

static void trace_old_generation (_svmt_heap *heap_info);

static void
trace_old_generation (_svmt_heap *heap_info)
{
  size_t i, end_card = CARD_TABLE_INDEX (heap_info->old_alloc, (*heap_info));
  void *start_obj, *end_place;

  /* Trace all marked cards from the card of heap_info->old_start to the
     card before the one of heap_info->old_alloc */
  for (i = CARD_TABLE_INDEX (heap_info->old_start, *heap_info);
       i < end_card; i++)
    {
      if (heap_info->card_table_base1[i] != 0)
	{
	  /* Something in the card was modified... */
	  end_place =
	    (void *) ((heap_info->heap_base_id + i + 1) << heap_info->
		      free_bits);
	  start_obj = trace_card_get_start (i, heap_info);

	  /* Trace the card */
	  trace_garbage_space (start_obj, &end_place, heap_info);
	}
    }

  /* Trace the card of heap_info->old_alloc and all allocated spaces
     hereafter */
  start_obj = trace_card_get_start (i, heap_info);
  trace_garbage_space (start_obj, &(heap_info->old_alloc), heap_info);
}

/* copy_gc: the collector entry point.  Performs a minor collection
   (evacuate young objects into the aging to-space / old space) or, when
   heap.major_collection is set, a major collection into a freshly
   allocated heap.  Roots are the native reference lists and every
   thread's Java stack; afterwards the to-spaces are scanned Cheney-style
   until no new objects appear, the from-spaces are reset, and the two
   aging semi-spaces are swapped.  "needed" is the allocation request (in
   bytes) that triggered the collection, used to size a new major-
   collection heap.  Returns JNI_OK, or JNI_ERR on out-of-memory. */

static jint
copy_gc (_svmt_JNIEnv *env, size_t needed)
{
  _svmt_JavaVM *vm = env->vm;
  void *old_aging_to_alloc, *old_old_alloc;
  void *temp, *old_heap;

  assert (printf ("Garbage collection starts\n"));

  check_all_native_refs (env);
  assert (check_garbage_space
	  (vm->heap.aging_from_start, vm->heap.aging_from_alloc,
	   &(vm->heap), 0));
  assert (check_garbage_space
	  (vm->heap.start, vm->heap.alloc, &(vm->heap), 0));
  assert (check_garbage_space
	  (vm->heap.aging_to_start, vm->heap.aging_to_alloc, &(vm->heap), 0));
  assert (check_garbage_space
	  (vm->heap.old_start, vm->heap.old_alloc, &(vm->heap), 0));

  /* For major collection, allocate a new heap space */
  if (vm->heap.major_collection)
    {
      old_heap = vm->heap.start;	/* Remember the old heap pointer
					   for the free() call */

      if (create_new_heap (vm,
			   ((char *) vm->heap.old_end) -
			   ((char *) vm->heap.old_start) +
			   (vm->heap_aging_space_size) + needed * 3) == 0)
	{
	  _svmdf_error_OutOfMemoryError (env);
	  return JNI_ERR;
	}
    }

  check_all_native_refs (env);

  /* Trace native refs */
  trace_native_refs (vm->native_globals.native_global_list, &(vm->heap));
  assert (check_garbage_space
	  (vm->heap.aging_to_start, vm->heap.aging_to_alloc, &(vm->heap), 0));
  assert (check_garbage_space
	  (vm->heap.old_start, vm->heap.old_alloc, &(vm->heap), 0));

  /* Trace stack */
  {
    _svmt_JNIEnv *thread;

    for (thread = vm->threads.user; thread != NULL; thread = thread->next)
      {
	trace_native_refs (thread->native_locals.native_local_list,
			   &(vm->heap));
	trace_stack (thread, &(vm->heap));
      }

    for (thread = vm->threads.system; thread != NULL; thread = thread->next)
      {
	trace_native_refs (thread->native_locals.native_local_list,
			   &(vm->heap));
	trace_stack (thread, &(vm->heap));
      }
  }

  check_all_native_refs (env);
  assert (check_garbage_space
	  (vm->heap.aging_to_start, vm->heap.aging_to_alloc, &(vm->heap), 0));
  assert (check_garbage_space
	  (vm->heap.old_start, vm->heap.old_alloc, &(vm->heap), 0));

  /* Trace the aging to-space */
  trace_garbage_space (vm->heap.aging_to_start, &(vm->heap.aging_to_alloc),
		       &(vm->heap));
  old_aging_to_alloc = vm->heap.aging_to_alloc;
  assert (check_garbage_space
	  (vm->heap.aging_to_start, vm->heap.aging_to_alloc, &(vm->heap), 1));
  assert (check_garbage_space
	  (vm->heap.old_start, vm->heap.old_alloc, &(vm->heap), 0));

  /* Either trace the whole old generation (in major collections) or
     trace the marked cards of the old generation (in minor collections) */
  if (vm->heap.major_collection)
    trace_garbage_space (vm->heap.old_start, &(vm->heap.old_alloc),
			 &(vm->heap));
  else
    trace_old_generation (&(vm->heap));

  old_old_alloc = vm->heap.old_alloc;
  assert (check_garbage_space
	  (vm->heap.aging_to_start, vm->heap.aging_to_alloc, &(vm->heap), 0));
  assert (check_garbage_space
	  (vm->heap.old_start, vm->heap.old_alloc, &(vm->heap), 1));

  /* Deal with the cross increasing allocations: tracing one destination
     space may append objects to the other, so alternate until both
     allocation pointers are stable */
  while (old_aging_to_alloc != vm->heap.aging_to_alloc)
    {
      trace_garbage_space (old_aging_to_alloc, &(vm->heap.aging_to_alloc),
			   &(vm->heap));
      old_aging_to_alloc = vm->heap.aging_to_alloc;
      assert (check_garbage_space
	      (vm->heap.aging_to_start, vm->heap.aging_to_alloc,
	       &(vm->heap), 1));
      assert (check_garbage_space
	      (vm->heap.old_start, vm->heap.old_alloc, &(vm->heap), 0));

      if (old_old_alloc == vm->heap.old_alloc)
	break;

      trace_garbage_space (old_old_alloc, &(vm->heap.old_alloc),
			   &(vm->heap));
      old_old_alloc = vm->heap.old_alloc;
      assert (check_garbage_space
	      (vm->heap.aging_to_start, vm->heap.aging_to_alloc,
	       &(vm->heap), 0));
      assert (check_garbage_space
	      (vm->heap.old_start, vm->heap.old_alloc, &(vm->heap), 1));
    }

  assert (check_garbage_space
	  (vm->heap.aging_to_start, vm->heap.aging_to_alloc, &(vm->heap), 1));
  assert (check_garbage_space
	  (vm->heap.old_start, vm->heap.old_alloc, &(vm->heap), 1));

  /* Dump the "from" spaces */
  vm->heap.alloc = vm->heap.start;
  vm->heap.aging_from_alloc = vm->heap.aging_from_start;

  /* Swap the two aging spaces - done for both sorts of collection */
  temp = vm->heap.aging_from_start;
  vm->heap.aging_from_start = vm->heap.aging_to_start;
  vm->heap.aging_to_start = temp;

  temp = vm->heap.aging_from_alloc;
  vm->heap.aging_from_alloc = vm->heap.aging_to_alloc;
  vm->heap.aging_to_alloc = temp;

  temp = vm->heap.aging_from_end;
  vm->heap.aging_from_end = vm->heap.aging_to_end;
  vm->heap.aging_to_end = temp;

  /* Collection-specific bookkeeping work */
  if (vm->heap.major_collection)
    {
      vm->heap.major_collection = 0;
      vm->heap.move_bar = vm->heap.end;
      free (old_heap);		/* Free the old heap */
    }
  else
    {
      /* promotion crossed the watershed: schedule a major collection */
      if (vm->heap.old_alloc > vm->heap.old_watershed)
	vm->heap.major_collection = 1;
    }

  check_all_native_refs (env);

  return JNI_OK;
}

/* ----------------------------------------------------------------------
   _svmf_new_object_instance

   Allocates and zero-initializes a non-array object instance of the
   given class, garbage-collecting first when the creation space is
   exhausted.  Oversized objects (bigger than the whole creation space)
   are allocated directly in the old generation.  Returns a pointer to
   the new object's header, or NULL after raising OutOfMemoryError.
   ---------------------------------------------------------------------- */

_svmt_object_instance *
_svmf_new_object_instance (_svmt_JNIEnv *env, _svmt_class_info *class_info)
{
  _svmt_JavaVM *vm = env->vm;
  _svmt_object_instance *result;
  jint status = JNI_OK;
  void **alloc = &(vm->heap.alloc);

  /* Size is calculated easily for regular objects */
  size_t instance_size = class_info->start_offset + class_info->next_offset;

  _svmm_mutex_lock (vm->heap.mutex);

  /* If there is not enough space left, garbage collect and continue */
  if (instance_size >
      (size_t) (((char *) vm->heap.end) - ((char *) vm->heap.alloc)))
    {
      if (instance_size >
	  (size_t) (((char *) vm->heap.end) - ((char *) vm->heap.start)))
	{
	  /* object cannot fit in the creation space at all: allocate it
	     straight into the old generation */
	  if (instance_size >
	      (size_t) (((char *) vm->heap.old_end) -
			((char *) vm->heap.old_alloc)))
	    {
	      vm->heap.major_collection = 1;
	      status = copy_gc (env, instance_size);
	    }

	  alloc = &(vm->heap.old_alloc);
	  old_alloc_overhead (vm->heap.old_alloc, instance_size,
			      &(vm->heap), 1);

	  if (((void *) (((char *) (*alloc)) + instance_size)) >
	      vm->heap.old_watershed)
	    vm->heap.major_collection = 1;
	}
      else
	{
	  status = copy_gc (env, instance_size);
	}
    }

  if (status == JNI_OK)
    {
      size_t i;

      /* Initialize the new object */
      for (i = 0; i < instance_size; i++)
	{
	  ((char *) *alloc)[i] = 0;
	}

      /* result points to the header of the new object */
      result =
	(_svmt_object_instance *) (((char *) *alloc) +
				   class_info->start_offset);

      /* Virtually allocate the space in the runtime environment */
      *alloc = ((char *) *alloc) + instance_size;

      /* Initialize the header */
      result->lockword = class_info->initial_lockword;
      result->vtable = class_info->vtable;
    }

  _svmm_mutex_unlock ();

  if (status != JNI_OK)
    {
      _svmdf_error_OutOfMemoryError (env);
      return NULL;
    }

  return result;
}

/* ----------------------------------------------------------------------
   _svmf_new_array_instance

   Allocates and zero-initializes an array instance.  For
   multi-dimensional arrays the elements are references.  Same
   allocation/GC strategy as _svmf_new_object_instance.  Returns a
   pointer to the new array's header, or NULL after raising
   OutOfMemoryError.
   ---------------------------------------------------------------------- */

_svmt_array_instance *
_svmf_new_array_instance (_svmt_JNIEnv *env, _svmt_array_info *array_info,
			  jint size)
{
  _svmt_JavaVM *vm = env->vm;
  _svmt_array_instance *result;
  size_t instance_size;
  size_t start_offset, next_offset;
  void **alloc = &(vm->heap.alloc);
  jint status = JNI_OK;

  jint base_type = (array_info->dimensions == 1) ?
    array_info->base_type : SVM_TYPE_REFERENCE;

  /* Calculate the space required */
  array_get_offsets (size, base_type, &start_offset, &next_offset);
  instance_size = start_offset + next_offset;

  _svmm_mutex_lock (vm->heap.mutex);

  /* If there is not enough space left, garbage collect and continue */
  if (instance_size >
      (size_t) (((char *) vm->heap.end) - ((char *) vm->heap.alloc)))
    {
      if (instance_size >
	  (size_t) (((char *) vm->heap.end) - ((char *) vm->heap.start)))
	{
	  /* array cannot fit in the creation space at all: allocate it
	     straight into the old generation */
	  if (instance_size >
	      (size_t) (((char *) vm->heap.old_end) -
			((char *) vm->heap.old_alloc)))
	    {
	      vm->heap.major_collection = 1;
	      status = copy_gc (env, instance_size);
	    }

	  alloc = &(vm->heap.old_alloc);
	  old_alloc_overhead (vm->heap.old_alloc, instance_size,
			      &(vm->heap), 1);

	  if (((void *) (((char *) (*alloc)) + instance_size)) >
	      vm->heap.old_watershed)
	    vm->heap.major_collection = 1;
	}
      else
	{
	  status = copy_gc (env, instance_size);
	}
    }

  if (status == JNI_OK)
    {
      size_t i;

      /* Initialize the new array space */
      for (i = 0; i < instance_size; i++)
	{
	  ((char *) *alloc)[i] = 0;
	}

      /* result points to the header of the new object */
      result =
	(_svmt_array_instance *) (((char *) *alloc) + start_offset);

      /* Virtually allocate the space in the runtime environment */
      *alloc = ((char *) *alloc) + instance_size;

      /* Initialize the header */
      result->lockword = array_info->initial_lockword;
      result->vtable = array_info->vtable;
      result->size = size;
    }

  _svmm_mutex_unlock ();

  if (status != JNI_OK)
    {
      _svmdf_error_OutOfMemoryError (env);
      return NULL;
    }

  return result;
}

#endif /* COMMENT */