
src/share/vm/memory/universe.cpp

 535          sk != NULL;
 536          sk = sk->next_sibling()) {
 537       reinitialize_vtable_of(sk, CHECK);
 538     }
 539   }
 540 }
 541 
 542 
 543 void initialize_itable_for_klass(Klass* k, TRAPS) {
 544   InstanceKlass::cast(k)->itable().initialize_itable(false, CHECK);
 545 }
 546 
 547 
 548 void Universe::reinitialize_itables(TRAPS) {
 549   SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
 550 
 551 }
 552 
 553 
 554 bool Universe::on_page_boundary(void* addr) {
 555   return is_ptr_aligned(addr, os::vm_page_size());
 556 }
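
For context on the check above: on_page_boundary() is just a power-of-two alignment test against the VM page size. A minimal standalone sketch of that kind of test (an assumed helper for illustration, not the actual is_ptr_aligned/is_aligned from the HotSpot sources): an address is aligned when its low-order bits below the alignment are all zero.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for an is_aligned-style check. 'alignment' must be a
    // power of two; the pointer is aligned when the bits below that power of two
    // are all zero.
    static bool is_aligned_sketch(const void* addr, uintptr_t alignment) {
      assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
      return (reinterpret_cast<uintptr_t>(addr) & (alignment - 1)) == 0;
    }

    int main() {
      // 4096 stands in for os::vm_page_size() on a typical platform.
      alignas(4096) static char page[4096];
      printf("page start aligned: %d\n", is_aligned_sketch(page, 4096));
      printf("page + 8 aligned:   %d\n", is_aligned_sketch(page + 8, 4096));
      return 0;
    }
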
 557 
 558 
 559 bool Universe::should_fill_in_stack_trace(Handle throwable) {
 560   // Never attempt to fill in the stack trace of preallocated errors that do not
 561   // have a backtrace. These errors are kept alive forever and may be "re-used" when
 562   // all preallocated errors with backtrace have been consumed. We also need to avoid
 563   // a potential loop which could happen if an out-of-memory condition occurs when
 564   // attempting to allocate the backtrace.
 565   return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
 566           (throwable() != Universe::_out_of_memory_error_metaspace)  &&
 567           (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
 568           (throwable() != Universe::_out_of_memory_error_array_size) &&
 569           (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
 570           (throwable() != Universe::_out_of_memory_error_realloc_objects));
 571 }
 572 
 573 
 574 oop Universe::gen_out_of_memory_error(oop default_err) {
 575   // generate an out of memory error:


 801   if (Universe::narrow_oop_base() != 0) {
 802     st->print(": " PTR_FORMAT, p2i(Universe::narrow_oop_base()));
 803   }
 804 
 805   if (Universe::narrow_oop_shift() != 0) {
 806     st->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
 807   }
 808 
 809   if (!Universe::narrow_oop_use_implicit_null_checks()) {
 810     st->print(", no protected page in front of the heap");
 811   }
 812   st->cr();
 813 }
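
As background for the base and shift values printed by this function: a compressed (narrow) oop is, roughly, the object address minus the heap base, shifted right by the oop shift, and decoding reverses those two steps. The sketch below illustrates that arithmetic with made-up constants; the names and the simplified NULL handling are assumptions for illustration, not HotSpot's actual encode/decode paths.

    #include <cstdint>
    #include <cstdio>

    // Illustrative constants standing in for the printed values: an assumed heap
    // base and the usual shift of 3 (8-byte object alignment).
    static const uintptr_t kHeapBase = 0x0000001000000000ULL;
    static const int       kOopShift = 3;

    // Encode: subtract the base, then drop the alignment bits with the shift.
    static uint32_t encode_narrow(uintptr_t addr) {
      if (addr == 0) return 0;                       // NULL stays 0
      return (uint32_t)((addr - kHeapBase) >> kOopShift);
    }

    // Decode: shift back up and add the base again.
    static uintptr_t decode_narrow(uint32_t narrow) {
      if (narrow == 0) return 0;
      return kHeapBase + ((uintptr_t)narrow << kOopShift);
    }

    int main() {
      uintptr_t obj = kHeapBase + 0x12345678;        // some 8-byte-aligned offset
      uint32_t  n   = encode_narrow(obj);
      printf("narrow=0x%x roundtrip_ok=%d\n", n, decode_narrow(n) == obj);
      return 0;
    }
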
 814 
 815 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
 816 
 817   assert(alignment <= Arguments::conservative_max_heap_alignment(),
 818          "actual alignment " SIZE_FORMAT " must be within maximum heap alignment " SIZE_FORMAT,
 819          alignment, Arguments::conservative_max_heap_alignment());
 820 
 821   size_t total_reserved = align_size_up(heap_size, alignment);
 822   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
 823       "heap size is too big for compressed oops");
 824 
 825   bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
 826   assert(!UseLargePages
 827       || UseParallelGC
 828       || use_large_pages, "Wrong alignment to use large pages");
 829 
 830   // Now create the space.
 831   ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages);
 832 
 833   if (total_rs.is_reserved()) {
 834     assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0),
 835            "must be exactly of required size and alignment");
 836     // We are good.
 837 
 838     if (UseCompressedOops) {
 839       // Universe::initialize_heap() will reset this to NULL if unscaled
 840       // or zero-based narrow oops are actually used.
 841       // Else heap start and base MUST differ, so that NULL can be encoded unambiguously.
 842       Universe::set_narrow_oop_base((address)total_rs.compressed_oop_base());
 843     }
 844 
 845     return total_rs;
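
One detail in reserve_heap() above: the requested heap size is rounded up to the heap alignment before the space is reserved. A minimal sketch of that round-up for a power-of-two alignment (an assumed helper for illustration, not the align_size_up/align_up used in the sources):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Hypothetical round-up: bump 'size' to the next multiple of 'alignment'
    // (a power of two), leaving exact multiples unchanged.
    static size_t align_up_sketch(size_t size, size_t alignment) {
      assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t M = 1024 * 1024;
      // With a 2 MB heap alignment, 1000 MB is already a multiple and stays put,
      // while 1001 MB rounds up to 1002 MB.
      printf("%zu MB\n", align_up_sketch(1000 * M, 2 * M) / M);
      printf("%zu MB\n", align_up_sketch(1001 * M, 2 * M) / M);
      return 0;
    }
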




 535          sk != NULL;
 536          sk = sk->next_sibling()) {
 537       reinitialize_vtable_of(sk, CHECK);
 538     }
 539   }
 540 }
 541 
 542 
 543 void initialize_itable_for_klass(Klass* k, TRAPS) {
 544   InstanceKlass::cast(k)->itable().initialize_itable(false, CHECK);
 545 }
 546 
 547 
 548 void Universe::reinitialize_itables(TRAPS) {
 549   SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
 550 
 551 }
 552 
 553 
 554 bool Universe::on_page_boundary(void* addr) {
 555   return is_aligned(addr, os::vm_page_size());
 556 }
 557 
 558 
 559 bool Universe::should_fill_in_stack_trace(Handle throwable) {
 560   // Never attempt to fill in the stack trace of preallocated errors that do not
 561   // have a backtrace. These errors are kept alive forever and may be "re-used" when
 562   // all preallocated errors with backtrace have been consumed. We also need to avoid
 563   // a potential loop which could happen if an out-of-memory condition occurs when
 564   // attempting to allocate the backtrace.
 565   return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
 566           (throwable() != Universe::_out_of_memory_error_metaspace)  &&
 567           (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
 568           (throwable() != Universe::_out_of_memory_error_array_size) &&
 569           (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
 570           (throwable() != Universe::_out_of_memory_error_realloc_objects));
 571 }
 572 
 573 
 574 oop Universe::gen_out_of_memory_error(oop default_err) {
 575   // generate an out of memory error:


 801   if (Universe::narrow_oop_base() != 0) {
 802     st->print(": " PTR_FORMAT, p2i(Universe::narrow_oop_base()));
 803   }
 804 
 805   if (Universe::narrow_oop_shift() != 0) {
 806     st->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
 807   }
 808 
 809   if (!Universe::narrow_oop_use_implicit_null_checks()) {
 810     st->print(", no protected page in front of the heap");
 811   }
 812   st->cr();
 813 }
 814 
 815 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
 816 
 817   assert(alignment <= Arguments::conservative_max_heap_alignment(),
 818          "actual alignment " SIZE_FORMAT " must be within maximum heap alignment " SIZE_FORMAT,
 819          alignment, Arguments::conservative_max_heap_alignment());
 820 
 821   size_t total_reserved = align_up(heap_size, alignment);
 822   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
 823       "heap size is too big for compressed oops");
 824 
 825   bool use_large_pages = UseLargePages && is_aligned(alignment, os::large_page_size());
 826   assert(!UseLargePages
 827       || UseParallelGC
 828       || use_large_pages, "Wrong alignment to use large pages");
 829 
 830   // Now create the space.
 831   ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages);
 832 
 833   if (total_rs.is_reserved()) {
 834     assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0),
 835            "must be exactly of required size and alignment");
 836     // We are good.
 837 
 838     if (UseCompressedOops) {
 839       // Universe::initialize_heap() will reset this to NULL if unscaled
 840       // or zero-based narrow oops are actually used.
 841       // Else heap start and base MUST differ, so that NULL can be encoded unambiguously.
 842       Universe::set_narrow_oop_base((address)total_rs.compressed_oop_base());
 843     }
 844 
 845     return total_rs;

