1 #ifdef USE_PRAGMA_IDENT_HDR
   2 #pragma ident "@(#)collectedHeap.inline.hpp     1.50 07/09/07 10:56:50 JVM"
   3 #endif
   4 /*
   5  * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  
  26  */
  27 
  28 // Inline allocation implementations.
  29 
  30 void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
  31                                                  HeapWord* obj,
  32                                                  size_t size) {
  33   post_allocation_setup_no_klass_install(klass, obj, size);
  34   post_allocation_install_obj_klass(klass, oop(obj), (int) size);
  35 }
  36 
  37 void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
  38                                                            HeapWord* objPtr,
  39                                                            size_t size) {
  40   oop obj = (oop)objPtr;
  41 
  42   assert(obj != NULL, "NULL object pointer");
  43   if (UseBiasedLocking && (klass() != NULL)) {
  44     obj->set_mark(klass->prototype_header());
  45   } else {
  46     // May be bootstrapping
  47     obj->set_mark(markOopDesc::prototype());
  48   }
  49 }
  50 
// Publishes the klass pointer into a newly allocated object.  This is
// the last step of object setup.  'size' is not used in the body; it is
// kept for interface symmetry with the other post_allocation_* helpers.
void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
                                                   oop obj,
                                                   int size) {
  // These asserts are kind of complicated because of klassKlass
  // and the beginning of the world.
  // A NULL klass is tolerated only before the universe is fully
  // initialized (bootstrapping).
  assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
  assert(klass() == NULL || klass()->is_klass(), "not a klass");
  assert(klass() == NULL || klass()->klass_part() != NULL, "not a klass");
  assert(obj != NULL, "NULL object pointer");
  obj->set_klass(klass());
  // After installation the object must resolve to a valid blueprint,
  // except while still bootstrapping.
  assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
         "missing blueprint");
}
  64 
  65 // Support for jvmti and dtrace
  66 inline void post_allocation_notify(KlassHandle klass, oop obj) {
  67   // support low memory notifications (no-op if not enabled)
  68   LowMemoryDetector::detect_low_memory_for_collected_pools();
  69 
  70   // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  71   JvmtiExport::vm_object_alloc_event_collector(obj);
  72 
  73   if (DTraceAllocProbes) {
  74     // support for Dtrace object alloc event (no-op most of the time)
  75     if (klass() != NULL && klass()->klass_part()->name() != NULL) {
  76       SharedRuntime::dtrace_object_alloc(obj);
  77     }
  78   }
  79 }
  80 
  81 void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
  82                                               HeapWord* obj,
  83                                               size_t size) {
  84   post_allocation_setup_common(klass, obj, size);
  85   assert(Universe::is_bootstrapping() ||
  86          !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
  87   // notify jvmti and dtrace
  88   post_allocation_notify(klass, (oop)obj);
  89 }
  90 
  91 void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
  92                                                 HeapWord* obj,
  93                                                 size_t size,
  94                                                 int length) {
  95   // Set array length before setting the _klass field
  96   // in post_allocation_setup_common() because the klass field
  97   // indicates that the object is parsable by concurrent GC.
  98   assert(length >= 0, "length should be non-negative");
  99   ((arrayOop)obj)->set_length(length);
 100   post_allocation_setup_common(klass, obj, size);
 101   assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
 102   // notify jvmti and dtrace (must be after length is set for dtrace)
 103   post_allocation_notify(klass, (oop)obj);
 104 }
 105 
// Allocates 'size' heap words of UNINITIALIZED storage: tries the
// thread-local allocation buffer first, then the shared heap.  On
// failure, reports and throws OutOfMemoryError ("Java heap space" or
// "GC overhead limit exceeded").  Returns NULL only with a pending
// exception; callers use CHECK_* macros accordingly.
HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {

  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if from tlab, so clear here.
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)
  
  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  // We may want to update this, is_noref objects might not be allocated in TLABs.
  HeapWord* result = NULL;
  if (UseTLAB) {
    // Fast path: bump-pointer allocation in the thread-local buffer.
    result = CollectedHeap::allocate_from_tlab(THREAD, size);
    if (result != NULL) {
      assert(!HAS_PENDING_EXCEPTION,
             "Unexpected exception, will result in uninitialized storage");
      return result;
    }
  }
  // Slow path: allocate from the shared heap (may trigger a GC).
  // NOTE(review): the third argument looks like an is_tlab flag (false
  // here since this is not a TLAB refill) — confirm against the
  // mem_allocate() declaration.
  bool gc_overhead_limit_was_exceeded = false;
  result = Universe::heap()->mem_allocate(size,
                                          is_noref,
                                          false,
                                          &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    return result;
  }

  // Allocation failed even after the heap's own recovery attempts:
  // report and throw the appropriate OutOfMemoryError flavor.
  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP, 
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}
 165 
 166 HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, bool is_noref, TRAPS) {
 167   HeapWord* obj = common_mem_allocate_noinit(size, is_noref, CHECK_NULL);
 168   init_obj(obj, size);
 169   return obj;
 170 }
 171 
// Need to investigate, do we really want to throw OOM exception here?
// Allocates 'size' words of UNINITIALIZED storage from the permanent
// generation.  On failure, reports and throws OutOfMemoryError
// ("PermGen space").  Returns NULL only with a pending exception.
HeapWord* CollectedHeap::common_permanent_mem_allocate_noinit(size_t size, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_NULL too
  }

#ifdef ASSERT
  // Debug-only test hook: with -XX:CIFireOOMAt=<n>, the n-th compiler
  // thread allocation deliberately throws OOM.
  if (CIFireOOMAt > 0 && THREAD->is_Compiler_thread() &&
      ++_fire_out_of_memory_count >= CIFireOOMAt) {
    // For testing of OOM handling in the CI throw an OOM and see how
    // it does.  Historically improper handling of these has resulted
    // in crashes which we really don't want to have in the CI.
    THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
  }
#endif

  HeapWord* result = Universe::heap()->permanent_mem_allocate(size);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    return result;
  }
  // Allocation failed: report and throw OutOfMemoryError.
  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  report_java_out_of_memory("PermGen space");

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 
        "PermGen space");
  }

  THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
}
 208 
 209 HeapWord* CollectedHeap::common_permanent_mem_allocate_init(size_t size, TRAPS) {
 210   HeapWord* obj = common_permanent_mem_allocate_noinit(size, CHECK_NULL);
 211   init_obj(obj, size);
 212   return obj;
 213 }
 214 
 215 HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) {
 216   assert(UseTLAB, "should use UseTLAB");
 217 
 218   HeapWord* obj = thread->tlab().allocate(size);
 219   if (obj != NULL) {
 220     return obj;
 221   }
 222   // Otherwise...
 223   return allocate_from_tlab_slow(thread, size);
 224 }
 225 
 226 void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
 227   assert(obj != NULL, "cannot initialize NULL object");
 228   const size_t hs = oopDesc::header_size();
 229   assert(size >= hs, "unexpected object size");
 230   ((oop)obj)->set_klass_gap(0);
 231   Copy::fill_to_aligned_words(obj + hs, size - hs);
 232 }
 233 
 234 oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
 235   debug_only(check_for_valid_allocation_state());
 236   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
 237   assert(size >= 0, "int won't convert to size_t");
 238   HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
 239   post_allocation_setup_obj(klass, obj, size);
 240   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
 241   return (oop)obj;  
 242 }
 243 
 244 oop CollectedHeap::array_allocate(KlassHandle klass,
 245                                   int size,
 246                                   int length,
 247                                   TRAPS) {
 248   debug_only(check_for_valid_allocation_state());
 249   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
 250   assert(size >= 0, "int won't convert to size_t");
 251   HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
 252   post_allocation_setup_array(klass, obj, size, length);
 253   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
 254   return (oop)obj;  
 255 }
 256 
 257 oop CollectedHeap::large_typearray_allocate(KlassHandle klass,
 258                                             int size,
 259                                             int length,
 260                                             TRAPS) {
 261   debug_only(check_for_valid_allocation_state());
 262   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
 263   assert(size >= 0, "int won't convert to size_t");
 264   HeapWord* obj = common_mem_allocate_init(size, true, CHECK_NULL);
 265   post_allocation_setup_array(klass, obj, size, length);
 266   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
 267   return (oop)obj;  
 268 }
 269 
 270 oop CollectedHeap::permanent_obj_allocate(KlassHandle klass, int size, TRAPS) {
 271   oop obj = permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
 272   post_allocation_install_obj_klass(klass, obj, size);
 273   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj, 
 274                                                               size));
 275   return obj;  
 276 }
 277 
 278 oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass,
 279                                                            int size, 
 280                                                            TRAPS) {
 281   debug_only(check_for_valid_allocation_state());
 282   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
 283   assert(size >= 0, "int won't convert to size_t");
 284   HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
 285   post_allocation_setup_no_klass_install(klass, obj, size);
 286   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
 287   return (oop)obj;  
 288 }
 289 
 290 oop CollectedHeap::permanent_array_allocate(KlassHandle klass,
 291                                             int size,
 292                                             int length,
 293                                             TRAPS) {
 294   debug_only(check_for_valid_allocation_state());
 295   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
 296   assert(size >= 0, "int won't convert to size_t");
 297   HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
 298   post_allocation_setup_array(klass, obj, size, length);
 299   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
 300   return (oop)obj;  
 301 }
 302 
 303 // Returns "TRUE" if "p" is a method oop in the
 304 // current heap with high probability. NOTE: The main
 305 // current consumers of this interface are Forte::
 306 // and ThreadProfiler::. In these cases, the
 307 // interpreter frame from which "p" came, may be
 308 // under construction when sampled asynchronously, so
 309 // the clients want to check that it represents a
 310 // valid method before using it. Nonetheless since
 311 // the clients do not typically lock out GC, the
 312 // predicate is_valid_method() is not stable, so
 313 // it is possible that by the time "p" is used, it
 314 // is no longer valid.
 315 inline bool CollectedHeap::is_valid_method(oop p) const {
 316   return
 317     p != NULL &&
 318 
 319     // Check whether it is aligned at a HeapWord boundary.
 320     Space::is_aligned(p) &&
 321 
 322     // Check whether "method" is in the allocated part of the
 323     // permanent generation -- this needs to be checked before
 324     // p->klass() below to avoid a SEGV (but see below
 325     // for a potential window of vulnerability).
 326     is_permanent((void*)p) &&
 327 
 328     // See if GC is active; however, there is still an
 329     // apparently unavoidable window after this call
 330     // and before the client of this interface uses "p".
 331     // If the client chooses not to lock out GC, then
 332     // it's a risk the client must accept.
 333     !is_gc_active() &&
 334 
 335     // Check that p is a methodOop.
 336     p->klass() == Universe::methodKlassObj();
 337 }
 338 
 339 
 340 #ifndef PRODUCT
 341 
 342 inline bool
 343 CollectedHeap::promotion_should_fail(volatile size_t* count) {
 344   // Access to count is not atomic; the value does not have to be exact.
 345   if (PromotionFailureALot) {
 346     const size_t gc_num = total_collections();
 347     const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
 348     if (elapsed_gcs >= PromotionFailureALotInterval) {
 349       // Test for unsigned arithmetic wrap-around.
 350       if (++*count >= PromotionFailureALotCount) {
 351         *count = 0;
 352         return true;
 353       }
 354     }
 355   }
 356   return false;
 357 }
 358 
// Convenience overload using the heap-wide failure-injection counter.
inline bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}
 362 
 363 inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
 364   if (PromotionFailureALot) {
 365     _promotion_failure_alot_gc_number = total_collections();
 366     *count = 0;
 367   }
 368 }
 369 
// Convenience overload using the heap-wide failure-injection counter.
inline void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}
 373 #endif  // #ifndef PRODUCT