1 /* 2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP 26 #define SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP 27 28 #include "classfile/javaClasses.hpp" 29 #include "gc/shared/allocTracer.hpp" 30 #include "gc/shared/collectedHeap.hpp" 31 #include "gc/shared/threadLocalAllocBuffer.inline.hpp" 32 #include "memory/universe.hpp" 33 #include "oops/arrayOop.hpp" 34 #include "oops/oop.inline.hpp" 35 #include "prims/jvmtiExport.hpp" 36 #include "runtime/sharedRuntime.hpp" 37 #include "runtime/thread.inline.hpp" 38 #include "services/lowMemoryDetector.hpp" 39 #include "utilities/copy.hpp" 40 41 // Inline allocation implementations. 42 43 void CollectedHeap::post_allocation_setup_common(KlassHandle klass, 44 HeapWord* obj) { 45 post_allocation_setup_no_klass_install(klass, obj); 46 post_allocation_install_obj_klass(klass, oop(obj)); 47 } 48 49 void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass, 50 HeapWord* objPtr) { 51 oop obj = (oop)objPtr; 52 53 assert(obj != NULL, "NULL object pointer"); 54 if (UseBiasedLocking && (klass() != NULL)) { 55 obj->set_mark(klass->prototype_header()); 56 } else { 57 // May be bootstrapping 58 obj->set_mark(markOopDesc::prototype()); 59 } 60 } 61 62 void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass, 63 oop obj) { 64 // These asserts are kind of complicated because of klassKlass 65 // and the beginning of the world. 66 assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass"); 67 assert(klass() == NULL || klass()->is_klass(), "not a klass"); 68 assert(obj != NULL, "NULL object pointer"); 69 obj->set_klass(klass()); 70 assert(!Universe::is_fully_initialized() || obj->klass() != NULL, 71 "missing klass"); 72 } 73 74 // Support for jvmti and dtrace 75 inline void post_allocation_notify(KlassHandle klass, oop obj, int size) { 76 // support low memory notifications (no-op if not enabled) 77 LowMemoryDetector::detect_low_memory_for_collected_pools(); 78 79 // support for JVMTI VMObjectAlloc event (no-op if not enabled) 80 JvmtiExport::vm_object_alloc_event_collector(obj); 81 82 if (DTraceAllocProbes) { 83 // support for Dtrace object alloc event (no-op most of the time) 84 if (klass() != NULL && klass()->name() != NULL) { 85 SharedRuntime::dtrace_object_alloc(obj, size); 86 } 87 } 88 } 89 90 void CollectedHeap::post_allocation_setup_obj(KlassHandle klass, 91 HeapWord* obj, 92 int size) { 93 post_allocation_setup_common(klass, obj); 94 assert(Universe::is_bootstrapping() || 95 !((oop)obj)->is_array(), "must not be an array"); 96 // notify jvmti and dtrace 97 post_allocation_notify(klass, (oop)obj, size); 98 } 99 100 void CollectedHeap::post_allocation_setup_class(KlassHandle klass, 101 HeapWord* obj, 102 int size) { 103 // Set oop_size field before setting the _klass field 104 // in post_allocation_setup_common() because the klass field 105 // indicates that the object is parsable by concurrent GC. 106 oop new_cls = (oop)obj; 107 assert(size > 0, "oop_size must be positive."); 108 java_lang_Class::set_oop_size(new_cls, size); 109 post_allocation_setup_common(klass, obj); 110 assert(Universe::is_bootstrapping() || 111 !new_cls->is_array(), "must not be an array"); 112 // notify jvmti and dtrace 113 post_allocation_notify(klass, new_cls, size); 114 } 115 116 void CollectedHeap::post_allocation_setup_array(KlassHandle klass, 117 HeapWord* obj, 118 int length) { 119 // Set array length before setting the _klass field 120 // in post_allocation_setup_common() because the klass field 121 // indicates that the object is parsable by concurrent GC. 122 assert(length >= 0, "length should be non-negative"); 123 ((arrayOop)obj)->set_length(length); 124 post_allocation_setup_common(klass, obj); 125 oop new_obj = (oop)obj; 126 assert(new_obj->is_array(), "must be an array"); 127 // notify jvmti and dtrace (must be after length is set for dtrace) 128 post_allocation_notify(klass, new_obj, new_obj->size()); 129 } 130 131 HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) { 132 133 // Clear unhandled oops for memory allocation. Memory allocation might 134 // not take out a lock if from tlab, so clear here. 135 CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();) 136 137 if (HAS_PENDING_EXCEPTION) { 138 NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending")); 139 return NULL; // caller does a CHECK_0 too 140 } 141 142 HeapWord* result = NULL; 143 if (UseTLAB) { 144 result = allocate_from_tlab(klass, THREAD, size); 145 if (result != NULL) { 146 assert(!HAS_PENDING_EXCEPTION, 147 "Unexpected exception, will result in uninitialized storage"); 148 return result; 149 } 150 } 151 bool gc_overhead_limit_was_exceeded = false; 152 result = Universe::heap()->mem_allocate(size, 153 &gc_overhead_limit_was_exceeded); 154 if (result != NULL) { 155 NOT_PRODUCT(Universe::heap()-> 156 check_for_non_bad_heap_word_value(result, size)); 157 assert(!HAS_PENDING_EXCEPTION, 158 "Unexpected exception, will result in uninitialized storage"); 159 THREAD->incr_allocated_bytes(size * HeapWordSize); 160 161 AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize); 162 163 return result; 164 } 165 166 167 if (!gc_overhead_limit_was_exceeded) { 168 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 169 report_java_out_of_memory("Java heap space"); 170 171 if (JvmtiExport::should_post_resource_exhausted()) { 172 JvmtiExport::post_resource_exhausted( 173 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP, 174 "Java heap space"); 175 } 176 177 THROW_OOP_0(Universe::out_of_memory_error_java_heap()); 178 } else { 179 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 180 report_java_out_of_memory("GC overhead limit exceeded"); 181 182 if (JvmtiExport::should_post_resource_exhausted()) { 183 JvmtiExport::post_resource_exhausted( 184 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP, 185 "GC overhead limit exceeded"); 186 } 187 188 THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit()); 189 } 190 } 191 192 HeapWord* CollectedHeap::common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS) { 193 HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL); 194 init_obj(obj, size); 195 return obj; 196 } 197 198 HeapWord* CollectedHeap::allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size) { 199 assert(UseTLAB, "should use UseTLAB"); 200 201 HeapWord* obj = thread->tlab().allocate(size); 202 if (obj != NULL) { 203 return obj; 204 } 205 // Otherwise... 206 return allocate_from_tlab_slow(klass, thread, size); 207 } 208 209 void CollectedHeap::init_obj(HeapWord* obj, size_t size) { 210 assert(obj != NULL, "cannot initialize NULL object"); 211 const size_t hs = oopDesc::header_size(); 212 assert(size >= hs, "unexpected object size"); 213 ((oop)obj)->set_klass_gap(0); 214 Copy::fill_to_aligned_words(obj + hs, size - hs); 215 } 216 217 oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) { 218 debug_only(check_for_valid_allocation_state()); 219 assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); 220 assert(size >= 0, "int won't convert to size_t"); 221 HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL); 222 post_allocation_setup_obj(klass, obj, size); 223 NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); 224 return (oop)obj; 225 } 226 227 oop CollectedHeap::class_allocate(KlassHandle klass, int size, TRAPS) { 228 debug_only(check_for_valid_allocation_state()); 229 assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); 230 assert(size >= 0, "int won't convert to size_t"); 231 HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL); 232 post_allocation_setup_class(klass, obj, size); // set oop_size 233 NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); 234 return (oop)obj; 235 } 236 237 oop CollectedHeap::array_allocate(KlassHandle klass, 238 int size, 239 int length, 240 TRAPS) { 241 debug_only(check_for_valid_allocation_state()); 242 assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); 243 assert(size >= 0, "int won't convert to size_t"); 244 HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL); 245 post_allocation_setup_array(klass, obj, length); 246 NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); 247 return (oop)obj; 248 } 249 250 oop CollectedHeap::array_allocate_nozero(KlassHandle klass, 251 int size, 252 int length, 253 TRAPS) { 254 debug_only(check_for_valid_allocation_state()); 255 assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); 256 assert(size >= 0, "int won't convert to size_t"); 257 HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL); 258 ((oop)obj)->set_klass_gap(0); 259 post_allocation_setup_array(klass, obj, length); 260 #ifndef PRODUCT 261 const size_t hs = oopDesc::header_size()+1; 262 Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs); 263 #endif 264 return (oop)obj; 265 } 266 267 inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr, 268 HeapWord* end, 269 unsigned short alignment_in_bytes) { 270 if (alignment_in_bytes <= ObjectAlignmentInBytes) { 271 return addr; 272 } 273 274 assert(is_ptr_aligned(addr, HeapWordSize), 275 "Address " PTR_FORMAT " is not properly aligned.", p2i(addr)); 276 assert(is_size_aligned(alignment_in_bytes, HeapWordSize), 277 "Alignment size %u is incorrect.", alignment_in_bytes); 278 279 HeapWord* new_addr = (HeapWord*) align_ptr_up(addr, alignment_in_bytes); 280 size_t padding = pointer_delta(new_addr, addr); 281 282 if (padding == 0) { 283 return addr; 284 } 285 286 if (padding < CollectedHeap::min_fill_size()) { 287 padding += alignment_in_bytes / HeapWordSize; 288 assert(padding >= CollectedHeap::min_fill_size(), 289 "alignment_in_bytes %u is expect to be larger " 290 "than the minimum object size", alignment_in_bytes); 291 new_addr = addr + padding; 292 } 293 294 assert(new_addr > addr, "Unexpected arithmetic overflow " 295 PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr)); 296 if(new_addr < end) { 297 CollectedHeap::fill_with_object(addr, padding); 298 return new_addr; 299 } else { 300 return NULL; 301 } 302 } 303 304 #ifndef PRODUCT 305 306 inline bool 307 CollectedHeap::promotion_should_fail(volatile size_t* count) { 308 // Access to count is not atomic; the value does not have to be exact. 309 if (PromotionFailureALot) { 310 const size_t gc_num = total_collections(); 311 const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number; 312 if (elapsed_gcs >= PromotionFailureALotInterval) { 313 // Test for unsigned arithmetic wrap-around. 314 if (++*count >= PromotionFailureALotCount) { 315 *count = 0; 316 return true; 317 } 318 } 319 } 320 return false; 321 } 322 323 inline bool CollectedHeap::promotion_should_fail() { 324 return promotion_should_fail(&_promotion_failure_alot_count); 325 } 326 327 inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) { 328 if (PromotionFailureALot) { 329 _promotion_failure_alot_gc_number = total_collections(); 330 *count = 0; 331 } 332 } 333 334 inline void CollectedHeap::reset_promotion_should_fail() { 335 reset_promotion_should_fail(&_promotion_failure_alot_count); 336 } 337 #endif // #ifndef PRODUCT 338 339 #endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP