/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP

#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/copy.hpp"

// Inline allocation implementations.

// Finish header setup for a freshly allocated object: install the mark
// word first (via post_allocation_setup_no_klass_install), then the klass
// pointer.  The klass is installed last because a non-NULL klass is what
// makes the object parsable to concurrent collectors.
void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
                                                 HeapWord* obj_ptr) {
  post_allocation_setup_no_klass_install(klass, obj_ptr);
  oop obj = (oop)obj_ptr;
#if ! INCLUDE_ALL_GCS
  obj->set_klass(klass());
#else
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-NULL, for
  // concurrent collectors.
  obj->release_set_klass(klass());
#endif
}

// Install only the mark word; the klass field is left untouched.  With
// biased locking enabled (and a real klass available) the klass'
// prototype header is used so new instances start out biasable.
void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
                                                           HeapWord* obj_ptr) {
  oop obj = (oop)obj_ptr;

  assert(obj != NULL, "NULL object pointer");
  if (UseBiasedLocking && (klass() != NULL)) {
    obj->set_mark(klass->prototype_header());
  } else {
    // May be bootstrapping
    obj->set_mark(markOopDesc::prototype());
  }
}

// Support for jvmti and dtrace.
// Posts per-allocation notifications: low-memory detection, the JVMTI
// VMObjectAlloc event, the DTrace object-alloc probe, and (when the
// HeapMonitor flag is on) statistical heap sampling.
// 'size' is in HeapWords; each branch is a no-op when its feature is off.
inline void post_allocation_notify(KlassHandle klass, oop obj, int size) {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();

  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj);

  if (DTraceAllocProbes) {
    // support for Dtrace object alloc event (no-op most of the time)
    if (klass() != NULL && klass()->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(obj, size);
    }
  }

  if (HeapMonitor) {
    // support for object alloc event (no-op most of the time)
    if (klass() != NULL && klass()->name() != NULL) {
      Thread *base_thread = Thread::current();
      if (base_thread->is_Java_thread()) {
        JavaThread *thread = (JavaThread *) base_thread;
        // Per-thread countdown of bytes remaining until the next sample.
        size_t *bytes_until_sample = thread->bytes_until_sample();
        size_t size_in_bytes = ((size_t) size) << LogHeapWordSize;
        assert(size > 0, "positive size");
        if (*bytes_until_sample < size_in_bytes) {
          // This allocation exhausts the countdown: take a sample.
          // NOTE(review): assumes object_alloc_do_sample re-arms
          // bytes_until_sample -- confirm in HeapMonitoring.
          HeapMonitoring::object_alloc_do_sample(thread, obj, size_in_bytes);
        } else {
          *bytes_until_sample -= size_in_bytes;
        }
      }
    }
  }
}

// Finish setup for an ordinary (non-array) object and post allocation
// notifications.  'size' is in HeapWords.
void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
                                              HeapWord* obj_ptr,
                                              int size) {
  post_allocation_setup_common(klass, obj_ptr);
  oop obj = (oop)obj_ptr;
  assert(Universe::is_bootstrapping() ||
         !obj->is_array(), "must not be an array");
  // notify jvmti and dtrace
  post_allocation_notify(klass, obj, size);
}

// Finish setup for a java.lang.Class instance (mirror); records the
// instance size in the mirror's oop_size field before klass install.
void CollectedHeap::post_allocation_setup_class(KlassHandle klass,
                                                HeapWord* obj_ptr,
                                                int size) {
  // Set oop_size field before setting the _klass field because a
  // non-NULL _klass field indicates that the object is parsable by
  // concurrent GC.
  oop new_cls = (oop)obj_ptr;
  assert(size > 0, "oop_size must be positive.");
  java_lang_Class::set_oop_size(new_cls, size);
  post_allocation_setup_common(klass, obj_ptr);
  assert(Universe::is_bootstrapping() ||
         !new_cls->is_array(), "must not be an array");
  // notify jvmti and dtrace
  post_allocation_notify(klass, new_cls, size);
}

// Finish setup for an array object; the length field is written before
// the klass install for the same parsability reason as above.
void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                                HeapWord* obj_ptr,
                                                int length) {
  // Set array length before setting the _klass field because a
  // non-NULL klass field indicates that the object is parsable by
  // concurrent GC.
  assert(length >= 0, "length should be non-negative");
  ((arrayOop)obj_ptr)->set_length(length);
  post_allocation_setup_common(klass, obj_ptr);
  oop new_obj = (oop)obj_ptr;
  assert(new_obj->is_array(), "must be an array");
  // notify jvmti and dtrace (must be after length is set for dtrace)
  post_allocation_notify(klass, new_obj, new_obj->size());
}

// Raw (uninitialized) memory allocation: try the thread-local allocation
// buffer first, then fall back to a shared heap allocation.  On failure,
// reports OOM, posts the JVMTI resource-exhausted event, and throws
// OutOfMemoryError via TRAPS.  'size' is in HeapWords.
HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) {

  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if from tlab, so clear here.
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)

  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  HeapWord* result = NULL;
  if (UseTLAB) {
    result = allocate_from_tlab(klass, THREAD, size);
    if (result != NULL) {
      assert(!HAS_PENDING_EXCEPTION,
             "Unexpected exception, will result in uninitialized storage");
      return result;
    }
  }
  bool gc_overhead_limit_was_exceeded = false;
  result = Universe::heap()->mem_allocate(size,
                                          &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    // Outside-TLAB allocations are accounted here; TLAB allocations are
    // accounted when the TLAB is retired.
    THREAD->incr_allocated_bytes(size * HeapWordSize);

    AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize);

    return result;
  }

  // Allocation failed: distinguish plain heap exhaustion from the GC
  // overhead limit, since they throw differently-tagged OOM errors.
  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}

// As common_mem_allocate_noinit, but additionally zero-fills the object
// body past the header (via init_obj).
HeapWord* CollectedHeap::common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS) {
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

// Fast-path bump allocation in the thread's TLAB; falls back to the slow
// path (which may refill the TLAB) when the fast path fails.
HeapWord* CollectedHeap::allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size) {
  assert(UseTLAB, "should use UseTLAB");

  HeapWord* obj = thread->tlab().allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_tlab_slow(klass, thread, size);
}

// Clear the klass gap and zero-fill everything past the object header.
// 'size' is the full object size in HeapWords, header included.
void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
  assert(obj != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(size >= hs, "unexpected object size");
  ((oop)obj)->set_klass_gap(0);
  Copy::fill_to_aligned_words(obj + hs, size - hs);
}

// Allocate and fully initialize an ordinary Java object.  'size' is in
// HeapWords.  Throws (returns NULL with pending exception) on failure.
oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_obj(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Allocate a java.lang.Class mirror; identical to obj_allocate except the
// mirror's oop_size field is also recorded.
oop CollectedHeap::class_allocate(KlassHandle klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_class(klass, obj, size); // set oop_size
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Allocate and zero-initialize an array of the given length.  'size' is
// the total size in HeapWords (header + elements).
oop CollectedHeap::array_allocate(KlassHandle klass,
                                  int size,
                                  int length,
                                  TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_array(klass, obj, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Allocate an array WITHOUT zeroing the element storage (only the klass
// gap is cleared); the caller is responsible for initializing every
// element before the array becomes visible.
oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
                                         int size,
                                         int length,
                                         TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  ((oop)obj)->set_klass_gap(0);
  post_allocation_setup_array(klass, obj, length);
#ifndef PRODUCT
  // Skip the header (and the cleared klass-gap word) when verifying that
  // the uninitialized body still holds the debug "bad heap word" pattern.
  const size_t hs = oopDesc::header_size()+1;
  Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
#endif
  return (oop)obj;
}

// Align 'addr' up to 'alignment_in_bytes', plugging the resulting gap
// with a filler object so the heap stays parsable.  Returns 'addr'
// unchanged when no extra alignment is needed, the aligned address on
// success, or NULL if the aligned address would reach 'end'.
inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
                                                         HeapWord* end,
                                                         unsigned short alignment_in_bytes) {
  if (alignment_in_bytes <= ObjectAlignmentInBytes) {
    return addr;
  }

  assert(is_ptr_aligned(addr, HeapWordSize),
         "Address " PTR_FORMAT " is not properly aligned.", p2i(addr));
  assert(is_size_aligned(alignment_in_bytes, HeapWordSize),
         "Alignment size %u is incorrect.", alignment_in_bytes);

  HeapWord* new_addr = (HeapWord*) align_ptr_up(addr, alignment_in_bytes);
  size_t padding = pointer_delta(new_addr, addr);

  if (padding == 0) {
    return addr;
  }

  // The gap must be large enough to hold a filler object; if not, skip
  // forward one more alignment unit.
  if (padding < CollectedHeap::min_fill_size()) {
    padding += alignment_in_bytes / HeapWordSize;
    assert(padding >= CollectedHeap::min_fill_size(),
           "alignment_in_bytes %u is expect to be larger "
           "than the minimum object size", alignment_in_bytes);
    new_addr = addr + padding;
  }

  assert(new_addr > addr, "Unexpected arithmetic overflow "
         PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr));
  if(new_addr < end) {
    CollectedHeap::fill_with_object(addr, padding);
    return new_addr;
  } else {
    return NULL;
  }
}

#ifndef PRODUCT

// Stress-test hook (-XX:+PromotionFailureALot): after every
// PromotionFailureALotInterval collections, force every
// PromotionFailureALotCount-th promotion to fail artificially.
inline bool
CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

// Convenience overload using the heap-wide failure counter.
inline bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

// Re-arm the stress hook: record the current collection count and reset
// the given failure counter.
inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

// Convenience overload using the heap-wide failure counter.
inline void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}
#endif  // #ifndef PRODUCT

#endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP