/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP

#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

// Inline allocation implementations.
//
// The general allocation path is:
//   1. obtain raw heap words (TLAB fast path, then shared heap),
//   2. zero the object body (except for the "nozero" array variant),
//   3. install the header (mark word, then klass; length/oop_size first
//      for arrays/classes so concurrent collectors always see a parsable
//      object once the klass is non-NULL),
//   4. notify JVMTI / dtrace / low-memory detection.

// Installs the full header of a freshly allocated object: mark word first,
// then the klass pointer. Installing the klass last (with a release store
// when concurrent collectors may be compiled in) is what makes the object
// visible as fully formed.
void CollectedHeap::post_allocation_setup_common(Klass* klass,
                                                 HeapWord* obj_ptr) {
  post_allocation_setup_no_klass_install(klass, obj_ptr);
  oop obj = (oop)obj_ptr;
#if ! INCLUDE_ALL_GCS
  // Stop-the-world collectors only: no concurrent observers, so a plain
  // store of the klass suffices.
  obj->set_klass(klass);
#else
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-NULL, for
  // concurrent collectors.
  obj->release_set_klass(klass);
#endif
}

// First step of header setup: store the default mark word (prototype
// header) for the given klass. The _klass field is deliberately NOT
// installed here, so the object is not yet considered parsable.
void CollectedHeap::post_allocation_setup_no_klass_install(Klass* klass,
                                                           HeapWord* obj_ptr) {
  oop obj = (oop)obj_ptr;

  assert(obj != NULL, "NULL object pointer");
  obj->set_mark(Klass::default_prototype_header(klass));
}

// Support for jvmti and dtrace
//
// Post-allocation event hooks; every hook is a no-op when the
// corresponding facility is disabled. `size` is in HeapWords, as passed
// by the callers below.
inline void post_allocation_notify(Klass* klass, oop obj, int size) {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();

  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj);

  if (DTraceAllocProbes) {
    // support for Dtrace object alloc event (no-op most of the time)
    if (klass != NULL && klass->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(obj, size);
    }
  }
}

// Header setup + notification for a plain (non-array) Java object.
void CollectedHeap::post_allocation_setup_obj(Klass* klass,
                                              HeapWord* obj_ptr,
                                              int size) {
  post_allocation_setup_common(klass, obj_ptr);
  oop obj = (oop)obj_ptr;
  assert(Universe::is_bootstrapping() ||
         !obj->is_array(), "must not be an array");
  // notify jvmti and dtrace
  post_allocation_notify(klass, obj, size);
}

// Header setup + notification for a java.lang.Class instance (mirror).
void CollectedHeap::post_allocation_setup_class(Klass* klass,
                                                HeapWord* obj_ptr,
                                                int size) {
  // Set oop_size field before setting the _klass field because a
  // non-NULL _klass field indicates that the object is parsable by
  // concurrent GC.
  oop new_cls = (oop)obj_ptr;
  assert(size > 0, "oop_size must be positive.");
  java_lang_Class::set_oop_size(new_cls, size);
  post_allocation_setup_common(klass, obj_ptr);
  assert(Universe::is_bootstrapping() ||
         !new_cls->is_array(), "must not be an array");
  // notify jvmti and dtrace
  post_allocation_notify(klass, new_cls, size);
}

// Header setup + notification for an array object.
void CollectedHeap::post_allocation_setup_array(Klass* klass,
                                                HeapWord* obj_ptr,
                                                int length) {
  // Set array length before setting the _klass field because a
  // non-NULL klass field indicates that the object is parsable by
  // concurrent GC.
  assert(length >= 0, "length should be non-negative");
  ((arrayOop)obj_ptr)->set_length(length);
  post_allocation_setup_common(klass, obj_ptr);
  oop new_obj = (oop)obj_ptr;
  assert(new_obj->is_array(), "must be an array");
  // notify jvmti and dtrace (must be after length is set for dtrace)
  post_allocation_notify(klass, new_obj, new_obj->size());
}

// Raw heap-memory allocation of `size` HeapWords: TLAB fast path first
// (when UseTLAB), then the shared heap via mem_allocate(). On failure,
// reports the OOM condition (heap dump / JVMTI resource-exhausted hooks)
// and throws OutOfMemoryError, returning NULL to the caller's CHECK
// macro. The returned storage is NOT initialized — no zeroing, no header.
HeapWord* CollectedHeap::common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS) {

  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if from tlab, so clear here.
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)

  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  HeapWord* result = NULL;
  if (UseTLAB) {
    result = allocate_from_tlab(klass, THREAD, size);
    if (result != NULL) {
      assert(!HAS_PENDING_EXCEPTION,
             "Unexpected exception, will result in uninitialized storage");
      return result;
    }
  }
  bool gc_overhead_limit_was_exceeded = false;
  result = Universe::heap()->mem_allocate(size,
                                          &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    // Per-thread allocation accounting; TLAB allocations are accounted
    // elsewhere, so only this outside-TLAB path updates the counter here.
    THREAD->incr_allocated_bytes(size * HeapWordSize);

    AllocTracer::send_allocation_outside_tlab(klass, result, size * HeapWordSize, THREAD);

    return result;
  }

  // Allocation failed; distinguish plain heap exhaustion from the GC
  // overhead limit so the OOM message (and error object) is accurate.
  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}

// As common_mem_allocate_noinit, but additionally zero-initializes the
// object body (everything past the header) via init_obj.
HeapWord* CollectedHeap::common_mem_allocate_init(Klass* klass, size_t size, TRAPS) {
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

// TLAB allocation: lock-free bump-pointer fast path in the thread's
// current TLAB; falls back to allocate_from_tlab_slow (which may retire
// and refill the TLAB) when the fast path fails. Returns NULL if no TLAB
// allocation is possible.
HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, Thread* thread, size_t size) {
  assert(UseTLAB, "should use UseTLAB");

  HeapWord* obj = thread->tlab().allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_tlab_slow(klass, thread, size);
}

// Prepares raw storage for header installation: clears the klass gap
// word and fills the body (everything past the header) — fill value
// defaults to zero in Copy::fill_to_aligned_words (see copy.hpp).
void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
  assert(obj != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(size >= hs, "unexpected object size");
  ((oop)obj)->set_klass_gap(0);
  Copy::fill_to_aligned_words(obj + hs, size - hs);
}

// Allocates and fully initializes a plain Java object of `size`
// HeapWords. Throws OutOfMemoryError (returning NULL) on failure.
oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_obj(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Allocates and fully initializes a java.lang.Class instance, recording
// `size` in its oop_size field before the klass is installed.
oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_class(klass, obj, size); // set oop_size
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Allocates and fully initializes (zeroes) an array of `length` elements
// occupying `size` HeapWords.
oop CollectedHeap::array_allocate(Klass* klass,
                                  int size,
                                  int length,
                                  TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_array(klass, obj, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Array allocation WITHOUT zeroing of the element storage — the caller
// takes responsibility for initializing every element. Only the klass
// gap word is cleared explicitly here.
oop CollectedHeap::array_allocate_nozero(Klass* klass,
                                         int size,
                                         int length,
                                         TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  ((oop)obj)->set_klass_gap(0);
  post_allocation_setup_array(klass, obj, length);
#ifndef PRODUCT
  // +1 skips the klass gap word cleared just above; the unzeroed body
  // must still be free of "bad heap word" debug patterns.
  const size_t hs = oopDesc::header_size()+1;
  Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
#endif
  return (oop)obj;
}

// Rounds `addr` up to `alignment_in_bytes`, plugging any resulting gap
// with a filler object (so the heap remains walkable). Returns the
// aligned address, or NULL if the aligned address would reach `end`.
// Requires alignment_in_bytes to be a multiple of HeapWordSize; no work
// is done when it does not exceed ObjectAlignmentInBytes.
inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
                                                         HeapWord* end,
                                                         unsigned short alignment_in_bytes) {
  if (alignment_in_bytes <= ObjectAlignmentInBytes) {
    return addr;
  }

  assert(is_aligned(addr, HeapWordSize),
         "Address " PTR_FORMAT " is not properly aligned.", p2i(addr));
  assert(is_aligned(alignment_in_bytes, HeapWordSize),
         "Alignment size %u is incorrect.", alignment_in_bytes);

  HeapWord* new_addr = align_up(addr, alignment_in_bytes);
  size_t padding = pointer_delta(new_addr, addr);

  if (padding == 0) {
    // Already suitably aligned; no filler needed.
    return addr;
  }

  if (padding < CollectedHeap::min_fill_size()) {
    // Gap too small to hold a filler object: skip one more alignment
    // stride so the (larger) gap can be filled.
    padding += alignment_in_bytes / HeapWordSize;
    assert(padding >= CollectedHeap::min_fill_size(),
           "alignment_in_bytes %u is expect to be larger "
           "than the minimum object size", alignment_in_bytes);
    new_addr = addr + padding;
  }

  assert(new_addr > addr, "Unexpected arithmetic overflow "
         PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr));
  if (new_addr < end) {
    CollectedHeap::fill_with_object(addr, padding);
    return new_addr;
  } else {
    return NULL;
  }
}

#ifndef PRODUCT

// Debug-only support for -XX:+PromotionFailureALot: deliberately reports
// a promotion failure once `count` reaches PromotionFailureALotCount,
// but only after PromotionFailureALotInterval GCs have elapsed since the
// last reset.
inline bool
CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

inline bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

// Restarts the PromotionFailureALot interval from the current GC count
// and zeroes the attempt counter.
inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

inline void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}
#endif  // #ifndef PRODUCT

#endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP