1 /*
  2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
 26 #define SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
 27 
 28 #include "classfile/javaClasses.hpp"
 29 #include "gc/shared/allocTracer.hpp"
 30 #include "gc/shared/collectedHeap.hpp"
 31 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 32 #include "memory/universe.hpp"
 33 #include "oops/arrayOop.hpp"
 34 #include "oops/oop.inline.hpp"
 35 #include "prims/jvmtiExport.hpp"
 36 #include "runtime/sharedRuntime.hpp"
 37 #include "runtime/thread.inline.hpp"
 38 #include "services/lowMemoryDetector.hpp"
 39 #include "utilities/align.hpp"
 40 #include "utilities/copy.hpp"
 41 
 42 // Inline allocation implementations.
 43 
// Finish header setup for a freshly allocated object: install the mark
// word first, then publish the klass pointer. The klass store is last
// because a non-NULL klass is what marks the object as fully parsable.
void CollectedHeap::post_allocation_setup_common(Klass* klass,
                                                 HeapWord* obj_ptr) {
  // Sets the mark word; the klass is not yet visible to other threads.
  post_allocation_setup_no_klass_install(klass, obj_ptr);
  oop obj = (oop)obj_ptr;
#if (INCLUDE_G1GC || INCLUDE_CMSGC)
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-NULL, for
  // concurrent collectors.
  obj->release_set_klass(klass);
#else
  // No concurrent collector can observe the object here, so a plain
  // store suffices.
  obj->set_klass(klass);
#endif
}
 57 
 58 void CollectedHeap::post_allocation_setup_no_klass_install(Klass* klass,
 59                                                            HeapWord* obj_ptr) {
 60   oop obj = (oop)obj_ptr;
 61 
 62   assert(obj != NULL, "NULL object pointer");
 63   if (UseBiasedLocking && (klass != NULL)) {
 64     obj->set_mark_raw(klass->prototype_header());
 65   } else {
 66     // May be bootstrapping
 67     obj->set_mark_raw(markOopDesc::prototype());
 68   }
 69 }
 70 
 71 // Support for jvmti and dtrace
 72 inline void post_allocation_notify(Klass* klass, oop obj, int size) {
 73   // support low memory notifications (no-op if not enabled)
 74   LowMemoryDetector::detect_low_memory_for_collected_pools();
 75 
 76   // support for JVMTI VMObjectAlloc event (no-op if not enabled)
 77   JvmtiExport::vm_object_alloc_event_collector(obj);
 78 
 79   if (DTraceAllocProbes) {
 80     // support for Dtrace object alloc event (no-op most of the time)
 81     if (klass != NULL && klass->name() != NULL) {
 82       SharedRuntime::dtrace_object_alloc(obj, size);
 83     }
 84   }
 85 }
 86 
 87 void CollectedHeap::post_allocation_setup_obj(Klass* klass,
 88                                               HeapWord* obj_ptr,
 89                                               int size) {
 90   post_allocation_setup_common(klass, obj_ptr);
 91   oop obj = (oop)obj_ptr;
 92   assert(Universe::is_bootstrapping() ||
 93          !obj->is_array(), "must not be an array");
 94   // notify jvmti and dtrace
 95   post_allocation_notify(klass, obj, size);
 96 }
 97 
// Header setup for a newly allocated java.lang.Class instance.
void CollectedHeap::post_allocation_setup_class(Klass* klass,
                                                HeapWord* obj_ptr,
                                                int size) {
  // Set oop_size field before setting the _klass field because a
  // non-NULL _klass field indicates that the object is parsable by
  // concurrent GC.
  oop new_cls = (oop)obj_ptr;
  assert(size > 0, "oop_size must be positive.");
  java_lang_Class::set_oop_size(new_cls, size);
  // Installs the mark word and (release-)stores the klass pointer.
  post_allocation_setup_common(klass, obj_ptr);
  assert(Universe::is_bootstrapping() ||
         !new_cls->is_array(), "must not be an array");
  // notify jvmti and dtrace
  post_allocation_notify(klass, new_cls, size);
}
113 
// Header setup for a newly allocated array of 'length' elements.
void CollectedHeap::post_allocation_setup_array(Klass* klass,
                                                HeapWord* obj_ptr,
                                                int length) {
  // Set array length before setting the _klass field because a
  // non-NULL klass field indicates that the object is parsable by
  // concurrent GC.
  assert(length >= 0, "length should be non-negative");
  ((arrayOop)obj_ptr)->set_length(length);
  post_allocation_setup_common(klass, obj_ptr);
  oop new_obj = (oop)obj_ptr;
  assert(new_obj->is_array(), "must be an array");
  // notify jvmti and dtrace (must be after length is set for dtrace)
  // new_obj->size() needs the fully installed header (klass + length).
  post_allocation_notify(klass, new_obj, new_obj->size());
}
128 
// Allocate 'size' HeapWords of raw (uninitialized) storage for an instance
// of 'klass'. On success returns the storage, accounts the bytes against
// the current thread, and notifies the allocation tracer. On failure
// reports out-of-memory and throws OutOfMemoryError via the TRAPS
// protocol (returning NULL to the caller). Callers must initialize the
// returned memory.
HeapWord* CollectedHeap::common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS) {

  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if from tlab, so clear here.
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)

  if (HAS_PENDING_EXCEPTION) {
    // Allocating with a pending exception would hide it; debug builds fail
    // hard, product builds just bail out.
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  bool gc_overhead_limit_was_exceeded = false;
  CollectedHeap* heap = Universe::heap();
  HeapWord* result = heap->obj_allocate_raw(klass, size, &gc_overhead_limit_was_exceeded, THREAD);

  if (result != NULL) {
    // Success: sanity-check the storage (non-product), charge the bytes to
    // the thread, and emit the outside-TLAB allocation event.
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    THREAD->incr_allocated_bytes(size * HeapWordSize);

    AllocTracer::send_allocation_outside_tlab(klass, result, size * HeapWordSize, THREAD);

    return result;
  }

  // Allocation failed. Choose the OOM flavor based on whether the GC
  // overhead limit was the cause.
  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}
180 
181 HeapWord* CollectedHeap::common_mem_allocate_init(Klass* klass, size_t size, TRAPS) {
182   HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
183   init_obj(obj, size);
184   return obj;
185 }
186 
187 HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, size_t size, TRAPS) {
188   assert(UseTLAB, "should use UseTLAB");
189 
190   HeapWord* obj = THREAD->tlab().allocate(size);
191   if (obj != NULL) {
192     return obj;
193   }
194   // Otherwise...
195   obj = allocate_from_tlab_slow(klass, size, THREAD);
196   assert(obj == NULL || !HAS_PENDING_EXCEPTION,
197          "Unexpected exception, will result in uninitialized storage");
198   return obj;
199 }
200 
201 void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
202   assert(obj != NULL, "cannot initialize NULL object");
203   const size_t hs = oopDesc::header_size();
204   assert(size >= hs, "unexpected object size");
205   ((oop)obj)->set_klass_gap(0);
206   Copy::fill_to_aligned_words(obj + hs, size - hs);
207 }
208 
209 oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
210   debug_only(check_for_valid_allocation_state());
211   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
212   assert(size >= 0, "int won't convert to size_t");
213   HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
214   post_allocation_setup_obj(klass, obj, size);
215   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
216   return (oop)obj;
217 }
218 
219 oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
220   debug_only(check_for_valid_allocation_state());
221   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
222   assert(size >= 0, "int won't convert to size_t");
223   HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
224   post_allocation_setup_class(klass, obj, size); // set oop_size
225   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
226   return (oop)obj;
227 }
228 
229 oop CollectedHeap::array_allocate(Klass* klass,
230                                   int size,
231                                   int length,
232                                   TRAPS) {
233   debug_only(check_for_valid_allocation_state());
234   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
235   assert(size >= 0, "int won't convert to size_t");
236   HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
237   post_allocation_setup_array(klass, obj, length);
238   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
239   return (oop)obj;
240 }
241 
// Allocate an array WITHOUT zeroing its element storage — presumably for
// callers that overwrite every element immediately (TODO confirm against
// call sites). The header (mark, klass, length, klass gap) is still fully
// initialized.
oop CollectedHeap::array_allocate_nozero(Klass* klass,
                                         int size,
                                         int length,
                                         TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  // Raw allocation: init_obj() is deliberately skipped, so the element
  // words keep whatever the allocator left there.
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  // Clear the klass gap explicitly, since init_obj() (which normally does
  // this) is not called here.
  ((oop)obj)->set_klass_gap(0);
  post_allocation_setup_array(klass, obj, length);
#ifndef PRODUCT
  // Verify words past the header; the +1 skips one extra word beyond
  // header_size() — NOTE(review): presumably to tolerate the un-zeroed
  // body start, confirm intent of the +1.
  const size_t hs = oopDesc::header_size()+1;
  Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
#endif
  return (oop)obj;
}
258 
// Align 'addr' up to 'alignment_in_bytes', provided the aligned address
// still lies strictly below 'end'. Any gap skipped over is filled with a
// filler object so the heap remains parsable. Returns the aligned address,
// or NULL when there is not enough room before 'end'. If the requested
// alignment does not exceed the default object alignment, 'addr' is
// already acceptable and returned unchanged.
inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
                                                         HeapWord* end,
                                                         unsigned short alignment_in_bytes) {
  if (alignment_in_bytes <= ObjectAlignmentInBytes) {
    return addr;
  }

  assert(is_aligned(addr, HeapWordSize),
         "Address " PTR_FORMAT " is not properly aligned.", p2i(addr));
  assert(is_aligned(alignment_in_bytes, HeapWordSize),
         "Alignment size %u is incorrect.", alignment_in_bytes);

  HeapWord* new_addr = align_up(addr, alignment_in_bytes);
  size_t padding = pointer_delta(new_addr, addr);  // gap size, in words

  // Already aligned — no filler needed.
  if (padding == 0) {
    return addr;
  }

  // The gap must be able to hold the minimum filler object; if it cannot,
  // skip ahead by one more alignment granule (converted to words) so that
  // it can.
  if (padding < CollectedHeap::min_fill_size()) {
    padding += alignment_in_bytes / HeapWordSize;
    assert(padding >= CollectedHeap::min_fill_size(),
           "alignment_in_bytes %u is expect to be larger "
           "than the minimum object size", alignment_in_bytes);
    new_addr = addr + padding;
  }

  assert(new_addr > addr, "Unexpected arithmetic overflow "
         PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr));
  if(new_addr < end) {
    // Fill [addr, addr + padding) so heap iteration can walk over the gap.
    CollectedHeap::fill_with_object(addr, padding);
    return new_addr;
  } else {
    // Aligned address would reach (or pass) 'end' — caller must handle.
    return NULL;
  }
}
295 
296 #endif // SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP