127
  // Create a new tlab. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);

  // Accumulate statistics on all tlabs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with broken-out slow path.
  // allocate_from_tlab_slow() is the out-of-line portion, taken when the
  // fast-path TLAB allocation cannot satisfy the request.
  inline static HeapWord* allocate_from_tlab(Klass* klass, size_t size, TRAPS);
  static HeapWord* allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS);
146
  // Allocate an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(Klass* klass, size_t size, TRAPS);

  // Helper functions for (VM) allocation: post-allocation setup of the newly
  // allocated memory for the various object kinds.
  inline static void post_allocation_setup_common(Klass* klass, HeapWord* obj);
  // NOTE(review): per its name this variant presumably skips installing the
  // klass pointer -- confirm against the .inline.hpp implementation.
  inline static void post_allocation_setup_no_klass_install(Klass* klass,
                                                            HeapWord* objPtr);

  inline static void post_allocation_setup_obj(Klass* klass, HeapWord* obj, int size);

  inline static void post_allocation_setup_array(Klass* klass,
                                                 HeapWord* obj, int length);

  inline static void post_allocation_setup_class(Klass* klass, HeapWord* obj, int size);
166
291
292 bool is_in_closed_subset_or_null(const void* p) const {
293 return p == NULL || is_in_closed_subset(p);
294 }
295
  // Record the cause of the current GC. When UsePerfData is enabled, the
  // previous cause is first saved in _gc_lastcause and both the last and
  // the new cause are published to the perf counters. The order matters:
  // _gc_lastcause must snapshot _gc_cause before it is overwritten below.
  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  // Returns the cause recorded by the most recent set_gc_cause().
  GCCause::Cause gc_cause() { return _gc_cause; }
305
  // General obj/array allocation facilities.
  inline static oop obj_allocate(Klass* klass, int size, TRAPS);
  inline static oop array_allocate(Klass* klass, int size, int length, TRAPS);
  // Presumably skips zeroing the array body (per its name) -- confirm in
  // the .inline.hpp implementation.
  inline static oop array_allocate_nozero(Klass* klass, int size, int length, TRAPS);
  inline static oop class_allocate(Klass* klass, int size, TRAPS);

  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs,
  // only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;

  // Raw memory allocation. This may or may not use TLAB allocations to
  // satisfy the allocation. A GC implementation may override this function
  // to satisfy the allocation in any way. But the default is to try a TLAB
  // allocation, and otherwise perform mem_allocate.
  virtual HeapWord* obj_allocate_raw(Klass* klass, size_t size,
                                     bool* gc_overhead_limit_was_exceeded, TRAPS);
325
  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects. fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    // The smallest filler is a bare object header, rounded up to the
    // minimum object alignment.
    return size_t(align_object_size(oopDesc::header_size()));
  }

  // zap: when true, overwrite the filled region with a debug pattern.
  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
|
127
  // Create a new tlab. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);

  // Accumulate statistics on all tlabs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with broken-out slow path.
  // allocate_from_tlab_slow() is the out-of-line portion, taken when the
  // fast-path TLAB allocation cannot satisfy the request.
  inline static HeapWord* allocate_from_tlab(Klass* klass, size_t size, TRAPS);
  static HeapWord* allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS);
146
  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs,
  // only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
153
  // Allocate an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(Klass* klass, size_t size, TRAPS);

  // Helper functions for (VM) allocation: post-allocation setup of the newly
  // allocated memory for the various object kinds.
  inline static void post_allocation_setup_common(Klass* klass, HeapWord* obj);
  // NOTE(review): per its name this variant presumably skips installing the
  // klass pointer -- confirm against the .inline.hpp implementation.
  inline static void post_allocation_setup_no_klass_install(Klass* klass,
                                                            HeapWord* objPtr);

  inline static void post_allocation_setup_obj(Klass* klass, HeapWord* obj, int size);

  inline static void post_allocation_setup_array(Klass* klass,
                                                 HeapWord* obj, int length);

  inline static void post_allocation_setup_class(Klass* klass, HeapWord* obj, int size);
173
298
299 bool is_in_closed_subset_or_null(const void* p) const {
300 return p == NULL || is_in_closed_subset(p);
301 }
302
  // Record the cause of the current GC. When UsePerfData is enabled, the
  // previous cause is first saved in _gc_lastcause and both the last and
  // the new cause are published to the perf counters. The order matters:
  // _gc_lastcause must snapshot _gc_cause before it is overwritten below.
  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  // Returns the cause recorded by the most recent set_gc_cause().
  GCCause::Cause gc_cause() { return _gc_cause; }
312
  // General obj/array allocation facilities.
  inline static oop obj_allocate(Klass* klass, int size, TRAPS);
  inline static oop array_allocate(Klass* klass, int size, int length, TRAPS);
  // Presumably skips zeroing the array body (per its name) -- confirm in
  // the .inline.hpp implementation.
  inline static oop array_allocate_nozero(Klass* klass, int size, int length, TRAPS);
  inline static oop class_allocate(Klass* klass, int size, TRAPS);

  // Raw memory allocation. This may or may not use TLAB allocations to
  // satisfy the allocation. A GC implementation may override this function
  // to satisfy the allocation in any way. But the default is to try a TLAB
  // allocation, and otherwise perform mem_allocate.
  virtual HeapWord* obj_allocate_raw(Klass* klass, size_t size,
                                     bool* gc_overhead_limit_was_exceeded, TRAPS);
325
  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects. fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    // The smallest filler is a bare object header, rounded up to the
    // minimum object alignment.
    return size_t(align_object_size(oopDesc::header_size()));
  }

  // zap: when true, overwrite the filled region with a debug pattern.
  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
|