//   ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  friend class MemAllocator;

 private:
#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

  GCHeapLog* _gc_heap_log;

  MemRegion _reserved;

 protected:
  bool _is_gc_active;

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection. Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new TLAB. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies the
  // minimum size needed, while requested_size is the requested size
  // based on ergonomics. The size actually allocated is returned in
  // actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);
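  // Illustrative sketch (not part of this header): a concrete heap honoring
  // the min/requested/actual contract above might implement this roughly as
  // follows, where MyHeap, unsafe_max_tlab_alloc() and allocate_raw() are
  // hypothetical names:
  //
  //   HeapWord* MyHeap::allocate_new_tlab(size_t min_size,
  //                                       size_t requested_size,
  //                                       size_t* actual_size) {
  //     size_t size = MIN2(requested_size, unsafe_max_tlab_alloc());
  //     if (size < min_size) {
  //       return NULL;             // cannot satisfy even the minimum
  //     }
  //     HeapWord* mem = allocate_raw(size);
  //     if (mem != NULL) {
  //       *actual_size = size;     // report what was actually handed out
  //     }
  //     return mem;
  //   }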
  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Raw memory allocation facilities. The obj and array allocate methods
  // are covers for these methods. mem_allocate() should never be called
  // to allocate TLABs, only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    None,
    Serial,
    Parallel,
    CMS,
    G1,
    Epsilon,
    Z
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization
  // activities after the Universe is fully formed, but before general
  // heap allocation is allowed. This is the correct place to put such
  // initialization methods.
  virtual void post_initialize();

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  // Stop and resume concurrent GC threads interfering with safepoint operations
  virtual void safepoint_synchronize_begin() {}

  // ... (declarations elided in this excerpt) ...

  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  virtual oop obj_allocate(Klass* klass, int size, TRAPS);
  virtual oop array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS);
  virtual oop class_allocate(Klass* klass, int size, TRAPS);

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects. fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }

  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
  virtual size_t min_dummy_object_size() const;
  size_t tlab_alloc_reserve() const;

  // Return the address "addr" aligned by "alignment_in_bytes" if such
  // an address is below "end". Return NULL otherwise.
  inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
                                                   HeapWord* end,
                                                   unsigned short alignment_in_bytes);

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation. (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const {
    // ... (the excerpt ends here, mid-declaration)
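To make the filler API concrete, here is a hedged sketch of its canonical use: when a TLAB is retired, its unused tail must be overwritten with a dead filler object so heap walkers can still parse the region. The helper name retire_tlab_remainder is hypothetical; fill_with_object(), min_fill_size() and tlab_alloc_reserve() are the members declared above, and pointer_delta() is the usual HotSpot word-distance utility.

  #include "gc/shared/collectedHeap.hpp"

  // Hypothetical helper (name is ours): plug a retired TLAB's unused tail
  // with a filler object so the heap stays parseable for iteration and
  // verification.
  static void retire_tlab_remainder(HeapWord* top, HeapWord* hard_end) {
    size_t remaining = pointer_delta(hard_end, top);  // distance in HeapWords
    if (remaining >= CollectedHeap::min_fill_size()) {
      // A single object suffices for regions no larger than the largest int
      // array; arbitrarily large regions would use fill_with_objects().
      CollectedHeap::fill_with_object(top, remaining);
    }
    // Tails smaller than min_fill_size() cannot occur if the allocation path
    // reserved tlab_alloc_reserve() words up front, as the TLAB code does.
  }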
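The contract of align_allocation_or_fail() can be pinned down with a simplified sketch. Note that the shipped inline implementation (in collectedHeap.inline.hpp) also has to fill the skipped-over padding with a dummy object so the heap stays parseable; the version below, under our hypothetical name align_or_fail_sketch, shows only the align-and-bounds-check core.

  #include "utilities/align.hpp"

  // Simplified sketch (ours) of align_allocation_or_fail(): round addr up
  // to the requested alignment and fail if the result is not below end.
  static HeapWord* align_or_fail_sketch(HeapWord* addr, HeapWord* end,
                                        unsigned short alignment_in_bytes) {
    HeapWord* aligned = align_up(addr, alignment_in_bytes);
    return aligned < end ? aligned : NULL;
  }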
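Finally, supports_inline_contig_alloc() and top_addr() exist so that compiled code can bump-allocate from a shared region without taking a lock. The sketch below is ours, not code from this file: it assumes the JDK 11-era Atomic::cmpxchg(new_value, dest, compare_value) argument order and a companion end_addr() accessor, which the comment above implies and which sits just past where this excerpt is cut off.

  #include "gc/shared/collectedHeap.hpp"
  #include "runtime/atomic.hpp"

  // Sketch (ours) of the lock-free bump-allocation loop that the exported
  // top/end fields make possible; JIT-compiled code inlines the same CAS
  // dance directly.
  static HeapWord* inline_contig_allocate(CollectedHeap* heap, size_t size) {
    HeapWord* volatile* top_addr = heap->top_addr();
    while (true) {
      HeapWord* old_top = *top_addr;
      HeapWord* new_top = old_top + size;
      if (new_top > *heap->end_addr()) {
        return NULL;               // fast path exhausted; caller takes slow path
      }
      // Try to publish the new top; retry if another thread won the race.
      if (Atomic::cmpxchg(new_top, top_addr, old_top) == old_top) {
        return old_top;            // this thread owns [old_top, new_top)
      }
    }
  }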