81 bool _should_clear_all_soft_refs; 82 // Set to true by the GC if the just-completed gc cleared all 83 // softrefs. This is set to true whenever a gc clears all softrefs, and 84 // set to false each time gc returns to the mutator. For example, in the 85 // ParallelScavengeHeap case the latter would be done toward the end of 86 // mem_allocate() where it returns op.result() 87 bool _all_soft_refs_clear; 88 89 CollectorPolicy() : 90 _min_alignment(1), 91 _max_alignment(1), 92 _initial_heap_byte_size(0), 93 _max_heap_byte_size(0), 94 _min_heap_byte_size(0), 95 _size_policy(NULL), 96 _should_clear_all_soft_refs(false), 97 _all_soft_refs_clear(false) 98 {} 99 100 public: 101 void set_min_alignment(size_t align) { _min_alignment = align; } 102 size_t min_alignment() { return _min_alignment; } 103 void set_max_alignment(size_t align) { _max_alignment = align; } 104 size_t max_alignment() { return _max_alignment; } 105 106 size_t initial_heap_byte_size() { return _initial_heap_byte_size; } 107 void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; } 108 size_t max_heap_byte_size() { return _max_heap_byte_size; } 109 void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; } 110 size_t min_heap_byte_size() { return _min_heap_byte_size; } 111 void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; } 112 113 enum Name { 114 CollectorPolicyKind, 115 TwoGenerationCollectorPolicyKind, 116 ConcurrentMarkSweepPolicyKind, 117 ASConcurrentMarkSweepPolicyKind, 118 G1CollectorPolicyKind 119 }; 120 216 }; 217 218 class GenCollectorPolicy : public CollectorPolicy { 219 protected: 220 size_t _min_gen0_size; 221 size_t _initial_gen0_size; 222 size_t _max_gen0_size; 223 224 GenerationSpec **_generations; 225 226 // Return true if an allocation should be attempted in the older 227 // generation if it fails in the younger generation. Return 228 // false, otherwise. 
229 virtual bool should_try_older_generation_allocation(size_t word_size) const; 230 231 void initialize_flags(); 232 void initialize_size_info(); 233 234 // Try to allocate space by expanding the heap. 235 virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab); 236 237 // compute max heap alignment 238 size_t compute_max_alignment(); 239 240 // Scale the base_size by NewRatio according to 241 // result = base_size / (NewRatio + 1) 242 // and align by min_alignment() 243 size_t scale_by_NewRatio_aligned(size_t base_size); 244 245 // Bound the value by the given maximum minus the 246 // min_alignment. 247 size_t bound_minus_alignment(size_t desired_size, size_t maximum_size); 248 249 public: 250 // Accessors 251 size_t min_gen0_size() { return _min_gen0_size; } 252 void set_min_gen0_size(size_t v) { _min_gen0_size = v; } 253 size_t initial_gen0_size() { return _initial_gen0_size; } 254 void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; } 255 size_t max_gen0_size() { return _max_gen0_size; } 256 void set_max_gen0_size(size_t v) { _max_gen0_size = v; } 257 258 virtual int number_of_generations() = 0; | 81 bool _should_clear_all_soft_refs; 82 // Set to true by the GC if the just-completed gc cleared all 83 // softrefs. This is set to true whenever a gc clears all softrefs, and 84 // set to false each time gc returns to the mutator. 
For example, in the 85 // ParallelScavengeHeap case the latter would be done toward the end of 86 // mem_allocate() where it returns op.result() 87 bool _all_soft_refs_clear; 88 89 CollectorPolicy() : 90 _min_alignment(1), 91 _max_alignment(1), 92 _initial_heap_byte_size(0), 93 _max_heap_byte_size(0), 94 _min_heap_byte_size(0), 95 _size_policy(NULL), 96 _should_clear_all_soft_refs(false), 97 _all_soft_refs_clear(false) 98 {} 99 100 public: 101 // compute (conservative) maximum heap alignment 102 static size_t compute_max_alignment(); 103 104 void set_min_alignment(size_t align) { _min_alignment = align; } 105 size_t min_alignment() { return _min_alignment; } 106 void set_max_alignment(size_t align) { _max_alignment = align; } 107 size_t max_alignment() { return _max_alignment; } 108 109 size_t initial_heap_byte_size() { return _initial_heap_byte_size; } 110 void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; } 111 size_t max_heap_byte_size() { return _max_heap_byte_size; } 112 void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; } 113 size_t min_heap_byte_size() { return _min_heap_byte_size; } 114 void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; } 115 116 enum Name { 117 CollectorPolicyKind, 118 TwoGenerationCollectorPolicyKind, 119 ConcurrentMarkSweepPolicyKind, 120 ASConcurrentMarkSweepPolicyKind, 121 G1CollectorPolicyKind 122 }; 123 219 }; 220 221 class GenCollectorPolicy : public CollectorPolicy { 222 protected: 223 size_t _min_gen0_size; 224 size_t _initial_gen0_size; 225 size_t _max_gen0_size; 226 227 GenerationSpec **_generations; 228 229 // Return true if an allocation should be attempted in the older 230 // generation if it fails in the younger generation. Return 231 // false, otherwise. 232 virtual bool should_try_older_generation_allocation(size_t word_size) const; 233 234 void initialize_flags(); 235 void initialize_size_info(); 236 237 // Try to allocate space by expanding the heap. 
238 virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab); 239 240 // Scale the base_size by NewRatio according to 241 // result = base_size / (NewRatio + 1) 242 // and align by min_alignment() 243 size_t scale_by_NewRatio_aligned(size_t base_size); 244 245 // Bound the value by the given maximum minus the 246 // min_alignment. 247 size_t bound_minus_alignment(size_t desired_size, size_t maximum_size); 248 249 public: 250 // Accessors 251 size_t min_gen0_size() { return _min_gen0_size; } 252 void set_min_gen0_size(size_t v) { _min_gen0_size = v; } 253 size_t initial_gen0_size() { return _initial_gen0_size; } 254 void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; } 255 size_t max_gen0_size() { return _max_gen0_size; } 256 void set_max_gen0_size(size_t v) { _max_gen0_size = v; } 257 258 virtual int number_of_generations() = 0; |