934
  // Reference-processing "keep alive" closure: do_oop_work() marks the
  // object at *p and pushes it on the stored compaction manager's marking
  // stack via mark_and_push() (see the inline definition below).
  class KeepAliveClosure: public OopClosure {
   private:
    ParCompactionManager* _compaction_manager;  // per-GC-thread marking state
   protected:
    template <class T> inline void do_oop_work(T* p);
   public:
    KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };
945
  // VoidClosure adapter around a ParCompactionManager; the work happens in
  // do_void(), defined out of line (presumably it drains the manager's
  // marking stack — confirm against the .cpp).
  class FollowStackClosure: public VoidClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_void();
  };
953
  // Stateless closure used during the adjust phase; do_oop() is defined
  // out of line (presumably forwarding to PSParallelCompact::adjust_pointer
  // — confirm in the .cpp).
  class AdjustPointerClosure: public OopClosure {
   public:
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
    // do not walk from thread stacks to the code cache on this phase
    virtual void do_code_blob(CodeBlob* cb) const { }
  };
961
  // Klass counterpart of AdjustPointerClosure; do_klass() defined out of line.
  class AdjustKlassClosure : public KlassClosure {
   public:
    void do_klass(Klass* klass);
  };
966
  // Grant the closures (and reference-processing proxy) access to the
  // private static state below.
  friend class KeepAliveClosure;
  friend class FollowStackClosure;
  friend class AdjustPointerClosure;
  friend class AdjustKlassClosure;
  friend class FollowKlassClosure;
  friend class InstanceClassLoaderKlass;
  friend class RefProcTaskProxy;

 private:
  static STWGCTimer         _gc_timer;            // stop-the-world GC timing
  static ParallelOldTracer  _gc_tracer;           // GC event tracing
  static elapsedTimer       _accumulated_time;    // exposed via accumulated_time()
  static unsigned int       _total_invocations;   // exposed via total_invocations()
  // Add dense prefix update tasks to the task queue.
  static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                         uint parallel_gc_threads);

  // Add region stealing tasks to the task queue.
  static void enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads);

  // If objects are left in eden after a collection, try to move the boundary
  // and absorb them into the old gen.  Returns true if eden was emptied.
  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                         PSYoungGen* young_gen,
                                         PSOldGen* old_gen);

  // Reset time since last full gc
  static void reset_millis_since_last_gc();
 public:
  // Marking closure holding the per-thread compaction manager; do_oop() is
  // defined out of line (presumably mark-and-push semantics, matching the
  // class name — confirm in the .cpp).
  class MarkAndPushClosure: public OopClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };
1150
  // The one and only place to start following the classes.
  // Should only be applied to the ClassLoaderData klasses list.
  // Delegates the actual oop work to the wrapped MarkAndPushClosure.
  class FollowKlassClosure : public KlassClosure {
   private:
    MarkAndPushClosure* _mark_and_push_closure;  // not owned
   public:
    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
        _mark_and_push_closure(mark_and_push_closure) { }
    void do_klass(Klass* klass);
  };
1161
  PSParallelCompact();

  // Convenient accessor for Universe::heap(); unchecked cast — assumes the
  // collected heap is a ParallelScavengeHeap.
  static ParallelScavengeHeap* gc_heap() {
    return (ParallelScavengeHeap*)Universe::heap();
  }

  static void invoke(bool maximum_heap_compaction);
  static bool invoke_no_policy(bool maximum_heap_compaction);

  static void post_initialize();
  // Perform initialization for PSParallelCompact that requires
  // allocations.  This should be called during the VM initialization
  // at a point where it would be appropriate to return a JNI_ENOMEM
  // in the event of a failure.
  static bool initialize();

  // Closure accessors; the casts convert the concrete static closure
  // objects to their abstract interfaces.
  static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
  static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; }
  static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }

  // Public accessors
  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
  static unsigned int total_invocations() { return _total_invocations; }
  static CollectorCounters* counters() { return _counters; }

  // Used to add tasks
  static GCTaskManager* const gc_task_manager();
  static Klass* updated_int_array_klass_obj() {
    return _updated_int_array_klass_obj;
  }

  // Marking support
  static inline bool mark_obj(oop obj);
  static inline bool is_marked(oop obj);
  // Check mark and maybe push on marking stack
  template <class T> static inline void mark_and_push(ParCompactionManager* cm,
                                                      T* p);
#ifdef ASSERT
  // Sanity check the new location of a word in the heap.
  static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
  // Verify that all the regions have been emptied.
  static void verify_complete(SpaceId space_id);
#endif  // #ifdef ASSERT
};
1320
1321 inline bool PSParallelCompact::mark_obj(oop obj) {
1322 const int obj_size = obj->size();
1323 if (mark_bitmap()->mark_obj(obj, obj_size)) {
1324 _summary_data.add_obj(obj, obj_size);
1325 return true;
1326 } else {
1327 return false;
1328 }
1329 }
1330
// True iff obj has been marked live in the compaction mark bitmap.
inline bool PSParallelCompact::is_marked(oop obj) {
  return mark_bitmap()->is_marked(obj);
}
1334
1335 template <class T>
1336 inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
1337 T heap_oop = oopDesc::load_heap_oop(p);
1338 if (!oopDesc::is_null(heap_oop)) {
1339 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1340 if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
1341 cm->push(obj);
1342 }
1343 }
1344 }
1345
// Rewrite the (possibly narrow) oop slot at p to the referent's
// post-compaction address, as computed from the summary data.
// NULL slots are left untouched.
template <class T>
inline void PSParallelCompact::adjust_pointer(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    oop new_obj = (oop)summary_data().calc_new_pointer(obj);
    assert(new_obj != NULL, // is forwarding ptr?
           "should be forwarded");
    // Just always do the update unconditionally?
    // NOTE(review): the NULL check below is redundant under the assert in
    // debug builds, but is what guards product builds; confirm
    // calc_new_pointer's contract before making the store unconditional.
    if (new_obj != NULL) {
      assert(Universe::heap()->is_in_reserved(new_obj),
             "should be in object space");
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
  }
}
1362
1363 inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
1364 oop holder = klass->klass_holder();
1365 PSParallelCompact::mark_and_push(cm, &holder);
1366 }
1367
// Keep the referent at *p alive: mark it and push it on the closure's
// compaction manager's marking stack.
template <class T>
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(_compaction_manager, p);
}
1372
// Returns the cached _print_phases flag.
inline bool PSParallelCompact::print_phases() {
  return _print_phases;
}
1376
1377 inline double PSParallelCompact::normal_distribution(double density) {
1378 assert(_dwl_initialized, "uninitialized");
1379 const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
1380 return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
1381 }
1382
1383 inline bool
1384 PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
1385 idx_t bit)
|
934
  // Reference-processing "keep alive" closure: do_oop_work() forwards the
  // slot to the stored compaction manager (see the inline definition below).
  class KeepAliveClosure: public OopClosure {
   private:
    ParCompactionManager* _compaction_manager;  // per-GC-thread marking state
   protected:
    template <class T> inline void do_oop_work(T* p);
   public:
    KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };
945
  // VoidClosure adapter around a ParCompactionManager; the work happens in
  // do_void(), defined out of line (presumably it drains the manager's
  // marking stack — confirm against the .cpp).
  class FollowStackClosure: public VoidClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_void();
  };
953
  // Stateless closure used during the adjust phase.  Derives from
  // ExtendedOopClosure and exposes a non-virtual do_oop_nv() template so
  // specialized iterators can bypass virtual dispatch.
  class AdjustPointerClosure: public ExtendedOopClosure {
   public:
    template <typename T> void do_oop_nv(T* p);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);

    // do not walk from thread stacks to the code cache on this phase
    virtual void do_code_blob(CodeBlob* cb) const { }

    // This closure provides its own oop verification code.
    debug_only(virtual bool should_verify_oops() { return false; })
  };
966
  // Klass counterpart of AdjustPointerClosure; do_klass() defined out of line.
  class AdjustKlassClosure : public KlassClosure {
   public:
    void do_klass(Klass* klass);
  };
971
  // Grant the closures (and reference-processing proxy) access to the
  // private static state below.
  friend class KeepAliveClosure;
  friend class FollowStackClosure;
  friend class AdjustPointerClosure;
  friend class AdjustKlassClosure;
  friend class FollowKlassClosure;
  friend class InstanceClassLoaderKlass;
  friend class RefProcTaskProxy;

 private:
  static STWGCTimer         _gc_timer;            // stop-the-world GC timing
  static ParallelOldTracer  _gc_tracer;           // GC event tracing
  static elapsedTimer       _accumulated_time;    // exposed via accumulated_time()
  static unsigned int       _total_invocations;   // exposed via total_invocations()
  // Add dense prefix update tasks to the task queue.
  static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                         uint parallel_gc_threads);

  // Add region stealing tasks to the task queue.
  static void enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads);

  // If objects are left in eden after a collection, try to move the boundary
  // and absorb them into the old gen.  Returns true if eden was emptied.
  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                         PSYoungGen* young_gen,
                                         PSOldGen* old_gen);

  // Reset time since last full gc
  static void reset_millis_since_last_gc();
 public:
  // Marking closure holding the per-thread compaction manager.  Derives
  // from ExtendedOopClosure and exposes a non-virtual do_oop_nv() template
  // so specialized iterators can bypass virtual dispatch.
  class MarkAndPushClosure: public ExtendedOopClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }

    template <typename T> void do_oop_nv(T* p);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);

    // This closure provides its own oop verification code.
    debug_only(virtual bool should_verify_oops() { return false; })
  };
1160
  // The one and only place to start following the classes.
  // Should only be applied to the ClassLoaderData klasses list.
  // Delegates the actual oop work to the wrapped MarkAndPushClosure.
  class FollowKlassClosure : public KlassClosure {
   private:
    MarkAndPushClosure* _mark_and_push_closure;  // not owned
   public:
    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
        _mark_and_push_closure(mark_and_push_closure) { }
    void do_klass(Klass* klass);
  };
1171
  PSParallelCompact();

  // Convenient accessor for Universe::heap(); unchecked cast — assumes the
  // collected heap is a ParallelScavengeHeap.
  static ParallelScavengeHeap* gc_heap() {
    return (ParallelScavengeHeap*)Universe::heap();
  }

  static void invoke(bool maximum_heap_compaction);
  static bool invoke_no_policy(bool maximum_heap_compaction);

  static void post_initialize();
  // Perform initialization for PSParallelCompact that requires
  // allocations.  This should be called during the VM initialization
  // at a point where it would be appropriate to return a JNI_ENOMEM
  // in the event of a failure.
  static bool initialize();

  // Closure accessors.  adjust_pointer_closure() returns the concrete type
  // (callers may need do_oop_nv); the others cast to abstract interfaces.
  static PSParallelCompact::AdjustPointerClosure* adjust_pointer_closure() {
    return &_adjust_pointer_closure;
  }
  static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; }
  static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }

  // Public accessors
  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
  static unsigned int total_invocations() { return _total_invocations; }
  static CollectorCounters* counters() { return _counters; }

  // Used to add tasks
  static GCTaskManager* const gc_task_manager();
  static Klass* updated_int_array_klass_obj() {
    return _updated_int_array_klass_obj;
  }

  // Marking support
  static inline bool mark_obj(oop obj);
  static inline bool is_marked(oop obj);
  // Check mark and maybe push on marking stack
  template <class T> static inline void mark_and_push(ParCompactionManager* cm,
                                                      T* p);
#ifdef ASSERT
  // Sanity check the new location of a word in the heap.
  static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
  // Verify that all the regions have been emptied.
  static void verify_complete(SpaceId space_id);
#endif  // #ifdef ASSERT
};
1332
1333 inline bool PSParallelCompact::mark_obj(oop obj) {
1334 const int obj_size = obj->size();
1335 if (mark_bitmap()->mark_obj(obj, obj_size)) {
1336 _summary_data.add_obj(obj, obj_size);
1337 return true;
1338 } else {
1339 return false;
1340 }
1341 }
1342
// True iff obj has been marked live in the compaction mark bitmap.
inline bool PSParallelCompact::is_marked(oop obj) {
  return mark_bitmap()->is_marked(obj);
}
1346
// Keep the referent at *p alive: mark it and push it on the closure's
// compaction manager's marking stack.
template <class T>
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
  mark_and_push(_compaction_manager, p);
}
1351
// Returns the cached _print_phases flag.
inline bool PSParallelCompact::print_phases() {
  return _print_phases;
}
1355
1356 inline double PSParallelCompact::normal_distribution(double density) {
1357 assert(_dwl_initialized, "uninitialized");
1358 const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
1359 return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
1360 }
1361
1362 inline bool
1363 PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
1364 idx_t bit)
|