189 assert(_first_src_addr == NULL, "not clear");
190 }
191 #endif // #ifdef ASSERT
192
193
// Print the state of the mark bitmap to st. Only delegates to the
// bitmap's own print_on_error; presumably called from error-reporting
// paths (hs_err), so it should stay minimal — verify against callers.
194 void PSParallelCompact::print_on_error(outputStream* st) {
195 _mark_bitmap.print_on_error(st);
196 }
197
198 #ifndef PRODUCT
// Human-readable names for the compaction spaces, indexed by space id
// (used as space_names[id] in the trace-logging code below). Each name
// is padded to four characters so the logged columns line up.
199 const char* PSParallelCompact::space_names[] = {
200 "old ", "eden", "from", "to "
201 };
202
203 void PSParallelCompact::print_region_ranges() {
204 if (!log_develop_is_enabled(Trace, gc, compaction)) {
205 return;
206 }
207 Log(gc, compaction) log;
208 ResourceMark rm;
209 Universe::print_on(log.trace_stream());
210 log.trace("space bottom top end new_top");
211 log.trace("------ ---------- ---------- ---------- ----------");
212
213 for (unsigned int id = 0; id < last_space_id; ++id) {
214 const MutableSpace* space = _space_info[id].space();
215 log.trace("%u %s "
216 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
217 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
218 id, space_names[id],
219 summary_data().addr_to_region_idx(space->bottom()),
220 summary_data().addr_to_region_idx(space->top()),
221 summary_data().addr_to_region_idx(space->end()),
222 summary_data().addr_to_region_idx(_space_info[id].new_top()));
223 }
224 }
225
226 void
227 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
228 {
229 #define REGION_IDX_FORMAT SIZE_FORMAT_W(7)
2355 GCTraceTime(Trace, gc, phases) tm("Steal Task Setup", &_gc_timer);
2356
2357 // Once a thread has drained it's stack, it should try to steal regions from
2358 // other threads.
2359 for (uint j = 0; j < parallel_gc_threads; j++) {
2360 q->enqueue(new CompactionWithStealingTask(terminator_ptr));
2361 }
2362 }
2363
2364 #ifdef ASSERT
2365 // Write a histogram of the number of times the block table was filled for a
2366 // region.
2367 void PSParallelCompact::write_block_fill_histogram()
2368 {
2369 if (!log_develop_is_enabled(Trace, gc, compaction)) {
2370 return;
2371 }
2372
2373 Log(gc, compaction) log;
2374 ResourceMark rm;
2375 outputStream* out = log.trace_stream();
2376
2377 typedef ParallelCompactData::RegionData rd_t;
2378 ParallelCompactData& sd = summary_data();
2379
2380 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2381 MutableSpace* const spc = _space_info[id].space();
2382 if (spc->bottom() != spc->top()) {
2383 const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2384 HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2385 const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2386
2387 size_t histo[5] = { 0, 0, 0, 0, 0 };
2388 const size_t histo_len = sizeof(histo) / sizeof(size_t);
2389 const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2390
2391 for (const rd_t* cur = beg; cur < end; ++cur) {
2392 ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2393 }
2394 out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2395 for (size_t i = 0; i < histo_len; ++i) {
|
189 assert(_first_src_addr == NULL, "not clear");
190 }
191 #endif // #ifdef ASSERT
192
193
// Print the state of the mark bitmap to st. Only delegates to the
// bitmap's own print_on_error; presumably called from error-reporting
// paths (hs_err), so it should stay minimal — verify against callers.
194 void PSParallelCompact::print_on_error(outputStream* st) {
195 _mark_bitmap.print_on_error(st);
196 }
197
198 #ifndef PRODUCT
// Human-readable names for the compaction spaces, indexed by space id
// (used as space_names[id] in the trace-logging code below). Each name
// is padded to four characters so the logged columns line up.
199 const char* PSParallelCompact::space_names[] = {
200 "old ", "eden", "from", "to "
201 };
202
203 void PSParallelCompact::print_region_ranges() {
204 if (!log_develop_is_enabled(Trace, gc, compaction)) {
205 return;
206 }
207 Log(gc, compaction) log;
208 ResourceMark rm;
209 LogStream ls(log.trace());
210 Universe::print_on(&ls);
211 log.trace("space bottom top end new_top");
212 log.trace("------ ---------- ---------- ---------- ----------");
213
214 for (unsigned int id = 0; id < last_space_id; ++id) {
215 const MutableSpace* space = _space_info[id].space();
216 log.trace("%u %s "
217 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
218 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
219 id, space_names[id],
220 summary_data().addr_to_region_idx(space->bottom()),
221 summary_data().addr_to_region_idx(space->top()),
222 summary_data().addr_to_region_idx(space->end()),
223 summary_data().addr_to_region_idx(_space_info[id].new_top()));
224 }
225 }
226
227 void
228 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
229 {
230 #define REGION_IDX_FORMAT SIZE_FORMAT_W(7)
2356 GCTraceTime(Trace, gc, phases) tm("Steal Task Setup", &_gc_timer);
2357
2358 // Once a thread has drained it's stack, it should try to steal regions from
2359 // other threads.
2360 for (uint j = 0; j < parallel_gc_threads; j++) {
2361 q->enqueue(new CompactionWithStealingTask(terminator_ptr));
2362 }
2363 }
2364
2365 #ifdef ASSERT
2366 // Write a histogram of the number of times the block table was filled for a
2367 // region.
2368 void PSParallelCompact::write_block_fill_histogram()
2369 {
2370 if (!log_develop_is_enabled(Trace, gc, compaction)) {
2371 return;
2372 }
2373
2374 Log(gc, compaction) log;
2375 ResourceMark rm;
2376 LogStream ls(log.trace());
2377 outputStream* out = &ls;
2378
2379 typedef ParallelCompactData::RegionData rd_t;
2380 ParallelCompactData& sd = summary_data();
2381
2382 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2383 MutableSpace* const spc = _space_info[id].space();
2384 if (spc->bottom() != spc->top()) {
2385 const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2386 HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2387 const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2388
2389 size_t histo[5] = { 0, 0, 0, 0, 0 };
2390 const size_t histo_len = sizeof(histo) / sizeof(size_t);
2391 const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2392
2393 for (const rd_t* cur = beg; cur < end; ++cur) {
2394 ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2395 }
2396 out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2397 for (size_t i = 0; i < histo_len; ++i) {
|