186 assert(_first_src_addr == NULL, "not clear");
187 }
188 #endif // #ifdef ASSERT
189
190
// Error-reporting hook: forwards to the mark bitmap's own
// print_on_error() so its state appears in the error output.
191 void PSParallelCompact::print_on_error(outputStream* st) {
192 _mark_bitmap.print_on_error(st);
193 }
194
195 #ifndef PRODUCT
// Human-readable space names, indexed by space id (see the
// space_names[id] lookups below); entries are padded to a fixed
// width so trace columns line up.
196 const char* PSParallelCompact::space_names[] = {
197 "old ", "eden", "from", "to "
198 };
199
200 void PSParallelCompact::print_region_ranges() {
201 if (!log_develop_is_enabled(Trace, gc, compaction)) {
202 return;
203 }
204 Log(gc, compaction) log;
205 ResourceMark rm;
206 Universe::print_on(log.trace_stream());
207 log.trace("space bottom top end new_top");
208 log.trace("------ ---------- ---------- ---------- ----------");
209
210 for (unsigned int id = 0; id < last_space_id; ++id) {
211 const MutableSpace* space = _space_info[id].space();
212 log.trace("%u %s "
213 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
214 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
215 id, space_names[id],
216 summary_data().addr_to_region_idx(space->bottom()),
217 summary_data().addr_to_region_idx(space->top()),
218 summary_data().addr_to_region_idx(space->end()),
219 summary_data().addr_to_region_idx(_space_info[id].new_top()));
220 }
221 }
222
223 void
224 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
225 {
226 #define REGION_IDX_FORMAT SIZE_FORMAT_W(7)
2352 GCTraceTime(Trace, gc, phases) tm("Steal Task Setup", &_gc_timer);
2353
2354 // Once a thread has drained it's stack, it should try to steal regions from
2355 // other threads.
2356 for (uint j = 0; j < parallel_gc_threads; j++) {
2357 q->enqueue(new CompactionWithStealingTask(terminator_ptr));
2358 }
2359 }
2360
2361 #ifdef ASSERT
2362 // Write a histogram of the number of times the block table was filled for a
2363 // region.
2364 void PSParallelCompact::write_block_fill_histogram()
2365 {
2366 if (!log_develop_is_enabled(Trace, gc, compaction)) {
2367 return;
2368 }
2369
2370 Log(gc, compaction) log;
2371 ResourceMark rm;
2372 outputStream* out = log.trace_stream();
2373
2374 typedef ParallelCompactData::RegionData rd_t;
2375 ParallelCompactData& sd = summary_data();
2376
2377 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2378 MutableSpace* const spc = _space_info[id].space();
2379 if (spc->bottom() != spc->top()) {
2380 const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2381 HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2382 const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2383
2384 size_t histo[5] = { 0, 0, 0, 0, 0 };
2385 const size_t histo_len = sizeof(histo) / sizeof(size_t);
2386 const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2387
2388 for (const rd_t* cur = beg; cur < end; ++cur) {
2389 ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2390 }
2391 out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2392 for (size_t i = 0; i < histo_len; ++i) {
|
186 assert(_first_src_addr == NULL, "not clear");
187 }
188 #endif // #ifdef ASSERT
189
190
// Error-reporting hook: forwards to the mark bitmap's own
// print_on_error() so its state appears in the error output.
191 void PSParallelCompact::print_on_error(outputStream* st) {
192 _mark_bitmap.print_on_error(st);
193 }
194
195 #ifndef PRODUCT
// Human-readable space names, indexed by space id (see the
// space_names[id] lookups below); entries are padded to a fixed
// width so trace columns line up.
196 const char* PSParallelCompact::space_names[] = {
197 "old ", "eden", "from", "to "
198 };
199
200 void PSParallelCompact::print_region_ranges() {
201 if (!log_develop_is_enabled(Trace, gc, compaction)) {
202 return;
203 }
204 Log(gc, compaction) log;
205 ResourceMark rm;
206 LogStream ls(log.trace());
207 Universe::print_on(&ls);
208 log.trace("space bottom top end new_top");
209 log.trace("------ ---------- ---------- ---------- ----------");
210
211 for (unsigned int id = 0; id < last_space_id; ++id) {
212 const MutableSpace* space = _space_info[id].space();
213 log.trace("%u %s "
214 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
215 SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
216 id, space_names[id],
217 summary_data().addr_to_region_idx(space->bottom()),
218 summary_data().addr_to_region_idx(space->top()),
219 summary_data().addr_to_region_idx(space->end()),
220 summary_data().addr_to_region_idx(_space_info[id].new_top()));
221 }
222 }
223
224 void
225 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
226 {
227 #define REGION_IDX_FORMAT SIZE_FORMAT_W(7)
2353 GCTraceTime(Trace, gc, phases) tm("Steal Task Setup", &_gc_timer);
2354
2355 // Once a thread has drained it's stack, it should try to steal regions from
2356 // other threads.
2357 for (uint j = 0; j < parallel_gc_threads; j++) {
2358 q->enqueue(new CompactionWithStealingTask(terminator_ptr));
2359 }
2360 }
2361
2362 #ifdef ASSERT
2363 // Write a histogram of the number of times the block table was filled for a
2364 // region.
2365 void PSParallelCompact::write_block_fill_histogram()
2366 {
2367 if (!log_develop_is_enabled(Trace, gc, compaction)) {
2368 return;
2369 }
2370
2371 Log(gc, compaction) log;
2372 ResourceMark rm;
2373 LogStream ls(log.trace());
2374 outputStream* out = &ls;
2375
2376 typedef ParallelCompactData::RegionData rd_t;
2377 ParallelCompactData& sd = summary_data();
2378
2379 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2380 MutableSpace* const spc = _space_info[id].space();
2381 if (spc->bottom() != spc->top()) {
2382 const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2383 HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2384 const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2385
2386 size_t histo[5] = { 0, 0, 0, 0, 0 };
2387 const size_t histo_len = sizeof(histo) / sizeof(size_t);
2388 const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2389
2390 for (const rd_t* cur = beg; cur < end; ++cur) {
2391 ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2392 }
2393 out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2394 for (size_t i = 0; i < histo_len; ++i) {
|