263 }
264 } else {
265 // Too large; allocate the object individually.
266 obj = sp->par_allocate(word_sz);
267 }
268 }
269 return obj;
270 }
271
// Returns an unused to-space allocation of word_sz words at obj back to
// this thread's to-space allocation buffer.
void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}
275
// Emits a trace-level GC log line with this thread's number and the size of
// the first object whose promotion failed — only if a failure was recorded.
void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed()) {
    log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                             _thread_num, _promotion_failed_info.first_size());
  }
}
282
// A set of ParScanThreadState instances, one per GC worker thread, stored
// contiguously in memory managed by the private ResourceArray base class.
class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& young_gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        PreservedMarksSet& preserved_marks_set,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  // Resets task-queue statistics on teardown (no-op unless TASKQUEUE_STATS).
  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  // Returns the state for worker i; i is asserted to be a valid index.
  inline ParScanThreadState& thread_state(int i);

  // Reports and clears each thread's promotion-failure record.
  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
  // Re-arms the terminator and, after a failed promotion, prints per-thread
  // failure sizes.
  void reset(uint active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
  print_termination_stats_hdr(outputStream* const st);
  void print_termination_stats();
  static void
  print_taskqueue_stats_hdr(outputStream* const st);
  void print_taskqueue_stats();
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration& _young_gen;
  Generation& _old_gen;
public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};
322
323 ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
324 Space& to_space,
325 ParNewGeneration& young_gen,
326 Generation& old_gen,
327 ObjToScanQueueSet& queue_set,
328 Stack<oop, mtGC>* overflow_stacks,
329 PreservedMarksSet& preserved_marks_set,
330 size_t desired_plab_sz,
331 ParallelTaskTerminator& term)
332 : ResourceArray(sizeof(ParScanThreadState), num_threads),
333 _young_gen(young_gen),
334 _old_gen(old_gen),
335 _term(term)
336 {
337 assert(num_threads > 0, "sanity check!");
338 assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
339 "overflow_stack allocation mismatch");
340 // Initialize states.
341 for (int i = 0; i < num_threads; ++i) {
342 new ((ParScanThreadState*)_data + i)
343 ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
344 overflow_stacks, preserved_marks_set.get(i),
345 desired_plab_sz, term);
346 }
347 }
348
// Returns a reference to the i-th worker's state, viewing the base class's
// raw _data storage as an array of ParScanThreadState.
inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}
353
354 void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
355 for (int i = 0; i < length(); ++i) {
356 if (thread_state(i).promotion_failed()) {
357 gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
358 thread_state(i).promotion_failed_info().reset();
359 }
360 }
361 }
362
363 void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
364 _term.reset_for_reuse(active_threads);
365 if (promotion_failed) {
366 for (int i = 0; i < length(); ++i) {
367 thread_state(i).print_promotion_failure_size();
368 }
369 }
370 }
371
|
263 }
264 } else {
265 // Too large; allocate the object individually.
266 obj = sp->par_allocate(word_sz);
267 }
268 }
269 return obj;
270 }
271
// Returns an unused to-space allocation of word_sz words at obj back to
// this thread's to-space allocation buffer.
void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}
275
// Emits a trace-level GC log line with this thread's number and the size of
// the first object whose promotion failed — only if a failure was recorded.
void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed()) {
    log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                             _thread_num, _promotion_failed_info.first_size());
  }
}
282
283 class ParScanThreadStateSet: private GenericGrowableArray {
284 public:
285 // Initializes states for the specified number of threads;
286 ParScanThreadStateSet(int num_threads,
287 Space& to_space,
288 ParNewGeneration& young_gen,
289 Generation& old_gen,
290 ObjToScanQueueSet& queue_set,
291 Stack<oop, mtGC>* overflow_stacks_,
292 PreservedMarksSet& preserved_marks_set,
293 size_t desired_plab_sz,
294 ParallelTaskTerminator& term);
295
296 ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
297
298 inline ParScanThreadState& thread_state(int i);
299
300 void trace_promotion_failed(const YoungGCTracer* gc_tracer);
301 void reset(uint active_workers, bool promotion_failed);
302 void flush();
303
304 #if TASKQUEUE_STATS
305 static void
306 print_termination_stats_hdr(outputStream* const st);
307 void print_termination_stats();
308 static void
309 print_taskqueue_stats_hdr(outputStream* const st);
310 void print_taskqueue_stats();
311 void reset_stats();
312 #endif // TASKQUEUE_STATS
313
314 private:
315 ParallelTaskTerminator& _term;
316 ParNewGeneration& _young_gen;
317 Generation& _old_gen;
318 ParScanThreadState* _data;
319 int length() const { return _len; };
320 public:
321 bool is_valid(int id) const { return id < length(); }
322 ParallelTaskTerminator* terminator() { return &_term; }
323 };
324
325 ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
326 Space& to_space,
327 ParNewGeneration& young_gen,
328 Generation& old_gen,
329 ObjToScanQueueSet& queue_set,
330 Stack<oop, mtGC>* overflow_stacks,
331 PreservedMarksSet& preserved_marks_set,
332 size_t desired_plab_sz,
333 ParallelTaskTerminator& term)
334 : GenericGrowableArray(num_threads, num_threads, false, mtInternal),
335 _young_gen(young_gen),
336 _old_gen(old_gen),
337 _term(term)
338 {
339 assert(num_threads > 0, "sanity check!");
340 assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
341 "overflow_stack allocation mismatch");
342 // Initialize states.
343 _data = (ParScanThreadState*) raw_allocate(sizeof(ParScanThreadState));
344 for (int i = 0; i < num_threads; ++i) {
345 ::new((void*) &_data[i])
346 ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
347 overflow_stacks, preserved_marks_set.get(i),
348 desired_plab_sz, term);
349 }
350 }
351
// Returns a reference to the i-th worker's state in the _data array.
inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
  assert(i >= 0 && i < length(), "sanity check!");
  return _data[i];
}
356
357 void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
358 for (int i = 0; i < length(); ++i) {
359 if (thread_state(i).promotion_failed()) {
360 gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
361 thread_state(i).promotion_failed_info().reset();
362 }
363 }
364 }
365
366 void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
367 _term.reset_for_reuse(active_threads);
368 if (promotion_failed) {
369 for (int i = 0; i < length(); ++i) {
370 thread_state(i).print_promotion_failure_size();
371 }
372 }
373 }
374
|