23
24 #include "precompiled.hpp"
25 #include "classfile/javaClasses.inline.hpp"
26 #include "gc/parallel/gcTaskManager.hpp"
27 #include "gc/parallel/mutableSpace.hpp"
28 #include "gc/parallel/parallelScavengeHeap.hpp"
29 #include "gc/parallel/psOldGen.hpp"
30 #include "gc/parallel/psPromotionManager.inline.hpp"
31 #include "gc/parallel/psScavenge.inline.hpp"
32 #include "gc/shared/gcTrace.hpp"
33 #include "gc/shared/preservedMarks.inline.hpp"
34 #include "gc/shared/taskqueue.inline.hpp"
35 #include "logging/log.hpp"
36 #include "logging/logStream.hpp"
37 #include "memory/allocation.inline.hpp"
38 #include "memory/iterator.inline.hpp"
39 #include "memory/memRegion.hpp"
40 #include "memory/padded.inline.hpp"
41 #include "memory/resourceArea.hpp"
42 #include "oops/access.inline.hpp"
43 #include "oops/arrayOop.inline.hpp"
44 #include "oops/compressedOops.inline.hpp"
45 #include "oops/instanceClassLoaderKlass.inline.hpp"
46 #include "oops/instanceKlass.inline.hpp"
47 #include "oops/instanceMirrorKlass.inline.hpp"
48 #include "oops/objArrayKlass.inline.hpp"
49 #include "oops/objArrayOop.inline.hpp"
50 #include "oops/oop.inline.hpp"
51
52 PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
53 OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
54 PreservedMarksSet* PSPromotionManager::_preserved_marks_set = NULL;
55 PSOldGen* PSPromotionManager::_old_gen = NULL;
56 MutableSpace* PSPromotionManager::_young_space = NULL;
57
58 void PSPromotionManager::initialize() {
59 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
60
61 _old_gen = heap->old_gen();
62 _young_space = heap->young_gen()->to_space();
63
64 const uint promotion_manager_num = ParallelGCThreads + 1;
65
66 // To prevent false sharing, we pad the PSPromotionManagers
67 // and make sure that the first instance starts at a cache line.
68 assert(_manager_array == NULL, "Attempt to initialize twice");
69 _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(promotion_manager_num);
|
23
24 #include "precompiled.hpp"
25 #include "classfile/javaClasses.inline.hpp"
26 #include "gc/parallel/gcTaskManager.hpp"
27 #include "gc/parallel/mutableSpace.hpp"
28 #include "gc/parallel/parallelScavengeHeap.hpp"
29 #include "gc/parallel/psOldGen.hpp"
30 #include "gc/parallel/psPromotionManager.inline.hpp"
31 #include "gc/parallel/psScavenge.inline.hpp"
32 #include "gc/shared/gcTrace.hpp"
33 #include "gc/shared/preservedMarks.inline.hpp"
34 #include "gc/shared/taskqueue.inline.hpp"
35 #include "logging/log.hpp"
36 #include "logging/logStream.hpp"
37 #include "memory/allocation.inline.hpp"
38 #include "memory/iterator.inline.hpp"
39 #include "memory/memRegion.hpp"
40 #include "memory/padded.inline.hpp"
41 #include "memory/resourceArea.hpp"
42 #include "oops/access.inline.hpp"
43 #include "oops/compressedOops.inline.hpp"
44
45 PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
46 OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
47 PreservedMarksSet* PSPromotionManager::_preserved_marks_set = NULL;
48 PSOldGen* PSPromotionManager::_old_gen = NULL;
49 MutableSpace* PSPromotionManager::_young_space = NULL;
50
51 void PSPromotionManager::initialize() {
52 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
53
54 _old_gen = heap->old_gen();
55 _young_space = heap->young_gen()->to_space();
56
57 const uint promotion_manager_num = ParallelGCThreads + 1;
58
59 // To prevent false sharing, we pad the PSPromotionManagers
60 // and make sure that the first instance starts at a cache line.
61 assert(_manager_array == NULL, "Attempt to initialize twice");
62 _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(promotion_manager_num);
|
377 int const end = arrayOop(old)->length();
378 if (end > (int) _min_array_size_for_chunking) {
379 // we'll chunk more
380 start = end - _array_chunk_size;
381 assert(start > 0, "invariant");
382 arrayOop(old)->set_length(start);
383 push_depth(mask_chunked_array_oop(old));
384 TASKQUEUE_STATS_ONLY(++_masked_pushes);
385 } else {
386 // this is the final chunk for this array
387 start = 0;
388 int const actual_length = arrayOop(obj)->length();
389 arrayOop(old)->set_length(actual_length);
390 }
391
392 if (UseCompressedOops) {
393 process_array_chunk_work<narrowOop>(obj, start, end);
394 } else {
395 process_array_chunk_work<oop>(obj, start, end);
396 }
397 }
398
399 class PushContentsClosure : public BasicOopIterateClosure {
400 PSPromotionManager* _pm;
401 public:
402 PushContentsClosure(PSPromotionManager* pm) : _pm(pm) {}
403
404 template <typename T> void do_oop_work(T* p) {
405 if (PSScavenge::should_scavenge(p)) {
406 _pm->claim_or_forward_depth(p);
407 }
408 }
409
410 virtual void do_oop(oop* p) { do_oop_work(p); }
411 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
412
413 // Don't use the oop verification code in the oop_oop_iterate framework.
414 debug_only(virtual bool should_verify_oops() { return false; })
415 };
416
417 void InstanceKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
418 PushContentsClosure cl(pm);
419 if (UseCompressedOops) {
420 oop_oop_iterate_oop_maps_reverse<narrowOop>(obj, &cl);
421 } else {
422 oop_oop_iterate_oop_maps_reverse<oop>(obj, &cl);
423 }
424 }
425
426 void InstanceMirrorKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
427 // Note that we don't have to follow the mirror -> klass pointer, since all
428 // klasses that are dirty will be scavenged when we iterate over the
429 // ClassLoaderData objects.
430
431 InstanceKlass::oop_ps_push_contents(obj, pm);
432
433 PushContentsClosure cl(pm);
434 if (UseCompressedOops) {
435 oop_oop_iterate_statics<narrowOop>(obj, &cl);
436 } else {
437 oop_oop_iterate_statics<oop>(obj, &cl);
438 }
439 }
440
// Push the scavengable oops of a class loader instance. Only the regular
// instance fields need to be visited here.
void InstanceClassLoaderKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  InstanceKlass::oop_ps_push_contents(obj, pm);

  // This is called by the young collector. It will already have taken care of
  // all class loader data. So, we don't have to follow the class loader ->
  // class loader data link.
}
448
// Push the contents of a java.lang.ref.Reference instance, giving the
// referent field special treatment: instead of always pushing it like a
// strong field, the reference may be handed to the reference processor so
// the referent is traversed later during reference processing.
// T is the in-heap oop representation (oop or narrowOop).
template <class T>
static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
  if (PSScavenge::should_scavenge(referent_addr)) {
    ReferenceProcessor* rp = PSScavenge::reference_processor();
    // discover_reference has side effects: on success the Reference is
    // recorded for later processing, so we must not also push the referent.
    if (rp->discover_reference(obj, klass->reference_type())) {
      // reference discovered, referent will be traversed later.
      // Early return also skips the explicit discovered-field handling below;
      // push only the remaining instance fields.
      klass->InstanceKlass::oop_ps_push_contents(obj, pm);
      return;
    } else {
      // treat referent as normal oop
      pm->claim_or_forward_depth(referent_addr);
    }
  }
  // Treat discovered as normal oop
  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
  if (PSScavenge::should_scavenge(discovered_addr)) {
    pm->claim_or_forward_depth(discovered_addr);
  }
  // Finally push the remaining instance fields of the Reference object.
  klass->InstanceKlass::oop_ps_push_contents(obj, pm);
}
470
471 void InstanceRefKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
472 if (UseCompressedOops) {
473 oop_ps_push_contents_specialized<narrowOop>(obj, this, pm);
474 } else {
475 oop_ps_push_contents_specialized<oop>(obj, this, pm);
476 }
477 }
478
479 void ObjArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
480 assert(obj->is_objArray(), "obj must be obj array");
481 PushContentsClosure cl(pm);
482 if (UseCompressedOops) {
483 oop_oop_iterate_elements<narrowOop>(objArrayOop(obj), &cl);
484 } else {
485 oop_oop_iterate_elements<oop>(objArrayOop(obj), &cl);
486 }
487 }
488
// A type array holds no oops, so it should never be pushed for content
// scanning; reaching this method indicates a bug in the caller.
void TypeArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
  assert(obj->is_typeArray(),"must be a type array");
  ShouldNotReachHere();
}
493
494 oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
495 assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
496
497 // Attempt to CAS in the header.
498 // This tests if the header is still the same as when
499 // this started. If it is the same (i.e., no forwarding
500 // pointer has been installed), then this thread owns
501 // it.
502 if (obj->cas_forward_to(obj, obj_mark)) {
503 // We won any races, we "own" this object.
504 assert(obj == obj->forwardee(), "Sanity");
505
506 _promotion_failed_info.register_copy_failure(obj->size());
507
508 push_contents(obj);
509
510 _preserved_marks->push_if_necessary(obj, obj_mark);
|
370 int const end = arrayOop(old)->length();
371 if (end > (int) _min_array_size_for_chunking) {
372 // we'll chunk more
373 start = end - _array_chunk_size;
374 assert(start > 0, "invariant");
375 arrayOop(old)->set_length(start);
376 push_depth(mask_chunked_array_oop(old));
377 TASKQUEUE_STATS_ONLY(++_masked_pushes);
378 } else {
379 // this is the final chunk for this array
380 start = 0;
381 int const actual_length = arrayOop(obj)->length();
382 arrayOop(old)->set_length(actual_length);
383 }
384
385 if (UseCompressedOops) {
386 process_array_chunk_work<narrowOop>(obj, start, end);
387 } else {
388 process_array_chunk_work<oop>(obj, start, end);
389 }
390 }
391
392 oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
393 assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
394
395 // Attempt to CAS in the header.
396 // This tests if the header is still the same as when
397 // this started. If it is the same (i.e., no forwarding
398 // pointer has been installed), then this thread owns
399 // it.
400 if (obj->cas_forward_to(obj, obj_mark)) {
401 // We won any races, we "own" this object.
402 assert(obj == obj->forwardee(), "Sanity");
403
404 _promotion_failed_info.register_copy_failure(obj->size());
405
406 push_contents(obj);
407
408 _preserved_marks->push_if_necessary(obj, obj_mark);
|