12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #ifndef SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP
25 #define SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP
26
27 #include "gc/parallel/parallelScavengeHeap.hpp"
28 #include "gc/parallel/parMarkBitMap.inline.hpp"
29 #include "gc/parallel/psOldGen.hpp"
30 #include "gc/parallel/psPromotionLAB.inline.hpp"
31 #include "gc/parallel/psPromotionManager.hpp"
32 #include "gc/parallel/psScavenge.hpp"
33 #include "gc/shared/taskqueue.inline.hpp"
34 #include "logging/log.hpp"
35 #include "oops/access.inline.hpp"
36 #include "oops/oop.inline.hpp"
37
38 inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
39 assert(_manager_array != NULL, "access of NULL manager_array");
40 assert(index <= ParallelGCThreads, "out of range manager_array access");
41 return &_manager_array[index];
42 }
43
// Push a (possibly narrow) oop location onto this manager's
// depth-first claimed stack for later processing.
template <class T>
inline void PSPromotionManager::push_depth(T* p) {
  claimed_stack_depth()->push(p);
}
48
49 template <class T>
50 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
51 if (p != NULL) { // XXX: error if p != NULL here
52 oop o = RawAccess<IS_NOT_NULL>::oop_load(p);
53 if (o->is_forwarded()) {
|
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #ifndef SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP
25 #define SHARE_VM_GC_PARALLEL_PSPROMOTIONMANAGER_INLINE_HPP
26
27 #include "gc/parallel/parallelScavengeHeap.hpp"
28 #include "gc/parallel/parMarkBitMap.inline.hpp"
29 #include "gc/parallel/psOldGen.hpp"
30 #include "gc/parallel/psPromotionLAB.inline.hpp"
31 #include "gc/parallel/psPromotionManager.hpp"
32 #include "gc/parallel/psScavenge.inline.hpp"
33 #include "gc/shared/taskqueue.inline.hpp"
34 #include "logging/log.hpp"
35 #include "memory/iterator.inline.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/oop.inline.hpp"
38
39 inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
40 assert(_manager_array != NULL, "access of NULL manager_array");
41 assert(index <= ParallelGCThreads, "out of range manager_array access");
42 return &_manager_array[index];
43 }
44
// Push a (possibly narrow) oop location onto this manager's
// depth-first claimed stack for later processing.
template <class T>
inline void PSPromotionManager::push_depth(T* p) {
  claimed_stack_depth()->push(p);
}
49
50 template <class T>
51 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
52 if (p != NULL) { // XXX: error if p != NULL here
53 oop o = RawAccess<IS_NOT_NULL>::oop_load(p);
54 if (o->is_forwarded()) {
|
81
82 if (lab != NULL) {
83 // Promotion of object through newly allocated PLAB
84 if (gc_tracer->should_report_promotion_in_new_plab_event()) {
85 size_t obj_bytes = obj_size * HeapWordSize;
86 size_t lab_size = lab->capacity();
87 gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
88 age, tenured, lab_size);
89 }
90 } else {
91 // Promotion of object directly to heap
92 if (gc_tracer->should_report_promotion_outside_plab_event()) {
93 size_t obj_bytes = obj_size * HeapWordSize;
94 gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
95 age, tenured);
96 }
97 }
98 }
99 }
100
// Push the oop fields of obj onto this manager's work stack by
// delegating to the object's own ps_push_contents dispatch.
inline void PSPromotionManager::push_contents(oop obj) {
  obj->ps_push_contents(this);
}
104 //
105 // This method is pretty bulky. It would be nice to split it up
106 // into smaller submethods, but we need to be careful not to hurt
107 // performance.
108 //
109 template<bool promote_immediately>
110 inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
111 assert(should_scavenge(&o), "Sanity");
112
113 oop new_obj = NULL;
114
115 // NOTE! We must be very careful with any methods that access the mark
116 // in o. There may be multiple threads racing on it, and it may be forwarded
117 // at any time. Do not use oop methods for accessing the mark!
118 markOop test_mark = o->mark_raw();
119
120 // The same test as "o->is_forwarded()"
121 if (!test_mark->is_marked()) {
|
82
83 if (lab != NULL) {
84 // Promotion of object through newly allocated PLAB
85 if (gc_tracer->should_report_promotion_in_new_plab_event()) {
86 size_t obj_bytes = obj_size * HeapWordSize;
87 size_t lab_size = lab->capacity();
88 gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
89 age, tenured, lab_size);
90 }
91 } else {
92 // Promotion of object directly to heap
93 if (gc_tracer->should_report_promotion_outside_plab_event()) {
94 size_t obj_bytes = obj_size * HeapWordSize;
95 gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
96 age, tenured);
97 }
98 }
99 }
100 }
101
102 class PSPushContentsClosure: public BasicOopIterateClosure {
103 PSPromotionManager* _pm;
104 public:
105 PSPushContentsClosure(PSPromotionManager* pm) : BasicOopIterateClosure(PSScavenge::reference_processor()), _pm(pm) {}
106
107 template <typename T> void do_oop_nv(T* p) {
108 if (PSScavenge::should_scavenge(p)) {
109 _pm->claim_or_forward_depth(p);
110 }
111 }
112
113 virtual void do_oop(oop* p) { do_oop_nv(p); }
114 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
115
116 // Don't use the oop verification code in the oop_oop_iterate framework.
117 debug_only(virtual bool should_verify_oops() { return false; })
118 };
119
120 //
121 // This closure specialization will override the one that is defined in
122 // instanceRefKlass.inline.cpp. It swaps the order of oop_oop_iterate and
123 // oop_oop_iterate_ref_processing. Unfortunately G1 and Parallel behaves
124 // significantly better (especially in the Derby benchmark) using opposite
125 // order of these function calls.
126 //
127 template <>
128 inline void InstanceRefKlass::oop_oop_iterate_reverse<oop, PSPushContentsClosure>(oop obj, PSPushContentsClosure* closure)
129 oop_oop_iterate_ref_processing<oop>(obj, closure);
130 InstanceKlass::oop_oop_iterate_reverse<oop>(obj, closure);
131 }
132
133 template <>
134 inline void InstanceRefKlass::oop_oop_iterate_reverse<narrowOop, PSPushContentsClosure>(oop obj, PSPushContentsClosure* clo
135 oop_oop_iterate_ref_processing<narrowOop>(obj, closure);
136 InstanceKlass::oop_oop_iterate_reverse<narrowOop>(obj, closure);
137 }
138
139 inline void PSPromotionManager::push_contents(oop obj) {
140 if (!obj->klass()->is_typeArray_klass()) {
141 PSPushContentsClosure pcc(this);
142 obj->oop_iterate_backwards(&pcc);
143 }
144 }
145 //
146 // This method is pretty bulky. It would be nice to split it up
147 // into smaller submethods, but we need to be careful not to hurt
148 // performance.
149 //
150 template<bool promote_immediately>
151 inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
152 assert(should_scavenge(&o), "Sanity");
153
154 oop new_obj = NULL;
155
156 // NOTE! We must be very careful with any methods that access the mark
157 // in o. There may be multiple threads racing on it, and it may be forwarded
158 // at any time. Do not use oop methods for accessing the mark!
159 markOop test_mark = o->mark_raw();
160
161 // The same test as "o->is_forwarded()"
162 if (!test_mark->is_marked()) {
|