64 Klass* _klass;
65 narrowKlass _compressed_klass;
66 } _metadata;
67
68 // Fast access to barrier set. Must be initialized.
69 static BarrierSet* _bs;
70
71 public:
72 markOop mark() const { return _mark; } // raw read of the object header's mark word
73 markOop* mark_addr() const { return (markOop*) &_mark; } // const is cast away so callers can update the mark in place
74
75 void set_mark(volatile markOop m) { _mark = m; } // plain store; use release_set_mark()/cas_set_mark() below for ordered/atomic updates
76
77 inline void release_set_mark(markOop m);
78 inline markOop cas_set_mark(markOop new_mark, markOop old_mark);
79
80 // Used only to re-initialize the mark word (e.g., of promoted
81 // objects during a GC) -- requires a valid klass pointer
82 inline void init_mark();
83
// NOTE(review): the /*inline*/ markers in this section appear to record
// declarations being moved out of oop.inline.hpp (de-inlined); confirm
// against the out-of-line definitions before relying on inlining here.
84 /*inline*/ Klass* klass() const;
85 inline Klass* klass_or_null() const volatile;
86 inline Klass** klass_addr();
87 inline narrowKlass* compressed_klass_addr();
88
89 /*inline*/ void set_klass(Klass* k);
90
91 // For klass field compression
92 inline int klass_gap() const;
93 /*inline*/ void set_klass_gap(int z);
94 // For when the klass pointer is being used as a linked list "next" field.
95 inline void set_klass_to_list_ptr(oop k);
96 inline oop list_ptr_from_klass();
97
98 // size of object header, aligned to platform wordSize
99 static int header_size() { return sizeof(oopDesc)/HeapWordSize; } // size in HeapWords, not bytes
100
101 // Returns whether this is an instance of k or an instance of a subclass of k
102 inline bool is_a(Klass* k) const;
103
104 // Returns the actual oop size of the object
105 /*inline*/ int size();
106
107 // Sometimes (for complicated concurrency-related reasons), it is useful
108 // to be able to figure out the size of an object knowing its klass.
109 inline int size_given_klass(Klass* klass);
110
111 // type test operations (inlined in oop.inline.hpp)
112 inline bool is_instance() const;
113 /*inline*/ bool is_array() const;
114 inline bool is_objArray() const;
115 inline bool is_typeArray() const;
116
117 // type test operations that don't require inclusion of oop.inline.hpp.
118 bool is_instance_noinline() const;
119 bool is_array_noinline() const;
120 bool is_objArray_noinline() const;
121 bool is_typeArray_noinline() const;
122
123 private:
124 // field addresses in oop
125 inline void* field_base(int offset) const;
126
127 inline jbyte* byte_field_addr(int offset) const;
128 inline jchar* char_field_addr(int offset) const;
129 inline jboolean* bool_field_addr(int offset) const;
130 inline jint* int_field_addr(int offset) const;
131 inline jshort* short_field_addr(int offset) const;
132 inline jlong* long_field_addr(int offset) const;
133 inline jfloat* float_field_addr(int offset) const;
134 inline jdouble* double_field_addr(int offset) const;
135 inline Metadata** metadata_field_addr(int offset) const;
136
137 public:
138 // Need this as public for garbage collection.
139 template <class T> inline T* obj_field_addr(int offset) const;
140
141 // Needed for javaClasses
142 inline address* address_field_addr(int offset) const;
143
// Null tests, overloaded for uncompressed (oop) and compressed (narrowOop)
// representations so they can be used uniformly in template code.
144 inline static bool is_null(oop obj) { return obj == NULL; }
145 inline static bool is_null(narrowOop obj) { return obj == 0; }
146
147 // Decode an oop pointer from a narrowOop if compressed.
148 // These are overloaded for oop and narrowOop as are the other functions
149 // below so that they can be called in template functions.
150 static inline oop decode_heap_oop_not_null(oop v) { return v; } // identity: already uncompressed
151 static /*inline*/ oop decode_heap_oop_not_null(narrowOop v);
152 static inline oop decode_heap_oop(oop v) { return v; } // identity: already uncompressed
153 static /*inline*/ oop decode_heap_oop(narrowOop v);
154
155 // Encode an oop pointer to a narrow oop. The or_null versions accept
156 // null oop pointer, others do not in order to eliminate the
157 // null checking branches.
158 static inline narrowOop encode_heap_oop_not_null(oop v);
159 static /*inline*/ narrowOop encode_heap_oop(oop v);
160
161 // Load an oop out of the Java heap as is without decoding.
162 // Called by GC to check for null before decoding.
163 static inline narrowOop load_heap_oop(narrowOop* p) { return *p; }
164 static inline oop load_heap_oop(oop* p) { return *p; }
165
166 // Load an oop out of Java heap and decode it to an uncompressed oop.
167 static inline oop load_decode_heap_oop_not_null(narrowOop* p);
168 static inline oop load_decode_heap_oop_not_null(oop* p) { return *p; }
169 static inline oop load_decode_heap_oop(narrowOop* p);
170 static inline oop load_decode_heap_oop(oop* p) { return *p; }
171
172 // Store already encoded heap oop into the heap.
173 static inline void store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
174 static inline void store_heap_oop(oop* p, oop v) { *p = v; }
175
176 // Encode oop if UseCompressedOops and store into the heap.
177 static inline void encode_store_heap_oop_not_null(narrowOop* p, oop v);
178 static inline void encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
179 static inline void encode_store_heap_oop(narrowOop* p, oop v);
266
267 // printing on default output stream
268 void print();
269 void print_value();
270 void print_address();
271
272 // return the print strings
273 char* print_string();
274 char* print_value_string();
275
276 // verification operations
277 void verify_on(outputStream* st);
278 void verify();
279
280 // locking operations
281 inline bool is_locked() const;
282 inline bool is_unlocked() const;
283 inline bool has_bias_pattern() const;
284
285 // asserts
286 /*inline*/ bool is_oop(bool ignore_mark_word = false) const;
287 /*inline*/ bool is_oop_or_null(bool ignore_mark_word = false) const;
288 #ifndef PRODUCT
289 inline bool is_unlocked_oop() const;
290 #endif
291
292 // garbage collection
293 inline bool is_gc_marked() const;
294
295 inline bool is_scavengable() const;
296
297 // Forward pointer operations for scavenge
298 inline bool is_forwarded() const;
299
300 inline void forward_to(oop p);
301 inline bool cas_forward_to(oop p, markOop compare);
302
303 #if INCLUDE_ALL_GCS
304 // Like "forward_to", but inserts the forwarding pointer atomically.
305 // Exactly one thread succeeds in inserting the forwarding pointer, and
306 // this call returns "NULL" for that thread; any other thread has the
307 // value of the forwarding pointer returned and does not modify "this".
308 inline oop forward_to_atomic(oop p);
309 #endif // INCLUDE_ALL_GCS
310
311 inline oop forwardee() const;
312
313 // Age of object during scavenge
314 /*inline*/ uint age() const;
315 inline void incr_age();
316
317 // mark-sweep support
318 void follow_body(int begin, int end);
319
320 // Fast access to barrier set
321 static BarrierSet* bs() { return _bs; }
322 static void set_bs(BarrierSet* bs) { _bs = bs; }
323
324 // Garbage Collection support
325
326 // Mark Sweep
327 // Adjust all pointers in this object to point at its forwarded location and
328 // return the size of this oop. This is used by the MarkSweep collector.
329 inline int ms_adjust_pointers();
330 #if INCLUDE_ALL_GCS
331 // Parallel Compact
332 inline void pc_follow_contents(ParCompactionManager* pc);
333 inline void pc_update_contents();
334 // Parallel Scavenge
335 inline void ps_push_contents(PSPromotionManager* pm);
|
64 Klass* _klass;
65 narrowKlass _compressed_klass;
66 } _metadata;
67
68 // Fast access to barrier set. Must be initialized.
69 static BarrierSet* _bs;
70
71 public:
72 markOop mark() const { return _mark; } // raw read of the object header's mark word
73 markOop* mark_addr() const { return (markOop*) &_mark; } // const is cast away so callers can update the mark in place
74
75 void set_mark(volatile markOop m) { _mark = m; } // plain store; use release_set_mark()/cas_set_mark() below for ordered/atomic updates
76
77 inline void release_set_mark(markOop m);
78 inline markOop cas_set_mark(markOop new_mark, markOop old_mark);
79
80 // Used only to re-initialize the mark word (e.g., of promoted
81 // objects during a GC) -- requires a valid klass pointer
82 inline void init_mark();
83
84 inline Klass* klass() const;
85 inline Klass* klass_or_null() const volatile;
86 inline Klass** klass_addr();
87 inline narrowKlass* compressed_klass_addr();
88
89 inline void set_klass(Klass* k);
90
91 // For klass field compression
92 inline int klass_gap() const;
93 inline void set_klass_gap(int z);
94 // For when the klass pointer is being used as a linked list "next" field.
95 inline void set_klass_to_list_ptr(oop k);
96 inline oop list_ptr_from_klass();
97
98 // size of object header, aligned to platform wordSize
99 static int header_size() { return sizeof(oopDesc)/HeapWordSize; } // size in HeapWords, not bytes
100
101 // Returns whether this is an instance of k or an instance of a subclass of k
102 inline bool is_a(Klass* k) const;
103
104 // Returns the actual oop size of the object
105 inline int size();
106
107 // Sometimes (for complicated concurrency-related reasons), it is useful
108 // to be able to figure out the size of an object knowing its klass.
109 inline int size_given_klass(Klass* klass);
110
111 // type test operations (inlined in oop.inline.hpp)
112 inline bool is_instance() const;
113 inline bool is_array() const;
114 inline bool is_objArray() const;
115 inline bool is_typeArray() const;
116
117 // type test operations that don't require inclusion of oop.inline.hpp.
118 bool is_instance_noinline() const;
119 bool is_array_noinline() const;
120 bool is_objArray_noinline() const;
121 bool is_typeArray_noinline() const;
122
123 private:
124 // field addresses in oop
125 inline void* field_base(int offset) const;
126
127 inline jbyte* byte_field_addr(int offset) const;
128 inline jchar* char_field_addr(int offset) const;
129 inline jboolean* bool_field_addr(int offset) const;
130 inline jint* int_field_addr(int offset) const;
131 inline jshort* short_field_addr(int offset) const;
132 inline jlong* long_field_addr(int offset) const;
133 inline jfloat* float_field_addr(int offset) const;
134 inline jdouble* double_field_addr(int offset) const;
135 inline Metadata** metadata_field_addr(int offset) const;
136
137 public:
138 // Need this as public for garbage collection.
139 template <class T> inline T* obj_field_addr(int offset) const;
140
141 // Needed for javaClasses
142 inline address* address_field_addr(int offset) const;
143
// Null tests, overloaded for uncompressed (oop) and compressed (narrowOop)
// representations so they can be used uniformly in template code.
144 inline static bool is_null(oop obj) { return obj == NULL; }
145 inline static bool is_null(narrowOop obj) { return obj == 0; }
146
147 // Decode an oop pointer from a narrowOop if compressed.
148 // These are overloaded for oop and narrowOop as are the other functions
149 // below so that they can be called in template functions.
150 static inline oop decode_heap_oop_not_null(oop v) { return v; } // identity: already uncompressed
151 static inline oop decode_heap_oop_not_null(narrowOop v);
152 static inline oop decode_heap_oop(oop v) { return v; } // identity: already uncompressed
153 static inline oop decode_heap_oop(narrowOop v);
154
155 // Encode an oop pointer to a narrow oop. The or_null versions accept
156 // null oop pointer, others do not in order to eliminate the
157 // null checking branches.
158 static inline narrowOop encode_heap_oop_not_null(oop v);
159 static inline narrowOop encode_heap_oop(oop v);
160
161 // Load an oop out of the Java heap as is without decoding.
162 // Called by GC to check for null before decoding.
163 static inline narrowOop load_heap_oop(narrowOop* p) { return *p; }
164 static inline oop load_heap_oop(oop* p) { return *p; }
165
166 // Load an oop out of Java heap and decode it to an uncompressed oop.
167 static inline oop load_decode_heap_oop_not_null(narrowOop* p);
168 static inline oop load_decode_heap_oop_not_null(oop* p) { return *p; }
169 static inline oop load_decode_heap_oop(narrowOop* p);
170 static inline oop load_decode_heap_oop(oop* p) { return *p; }
171
172 // Store already encoded heap oop into the heap.
173 static inline void store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
174 static inline void store_heap_oop(oop* p, oop v) { *p = v; }
175
176 // Encode oop if UseCompressedOops and store into the heap.
177 static inline void encode_store_heap_oop_not_null(narrowOop* p, oop v);
178 static inline void encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
179 static inline void encode_store_heap_oop(narrowOop* p, oop v);
266
267 // printing on default output stream
268 void print();
269 void print_value();
270 void print_address();
271
272 // return the print strings
273 char* print_string();
274 char* print_value_string();
275
276 // verification operations
277 void verify_on(outputStream* st);
278 void verify();
279
280 // locking operations
281 inline bool is_locked() const;
282 inline bool is_unlocked() const;
283 inline bool has_bias_pattern() const;
284
285 // asserts
286 inline bool is_oop(bool ignore_mark_word = false) const;
287 inline bool is_oop_or_null(bool ignore_mark_word = false) const;
288 #ifndef PRODUCT
289 inline bool is_unlocked_oop() const;
290 #endif
291
292 // garbage collection
293 inline bool is_gc_marked() const;
294
295 inline bool is_scavengable() const;
296
297 // Forward pointer operations for scavenge
298 inline bool is_forwarded() const;
299
300 inline void forward_to(oop p);
301 inline bool cas_forward_to(oop p, markOop compare);
302
303 #if INCLUDE_ALL_GCS
304 // Like "forward_to", but inserts the forwarding pointer atomically.
305 // Exactly one thread succeeds in inserting the forwarding pointer, and
306 // this call returns "NULL" for that thread; any other thread has the
307 // value of the forwarding pointer returned and does not modify "this".
308 inline oop forward_to_atomic(oop p);
309 #endif // INCLUDE_ALL_GCS
310
311 inline oop forwardee() const;
312
313 // Age of object during scavenge
314 inline uint age() const;
315 inline void incr_age();
316
317
318 // mark-sweep support
319 void follow_body(int begin, int end);
320
321 // Fast access to barrier set
322 static BarrierSet* bs() { return _bs; }
323 static void set_bs(BarrierSet* bs) { _bs = bs; }
324
325 // Garbage Collection support
326
327 // Mark Sweep
328 // Adjust all pointers in this object to point at its forwarded location and
329 // return the size of this oop. This is used by the MarkSweep collector.
330 inline int ms_adjust_pointers();
331 #if INCLUDE_ALL_GCS
332 // Parallel Compact
333 inline void pc_follow_contents(ParCompactionManager* pc);
334 inline void pc_update_contents();
335 // Parallel Scavenge
336 inline void ps_push_contents(PSPromotionManager* pm);
|