65 Klass* _klass;
66 narrowKlass _compressed_klass;
67 } _metadata;
68
69 // Fast access to barrier set. Must be initialized.
70 static BarrierSet* _bs;
71
72 public:
// Plain (unordered) accessors for the object header mark word.
73 markOop mark() const { return _mark; }
74 markOop* mark_addr() const { return (markOop*) &_mark; }
75
// NOTE(review): 'volatile' here qualifies only the by-value parameter, so this
// is still a plain store with no ordering guarantee; use release_set_mark()
// below when an ordered store is required.
76 void set_mark(volatile markOop m) { _mark = m; }
77
// Ordered store / compare-and-swap variants of the mark-word setter
// (bodies presumably live in oop.inline.hpp — same pattern as the type
// tests documented below).
78 inline void release_set_mark(markOop m);
79 inline markOop cas_set_mark(markOop new_mark, markOop old_mark);
80
81 // Used only to re-initialize the mark word (e.g., of promoted
82 // objects during a GC) -- requires a valid klass pointer
83 inline void init_mark();
84
// Klass (type metadata) accessors. NOTE(review): the /*inline*/ markers look
// like deliberately disabled inline qualifiers — confirm whether these are
// intended to stay out-of-line or should be restored.
85 /*inline*/ Klass* klass() const;
86 inline Klass* klass_or_null() const volatile;
87 inline Klass** klass_addr();
88 inline narrowKlass* compressed_klass_addr();
89
90 /*inline*/ void set_klass(Klass* k);
91
92 // For klass field compression
93 inline int klass_gap() const;
94 /*inline*/ void set_klass_gap(int z);
95 // For when the klass pointer is being used as a linked list "next" field.
96 inline void set_klass_to_list_ptr(oop k);
97 inline oop list_ptr_from_klass();
98
99 // size of object header, aligned to platform wordSize
100 static int header_size() { return sizeof(oopDesc)/HeapWordSize; }
101
102 // Returns whether this is an instance of k or an instance of a subclass of k
103 inline bool is_a(Klass* k) const;
104
105 // Returns the actual oop size of the object
106 /*inline*/ int size();
107
108 // Sometimes (for complicated concurrency-related reasons), it is useful
109 // to be able to figure out the size of an object knowing its klass.
110 inline int size_given_klass(Klass* klass);
111
112 // type test operations (inlined in oop.inline.hpp)
113 inline bool is_instance() const;
114 /*inline*/ bool is_array() const;
115 inline bool is_objArray() const;
116 inline bool is_typeArray() const;
117
118 // type test operations that don't require inclusion of oop.inline.hpp.
119 bool is_instance_noinline() const;
120 bool is_array_noinline() const;
121 bool is_objArray_noinline() const;
122 bool is_typeArray_noinline() const;
123
124 private:
125 // field addresses in oop
// Raw address of a field at the given byte offset within this object;
// the typed *_field_addr helpers below are presumably thin casts over
// field_base() — confirm in oop.inline.hpp.
126 inline void* field_base(int offset) const;
127
128 inline jbyte* byte_field_addr(int offset) const;
129 inline jchar* char_field_addr(int offset) const;
130 inline jboolean* bool_field_addr(int offset) const;
131 inline jint* int_field_addr(int offset) const;
132 inline jshort* short_field_addr(int offset) const;
133 inline jlong* long_field_addr(int offset) const;
134 inline jfloat* float_field_addr(int offset) const;
135 inline jdouble* double_field_addr(int offset) const;
136 inline Metadata** metadata_field_addr(int offset) const;
137
138 public:
139 // Need this as public for garbage collection.
// T is oop or narrowOop depending on whether compressed oops are in use.
140 template <class T> inline T* obj_field_addr(int offset) const;
141
142 // Needed for javaClasses
143 inline address* address_field_addr(int offset) const;
144
145 inline static bool is_null(oop obj) { return obj == NULL; }
146 inline static bool is_null(narrowOop obj) { return obj == 0; }
147
148 // Decode an oop pointer from a narrowOop if compressed.
149 // These are overloaded for oop and narrowOop as are the other functions
150 // below so that they can be called in template functions.
// The oop-taking overloads are identity functions so that template code
// parameterized over (oop | narrowOop) compiles for both representations.
151 static inline oop decode_heap_oop_not_null(oop v) { return v; }
152 static /*inline*/ oop decode_heap_oop_not_null(narrowOop v);
153 static inline oop decode_heap_oop(oop v) { return v; }
154 static /*inline*/ oop decode_heap_oop(narrowOop v);
155
156 // Encode an oop pointer to a narrow oop. The or_null versions accept
157 // null oop pointer, others do not in order to eliminate the
158 // null checking branches.
159 static inline narrowOop encode_heap_oop_not_null(oop v);
160 static /*inline*/ narrowOop encode_heap_oop(oop v);
161
162 // Load an oop out of the Java heap as is without decoding.
163 // Called by GC to check for null before decoding.
164 static inline narrowOop load_heap_oop(narrowOop* p) { return *p; }
165 static inline oop load_heap_oop(oop* p) { return *p; }
166
167 // Load an oop out of Java heap and decode it to an uncompressed oop.
168 static inline oop load_decode_heap_oop_not_null(narrowOop* p);
169 static inline oop load_decode_heap_oop_not_null(oop* p) { return *p; }
170 static inline oop load_decode_heap_oop(narrowOop* p);
171 static inline oop load_decode_heap_oop(oop* p) { return *p; }
172
173 // Store already encoded heap oop into the heap.
// NOTE(review): these raw stores bypass the barrier set (_bs above); callers
// are presumably responsible for any required GC write barriers — confirm.
174 static inline void store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
175 static inline void store_heap_oop(oop* p, oop v) { *p = v; }
176
177 // Encode oop if UseCompressedOops and store into the heap.
178 static inline void encode_store_heap_oop_not_null(narrowOop* p, oop v);
179 static inline void encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
180 static inline void encode_store_heap_oop(narrowOop* p, oop v);
267
268 // printing on default output stream
269 void print();
270 void print_value();
271 void print_address();
272
273 // return the print strings
// NOTE(review): ownership of the returned char* is not visible here —
// confirm whether callers must free it or it is resource-area allocated.
274 char* print_string();
275 char* print_value_string();
276
277 // verification operations
278 void verify_on(outputStream* st);
279 void verify();
280
281 // locking operations
// Lock-state queries decoded from the mark word.
282 inline bool is_locked() const;
283 inline bool is_unlocked() const;
284 inline bool has_bias_pattern() const;
285
286 // asserts
287 /*inline*/ bool is_oop(bool ignore_mark_word = false) const;
288 /*inline*/ bool is_oop_or_null(bool ignore_mark_word = false) const;
289 #ifndef PRODUCT
290 inline bool is_unlocked_oop() const;
291 #endif
292
293 // garbage collection
294 inline bool is_gc_marked() const;
295
296 inline bool is_scavengable() const;
297
298 // Forward pointer operations for scavenge
299 inline bool is_forwarded() const;
300
301 inline void forward_to(oop p);
302 inline bool cas_forward_to(oop p, markOop compare);
303
304 #if INCLUDE_ALL_GCS
305 // Like "forward_to", but inserts the forwarding pointer atomically.
306 // Exactly one thread succeeds in inserting the forwarding pointer, and
307 // this call returns "NULL" for that thread; any other thread has the
308 // value of the forwarding pointer returned and does not modify "this".
309 inline oop forward_to_atomic(oop p);
310 #endif // INCLUDE_ALL_GCS
311
312 inline oop forwardee() const;
313
314 // Age of object during scavenge
315 /*inline*/ uint age() const;
316 inline void incr_age();
317
318 // mark-sweep support
319 void follow_body(int begin, int end);
320
321 // Fast access to barrier set
322 static BarrierSet* bs() { return _bs; }
323 static void set_bs(BarrierSet* bs) { _bs = bs; }
324
325 // Garbage Collection support
326
327 // Mark Sweep
328 // Adjust all pointers in this object to point at its forwarded location and
329 // return the size of this oop. This is used by the MarkSweep collector.
330 inline int ms_adjust_pointers();
331 #if INCLUDE_ALL_GCS
332 // Parallel Compact
333 inline void pc_follow_contents(ParCompactionManager* pc);
334 inline void pc_update_contents();
335 // Parallel Scavenge
|
65 Klass* _klass;
66 narrowKlass _compressed_klass;
67 } _metadata;
68
69 // Fast access to barrier set. Must be initialized.
70 static BarrierSet* _bs;
71
72 public:
// Plain (unordered) accessors for the object header mark word.
73 markOop mark() const { return _mark; }
74 markOop* mark_addr() const { return (markOop*) &_mark; }
75
// NOTE(review): 'volatile' here qualifies only the by-value parameter, so this
// is still a plain store with no ordering guarantee; use release_set_mark()
// below when an ordered store is required.
76 void set_mark(volatile markOop m) { _mark = m; }
77
// Ordered store / compare-and-swap variants of the mark-word setter
// (bodies presumably live in oop.inline.hpp — same pattern as the type
// tests documented below).
78 inline void release_set_mark(markOop m);
79 inline markOop cas_set_mark(markOop new_mark, markOop old_mark);
80
81 // Used only to re-initialize the mark word (e.g., of promoted
82 // objects during a GC) -- requires a valid klass pointer
83 inline void init_mark();
84
// Klass (type metadata) accessors, all defined out of line as inline
// functions (presumably in oop.inline.hpp).
85 inline Klass* klass() const;
86 inline Klass* klass_or_null() const volatile;
87 inline Klass** klass_addr();
88 inline narrowKlass* compressed_klass_addr();
89
90 inline void set_klass(Klass* k);
91
92 // For klass field compression
93 inline int klass_gap() const;
94 inline void set_klass_gap(int z);
95 // For when the klass pointer is being used as a linked list "next" field.
96 inline void set_klass_to_list_ptr(oop k);
97 inline oop list_ptr_from_klass();
98
99 // size of object header, aligned to platform wordSize
100 static int header_size() { return sizeof(oopDesc)/HeapWordSize; }
101
102 // Returns whether this is an instance of k or an instance of a subclass of k
103 inline bool is_a(Klass* k) const;
104
105 // Returns the actual oop size of the object
106 inline int size();
107
108 // Sometimes (for complicated concurrency-related reasons), it is useful
109 // to be able to figure out the size of an object knowing its klass.
110 inline int size_given_klass(Klass* klass);
111
112 // type test operations (inlined in oop.inline.hpp)
113 inline bool is_instance() const;
114 inline bool is_array() const;
115 inline bool is_objArray() const;
116 inline bool is_typeArray() const;
117
118 // type test operations that don't require inclusion of oop.inline.hpp.
119 bool is_instance_noinline() const;
120 bool is_array_noinline() const;
121 bool is_objArray_noinline() const;
122 bool is_typeArray_noinline() const;
123
124 private:
125 // field addresses in oop
// Raw address of a field at the given byte offset within this object;
// the typed *_field_addr helpers below are presumably thin casts over
// field_base() — confirm in oop.inline.hpp.
126 inline void* field_base(int offset) const;
127
128 inline jbyte* byte_field_addr(int offset) const;
129 inline jchar* char_field_addr(int offset) const;
130 inline jboolean* bool_field_addr(int offset) const;
131 inline jint* int_field_addr(int offset) const;
132 inline jshort* short_field_addr(int offset) const;
133 inline jlong* long_field_addr(int offset) const;
134 inline jfloat* float_field_addr(int offset) const;
135 inline jdouble* double_field_addr(int offset) const;
136 inline Metadata** metadata_field_addr(int offset) const;
137
138 public:
139 // Need this as public for garbage collection.
// T is oop or narrowOop depending on whether compressed oops are in use.
140 template <class T> inline T* obj_field_addr(int offset) const;
141
142 // Needed for javaClasses
143 inline address* address_field_addr(int offset) const;
144
145 inline static bool is_null(oop obj) { return obj == NULL; }
146 inline static bool is_null(narrowOop obj) { return obj == 0; }
147
148 // Decode an oop pointer from a narrowOop if compressed.
149 // These are overloaded for oop and narrowOop as are the other functions
150 // below so that they can be called in template functions.
// The oop-taking overloads are identity functions so that template code
// parameterized over (oop | narrowOop) compiles for both representations.
151 static inline oop decode_heap_oop_not_null(oop v) { return v; }
152 static inline oop decode_heap_oop_not_null(narrowOop v);
153 static inline oop decode_heap_oop(oop v) { return v; }
154 static inline oop decode_heap_oop(narrowOop v);
155
156 // Encode an oop pointer to a narrow oop. The or_null versions accept
157 // null oop pointer, others do not in order to eliminate the
158 // null checking branches.
159 static inline narrowOop encode_heap_oop_not_null(oop v);
160 static inline narrowOop encode_heap_oop(oop v);
161
162 // Load an oop out of the Java heap as is without decoding.
163 // Called by GC to check for null before decoding.
164 static inline narrowOop load_heap_oop(narrowOop* p) { return *p; }
165 static inline oop load_heap_oop(oop* p) { return *p; }
166
167 // Load an oop out of Java heap and decode it to an uncompressed oop.
168 static inline oop load_decode_heap_oop_not_null(narrowOop* p);
169 static inline oop load_decode_heap_oop_not_null(oop* p) { return *p; }
170 static inline oop load_decode_heap_oop(narrowOop* p);
171 static inline oop load_decode_heap_oop(oop* p) { return *p; }
172
173 // Store already encoded heap oop into the heap.
// NOTE(review): these raw stores bypass the barrier set (_bs above); callers
// are presumably responsible for any required GC write barriers — confirm.
174 static inline void store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
175 static inline void store_heap_oop(oop* p, oop v) { *p = v; }
176
177 // Encode oop if UseCompressedOops and store into the heap.
178 static inline void encode_store_heap_oop_not_null(narrowOop* p, oop v);
179 static inline void encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
180 static inline void encode_store_heap_oop(narrowOop* p, oop v);
267
268 // printing on default output stream
269 void print();
270 void print_value();
271 void print_address();
272
273 // return the print strings
// NOTE(review): ownership of the returned char* is not visible here —
// confirm whether callers must free it or it is resource-area allocated.
274 char* print_string();
275 char* print_value_string();
276
277 // verification operations
278 void verify_on(outputStream* st);
279 void verify();
280
281 // locking operations
// Lock-state queries decoded from the mark word.
282 inline bool is_locked() const;
283 inline bool is_unlocked() const;
284 inline bool has_bias_pattern() const;
285
286 // asserts
287 inline bool is_oop(bool ignore_mark_word = false) const;
288 inline bool is_oop_or_null(bool ignore_mark_word = false) const;
289 #ifndef PRODUCT
290 inline bool is_unlocked_oop() const;
291 #endif
292
293 // garbage collection
294 inline bool is_gc_marked() const;
295
296 inline bool is_scavengable() const;
297
298 // Forward pointer operations for scavenge
299 inline bool is_forwarded() const;
300
301 inline void forward_to(oop p);
302 inline bool cas_forward_to(oop p, markOop compare);
303
304 #if INCLUDE_ALL_GCS
305 // Like "forward_to", but inserts the forwarding pointer atomically.
306 // Exactly one thread succeeds in inserting the forwarding pointer, and
307 // this call returns "NULL" for that thread; any other thread has the
308 // value of the forwarding pointer returned and does not modify "this".
309 inline oop forward_to_atomic(oop p);
310 #endif // INCLUDE_ALL_GCS
311
312 inline oop forwardee() const;
313
314 // Age of object during scavenge
315 inline uint age() const;
316 inline void incr_age();
317
318 // mark-sweep support
319 void follow_body(int begin, int end);
320
321 // Fast access to barrier set
322 static BarrierSet* bs() { return _bs; }
323 static void set_bs(BarrierSet* bs) { _bs = bs; }
324
325 // Garbage Collection support
326
327 // Mark Sweep
328 // Adjust all pointers in this object to point at its forwarded location and
329 // return the size of this oop. This is used by the MarkSweep collector.
330 inline int ms_adjust_pointers();
331 #if INCLUDE_ALL_GCS
332 // Parallel Compact
333 inline void pc_follow_contents(ParCompactionManager* pc);
334 inline void pc_update_contents();
335 // Parallel Scavenge
|