/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc/shared/collectedHeap.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

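// mark() and set_mark() go through the Access API (HeapAccess), so any
// GC-specific barriers apply; the *_raw variants read and write the _mark
// field directly and are intended for contexts, such as inside the GC
// itself, where that indirection is unwanted.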
markOop  oopDesc::mark()      const {
  return HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
}

markOop  oopDesc::mark_raw()  const {
  return _mark;
}

markOop* oopDesc::mark_addr_raw() const {
  return (markOop*) &_mark;
}

void oopDesc::set_mark(volatile markOop m) {
  HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}

void oopDesc::set_mark_raw(volatile markOop m) {
  _mark = m;
}

void oopDesc::set_mark_raw(HeapWord* mem, markOop m) {
  *(markOop*)(((char*)mem) + mark_offset_in_bytes()) = m;
}

void oopDesc::release_set_mark(markOop m) {
  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}

markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
}

markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark, atomic_memory_order order) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark, order);
}
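
// Example (informal sketch, not part of this file's API): a typical lock-free
// mark update retries until the CAS observes an unchanged mark word:
//
//   markOop old_mark = obj->mark_raw();
//   for (;;) {
//     markOop new_mark = ...derived from old_mark...;
//     markOop witness = obj->cas_set_mark_raw(new_mark, old_mark);
//     if (witness == old_mark) break;  // CAS succeeded
//     old_mark = witness;              // lost the race; retry with what we saw
//   }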

void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}

void oopDesc::init_mark_raw() {
  set_mark_raw(markOopDesc::prototype_for_object(this));
}

Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null() const volatile {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

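// Acquire variant of klass_or_null(): pairs with release_set_klass() below, so
// a reader that observes the klass also observes the stores that initialized
// the object before its klass was published.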
Klass* oopDesc::klass_or_null_acquire() const volatile {
  if (UseCompressedClassPointers) {
    // Workaround for non-const load_acquire parameter.
    const volatile narrowKlass* addr = &_metadata._compressed_klass;
    volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
    return Klass::decode_klass(OrderAccess::load_acquire(xaddr));
  } else {
    return OrderAccess::load_acquire(&_metadata._klass);
  }
}

Klass** oopDesc::klass_addr(HeapWord* mem) {
  // Only used internally and with CMS; does not work with
  // UseCompressedClassPointers.
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  ByteSize offset = byte_offset_of(oopDesc, _metadata._klass);
  return (Klass**) (((char*)mem) + in_bytes(offset));
}

narrowKlass* oopDesc::compressed_klass_addr(HeapWord* mem) {
  assert(UseCompressedClassPointers, "only supported with compressed klass pointers");
  ByteSize offset = byte_offset_of(oopDesc, _metadata._compressed_klass);
  return (narrowKlass*) (((char*)mem) + in_bytes(offset));
}

Klass** oopDesc::klass_addr() {
  return klass_addr((HeapWord*)this);
}

narrowKlass* oopDesc::compressed_klass_addr() {
  return compressed_klass_addr((HeapWord*)this);
}

#define CHECK_SET_KLASS(k)                                                \
  do {                                                                    \
    assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass");      \
    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
  } while (0)

void oopDesc::set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = Klass::encode_klass_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) {
  CHECK_SET_KLASS(klass);
  if (UseCompressedClassPointers) {
    OrderAccess::release_store(compressed_klass_addr(mem),
                               Klass::encode_klass_not_null(klass));
  } else {
    OrderAccess::release_store(klass_addr(mem), klass);
  }
}

#undef CHECK_SET_KLASS

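// Note (assuming a 64-bit VM): with compressed class pointers the header is
// the 8-byte mark word followed by a 4-byte narrowKlass, leaving a 4-byte
// "klass gap" before the first field; without them there is no gap, which is
// why the setter below is conditional.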
int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

void oopDesc::set_klass_gap(HeapWord* mem, int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
  }
}

void oopDesc::set_klass_gap(int v) {
  set_klass_gap((HeapWord*)this, v);
}

void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}
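
// set_klass_to_list_ptr() and list_ptr_from_klass() are inverses: during GC
// (e.g. ParNew's overflow handling, noted above) the klass field of a
// from-space object is temporarily reused as a link in a list of objects,
// and the encoding round-trips through the two functions.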

bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

int oopDesc::size()  {
  return size_given_klass(klass());
}

int oopDesc::size_given_klass(Klass* klass)  {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.
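  //
  // Worked example (assuming 64-bit heap words, so LogHeapWordSize == 3, and
  // a 16-byte array header): an instance klass whose lh is 24 (and needs no
  // slow path) encodes a fixed 24-byte instance, so s == 24 >> 3 == 3 words.
  // For a jint array of length 10 (log2 element size == 2), size_in_bytes ==
  // (10 << 2) + 16 == 56, already 8-byte aligned, so s == 56 / 8 == 7 words.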

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it by the element size,
      // add the header size, and align to the object alignment.
      size_t size_in_bytes;
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);

      // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // ParNew also runs with promotion LABs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //     is_objArray() && is_forwarded()   // covers first scenario above
      //  || is_typeArray()                    // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseConcMarkSweepGC) ||
               (is_objArray()  && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %d", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
  return s;
}

bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

void*    oopDesc::field_addr_raw(int offset)     const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
void*    oopDesc::field_addr(int offset)         const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }

template <class T>
T*       oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }

template <typename T>
size_t   oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }

template <DecoratorSet decorators>
inline oop  oopDesc::obj_field_access(int offset) const             { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop  oopDesc::obj_field(int offset) const                    { return HeapAccess<>::oop_load_at(as_oop(), offset);  }

inline void oopDesc::obj_field_put(int offset, oop value)           { HeapAccess<>::oop_store_at(as_oop(), offset, value); }

inline jbyte oopDesc::byte_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void  oopDesc::byte_field_put(int offset, jbyte value)       { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jchar oopDesc::char_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void  oopDesc::char_field_put(int offset, jchar value)       { HeapAccess<>::store_at(as_oop(), offset, value); }

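// Note: the boolean store below masks the value with 1, so only a canonical
// 0 or 1 ever lands in a boolean field, whatever bit pattern the caller passed.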
inline jboolean oopDesc::bool_field(int offset) const               { return HeapAccess<>::load_at(as_oop(), offset);                }
inline void     oopDesc::bool_field_put(int offset, jboolean value) { HeapAccess<>::store_at(as_oop(), offset, jboolean(value & 1)); }

inline jshort oopDesc::short_field(int offset) const                { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void   oopDesc::short_field_put(int offset, jshort value)    { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jint oopDesc::int_field(int offset) const                    { return HeapAccess<>::load_at(as_oop(), offset);  }
inline jint oopDesc::int_field_raw(int offset) const                { return RawAccess<>::load_at(as_oop(), offset);   }
inline void oopDesc::int_field_put(int offset, jint value)          { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jlong oopDesc::long_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void  oopDesc::long_field_put(int offset, jlong value)       { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jfloat oopDesc::float_field(int offset) const                { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void   oopDesc::float_field_put(int offset, jfloat value)    { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jdouble oopDesc::double_field(int offset) const              { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void    oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }

bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

bool oopDesc::has_bias_pattern_raw() const {
  return mark_raw()->has_bias_pattern();
}

// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark_raw()->is_marked();
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  // A forwarded mark has both low bits set (is_marked()). A locked object's
  // mark instead points to a stack location and has the sentinel bit cleared,
  // so it cannot be mistaken for a forwarding pointer.
  return mark_raw()->is_marked();
}

// Used by scavengers
void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  assert(!is_archive_object(oop(this)) &&
         !is_archive_object(p),
         "forwarding archive object");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark_raw(m);
}
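
// Example (informal sketch): a copying collector forwards an object by first
// copying it to to-space and then publishing the new location in the old
// copy's mark word:
//
//   oop new_obj = ...copy of old_obj in to-space...;
//   old_obj->forward_to(new_obj);
//   assert(old_obj->is_forwarded(), "mark now encodes a forwarding pointer");
//   assert(old_obj->forwardee() == new_obj, "pointer round-trips");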

// Used by parallel scavengers
bool oopDesc::cas_forward_to(oop p, markOop compare, atomic_memory_order order) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark_raw(m, compare, order) == compare;
}

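// Like cas_forward_to(), but with retry: returns NULL if this thread won the
// race to install the forwarding pointer, and the winning thread's forwardee
// if some other thread got there first.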
oop oopDesc::forward_to_atomic(oop p, atomic_memory_order order) {
  markOop oldMark = mark_raw();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversible");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  while (!oldMark->is_marked()) {
    curMark = cas_set_mark_raw(forwardPtrMark, oldMark, order);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  return forwardee();
}

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  return (oop) mark_raw()->decode_pointer();
}

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee_acquire() const {
  markOop m = OrderAccess::load_acquire(&_mark);
  return (oop) m->decode_pointer();
}

// The following method needs to be MT safe.
uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark_raw()) {
    return displaced_mark_raw()->age();
  } else {
    return mark_raw()->age();
  }
}

void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark_raw()) {
    set_displaced_mark_raw(displaced_mark_raw()->incr_age());
  } else {
    set_mark_raw(mark_raw()->incr_age());
  }
}

#if INCLUDE_PARALLELGC

void oopDesc::pc_update_contents(ParCompactionManager* cm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_pc_update_pointers(this, cm);
  }
  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
}
#endif // INCLUDE_PARALLELGC

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
}

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
}

template <typename OopClosureType>
int oopDesc::oop_iterate_size(OopClosureType* cl) {
  Klass* k = klass();
  int size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
  return size;
}

template <typename OopClosureType>
int oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
  Klass* k = klass();
  int size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
  return size;
}

template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
  OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, klass());
}
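
// Example (informal sketch, assuming the closure supplies do_oop() overloads
// for both oop* and narrowOop* as the dispatch machinery expects, e.g. by
// deriving from BasicOopIterateClosure):
//
//   class CountOopsClosure : public BasicOopIterateClosure {
//    public:
//     int _count;
//     CountOopsClosure() : _count(0) {}
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//   };
//
//   CountOopsClosure cl;
//   obj->oop_iterate(&cl);  // visits every oop field (or array element) in obj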

bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
  return obj == NULL || obj->klass()->is_subtype_of(klass);
}

intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: the mark must be read into a local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

bool oopDesc::has_displaced_mark_raw() const {
  return mark_raw()->has_displaced_mark_helper();
}

markOop oopDesc::displaced_mark_raw() const {
  return mark_raw()->displaced_mark_helper();
}

void oopDesc::set_displaced_mark_raw(markOop m) {
  mark_raw()->set_displaced_mark_helper(m);
}

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP