/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_OOP_INLINE_HPP
#define SHARE_OOPS_OOP_INLINE_HPP

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markWord.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

markWord oopDesc::mark() const {
  uintptr_t v = HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
  return markWord(v);
}

markWord oopDesc::mark_raw() const {
  return Atomic::load(&_mark);
}
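
// mark() above goes through the Access API, so a GC's barriers may apply
// to the load; mark_raw() bypasses them with a plain atomic load and is
// intended for callers that know no barrier semantics are needed (e.g.
// code running inside the GC itself).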

markWord* oopDesc::mark_addr_raw() const {
  return (markWord*) &_mark;
}

void oopDesc::set_mark(markWord m) {
  HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
}

void oopDesc::set_mark_raw(markWord m) {
  Atomic::store(m, &_mark);
}

void oopDesc::set_mark_raw(HeapWord* mem, markWord m) {
  *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
}

void oopDesc::release_set_mark(markWord m) {
  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
}

markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
  uintptr_t v = HeapAccess<>::atomic_cmpxchg_at(new_mark.value(), as_oop(), mark_offset_in_bytes(), old_mark.value());
  return markWord(v);
}

markWord oopDesc::cas_set_mark_raw(markWord new_mark, markWord old_mark, atomic_memory_order order) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark, order);
}
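
// Illustrative use of cas_set_mark (a sketch, not code from this file):
// a retry-style update that publishes a new mark only if no other thread
// changed it in the meantime. copy_set_hash is a markWord helper; the
// loop shape and the `hash` variable are assumptions of the example.
//
//   markWord old_mark = obj->mark();
//   markWord new_mark = old_mark.copy_set_hash(hash);
//   if (obj->cas_set_mark(new_mark, old_mark) == old_mark) {
//     // CAS succeeded; new_mark is now installed in the header.
//   }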

void oopDesc::init_mark() {
  set_mark(markWord::prototype_for_klass(klass()));
}

void oopDesc::init_mark_raw() {
  set_mark_raw(markWord::prototype_for_klass(klass()));
}

Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null() const volatile {
  if (UseCompressedClassPointers) {
    return CompressedKlassPointers::decode(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null_acquire() const volatile {
  if (UseCompressedClassPointers) {
    // Workaround for non-const load_acquire parameter.
    const volatile narrowKlass* addr = &_metadata._compressed_klass;
    volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
    return CompressedKlassPointers::decode(OrderAccess::load_acquire(xaddr));
  } else {
    return OrderAccess::load_acquire(&_metadata._klass);
  }
}
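
// The acquire load above pairs with the release store in
// release_set_klass() below: a reader that observes the published klass
// also observes all stores that initialized the object before the klass
// was set.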

Klass** oopDesc::klass_addr(HeapWord* mem) {
  // Only used internally and with CMS, and will not work with
  // UseCompressedClassPointers (see the assert below).
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  ByteSize offset = byte_offset_of(oopDesc, _metadata._klass);
  return (Klass**) (((char*)mem) + in_bytes(offset));
}

narrowKlass* oopDesc::compressed_klass_addr(HeapWord* mem) {
  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  ByteSize offset = byte_offset_of(oopDesc, _metadata._compressed_klass);
  return (narrowKlass*) (((char*)mem) + in_bytes(offset));
}

Klass** oopDesc::klass_addr() {
  return klass_addr((HeapWord*)this);
}

narrowKlass* oopDesc::compressed_klass_addr() {
  return compressed_klass_addr((HeapWord*)this);
}

#define CHECK_SET_KLASS(k)                                                \
  do {                                                                    \
    assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass");      \
    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
  } while (0)

void oopDesc::set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = CompressedKlassPointers::encode_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) {
  CHECK_SET_KLASS(klass);
  if (UseCompressedClassPointers) {
    OrderAccess::release_store(compressed_klass_addr(mem),
                               CompressedKlassPointers::encode_not_null(klass));
  } else {
    OrderAccess::release_store(klass_addr(mem), klass);
  }
}

#undef CHECK_SET_KLASS

int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

void oopDesc::set_klass_gap(HeapWord* mem, int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
  }
}

void oopDesc::set_klass_gap(int v) {
  set_klass_gap((HeapWord*)this, v);
}

void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}

bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

int oopDesc::size()  {
  return size_given_klass(klass());
}

int oopDesc::size_given_klass(Klass* klass)  {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.
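  //
  // A worked example of the encoding (assuming 64-bit heap words, so
  // LogHeapWordSize == 3): an instance with lh == 24 is 24 >> 3 == 3 words;
  // an int array of length 10, with log2 element size 2 and a 16-byte
  // header, is (10 << 2) + 16 == 56 bytes, which after alignment to 8-byte
  // object boundaries is 7 words.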

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);

      // UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() && is_objArray() && is_forwarded() && (UseParallelGC || UseG1GC)),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %d", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
  return s;
}

bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

void*    oopDesc::field_addr_raw(int offset)     const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
void*    oopDesc::field_addr(int offset)         const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }

template <class T>
T*       oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }

template <typename T>
size_t   oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }

template <DecoratorSet decorators>
inline oop  oopDesc::obj_field_access(int offset) const             { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop  oopDesc::obj_field(int offset) const                    { return HeapAccess<>::oop_load_at(as_oop(), offset);  }

inline void oopDesc::obj_field_put(int offset, oop value)           { HeapAccess<>::oop_store_at(as_oop(), offset, value); }

inline jbyte oopDesc::byte_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void  oopDesc::byte_field_put(int offset, jbyte value)       { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jchar oopDesc::char_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void  oopDesc::char_field_put(int offset, jchar value)       { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jboolean oopDesc::bool_field(int offset) const               { return HeapAccess<>::load_at(as_oop(), offset);                }
inline void     oopDesc::bool_field_put(int offset, jboolean value) { HeapAccess<>::store_at(as_oop(), offset, jboolean(value & 1)); }

inline jshort oopDesc::short_field(int offset) const                { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void   oopDesc::short_field_put(int offset, jshort value)    { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jint oopDesc::int_field(int offset) const                    { return HeapAccess<>::load_at(as_oop(), offset);  }
inline jint oopDesc::int_field_raw(int offset) const                { return RawAccess<>::load_at(as_oop(), offset);   }
inline void oopDesc::int_field_put(int offset, jint value)          { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jlong oopDesc::long_field(int offset) const                  { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void  oopDesc::long_field_put(int offset, jlong value)       { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jfloat oopDesc::float_field(int offset) const                { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void   oopDesc::float_field_put(int offset, jfloat value)    { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jdouble oopDesc::double_field(int offset) const              { return HeapAccess<>::load_at(as_oop(), offset);  }
inline void    oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }

bool oopDesc::is_locked() const {
  return mark().is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark().is_unlocked();
}

bool oopDesc::has_bias_pattern() const {
  return mark().has_bias_pattern();
}

bool oopDesc::has_bias_pattern_raw() const {
  return mark_raw().has_bias_pattern();
}

// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark_raw().is_marked();
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark_raw().is_marked();
}

// Used by scavengers
void oopDesc::forward_to(oop p) {
  verify_forwardee(p);
  markWord m = markWord::encode_pointer_as_mark(p);
  assert(m.decode_pointer() == p, "encoding must be reversible");
  set_mark_raw(m);
}

// Used by parallel scavengers
bool oopDesc::cas_forward_to(oop p, markWord compare, atomic_memory_order order) {
  verify_forwardee(p);
  markWord m = markWord::encode_pointer_as_mark(p);
  assert(m.decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark_raw(m, compare, order) == compare;
}

oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
  verify_forwardee(p);
  markWord m = markWord::encode_pointer_as_mark(p);
  assert(m.decode_pointer() == p, "encoding must be reversible");
  markWord old_mark = cas_set_mark_raw(m, compare, order);
  if (old_mark == compare) {
    return NULL;
  } else {
    return (oop)old_mark.decode_pointer();
  }
}
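
// Illustrative use of forward_to_atomic by a copying collector (a sketch;
// the surrounding allocation and copy logic is assumed, not from this file):
//
//   oop copy = ...;  // new location of obj, already populated
//   oop winner = obj->forward_to_atomic(copy, old_mark);
//   if (winner != NULL) {
//     // Another thread forwarded obj first; use winner, discard our copy.
//   }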

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  return (oop) mark_raw().decode_pointer();
}

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee_acquire() const {
  return (oop) OrderAccess::load_acquire(&_mark).decode_pointer();
}

// The following method needs to be MT safe.
uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark_raw()) {
    return displaced_mark_raw().age();
  } else {
    return mark_raw().age();
  }
}

void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark_raw()) {
    set_displaced_mark_raw(displaced_mark_raw().incr_age());
  } else {
    set_mark_raw(mark_raw().incr_age());
  }
}

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
}

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
}

template <typename OopClosureType>
int oopDesc::oop_iterate_size(OopClosureType* cl) {
  Klass* k = klass();
  int size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
  return size;
}

template <typename OopClosureType>
int oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
  Klass* k = klass();
  int size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
  return size;
}

template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
  OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, klass());
}
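
// Illustrative use of the iteration API (a sketch; MyClosure is a
// hypothetical oop-visiting closure type, not defined in this file):
//
//   MyClosure cl;
//   obj->oop_iterate(&cl);                   // visit each oop field of obj
//   int words = obj->oop_iterate_size(&cl);  // same, and return obj's size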

bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
  return obj == NULL || obj->klass()->is_subtype_of(klass);
}

intptr_t oopDesc::identity_hash() {
  // Fast case: if the object is unlocked and the hash value is set, no locking is needed.
  // Note: The mark must be read into a local variable to avoid concurrent updates.
  markWord mrk = mark();
  if (mrk.is_unlocked() && !mrk.has_no_hash()) {
    return mrk.hash();
  } else if (mrk.is_marked()) {
    return mrk.hash();
  } else {
    return slow_identity_hash();
  }
}

bool oopDesc::has_displaced_mark_raw() const {
  return mark_raw().has_displaced_mark_helper();
}

markWord oopDesc::displaced_mark_raw() const {
  return mark_raw().displaced_mark_helper();
}

void oopDesc::set_displaced_mark_raw(markWord m) {
  mark_raw().set_displaced_mark_helper(m);
}

// Supports deferred calling of obj->klass().
class DeferredObjectToKlass {
  const oopDesc* _obj;

public:
  DeferredObjectToKlass(const oopDesc* obj) : _obj(obj) {}

  // Implicitly convertible to const Klass*.
  operator const Klass*() const {
    return _obj->klass();
  }
};
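
// The deferral works because the proxy is passed around by value and
// _obj->klass() runs only if some path actually converts it to
// const Klass*. A sketch (g is a hypothetical template, not from this file):
//
//   template <typename KlassProxy>
//   bool g(KlassProxy proxy) {
//     if (fast_path()) return false;  // klass() is never called
//     const Klass* k = proxy;         // conversion triggers _obj->klass()
//     ...
//   }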

bool oopDesc::mark_must_be_preserved() const {
  return mark_must_be_preserved(mark_raw());
}

bool oopDesc::mark_must_be_preserved(markWord m) const {
  // There's a circular dependency between oop.inline.hpp and
  // markWord.inline.hpp because markWord::must_be_preserved wants to call
  // oopDesc::klass(). This could be solved by calling klass() here. However,
  // not all paths inside must_be_preserved call klass(). Defer the call until
  // the klass is actually needed.
  return m.must_be_preserved(DeferredObjectToKlass(this));
}

bool oopDesc::mark_must_be_preserved_for_promotion_failure(markWord m) const {
  return m.must_be_preserved_for_promotion_failure(DeferredObjectToKlass(this));
}

#endif // SHARE_OOPS_OOP_INLINE_HPP