/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc/shared/ageTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

markOop oopDesc::mark() const {
  return HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
}

markOop oopDesc::mark_raw() const {
  return _mark;
}

markOop* oopDesc::mark_addr_raw() const {
  return (markOop*) &_mark;
}

void oopDesc::set_mark(volatile markOop m) {
  HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}

void oopDesc::set_mark_raw(volatile markOop m) {
  _mark = m;
}

void oopDesc::release_set_mark(markOop m) {
  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}

markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
}

markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark);
}

void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}

void oopDesc::init_mark_raw() {
  set_mark_raw(markOopDesc::prototype_for_object(this));
}

Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null() const volatile {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null_acquire() const volatile {
  if (UseCompressedClassPointers) {
    // Workaround for non-const load_acquire parameter.
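    // This is a const member function, so &_metadata._compressed_klass yields
    // a pointer to const; the const_cast below strips that qualifier so the
    // address can be passed to OrderAccess::load_acquire, which takes a
    // non-const volatile pointer.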
    const volatile narrowKlass* addr = &_metadata._compressed_klass;
    volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
    return Klass::decode_klass(OrderAccess::load_acquire(xaddr));
  } else {
    return OrderAccess::load_acquire(&_metadata._klass);
  }
}

Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS; will not work with
  // UseCompressedClassPointers.
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}

narrowKlass* oopDesc::compressed_klass_addr() {
  assert(UseCompressedClassPointers, "only used with compressed klass pointers");
  return &_metadata._compressed_klass;
}

#define CHECK_SET_KLASS(k)                                                \
  do {                                                                    \
    assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass");      \
    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
  } while (0)

void oopDesc::set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = Klass::encode_klass_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

void oopDesc::release_set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    OrderAccess::release_store(compressed_klass_addr(),
                               Klass::encode_klass_not_null(k));
  } else {
    OrderAccess::release_store(klass_addr(), k);
  }
}

#undef CHECK_SET_KLASS

int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

void oopDesc::set_klass_gap(int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}

bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

int oopDesc::size() {
  return size_given_klass(klass());
}

int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.
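  //
  // Illustrative example (assuming a 64-bit VM, where LogHeapWordSize == 3):
  // a plain instance whose layout helper is 24 is 24 >> 3 == 3 heap words;
  // an int[] has a negative layout helper whose log2-element-size field is 2
  // (4-byte elements); a layout helper of zero (the neutral value) forces the
  // virtual oop_size() call below.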

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);
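      // Worked example (illustrative; assumes a 16-byte array header and an
      // 8-byte MinObjAlignmentInBytes): for an int[10], size_in_bytes is
      // (10 << 2) + 16 == 56, which is already 8-byte aligned, so
      // s == 56 / 8 == 7 heap words.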

      // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // ParNew also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //   is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()                 // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseConcMarkSweepGC) ||
               (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %d", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
  return s;
}

bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

void* oopDesc::field_addr_raw(int offset) const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
void* oopDesc::field_addr(int offset)     const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }

template <class T>
T* oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }

template <DecoratorSet decorators>
inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }

inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }

inline jbyte oopDesc::byte_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void  oopDesc::byte_field_put(int offset, jbyte value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jchar oopDesc::char_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void  oopDesc::char_field_put(int offset, jchar value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jboolean oopDesc::bool_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void     oopDesc::bool_field_put(int offset, jboolean value) { HeapAccess<>::store_at(as_oop(), offset, jboolean(value & 1)); }

inline jshort oopDesc::short_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void   oopDesc::short_field_put(int offset, jshort value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jint oopDesc::int_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::int_field_put(int offset, jint value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jlong oopDesc::long_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void  oopDesc::long_field_put(int offset, jlong value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jfloat oopDesc::float_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void   oopDesc::float_field_put(int offset, jfloat value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jdouble oopDesc::double_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void    oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
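
// Usage sketch (illustrative only; 'offset' stands for a field offset a
// caller would obtain elsewhere, e.g. from a fieldDescriptor): runtime code
// reads and writes instance fields through these accessors so that the
// Access API can apply whatever GC barriers the active collector requires:
//   jint v = obj->int_field(offset);
//   obj->int_field_put(offset, v + 1);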

bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

bool oopDesc::has_bias_pattern_raw() const {
  return mark_raw()->has_bias_pattern();
}

// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark_raw()->is_marked();
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark_raw()->is_marked();
}

// Used by scavengers
void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  assert(!is_archive_object(oop(this)) &&
         !is_archive_object(p),
         "forwarding archive object");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark_raw(m);
}

// Used by parallel scavengers
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark_raw(m, compare) == compare;
}

#if INCLUDE_ALL_GCS
oop oopDesc::forward_to_atomic(oop p) {
  markOop oldMark = mark_raw();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversible");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  while (!oldMark->is_marked()) {
    curMark = cas_set_mark_raw(forwardPtrMark, oldMark);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  return forwardee();
}
#endif
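
// Usage sketch (illustrative only, not taken from any particular collector):
// a parallel copying collector typically installs the forwarding pointer as
//   oop copy = /* allocate to-space and copy the object */;
//   oop winner = obj->forward_to_atomic(copy);
//   if (winner != NULL) {
//     // Another worker forwarded obj first; discard 'copy' and use 'winner'.
//   }
// forward_to_atomic() returns NULL when this thread's CAS succeeded, and the
// already-installed forwardee when another thread won the race.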

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  return (oop) mark_raw()->decode_pointer();
}

// The following method needs to be MT safe.
uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark_raw()) {
    return displaced_mark_raw()->age();
  } else {
    return mark_raw()->age();
  }
}

void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark_raw()) {
    set_displaced_mark_raw(displaced_mark_raw()->incr_age());
  } else {
    set_mark_raw(mark_raw()->incr_age());
  }
}

#if INCLUDE_ALL_GCS
void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
  klass()->oop_pc_follow_contents(this, cm);
}

void oopDesc::pc_update_contents(ParCompactionManager* cm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_pc_update_pointers(this, cm);
  }
  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
}

void oopDesc::ps_push_contents(PSPromotionManager* pm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_ps_push_contents(this, pm);
  }
  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
}
#endif // INCLUDE_ALL_GCS

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                 \
                                                                    \
void oopDesc::oop_iterate(OopClosureType* blk) {                    \
  klass()->oop_oop_iterate##nv_suffix(this, blk);                   \
}                                                                   \
                                                                    \
void oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {      \
  klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);       \
}

#define OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)            \
                                                                    \
int oopDesc::oop_iterate_size(OopClosureType* blk) {                \
  Klass* k = klass();                                               \
  int size = size_given_klass(k);                                   \
  k->oop_oop_iterate##nv_suffix(this, blk);                         \
  return size;                                                      \
}                                                                   \
                                                                    \
int oopDesc::oop_iterate_size(OopClosureType* blk, MemRegion mr) {  \
  Klass* k = klass();                                               \
  int size = size_given_klass(k);                                   \
  k->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);             \
  return size;                                                      \
}

int oopDesc::oop_iterate_no_header(OopClosure* blk) {
  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate_size(&cl);
}

int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate_size(&cl, mr);
}

#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)       \
                                                                    \
inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {   \
  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);         \
}
#else
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
#endif // INCLUDE_ALL_GCS

#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)  \
  OOP_ITERATE_DEFN(OopClosureType, nv_suffix)               \
  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)          \
  OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)

ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)

bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
  return obj == NULL || obj->klass()->is_subtype_of(klass);
}

intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: The mark must be read into a local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

bool oopDesc::has_displaced_mark_raw() const {
  return mark_raw()->has_displaced_mark_helper();
}

markOop oopDesc::displaced_mark_raw() const {
  return mark_raw()->displaced_mark_helper();
}

void oopDesc::set_displaced_mark_raw(markOop m) {
  mark_raw()->set_displaced_mark_helper(m);
}

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP