/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc/shared/collectedHeap.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

markOop oopDesc::mark() const {
  return HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
}

markOop oopDesc::mark_raw() const {
  return _mark;
}

markOop* oopDesc::mark_addr_raw() const {
  return (markOop*) &_mark;
}

void oopDesc::set_mark(volatile markOop m) {
  HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}

void oopDesc::set_mark_raw(volatile markOop m) {
  _mark = m;
}

void oopDesc::release_set_mark(markOop m) {
  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}

markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
}

markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark);
}

void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}

void oopDesc::init_mark_raw() {
  set_mark_raw(markOopDesc::prototype_for_object(this));
}

Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null() const volatile {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null_acquire() const volatile {
  if (UseCompressedClassPointers) {
    // Workaround for non-const load_acquire parameter.
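    // The acquire load below pairs with the release store in
    // release_set_klass(): a reader that observes the klass pointer is
    // guaranteed to also observe the stores that initialized it.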
    const volatile narrowKlass* addr = &_metadata._compressed_klass;
    volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
    return Klass::decode_klass(OrderAccess::load_acquire(xaddr));
  } else {
    return OrderAccess::load_acquire(&_metadata._klass);
  }
}

Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS; will not work with
  // UseCompressedClassPointers
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}

narrowKlass* oopDesc::compressed_klass_addr() {
  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  return &_metadata._compressed_klass;
}

#define CHECK_SET_KLASS(k)                                                \
  do {                                                                    \
    assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass");      \
    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
  } while (0)

void oopDesc::set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = Klass::encode_klass_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

void oopDesc::release_set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    OrderAccess::release_store(compressed_klass_addr(),
                               Klass::encode_klass_not_null(k));
  } else {
    OrderAccess::release_store(klass_addr(), k);
  }
}

#undef CHECK_SET_KLASS

int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

void oopDesc::set_klass_gap(int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);  // may be NULL (ParNew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}

bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

int oopDesc::size() {
  return size_given_klass(klass());
}

int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size. For instances, this is positive and equal to the
  // size. For arrays, this is negative and provides log2 of the
  // array element size. For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead. So the speed here is equal in importance to the
  // speed of allocation.
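  //
  // A worked example of the array case (assuming a 64-bit VM with
  // compressed class pointers, where an int[] has a 16-byte header and
  // log2_element_size == 2): for a length-10 int[],
  //   size_in_bytes = (10 << 2) + 16 = 56, already 8-byte aligned,
  // so s = 56 / 8 = 7 heap words.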

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays. We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);

      // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // ParNew also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //   is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()                 // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseConcMarkSweepGC) ||
               (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
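      // (A neutral layout helper carries no size information at all, so
      // only the klass's virtual oop_size() can compute the object size.)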
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %d", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
  return s;
}

bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

void* oopDesc::field_addr_raw(int offset) const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
void* oopDesc::field_addr(int offset)     const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }

template <class T>
T* oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }

template <DecoratorSet decorators>
inline oop  oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop  oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }

inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }

inline jbyte oopDesc::byte_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void  oopDesc::byte_field_put(int offset, jbyte value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jchar oopDesc::char_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void  oopDesc::char_field_put(int offset, jchar value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jboolean oopDesc::bool_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void     oopDesc::bool_field_put(int offset, jboolean value) { HeapAccess<>::store_at(as_oop(), offset, jboolean(value & 1)); }

inline jshort oopDesc::short_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void   oopDesc::short_field_put(int offset, jshort value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jint oopDesc::int_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::int_field_put(int offset, jint value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jlong oopDesc::long_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void  oopDesc::long_field_put(int offset, jlong value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jfloat oopDesc::float_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void   oopDesc::float_field_put(int offset, jfloat value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jdouble oopDesc::double_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void    oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }

bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

bool oopDesc::has_bias_pattern_raw() const {
  return mark_raw()->has_bias_pattern();
}

// Used only for markSweep, scavenging
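// A mark is "marked" when both of its low lock bits are set
// (markOopDesc::marked_value); GCs reuse the remaining bits of such a
// mark to hold the forwarding pointer, which is why is_forwarded()
// below is implemented with the same is_marked() test.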
bool oopDesc::is_gc_marked() const {
  return mark_raw()->is_marked();
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark_raw()->is_marked();
}

// Used by scavengers
void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  assert(!is_archive_object(oop(this)) &&
         !is_archive_object(p),
         "forwarding archive object");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark_raw(m);
}

// Used by parallel scavengers
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark_raw(m, compare) == compare;
}

#if INCLUDE_ALL_GCS
oop oopDesc::forward_to_atomic(oop p) {
  markOop oldMark = mark_raw();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversible");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  while (!oldMark->is_marked()) {
    curMark = cas_set_mark_raw(forwardPtrMark, oldMark);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  return forwardee();
}
#endif

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  return (oop) mark_raw()->decode_pointer();
}

// The following method needs to be MT safe.
uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark_raw()) {
    return displaced_mark_raw()->age();
  } else {
    return mark_raw()->age();
  }
}

void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark_raw()) {
    set_displaced_mark_raw(displaced_mark_raw()->incr_age());
  } else {
    set_mark_raw(mark_raw()->incr_age());
  }
}

#if INCLUDE_ALL_GCS
void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
  klass()->oop_pc_follow_contents(this, cm);
}

void oopDesc::pc_update_contents(ParCompactionManager* cm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_pc_update_pointers(this, cm);
  }
  // Else skip it. The TypeArrayKlass in the header never needs scavenging.
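  // (A typeArray body holds only primitives, never oops, so there are no
  // pointers to update beyond the header.)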
}

void oopDesc::ps_push_contents(PSPromotionManager* pm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_ps_push_contents(this, pm);
  }
  // Else skip it. The TypeArrayKlass in the header never needs scavenging.
}
#endif // INCLUDE_ALL_GCS

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                 \
                                                                    \
void oopDesc::oop_iterate(OopClosureType* blk) {                    \
  klass()->oop_oop_iterate##nv_suffix(this, blk);                   \
}                                                                   \
                                                                    \
void oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {      \
  klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);       \
}

#define OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)            \
                                                                    \
int oopDesc::oop_iterate_size(OopClosureType* blk) {                \
  Klass* k = klass();                                               \
  int size = size_given_klass(k);                                   \
  k->oop_oop_iterate##nv_suffix(this, blk);                         \
  return size;                                                      \
}                                                                   \
                                                                    \
int oopDesc::oop_iterate_size(OopClosureType* blk, MemRegion mr) {  \
  Klass* k = klass();                                               \
  int size = size_given_klass(k);                                   \
  k->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);             \
  return size;                                                      \
}

int oopDesc::oop_iterate_no_header(OopClosure* blk) {
  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate_size(&cl);
}

int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate_size(&cl, mr);
}

#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)       \
                                                                    \
inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {   \
  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);         \
}
#else
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
#endif // INCLUDE_ALL_GCS

#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)          \
  OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                       \
  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)                  \
  OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)

ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)

bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
  return obj == NULL || obj->klass()->is_subtype_of(klass);
}

intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: The mark must be read into a local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

bool oopDesc::has_displaced_mark_raw() const {
  return mark_raw()->has_displaced_mark_helper();
}

markOop oopDesc::displaced_mark_raw() const {
  return mark_raw()->displaced_mark_helper();
}

void oopDesc::set_displaced_mark_raw(markOop m) {
  mark_raw()->set_displaced_mark_helper(m);
}

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP