/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc/shared/ageTable.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

inline void update_barrier_set(void* p, oop v, bool release = false) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v, release);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    // always_do_update_barrier == false =>
    // Either we are at a safepoint (in GC) or CMS is not used. In both
    // cases it's unnecessary to mark the card as dirty with release semantics.
    update_barrier_set((void*)p, v, false /* release */);  // cast away type
  }
}

template <class T> void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);   // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  // When using CMS we must mark the card corresponding to p as dirty
  // with release semantics to prevent CMS from seeing the dirty card but
  // not the new value v at p due to reordering of the two
  // stores. Note that CMS has a concurrent precleaning phase, where
  // it reads the card table while the Java threads are running.
  update_barrier_set((void*)p, v, true /* release */);    // cast away type
}

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
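// Note: unlike oop_store() above, no pre- or post-write barrier is applied
// here; callers (typically GC code writing into newly allocated space) are
// responsible for any card marking that may be required.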
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}

Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null() const volatile {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null_acquire() const volatile {
  if (UseCompressedClassPointers) {
    // Workaround for non-const load_acquire parameter.
    const volatile narrowKlass* addr = &_metadata._compressed_klass;
    volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
    return Klass::decode_klass(OrderAccess::load_acquire(xaddr));
  } else {
    return (Klass*)OrderAccess::load_ptr_acquire(&_metadata._klass);
  }
}

Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS and will not work with
  // UseCompressedClassPointers
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}

narrowKlass* oopDesc::compressed_klass_addr() {
  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  return &_metadata._compressed_klass;
}

#define CHECK_SET_KLASS(k)                                                \
  do {                                                                    \
    assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass");      \
    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
  } while (0)

void oopDesc::set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = Klass::encode_klass_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

void oopDesc::release_set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    OrderAccess::release_store(compressed_klass_addr(),
                               Klass::encode_klass_not_null(k));
  } else {
    OrderAccess::release_store_ptr(klass_addr(), k);
  }
}

#undef CHECK_SET_KLASS

int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

void oopDesc::set_klass_gap(int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
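  // The klass word was overwritten by set_klass_to_list_ptr() above, so what
  // is decoded here is the stored list oop, not a Klass*.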
  if (UseCompressedClassPointers) {
    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}

bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

int oopDesc::size() {
  return size_given_klass(klass());
}

int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);

      // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // ParNew also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //     is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()                   // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseConcMarkSweepGC) ||
               (is_objArray()  && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %d", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
  return s;
}
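// Worked example for the array branch above (illustrative, assuming a 64-bit
// VM with compressed class pointers): an int[] has a 16-byte header and a
// log2 element size of 2, so a length-10 array works out to
//   align_up(16 + (10 << 2), MinObjAlignmentInBytes) / HeapWordSize
//   = align_up(56, 8) / 8 = 7 words.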

bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

void*      oopDesc::field_base(int offset)          const { return (void*)&((char*)this)[offset]; }

jbyte*     oopDesc::byte_field_addr(int offset)     const { return (jbyte*)    field_base(offset); }
jchar*     oopDesc::char_field_addr(int offset)     const { return (jchar*)    field_base(offset); }
jboolean*  oopDesc::bool_field_addr(int offset)     const { return (jboolean*) field_base(offset); }
jint*      oopDesc::int_field_addr(int offset)      const { return (jint*)     field_base(offset); }
jshort*    oopDesc::short_field_addr(int offset)    const { return (jshort*)   field_base(offset); }
jlong*     oopDesc::long_field_addr(int offset)     const { return (jlong*)    field_base(offset); }
jfloat*    oopDesc::float_field_addr(int offset)    const { return (jfloat*)   field_base(offset); }
jdouble*   oopDesc::double_field_addr(int offset)   const { return (jdouble*)  field_base(offset); }
Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }

template <class T> T* oopDesc::obj_field_addr(int offset) const { return (T*) field_base(offset); }
address*   oopDesc::address_field_addr(int offset)  const { return (address*) field_base(offset); }


// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base.  Saving the check for null can save instructions
// in inner GC loops so these are separated.

inline bool check_obj_alignment(oop obj) {
  return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
}

oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
  return result;
}

oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}
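// Illustrative decoding above: with heap base B and shift 3, a narrow value n
// decodes to B + (n << 3); with 8-byte object alignment this lets a 32-bit
// narrow oop address up to 32 GB of heap.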

narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

// Load and decode an oop out of the Java heap into a wide oop.
oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Encode and store a heap oop.
void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}

// Encode and store a heap oop allowing for null.
void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}

// Store heap oop as is for volatile fields.
void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) {
  OrderAccess::release_store(p, v);
}

void oopDesc::release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}
void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}


// These functions are only used to exchange oop fields in instances,
// not headers.
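// Only the conditional pre-write barrier is applied here; any post-write
// barrier (card marking) is left to the caller.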
oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}

oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                         volatile HeapWord *dest,
                                         oop compare_value,
                                         bool prebarrier) {
  if (UseCompressedOops) {
    if (prebarrier) {
      update_barrier_set_pre((narrowOop*)dest, exchange_value);
    }
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    if (prebarrier) {
      update_barrier_set_pre((oop*)dest, exchange_value);
    }
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}

// In order to put or get a field out of an instance, must first check
// if the field has been compressed and uncompress it.
oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}

void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}

void oopDesc::obj_field_put_raw(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}
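// obj_field_put_raw() stores without any GC barrier; obj_field_put_volatile()
// below additionally brackets the (barriered) store with release and fence.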
void oopDesc::obj_field_put_volatile(int offset, oop value) {
  OrderAccess::release();
  obj_field_put(offset, value);
  OrderAccess::fence();
}

Metadata* oopDesc::metadata_field(int offset) const           { return *metadata_field_addr(offset);  }
void oopDesc::metadata_field_put(int offset, Metadata* value) { *metadata_field_addr(offset) = value; }

Metadata* oopDesc::metadata_field_acquire(int offset) const {
  return (Metadata*)OrderAccess::load_ptr_acquire(metadata_field_addr(offset));
}

void oopDesc::release_metadata_field_put(int offset, Metadata* value) {
  OrderAccess::release_store_ptr(metadata_field_addr(offset), value);
}

jbyte oopDesc::byte_field(int offset) const                   { return (jbyte) *byte_field_addr(offset);    }
void oopDesc::byte_field_put(int offset, jbyte contents)      { *byte_field_addr(offset) = (jint) contents; }

jchar oopDesc::char_field(int offset) const                   { return (jchar) *char_field_addr(offset);    }
void oopDesc::char_field_put(int offset, jchar contents)      { *char_field_addr(offset) = (jint) contents; }

jboolean oopDesc::bool_field(int offset) const                { return (jboolean) *bool_field_addr(offset); }
void oopDesc::bool_field_put(int offset, jboolean contents)   { *bool_field_addr(offset) = (((jint) contents) & 1); }

jint oopDesc::int_field(int offset) const                     { return *int_field_addr(offset);     }
void oopDesc::int_field_put(int offset, jint contents)        { *int_field_addr(offset) = contents; }

jshort oopDesc::short_field(int offset) const                 { return (jshort) *short_field_addr(offset);   }
void oopDesc::short_field_put(int offset, jshort contents)    { *short_field_addr(offset) = (jint) contents; }

jlong oopDesc::long_field(int offset) const                   { return *long_field_addr(offset);     }
void oopDesc::long_field_put(int offset, jlong contents)      { *long_field_addr(offset) = contents; }

jfloat oopDesc::float_field(int offset) const                 { return *float_field_addr(offset);     }
void oopDesc::float_field_put(int offset, jfloat contents)    { *float_field_addr(offset) = contents; }

jdouble oopDesc::double_field(int offset) const               { return *double_field_addr(offset);     }
void oopDesc::double_field_put(int offset, jdouble contents)  { *double_field_addr(offset) = contents; }

address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}
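// The *_field_acquire() / release_*_field_put() pairs below provide the usual
// release/acquire guarantee: a reader that observes the stored value also
// observes memory writes that preceded the release store.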

jbyte oopDesc::byte_field_acquire(int offset) const                   { return OrderAccess::load_acquire(byte_field_addr(offset));     }
void oopDesc::release_byte_field_put(int offset, jbyte contents)      { OrderAccess::release_store(byte_field_addr(offset), contents); }

jchar oopDesc::char_field_acquire(int offset) const                   { return OrderAccess::load_acquire(char_field_addr(offset));     }
void oopDesc::release_char_field_put(int offset, jchar contents)      { OrderAccess::release_store(char_field_addr(offset), contents); }

jboolean oopDesc::bool_field_acquire(int offset) const                { return OrderAccess::load_acquire(bool_field_addr(offset));           }
void oopDesc::release_bool_field_put(int offset, jboolean contents)   { OrderAccess::release_store(bool_field_addr(offset), (contents & 1)); }

jint oopDesc::int_field_acquire(int offset) const                     { return OrderAccess::load_acquire(int_field_addr(offset));     }
void oopDesc::release_int_field_put(int offset, jint contents)        { OrderAccess::release_store(int_field_addr(offset), contents); }

jshort oopDesc::short_field_acquire(int offset) const                 { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
void oopDesc::release_short_field_put(int offset, jshort contents)    { OrderAccess::release_store(short_field_addr(offset), contents);     }

jlong oopDesc::long_field_acquire(int offset) const                   { return OrderAccess::load_acquire(long_field_addr(offset));     }
void oopDesc::release_long_field_put(int offset, jlong contents)      { OrderAccess::release_store(long_field_addr(offset), contents); }

jfloat oopDesc::float_field_acquire(int offset) const                 { return OrderAccess::load_acquire(float_field_addr(offset));     }
void oopDesc::release_float_field_put(int offset, jfloat contents)    { OrderAccess::release_store(float_field_addr(offset), contents); }

jdouble oopDesc::double_field_acquire(int offset) const               { return OrderAccess::load_acquire(double_field_addr(offset));     }
void oopDesc::release_double_field_put(int offset, jdouble contents)  { OrderAccess::release_store(double_field_addr(offset), contents); }

address oopDesc::address_field_acquire(int offset) const              { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents);       }

bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

// used only for asserts and guarantees
inline bool oopDesc::is_oop(oop obj, bool ignore_mark_word) {
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (obj->mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}

// used only for asserts and guarantees
inline bool oopDesc::is_oop_or_null(oop obj, bool ignore_mark_word) {
  return obj == NULL ? true : is_oop(obj, ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

bool oopDesc::is_scavengable() const {
  return Universe::heap()->is_scavengable(this);
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}

// Used by scavengers
void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  assert(!is_archive_object(oop(this)) &&
         !is_archive_object(p),
         "forwarding archive object");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}

#if INCLUDE_ALL_GCS
oop oopDesc::forward_to_atomic(oop p) {
  markOop oldMark = mark();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversible");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  while (!oldMark->is_marked()) {
    curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  return forwardee();
}
#endif

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}

// The following method needs to be MT safe.
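// If the mark word is displaced (e.g. the object is locked), the age bits
// live in the displaced mark, hence the has_displaced_mark() check below.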
uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}

#if INCLUDE_ALL_GCS
void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
  klass()->oop_pc_follow_contents(this, cm);
}

void oopDesc::pc_update_contents(ParCompactionManager* cm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_pc_update_pointers(this, cm);
  }
  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
}

void oopDesc::ps_push_contents(PSPromotionManager* pm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_ps_push_contents(this, pm);
  }
  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
}
#endif // INCLUDE_ALL_GCS

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                 \
                                                                    \
void oopDesc::oop_iterate(OopClosureType* blk) {                    \
  klass()->oop_oop_iterate##nv_suffix(this, blk);                   \
}                                                                   \
                                                                    \
void oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {      \
  klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);       \
}

#define OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)            \
                                                                    \
int oopDesc::oop_iterate_size(OopClosureType* blk) {                \
  Klass* k = klass();                                               \
  int size = size_given_klass(k);                                   \
  k->oop_oop_iterate##nv_suffix(this, blk);                         \
  return size;                                                      \
}                                                                   \
                                                                    \
int oopDesc::oop_iterate_size(OopClosureType* blk, MemRegion mr) {  \
  Klass* k = klass();                                               \
  int size = size_given_klass(k);                                   \
  k->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);             \
  return size;                                                      \
}

int oopDesc::oop_iterate_no_header(OopClosure* blk) {
  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate_size(&cl);
}

int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate_size(&cl, mr);
}

#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)       \
                                                                    \
inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {   \
  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);         \
}
#else
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
#endif // INCLUDE_ALL_GCS

#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)  \
  OOP_ITERATE_DEFN(OopClosureType, nv_suffix)               \
  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)          \
  OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)

ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)

intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed
  // Note: The mark must be read into local variable to avoid concurrent updates.
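  // If neither fast case applies, slow_identity_hash() computes (and may
  // install) the hash, possibly inflating the monitor if the object is locked.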
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP