/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc/shared/ageTable.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"

inline void update_barrier_set(void* p, oop v, bool release = false) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v, release);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    // always_do_update_barrier == false =>
    // Either we are at a safepoint (in GC) or CMS is not used. In both
    // cases it's unnecessary to mark the card as dirty with release semantics.
    update_barrier_set((void*)p, v, false /* release */);  // cast away type
  }
}

template <class T> void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);   // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  // When using CMS we must mark the card corresponding to p as dirty
  // with release semantics, to prevent CMS from seeing the dirty card
  // without also seeing the new value v at p, due to reordering of the
  // two stores.  Note that CMS has a concurrent precleaning phase, where
  // it reads the card table while the Java threads are running.
  update_barrier_set((void*)p, v, true /* release */);    // cast away type
}
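
// Illustrative sketch (commented out; 'f_offset' and 'v' are hypothetical):
// a reference store from runtime code goes through oop_store() rather than
// a raw "*addr = v", so that both the pre-write barrier (needed by CMS and
// G1) and the card-marking post-barrier run. obj_field_put() further down
// in this file is the canonical caller and picks the narrowOop or oop
// flavor based on UseCompressedOops:
//
//   UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(f_offset), v)
//                     : oop_store(obj_field_addr<oop>(f_offset),       v);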
// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}

Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedClassPointers) {
    return Klass::decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS; does not work with
  // UseCompressedClassPointers.
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}

narrowKlass* oopDesc::compressed_klass_addr() {
  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  return &_metadata._compressed_klass;
}

void oopDesc::set_klass(Klass* k) {
  // since klasses are promoted no store check is needed
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = Klass::encode_klass_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

void oopDesc::set_klass_gap(int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}

bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

int oopDesc::size() {
  return size_given_klass(klass());
}
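
// Worked example (illustrative figures, assuming 64-bit heap words and
// compressed class pointers, where a Java int[] has a 16-byte header):
// the layout helper for int[] encodes log2_element_size == 2 and
// header_size == 16, so an array of length 10 sizes to
// (10 << 2) + 16 = 56 bytes, which rounds to 56 / 8 = 7 heap words.
// size_given_klass() below performs exactly this arithmetic for the
// array case.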
int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, scale (shift) it by the element size,
      // add the header, and align to the object alignment.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0.  An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop.  Making the reference volatile prohibits this.
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.  Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array.  This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // ParNew also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table.  Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //   is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()                 // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseConcMarkSweepGC) ||
               (is_objArray()  && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}

bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }

jbyte*     oopDesc::byte_field_addr(int offset)     const { return (jbyte*)    field_base(offset); }
jchar*     oopDesc::char_field_addr(int offset)     const { return (jchar*)    field_base(offset); }
jboolean*  oopDesc::bool_field_addr(int offset)     const { return (jboolean*) field_base(offset); }
jint*      oopDesc::int_field_addr(int offset)      const { return (jint*)     field_base(offset); }
jshort*    oopDesc::short_field_addr(int offset)    const { return (jshort*)   field_base(offset); }
jlong*     oopDesc::long_field_addr(int offset)     const { return (jlong*)    field_base(offset); }
jfloat*    oopDesc::float_field_addr(int offset)    const { return (jfloat*)   field_base(offset); }
jdouble*   oopDesc::double_field_addr(int offset)   const { return (jdouble*)  field_base(offset); }
Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }

template <class T> T* oopDesc::obj_field_addr(int offset) const { return (T*) field_base(offset); }
address* oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }


// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base.  Saving the check for null can save instructions
// in inner GC loops so these are separated.

inline bool check_obj_alignment(oop obj) {
  return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
}

oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
  return result;
}

oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}
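
// Illustrative sketch of the arithmetic above (the base and shift values
// here are hypothetical; the real ones come from Universe at VM startup):
// with narrow_oop_base == 0 and narrow_oop_shift == 3, the narrowOop value
// 0x00200000 decodes to the address 0x00200000 << 3 == 0x01000000, and
// encoding that address shifts it right back to 0x00200000.  The 3-bit
// shift is what lets a 32-bit narrowOop address up to a 32 GB heap with
// 8-byte object alignment.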
narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

// Load and decode an oop out of the Java heap into a wide oop.
oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Encode and store a heap oop.
void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}

// Encode and store a heap oop allowing for null.
void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}

// Store heap oop as is for volatile fields.
void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) {
  OrderAccess::release_store(p, v);
}

void oopDesc::release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}
void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}
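
// Sketch of the intended pairing (illustrative, not a definition from this
// file): a release_encode_store_heap_oop() by a writer thread is meant to
// be observed through a matching load-acquire on the reader side (see
// obj_field_acquire() below), so that everything the writer published
// before the store is visible once the reader sees the new reference.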
// These functions are only used to exchange oop fields in instances,
// not headers.
oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}

oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                         volatile HeapWord *dest,
                                         oop compare_value,
                                         bool prebarrier) {
  if (UseCompressedOops) {
    if (prebarrier) {
      update_barrier_set_pre((narrowOop*)dest, exchange_value);
    }
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    if (prebarrier) {
      update_barrier_set_pre((oop*)dest, exchange_value);
    }
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}

// To get or put an oop field of an instance, first check whether oops are
// compressed and encode/decode the value accordingly.
oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}

void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}
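
// Usage sketch for atomic_compare_exchange_oop() above (illustrative;
// 'holder', 'old_v' and 'new_v' are hypothetical): callers that need a
// lock-free field update CAS on the field address and check whether the
// returned previous value was the expected one.  The prebarrier flag asks
// for the pre-write barrier to run before the swap, which concurrent
// collectors (CMS/G1) require.
//
//   volatile HeapWord* addr = ...;  // address of the oop field in 'holder'
//   oop prev = oopDesc::atomic_compare_exchange_oop(new_v, addr, old_v,
//                                                   true /* prebarrier */);
//   bool swapped = (prev == old_v);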
void oopDesc::obj_field_put_raw(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}

void oopDesc::obj_field_put_volatile(int offset, oop value) {
  OrderAccess::release();
  obj_field_put(offset, value);
  OrderAccess::fence();
}

Metadata* oopDesc::metadata_field(int offset) const           { return *metadata_field_addr(offset);  }
void oopDesc::metadata_field_put(int offset, Metadata* value) { *metadata_field_addr(offset) = value; }

jbyte oopDesc::byte_field(int offset) const                   { return (jbyte) *byte_field_addr(offset);    }
void oopDesc::byte_field_put(int offset, jbyte contents)      { *byte_field_addr(offset) = (jint) contents; }

jchar oopDesc::char_field(int offset) const                   { return (jchar) *char_field_addr(offset);    }
void oopDesc::char_field_put(int offset, jchar contents)      { *char_field_addr(offset) = (jint) contents; }

jboolean oopDesc::bool_field(int offset) const                { return (jboolean) *bool_field_addr(offset); }
void oopDesc::bool_field_put(int offset, jboolean contents)   { *bool_field_addr(offset) = (((jint) contents) & 1); }

jint oopDesc::int_field(int offset) const                     { return *int_field_addr(offset);     }
void oopDesc::int_field_put(int offset, jint contents)        { *int_field_addr(offset) = contents; }

jshort oopDesc::short_field(int offset) const                 { return (jshort) *short_field_addr(offset);   }
void oopDesc::short_field_put(int offset, jshort contents)    { *short_field_addr(offset) = (jint) contents; }

jlong oopDesc::long_field(int offset) const                   { return *long_field_addr(offset);     }
void oopDesc::long_field_put(int offset, jlong contents)      { *long_field_addr(offset) = contents; }

jfloat oopDesc::float_field(int offset) const                 { return *float_field_addr(offset);     }
void oopDesc::float_field_put(int offset, jfloat contents)    { *float_field_addr(offset) = contents; }

jdouble oopDesc::double_field(int offset) const               { return *double_field_addr(offset);     }
void oopDesc::double_field_put(int offset, jdouble contents)  { *double_field_addr(offset) = contents; }

address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
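
// Sketch of the ordering contract in obj_field_put_volatile() above
// (illustrative, not normative): a Java volatile reference store maps
// onto it, where the leading OrderAccess::release() keeps earlier memory
// accesses from sinking below the store and the trailing
// OrderAccess::fence() keeps later loads from floating above it, which
// together match the JMM's volatile-store semantics.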
void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

jbyte oopDesc::byte_field_acquire(int offset) const                   { return OrderAccess::load_acquire(byte_field_addr(offset));     }
void oopDesc::release_byte_field_put(int offset, jbyte contents)      { OrderAccess::release_store(byte_field_addr(offset), contents); }

jchar oopDesc::char_field_acquire(int offset) const                   { return OrderAccess::load_acquire(char_field_addr(offset));     }
void oopDesc::release_char_field_put(int offset, jchar contents)      { OrderAccess::release_store(char_field_addr(offset), contents); }

jboolean oopDesc::bool_field_acquire(int offset) const                { return OrderAccess::load_acquire(bool_field_addr(offset));           }
void oopDesc::release_bool_field_put(int offset, jboolean contents)   { OrderAccess::release_store(bool_field_addr(offset), (contents & 1)); }

jint oopDesc::int_field_acquire(int offset) const                     { return OrderAccess::load_acquire(int_field_addr(offset));     }
void oopDesc::release_int_field_put(int offset, jint contents)        { OrderAccess::release_store(int_field_addr(offset), contents); }

jshort oopDesc::short_field_acquire(int offset) const                 { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
void oopDesc::release_short_field_put(int offset, jshort contents)    { OrderAccess::release_store(short_field_addr(offset), contents);     }

jlong oopDesc::long_field_acquire(int offset) const                   { return OrderAccess::load_acquire(long_field_addr(offset));     }
void oopDesc::release_long_field_put(int offset, jlong contents)      { OrderAccess::release_store(long_field_addr(offset), contents); }

jfloat oopDesc::float_field_acquire(int offset) const                 { return OrderAccess::load_acquire(float_field_addr(offset));     }
void oopDesc::release_float_field_put(int offset, jfloat contents)    { OrderAccess::release_store(float_field_addr(offset), contents); }

jdouble oopDesc::double_field_acquire(int offset) const               { return OrderAccess::load_acquire(double_field_addr(offset));     }
void oopDesc::release_double_field_put(int offset, jdouble contents)  { OrderAccess::release_store(double_field_addr(offset), contents); }

address oopDesc::address_field_acquire(int offset) const              { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents);       }

bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}
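
// Background sketch (illustrative, not normative for this file): the
// predicates above decode the low bits of the mark word.  Roughly, the
// tag values are 01 for an unlocked object, 00 for a stack-locked object,
// 10 for an inflated monitor lock, 11 for a GC-marked (forwarded) object,
// and 101 (lock bits 01 plus the bias bit) for the biased pattern that
// has_bias_pattern() tests; see markOop.hpp for the authoritative layout.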
// used only for asserts
bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL.  If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}


// used only for asserts
bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

bool oopDesc::is_scavengable() const {
  return Universe::heap()->is_scavengable(this);
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}

// Used by scavengers
void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}

#if INCLUDE_ALL_GCS
oop oopDesc::forward_to_atomic(oop p) {
  markOop oldMark = mark();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversible");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  while (!oldMark->is_marked()) {
    curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  return forwardee();
}
#endif

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}
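
// Round-trip sketch (illustrative; 'old_obj' and 'new_obj' are
// hypothetical): during a copying collection the scavenger installs a
// forwarding pointer in the from-space copy's mark word and later asks
// for it back.
//
//   old_obj->forward_to(new_obj);                  // overwrite mark word
//   assert(old_obj->is_forwarded(), "marked now");
//   oop dest = old_obj->forwardee();               // == new_obj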
// The following method needs to be MT safe.
uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}

int oopDesc::ms_adjust_pointers() {
  debug_only(int check_size = size());
  int s = klass()->oop_ms_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

#if INCLUDE_ALL_GCS
void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
  klass()->oop_pc_follow_contents(this, cm);
}

void oopDesc::pc_update_contents(ParCompactionManager* cm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_pc_update_pointers(this, cm);
  }
  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
}

void oopDesc::ps_push_contents(PSPromotionManager* pm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_ps_push_contents(this, pm);
  }
  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
}
#endif // INCLUDE_ALL_GCS

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                 \
                                                                    \
void oopDesc::oop_iterate(OopClosureType* blk) {                    \
  klass()->oop_oop_iterate##nv_suffix(this, blk);                   \
}                                                                   \
                                                                    \
void oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {      \
  klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);       \
}

#define OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)            \
                                                                    \
int oopDesc::oop_iterate_size(OopClosureType* blk) {                \
  Klass* k = klass();                                               \
  int size = size_given_klass(k);                                   \
  k->oop_oop_iterate##nv_suffix(this, blk);                         \
  return size;                                                      \
}                                                                   \
                                                                    \
int oopDesc::oop_iterate_size(OopClosureType* blk, MemRegion mr) {  \
  Klass* k = klass();                                               \
  int size = size_given_klass(k);                                   \
  k->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);             \
  return size;                                                      \
}
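
// Expansion sketch (illustrative; 'SomeClosure' stands in for one of the
// closure types supplied by ALL_OOP_OOP_ITERATE_CLOSURES_1/2 below): with
// nv_suffix == _nv, OOP_ITERATE_DEFN produces roughly
//
//   void oopDesc::oop_iterate(SomeClosure* blk) {
//     klass()->oop_oop_iterate_nv(this, blk);
//   }
//
// The "nv" (non-virtual) suffix selects Klass entry points that the
// specialized closures can reach without a virtual do_oop dispatch.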
int oopDesc::oop_iterate_no_header(OopClosure* blk) {
  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate_size(&cl);
}

int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate_size(&cl, mr);
}

#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)       \
                                                                    \
inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {   \
  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);         \
}
#else
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
#endif // INCLUDE_ALL_GCS

#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)          \
  OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                       \
  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)                  \
  OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)

ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)

intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: the mark must be read into a local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP