/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline klassOop oopDesc::klass() const {
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline klassOop oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedOops, "only applicable to compressed headers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
}

inline oop* oopDesc::klass_addr() {
  // Only used internally and with CMS and will not work with
  // UseCompressedOops
  assert(!UseCompressedOops, "only supported with uncompressed oops");
  return (oop*) &_metadata._klass;
}

inline narrowOop* oopDesc::compressed_klass_addr() {
  assert(UseCompressedOops, "only called by compressed oops");
  return (narrowOop*) &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(klassOop k) {
  // since klasses are promoted no store check is needed
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
  if (UseCompressedOops) {
    oop_store_without_check(compressed_klass_addr(), (oop)k);
  } else {
    oop_store_without_check(klass_addr(), (oop) k);
  }
}

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedOops) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}
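// Illustrative note (not from the original source; 64-bit layout assumed):
// with UseCompressedOops the object header is the 8-byte mark word followed
// by a 4-byte narrow klass reference, so
//
//   klass_gap_offset_in_bytes() == klass_offset_in_bytes() + sizeof(narrowOop)
//                               == 8 + 4 == 12
//
// names the remaining 4-byte slot of the header word. That gap is either
// filler or holds the first instance field, which is why set_klass_gap()
// only writes anything in compressed-oops mode.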
inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedOops) {
    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (klassOop)k;
  }
}

inline void oopDesc::init_mark()                 { set_mark(markOopDesc::prototype_for_object(this)); }
inline Klass* oopDesc::blueprint()         const { return klass()->klass_part(); }

inline bool oopDesc::is_a(klassOop k)      const { return blueprint()->is_subtype_of(k); }

inline bool oopDesc::is_instance()          const { return blueprint()->oop_is_instance(); }
inline bool oopDesc::is_instanceRef()       const { return blueprint()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()             const { return blueprint()->oop_is_array(); }
inline bool oopDesc::is_objArray()          const { return blueprint()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()         const { return blueprint()->oop_is_typeArray(); }
inline bool oopDesc::is_javaArray()         const { return blueprint()->oop_is_javaArray(); }
inline bool oopDesc::is_symbol()            const { return blueprint()->oop_is_symbol(); }
inline bool oopDesc::is_klass()             const { return blueprint()->oop_is_klass(); }
inline bool oopDesc::is_thread()            const { return blueprint()->oop_is_thread(); }
inline bool oopDesc::is_method()            const { return blueprint()->oop_is_method(); }
inline bool oopDesc::is_constMethod()       const { return blueprint()->oop_is_constMethod(); }
inline bool oopDesc::is_methodData()        const { return blueprint()->oop_is_methodData(); }
inline bool oopDesc::is_constantPool()      const { return blueprint()->oop_is_constantPool(); }
inline bool oopDesc::is_constantPoolCache() const { return blueprint()->oop_is_constantPoolCache(); }
inline bool oopDesc::is_compiledICHolder()  const { return blueprint()->oop_is_compiledICHolder(); }

inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)    const { return (jbyte*)    field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)    const { return (jchar*)    field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)    const { return (jboolean*) field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)     const { return (jint*)     field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)   const { return (jshort*)   field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)    const { return (jlong*)    field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)   const { return (jfloat*)   field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset)  const { return (jdouble*)  field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*)  field_base(offset); }


// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).
inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base.  Saving the check for null can save instructions
// in inner GC loops so these are separated.

inline bool check_obj_alignment(oop obj) {
  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), "Address not aligned");
  return result;
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)          { return v; }

// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }
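// Worked example (illustrative; the base and shift values are assumed, they
// come from Universe at runtime): with narrow_oop_base() == 0x0000000700000000
// and narrow_oop_shift() == 3, an oop at 0x0000000700000100 encodes as
//
//   (0x0000000700000100 - 0x0000000700000000) >> 3  ==  0x20
//
// and decode_heap_oop_not_null reverses it:
//
//   0x0000000700000000 + ((uintptr_t)0x20 << 3)     ==  0x0000000700000100
//
// With a zero base the add/subtract disappears and the encoding is a pure
// shift, which is why the null checks are split out of the _not_null
// variants above: inner GC loops that know a value is non-null skip them.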
// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
                                            narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                      volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
                                                volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}


// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value) {
  if (UseCompressedOops) {
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
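// Usage sketch (illustrative; dest and the oops involved are hypothetical):
//
//   oop witness = oopDesc::atomic_compare_exchange_oop(new_value,
//                                                      (HeapWord*)dest,
//                                                      expected);
//   bool swapped = (witness == expected);
//
// Both functions return the previous field contents, decoded to a wide oop.
// Note that neither applies a GC write barrier here; callers storing
// reference values are responsible for the barrier-set updates themselves.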
// In order to put or get a field out of an instance, must first check
// if the field has been compressed and uncompress it.
inline oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}
inline void oopDesc::obj_field_raw_put(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents)  { *bool_field_addr(offset) = (jint) contents; }

inline jchar oopDesc::char_field(int offset) const                  { return (jchar) *char_field_addr(offset);    }
inline void oopDesc::char_field_put(int offset, jchar contents)     { *char_field_addr(offset) = (jint) contents; }

inline jint oopDesc::int_field(int offset) const                    { return *int_field_addr(offset);     }
inline void oopDesc::int_field_put(int offset, jint contents)       { *int_field_addr(offset) = contents; }

inline jshort oopDesc::short_field(int offset) const                { return (jshort) *short_field_addr(offset);   }
inline void oopDesc::short_field_put(int offset, jshort contents)   { *short_field_addr(offset) = (jint) contents; }

inline jlong oopDesc::long_field(int offset) const                  { return *long_field_addr(offset);     }
inline void oopDesc::long_field_put(int offset, jlong contents)     { *long_field_addr(offset) = contents; }

inline jfloat oopDesc::float_field(int offset) const                { return *float_field_addr(offset);     }
inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents; }

inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);     }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
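// Usage sketch (illustrative; value_offset is hypothetical and would normally
// come from the class's field layout):
//
//   jint v = obj->int_field(value_offset);
//   obj->int_field_put(value_offset, v + 1);
//
// The primitive accessors are raw loads and stores with no barrier or
// ordering. Reference fields must instead go through obj_field /
// obj_field_put so that the compressed-oop decode and the write barrier
// are applied.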
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset));     }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents)  { OrderAccess::release_store(bool_field_addr(offset), contents); }

inline jchar oopDesc::char_field_acquire(int offset) const                  { return OrderAccess::load_acquire(char_field_addr(offset));     }
inline void oopDesc::release_char_field_put(int offset, jchar contents)     { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const                    { return OrderAccess::load_acquire(int_field_addr(offset));     }
inline void oopDesc::release_int_field_put(int offset, jint contents)       { OrderAccess::release_store(int_field_addr(offset), contents); }

inline jshort oopDesc::short_field_acquire(int offset) const                { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents)   { OrderAccess::release_store(short_field_addr(offset), contents);     }

inline jlong oopDesc::long_field_acquire(int offset) const                  { return OrderAccess::load_acquire(long_field_addr(offset));     }
inline void oopDesc::release_long_field_put(int offset, jlong contents)     { OrderAccess::release_store(long_field_addr(offset), contents); }

inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset));     }
inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents); }

inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

inline address oopDesc::address_field_acquire(int offset) const              { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents);       }
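// Usage sketch (illustrative; state_offset is hypothetical): the acquire and
// release pairs back Java volatile field semantics. A writer publishing data
// and a reader consuming it pair up as:
//
//   writer:  obj->release_int_field_put(state_offset, 1);
//   reader:  if (obj->int_field_acquire(state_offset) == 1) { /* data visible */ }
//
// release_store orders all prior writes before the flag store; load_acquire
// orders the flag load before all subsequent reads.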
inline int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s  = lh >> LogHeapWordSize;  // deliver size scaled by wordSize

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.

  if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0.  An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop.  Making the reference volatile prohibits this.
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.  Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //     is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()                   // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}
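// Worked example (illustrative; 64-bit values and a 16-byte array header are
// assumed): for an instance class whose objects occupy 24 bytes, lh == 24 and
// s == 24 >> LogHeapWordSize == 3 words, with no memory touched beyond the
// klass. For a jint array, lh is negative and encodes log2_element_size == 2
// plus the header size, so a length-10 array computes
//
//   size_in_bytes = (10 << 2) + 16 == 56,  s == 56 / 8 == 7 words
//
// (56 is already a multiple of an assumed MinObjAlignmentInBytes of 8, so
// the rounding is a no-op in this case).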

inline int oopDesc::size() {
  return size_given_klass(blueprint());
}

inline bool oopDesc::is_parsable() {
  return blueprint()->oop_is_parsable(this);
}

inline bool oopDesc::is_conc_safe() {
  return blueprint()->oop_is_conc_safe(this);
}

inline void update_barrier_set(void* p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    update_barrier_set((void*)p, v);  // cast away type
  }
}

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);   // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  update_barrier_set((void*)p, v);    // cast away type
}

template <class T> inline void oop_store_without_check(T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
           "oop store without store check failed");
    oopDesc::encode_store_heap_oop(p, v);
  }
}

// When it absolutely has to get there.
template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
           "oop store without store check failed");
    oopDesc::release_encode_store_heap_oop(p, v);
  }
}
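// Ordering sketch (illustrative): a normal reference store through oop_store
// therefore expands to
//
//   update_barrier_set_pre(p, v);           // pre-write barrier (e.g. G1 SATB enqueue)
//   oopDesc::encode_store_heap_oop(p, v);   // encode (if compressed) and store
//   update_barrier_set((void*)p, v);        // post-write barrier (e.g. card mark)
//
// so a concurrent collector can log the old value before the field changes,
// and the card is dirtied only after the new value is in place.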
// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}


// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  // try to find metaclass cycle safely without seg faulting on bad input
  // we should reach klassKlassObj by following klass link at most 3 times
  for (int i = 0; i < 3; i++) {
    obj = obj->klass_or_null();
    // klass should be aligned and in permspace
    if (!check_obj_alignment(obj)) return false;
    if (!Universe::heap()->is_in_permanent(obj)) return false;
  }
  if (obj != Universe::klassKlassObj()) {
    // During a dump, the _klassKlassObj moved to a shared space.
    if (DumpSharedSpaces && Universe::klassKlassObj()->is_shared()) {
      return true;
    }
    return false;
  }

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}
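// Illustrative chain (permgen metaclass layout assumed): for an ordinary
// instance the three hops above are
//
//   obj -> its instanceKlass -> instanceKlassKlass -> klassKlassObj
//
// and klassKlassObj is its own klass, so any well-formed oop converges on it
// within three steps; anything that does not is rejected.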

// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

inline void oopDesc::follow_header() {
  if (UseCompressedOops) {
    MarkSweep::mark_and_push(compressed_klass_addr());
  } else {
    MarkSweep::mark_and_push(klass_addr());
  }
}

inline void oopDesc::follow_contents(void) {
  assert (is_gc_marked(), "should be marked");
  blueprint()->oop_follow_contents(this);
}


// Used by scavengers

inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}

// Used by scavengers
inline void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}

inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

// The following method needs to be MT safe.
inline int oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}
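// Usage sketch (illustrative; obj and new_copy are hypothetical): a copying
// collector publishes an object's new location through the mark word and
// recovers it later:
//
//   obj->forward_to(new_copy);      // or cas_forward_to(new_copy, old_mark)
//   assert(obj->is_forwarded(), "just installed");
//   oop copy = obj->forwardee();    // decodes the pointer back out of the mark
//
// cas_forward_to returns true only for the thread whose installation won the
// race, which is how parallel scavengers decide who owns the new copy.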

inline intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed
  // Note: The mark must be read into local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk) {
  if (UseCompressedOops) {
    blk->do_oop(compressed_klass_addr());
  } else {
    blk->do_oop(klass_addr());
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
  if (UseCompressedOops) {
    if (mr.contains(compressed_klass_addr())) {
      blk->do_oop(compressed_klass_addr());
    }
  } else {
    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
  }
}

inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = blueprint()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

inline void oopDesc::adjust_header() {
  if (UseCompressedOops) {
    MarkSweep::adjust_pointer(compressed_klass_addr());
  } else {
    MarkSweep::adjust_pointer(klass_addr());
  }
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                        \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                     \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate##nv_suffix(this, blk);               \
}                                                                          \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {       \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);       \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)

#ifndef SERIALGC
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)              \
                                                                           \
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {           \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk);     \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
#endif // !SERIALGC

inline bool oopDesc::is_shared() const {
  return CompactingPermGenGen::is_shared(this);
}

inline bool oopDesc::is_shared_readonly() const {
  return CompactingPermGenGen::is_shared_readonly(this);
}

inline bool oopDesc::is_shared_readwrite() const {
  return CompactingPermGenGen::is_shared_readwrite(this);
}