/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc/shared/ageTable.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"

// Implementation of all inlined member functions defined in oop.hpp.
// We need a separate file to avoid circular references.

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline Klass* oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedClassPointers) {
    return Klass::decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS; will not work with
  // UseCompressedOops.
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}

inline narrowKlass* oopDesc::compressed_klass_addr() {
  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  return &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(Klass* k) {
  // Since klasses are promoted, no store check is needed.
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = Klass::encode_klass_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}
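// Header sketch (illustrative, assuming a 64-bit VM with
// UseCompressedClassPointers): an object starts with an 8-byte mark word,
// then a 4-byte narrowKlass, then 4 gap bytes before the first field, so
// klass_gap_offset_in_bytes() is 12. The accessors above and below read and
// write those 4 gap bytes directly.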
inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

inline oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}

inline void oopDesc::init_mark() { set_mark(markOopDesc::prototype_for_object(this)); }

inline bool oopDesc::is_a(Klass* k) const { return klass()->is_subtype_of(k); }

inline bool oopDesc::is_instance()            const { return klass()->oop_is_instance(); }
inline bool oopDesc::is_instanceClassLoader() const { return klass()->oop_is_instanceClassLoader(); }
inline bool oopDesc::is_instanceMirror()      const { return klass()->oop_is_instanceMirror(); }
inline bool oopDesc::is_instanceRef()         const { return klass()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()               const { return klass()->oop_is_array(); }
inline bool oopDesc::is_objArray()            const { return klass()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()           const { return klass()->oop_is_typeArray(); }

inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)    const { return (jbyte*)    field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)    const { return (jchar*)    field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)    const { return (jboolean*) field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)     const { return (jint*)     field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)   const { return (jshort*)   field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)    const { return (jlong*)    field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)   const { return (jfloat*)   field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset)  const { return (jdouble*)  field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*)  field_base(offset); }


// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop. All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).
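// For illustration only (not part of this header): a GC loop written as
//
//   template <class T> void process(T* p) {
//     T heap_oop = oopDesc::load_heap_oop(p);
//     if (!oopDesc::is_null(heap_oop)) {
//       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
//       // ... visit obj ...
//     }
//   }
//
// instantiates with T = oop or T = narrowOop and binds to the matching
// overloads below without any runtime UseCompressedOops check.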
inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base. Saving the check for null can save instructions
// in inner GC loops so these are separated.

inline bool check_obj_alignment(oop obj) {
  return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
}

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), err_msg("address not aligned: " INTPTR_FORMAT, p2i((void*) result)));
  return result;
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)          { return v; }
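// Worked example (illustrative numbers): with narrow_oop_base B and
// narrow_oop_shift 3, an oop at address B + 0x40 encodes as
// (0x40 >> 3) == 0x8, and decoding computes B + (0x8 << 3) == B + 0x40.
// Shifting by LogMinObjAlignmentInBytes is what lets a 32-bit narrowOop
// address more than 4 GB of heap.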
// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null.
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store an already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}

inline void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}


// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord* dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to narrowOop
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from narrowOop to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}
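// Note on the field accessors below (descriptive, not normative): reads
// resolve `this` through the barrier set (bs()->resolve_oop) and writes use
// bs()->resolve_and_maybe_copy_oop, so a collector that keeps a forwarded
// copy of an object during evacuation always operates on the right copy.
// For barrier sets without such copying these calls are expected to be
// identity operations.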
// In order to put or get a field out of an instance, must first check
// if the field has been compressed and uncompress it.
inline oop oopDesc::obj_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return UseCompressedOops ?
    load_decode_heap_oop(p->obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(p->obj_field_addr<oop>(offset));
}

inline void oopDesc::obj_field_put(int offset, oop value) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  value = bs()->resolve_oop(value);
  UseCompressedOops ? oop_store(p->obj_field_addr<narrowOop>(offset), value) :
                      oop_store(p->obj_field_addr<oop>(offset),       value);
}

inline Metadata* oopDesc::metadata_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return *p->metadata_field_addr(offset);
}

inline void oopDesc::metadata_field_put(int offset, Metadata* value) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  *p->metadata_field_addr(offset) = value;
}

inline void oopDesc::obj_field_put_raw(int offset, oop value) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  value = bs()->resolve_oop(value);
  UseCompressedOops ?
    encode_store_heap_oop(p->obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(p->obj_field_addr<oop>(offset),       value);
}

inline void oopDesc::obj_field_put_volatile(int offset, oop value) {
  OrderAccess::release();
  obj_field_put(offset, value);
  OrderAccess::fence();
}

inline jbyte oopDesc::byte_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return (jbyte) *p->byte_field_addr(offset);
}
inline void oopDesc::byte_field_put(int offset, jbyte contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  *p->byte_field_addr(offset) = (jint) contents;
}

inline jboolean oopDesc::bool_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return (jboolean) *p->bool_field_addr(offset);
}
inline void oopDesc::bool_field_put(int offset, jboolean contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  *p->bool_field_addr(offset) = (jint) contents;
}

inline jchar oopDesc::char_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return (jchar) *p->char_field_addr(offset);
}
inline void oopDesc::char_field_put(int offset, jchar contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  *p->char_field_addr(offset) = (jint) contents;
}

inline jint oopDesc::int_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return *p->int_field_addr(offset);
}
inline void oopDesc::int_field_put(int offset, jint contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  *p->int_field_addr(offset) = contents;
}

inline jshort oopDesc::short_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return (jshort) *p->short_field_addr(offset);
}
inline void oopDesc::short_field_put(int offset, jshort contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  *p->short_field_addr(offset) = (jint) contents;
}

inline jlong oopDesc::long_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return *p->long_field_addr(offset);
}
inline void oopDesc::long_field_put(int offset, jlong contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  *p->long_field_addr(offset) = contents;
}

inline jfloat oopDesc::float_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return *p->float_field_addr(offset);
}
inline void oopDesc::float_field_put(int offset, jfloat contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  *p->float_field_addr(offset) = contents;
}

inline jdouble oopDesc::double_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return *p->double_field_addr(offset);
}
inline void oopDesc::double_field_put(int offset, jdouble contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  *p->double_field_addr(offset) = contents;
}

inline address oopDesc::address_field(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return *p->address_field_addr(offset);
}
inline void oopDesc::address_field_put(int offset, address contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  *p->address_field_addr(offset) = contents;
}
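// The *_acquire / release_* variants below implement volatile Java field
// semantics: a release-store paired with a load-acquire guarantees that a
// reader observing the stored value also observes all writes that preceded
// the store.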
inline oop oopDesc::obj_field_acquire(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return UseCompressedOops ?
    decode_heap_oop((narrowOop)
      OrderAccess::load_acquire(p->obj_field_addr<narrowOop>(offset)))
    : decode_heap_oop((oop)
      OrderAccess::load_ptr_acquire(p->obj_field_addr<oop>(offset)));
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  value = bs()->resolve_oop(value);
  UseCompressedOops ?
    oop_store((volatile narrowOop*)p->obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      p->obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return OrderAccess::load_acquire(p->byte_field_addr(offset));
}
inline void oopDesc::release_byte_field_put(int offset, jbyte contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  OrderAccess::release_store(p->byte_field_addr(offset), contents);
}

inline jboolean oopDesc::bool_field_acquire(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return OrderAccess::load_acquire(p->bool_field_addr(offset));
}
inline void oopDesc::release_bool_field_put(int offset, jboolean contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  OrderAccess::release_store(p->bool_field_addr(offset), contents);
}

inline jchar oopDesc::char_field_acquire(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return OrderAccess::load_acquire(p->char_field_addr(offset));
}
inline void oopDesc::release_char_field_put(int offset, jchar contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  OrderAccess::release_store(p->char_field_addr(offset), contents);
}

inline jint oopDesc::int_field_acquire(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return OrderAccess::load_acquire(p->int_field_addr(offset));
}
inline void oopDesc::release_int_field_put(int offset, jint contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  OrderAccess::release_store(p->int_field_addr(offset), contents);
}

inline jshort oopDesc::short_field_acquire(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return (jshort)OrderAccess::load_acquire(p->short_field_addr(offset));
}
inline void oopDesc::release_short_field_put(int offset, jshort contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  OrderAccess::release_store(p->short_field_addr(offset), contents);
}

inline jlong oopDesc::long_field_acquire(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return OrderAccess::load_acquire(p->long_field_addr(offset));
}
inline void oopDesc::release_long_field_put(int offset, jlong contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  OrderAccess::release_store(p->long_field_addr(offset), contents);
}

inline jfloat oopDesc::float_field_acquire(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return OrderAccess::load_acquire(p->float_field_addr(offset));
}
inline void oopDesc::release_float_field_put(int offset, jfloat contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  OrderAccess::release_store(p->float_field_addr(offset), contents);
}

inline jdouble oopDesc::double_field_acquire(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return OrderAccess::load_acquire(p->double_field_addr(offset));
}
inline void oopDesc::release_double_field_put(int offset, jdouble contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  OrderAccess::release_store(p->double_field_addr(offset), contents);
}

inline address oopDesc::address_field_acquire(int offset) const {
  oop p = bs()->resolve_oop((oop) this);
  return (address) OrderAccess::load_ptr_acquire(p->address_field_addr(offset));
}
inline void oopDesc::release_address_field_put(int offset, address contents) {
  oop p = bs()->resolve_and_maybe_copy_oop(this);
  OrderAccess::release_store_ptr(p->address_field_addr(offset), contents);
}

inline int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size. For instances, this is positive and equal to the
  // size. For arrays, this is negative and provides log2 of the
  // array element size. For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead. So the speed here is equal in importance to the
  // speed of allocation.
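  // Worked example (illustrative): on a 64-bit VM an instance with a
  // 24-byte layout yields lh == 24, so s = 24 >> LogHeapWordSize == 3 words.
  // For an int[] the (negative) lh encodes log2_element_size == 2 plus the
  // array header size, so a length-10 array occupies
  // header + (10 << 2) bytes, rounded up to MinObjAlignmentInBytes.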
  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays. We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0. An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop. Making the reference volatile prohibits this.
      // (%%% please explain by what magic the length is actually fetched!)
      volatile int* array_length;
      array_length = (volatile int*)((intptr_t)this +
                                     arrayOopDesc::length_offset_in_bytes());
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize. Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
                HeapWordSize);

      // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // ParNew also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //   is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()                 // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseConcMarkSweepGC) ||
               (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}


inline int oopDesc::size() {
  return size_given_klass(klass());
}

inline void update_barrier_set(void* p, oop v, bool release = false) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v, release);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    // always_do_update_barrier == false =>
    // Either we are at a safepoint (in GC) or CMS is not used. In both
    // cases it's unnecessary to mark the card as dirty with release semantics.
    update_barrier_set((void*)p, v, false /* release */);  // cast away type
  }
}

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);  // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  // When using CMS we must mark the card corresponding to p as dirty
  // with release semantics to prevent that CMS sees the dirty card but
  // not the new value v at p due to reordering of the two
  // stores. Note that CMS has a concurrent precleaning phase, where
  // it reads the card table while the Java threads are running.
  update_barrier_set((void*)p, v, true /* release */);  // cast away type
}
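// Illustrative use (hypothetical offset; assuming a compressed-oops VM):
//   oop_store(obj->obj_field_addr<narrowOop>(offset), value);
// runs the pre-write barrier, performs the encoded store, and then marks
// the corresponding card dirty via the post-write barrier.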
// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord* dest,
                                                oop compare_value,
                                                bool prebarrier) {
  if (UseCompressedOops) {
    if (prebarrier) {
      update_barrier_set_pre((narrowOop*)dest, exchange_value);
    }
    // encode exchange and compare value from oop to narrowOop
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from narrowOop to oop
    return decode_heap_oop(old);
  } else {
    if (prebarrier) {
      update_barrier_set_pre((oop*)dest, exchange_value);
    }
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}

// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}


// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}


// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}
#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

inline bool oopDesc::is_scavengable() const {
  return Universe::heap()->is_scavengable(this);
}

// Used by scavengers
inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared.
  return mark()->is_marked();
}

// Used by scavengers
inline void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}

#if INCLUDE_ALL_GCS
inline oop oopDesc::forward_to_atomic(oop p) {
  markOop oldMark = mark();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversible");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  while (!oldMark->is_marked()) {
    curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  return forwardee();
}
#endif

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}

inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}
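// Illustrative round trip (hypothetical new_loc, assuming a copying GC has
// just evacuated obj):
//   obj->forward_to(new_loc);
//   assert(obj->is_forwarded() && obj->forwardee() == new_loc, "sanity");
// The mark word now encodes new_loc with the marked bit pattern, which is
// why age() below refuses to read a forwarded mark.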
// The following method needs to be MT safe.
inline uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}


inline intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: The mark must be read into a local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

inline int oopDesc::ms_adjust_pointers() {
  debug_only(int check_size = size());
  int s = klass()->oop_ms_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

#if INCLUDE_ALL_GCS
inline void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
  klass()->oop_pc_follow_contents(this, cm);
}

inline void oopDesc::pc_update_contents() {
  Klass* k = klass();
  if (!k->oop_is_typeArray()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_pc_update_pointers(this);
  }
  // Else skip it. The TypeArrayKlass in the header never needs scavenging.
}

inline void oopDesc::ps_push_contents(PSPromotionManager* pm) {
  Klass* k = klass();
  if (!k->oop_is_typeArray()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_ps_push_contents(this, pm);
  }
  // Else skip it. The TypeArrayKlass in the header never needs scavenging.
}
#endif

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
                                                                       \
inline void oopDesc::oop_iterate(OopClosureType* blk) {                \
  klass()->oop_oop_iterate##nv_suffix(this, blk);                      \
}                                                                      \
                                                                       \
inline void oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {  \
  klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);          \
}

#define OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)               \
                                                                       \
inline int oopDesc::oop_iterate_size(OopClosureType* blk) {            \
  Klass* k = klass();                                                  \
  int size = size_given_klass(k);                                      \
  k->oop_oop_iterate##nv_suffix(this, blk);                            \
  return size;                                                         \
}                                                                      \
                                                                       \
inline int oopDesc::oop_iterate_size(OopClosureType* blk,              \
                                     MemRegion mr) {                   \
  Klass* k = klass();                                                  \
  int size = size_given_klass(k);                                      \
  k->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);                \
  return size;                                                         \
}
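// For illustration: these macros (and OOP_ITERATE_BACKWARDS_DEFN below) are
// expanded at the end of this file for every closure type listed in
// ALL_OOP_OOP_ITERATE_CLOSURES_1/2, giving each closure a concrete
// oop_iterate / oop_iterate_size definition; the nv_suffix selects the
// specialized, non-virtual Klass iterators where available.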
inline int oopDesc::oop_iterate_no_header(OopClosure* blk) {
  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate_size(&cl);
}

inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate_size(&cl, mr);
}

#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)          \
                                                                       \
inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {      \
  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);            \
}
#else
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
#endif

#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)  \
  OOP_ITERATE_DEFN(OopClosureType, nv_suffix)               \
  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)          \
  OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)

ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP