--- old/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp 2018-03-22 16:37:28.428886028 +0100
+++ new/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp 2018-03-22 16:37:28.228886034 +0100
@@ -35,8 +35,9 @@
 #include "compiler/disassembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_aarch64.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
 #include "opto/node.hpp"
@@ -46,7 +47,6 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
-
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
@@ -173,7 +173,7 @@
   // instruction.
   if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
     // Move narrow OOP
-    narrowOop n = oopDesc::encode_heap_oop((oop)o);
+    narrowOop n = CompressedOops::encode((oop)o);
     Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
     Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
     instructions = 2;
@@ -3712,7 +3712,7 @@
   }
 }

-// Algorithm must match oop.inline.hpp encode_heap_oop.
+// Algorithm must match CompressedOops::encode.
 void MacroAssembler::encode_heap_oop(Register d, Register s) {
 #ifdef ASSERT
   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
--- old/src/hotspot/cpu/arm/nativeInst_arm_64.cpp 2018-03-22 16:37:28.840886013 +0100
+++ new/src/hotspot/cpu/arm/nativeInst_arm_64.cpp 2018-03-22 16:37:28.644886020 +0100
@@ -27,8 +27,9 @@
 #include "code/codeCache.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_arm.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -105,7 +106,7 @@
   uintptr_t nx = 0;
   int val_size = 32;
   if (oop_addr != NULL) {
-    narrowOop encoded_oop = oopDesc::encode_heap_oop(*oop_addr);
+    narrowOop encoded_oop = CompressedOops::encode(*oop_addr);
     nx = encoded_oop;
   } else if (metadata_addr != NULL) {
     assert((*metadata_addr)->is_klass(), "expected Klass");
@@ -240,4 +241,3 @@
   assert(NativeCall::is_call_before(return_address), "must be");
   return nativeCall_at(call_for(return_address));
 }
-
--- old/src/hotspot/cpu/arm/relocInfo_arm.cpp 2018-03-22 16:37:29.160886002 +0100
+++ new/src/hotspot/cpu/arm/relocInfo_arm.cpp 2018-03-22 16:37:28.956886009 +0100
@@ -27,7 +27,8 @@
 #include "assembler_arm.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_arm.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"

 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -40,7 +41,7 @@
     uintptr_t d = ni->data();
     guarantee((d >> 32) == 0, "not narrow oop");
     narrowOop no = d;
-    oop o = oopDesc::decode_heap_oop(no);
+    oop o = CompressedOops::decode(no);
     guarantee(cast_from_oop<intptr_t>(o) == (intptr_t)x, "instructions must match");
   } else {
     ni->set_data((intptr_t)x);
--- old/src/hotspot/cpu/ppc/nativeInst_ppc.cpp 2018-03-22 16:37:29.440885992 +0100
+++ new/src/hotspot/cpu/ppc/nativeInst_ppc.cpp 2018-03-22 16:37:29.244885999 +0100
@@ -27,7 +27,8 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_ppc.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -194,7 +195,7 @@
   CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
   if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
     narrowOop no = (narrowOop)MacroAssembler::get_narrow_oop(addr, cb->content_begin());
-    return cast_from_oop<intptr_t>(oopDesc::decode_heap_oop(no));
+    return cast_from_oop<intptr_t>(CompressedOops::decode(no));
   } else {
     assert(MacroAssembler::is_load_const_from_method_toc_at(addr),
            "must be load_const_from_pool");
@@ -415,4 +416,3 @@
   *(address*)(ctable + destination_toc_offset()) = new_destination;
 }
-
--- old/src/hotspot/cpu/ppc/relocInfo_ppc.cpp 2018-03-22 16:37:29.736885982 +0100
+++ new/src/hotspot/cpu/ppc/relocInfo_ppc.cpp 2018-03-22 16:37:29.540885989 +0100
@@ -27,8 +27,9 @@
 #include "asm/assembler.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_ppc.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"

 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -57,7 +58,7 @@
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type,
              "how to encode else?");
       narrowOop no = (type() == relocInfo::oop_type) ?
-        oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+        CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
       nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
     }
   } else {
--- old/src/hotspot/cpu/s390/macroAssembler_s390.cpp 2018-03-22 16:37:30.052885971 +0100
+++ new/src/hotspot/cpu/s390/macroAssembler_s390.cpp 2018-03-22 16:37:29.844885978 +0100
@@ -33,6 +33,7 @@
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
@@ -1286,7 +1287,7 @@
 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
-  narrowOop no = oopDesc::encode_heap_oop(o);
+  narrowOop no = CompressedOops::encode(o);
   return patch_load_const_32to64(pos, no);
 }
@@ -1304,7 +1305,7 @@
 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
-  narrowOop no = oopDesc::encode_heap_oop(o);
+  narrowOop no = CompressedOops::encode(o);
   return patch_compare_immediate_32(pos, no);
 }
--- old/src/hotspot/cpu/sparc/relocInfo_sparc.cpp 2018-03-22 16:37:30.488885956 +0100
+++ new/src/hotspot/cpu/sparc/relocInfo_sparc.cpp 2018-03-22 16:37:30.288885963 +0100
@@ -26,8 +26,9 @@
 #include "asm/assembler.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_sparc.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"

 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -97,7 +98,7 @@
   guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
   if (format() != 0) {
     assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
-    jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+    jint np = type() == relocInfo::oop_type ? CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
     inst &= ~Assembler::hi22(-1);
     inst |= Assembler::hi22((intptr_t)np);
     if (verify_only) {
--- old/src/hotspot/cpu/x86/relocInfo_x86.cpp 2018-03-22 16:37:30.808885945 +0100
+++ new/src/hotspot/cpu/x86/relocInfo_x86.cpp 2018-03-22 16:37:30.612885951 +0100
@@ -26,6 +26,7 @@
 #include "asm/macroAssembler.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_x86.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
@@ -51,9 +52,9 @@
       // both compressed oops and compressed classes look the same
       if (Universe::heap()->is_in_reserved((oop)x)) {
         if (verify_only) {
-          guarantee(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
+          guarantee(*(uint32_t*) disp == CompressedOops::encode((oop)x), "instructions must match");
         } else {
-          *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
+          *(int32_t*) disp = CompressedOops::encode((oop)x);
         }
       } else {
         if (verify_only) {
--- old/src/hotspot/share/classfile/compactHashtable.cpp 2018-03-22 16:37:31.108885934 +0100
+++ new/src/hotspot/share/classfile/compactHashtable.cpp 2018-03-22 16:37:30.908885941 +0100
@@ -29,6 +29,7 @@
 #include "logging/logMessage.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/numberSeq.hpp"
 #include <sys/stat.h>
@@ -182,7 +183,7 @@
 }

 void CompactStringTableWriter::add(unsigned int hash, oop string) {
-  CompactHashtableWriter::add(hash, oopDesc::encode_heap_oop(string));
+  CompactHashtableWriter::add(hash, CompressedOops::encode(string));
 }

 void CompactSymbolTableWriter::dump(CompactHashtable<Symbol*, char> *cht) {
--- old/src/hotspot/share/classfile/compactHashtable.inline.hpp 2018-03-22 16:37:31.420885923 +0100
+++ new/src/hotspot/share/classfile/compactHashtable.inline.hpp 2018-03-22 16:37:31.224885930 +0100
@@ -26,8 +26,10 @@
 #define SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP

 #include "classfile/compactHashtable.hpp"
+#include "classfile/javaClasses.hpp"
 #include "memory/allocation.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"

 template <class T, class N>
 inline Symbol* CompactHashtable<T, N>::decode_entry(CompactHashtable<Symbol*, char>* const t,
@@ -45,7 +47,7 @@
 inline oop CompactHashtable<T, N>::decode_entry(CompactHashtable<oop, char>* const t,
                                                 u4 offset, const char* name, int len) {
   narrowOop obj = (narrowOop)offset;
-  oop string = oopDesc::decode_heap_oop(obj);
+  oop string = CompressedOops::decode(obj);
   if (java_lang_String::equals(string, (jchar*)name, len)) {
     return string;
   }
--- old/src/hotspot/share/classfile/javaClasses.cpp 2018-03-22 16:37:31.708885913 +0100
+++ new/src/hotspot/share/classfile/javaClasses.cpp 2018-03-22 16:37:31.508885920 +0100
@@ -3504,7 +3504,7 @@
 // Support for java_lang_ref_Reference

 bool java_lang_ref_Reference::is_referent_field(oop obj, ptrdiff_t offset) {
-  assert(!oopDesc::is_null(obj), "sanity");
+  assert(obj != NULL, "sanity");
   if (offset != java_lang_ref_Reference::referent_offset) {
     return false;
   }
@@ -4131,7 +4131,7 @@
 bool java_lang_System::has_security_manager() {
   InstanceKlass* ik = SystemDictionary::System_klass();
   oop base = ik->static_field_base_raw();
-  return !oopDesc::is_null(base->obj_field(static_security_offset));
+  return base->obj_field(static_security_offset) != NULL;
 }

 int java_lang_Class::_klass_offset;
--- old/src/hotspot/share/classfile/systemDictionary.cpp 2018-03-22 16:37:32.100885900 +0100
+++ new/src/hotspot/share/classfile/systemDictionary.cpp 2018-03-22 16:37:31.900885907 +0100
@@ -53,6 +53,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/klass.inline.hpp"
@@ -1829,7 +1830,7 @@
   BoolObjectClosure* _is_alive;

   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(_is_alive->do_object_b(obj), "Oop in protection domain cache table must be live");
   }
@@ -2699,7 +2700,7 @@
       mirror = ss.as_java_mirror(class_loader, protection_domain,
                                  SignatureStream::NCDFError, CHECK_(empty));
     }
-    assert(!oopDesc::is_null(mirror), "%s", ss.as_symbol(THREAD)->as_C_string());
+    assert(mirror != NULL, "%s", ss.as_symbol(THREAD)->as_C_string());
     if (ss.at_return_type())
       rt = Handle(THREAD, mirror);
     else
--- old/src/hotspot/share/code/dependencies.cpp 2018-03-22 16:37:32.456885887 +0100
+++ new/src/hotspot/share/code/dependencies.cpp 2018-03-22 16:37:32.256885894 +0100
@@ -1812,8 +1812,8 @@
 }

 Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
-  assert(!oopDesc::is_null(call_site), "sanity");
-  assert(!oopDesc::is_null(method_handle), "sanity");
+  assert(call_site != NULL, "sanity");
+  assert(method_handle != NULL, "sanity");
   assert(call_site->is_a(SystemDictionary::CallSite_klass()), "sanity");

   if (changes == NULL) {
--- old/src/hotspot/share/code/relocInfo.cpp 2018-03-22 16:37:32.796885875 +0100
+++ new/src/hotspot/share/code/relocInfo.cpp 2018-03-22 16:37:32.596885882 +0100
@@ -28,6 +28,7 @@
 #include "code/nmethod.hpp"
 #include "code/relocInfo.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/copy.hpp"
 #include "oops/oop.inline.hpp"
@@ -307,7 +308,7 @@
 void Relocation::const_set_data_value(address x) {
 #ifdef _LP64
   if (format() == relocInfo::narrow_oop_in_const) {
-    *(narrowOop*)addr() = oopDesc::encode_heap_oop((oop) x);
+    *(narrowOop*)addr() = CompressedOops::encode((oop) x);
   } else {
 #endif
     *(address*)addr() = x;
@@ -319,7 +320,7 @@
 void Relocation::const_verify_data_value(address x) {
 #ifdef _LP64
   if (format() == relocInfo::narrow_oop_in_const) {
-    guarantee(*(narrowOop*)addr() == oopDesc::encode_heap_oop((oop) x), "must agree");
+    guarantee(*(narrowOop*)addr() == CompressedOops::encode((oop) x), "must agree");
   } else {
 #endif
     guarantee(*(address*)addr() == x, "must agree");
--- old/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp 2018-03-22 16:37:33.136885864 +0100
+++ new/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp 2018-03-22 16:37:32.932885871 +0100
@@ -28,6 +28,8 @@
 #include "gc/cms/cmsOopClosures.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"

 // MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
@@ -45,13 +47,13 @@
 }

 // Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_IMPL(cls)                                \
-  template <class T> void cls::do_oop_work(T* p) {           \
-    T heap_oop = oopDesc::load_heap_oop(p);                  \
-    if (!oopDesc::is_null(heap_oop)) {                       \
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
-      do_oop(obj);                                           \
-    }                                                        \
+#define DO_OOP_WORK_IMPL(cls)                                 \
+  template <class T> void cls::do_oop_work(T* p) {            \
+    T heap_oop = RawAccess<>::oop_load(p);                    \
+    if (!CompressedOops::is_null(heap_oop)) {                 \
+      oop obj = CompressedOops::decode_not_null(heap_oop);    \
+      do_oop(obj);                                            \
+    }                                                         \
   }

 #define DO_OOP_WORK_NV_IMPL(cls) \
--- old/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp 2018-03-22 16:37:33.440885853 +0100
+++ new/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp 2018-03-22 16:37:33.244885860 +0100
@@ -37,6 +37,8 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
@@ -2250,9 +2252,9 @@
   }

   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       do_oop(p, obj);
     }
   }
--- old/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp 2018-03-22 16:37:33.784885841 +0100
+++ new/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp 2018-03-22 16:37:33.588885848 +0100
@@ -38,6 +38,7 @@
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "memory/virtualspace.hpp"
+#include "oops/access.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "services/memoryService.hpp"
 #include "utilities/bitMap.hpp"
@@ -1320,7 +1321,7 @@
  protected:
   void do_oop(oop p);
   template <class T> inline void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     do_oop(obj);
   }
  public:
--- old/src/hotspot/share/gc/cms/parNewGeneration.cpp 2018-03-22 16:37:34.116885829 +0100
+++ new/src/hotspot/share/gc/cms/parNewGeneration.cpp 2018-03-22 16:37:33.916885836 +0100
@@ -51,6 +51,8 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -679,8 +681,7 @@
 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
-    assert(!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // We never expect to see a null reference being processed
     // as a weak reference.
     assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
@@ -690,7 +691,7 @@
   _par_cl->do_oop_nv(p);

   if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
 }
@@ -706,8 +707,7 @@
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
-    assert(!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
     assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
@@ -717,7 +717,7 @@
   _cl->do_oop_nv(p);

   if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
 }
@@ -726,15 +726,15 @@
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

 template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->DefNewGeneration::copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<>::oop_store(p, new_obj);
     }
     if (_gc_barrier) {
       // If p points to a younger generation, mark the card.
--- old/src/hotspot/share/gc/cms/parOopClosures.inline.hpp 2018-03-22 16:37:34.460885817 +0100
+++ new/src/hotspot/share/gc/cms/parOopClosures.inline.hpp 2018-03-22 16:37:34.256885825 +0100
@@ -32,10 +32,11 @@
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"

 template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
-  assert (!oopDesc::is_null(*p), "null weak reference?");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
@@ -51,7 +52,7 @@
       new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
                                                                 obj, obj_sz, m);
     }
-    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    RawAccess<>::oop_store(p, new_obj);
   }
 }

@@ -60,8 +61,7 @@
 template <class T>
 inline void ParScanClosure::par_do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  assert(!oopDesc::is_null(*p), "expected non-null object");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < gen_boundary()) {
     rs()->write_ref_field_gc_par(p, obj);
@@ -77,9 +77,9 @@
          && (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
          "in older generations.");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
 #ifndef PRODUCT
       if (_g->to()->is_in_reserved(obj)) {
@@ -111,14 +111,14 @@
       oop new_obj;
       if (m->is_marked()) { // Contains forwarding pointer.
         new_obj = ParNewGeneration::real_forwardee(obj);
-        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+        RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
         log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                         "forwarded ",
                                         new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
       } else {
         size_t obj_sz = obj->size_given_klass(objK);
         new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
-        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+        RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
         if (root_scan) {
           // This may have pushed an object.  If we have a root
           // category with a lot of roots, can't let the queue get too
--- old/src/hotspot/share/gc/cms/promotionInfo.cpp 2018-03-22 16:37:34.776885806 +0100
+++ new/src/hotspot/share/gc/cms/promotionInfo.cpp 2018-03-22 16:37:34.576885813 +0100
@@ -26,8 +26,9 @@
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/promotionInfo.hpp"
 #include "gc/shared/genOopClosures.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/markOop.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"

 /////////////////////////////////////////////////////////////////////////
 //// PromotionInfo
@@ -39,7 +40,7 @@
   PromotedObject* res;
   if (UseCompressedOops) {
     // The next pointer is a compressed oop stored in the top 32 bits
-    res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
+    res = (PromotedObject*)CompressedOops::decode(_data._narrow_next);
   } else {
     res = (PromotedObject*)(_next & next_mask);
   }
@@ -52,7 +53,7 @@
          "or insufficient alignment of objects");
   if (UseCompressedOops) {
     assert(_data._narrow_next == 0, "Overwrite?");
-    _data._narrow_next = oopDesc::encode_heap_oop(oop(x));
+    _data._narrow_next = CompressedOops::encode(oop(x));
   } else {
     _next |= (intptr_t)x;
   }
--- old/src/hotspot/share/gc/g1/g1BarrierSet.cpp 2018-03-22 16:37:35.100885795 +0100
+++ new/src/hotspot/share/gc/g1/g1BarrierSet.cpp 2018-03-22 16:37:34.900885802 +0100
@@ -30,6 +30,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/satbMarkQueue.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
@@ -77,9 +79,9 @@
   if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T* elem_ptr = dst;
   for (size_t i = 0; i < count; i++, elem_ptr++) {
-    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
-    if (!oopDesc::is_null(heap_oop)) {
-      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+    T heap_oop = RawAccess<>::oop_load(elem_ptr);
+    if (!CompressedOops::is_null(heap_oop)) {
+      enqueue(CompressedOops::decode_not_null(heap_oop));
     }
   }
 }
--- old/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp 2018-03-22 16:37:35.404885785 +0100
+++ new/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp 2018-03-22 16:37:35.208885791 +0100
@@ -28,7 +28,9 @@
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/shared/accessBarrierSupport.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"

 template <DecoratorSet decorators, typename T>
 inline void G1BarrierSet::write_ref_field_pre(T* field) {
@@ -38,8 +40,8 @@
   }

   T heap_oop = RawAccess<MO_VOLATILE>::oop_load(field);
-  if (!oopDesc::is_null(heap_oop)) {
-    enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+  if (!CompressedOops::is_null(heap_oop)) {
+    enqueue(CompressedOops::decode_not_null(heap_oop));
   }
 }
--- old/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp 2018-03-22 16:37:35.720885774 +0100
+++ new/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp 2018-03-22 16:37:35.520885780 +0100
@@ -28,14 +28,16 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"

 template <typename T>
 void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
   _work->do_oop(p);
-  T oop_or_narrowoop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(oop_or_narrowoop)) {
-    oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+  T oop_or_narrowoop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(oop_or_narrowoop)) {
+    oop o = CompressedOops::decode_not_null(oop_or_narrowoop);
     HeapRegion* hr = _g1h->heap_region_containing(o);
     assert(!_g1h->is_in_cset(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
     hr->add_strong_code_root(_nm);
--- old/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp 2018-03-22 16:37:36.020885763 +0100
+++ new/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp 2018-03-22 16:37:35.820885770 +0100
@@ -30,6 +30,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "memory/heap.hpp"
 #include "memory/iterator.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/stack.inline.hpp"
@@ -274,7 +275,7 @@

   template <typename T>
   void do_oop_work(T* p) {
-    if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) {
+    if (_hr->is_in(RawAccess<>::oop_load(p))) {
       _points_into = true;
     }
   }
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2018-03-22 16:37:36.320885753 +0100
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2018-03-22 16:37:36.120885760 +0100
@@ -77,6 +77,8 @@
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/resolvedMethodTable.hpp"
 #include "runtime/atomic.hpp"
@@ -3810,7 +3812,7 @@
   virtual void do_oop( oop* p) { do_oop_work(p); }

   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);

     if (_g1h->is_in_cset_or_humongous(obj)) {
       // If the referent object has been forwarded (either copied
@@ -5215,9 +5217,9 @@
   nmethod* _nm;

   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
       assert(!hr->is_continues_humongous(),
              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
@@ -5242,9 +5244,9 @@
   nmethod* _nm;

   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
       assert(!hr->is_continues_humongous(),
              "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2018-03-22 16:37:36.740885738 +0100
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2018-03-22 16:37:36.540885745 +0100
@@ -53,6 +53,7 @@
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1368,7 +1369,7 @@
   template <class T> void do_oop_work(T* p) {
     if (!_cm->has_overflown()) {
-      oop obj = oopDesc::load_decode_heap_oop(p);
+      oop obj = RawAccess<>::oop_load(p);
       _task->deal_with_reference(obj);
       _ref_counter--;
--- old/src/hotspot/share/gc/g1/g1EvacFailure.cpp 2018-03-22 16:37:37.112885725 +0100
+++ new/src/hotspot/share/gc/g1/g1EvacFailure.cpp 2018-03-22 16:37:36.912885732 +0100
@@ -34,6 +34,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"

 class UpdateRSetDeferred : public ExtendedOopClosure {
 private:
@@ -51,12 +53,12 @@
     assert(_g1->heap_region_containing(p)->is_in_reserved(p), "paranoia");
     assert(!_g1->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region");

-    T const o = oopDesc::load_heap_oop(p);
-    if (oopDesc::is_null(o)) {
+    T const o = RawAccess<>::oop_load(p);
+    if (CompressedOops::is_null(o)) {
       return;
     }

-    if (HeapRegion::is_in_same_region(p, oopDesc::decode_heap_oop(o))) {
+    if (HeapRegion::is_in_same_region(p, CompressedOops::decode(o))) {
       return;
     }
     size_t card_index = _ct->index_for(p);
--- old/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp 2018-03-22 16:37:37.432885714 +0100
+++ new/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp 2018-03-22 16:37:37.228885721 +0100
@@ -31,6 +31,8 @@
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "utilities/debug.hpp"

 inline bool G1FullGCMarker::mark_object(oop obj) {
@@ -60,9 +62,9 @@
 }

 template <class T> inline void G1FullGCMarker::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if (mark_object(obj)) {
       _oop_stack.push(obj);
       assert(_bitmap->is_marked(obj), "Must be marked now - map self");
--- old/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp 2018-03-22 16:37:37.752885703 +0100
+++ new/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp 2018-03-22 16:37:37.548885710 +0100
@@ -28,6 +28,8 @@
 #include "gc/g1/g1FullGCOopClosures.inline.hpp"
 #include "gc/g1/g1_specialized_oop_closures.hpp"
 #include "logging/logStream.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"

 void G1MarkAndPushClosure::do_oop(oop* p) {
   do_oop_nv(p);
@@ -99,10 +101,10 @@
 }

 template <class T> void G1VerifyOopClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
     _cc++;
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     bool failed = false;
     if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _verify_option)) {
       MutexLockerEx x(ParGCRareEvent_lock,
--- old/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp 2018-03-22 16:37:38.056885692 +0100
+++ new/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp 2018-03-22 16:37:37.852885699 +0100
@@ -31,6 +31,8 @@
 #include "gc/g1/g1FullGCOopClosures.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"

 template <typename T>
 inline void G1MarkAndPushClosure::do_oop_nv(T* p) {
@@ -50,13 +52,13 @@
 }

 template <class T> inline oop G1AdjustClosure::adjust_pointer(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(heap_oop)) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(heap_oop)) {
     // NULL reference, return NULL.
     return NULL;
   }

-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   assert(Universe::heap()->is_in(obj), "should be in heap");
   if (G1ArchiveAllocator::is_archive_object(obj)) {
     // Never forwarding archive objects, return current reference.
@@ -76,7 +78,7 @@
   // Forwarded, update and return new reference.
   assert(Universe::heap()->is_in_reserved(forwardee), "should be in object space");
-  oopDesc::encode_store_heap_oop_not_null(p, forwardee);
+  RawAccess<OOP_NOT_NULL>::oop_store(p, forwardee);
   return forwardee;
 }
--- old/src/hotspot/share/gc/g1/g1HeapVerifier.cpp 2018-03-22 16:37:38.348885682 +0100
+++ new/src/hotspot/share/gc/g1/g1HeapVerifier.cpp 2018-03-22 16:37:38.152885689 +0100
@@ -38,6 +38,8 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -58,9 +60,9 @@
   bool failures() { return _failures; }

   template <class T> void do_oop_nv(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       if (_g1h->is_obj_dead_cond(obj, _vo)) {
         Log(gc, verify) log;
         log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
@@ -101,9 +103,9 @@
     // in the code root list of the heap region containing the
     // object referenced by p.
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);

       // Now fetch the region containing the object
       HeapRegion* hr = _g1h->heap_region_containing(obj);
@@ -186,7 +188,7 @@
   void do_oop( oop *p) { do_oop_work(p); }

   template <class T> void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
               "Dead object referenced by a not dead object");
   }
@@ -240,7 +242,7 @@
   void do_oop( oop *p) { do_oop_work(p); }

   template <class T> void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);

     if (_hr->is_open_archive()) {
       guarantee(obj == NULL || G1ArchiveAllocator::is_archive_object(obj),
--- old/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp 2018-03-22 16:37:38.684885670 +0100
+++ new/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp 2018-03-22 16:37:38.484885677 +0100
@@ -34,6 +34,8 @@
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.inline.hpp"
 #include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "runtime/prefetch.inline.hpp"

 template <class T>
@@ -49,9 +51,9 @@
   // slightly paranoid test; I'm trying to catch potential
   // problems before we go into push_on_queue to know where the
   // problem is coming from
-  assert((obj == oopDesc::load_decode_heap_oop(p)) ||
+  assert((obj == RawAccess<>::oop_load(p)) ||
          (obj->is_forwarded() &&
-          obj->forwardee() == oopDesc::load_decode_heap_oop(p)),
+          obj->forwardee() == RawAccess<>::oop_load(p)),
         "p should still be pointing to obj or to its forwardee");

   _par_scan_state->push_on_queue(p);
@@ -66,12 +68,12 @@

 template <class T>
 inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);

-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);

   const InCSetState state = _g1->in_cset_state(obj);
   if (state.is_in_cset()) {
     prefetch_and_push(p, obj);
@@ -93,10 +95,10 @@
 template <class T>
 inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
   T heap_oop = RawAccess<MO_VOLATILE>::oop_load(p);
-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   _cm->mark_in_next_bitmap(obj);
 }
@@ -124,10 +126,10 @@
 template <class T>
 inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
   T o = RawAccess<MO_VOLATILE>::oop_load(p);
-  if (oopDesc::is_null(o)) {
+  if (CompressedOops::is_null(o)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(o);
+  oop obj = CompressedOops::decode_not_null(o);

   check_obj_during_refinement(p, obj);
@@ -150,11 +152,11 @@
 template <class T>
 inline void G1ScanObjsDuringUpdateRSClosure::do_oop_nv(T* p) {
-  T o = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(o)) {
+  T o = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(o)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(o);
+  oop obj = CompressedOops::decode_not_null(o);

   check_obj_during_refinement(p, obj);
@@ -176,11 +178,11 @@
 template <class T>
 inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(heap_oop)) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);

   const InCSetState state = _g1->in_cset_state(obj);
   if (state.is_in_cset()) {
@@ -219,13 +221,13 @@
 template <G1Barrier barrier, G1Mark do_mark_object, bool use_ext>
 template <class T>
 void G1ParCopyClosure<barrier, do_mark_object, use_ext>::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);

-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }

-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);

   assert(_worker_id == _par_scan_state->worker_id(), "sanity");
@@ -239,7 +241,7 @@
       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
-    oopDesc::encode_store_heap_oop(p, forwardee);
+    RawAccess<>::oop_store(p, forwardee);

     if (do_mark_object != G1MarkNone && forwardee != obj) {
       // If the object is self-forwarded we don't need to explicitly
       // mark it, the evacuation failure protocol will do so.
--- old/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp 2018-03-22 16:37:39.020885659 +0100
+++ new/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp 2018-03-22 16:37:38.816885666 +0100
@@ -33,6 +33,7 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
@@ -104,7 +105,7 @@
   assert(ref != NULL, "invariant");
   assert(UseCompressedOops, "sanity");
   assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
-  oop p = oopDesc::load_decode_heap_oop(ref);
+  oop p = RawAccess<>::oop_load(ref);
   assert(_g1h->is_in_g1_reserved(p),
          "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   return true;
@@ -118,7 +119,7 @@
     assert(_g1h->is_in_cset(p),
            "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   } else {
-    oop p = oopDesc::load_decode_heap_oop(ref);
+    oop p = RawAccess<>::oop_load(ref);
     assert(_g1h->is_in_g1_reserved(p),
            "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   }
--- old/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp 2018-03-22 16:37:39.336885648 +0100
+++ new/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp 2018-03-22 16:37:39.140885654 +0100
@@ -27,12 +27,12 @@

 #include "gc/g1/g1ParScanThreadState.hpp"
 #include "gc/g1/g1RemSet.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"

 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
-  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
-         "Reference should not be NULL here as such are never pushed to the task queue.");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  // Reference should not be NULL here as such are never pushed to the task queue.
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);

   // Although we never intentionally push references outside of the collection
   // set, due to (benign) races in the claim mechanism during RSet scanning more
@@ -46,7 +46,7 @@
     } else {
       obj = copy_to_survivor_space(in_cset_state, obj, m);
     }
-    oopDesc::encode_store_heap_oop(p, obj);
+    RawAccess<>::oop_store(p, obj);
   } else if (in_cset_state.is_humongous()) {
     _g1h->set_humongous_is_live(obj);
   } else {
@@ -146,4 +146,3 @@
 }

 #endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
-
--- old/src/hotspot/share/gc/g1/g1StringDedupThread.cpp 2018-03-22 16:37:39.652885637 +0100
+++ new/src/hotspot/share/gc/g1/g1StringDedupThread.cpp 2018-03-22 16:37:39.456885643 +0100
@@ -30,6 +30,7 @@
 #include "gc/g1/g1StringDedupThread.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -66,7 +67,7 @@
   virtual void do_oop(oop* p) { ShouldNotReachHere(); }
   virtual void do_oop(narrowOop* p) {
-    oop java_string = oopDesc::load_decode_heap_oop(p);
+    oop java_string = RawAccess<>::oop_load(p);
     G1StringDedupTable::deduplicate(java_string, _stat);
   }
 };
--- old/src/hotspot/share/gc/g1/heapRegion.cpp 2018-03-22 16:37:39.972885625 +0100
+++ new/src/hotspot/share/gc/g1/heapRegion.cpp 2018-03-22 16:37:39.776885632 +0100
@@ -39,6 +39,8 @@
 #include "logging/logStream.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
@@ -325,9 +327,9 @@
   bool _has_oops_in_region;

   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);

       // Note: not all the oops embedded in the nmethod are in the
       // current region. We only look at those which are.
@@ -506,10 +508,10 @@

   template <class T>
   void verify_liveness(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
+    T heap_oop = RawAccess<>::oop_load(p);
     Log(gc, verify) log;
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       bool failed = false;
       if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
         MutexLockerEx x(ParGCRareEvent_lock,
@@ -562,10 +564,10 @@

   template <class T>
   void verify_remembered_set(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
+    T heap_oop = RawAccess<>::oop_load(p);
     Log(gc, verify) log;
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
       HeapRegion* to = _g1h->heap_region_containing(obj);
       if (from != NULL && to != NULL &&
--- old/src/hotspot/share/gc/parallel/psCardTable.cpp 2018-03-22 16:37:40.312885614 +0100
+++ new/src/hotspot/share/gc/parallel/psCardTable.cpp 2018-03-22 16:37:40.112885621 +0100
@@ -31,6 +31,7 @@
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/psTasks.hpp"
 #include "gc/parallel/psYoungGen.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "utilities/align.hpp"
@@ -45,7 +46,7 @@
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     if (_young_gen->is_in_reserved(obj) &&
         !_card_table->addr_is_marked_imprecise(p)) {
       // Don't overwrite the first missing card mark
@@ -102,7 +103,7 @@
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     if (_young_gen->is_in_reserved(obj)) {
       assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
       _card_table->set_card_newgen(p);
--- old/src/hotspot/share/gc/parallel/psCompactionManager.cpp 2018-03-22 16:37:40.608885603 +0100
+++ new/src/hotspot/share/gc/parallel/psCompactionManager.cpp 2018-03-22 16:37:40.408885610 +0100
@@ -34,6 +34,8 @@
 #include "gc/shared/taskqueue.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
@@ -182,10 +184,10 @@
 template <class T>
 static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
-  T heap_oop = oopDesc::load_heap_oop(referent_addr);
+  T heap_oop = RawAccess<>::oop_load(referent_addr);
   log_develop_trace(gc, ref)("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
-  if (!oopDesc::is_null(heap_oop)) {
-    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop referent = CompressedOops::decode_not_null(heap_oop);
     if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
         PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
       // reference already enqueued, referent will be traversed later
@@ -201,8 +203,8 @@
   T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
   // Treat discovered as normal oop, if ref is not "active",
   // i.e. if next is non-NULL.
-  T next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+  T next_oop = RawAccess<>::oop_load(next_addr);
+  if (!CompressedOops::is_null(next_oop)) { // i.e. ref is not "active"
     T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
     log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
     cm->mark_and_push(discovered_addr);
--- old/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp 2018-03-22 16:37:40.936885592 +0100
+++ new/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp 2018-03-22 16:37:40.736885599 +0100
@@ -29,7 +29,9 @@
 #include "gc/parallel/psCompactionManager.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/arrayOop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/debug.hpp"
@@ -71,9 +73,9 @@

 template <typename T>
 inline void ParCompactionManager::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

     if (mark_bitmap()->is_unmarked(obj) && PSParallelCompact::mark_obj(obj)) {
--- old/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2018-03-22 16:37:41.220885582 +0100
+++ new/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2018-03-22 16:37:41.020885589 +0100
@@ -55,6 +55,7 @@
 #include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/methodData.hpp"
@@ -3078,11 +3079,11 @@
                                T* discovered_addr) {
   log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
+                             p2i(referent_addr), referent_addr ? p2i((oop)RawAccess<>::oop_load(referent_addr)) : NULL);
   log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
+                             p2i(next_addr), next_addr ? p2i((oop)RawAccess<>::oop_load(next_addr)) : NULL);
   log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
+                             p2i(discovered_addr), discovered_addr ? p2i((oop)RawAccess<>::oop_load(discovered_addr)) : NULL);
 }
 #endif
--- old/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp 2018-03-22 16:37:41.556885570 +0100
+++ new/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp 2018-03-22 16:37:41.360885577 +0100
@@ -29,6 +29,8 @@
 #include "gc/parallel/parMarkBitMap.inline.hpp"
 #include "gc/parallel/psParallelCompact.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.hpp"
 #include "oops/oop.inline.hpp"
@@ -105,9 +107,9 @@

 template <class T>
 inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");

     oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
@@ -117,7 +119,7 @@
     if (new_obj != NULL) {
       assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
              "should be in object space");
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
   }
 }
--- old/src/hotspot/share/gc/parallel/psPromotionManager.cpp 2018-03-22 16:37:41.868885559 +0100
+++ new/src/hotspot/share/gc/parallel/psPromotionManager.cpp 2018-03-22 16:37:41.672885566 +0100
@@ -38,7 +38,9 @@
 #include "memory/memRegion.hpp"
 #include "memory/padded.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/arrayOop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
@@ -451,8 +453,8 @@
   // Treat discovered as normal oop, if ref is not "active",
   // i.e. if next is non-NULL.
   T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
-  T next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+  T next_oop = RawAccess<>::oop_load(next_addr);
+  if (!CompressedOops::is_null(next_oop)) { // i.e. ref is not "active"
     T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
     log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
     if (PSScavenge::should_scavenge(discovered_addr)) {
--- old/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp 2018-03-22 16:37:42.192885548 +0100
+++ new/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp 2018-03-22 16:37:41.992885555 +0100
@@ -33,6 +33,7 @@
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"

 inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
@@ -49,14 +50,14 @@
 template <class T>
 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
   if (p != NULL) { // XXX: error if p != NULL here
-    oop o = oopDesc::load_decode_heap_oop_not_null(p);
+    oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);
     if (o->is_forwarded()) {
       o = o->forwardee();
       // Card mark
       if (PSScavenge::is_obj_in_young(o)) {
         PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
       }
-      oopDesc::encode_store_heap_oop_not_null(p, o);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, o);
     } else {
       push_depth(p);
     }
@@ -278,7 +279,7 @@
 inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
   assert(should_scavenge(p, true), "revisiting object?");

-  oop o = oopDesc::load_decode_heap_oop_not_null(p);
+  oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);
   oop new_obj = o->is_forwarded()
         ? o->forwardee()
         : copy_to_survivor_space<promote_immediately>(o);
@@ -291,7 +292,7 @@
                       new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
   }

-  oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+  RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);

   // We cannot mark without test, as some code passes us pointers
   // that are outside the heap. These pointers are either from roots
--- old/src/hotspot/share/gc/parallel/psScavenge.cpp 2018-03-22 16:37:42.480885538 +0100
+++ new/src/hotspot/share/gc/parallel/psScavenge.cpp 2018-03-22 16:37:42.284885545 +0100
@@ -47,6 +47,8 @@
 #include "gc/shared/weakProcessor.hpp"
 #include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/handles.inline.hpp"
@@ -93,8 +95,7 @@
   }

   template <class T> void do_oop_work(T* p) {
-    assert (!oopDesc::is_null(*p), "expected non-null ref");
-    assert (oopDesc::is_oop(oopDesc::load_decode_heap_oop_not_null(p)),
+    assert (oopDesc::is_oop(RawAccess<OOP_NOT_NULL>::oop_load(p)),
            "expected an oop while scanning weak refs");

     // Weak refs may be visited more than once.
@@ -738,7 +739,7 @@
 void PSScavenge::set_young_generation_boundary(HeapWord* v) {
   _young_generation_boundary = v;
   if (UseCompressedOops) {
-    _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
+    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode((oop)v);
   }
 }
--- old/src/hotspot/share/gc/parallel/psScavenge.inline.hpp 2018-03-22 16:37:42.772885528 +0100
+++ new/src/hotspot/share/gc/parallel/psScavenge.inline.hpp 2018-03-22 16:37:42.576885535 +0100
@@ -31,6 +31,7 @@
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "utilities/globalDefinitions.hpp"

 inline void PSScavenge::save_to_space_top_before_gc() {
@@ -39,14 +40,14 @@
 }

 template <class T> inline bool PSScavenge::should_scavenge(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   return PSScavenge::is_obj_in_young(heap_oop);
 }

 template <class T>
 inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
   if (should_scavenge(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // Skip objects copied to to_space since the scavenge started.
     HeapWord* const addr = (HeapWord*)obj;
     return addr < to_space_top_before_gc() || addr >= to_space->end();
@@ -107,7 +108,7 @@
     } else {
       new_obj = _pm->copy_to_survivor_space</*promote_immediately=*/false>(o);
     }
-    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    RawAccess<>::oop_store(p, new_obj);

     if (PSScavenge::is_obj_in_young(new_obj)) {
       do_cld_barrier();
--- old/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp 2018-03-22 16:37:43.088885517 +0100
+++ new/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp 2018-03-22 16:37:42.888885524 +0100
@@ -30,6 +30,7 @@
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/space.hpp"
+#include "oops/access.inline.hpp"

 // Methods of protected closure types

@@ -39,8 +40,7 @@
   {
     // We never expect to see a null reference being processed
     // as a weak reference.
-    assert (!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     assert (oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
   }
 #endif // ASSERT
@@ -61,7 +61,7 @@
   // dirty cards in the young gen are never scanned, so the
   // extra check probably isn't worthwhile.
   if (GenCollectedHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->inline_write_ref_field_gc(p, obj);
   }
 }
@@ -72,8 +72,7 @@
   {
     // We never expect to see a null reference being processed
     // as a weak reference.
-    assert (!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     assert (oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
   }
 #endif // ASSERT
@@ -83,7 +82,7 @@
   // Optimized for Defnew generation if it's the youngest generation:
   // we set a younger_gen card if we have an older->youngest
   // generation pointer.
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   if (((HeapWord*)obj < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
     _rs->inline_write_ref_field_gc(p, obj);
   }
--- old/src/hotspot/share/gc/serial/markSweep.cpp 2018-03-22 16:37:43.404885506 +0100
+++ new/src/hotspot/share/gc/serial/markSweep.cpp 2018-03-22 16:37:43.204885513 +0100
@@ -30,6 +30,8 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/specialized_oop_closures.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
@@ -73,9 +75,9 @@
 }

 template <class T> inline void MarkSweep::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if (!obj->mark()->is_marked()) {
       mark_object(obj);
       _marking_stack.push(obj);
@@ -169,9 +171,9 @@

 template <class T> inline void MarkSweep::follow_root(T* p) {
   assert(!Universe::heap()->is_in_reserved(p),
          "roots shouldn't be things within the heap");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if (!obj->mark()->is_marked()) {
       mark_object(obj);
       follow_object(obj);
--- old/src/hotspot/share/gc/serial/markSweep.inline.hpp 2018-03-22 16:37:43.724885495 +0100
+++ new/src/hotspot/share/gc/serial/markSweep.inline.hpp 2018-03-22 16:37:43.524885502 +0100
@@ -29,6 +29,8 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/universe.hpp"
 #include "oops/markOop.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"

 inline int MarkSweep::adjust_pointers(oop obj) {
@@ -36,9 +38,9 @@
 }

 template <class T> inline void MarkSweep::adjust_pointer(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(Universe::heap()->is_in(obj), "should be in heap");

     oop new_obj = oop(obj->mark()->decode_pointer());
@@ -52,7 +54,7 @@
     if (new_obj != NULL) {
       assert(Universe::heap()->is_in_reserved(new_obj),
              "should be in object space");
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
   }
 }
--- old/src/hotspot/share/gc/shared/cardTableRS.cpp 2018-03-22 16:37:44.044885484 +0100
+++ new/src/hotspot/share/gc/shared/cardTableRS.cpp 2018-03-22 16:37:43.844885491 +0100
@@ -28,6 +28,7 @@
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/java.hpp"
@@ -351,7 +352,7 @@
            "Error: jp " PTR_FORMAT " should be within "
            "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
            p2i(jp), p2i(_begin), p2i(_end));
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
               "pointer " PTR_FORMAT " at " PTR_FORMAT " on "
              "clean card crosses boundary" PTR_FORMAT,
--- old/src/hotspot/share/gc/shared/genOopClosures.inline.hpp 2018-03-22 16:37:44.376885472 +0100
+++ new/src/hotspot/share/gc/shared/genOopClosures.inline.hpp 2018-03-22 16:37:44.176885479 +0100
@@ -31,6 +31,8 @@
 #include "gc/shared/genOopClosures.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"

 inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
   ExtendedOopClosure(gen->ref_processor()), _orig_gen(gen), _rs(NULL) {
@@ -48,9 +50,9 @@
 template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < _gen_boundary) {
     _rs->inline_write_ref_field_gc(p, obj);
@@ -59,9 +61,9 @@

 template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < gen_boundary()) {
     rs()->write_ref_field_gc_par(p, obj);
@@ -78,15 +80,15 @@
 // NOTE! Any changes made here should also be made
 // in FastScanClosure::do_oop_work()
 template <class T> inline void ScanClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   // Should we copy the obj?
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }

     if (is_scanning_a_cld()) {
@@ -104,15 +106,15 @@
 // NOTE! Any changes made here should also be made
 // in ScanClosure::do_oop_work()
 template <class T> inline void FastScanClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   // Should we copy the obj?
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
--- old/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	2018-03-22 16:37:44.376885472 +0100
+++ new/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	2018-03-22 16:37:44.176885479 +0100
@@ -31,6 +31,8 @@
 #include "gc/shared/genOopClosures.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"

 inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
   ExtendedOopClosure(gen->ref_processor()), _orig_gen(gen), _rs(NULL) {
@@ -48,9 +50,9 @@
 template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < _gen_boundary) {
     _rs->inline_write_ref_field_gc(p, obj);
@@ -59,9 +61,9 @@
 template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < gen_boundary()) {
     rs()->write_ref_field_gc_par(p, obj);
@@ -78,15 +80,15 @@
 // NOTE! Any changes made here should also be made
 // in FastScanClosure::do_oop_work()
 template <class T> inline void ScanClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   // Should we copy the obj?
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }

     if (is_scanning_a_cld()) {
@@ -104,15 +106,15 @@
 // NOTE! Any changes made here should also be made
 // in ScanClosure::do_oop_work()
 template <class T> inline void FastScanClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
   // Should we copy the obj?
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
       if (is_scanning_a_cld()) {
         do_cld_barrier();
       } else if (_gc_barrier) {
@@ -127,9 +129,9 @@
 inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }

 template <class T> void FilteringClosure::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       _cl->do_oop(p);
     }
@@ -142,14 +144,13 @@
 // Note similarity to ScanClosure; the difference is that
 // the barrier set is taken care of outside this closure.
 template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
-  assert(!oopDesc::is_null(*p), "null weak reference?");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
     oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                       : _g->copy_to_survivor_space(obj);
-    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
   }
 }
--- old/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp	2018-03-22 16:37:44.704885461 +0100
+++ new/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp	2018-03-22 16:37:44.500885468 +0100
@@ -27,6 +27,7 @@

 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/modRefBarrierSet.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.hpp"
@@ -105,7 +106,7 @@
     T* end = from + length;
     for (T* p = dst; from < end; from++, p++) {
       T element = *from;
-      if (bound->is_instanceof_or_null(element)) {
+      if (oopDesc::is_instanceof_or_null(CompressedOops::decode(element), bound)) {
         bs->template write_ref_field_pre<decorators>(p);
         *p = element;
       } else {
--- old/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	2018-03-22 16:37:45.004885450 +0100
+++ new/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	2018-03-22 16:37:44.804885457 +0100
@@ -26,17 +26,18 @@
 #define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_INLINE_HPP

 #include "gc/shared/referenceProcessor.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"

 oop DiscoveredList::head() const {
-  return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
+  return UseCompressedOops ?  CompressedOops::decode(_compressed_head) :
                               _oop_head;
 }

 void DiscoveredList::set_head(oop o) {
   if (UseCompressedOops) {
     // Must compress the head ptr.
-    _compressed_head = oopDesc::encode_heap_oop(o);
+    _compressed_head = CompressedOops::encode(o);
   } else {
     _oop_head = o;
   }
--- old/src/hotspot/share/gc/shared/space.hpp	2018-03-22 16:37:45.312885439 +0100
+++ new/src/hotspot/share/gc/shared/space.hpp	2018-03-22 16:37:45.112885446 +0100
@@ -145,6 +145,9 @@
   bool is_in(const void* p) const {
     return used_region().contains(p);
   }
+  bool is_in(oop obj) const {
+    return is_in((void*)obj);
+  }

   // Returns true iff the given reserved memory of the space contains the
   // given address.
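DiscoveredList above is a small illustration of why encode/decode had to move out of oopDesc: ordinary runtime code, not just GC field access, keeps one slot in either compressed or uncompressed form. A standalone sketch of that dual-representation accessor pattern (toy encode/decode and a constant standing in for UseCompressedOops):

// Sketch only: toy types and a fixed flag, not HotSpot's real layout.
#include <cassert>
#include <cstdint>

static const uintptr_t base = 0x100000000ULL;
static const bool use_compressed = true; // stands in for UseCompressedOops

static uint32_t  encode(uintptr_t v) { return v ? (uint32_t)((v - base) >> 3) : 0; }
static uintptr_t decode(uint32_t v)  { return v ? base + ((uintptr_t)v << 3) : 0; }

class DiscoveredListSketch {
  uint32_t  _compressed_head;
  uintptr_t _oop_head;
public:
  uintptr_t head() const {
    return use_compressed ? decode(_compressed_head) : _oop_head;
  }
  void set_head(uintptr_t o) {
    if (use_compressed) _compressed_head = encode(o); // must compress the head
    else                _oop_head = o;
  }
};

int main() {
  DiscoveredListSketch list;
  list.set_head(base + 0x40);
  assert(list.head() == base + 0x40); // round-trips through the narrow form
  return 0;
}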
--- old/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	2018-03-22 16:37:45.636885428 +0100
+++ new/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	2018-03-22 16:37:45.436885435 +0100
@@ -634,7 +634,7 @@
   if (!compiled_code->is_a(HotSpotCompiledNmethod::klass())) {
     oop stubName = HotSpotCompiledCode::name(compiled_code_obj);
-    if (oopDesc::is_null(stubName)) {
+    if (stubName == NULL) {
       JVMCI_ERROR_OK("stub should have a name");
     }
     char* name = strdup(java_lang_String::as_utf8_string(stubName));
--- old/src/hotspot/share/memory/filemap.cpp	2018-03-22 16:37:45.976885416 +0100
+++ new/src/hotspot/share/memory/filemap.cpp	2018-03-22 16:37:45.776885423 +0100
@@ -31,9 +31,6 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/altHashing.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1CollectedHeap.hpp"
-#endif
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "logging/logMessage.hpp"
@@ -42,6 +39,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
@@ -51,6 +49,9 @@
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CollectedHeap.hpp"
+#endif

 # include <sys/stat.h>
 # include <errno.h>
@@ -468,7 +469,7 @@
   if (MetaspaceShared::is_heap_region(region)) {
     assert((base - (char*)Universe::narrow_oop_base()) % HeapWordSize == 0, "Sanity");
     if (base != NULL) {
-      si->_addr._offset = (intx)oopDesc::encode_heap_oop_not_null((oop)base);
+      si->_addr._offset = (intx)CompressedOops::encode_not_null((oop)base);
    } else {
      si->_addr._offset = 0;
    }
@@ -783,7 +784,7 @@
     size_t used = si->_used;
     if (used > 0) {
       size_t size = used;
-      char* requested_addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
+      char* requested_addr = (char*)((void*)CompressedOops::decode_not_null(
                                             (narrowOop)si->_addr._offset));
       regions[region_num] = MemRegion((HeapWord*)requested_addr, size / HeapWordSize);
       region_num ++;
@@ -964,7 +965,7 @@
 char* FileMapInfo::FileMapHeader::region_addr(int idx) {
   if (MetaspaceShared::is_heap_region(idx)) {
     return _space[idx]._used > 0 ?
-             (char*)((void*)oopDesc::decode_heap_oop_not_null((narrowOop)_space[idx]._addr._offset)) : NULL;
+             (char*)((void*)CompressedOops::decode_not_null((narrowOop)_space[idx]._addr._offset)) : NULL;
   } else {
     return _space[idx]._addr._base;
   }
--- old/src/hotspot/share/memory/iterator.inline.hpp	2018-03-22 16:37:46.316885404 +0100
+++ new/src/hotspot/share/memory/iterator.inline.hpp	2018-03-22 16:37:46.116885411 +0100
@@ -27,6 +27,8 @@

 #include "classfile/classLoaderData.hpp"
 #include "memory/iterator.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
@@ -52,9 +54,9 @@
 template <typename T>
 void ExtendedOopClosure::verify(T* p) {
   if (should_verify_oops()) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop o = CompressedOops::decode_not_null(heap_oop);
       assert(Universe::heap()->is_in_closed_subset(o),
              "should be in closed *p " PTR_FORMAT " " PTR_FORMAT, p2i(p), p2i(o));
     }
--- old/src/hotspot/share/memory/metaspaceShared.cpp	2018-03-22 16:37:46.636885393 +0100
+++ new/src/hotspot/share/memory/metaspaceShared.cpp	2018-03-22 16:37:46.436885400 +0100
@@ -35,10 +35,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "code/codeCache.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1Allocator.inline.hpp"
-#include "gc/g1/g1CollectedHeap.hpp"
-#endif
 #include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodes.hpp"
@@ -49,6 +45,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
@@ -66,6 +63,10 @@
 #include "utilities/align.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/hashtable.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1Allocator.inline.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#endif

 ReservedSpace MetaspaceShared::_shared_rs;
 VirtualSpace MetaspaceShared::_shared_vs;
@@ -844,7 +845,7 @@
     assert(MetaspaceShared::is_heap_object_archiving_allowed(),
            "Archiving heap object is not allowed");
     _dump_region->append_intptr_t(
-      (intptr_t)oopDesc::encode_heap_oop_not_null(*o));
+      (intptr_t)CompressedOops::encode_not_null(*o));
   }
 }
@@ -1936,7 +1937,7 @@
            "Archived heap object is not allowed");
     assert(MetaspaceShared::open_archive_heap_region_mapped(),
            "Open archive heap region is not mapped");
-    RootAccess<IN_ARCHIVE_ROOT>::oop_store(p, oopDesc::decode_heap_oop_not_null(o));
+    RootAccess<IN_ARCHIVE_ROOT>::oop_store(p, CompressedOops::decode(o));
   }
 }
--- old/src/hotspot/share/oops/accessBackend.inline.hpp	2018-03-22 16:37:46.996885381 +0100
+++ new/src/hotspot/share/oops/accessBackend.inline.hpp	2018-03-22 16:37:46.796885388 +0100
@@ -27,7 +27,8 @@

 #include "oops/access.hpp"
 #include "oops/accessBackend.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oopsHierarchy.hpp"

 template <DecoratorSet decorators>
 template <DecoratorSet idecorators, typename T>
@@ -35,9 +36,9 @@
   AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
 RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<decorators>::type value) {
   if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
-    return oopDesc::decode_heap_oop_not_null(value);
+    return CompressedOops::decode_not_null(value);
   } else {
-    return oopDesc::decode_heap_oop(value);
+    return CompressedOops::decode(value);
  }
 }

@@ -48,9 +49,9 @@
   typename HeapOopType<decorators>::type>::type
 RawAccessBarrier<decorators>::encode_internal(T value) {
   if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
-    return oopDesc::encode_heap_oop_not_null(value);
+    return CompressedOops::encode_not_null(value);
   } else {
-    return oopDesc::encode_heap_oop(value);
+    return CompressedOops::encode(value);
   }
 }
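The accessBackend.inline.hpp hunks above select between decode and decode_not_null with a HasDecorator test, so the branch is resolved per template instantiation and the dead path folds away. A minimal standalone model of that trait-based dispatch (simplified stand-ins for DecoratorSet, OOP_NOT_NULL, and HasDecorator):

// Sketch only: a toy decorator bitmask, not HotSpot's Access API.
#include <cassert>
#include <cstdint>

typedef uint64_t DecoratorSet;
const DecoratorSet DECORATORS_NONE = 0;
const DecoratorSet OOP_NOT_NULL    = 1 << 0; // assumed single decorator

template <DecoratorSet decorators, DecoratorSet decorator>
struct HasDecorator { static const bool value = (decorators & decorator) != 0; };

static const uintptr_t base = 0x100000000ULL;

inline uintptr_t decode_not_null(uint32_t v) { return base + ((uintptr_t)v << 3); }
inline uintptr_t decode(uint32_t v) { return v == 0 ? 0 : decode_not_null(v); }

template <DecoratorSet decorators>
uintptr_t decode_internal(uint32_t value) {
  // The condition is a compile-time constant for each instantiation.
  if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
    return decode_not_null(value); // null check statically elided
  } else {
    return decode(value);
  }
}

int main() {
  assert(decode_internal<OOP_NOT_NULL>(8) == base + 64);
  assert(decode_internal<DECORATORS_NONE>(0) == 0);
  return 0;
}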
--- old/src/hotspot/share/oops/instanceKlass.cpp	2018-03-22 16:37:47.292885370 +0100
+++ new/src/hotspot/share/oops/instanceKlass.cpp	2018-03-22 16:37:47.092885377 +0100
@@ -3210,7 +3210,7 @@
 class VerifyFieldClosure: public OopClosure {
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     if (!oopDesc::is_oop_or_null(obj)) {
       tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p2i(p), p2i(obj));
       Universe::print_on(tty);
--- old/src/hotspot/share/oops/instanceRefKlass.inline.hpp	2018-03-22 16:37:47.668885357 +0100
+++ new/src/hotspot/share/oops/instanceRefKlass.inline.hpp	2018-03-22 16:37:47.472885364 +0100
@@ -28,6 +28,8 @@
 #include "classfile/javaClasses.inline.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
@@ -63,9 +65,9 @@
 bool InstanceRefKlass::try_discover(oop obj, ReferenceType type, OopClosureType* closure) {
   ReferenceProcessor* rp = closure->ref_processor();
   if (rp != NULL) {
-    T referent_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::referent_addr_raw(obj));
-    if (!oopDesc::is_null(referent_oop)) {
-      oop referent = oopDesc::decode_heap_oop_not_null(referent_oop);
+    T referent_oop = RawAccess<>::oop_load((T*)java_lang_ref_Reference::referent_addr_raw(obj));
+    if (!CompressedOops::is_null(referent_oop)) {
+      oop referent = CompressedOops::decode_not_null(referent_oop);
       if (!referent->is_gc_marked()) {
         // Only try to discover if not yet marked.
         return rp->discover_reference(obj, type);
@@ -86,8 +88,8 @@
   do_referent(obj, closure, contains);

   // Treat discovered as normal oop, if ref is not "active" (next non-NULL).
-  T next_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::next_addr_raw(obj));
-  if (!oopDesc::is_null(next_oop)) {
+  T next_oop = RawAccess<>::oop_load((T*)java_lang_ref_Reference::next_addr_raw(obj));
+  if (!CompressedOops::is_null(next_oop)) {
     do_discovered(obj, closure, contains);
   }
@@ -195,11 +197,11 @@
   log_develop_trace(gc, ref)("InstanceRefKlass %s for obj " PTR_FORMAT, s, p2i(obj));
   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(referent_addr), p2i(referent_addr ? (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
+      p2i(referent_addr), p2i(referent_addr ? RawAccess<>::oop_load(referent_addr) : (oop)NULL));
   log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(next_addr), p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
+      p2i(next_addr), p2i(next_addr ? RawAccess<>::oop_load(next_addr) : (oop)NULL));
   log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(discovered_addr), p2i(discovered_addr ? (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
+      p2i(discovered_addr), p2i(discovered_addr ? RawAccess<>::oop_load(discovered_addr) : (oop)NULL));
 }
 #endif
--- old/src/hotspot/share/oops/klass.cpp	2018-03-22 16:37:47.980885346 +0100
+++ new/src/hotspot/share/oops/klass.cpp	2018-03-22 16:37:47.780885353 +0100
@@ -35,6 +35,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -569,7 +570,7 @@
 oop Klass::archived_java_mirror_raw() {
   assert(DumpSharedSpaces, "called only during runtime");
   assert(has_raw_archived_mirror(), "must have raw archived mirror");
-  return oopDesc::decode_heap_oop(_archived_mirror);
+  return CompressedOops::decode(_archived_mirror);
 }

 // Used at CDS runtime to get the archived mirror from shared class. Uses GC barrier.
@@ -582,7 +583,7 @@
 // No GC barrier
 void Klass::set_archived_java_mirror_raw(oop m) {
   assert(DumpSharedSpaces, "called only during runtime");
-  _archived_mirror = oopDesc::encode_heap_oop(m);
+  _archived_mirror = CompressedOops::encode(m);
 }
 #endif // INCLUDE_CDS_JAVA_HEAP
--- old/src/hotspot/share/oops/klass.hpp	2018-03-22 16:37:48.312885335 +0100
+++ new/src/hotspot/share/oops/klass.hpp	2018-03-22 16:37:48.112885342 +0100
@@ -447,10 +447,6 @@
     }
   }

-  // Is an oop/narrowOop null or subtype of this Klass?
-  template <typename T>
-  bool is_instanceof_or_null(T element);
-
   bool search_secondary_supers(Klass* k) const;

   // Find LCA in class hierarchy
--- old/src/hotspot/share/oops/klass.inline.hpp	2018-03-22 16:37:48.632885324 +0100
+++ new/src/hotspot/share/oops/klass.inline.hpp	2018-03-22 16:37:48.428885331 +0100
@@ -71,13 +71,4 @@
   return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
 }

-template <typename T>
-bool Klass::is_instanceof_or_null(T element) {
-  if (oopDesc::is_null(element)) {
-    return true;
-  }
-  oop obj = oopDesc::decode_heap_oop_not_null(element);
-  return obj->klass()->is_subtype_of(this);
-}
-
 #endif // SHARE_VM_OOPS_KLASS_INLINE_HPP
--- old/src/hotspot/share/oops/oop.cpp	2018-03-22 16:37:48.952885313 +0100
+++ new/src/hotspot/share/oops/oop.cpp	2018-03-22 16:37:48.748885320 +0100
@@ -26,6 +26,7 @@
 #include "classfile/altHashing.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/verifyOopClosure.hpp"
 #include "runtime/handles.inline.hpp"
@@ -155,7 +156,7 @@
 VerifyOopClosure VerifyOopClosure::verify_oop;

 template <class T> void VerifyOopClosure::do_oop_work(T* p) {
-  oop obj = oopDesc::load_decode_heap_oop(p);
+  oop obj = RawAccess<>::oop_load(p);
   guarantee(oopDesc::is_oop_or_null(obj), "invalid oop: " INTPTR_FORMAT, p2i((oopDesc*) obj));
 }
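The klass.hpp and klass.inline.hpp hunks above delete the templated Klass::is_instanceof_or_null(T), whose body decoded its argument itself; its replacement (declared in the oop.hpp diff below) takes an already decoded oop, and the one caller in modRefBarrierSet now decodes explicitly. A toy model of the new shape of the check, with sketch classes standing in for Klass and oopDesc:

// Sketch only: simplified class model, linear supertype chain.
#include <cassert>
#include <cstddef>

struct KlassSketch {
  KlassSketch* super;
  bool is_subtype_of(const KlassSketch* k) const {
    for (const KlassSketch* c = this; c != NULL; c = c->super)
      if (c == k) return true;
    return false;
  }
};

struct ObjSketch { KlassSketch* klass; };

// New shape: null-or-subtype test on a decoded object pointer.
bool is_instanceof_or_null(const ObjSketch* obj, const KlassSketch* bound) {
  return obj == NULL || obj->klass->is_subtype_of(bound);
}

int main() {
  KlassSketch object_k = { NULL };
  KlassSketch string_k = { &object_k };
  ObjSketch s = { &string_k };
  assert(is_instanceof_or_null(&s, &object_k));   // subtype passes
  assert(is_instanceof_or_null(NULL, &string_k)); // null passes
  return 0;
}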
--- old/src/hotspot/share/oops/oop.hpp	2018-03-22 16:37:49.264885302 +0100
+++ new/src/hotspot/share/oops/oop.hpp	2018-03-22 16:37:49.064885309 +0100
@@ -127,9 +127,6 @@
   // Need this as public for garbage collection.
   template <class T> inline T* obj_field_addr_raw(int offset) const;

-  inline static bool is_null(oop obj)       { return obj == NULL; }
-  inline static bool is_null(narrowOop obj) { return obj == 0; }
-
   // Standard compare function returns negative value if o1 < o2
   //                                    0              if o1 == o2
   //                                    positive value if o1 > o2
@@ -145,41 +142,6 @@
     }
   }

-  // Decode an oop pointer from a narrowOop if compressed.
-  // These are overloaded for oop and narrowOop as are the other functions
-  // below so that they can be called in template functions.
-  static inline oop decode_heap_oop_not_null(oop v) { return v; }
-  static inline oop decode_heap_oop_not_null(narrowOop v);
-  static inline oop decode_heap_oop(oop v) { return v; }
-  static inline oop decode_heap_oop(narrowOop v);
-
-  // Encode an oop pointer to a narrow oop. The or_null versions accept
-  // null oop pointer, others do not in order to eliminate the
-  // null checking branches.
-  static inline narrowOop encode_heap_oop_not_null(oop v);
-  static inline narrowOop encode_heap_oop(oop v);
-
-  // Load an oop out of the Java heap as is without decoding.
-  // Called by GC to check for null before decoding.
-  static inline narrowOop load_heap_oop(narrowOop* p);
-  static inline oop load_heap_oop(oop* p);
-
-  // Load an oop out of Java heap and decode it to an uncompressed oop.
-  static inline oop load_decode_heap_oop_not_null(narrowOop* p);
-  static inline oop load_decode_heap_oop_not_null(oop* p);
-  static inline oop load_decode_heap_oop(narrowOop* p);
-  static inline oop load_decode_heap_oop(oop* p);
-
-  // Store already encoded heap oop into the heap.
-  static inline void store_heap_oop(narrowOop* p, narrowOop v);
-  static inline void store_heap_oop(oop* p, oop v);
-
-  // Encode oop if UseCompressedOops and store into the heap.
-  static inline void encode_store_heap_oop_not_null(narrowOop* p, oop v);
-  static inline void encode_store_heap_oop_not_null(oop* p, oop v);
-  static inline void encode_store_heap_oop(narrowOop* p, oop v);
-  static inline void encode_store_heap_oop(oop* p, oop v);
-
   // Access to fields in a instanceOop through these methods.
   template <DecoratorSet decorators>
   oop obj_field_access(int offset) const;
@@ -347,6 +309,8 @@
   inline int oop_iterate_no_header(OopClosure* bk);
   inline int oop_iterate_no_header(OopClosure* bk, MemRegion mr);

+  inline static bool is_instanceof_or_null(oop obj, Klass* klass);
+
   // identity hash; returns the identity hash key (computes it if necessary)
   // NOTE with the introduction of UseBiasedLocking that identity_hash() might reach a
   // safepoint if called on a biased object. Calling code must be aware of that.
--- old/src/hotspot/share/oops/oop.inline.hpp	2018-03-22 16:37:49.584885291 +0100
+++ new/src/hotspot/share/oops/oop.inline.hpp	2018-03-22 16:37:49.388885297 +0100
@@ -32,6 +32,7 @@
 #include "oops/access.inline.hpp"
 #include "oops/arrayKlass.hpp"
 #include "oops/arrayOop.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/markOop.inline.hpp"
 #include "oops/oop.hpp"
@@ -136,7 +137,7 @@
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
   if (UseCompressedClassPointers) {
-    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
+    _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);  // may be null (parnew overflow handling)
   } else {
     _metadata._klass = (Klass*)(address)k;
   }
@@ -145,7 +146,7 @@
 oop oopDesc::list_ptr_from_klass() {
   // This is only to be used during GC, for from-space objects.
   if (UseCompressedClassPointers) {
-    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
+    return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
   } else {
     // Special case for GC
     return (oop)(address)_metadata._klass;
@@ -239,83 +240,6 @@
 template <class T>
 T* oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }

-// Functions for getting and setting oops within instance objects.
-// If the oops are compressed, the type passed to these overloaded functions
-// is narrowOop.  All functions are overloaded so they can be called by
-// template functions without conditionals (the compiler instantiates via
-// the right type and inlines the appopriate code).
-
-// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
-// offset from the heap base. Saving the check for null can save instructions
-// in inner GC loops so these are separated.
-
-inline bool check_obj_alignment(oop obj) {
-  return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
-}
-
-oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
-  assert(!is_null(v), "narrow oop value can never be zero");
-  address base = Universe::narrow_oop_base();
-  int    shift = Universe::narrow_oop_shift();
-  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
-  assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
-  return result;
-}
-
-oop oopDesc::decode_heap_oop(narrowOop v) {
-  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
-}
-
-narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
-  assert(!is_null(v), "oop value can never be zero");
-  assert(check_obj_alignment(v), "Address not aligned");
-  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
-  address base = Universe::narrow_oop_base();
-  int    shift = Universe::narrow_oop_shift();
-  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
-  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
-  uint64_t result = pd >> shift;
-  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
-  assert(decode_heap_oop(result) == v, "reversibility");
-  return (narrowOop)result;
-}
-
-narrowOop oopDesc::encode_heap_oop(oop v) {
-  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
-}
-
-narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }
-oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
-
-void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
-void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
-
-// Load and decode an oop out of the Java heap into a wide oop.
-oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
-  return decode_heap_oop_not_null(load_heap_oop(p));
-}
-
-// Load and decode an oop out of the heap accepting null
-oop oopDesc::load_decode_heap_oop(narrowOop* p) {
-  return decode_heap_oop(load_heap_oop(p));
-}
-
-oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
-oop oopDesc::load_decode_heap_oop(oop* p)          { return *p; }
-
-void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
-void oopDesc::encode_store_heap_oop(oop* p, oop v)          { *p = v; }
-
-// Encode and store a heap oop.
-void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
-  *p = encode_heap_oop_not_null(v);
-}
-
-// Encode and store a heap oop allowing for null.
-void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
-  *p = encode_heap_oop(v);
-}
-
 template <DecoratorSet decorators>
 inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
 inline oop oopDesc::obj_field(int offset) const        { return HeapAccess<>::oop_load_at(as_oop(), offset); }
@@ -525,6 +449,10 @@
 ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)

+bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
+  return obj == NULL || obj->klass()->is_subtype_of(klass);
+}
+
 intptr_t oopDesc::identity_hash() {
   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
   // Note: The mark must be read into local variable to avoid concurrent updates.
--- old/src/hotspot/share/oops/oopsHierarchy.hpp	2018-03-22 16:37:49.912885279 +0100
+++ new/src/hotspot/share/oops/oopsHierarchy.hpp	2018-03-22 16:37:49.712885286 +0100
@@ -192,6 +192,10 @@
   return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o);
 }

+inline bool check_obj_alignment(oop obj) {
+  return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
+}
+
 // The metadata hierarchy is separate from the oop hierarchy
 // class MetaspaceObj
--- old/src/hotspot/share/prims/unsafe.cpp	2018-03-22 16:37:50.220885268 +0100
+++ new/src/hotspot/share/prims/unsafe.cpp	2018-03-22 16:37:50.020885275 +0100
@@ -209,7 +209,7 @@
   }

   T get() {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       T ret = RawAccess<>::load(addr());
       return normalize_for_read(ret);
@@ -220,7 +220,7 @@
   }

   void put(T x) {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       RawAccess<>::store(addr(), normalize_for_write(x));
     } else {
@@ -230,7 +230,7 @@

   T get_volatile() {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       volatile T ret = RawAccess<MO_SEQ_CST>::load(addr());
       return normalize_for_read(ret);
@@ -241,7 +241,7 @@
   }

   void put_volatile(T x) {
-    if (oopDesc::is_null(_obj)) {
+    if (_obj == NULL) {
       GuardUnsafeAccess guard(_thread);
       RawAccess<MO_SEQ_CST>::store(addr(), normalize_for_write(x));
     } else {
@@ -871,7 +871,7 @@
 UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e);
   } else {
@@ -882,7 +882,7 @@
 UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e);
   } else {
@@ -902,7 +902,7 @@
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
   } else {
@@ -913,7 +913,7 @@
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
   oop p = JNIHandles::resolve(obj);
-  if (oopDesc::is_null(p)) {
+  if (p == NULL) {
     volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
     return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
   } else {
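In the unsafe.cpp hunks above, oopDesc::is_null(_obj) was only ever a plain null test, so it becomes _obj == NULL; a null base object means the offset argument is an absolute off-heap address rather than a field offset. A standalone sketch of that two-way addressing convention (toy types, no access guards or normalization):

// Sketch only: plain pointers stand in for oop/JNI handles.
#include <cassert>
#include <cstddef>
#include <cstdint>

struct ObjSketch { int32_t field; };

int32_t get_int(ObjSketch* obj, intptr_t offset) {
  if (obj == NULL) {
    // Off-heap: the offset is itself the raw address.
    return *(int32_t*)offset;
  }
  // On-heap: the offset is relative to the object base.
  return *(int32_t*)((char*)obj + offset);
}

int main() {
  int32_t off_heap = 7;
  assert(get_int(NULL, (intptr_t)&off_heap) == 7);

  ObjSketch o = { 42 };
  assert(get_int(&o, offsetof(ObjSketch, field)) == 42);
  return 0;
}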
--- old/src/hotspot/share/runtime/stackValue.cpp	2018-03-22 16:37:50.560885257 +0100
+++ new/src/hotspot/share/runtime/stackValue.cpp	2018-03-22 16:37:50.360885264 +0100
@@ -24,7 +24,8 @@

 #include "precompiled.hpp"
 #include "code/debugInfo.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/stackValue.hpp"
@@ -103,7 +104,7 @@
       value.noop = *(narrowOop*) value_addr;
     }
     // Decode narrowoop and wrap a handle around the oop
-    Handle h(Thread::current(), oopDesc::decode_heap_oop(value.noop));
+    Handle h(Thread::current(), CompressedOops::decode(value.noop));
     return new StackValue(h);
   }
 #endif
--- old/src/hotspot/share/runtime/thread.cpp	2018-03-22 16:37:50.884885245 +0100
+++ new/src/hotspot/share/runtime/thread.cpp	2018-03-22 16:37:50.680885252 +0100
@@ -48,6 +48,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
@@ -3219,7 +3220,7 @@
 class PrintAndVerifyOopClosure: public OopClosure {
  protected:
   template <typename T> inline void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     if (obj == NULL) return;
     tty->print(INTPTR_FORMAT ": ", p2i(p));
     if (oopDesc::is_oop_or_null(obj)) {
--- /dev/null	2018-03-12 09:19:30.040007636 +0100
+++ new/src/hotspot/share/oops/compressedOops.inline.hpp	2018-03-22 16:37:51.080885238 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
+#define SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "memory/universe.hpp"
+#include "oops/oop.hpp"
+
+// Functions for encoding and decoding compressed oops.
+// If the oops are compressed, the type passed to these overloaded functions
+// is narrowOop. All functions are overloaded so they can be called by
+// template functions without conditionals (the compiler instantiates via
+// the right type and inlines the appropriate code).
+
+// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
+// offset from the heap base. Saving the check for null can save instructions
+// in inner GC loops so these are separated.
+
+namespace CompressedOops {
+  inline bool is_null(oop obj)       { return obj == NULL; }
+  inline bool is_null(narrowOop obj) { return obj == 0; }
+
+  inline oop decode_not_null(narrowOop v) {
+    assert(!is_null(v), "narrow oop value can never be zero");
+    address base = Universe::narrow_oop_base();
+    int    shift = Universe::narrow_oop_shift();
+    oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+    assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
+    return result;
+  }
+
+  inline oop decode(narrowOop v) {
+    return is_null(v) ? (oop)NULL : decode_not_null(v);
+  }
+
+  inline narrowOop encode_not_null(oop v) {
+    assert(!is_null(v), "oop value can never be zero");
+    assert(check_obj_alignment(v), "Address not aligned");
+    assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
+    address base = Universe::narrow_oop_base();
+    int    shift = Universe::narrow_oop_shift();
+    uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
+    assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
+    uint64_t result = pd >> shift;
+    assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
+    assert(decode(result) == v, "reversibility");
+    return (narrowOop)result;
+  }
+
+  inline narrowOop encode(oop v) {
+    return is_null(v) ? (narrowOop)0 : encode_not_null(v);
+  }
+
+  // No conversions needed for these overloads
+  inline oop decode_not_null(oop v)             { return v; }
+  inline oop decode(oop v)                      { return v; }
+  inline narrowOop encode_not_null(narrowOop v) { return v; }
+  inline narrowOop encode(narrowOop v)          { return v; }
+}
+
+#endif // SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
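Read back-to-back, encode_not_null and decode_not_null in the new header are exact inverses, which is what the "reversibility" assert claims. A standalone round-trip check of the same arithmetic with a toy base and shift (the real values come from Universe::narrow_oop_base() and Universe::narrow_oop_shift() at VM startup):

// Sketch only: toy constants, no heap or alignment checks beyond the basics.
#include <cassert>
#include <cstdint>

static const uintptr_t base  = 0x080000000ULL; // assumed heap base
static const int       shift = 3;              // assumed MinObjAlignment = 8

static uint32_t encode_not_null(uintptr_t v) {
  uint64_t pd = v - base;                        // delta to the heap base
  uint64_t result = pd >> shift;
  assert((result & 0xffffffff00000000ULL) == 0); // must fit in 32 bits
  return (uint32_t)result;
}

static uintptr_t decode_not_null(uint32_t v) {
  return base + ((uintptr_t)v << shift);
}

int main() {
  for (uintptr_t addr = base + 8; addr < base + 1024; addr += 8) {
    assert(decode_not_null(encode_not_null(addr)) == addr); // reversibility
  }
  return 0;
}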