src/share/vm/compiler/oopMap.cpp
*** old/src/share/vm/compiler/oopMap.cpp	Wed Sep 16 15:18:28 2015
--- new/src/share/vm/compiler/oopMap.cpp	Wed Sep 16 15:18:28 2015

*** 37,46 ****
--- 37,49 ----
  #include "c1/c1_Defs.hpp"
  #endif
  #ifdef COMPILER2
  #include "opto/optoreg.hpp"
  #endif
+ #ifdef SPARC
+ #include "vmreg_sparc.inline.hpp"
+ #endif

  // OopMapStream

  OopMapStream::OopMapStream(OopMap* oop_map, int oop_types_mask) {
    _stream = new CompressedReadStream(oop_map->write_stream()->buffer());

*** 274,296 ****
--- 277,304 ----
  static DoNothingClosure do_nothing;

  static void add_derived_oop(oop* base, oop* derived) {
  #ifndef TIERED
    COMPILER1_PRESENT(ShouldNotReachHere();)
+ #if INCLUDE_JVMCI
+   if (UseJVMCICompiler) {
+     ShouldNotReachHere();
+   }
+ #endif
  #endif // TIERED
! #ifdef COMPILER2
! #if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::add(derived, base);
! #endif // COMPILER2 || JVMCI
  }

  #ifndef PRODUCT
  static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
    // Print oopmap and regmap
    tty->print_cr("------ ");
    CodeBlob* cb = fr->cb();
!   const ImmutableOopMapSet* maps = cb->oop_maps();
    const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
    map->print();
    if( cb->is_nmethod() ) {
      nmethod* nm = (nmethod*)cb;
      // native wrappers have no scope data, it is implied

*** 323,333 ****
--- 331,341 ----
    CodeBlob* cb = fr->cb();
    assert(cb != NULL, "no codeblob");

    NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)

!   const ImmutableOopMapSet* maps = cb->oop_maps();
    const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
    assert(map != NULL, "no ptr map found");

    // handle derived pointers first (otherwise base pointer may be
    // changed before derived pointer offset has been collected)

*** 335,344 ****
--- 343,357 ----
    {
      OopMapStream oms(map,OopMapValue::derived_oop_value);
      if (!oms.is_done()) {
  #ifndef TIERED
        COMPILER1_PRESENT(ShouldNotReachHere();)
+ #if INCLUDE_JVMCI
+       if (UseJVMCICompiler) {
+         ShouldNotReachHere();
+       }
+ #endif
  #endif // !TIERED
        // Protect the operation on the derived pointers. This
        // protects the addition of derived pointers to the shared
        // derived pointer table in DerivedPointerTable::add().
        MutexLockerEx x(DerivedPointerTableGC_lock, Mutex::_no_safepoint_check_flag);

*** 400,410 ****
--- 413,425 ----
                 "found invalid value pointer");
          value_fn->do_oop(loc);
        } else if ( omv.type() == OopMapValue::narrowoop_value ) {
          narrowOop *nl = (narrowOop*)loc;
  #ifndef VM_LITTLE_ENDIAN
-         if (!omv.reg()->is_stack()) {
+         VMReg vmReg = omv.reg();
+         // Don't do this on SPARC float registers as they can be individually addressed
+         if (!vmReg->is_stack() SPARC_ONLY(&& !vmReg->is_FloatRegister())) {
            // compressed oops in registers only take up 4 bytes of an
            // 8 byte register but they are in the wrong part of the
            // word so adjust loc to point at the right place.
            nl = (narrowOop*)((address)nl + 4);
          }

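Aside (not part of the webrev): the hunk above keeps the long-standing big-endian adjustment, and the reasoning is easy to check in isolation. A compressed oop occupies only the low-order 32 bits of a 64-bit slot, and on a big-endian machine those low-order bits sit 4 bytes past the start of the slot, so the location pointer must be bumped by 4. The self-contained sketch below uses ordinary C++ with no HotSpot types; the variable names are invented for the example.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // A 32-bit "narrow oop" value held in the low half of a 64-bit slot.
      uint64_t slot = 0x00000000deadbeefULL;

      uint32_t at_offset_0;
      uint32_t at_offset_4;
      std::memcpy(&at_offset_0, &slot, sizeof(at_offset_0));                 // read slot bytes 0..3
      std::memcpy(&at_offset_4, (const char*)&slot + 4, sizeof(at_offset_4)); // read slot bytes 4..7

      // Little-endian: the value appears at offset 0.
      // Big-endian: the value appears at offset 4, which is why the oop-map
      // code adjusts the location by 4 when VM_LITTLE_ENDIAN is not defined.
      std::printf("offset 0: 0x%08x, offset 4: 0x%08x\n",
                  (unsigned)at_offset_0, (unsigned)at_offset_4);
      return 0;
    }
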
*** 449,459 ****
--- 464,474 ----
        DEBUG_ONLY(nof_callee++;)
      }

      // Check that runtime stubs save all callee-saved registers
  #ifdef COMPILER2
!     assert(cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
             (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
             "must save all");
  #endif // COMPILER2
    }

*** 463,479 ****
--- 478,499 ----
  #ifndef PRODUCT
  bool ImmutableOopMap::has_derived_pointer() const {
  #ifndef TIERED
    COMPILER1_PRESENT(return false);
+ #if INCLUDE_JVMCI
+   if (UseJVMCICompiler) {
+     return false;
+   }
+ #endif
  #endif // !TIERED
! #ifdef COMPILER2
-   OopMapStream oms((OopMap*)this,OopMapValue::derived_oop_value);
! #if defined(COMPILER2) || INCLUDE_JVMCI
!   OopMapStream oms(this,OopMapValue::derived_oop_value);
    return oms.is_done();
  #else
    return false;
! #endif // COMPILER2 || JVMCI
  }
  #endif //PRODUCT

  // Printing code is present in product build for -XX:+PrintAssembly.

*** 605,682 ****
--- 625,637 ----
    }

    return sizeof(ImmutableOopMap) + oms.stream_position();
  }
  #endif

! class ImmutableOopMapBuilder {
- private:
-   class Mapping;
-
- private:
-   const OopMapSet* _set;
-   const OopMap* _empty;
-   const OopMap* _last;
-   int _empty_offset;
-   int _last_offset;
-   int _offset;
-   Mapping* _mapping;
-   ImmutableOopMapSet* _new_set;
-
-   /* Used for bookkeeping when building ImmutableOopMaps */
-   class Mapping : public ResourceObj {
-   public:
-     enum kind_t { OOPMAP_UNKNOWN = 0, OOPMAP_NEW = 1, OOPMAP_EMPTY = 2, OOPMAP_DUPLICATE = 3 };
-
-     kind_t _kind;
-     int _offset;
-     int _size;
-     const OopMap* _map;
-     const OopMap* _other;
-
-     Mapping() : _kind(OOPMAP_UNKNOWN), _offset(-1), _size(-1), _map(NULL) {}
-
-     void set(kind_t kind, int offset, int size, const OopMap* map = 0, const OopMap* other = 0) {
-       _kind = kind;
-       _offset = offset;
-       _size = size;
-       _map = map;
-       _other = other;
-     }
-   };
-
- public:
-   ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _new_set(NULL), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0) {
! ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _new_set(NULL), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1) {
    _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
  }
-
-   int heap_size();
-   ImmutableOopMapSet* build();
- private:
-   bool is_empty(const OopMap* map) const {
-     return map->count() == 0;
-   }
-
-   bool is_last_duplicate(const OopMap* map) {
-     if (_last != NULL && _last->count() > 0 && _last->equals(map)) {
-       return true;
-     }
-     return false;
-   }
-
- #ifdef ASSERT
-   void verify(address buffer, int size, const ImmutableOopMapSet* set);
- #endif
-
-   bool has_empty() const {
-     return _empty_offset != -1;
-   }
-
-   int size_for(const OopMap* map) const;
-   void fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set);
-   int fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set);
-   void fill(ImmutableOopMapSet* set, int size);
- };

  int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
    return align_size_up(sizeof(ImmutableOopMap) + map->data_size(), 8);
  }

*** 717,726 ****
--- 672,682 ----
      _offset += size;
    }

    int total = base + pairs + _offset;
    DEBUG_ONLY(total += 8);
+   _required = total;
    return total;
  }

  void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
    assert(offset < set->nr_of_bytes(), "check");

*** 768,802 ****
--- 724,762 ----
      assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
    }
  }
  #endif

! ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
!   int required = heap_size();
-
-   // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
-   address buffer = (address) NEW_C_HEAP_ARRAY(unsigned char, required, mtCode);
-
-   DEBUG_ONLY(memset(&buffer[required-8], 0xff, 8));
! ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
!   DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

!   _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
!   fill(_new_set, _required);

!   DEBUG_ONLY(verify(buffer, _required, _new_set));

    return _new_set;
  }

+ ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
+   _required = heap_size();
+
+   // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
+   address buffer = (address) NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
+   return generate_into(buffer);
+ }
+
  ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
    ResourceMark mark;
    ImmutableOopMapBuilder builder(oopmap_set);
    return builder.build();
  }

  //------------------------------DerivedPointerTable---------------------------

! #ifdef COMPILER2
! #if defined(COMPILER2) || INCLUDE_JVMCI

  class DerivedPointerEntry : public CHeapObj<mtCompiler> {
   private:
    oop*     _location; // Location of derived pointer (also pointing to the base)
    intptr_t _offset;   // Offset from base pointer

*** 885,890 ****
--- 845,850 ----
    }

    _list->clear();
    _active = false;
  }

! #endif // COMPILER2 || JVMCI
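
Aside (not part of the webrev): the DerivedPointerTable code that these hunks extend from COMPILER2 to JVMCI exists because a compiler may keep an interior ("derived") pointer into an object whose base the GC is about to move. The table records where each derived pointer lives and its offset from the base, so the pointer can be recomputed once the base has been relocated. The sketch below illustrates that bookkeeping in plain C++; the struct and variable names are invented for the example and are not HotSpot's.

    #include <cstdint>
    #include <cstdio>

    // Minimal stand-in for one table entry: where the derived pointer is
    // stored, and its distance from the base pointer, captured before GC.
    struct DerivedEntry {
      intptr_t* location;
      intptr_t  offset;
    };

    int main() {
      intptr_t obj[8] = {0};                            // stand-in for a heap object
      intptr_t base    = (intptr_t)obj;
      intptr_t derived = base + 3 * sizeof(intptr_t);   // interior pointer into obj

      DerivedEntry e = { &derived, derived - base };    // recorded before the "GC"

      intptr_t new_obj[8] = {0};                        // pretend the object was copied here
      intptr_t new_base = (intptr_t)new_obj;

      *e.location = new_base + e.offset;                // re-derive after the base moved
      std::printf("rebased derived pointer: %p\n", (void*)*e.location);
      return 0;
    }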
