
src/share/vm/interpreter/rewriter.hpp

rev 10555 : imported patch primitive arrays

*** 35,80 ****
  class Rewriter: public StackObj {
   private:
    instanceKlassHandle _klass;
    constantPoolHandle  _pool;
    Array<Method*>*     _methods;
!   intArray            _cp_map;
!   intStack            _cp_cache_map;  // for Methodref, Fieldref,
                                        // InterfaceMethodref and InvokeDynamic
!   intArray            _reference_map; // maps from cp index to resolved_refs index (or -1)
!   intStack            _resolved_references_map; // for strings, methodHandle, methodType
!   intStack            _invokedynamic_references_map; // for invokedynamic resolved refs
!   intArray            _method_handle_invokers;
    int                 _resolved_reference_limit;
  
    // For mapping invokedynamic bytecodes, which are discovered during method
    // scanning. The invokedynamic entries are added at the end of the cpCache.
    // If there are any invokespecial/InterfaceMethodref special case bytecodes,
    // these entries are added before invokedynamic entries so that the
    // invokespecial bytecode 16 bit index doesn't overflow.
!   intStack            _invokedynamic_cp_cache_map;
  
    // For patching.
    GrowableArray<address>* _patch_invokedynamic_bcps;
    GrowableArray<int>*     _patch_invokedynamic_refs;
  
    void init_maps(int length) {
!     _cp_map.initialize(length, -1);
!     // Choose an initial value large enough that we don't get frequent
!     // calls to grow().
!     _cp_cache_map.initialize(length/2);
  
      // Also cache resolved objects, in another different cache.
!     _reference_map.initialize(length, -1);
!     _resolved_references_map.initialize(length/2);
!     _invokedynamic_references_map.initialize(length/2);
      _resolved_reference_limit = -1;
      _first_iteration_cp_cache_limit = -1;
  
      // invokedynamic specific fields
!     _invokedynamic_cp_cache_map.initialize(length/4);
!     _patch_invokedynamic_bcps = new GrowableArray<address>(length/4);
!     _patch_invokedynamic_refs = new GrowableArray<int>(length/4);
    }
  
    int _first_iteration_cp_cache_limit;
    void record_map_limits() {
      // Record initial size of the two arrays generated for the CP cache
--- 35,83 ----
  class Rewriter: public StackObj {
   private:
    instanceKlassHandle _klass;
    constantPoolHandle  _pool;
    Array<Method*>*     _methods;
!   GrowableArray<int>  _cp_map;
!   GrowableArray<int>  _cp_cache_map;  // for Methodref, Fieldref,
                                        // InterfaceMethodref and InvokeDynamic
!   GrowableArray<int>  _reference_map; // maps from cp index to resolved_refs index (or -1)
!   GrowableArray<int>  _resolved_references_map; // for strings, methodHandle, methodType
!   GrowableArray<int>  _invokedynamic_references_map; // for invokedynamic resolved refs
!   GrowableArray<int>  _method_handle_invokers;
    int                 _resolved_reference_limit;
  
    // For mapping invokedynamic bytecodes, which are discovered during method
    // scanning. The invokedynamic entries are added at the end of the cpCache.
    // If there are any invokespecial/InterfaceMethodref special case bytecodes,
    // these entries are added before invokedynamic entries so that the
    // invokespecial bytecode 16 bit index doesn't overflow.
!   GrowableArray<int>  _invokedynamic_cp_cache_map;
  
    // For patching.
    GrowableArray<address>* _patch_invokedynamic_bcps;
    GrowableArray<int>*     _patch_invokedynamic_refs;
  
    void init_maps(int length) {
!     _cp_map.trunc_to(0);
!     _cp_map.at_grow(length, -1);
! 
!     _cp_cache_map.trunc_to(0);
  
      // Also cache resolved objects, in another different cache.
!     _reference_map.trunc_to(0);
!     _reference_map.at_grow(length, -1);
! 
!     _method_handle_invokers.trunc_to(0);
!     _resolved_references_map.trunc_to(0);
!     _invokedynamic_references_map.trunc_to(0);
      _resolved_reference_limit = -1;
      _first_iteration_cp_cache_limit = -1;
  
      // invokedynamic specific fields
!     _invokedynamic_cp_cache_map.trunc_to(0);
!     _patch_invokedynamic_bcps = new GrowableArray<address>(length / 4);
!     _patch_invokedynamic_refs = new GrowableArray<int>(length / 4);
    }
  
    int _first_iteration_cp_cache_limit;
    void record_map_limits() {
      // Record initial size of the two arrays generated for the CP cache
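The substance of this hunk is the switch from the fixed-size intArray/intStack maps (sized up front via initialize()) to resizable GrowableArray<int> members that init_maps() now resets with trunc_to(0) and pre-fills with at_grow(length, -1). The following is a minimal standalone sketch of that reset-and-prefill pattern, not HotSpot code: std::vector<int> stands in for GrowableArray<int>, and the struct/field names are illustrative only.

// Standalone sketch of the new init_maps() reset pattern. trunc_to(0) is
// approximated by clear() (drop contents, keep storage) and at_grow(length, -1)
// by a resize that fills the new slots with -1, meaning "no cache entry yet".
#include <cassert>
#include <vector>

struct RewriterMapsSketch {
  std::vector<int> cp_map;        // cp index -> cp cache index, or -1
  std::vector<int> cp_cache_map;  // cp cache index -> cp index

  void init_maps(int length) {
    cp_map.clear();                 // ~ _cp_map.trunc_to(0)
    cp_map.resize(length + 1, -1);  // ~ _cp_map.at_grow(length, -1)
    cp_cache_map.clear();           // ~ _cp_cache_map.trunc_to(0); filled later by append()
  }
};

int main() {
  RewriterMapsSketch maps;
  maps.init_maps(10);
  assert(maps.cp_map[3] == -1);     // every constant pool index starts unmapped
  return 0;
}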
*** 88,101 ****
      // cp cache initialization?
      assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
      return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
    }
  
!   int  cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; }
!   bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
  
!   int add_map_entry(int cp_index, intArray* cp_map, intStack* cp_cache_map) {
      assert(cp_map->at(cp_index) == -1, "not twice on same cp_index");
      int cache_index = cp_cache_map->append(cp_index);
      cp_map->at_put(cp_index, cache_index);
      return cache_index;
    }
--- 91,104 ----
      // cp cache initialization?
      assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
      return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
    }
  
!   int  cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map.at(i); }
!   bool has_cp_cache(int i) { return (uint) i < (uint) _cp_map.length() && _cp_map.at(i) >= 0; }
  
!   int add_map_entry(int cp_index, GrowableArray<int>* cp_map, GrowableArray<int>* cp_cache_map) {
      assert(cp_map->at(cp_index) == -1, "not twice on same cp_index");
      int cache_index = cp_cache_map->append(cp_index);
      cp_map->at_put(cp_index, cache_index);
      return cache_index;
    }
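add_map_entry() (now taking GrowableArray<int>* arguments) keeps the two maps consistent: appending the cp index to cp_cache_map yields the new cache index, which is then stored back into cp_map so lookups work in both directions. Below is a standalone sketch of that invariant, again with std::vector<int> standing in for GrowableArray<int>; it is an illustration, not the HotSpot implementation.

// Standalone sketch of the two-way mapping maintained by add_map_entry():
// cp_map is the forward map (cp index -> cache index), while cp_cache_map
// records, for each cache index, the cp index it was created from.
#include <cassert>
#include <vector>

static int add_map_entry(int cp_index,
                         std::vector<int>* cp_map,         // cp index -> cache index
                         std::vector<int>* cp_cache_map) { // cache index -> cp index
  assert((*cp_map)[cp_index] == -1 && "not twice on same cp_index");
  cp_cache_map->push_back(cp_index);                   // ~ cp_cache_map->append(cp_index)
  int cache_index = (int)cp_cache_map->size() - 1;     // append() reports this index
  (*cp_map)[cp_index] = cache_index;                   // ~ cp_map->at_put(cp_index, cache_index)
  return cache_index;
}

int main() {
  std::vector<int> cp_map(16, -1);
  std::vector<int> cp_cache_map;
  int cache_index = add_map_entry(5, &cp_map, &cp_cache_map);
  assert(cp_map[5] == cache_index);        // forward lookup, as in cp_entry_to_cp_cache()
  assert(cp_cache_map[cache_index] == 5);  // reverse lookup, as in cp_cache_entry_pool_index()
  return 0;
}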
*** 119,129 ****
      // this index starts at one but in the bytecode it's appended to the end.
      return cache_index + _first_iteration_cp_cache_limit;
    }
    int invokedynamic_cp_cache_entry_pool_index(int cache_index) {
!     int cp_index = _invokedynamic_cp_cache_map[cache_index];
      return cp_index;
    }
  
    // add a new CP cache entry beyond the normal cache for the special case of
    // invokespecial with InterfaceMethodref as cpool operand.
--- 122,132 ----
      // this index starts at one but in the bytecode it's appended to the end.
      return cache_index + _first_iteration_cp_cache_limit;
    }
    int invokedynamic_cp_cache_entry_pool_index(int cache_index) {
!     int cp_index = _invokedynamic_cp_cache_map.at(cache_index);
      return cp_index;
    }
  
    // add a new CP cache entry beyond the normal cache for the special case of
    // invokespecial with InterfaceMethodref as cpool operand.
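The context around this hunk also shows the cpCache layout rule for invokedynamic: those entries are appended after the _first_iteration_cp_cache_limit normal entries, so the index written into the rewritten bytecode is the invokedynamic-local cache index plus that limit, and _invokedynamic_cp_cache_map maps that local index back to the constant pool. A small standalone illustration follows; it is not HotSpot code and the shortened names are hypothetical.

// Standalone sketch of the invokedynamic cpCache index arithmetic shown above:
// normal cp cache entries occupy [0, first_limit); invokedynamic entries come
// after them, and their own map recovers the original constant pool index.
#include <cassert>
#include <vector>

struct IndyLayoutSketch {
  int first_limit;                        // ~ _first_iteration_cp_cache_limit
  std::vector<int> indy_cp_cache_map;     // ~ _invokedynamic_cp_cache_map

  int bytecode_cache_index(int indy_index) const {
    return indy_index + first_limit;      // index as it appears in the bytecode
  }
  int pool_index(int indy_index) const {
    return indy_cp_cache_map[indy_index]; // ~ invokedynamic_cp_cache_entry_pool_index()
  }
};

int main() {
  IndyLayoutSketch layout{8, {42, 57}};   // 8 normal entries, 2 invokedynamic entries
  assert(layout.bytecode_cache_index(1) == 9);
  assert(layout.pool_index(1) == 57);
  return 0;
}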
*** 142,155 ****
      return cache_index;
    }
  
    int  cp_entry_to_resolved_references(int cp_index) const {
      assert(has_entry_in_resolved_references(cp_index), "oob");
!     return _reference_map[cp_index];
    }
    bool has_entry_in_resolved_references(int cp_index) const {
!     return (uint)cp_index < (uint)_reference_map.length() && _reference_map[cp_index] >= 0;
    }
  
    // add a new entry to the resolved_references map
    int add_resolved_references_entry(int cp_index) {
      int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map);
--- 145,158 ----
      return cache_index;
    }
  
    int  cp_entry_to_resolved_references(int cp_index) const {
      assert(has_entry_in_resolved_references(cp_index), "oob");
!     return _reference_map.at(cp_index);
    }
    bool has_entry_in_resolved_references(int cp_index) const {
!     return (uint) cp_index < (uint) _reference_map.length() && _reference_map.at(cp_index) >= 0;
    }
  
    // add a new entry to the resolved_references map
    int add_resolved_references_entry(int cp_index) {
      int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map);
*** 172,188 ****
      }
      return ref_index;
    }
  
    int resolved_references_entry_to_pool_index(int ref_index) {
!     int cp_index = _resolved_references_map[ref_index];
      return cp_index;
    }
  
    // Access the contents of _cp_cache_map to determine CP cache layout.
    int cp_cache_entry_pool_index(int cache_index) {
!     int cp_index = _cp_cache_map[cache_index];
      return cp_index;
    }
  
    // All the work goes in here:
    Rewriter(instanceKlassHandle klass, const constantPoolHandle& cpool, Array<Method*>* methods, TRAPS);
--- 175,191 ----
      }
      return ref_index;
    }
  
    int resolved_references_entry_to_pool_index(int ref_index) {
!     int cp_index = _resolved_references_map.at(ref_index);
      return cp_index;
    }
  
    // Access the contents of _cp_cache_map to determine CP cache layout.
    int cp_cache_entry_pool_index(int cache_index) {
!     int cp_index = _cp_cache_map.at(cache_index);
      return cp_index;
    }
  
    // All the work goes in here:
    Rewriter(instanceKlassHandle klass, const constantPoolHandle& cpool, Array<Method*>* methods, TRAPS);