/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_CPCACHEOOP_HPP
#define SHARE_VM_OOPS_CPCACHEOOP_HPP

#include "interpreter/bytecodes.hpp"
#include "memory/allocation.hpp"
#include "utilities/array.hpp"

class PSPromotionManager;

// A ConstantPoolCacheEntry describes an individual entry of the constant
// pool cache. There's 2 principal kinds of entries: field entries for
// instance & static field access, and method entries for invokes.
// Some of the entry layout is shared and looks as follows:
//
// bit number |31                0|
// bit length |-8--|-8--|---16----|
// --------------------------------
// _indices   [ b2 | b1 |  index  ]  index = constant_pool_index
// _f1        [  entry specific   ]  metadata ptr (method or klass)
// _f2        [  entry specific   ]  vtable or res_ref index, or vfinal method ptr
// _flags     [tos|0|F=1|0|0|0|f|v|0 |0000|field_index]  (for field entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|----16-----]
// _flags     [tos|0|F=0|M|A|I|f|0|vf|0000|00000|psize]  (for method entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|--8--|--8--]
//
// --------------------------------
//
// with:
// index       = original constant pool index
// b1          = bytecode 1
// b2          = bytecode 2
// psize       = parameters size (method entries only)
// field_index = index into field information in holder InstanceKlass
//               The index max is 0xffff (max number of fields in constant pool)
//               and is multiplied by (InstanceKlass::next_offset) when accessing.
// tos         = TosState
// F           = the entry is for a field (or F=0 for a method)
// A           = call site has an appendix argument (loaded from resolved references)
// I           = interface call is forced virtual (must use a vtable index or vfinal)
// f           = field or method is final
// v           = field is volatile
// vf          = virtual but final (method entries only: is_vfinal())
//
// The flags after TosState have the following interpretation:
// bit 27: 0 for fields, 1 for methods
// f  flag true if field is marked final
// v  flag true if field is volatile (only for fields)
// f2 flag true if f2 contains an oop (e.g., virtual final method)
// fv flag true if invokeinterface used for method in class Object
//
// The flags 31, 30, 29, 28 together build a 4 bit number 0 to 8 with the
// following mapping to the TosState states:
//
// btos: 0
// ctos: 1
// stos: 2
// itos: 3
// ltos: 4
// ftos: 5
// dtos: 6
// atos: 7
// vtos: 8
//
// Entry specific: field entries:
// _indices = get (b1 section) and put (b2 section) bytecodes, original constant pool index
// _f1      = field holder (as a java.lang.Class, not a Klass*)
// _f2      = field offset in bytes
// _flags   = field type information, original FieldInfo index in field holder
//            (field_index section)
//
// Entry specific: method entries:
// _indices = invoke code for f1 (b1 section), invoke code for f2 (b2 section),
//            original constant pool index
// _f1      = Method* for non-virtual calls, unused by virtual calls.
//            for interface calls, which are essentially virtual but need a klass,
//            contains Klass* for the corresponding interface.
//            for invokedynamic, f1 contains a site-specific CallSite object (as an appendix)
//            for invokehandle, f1 contains a site-specific MethodType object (as an appendix)
//            (upcoming metadata changes will move the appendix to a separate array)
// _f2      = vtable/itable index (or final Method*) for virtual calls only,
//            unused by non-virtual.  The is_vfinal flag indicates this is a
//            method pointer for a final method, not an index.
// _flags   = method type info (t section),
//            virtual final bit (vfinal),
//            parameter size (psize section)
//
// Note: invokevirtual & invokespecial bytecodes can share the same constant
//       pool entry and thus the same constant pool cache entry. All invoke
//       bytecodes but invokevirtual use only _f1 and the corresponding b1
//       bytecode, while invokevirtual uses only _f2 and the corresponding
//       b2 bytecode.  The value of _flags is shared for both types of entries.
//
// The fields are volatile so that they are stored in the order written in the
// source code.  The _indices field with the bytecode must be written last.
class CallInfo;

class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  friend class constantPoolCacheKlass;
  friend class ConstantPool;
  friend class InterpreterRuntime;

 private:
  // NOTE: these fields are volatile so resolution writes are emitted in
  // program order; _indices (carrying the resolved bytecode) must be the
  // last field written -- see the layout comment at the top of this file.
  volatile intx      _indices;  // constant pool index & rewrite bytecodes
  volatile Metadata* _f1;       // entry specific metadata field
  volatile intx      _f2;       // entry specific int/metadata field
  volatile intx      _flags;    // flags


  void set_bytecode_1(Bytecodes::Code code);
  void set_bytecode_2(Bytecodes::Code code);
  // _f1 is write-once: it may only transition from NULL to its final value.
  void set_f1(Metadata* f1) {
    Metadata* existing_f1 = (Metadata*)_f1; // read once
    assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change");
    _f1 = f1;
  }
  void release_set_f1(Metadata* f1);
  // _f2 is also write-once (0 is the unset value).
  void set_f2(intx f2) { assert(_f2 == 0 || _f2 == f2, "illegal field change"); _f2 = f2; }
  void set_f2_as_vfinal_method(Method* f2) { assert(_f2 == 0 || _f2 == (intptr_t) f2, "illegal field change"); assert(is_vfinal(), "flags must be set"); _f2 = (intptr_t) f2; }
  int make_flags(TosState state, int option_bits, int field_index_or_method_params);
  void set_flags(intx flags) { _flags = flags; }
  bool init_flags_atomic(intx flags);
  void set_field_flags(TosState field_type, int option_bits, int field_index) {
    assert((field_index & field_index_mask) == field_index, "field_index in range");
    set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index));
  }
  void set_method_flags(TosState return_type, int option_bits, int method_params) {
    assert((method_params & parameter_size_mask) == method_params, "method_params in range");
    set_flags(make_flags(return_type, option_bits, method_params));
  }
  bool init_method_flags_atomic(TosState return_type, int option_bits, int method_params) {
    assert((method_params & parameter_size_mask) == method_params, "method_params in range");
    return init_flags_atomic(make_flags(return_type, option_bits, method_params));
  }

 public:
  // specific bit definitions for the flags field:
  // (Note: the interpreter must use these definitions to access the CP cache.)
  enum {
    // high order bits are the TosState corresponding to field type or method return type
    tos_state_bits             = 4,
    tos_state_mask             = right_n_bits(tos_state_bits),
    tos_state_shift            = BitsPerInt - tos_state_bits,  // see verify_tos_state_shift below
    // misc. option bits; can be any bit position in [16..27]
    is_field_entry_shift       = 26,  // (F) is it a field or a method?
    has_method_type_shift      = 25,  // (M) does the call site have a MethodType?
    has_appendix_shift         = 24,  // (A) does the call site have an appendix argument?
    is_forced_virtual_shift    = 23,  // (I) is the interface reference forced to virtual mode?
    is_final_shift             = 22,  // (f) is the field or method final?
    is_volatile_shift          = 21,  // (v) is the field volatile?
    is_vfinal_shift            = 20,  // (vf) did the call resolve to a final method?
    // low order bits give field index (for FieldInfo) or method parameter size:
    field_index_bits           = 16,
    field_index_mask           = right_n_bits(field_index_bits),
    parameter_size_bits        = 8,   // subset of field_index_mask, range is 0..255
    parameter_size_mask        = right_n_bits(parameter_size_bits),
    // everything between the tos_state bits and the field_index/psize bits
    option_bits_mask           = ~(((-1) << tos_state_shift) | (field_index_mask | parameter_size_mask))
  };

  // specific bit definitions for the indices field:
  enum {
    cp_index_bits              = 2*BitsPerByte,
    cp_index_mask              = right_n_bits(cp_index_bits),
    bytecode_1_shift           = cp_index_bits,
    bytecode_1_mask            = right_n_bits(BitsPerByte),  // == (u1)0xFF
    bytecode_2_shift           = cp_index_bits + BitsPerByte,
    bytecode_2_mask            = right_n_bits(BitsPerByte)   // == (u1)0xFF
  };


  // Initialization
  void initialize_entry(int original_index);     // initialize primary entry
  void initialize_resolved_reference_index(int ref_index) {
    assert(_f2 == 0, "set once");  // note: ref_index might be zero also
    _f2 = ref_index;
  }

  void set_field(                                // sets entry to resolved field state
    Bytecodes::Code get_code,                    // the bytecode used for reading the field
    Bytecodes::Code put_code,                    // the bytecode used for writing the field
    KlassHandle     field_holder,                // the object/klass holding the field
    int             orig_field_index,            // the original field index in the field holder
    int             field_offset,                // the field offset in words in the field holder
    TosState        field_type,                  // the (machine) field type
    bool            is_final,                    // the field is final
    bool            is_volatile,                 // the field is volatile
    Klass*          root_klass                   // needed by the GC to dirty the klass
  );

  void set_method(                               // sets entry to resolved method entry
    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
    methodHandle    method,                      // the method/prototype if any (NULL, otherwise)
    int             vtable_index                 // the vtable index if any, else negative
  );

  void set_interface_call(
    methodHandle method,                         // Resolved method
    int index                                    // Method index into interface
  );

  void set_method_handle(
    constantPoolHandle cpool,                    // holding constant pool (required for locking)
    const CallInfo &call_info                    // Call link information
  );

  void set_dynamic_call(
    constantPoolHandle cpool,                    // holding constant pool (required for locking)
    const CallInfo &call_info                    // Call link information
  );

  // Common code for invokedynamic and MH invocations.

  // The "appendix" is an optional call-site-specific parameter which is
  // pushed by the JVM at the end of the argument list.  This argument may
  // be a MethodType for the MH.invokes and a CallSite for an invokedynamic
  // instruction.  However, its exact type and use depends on the Java upcall,
  // which simply returns a compiled LambdaForm along with any reference
  // that LambdaForm needs to complete the call.  If the upcall returns a
  // null appendix, the argument is not passed at all.
  //
  // The appendix is *not* represented in the signature of the symbolic
  // reference for the call site, but (if present) it *is* represented in
  // the Method* bound to the site.  This means that static and dynamic
  // resolution logic needs to make slightly different assessments about the
  // number and types of arguments.
  void set_method_handle_common(
    constantPoolHandle cpool,                    // holding constant pool (required for locking)
    Bytecodes::Code invoke_code,                 // _invokehandle or _invokedynamic
    const CallInfo &call_info                    // Call link information
  );

  // invokedynamic and invokehandle call sites have two entries in the
  // resolved references array:
  //   appendix   (at index+0)
  //   MethodType (at index+1)
  enum {
    _indy_resolved_references_appendix_offset    = 0,
    _indy_resolved_references_method_type_offset = 1,
    _indy_resolved_references_entries
  };

  Method*      method_if_resolved(constantPoolHandle cpool);
  oop        appendix_if_resolved(constantPoolHandle cpool);
  oop     method_type_if_resolved(constantPoolHandle cpool);

  void set_parameter_size(int value);

  // Which bytecode number (1 or 2) in the index field is valid for this bytecode?
  // Returns -1 if neither is valid.
  static int bytecode_number(Bytecodes::Code code) {
    switch (code) {
      case Bytecodes::_getstatic       :    // fall through
      case Bytecodes::_getfield        :    // fall through
      case Bytecodes::_invokespecial   :    // fall through
      case Bytecodes::_invokestatic    :    // fall through
      case Bytecodes::_invokehandle    :    // fall through
      case Bytecodes::_invokedynamic   :    // fall through
      case Bytecodes::_invokeinterface : return 1;
      case Bytecodes::_putstatic       :    // fall through
      case Bytecodes::_putfield        :    // fall through
      case Bytecodes::_invokevirtual   : return 2;
      default                          : break;
    }
    return -1;
  }

  // Has this bytecode been resolved? Only valid for invokes and get/put field/static.
  bool is_resolved(Bytecodes::Code code) const {
    switch (bytecode_number(code)) {
      case 1: return (bytecode_1() == code);
      case 2: return (bytecode_2() == code);
    }
    return false;  // default: not resolved
  }

  // Accessors
  int indices() const                            { return _indices; }
  int constant_pool_index() const                { return (indices() & cp_index_mask); }
  Bytecodes::Code bytecode_1() const             { return Bytecodes::cast((indices() >> bytecode_1_shift) & bytecode_1_mask); }
  Bytecodes::Code bytecode_2() const             { return Bytecodes::cast((indices() >> bytecode_2_shift) & bytecode_2_mask); }
  Method* f1_as_method() const                   { Metadata* f1 = (Metadata*)_f1; assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
  Klass*  f1_as_klass() const                    { Metadata* f1 = (Metadata*)_f1; assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
  bool    is_f1_null() const                     { Metadata* f1 = (Metadata*)_f1; return f1 == NULL; }  // classifies a CPC entry as unbound
  int  f2_as_index() const                       { assert(!is_vfinal(), ""); return (int) _f2; }
  Method* f2_as_vfinal_method() const            { assert(is_vfinal(), ""); return (Method*)_f2; }
  int  field_index() const                       { assert(is_field_entry(),  ""); return (_flags & field_index_mask); }
  int  parameter_size() const                    { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
  bool is_volatile() const                       { return (_flags & (1 << is_volatile_shift))       != 0; }
  bool is_final() const                          { return (_flags & (1 << is_final_shift))          != 0; }
  bool is_forced_virtual() const                 { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
  bool is_vfinal() const                         { return (_flags & (1 << is_vfinal_shift))         != 0; }
  bool has_appendix() const                      { return (_flags & (1 << has_appendix_shift))      != 0; }
  bool has_method_type() const                   { return (_flags & (1 << has_method_type_shift))   != 0; }
  bool is_method_entry() const                   { return (_flags & (1 << is_field_entry_shift))    == 0; }
  bool is_field_entry() const                    { return (_flags & (1 << is_field_entry_shift))    != 0; }
  bool is_byte() const                           { return flag_state() == btos; }
  bool is_char() const                           { return flag_state() == ctos; }
  bool is_short() const                          { return flag_state() == stos; }
  bool is_int() const                            { return flag_state() == itos; }
  bool is_long() const                           { return flag_state() == ltos; }
  bool is_float() const                          { return flag_state() == ftos; }
  bool is_double() const                         { return flag_state() == dtos; }
  bool is_object() const                         { return flag_state() == atos; }
  TosState flag_state() const                    { assert((uint)number_of_states <= (uint)tos_state_mask+1, "");
                                                   return (TosState)((_flags >> tos_state_shift) & tos_state_mask); }

  // Code generation support
  static WordSize size()                         { return in_WordSize(sizeof(ConstantPoolCacheEntry) / HeapWordSize); }
  static ByteSize size_in_bytes()                { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
  static ByteSize indices_offset()               { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
  static ByteSize f1_offset()                    { return byte_offset_of(ConstantPoolCacheEntry, _f1); }
  static ByteSize f2_offset()                    { return byte_offset_of(ConstantPoolCacheEntry, _f2); }
  static ByteSize flags_offset()                 { return byte_offset_of(ConstantPoolCacheEntry, _flags); }

  // RedefineClasses() API support:
  // If this constantPoolCacheEntry refers to old_method then update it
  // to refer to new_method.
  // trace_name_printed is set to true if the current call has
  // printed the klass name so that other routines in the adjust_*
  // group don't print the klass name.
  bool adjust_method_entry(Method* old_method, Method* new_method,
         bool * trace_name_printed);
  NOT_PRODUCT(bool check_no_old_entries();)
  bool is_interesting_method_entry(Klass* k);

  // Debugging & Printing
  void print (outputStream* st, int index) const;
  void verify(outputStream* st) const;

  static void verify_tos_state_shift() {
    // When shifting flags as a 32-bit int, make sure we don't need an extra mask for tos_state:
    assert((((u4)-1 >> tos_state_shift) & ~tos_state_mask) == 0, "no need for tos_state mask");
  }
};


// A constant pool cache is a runtime data structure set aside to a constant
// pool. The cache holds interpreter runtime information for all field access
// and invoke bytecodes. The cache is created and initialized before a class
// is actively used (i.e., initialized), the individual cache entries are
// filled at resolution (i.e., "link") time (see also: rewriter.*).

class ConstantPoolCache: public MetaspaceObj {
  friend class VMStructs;
  friend class MetadataFactory;
 private:
  int             _length;
  ConstantPool*   _constant_pool;  // the corresponding constant pool

  // Sizing
  debug_only(friend class ClassVerifier;)

  // Constructor
  // Entries are expected to be pre-cleared by the allocator; the loop only
  // asserts that each entry is still unbound (f1 == NULL).
  ConstantPoolCache(int length) : _length(length), _constant_pool(NULL) {
    for (int i = 0; i < length; i++) {
      assert(entry_at(i)->is_f1_null(), "Failed to clear?");
    }
  }

 public:
  static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length, TRAPS);
  bool is_constantPoolCache() const { return true; }

  int length() const                             { return _length; }
 private:
  void set_length(int length)                    { _length = length; }

  static int header_size()                       { return sizeof(ConstantPoolCache) / HeapWordSize; }
  static int size(int length)                    { return align_object_size(header_size() + length * in_words(ConstantPoolCacheEntry::size())); }
 public:
  int size() const                               { return size(length()); }
 private:

  // Helpers
  ConstantPool**          constant_pool_addr()   { return &_constant_pool; }
  // Entries are laid out contiguously immediately after the header.
  ConstantPoolCacheEntry* base() const           { return (ConstantPoolCacheEntry*)((address)this + in_bytes(base_offset())); }

  friend class constantPoolCacheKlass;
  friend class ConstantPoolCacheEntry;

 public:
  // Initialization
  void initialize(intArray& inverse_index_map, intArray& invokedynamic_references_map);

  // Accessors
  void set_constant_pool(ConstantPool* pool)     { _constant_pool = pool; }
  ConstantPool* constant_pool() const            { return _constant_pool; }
  // Fetches the entry at the given index.
  // In either case the index must not be encoded or byte-swapped in any way.
  ConstantPoolCacheEntry* entry_at(int i) const {
    assert(0 <= i && i < length(), "index out of bounds");
    return base() + i;
  }

  // Code generation
  static ByteSize base_offset()                  { return in_ByteSize(sizeof(ConstantPoolCache)); }
  static ByteSize entry_offset(int raw_index) {
    int index = raw_index;
    return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index);
  }

  // RedefineClasses() API support:
  // If any entry of this constantPoolCache points to any of
  // old_methods, replace it with the corresponding new_method.
  // trace_name_printed is set to true if the current call has
  // printed the klass name so that other routines in the adjust_*
  // group don't print the klass name.
  void adjust_method_entries(Method** old_methods, Method** new_methods,
                             int methods_length, bool * trace_name_printed);
  NOT_PRODUCT(bool check_no_old_entries();)

  // Deallocate - no fields to deallocate
  DEBUG_ONLY(bool on_stack() { return false; })
  void deallocate_contents(ClassLoaderData* data) {}
  bool is_klass() const { return false; }

  // Printing
  void print_on(outputStream* st) const;
  void print_value_on(outputStream* st) const;

  const char* internal_name() const { return "{constant pool cache}"; }

  // Verify
  void verify_on(outputStream* st);
};

#endif // SHARE_VM_OOPS_CPCACHEOOP_HPP