1 #ifdef USE_PRAGMA_IDENT_SRC 2 #pragma ident "@(#)cpCacheOop.cpp 1.79 07/05/29 09:44:19 JVM" 3 #endif 4 /* 5 * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. 6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 7 * 8 * This code is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License version 2 only, as 10 * published by the Free Software Foundation. 11 * 12 * This code is distributed in the hope that it will be useful, but WITHOUT 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 15 * version 2 for more details (a copy is included in the LICENSE file that 16 * accompanied this code). 17 * 18 * You should have received a copy of the GNU General Public License version 19 * 2 along with this work; if not, write to the Free Software Foundation, 20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 21 * 22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 23 * CA 95054 USA or visit www.sun.com if you need additional information or 24 * have any questions. 
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_cpCacheOop.cpp.incl"


// Implementation of ConstantPoolCacheEntry

// Store the original constant pool index in _indices. The index must fit in
// 16 bits because the two resolved-bytecode slots are packed into the upper
// bytes of _indices (see set_bytecode_1/set_bytecode_2 below).
void ConstantPoolCacheEntry::set_initial_state(int index) {
  assert(0 <= index && index < 0x10000, "sanity check");
  _indices = index;
}


// Pack the TosState and five boolean properties into a flags word. The
// packed bits are shifted up by hotSwapBit, leaving the low bits free for
// per-entry data (set_field ORs in the field index, set_method the method's
// parameter size). Existing _flags bits are preserved by OR-ing them in.
int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                    bool is_vfinal, bool is_volatile,
                    bool is_method_interface, bool is_method) {
  int f = state;

  assert( state < number_of_states, "Invalid state in as_flags");

  f <<= 1;
  if (is_final) f |= 1;
  f <<= 1;
  if (is_vfinal) f |= 1;
  f <<= 1;
  if (is_volatile) f |= 1;
  f <<= 1;
  if (is_method_interface) f |= 1;
  f <<= 1;
  if (is_method) f |= 1;
  f <<= ConstantPoolCacheEntry::hotSwapBit;
  // Preserve existing flag bit values
#ifdef ASSERT
  int old_state = ((_flags >> tosBits) & 0x0F);
  assert(old_state == 0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}

// Publish the resolved bytecode in byte 2 of _indices. The release store
// guarantees that earlier stores to f1/f2 are visible to other processors
// before the non-zero bytecode becomes visible (see the comment ahead of
// set_field below).
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}

// Publish the resolved bytecode in byte 3 of _indices; same memory-ordering
// contract as set_bytecode_1.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}

#ifdef ASSERT
// It is possible to have two different dummy methodOops created
// when the resolve code for invoke interface executes concurrently
// Hence the assertion below is weakened a bit for the invokeinterface
// case.
// NOTE(review): the '||' chain accepts two methods that agree on name OR
// signature alone; a strict identity check would require both ('&&'). The
// comment above says the check is deliberately weak — confirm that name-only
// or signature-only agreement is really intended.
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
         ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
         ((methodOop)f1)->signature());
}
#endif

// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
//
// Resolve this entry as a field access: f1 = holder klass, f2 = field
// offset, flags = TosState/final/volatile plus the scaled field index in the
// low bits. The bytecodes are set last (release stores, see above).
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int orig_field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder());
  set_f2(field_offset);
  // The field index is used by jvm/ti and is the index into fields() array
  // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
  assert((orig_field_index % instanceKlass::next_offset) == 0, "wierd index");
  const int field_index = orig_field_index / instanceKlass::next_offset;
  assert(field_index <= field_index_mask,
         "field index does not fit in low flag bits");
  set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
            (field_index & field_index_mask));
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}

// Recover the jvm/ti field index stored by set_field (undoes the
// instanceKlass::next_offset scaling).
int ConstantPoolCacheEntry::field_index() const {
  return (_flags & field_index_mask) * instanceKlass::next_offset;
}

// Resolve this entry as a method invocation. Depending on the invoke
// bytecode, either f1 holds the methodOop (invokespecial/invokestatic,
// bytecode slot 1) or f2 holds the vtable index — or, for statically bound
// ("vfinal") methods, the methodOop itself (bytecode slot 2).
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {

  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          // Statically bound: f2 holds the methodOop directly, marked vfinal.
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }
    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2)  {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}


// Resolve this entry as a true interface call: f1 = interface klass,
// f2 = itable index.
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  set_f1(interf);
  set_f2(index);
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}


// Adapter that lets oops_do() reuse the OopClosure-based oop_iterate().
class LocalOopClosure: public OopClosure {
 private:
  void (*_f)(oop*);

 public:
  LocalOopClosure(void f(oop*)) { _f = f; }
  virtual void do_oop(oop* o)   { _f(o); }
};


// Apply f to every oop slot of this entry.
void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
  LocalOopClosure blk(f);
  oop_iterate(&blk);
}


// Walk this entry's oop slots: _f1 always, _f2 only when it holds a
// methodOop (vfinal case — see set_method) rather than a vtable index.
void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}


// Same as oop_iterate, restricted to slots that lie within mr.
void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}


// Serial mark-sweep: mark the oops held by this entry.
void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}

#ifndef SERIALGC
// Parallel compacting collector: mark the oops held by this entry.
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
#endif // SERIALGC

// Serial mark-sweep: update this entry's oop slots to the new locations.
void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}

#ifndef SERIALGC
// Parallel compacting collector: update this entry's oop slots.
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}

// Parallel compacting collector: update only the oop slots that lie within
// [beg_addr, end_addr).
void ConstantPoolCacheEntry::update_pointers(HeapWord* beg_addr,
                                             HeapWord* end_addr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1, beg_addr, end_addr);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2, beg_addr, end_addr);
  }
}
#endif // SERIALGC

// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
306 bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method, 307 methodOop new_method, bool * trace_name_printed) { 308 309 if (is_vfinal()) { 310 // virtual and final so f2() contains method ptr instead of vtable index 311 if (f2() == (intptr_t)old_method) { 312 // match old_method so need an update 313 _f2 = (intptr_t)new_method; 314 if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { 315 if (!(*trace_name_printed)) { 316 // RC_TRACE_MESG macro has an embedded ResourceMark 317 RC_TRACE_MESG(("adjust: name=%s", 318 Klass::cast(old_method->method_holder())->external_name())); 319 *trace_name_printed = true; 320 } 321 // RC_TRACE macro has an embedded ResourceMark 322 RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)", 323 new_method->name()->as_C_string(), 324 new_method->signature()->as_C_string())); 325 } 326 327 return true; 328 } 329 330 // f1() is not used with virtual entries so bail out 331 return false; 332 } 333 334 if ((oop)_f1 == NULL) { 335 // NULL f1() means this is a virtual entry so bail out 336 // We are assuming that the vtable index does not need change. 
337 return false; 338 } 339 340 if ((oop)_f1 == old_method) { 341 _f1 = new_method; 342 if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { 343 if (!(*trace_name_printed)) { 344 // RC_TRACE_MESG macro has an embedded ResourceMark 345 RC_TRACE_MESG(("adjust: name=%s", 346 Klass::cast(old_method->method_holder())->external_name())); 347 *trace_name_printed = true; 348 } 349 // RC_TRACE macro has an embedded ResourceMark 350 RC_TRACE(0x00400000, ("cpc entry update: %s(%s)", 351 new_method->name()->as_C_string(), 352 new_method->signature()->as_C_string())); 353 } 354 355 return true; 356 } 357 358 return false; 359 } 360 361 bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) { 362 if (!is_method_entry()) { 363 // not a method entry so not interesting by default 364 return false; 365 } 366 367 methodOop m = NULL; 368 if (is_vfinal()) { 369 // virtual and final so _f2 contains method ptr instead of vtable index 370 m = (methodOop)_f2; 371 } else if ((oop)_f1 == NULL) { 372 // NULL _f1 means this is a virtual entry so also not interesting 373 return false; 374 } else { 375 if (!((oop)_f1)->is_method()) { 376 // _f1 can also contain a klassOop for an interface 377 return false; 378 } 379 m = (methodOop)_f1; 380 } 381 382 assert(m != NULL && m->is_method(), "sanity check"); 383 if (m == NULL || !m->is_method() || m->method_holder() != k) { 384 // robustness for above sanity checks or method is not in 385 // the interesting class 386 return false; 387 } 388 389 // the method is in the interesting class so the entry is interesting 390 return true; 391 } 392 393 void ConstantPoolCacheEntry::print(outputStream* st, int index) const { 394 // print separator 395 if (index == 0) tty->print_cr(" -------------"); 396 // print entry 397 tty->print_cr("%3d (%08x) [%02x|%02x|%5d]", index, this, bytecode_2(), bytecode_1(), constant_pool_index()); 398 tty->print_cr(" [ %08x]", (address)(oop)_f1); 399 tty->print_cr(" [ %08x]", _f2); 400 tty->print_cr(" [ %08x]", _flags); 
401 tty->print_cr(" -------------"); 402 } 403 404 void ConstantPoolCacheEntry::verify(outputStream* st) const { 405 // not implemented yet 406 } 407 408 // Implementation of ConstantPoolCache 409 410 void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) { 411 assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache"); 412 for (int i = 0; i < length(); i++) entry_at(i)->set_initial_state(inverse_index_map[i]); 413 } 414 415 // RedefineClasses() API support: 416 // If any entry of this constantPoolCache points to any of 417 // old_methods, replace it with the corresponding new_method. 418 void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods, 419 int methods_length, bool * trace_name_printed) { 420 421 if (methods_length == 0) { 422 // nothing to do if there are no methods 423 return; 424 } 425 426 // get shorthand for the interesting class 427 klassOop old_holder = old_methods[0]->method_holder(); 428 429 for (int i = 0; i < length(); i++) { 430 if (!entry_at(i)->is_interesting_method_entry(old_holder)) { 431 // skip uninteresting methods 432 continue; 433 } 434 435 // The constantPoolCache contains entries for several different 436 // things, but we only care about methods. In fact, we only care 437 // about methods in the same class as the one that contains the 438 // old_methods. At this point, we have an interesting entry. 439 440 for (int j = 0; j < methods_length; j++) { 441 methodOop old_method = old_methods[j]; 442 methodOop new_method = new_methods[j]; 443 444 if (entry_at(i)->adjust_method_entry(old_method, new_method, 445 trace_name_printed)) { 446 // current old_method matched this entry and we updated it so 447 // break out and get to the next interesting entry if there one 448 break; 449 } 450 } 451 } 452 }