Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/oops/cpCacheOop.cpp
+++ new/src/share/vm/oops/cpCacheOop.cpp
1 1 /*
2 - * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
2 + * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "gc_implementation/shared/markSweep.inline.hpp"
27 27 #include "interpreter/interpreter.hpp"
28 28 #include "interpreter/rewriter.hpp"
29 29 #include "memory/universe.inline.hpp"
30 30 #include "oops/cpCacheOop.hpp"
31 31 #include "oops/objArrayOop.hpp"
32 32 #include "oops/oop.inline.hpp"
33 33 #include "prims/jvmtiRedefineClassesTrace.hpp"
34 34 #include "runtime/handles.inline.hpp"
35 35
36 36
37 37 // Implementation of ConstantPoolCacheEntry
38 38
// Initialize a primary cache entry: the low 16 bits of _indices hold the
// constant pool index this entry corresponds to (must fit in a u2, hence
// the 0x10000 bound).
void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  assert(constant_pool_index() == index, "");
}
44 44
// Initialize a secondary cache entry: the high 16 bits of _indices hold
// the index of the corresponding main entry (the low 16 bits stay zero,
// which is what distinguishes a secondary entry).
void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
  assert(0 <= main_index && main_index < 0x10000, "sanity check");
  _indices = (main_index << 16);
  assert(main_entry_index() == main_index, "");
}
50 50
// Pack a TosState and the boolean attributes into a flags word.
// Bits are appended in this order: state, is_final, is_vfinal,
// is_volatile, is_method_interface, is_method; the result is then
// shifted left by hotSwapBit so the low bits remain free (they hold,
// e.g., the field index — see set_field).  Pre-existing flag bits are
// preserved by OR-ing with the current _flags value.
int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                    bool is_vfinal, bool is_volatile,
                    bool is_method_interface, bool is_method) {
  int f = state;

  assert( state < number_of_states, "Invalid state in as_flags");

  f <<= 1;
  if (is_final) f |= 1;
  f <<= 1;
  if (is_vfinal) f |= 1;
  f <<= 1;
  if (is_volatile) f |= 1;
  f <<= 1;
  if (is_method_interface) f |= 1;
  f <<= 1;
  if (is_method) f |= 1;
  f <<= ConstantPoolCacheEntry::hotSwapBit;
  // Preserve existing flag bit values
#ifdef ASSERT
  // Any previously installed tos state must agree with the one being set.
  int old_state = ((_flags >> tosBits) & 0x0F);
  assert(old_state == 0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}
77 77
// Install the resolved bytecode #1 in bits 16..23 of _indices.  The
// release store flushes any pending stores (f1, f2, flags) first, so a
// thread that observes a non-zero bytecode also sees a fully
// initialized entry.
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}
87 87
// Install the resolved bytecode #2 in bits 24..31 of _indices.  Same
// release-store publication protocol as set_bytecode_1.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}
97 97
// Atomically sets f1 if it is still NULL, otherwise it keeps the
// current value.
void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) {
  // Use barriers as in oop_store
  oop* f1_addr = (oop*) &_f1;
  update_barrier_set_pre(f1_addr, f1);
  // CAS against NULL so only the first of several racing resolvers
  // publishes its oop; losers keep the already-installed value.
  void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
  bool success = (result == NULL);
  if (success) {
    // Only the winning thread runs the post-store barrier.
    update_barrier_set((void*) f1_addr, f1);
  }
}
110 110
#ifdef ASSERT
// It is possible to have two different dummy methodOops created
// when the resolve code for invoke interface executes concurrently
// Hence the assertion below is weakened a bit for the invokeinterface
// case.
// NOTE(review): the comparison deliberately accepts a match on name OR
// signature alone (see comment above) — it is an assert-only helper,
// not an exact identity check.
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
         ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
         ((methodOop)f1)->signature());
}
#endif
122 122
// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
//
// Record a resolved field access: f1 holds the holder class's mirror,
// f2 the field offset, and the flags word packs the field type, the
// final/volatile attributes, and the field index (low bits).  The
// bytecodes are written last, with release semantics (see
// set_bytecode_1/2), to publish the entry safely.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder()->java_mirror());
  set_f2(field_offset);
  assert(field_index <= field_index_mask,
         "field index does not fit in low flag bits");
  set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
            (field_index & field_index_mask));
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}
146 146
// Returns the field index stored in the low bits of the flags word
// (installed by set_field).
int ConstantPoolCacheEntry::field_index() const {
  return (_flags & field_index_mask);
}
150 150
// Record a resolved method call.  The entry is filled in differently
// per invoke bytecode:
//  - invokevirtual/invokeinterface: f2 holds either the methodOop
//    itself (statically bindable case, flagged vfinal) or the vtable
//    index; the bytecode goes into slot 2.
//  - invokedynamic: f1 is set atomically to the invoker method (f2
//    already holds the biased BSM cache index); bytecode slot 1.
//  - invokespecial/invokestatic: f1 holds the methodOop; bytecode
//    slot 1.
// The bytecode is written last (release semantics, see
// set_bytecode_1/2) so racing readers never see a resolved bytecode
// with uninitialized f1/f2.
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(!is_secondary_entry(), "");
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          // Statically bindable: store the methodOop directly in f2.
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }

    case Bytecodes::_invokedynamic:  // similar to _invokevirtual
      if (TraceInvokeDynamic) {
        tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d",
                      (is_secondary_entry() ? " secondary" : ""),
                      (intptr_t)method(), vtable_index);
        method->print();
        this->print(tty, 0);
      }
      assert(method->can_be_statically_bound(), "must be a MH invoker method");
      assert(_f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized");
      // SystemDictionary::find_method_handle_invoke only caches
      // methods which signature classes are on the boot classpath,
      // otherwise the newly created method is returned. To avoid
      // races in that case we store the first one coming in into the
      // cp-cache atomically if it's still unset.
      set_f1_if_null_atomic(method());
      needs_vfinal_flag = false;  // _f2 is not an oop
      assert(!is_vfinal(), "f2 not an oop");
      byte_no = 1;  // coordinate this with bytecode_number & is_resolved
      break;

    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
247 247
248 248
// Record a resolved invokeinterface call: f1 holds the interface klass
// and f2 the itable index of the method.  f1/f2/flags are written
// before the bytecode so the release store in set_bytecode_1 publishes
// a complete entry.
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  assert(!is_secondary_entry(), "");
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  set_f1(interf);
  set_f2(index);
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}
258 258
259 259
// Stash the bootstrap method cache index in f2 of the main entry for a
// JVM_CONSTANT_InvokeDynamic.  The index is biased by CPCACHE_INDEX_TAG,
// so a stored index is always non-zero (enforcing "initialize once").
void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int bsm_cache_index) {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  assert(_f2 == 0, "initialize once");
  assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob");
  set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG);
}
266 266
// Inverse of initialize_bootstrap_method_index_in_cache: recover the
// unbiased bootstrap method cache index from f2.
int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG;
  assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob");
  return (int) bsm_cache_index;
}
273 273
// Fill in a secondary (invokedynamic) cache entry for a resolved call
// site.  Flags are computed from the signature invoker; f1 (the call
// site oop) is published last and atomically — see the NOTE below.
void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) {
  assert(is_secondary_entry(), "");
  // NOTE: it's important that all other values are set before f1 is
  // set since some users short circuit on f1 being set
  // (i.e. non-null) and that may result in uninitialized values for
  // other racing threads (e.g. flags).
  int param_size = signature_invoker->size_of_parameters();
  assert(param_size >= 1, "method argument size must include MH.this");
  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
  bool is_final = true;
  assert(signature_invoker->is_final_method(), "is_final");
  int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size;
  assert(_flags == 0 || _flags == flags, "flags should be the same");
  set_flags(flags);
  // do not do set_bytecode on a secondary CP cache entry
  //set_bytecode_1(Bytecodes::_invokedynamic);
  set_f1_if_null_atomic(call_site());  // This must be the last one to set (see NOTE above)!
}
292 292
293 293
// Decode a previously resolved entry back into a methodOop, or return
// NULL if the entry is not resolved for the given invoke bytecode.
// Secondary (invokedynamic) entries delegate to their main entry.
methodOop ConstantPoolCacheEntry::get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool) {
  assert(invoke_code > (Bytecodes::Code)0, "bad query");
  if (is_secondary_entry()) {
    return cpool->cache()->entry_at(main_entry_index())->get_method_if_resolved(invoke_code, cpool);
  }
  // Decode the action of set_method and set_interface_call
  if (bytecode_1() == invoke_code) {
    oop f1 = _f1;
    if (f1 != NULL) {
      switch (invoke_code) {
      case Bytecodes::_invokeinterface:
        assert(f1->is_klass(), "");
        // f1 is the interface klass, f2 the itable index (set_interface_call).
        return klassItable::method_for_itable_index(klassOop(f1), (int) f2());
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:
        assert(f1->is_method(), "");
        return methodOop(f1);
      // other invoke codes fall out of the switch and return NULL below
      }
    }
  }
  if (bytecode_2() == invoke_code) {
    switch (invoke_code) {
    case Bytecodes::_invokevirtual:
      if (is_vfinal()) {
        // invokevirtual, vfinal: f2 holds the methodOop itself.
        methodOop m = methodOop((intptr_t) f2());
        assert(m->is_method(), "");
        return m;
      } else {
        // f2 is a vtable index; recover the holder klass from the pool
        // (only possible if its klass entry has been resolved).
        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
        if (cpool->tag_at(holder_index).is_klass()) {
          klassOop klass = cpool->resolved_klass_at(holder_index);
          if (!Klass::cast(klass)->oop_is_instance())
            klass = SystemDictionary::Object_klass();
          return instanceKlass::cast(klass)->method_at_vtable((int) f2());
        }
      }
    }
  }
  return NULL;
}
335 335
336 336
337 337
338 338 class LocalOopClosure: public OopClosure {
339 339 private:
340 340 void (*_f)(oop*);
341 341
342 342 public:
343 343 LocalOopClosure(void f(oop*)) { _f = f; }
344 344 virtual void do_oop(oop* o) { _f(o); }
345 345 virtual void do_oop(narrowOop *o) { ShouldNotReachHere(); }
346 346 };
347 347
348 348
// Apply f to every oop field of this entry by wrapping it in a
// LocalOopClosure and delegating to oop_iterate.
void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
  LocalOopClosure blk(f);
  oop_iterate(&blk);
}
353 353
354 354
// Visit the oop fields of this entry: _f1 always, and _f2 only when the
// vfinal flag says it holds a methodOop rather than a plain index.
void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}
363 363
364 364
// Same as oop_iterate, but only visits fields whose addresses fall
// inside the given MemRegion.
void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}
373 373
374 374
// MarkSweep support: mark and push this entry's oop fields (_f1 always,
// _f2 only when vfinal — same fields as oop_iterate).
void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}
383 383
#ifndef SERIALGC
// Parallel compaction variant of follow_contents(): same fields, routed
// through the per-thread compaction manager.
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
#endif // SERIALGC
394 394
// MarkSweep compaction support: adjust this entry's oop fields to the
// objects' new locations (_f1 always, _f2 only when vfinal).
void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}
403 403
#ifndef SERIALGC
// Parallel compaction variant of adjust_pointers(): same fields,
// adjusted via PSParallelCompact.
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}
#endif // SERIALGC
414 414
415 415 // RedefineClasses() API support:
416 416 // If this constantPoolCacheEntry refers to old_method then update it
417 417 // to refer to new_method.
418 418 bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
419 419 methodOop new_method, bool * trace_name_printed) {
420 420
421 421 if (is_vfinal()) {
422 422 // virtual and final so f2() contains method ptr instead of vtable index
423 423 if (f2() == (intptr_t)old_method) {
424 424 // match old_method so need an update
425 425 _f2 = (intptr_t)new_method;
426 426 if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
427 427 if (!(*trace_name_printed)) {
428 428 // RC_TRACE_MESG macro has an embedded ResourceMark
429 429 RC_TRACE_MESG(("adjust: name=%s",
430 430 Klass::cast(old_method->method_holder())->external_name()));
431 431 *trace_name_printed = true;
432 432 }
433 433 // RC_TRACE macro has an embedded ResourceMark
434 434 RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
435 435 new_method->name()->as_C_string(),
436 436 new_method->signature()->as_C_string()));
437 437 }
438 438
439 439 return true;
440 440 }
441 441
442 442 // f1() is not used with virtual entries so bail out
443 443 return false;
444 444 }
445 445
446 446 if ((oop)_f1 == NULL) {
447 447 // NULL f1() means this is a virtual entry so bail out
448 448 // We are assuming that the vtable index does not need change.
449 449 return false;
450 450 }
451 451
452 452 if ((oop)_f1 == old_method) {
453 453 _f1 = new_method;
454 454 if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
455 455 if (!(*trace_name_printed)) {
456 456 // RC_TRACE_MESG macro has an embedded ResourceMark
457 457 RC_TRACE_MESG(("adjust: name=%s",
458 458 Klass::cast(old_method->method_holder())->external_name()));
459 459 *trace_name_printed = true;
460 460 }
461 461 // RC_TRACE macro has an embedded ResourceMark
462 462 RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
463 463 new_method->name()->as_C_string(),
464 464 new_method->signature()->as_C_string()));
465 465 }
466 466
467 467 return true;
468 468 }
469 469
470 470 return false;
471 471 }
472 472
473 473 bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
474 474 if (!is_method_entry()) {
475 475 // not a method entry so not interesting by default
476 476 return false;
477 477 }
478 478
479 479 methodOop m = NULL;
480 480 if (is_vfinal()) {
481 481 // virtual and final so _f2 contains method ptr instead of vtable index
482 482 m = (methodOop)_f2;
483 483 } else if ((oop)_f1 == NULL) {
484 484 // NULL _f1 means this is a virtual entry so also not interesting
485 485 return false;
486 486 } else {
487 487 if (!((oop)_f1)->is_method()) {
488 488 // _f1 can also contain a klassOop for an interface
489 489 return false;
490 490 }
491 491 m = (methodOop)_f1;
492 492 }
493 493
494 494 assert(m != NULL && m->is_method(), "sanity check");
495 495 if (m == NULL || !m->is_method() || m->method_holder() != k) {
496 496 // robustness for above sanity checks or method is not in
↓ open down ↓ |
484 lines elided |
↑ open up ↑ |
497 497 // the interesting class
498 498 return false;
499 499 }
500 500
501 501 // the method is in the interesting class so the entry is interesting
502 502 return true;
503 503 }
504 504
// Dump this entry on 'st' for debugging; 'index' is the entry's
// position in the cache (a separator header is printed before entry 0).
void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) st->print_cr(" -------------");
  // print entry
  st->print("%3d ("PTR_FORMAT") ", index, (intptr_t)this);
  if (is_secondary_entry())
    st->print_cr("[%5d|secondary]", main_entry_index());
  else
    st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
  st->print_cr(" [ "PTR_FORMAT"]", (intptr_t)(oop)_f1);
  st->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_f2);
  st->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_flags);
  st->print_cr(" -------------");
}
519 519
// Consistency-check hook invoked via NOT_PRODUCT(verify(tty)) by the
// set_* methods above; currently a no-op.
void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}
523 523
// Implementation of ConstantPoolCache

// Initialize all cache entries from the rewriter's inverse index map:
// entries tagged with Rewriter::_secondary_entry_tag become secondary
// entries pointing at their (untagged) main entry; all others become
// primary entries holding their original constant pool index.
void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
      // Strip the tag to recover the main entry's index.
      int main_index = (original_index - Rewriter::_secondary_entry_tag);
      assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
      e->initialize_secondary_entry(main_index);
    } else {
      e->initialize_entry(original_index);
    }
    assert(entry_at(i) == e, "sanity");
  }
}
541 541
// RedefineClasses() API support:
// If any entry of this constantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
// old_methods/new_methods are parallel arrays of length methods_length;
// all old_methods share the same holder class.
void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
                                                     int methods_length, bool * trace_name_printed) {

  if (methods_length == 0) {
    // nothing to do if there are no methods
    return;
  }

  // get shorthand for the interesting class
  klassOop old_holder = old_methods[0]->method_holder();

  for (int i = 0; i < length(); i++) {
    if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
      // skip uninteresting methods
      continue;
    }

    // The constantPoolCache contains entries for several different
    // things, but we only care about methods. In fact, we only care
    // about methods in the same class as the one that contains the
    // old_methods. At this point, we have an interesting entry.

    for (int j = 0; j < methods_length; j++) {
      methodOop old_method = old_methods[j];
      methodOop new_method = new_methods[j];

      if (entry_at(i)->adjust_method_entry(old_method, new_method,
          trace_name_printed)) {
        // current old_method matched this entry and we updated it so
        // break out and get to the next interesting entry if there is one
        break;
      }
    }
  }
}
↓ open down ↓ |
52 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX