src/share/vm/oops/cpCache.cpp

 109                                        bool is_final,
 110                                        bool is_volatile,
 111                                        Klass* root_klass) {
 112   set_f1(field_holder());
 113   set_f2(field_offset);
 114   assert((field_index & field_index_mask) == field_index,
 115          "field index does not fit in low flag bits");
 116   set_field_flags(field_type,
 117                   ((is_volatile ? 1 : 0) << is_volatile_shift) |
 118                   ((is_final    ? 1 : 0) << is_final_shift),
 119                   field_index);
 120   set_bytecode_1(get_code);
 121   set_bytecode_2(put_code);
 122   NOT_PRODUCT(verify(tty));
 123 }
 124 
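The tail of the field setter above packs the is_volatile and is_final attributes together with the field index into a single flags word, after asserting that the index fits under field_index_mask. A rough standalone sketch of that bit-packing idea follows; the mask width and shift values are assumptions for illustration, not HotSpot's actual _flags layout.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Assumed layout: low 16 bits hold the field index, two higher bits hold
// the is_volatile / is_final attributes (mirroring the shifts used above).
const uint32_t field_index_mask  = (1u << 16) - 1;
const int      is_volatile_shift = 16;
const int      is_final_shift    = 17;

uint32_t pack_field_flags(uint32_t field_index, bool is_volatile, bool is_final) {
  assert((field_index & field_index_mask) == field_index &&
         "field index does not fit in low flag bits");
  return field_index
       | ((is_volatile ? 1u : 0u) << is_volatile_shift)
       | ((is_final    ? 1u : 0u) << is_final_shift);
}

int main() {
  uint32_t flags = pack_field_flags(42, /*is_volatile=*/false, /*is_final=*/true);
  std::printf("packed flags: 0x%x\n", flags);
  return 0;
}
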
 125 void ConstantPoolCacheEntry::set_parameter_size(int value) {
 126   // This routine is called only in corner cases where the CPCE is not yet initialized.
 127   // See AbstractInterpreter::deopt_continue_after_entry.
 128   assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
 129          err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
 130   // Setting the parameter size by itself is only safe if the
 131   // current value of _flags is 0, otherwise another thread may have
 132   // updated it and we don't want to overwrite that value.  Don't
 133   // bother trying to update it once it's nonzero but always make
 134   // sure that the final parameter size agrees with what was passed.
 135   if (_flags == 0) {
 136     Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
 137   }
 138   guarantee(parameter_size() == value,
 139             err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
 140 }
 141 
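The comment in set_parameter_size() describes a publish-once protocol: the parameter size is only stored while _flags is still zero, via a compare-and-swap, so a value concurrently written by another thread is never clobbered, and the guarantee then checks that whichever store won agrees with the caller's value. A minimal standalone sketch of that pattern, using std::atomic instead of HotSpot's Atomic::cmpxchg_ptr; the mask value is an assumption for illustration.

#include <atomic>
#include <cassert>
#include <cstdint>

const intptr_t parameter_size_mask = 0xFF;   // assumed low-byte encoding
std::atomic<intptr_t> flags{0};

void set_parameter_size(int value) {
  // Only attempt the store while the word is still zero; if another thread
  // initialized it first, the CAS fails and we keep that thread's value.
  intptr_t expected = 0;
  if (flags.load() == 0) {
    flags.compare_exchange_strong(expected, value & parameter_size_mask);
  }
  // Whichever store won, the published size must agree with what we were passed.
  assert((flags.load() & parameter_size_mask) == (value & parameter_size_mask));
}

int main() {
  set_parameter_size(3);
  set_parameter_size(3);   // second call must observe the same published size
  return 0;
}
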
 142 void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
 143                                                        methodHandle method,
 144                                                        int vtable_index) {
 145   bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
 146   assert(method->interpreter_entry() != NULL, "should have been set at this point");
 147   assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
 148 
 149   int byte_no = -1;
 150   bool change_to_virtual = false;
 151 
 152   switch (invoke_code) {
 153     case Bytecodes::_invokeinterface:
 154       // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
 155       // instruction somehow links to a non-interface method (in Object).
 156       // In that case, the method has no itable index and must be invoked as a virtual.
 157       // Set a flag to keep track of this corner case.
 158       change_to_virtual = true;
 159 


 576   }
 577 
 578   // Append invokedynamic entries at the end
 579   int invokedynamic_offset = inverse_index_map.length();
 580   for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
 581     int offset = i + invokedynamic_offset;
 582     ConstantPoolCacheEntry* e = entry_at(offset);
 583     int original_index = invokedynamic_inverse_index_map[i];
 584     e->initialize_entry(original_index);
 585     assert(entry_at(offset) == e, "sanity");
 586   }
 587 
 588   for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
 589     const int cpci = invokedynamic_references_map[ref];
 590     if (cpci >= 0) {
 591 #ifdef ASSERT
 592       // invokedynamic and invokehandle have more entries; check if they
 593       // all point to the same constant pool cache entry.
 594       for (int entry = 1; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
 595         const int cpci_next = invokedynamic_references_map[ref + entry];
 596         assert(cpci == cpci_next, err_msg_res("%d == %d", cpci, cpci_next));
 597       }
 598 #endif
 599       entry_at(cpci)->initialize_resolved_reference_index(ref);
 600       ref += ConstantPoolCacheEntry::_indy_resolved_references_entries - 1;  // skip extra entries
 601     }
 602   }
 603 }
 604 
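The second loop above walks a references map in which each invokedynamic/invokehandle call site occupies _indy_resolved_references_entries consecutive slots, all carrying the same constant pool cache index; only the first slot of a group is acted on and the extras are skipped. A simplified standalone sketch of that walk is below; entries_per_indy_call_site and the sample data are stand-ins, not HotSpot's types or values.

#include <cassert>
#include <cstdio>
#include <vector>

const int entries_per_indy_call_site = 3;   // stand-in for _indy_resolved_references_entries

int main() {
  // Each call site contributes a group of identical cpCache indices.
  std::vector<int> references_map = { 5, 5, 5,  9, 9, 9 };

  for (int ref = 0; ref < (int)references_map.size(); ref++) {
    const int cpci = references_map[ref];
    if (cpci >= 0) {
      // All slots of the group must agree on the cpCache entry they refer to.
      for (int entry = 1; entry < entries_per_indy_call_site; entry++) {
        assert(cpci == references_map[ref + entry]);
      }
      std::printf("cpCache entry %d gets resolved-reference index %d\n", cpci, ref);
      ref += entries_per_indy_call_site - 1;   // skip the extra slots of this group
    }
  }
  return 0;
}
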
 605 #if INCLUDE_JVMTI
 606 // RedefineClasses() API support:
 607 // If any entry of this ConstantPoolCache points to any of
 608 // old_methods, replace it with the corresponding new_method.
 609 void ConstantPoolCache::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
 610   for (int i = 0; i < length(); i++) {
 611     ConstantPoolCacheEntry* entry = entry_at(i);
 612     Method* old_method = entry->get_interesting_method_entry(holder);
 613     if (old_method == NULL || !old_method->is_old()) {
 614       continue; // skip uninteresting entries
 615     }
 616     if (old_method->is_deleted()) {




 109                                        bool is_final,
 110                                        bool is_volatile,
 111                                        Klass* root_klass) {
 112   set_f1(field_holder());
 113   set_f2(field_offset);
 114   assert((field_index & field_index_mask) == field_index,
 115          "field index does not fit in low flag bits");
 116   set_field_flags(field_type,
 117                   ((is_volatile ? 1 : 0) << is_volatile_shift) |
 118                   ((is_final    ? 1 : 0) << is_final_shift),
 119                   field_index);
 120   set_bytecode_1(get_code);
 121   set_bytecode_2(put_code);
 122   NOT_PRODUCT(verify(tty));
 123 }
 124 
 125 void ConstantPoolCacheEntry::set_parameter_size(int value) {
 126   // This routine is called only in corner cases where the CPCE is not yet initialized.
 127   // See AbstractInterpreter::deopt_continue_after_entry.
 128   assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
 129          "size must not change: parameter_size=%d, value=%d", parameter_size(), value);
 130   // Setting the parameter size by itself is only safe if the
 131   // current value of _flags is 0, otherwise another thread may have
 132   // updated it and we don't want to overwrite that value.  Don't
 133   // bother trying to update it once it's nonzero but always make
 134   // sure that the final parameter size agrees with what was passed.
 135   if (_flags == 0) {
 136     Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
 137   }
 138   guarantee(parameter_size() == value,
 139             "size must not change: parameter_size=%d, value=%d", parameter_size(), value);
 140 }
 141 
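The visible difference between the two listings is how the assert/guarantee messages are built: the first passes the formatted text through err_msg()/err_msg_res(), while the second hands the printf-style format string and its arguments directly to the macro. A minimal standalone illustration of that variadic-message style is below; my_assert is a hypothetical stand-in, not HotSpot's actual assert machinery.

#include <cstdio>
#include <cstdlib>

// Simplified stand-in: the message is a printf-style format string plus
// arguments, so call sites no longer wrap them in a helper like err_msg().
#define my_assert(cond, ...)                                   \
  do {                                                         \
    if (!(cond)) {                                             \
      std::fprintf(stderr, "assert(%s) failed: ", #cond);      \
      std::fprintf(stderr, __VA_ARGS__);                       \
      std::fprintf(stderr, "\n");                              \
      std::abort();                                            \
    }                                                          \
  } while (0)

int main() {
  int parameter_size = 2, value = 2;
  // First listing:  assert(cond, err_msg("...=%d...", v));
  // Second listing: assert(cond, "...=%d...", v);
  my_assert(parameter_size == value,
            "size must not change: parameter_size=%d, value=%d", parameter_size, value);
  return 0;
}
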
 142 void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
 143                                                        methodHandle method,
 144                                                        int vtable_index) {
 145   bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
 146   assert(method->interpreter_entry() != NULL, "should have been set at this point");
 147   assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
 148 
 149   int byte_no = -1;
 150   bool change_to_virtual = false;
 151 
 152   switch (invoke_code) {
 153     case Bytecodes::_invokeinterface:
 154       // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
 155       // instruction somehow links to a non-interface method (in Object).
 156       // In that case, the method has no itable index and must be invoked as a virtual.
 157       // Set a flag to keep track of this corner case.
 158       change_to_virtual = true;
 159 


 576   }
 577 
 578   // Append invokedynamic entries at the end
 579   int invokedynamic_offset = inverse_index_map.length();
 580   for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
 581     int offset = i + invokedynamic_offset;
 582     ConstantPoolCacheEntry* e = entry_at(offset);
 583     int original_index = invokedynamic_inverse_index_map[i];
 584     e->initialize_entry(original_index);
 585     assert(entry_at(offset) == e, "sanity");
 586   }
 587 
 588   for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
 589     const int cpci = invokedynamic_references_map[ref];
 590     if (cpci >= 0) {
 591 #ifdef ASSERT
 592       // invokedynamic and invokehandle have more entries; check if they
 593       // all point to the same constant pool cache entry.
 594       for (int entry = 1; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
 595         const int cpci_next = invokedynamic_references_map[ref + entry];
 596         assert(cpci == cpci_next, "%d == %d", cpci, cpci_next);
 597       }
 598 #endif
 599       entry_at(cpci)->initialize_resolved_reference_index(ref);
 600       ref += ConstantPoolCacheEntry::_indy_resolved_references_entries - 1;  // skip extra entries
 601     }
 602   }
 603 }
 604 
 605 #if INCLUDE_JVMTI
 606 // RedefineClasses() API support:
 607 // If any entry of this ConstantPoolCache points to any of
 608 // old_methods, replace it with the corresponding new_method.
 609 void ConstantPoolCache::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
 610   for (int i = 0; i < length(); i++) {
 611     ConstantPoolCacheEntry* entry = entry_at(i);
 612     Method* old_method = entry->get_interesting_method_entry(holder);
 613     if (old_method == NULL || !old_method->is_old()) {
 614       continue; // skip uninteresting entries
 615     }
 616     if (old_method->is_deleted()) {
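
adjust_method_entries() is shown truncated in both listings, but its comment states the RedefineClasses contract: scan every cache entry, and wherever an entry still points at an old version of a method from the redefined class, swap in the corresponding new method. A rough standalone sketch of that replace-old-with-new scan follows, using hypothetical plain structs rather than HotSpot's Method and ConstantPoolCacheEntry; the real code also handles deleted methods, a branch cut off at the line above.

#include <cstddef>
#include <vector>

struct Method {
  bool    is_old;        // true for the pre-redefinition version
  Method* new_version;   // corresponding method in the redefined class, if any
};

struct CacheEntry {
  Method* method;        // may be null for unresolved or field entries
};

// Walk the cache and redirect entries that still reference old methods.
void adjust_method_entries(std::vector<CacheEntry>& cache) {
  for (size_t i = 0; i < cache.size(); i++) {
    Method* old_method = cache[i].method;
    if (old_method == nullptr || !old_method->is_old) {
      continue;   // skip uninteresting entries, as in the code above
    }
    if (old_method->new_version != nullptr) {
      cache[i].method = old_method->new_version;
    }
  }
}

int main() {
  Method new_m{false, nullptr};
  Method old_m{true, &new_m};
  std::vector<CacheEntry> cache = { {&old_m}, {nullptr} };
  adjust_method_entries(cache);
  return 0;
}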

