src/share/vm/interpreter/rewriter.cpp
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "interpreter/bytecodes.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/rewriter.hpp"

  29 #include "memory/gcLocker.hpp"
  30 #include "memory/resourceArea.hpp"
  31 #include "oops/generateOopMap.hpp"
  32 #include "prims/methodHandles.hpp"
  33 
  34 // Computes a CPC map (new_index -> original_index) for constant pool entries
  35 // that are referred to by the interpreter at runtime via the constant pool cache.
  36 // Also computes a CP map (original_index -> new_index).
  37 // Marks entries in CP which require additional processing.
  38 void Rewriter::compute_index_maps() {
  39   const int length  = _pool->length();
  40   init_maps(length);
  41   bool saw_mh_symbol = false;
  42   for (int i = 0; i < length; i++) {
  43     int tag = _pool->tag_at(i).value();
  44     switch (tag) {
  45       case JVM_CONSTANT_InterfaceMethodref:
  46       case JVM_CONSTANT_Fieldref          : // fall through
  47       case JVM_CONSTANT_Methodref         : // fall through
  48         add_cp_cache_entry(i);
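// --------------------------------------------------------------------------
// A standalone sketch (hypothetical names, not part of this file) of the two
// inverse maps described in the comment above: cp_map takes an original
// constant pool index to its cache index, and cp_cache_map takes a cache
// index back to the original index.
#include <vector>

int main() {
  std::vector<int> cp_map(10, -1);  // original CP index -> cache index
  std::vector<int> cp_cache_map;    // cache index -> original CP index
  auto add_cp_cache_entry = [&](int cp_index) {
    int cache_index = (int)cp_cache_map.size();
    cp_cache_map.push_back(cp_index);
    cp_map[cp_index] = cache_index;
    return cache_index;
  };
  add_cp_cache_entry(3);  // cp_map[3] == 0, cp_cache_map[0] == 3
  add_cp_cache_entry(7);  // cp_map[7] == 1, cp_cache_map[1] == 7
  return 0;
}
// --------------------------------------------------------------------------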


 125         if (bcs.get_index() != 0) continue;
 126 
 127         // fall through
 128       case Bytecodes::_istore_0:
 129       case Bytecodes::_lstore_0:
 130       case Bytecodes::_fstore_0:
 131       case Bytecodes::_dstore_0:
 132       case Bytecodes::_astore_0:
 133         THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
 134                   "can't overwrite local 0 in Object.<init>");
 135         break;
 136     }
 137   }
 138 }
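// --------------------------------------------------------------------------
// A standalone sketch (simplified, hypothetical) of the check above: scan the
// bytecodes of Object.<init> and reject any store into local slot 0, since
// the rewritten return in Object.<init> reads slot 0 to register the object
// for finalization and needs the receiver still there.
#include <cstdint>
#include <cstdio>

int main() {
  // Toy stream of single-byte opcodes: aload_0 (0x2a), astore_0 (0x4b),
  // return (0xb1).  A real scan walks a bytecode stream with per-opcode
  // lengths, as the loop above does.
  const uint8_t code[] = { 0x2a, 0x4b, 0xb1 };
  for (unsigned bci = 0; bci < sizeof code; bci++) {
    if (code[bci] == 0x4b /* astore_0 */ || code[bci] == 0x3b /* istore_0 */) {
      printf("IncompatibleClassChangeError at bci %u\n", bci);
      return 1;
    }
  }
  return 0;
}
// --------------------------------------------------------------------------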
 139 
 140 
 141 // Rewrite a classfile-order CP index into a native-order CPC index.
 142 void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
 143   address p = bcp + offset;
 144   if (!reverse) {


 145     int  cp_index    = Bytes::get_Java_u2(p);
 146     int  cache_index = cp_entry_to_cp_cache(cp_index);
 147     Bytes::put_native_u2(p, cache_index);
 148     if (!_method_handle_invokers.is_empty())
 149       maybe_rewrite_invokehandle(p - 1, cp_index, cache_index, reverse);
 150   } else {
 151     int cache_index = Bytes::get_native_u2(p);
 152     int pool_index = cp_cache_entry_pool_index(cache_index);
 153     Bytes::put_Java_u2(p, pool_index);
 154     if (!_method_handle_invokers.is_empty())
 155       maybe_rewrite_invokehandle(p - 1, pool_index, cache_index, reverse);
 156   }
 157 }
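// --------------------------------------------------------------------------
// A standalone sketch (hypothetical helpers, not the real Bytes class) of the
// byte-order rewrite above: the classfile stores the u2 operand big-endian
// ("Java order"); the cache index is written back in native order so the
// interpreter can load it without a byte swap.
#include <cstdint>
#include <cstring>
#include <cstdio>

static uint16_t get_Java_u2(const uint8_t* p) {      // big-endian read
  return (uint16_t)((p[0] << 8) | p[1]);
}
static void put_native_u2(uint8_t* p, uint16_t v) {  // native-order write
  memcpy(p, &v, sizeof v);
}

int main() {
  uint8_t operand[2] = { 0x00, 0x2a };        // CP index 42, big-endian
  uint16_t cp_index    = get_Java_u2(operand);
  uint16_t cache_index = 5;                   // stand-in for cp_entry_to_cp_cache
  put_native_u2(operand, cache_index);
  printf("cp_index=%d cache_index=%d\n", (int)cp_index, (int)cache_index);
  return 0;
}
// --------------------------------------------------------------------------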
 158 
 159 // If the constant pool entry for invokespecial is an InterfaceMethodref,
 160 // we need a separate cpCache entry for its resolution, because it resolves
 161 // differently from invokeinterface with the same InterfaceMethodref, and the
 162 // two cannot share a cpCache entry.  It is unclear whether all invokespecial
 163 // references to an InterfaceMethodref resolve to the same method, so a new
 164 // cpCache entry is created for each one.  This was added with lambda support.
 165 void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, bool* invokespecial_error) {
 166   address p = bcp + offset;
 167   if (!reverse) {


 168     int cp_index = Bytes::get_Java_u2(p);
 169     if (_pool->tag_at(cp_index).is_interface_method()) {
 170       int cache_index = add_invokespecial_cp_cache_entry(cp_index);
 171       if (cache_index != (int)(jushort) cache_index) {
 172         *invokespecial_error = true;
 173       }
 174       Bytes::put_native_u2(p, cache_index);
 175     } else {
 176       rewrite_member_reference(bcp, offset, reverse);
 177     }
 178   } else {
 179     rewrite_member_reference(bcp, offset, reverse);
 180   }
 181 }
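// --------------------------------------------------------------------------
// A standalone sketch of the overflow check above: a cache index must fit in
// the bytecode's u2 operand, so it is round-tripped through a 16-bit cast
// (jushort in HotSpot) and compared against the original value.
#include <cassert>
#include <cstdint>

int main() {
  int fits     = 65535;  // largest value a u2 operand can hold
  int overflow = 65536;  // truncates to 0 through the cast
  assert(fits     == (int)(uint16_t)fits);
  assert(overflow != (int)(uint16_t)overflow);  // sets *invokespecial_error
  return 0;
}
// --------------------------------------------------------------------------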
 182 
 183 
 184 // Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.)
 185 void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) {
 186   if (!reverse) {
 187     if ((*opc) == (u1)Bytecodes::_invokevirtual ||


 276 
 277       // invokedynamic resolved references map also points to cp cache and must
 278       // add delta to each.
 279       int resolved_index = _patch_invokedynamic_refs->at(i);
 280       for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
 281         assert(_invokedynamic_references_map[resolved_index+entry] == cache_index,
 282              "should be the same index");
 283         _invokedynamic_references_map.at_put(resolved_index+entry,
 284                                              cache_index + delta);
 285       }
 286     }
 287   }
 288 }
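// --------------------------------------------------------------------------
// A standalone sketch (simplified) of the patching above: when extra cpCache
// entries are inserted ahead of the invokedynamic entries, every previously
// recorded resolved-references index that points past the insertion point is
// shifted by the same delta.
#include <vector>
#include <cstdio>

int main() {
  std::vector<int> references_map = { 3, 3, 7, 7 };  // entries per indy site
  int delta = 2;  // number of cpCache entries inserted ahead of them
  for (int& index : references_map) index += delta;
  for (int index : references_map) printf("%d ", index);  // prints: 5 5 9 9
  printf("\n");
  return 0;
}
// --------------------------------------------------------------------------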
 289 
 290 
 291 // Rewrite some ldc bytecodes to _fast_aldc
 292 void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
 293                                  bool reverse) {
 294   if (!reverse) {
 295     assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");


 296     address p = bcp + offset;
 297     int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
 298     constantTag tag = _pool->tag_at(cp_index).value();
 299     if (tag.is_method_handle() || tag.is_method_type() || tag.is_string()) {
 300       int ref_index = cp_entry_to_resolved_references(cp_index);
 301       if (is_wide) {
 302         (*bcp) = Bytecodes::_fast_aldc_w;
 303         assert(ref_index == (u2)ref_index, "index overflow");
 304         Bytes::put_native_u2(p, ref_index);
 305       } else {
 306         (*bcp) = Bytecodes::_fast_aldc;
 307         assert(ref_index == (u1)ref_index, "index overflow");
 308         (*p) = (u1)ref_index;
 309       }
 310     }
 311   } else {
 312     Bytecodes::Code rewritten_bc =
 313               (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
 314     if ((*bcp) == rewritten_bc) {
 315       address p = bcp + offset;
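// --------------------------------------------------------------------------
// A standalone sketch of the index-fit asserts above: a plain ldc carries a
// u1 operand while ldc_w carries a u2, so the resolved-references index must
// round-trip through the width of the operand it is written into.
#include <cassert>
#include <cstdint>

int main() {
  int narrow = 200;    // fits the u1 operand of _fast_aldc
  int wide   = 40000;  // needs the u2 operand of _fast_aldc_w
  assert(narrow == (int)(uint8_t)narrow);
  assert(wide   != (int)(uint8_t)wide);
  assert(wide   == (int)(uint16_t)wide);
  return 0;
}
// --------------------------------------------------------------------------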


 357       // a length of zero, meaning we need to make another method
 358       // call to calculate the length.
 359       bc_length = Bytecodes::length_for(c);
 360       if (bc_length == 0) {
 361         bc_length = Bytecodes::length_at(method, bcp);
 362 
 363         // length_at will put us at the bytecode after the one modified
 364         // by 'wide'. We don't currently examine any of the bytecodes
 365         // modified by wide, but in case we do in the future...
 366         if (c == Bytecodes::_wide) {
 367           prefix_length = 1;
 368           c = (Bytecodes::Code)bcp[1];
 369         }
 370       }
 371 
 372       assert(bc_length != 0, "impossible bytecode length");
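// --------------------------------------------------------------------------
// A standalone sketch (hypothetical constants) of the 'wide' handling above:
// for a wide-prefixed instruction the real opcode sits at bcp[1], and every
// operand offset used later is shifted by prefix_length = 1.
#include <cstdint>
#include <cstdio>

int main() {
  // wide (0xc4) iload (0x15) with a big-endian u2 local index of 0x0102.
  const uint8_t bcp[] = { 0xc4, 0x15, 0x01, 0x02 };
  int prefix_length = 0;
  uint8_t c = bcp[0];
  if (c == 0xc4) {      // Bytecodes::_wide
    prefix_length = 1;
    c = bcp[1];         // the opcode modified by the prefix
  }
  uint16_t index = (uint16_t)((bcp[prefix_length + 1] << 8) | bcp[prefix_length + 2]);
  printf("opcode=0x%02x local=%u\n", (unsigned)c, (unsigned)index);  // 0x15, 258
  return 0;
}
// --------------------------------------------------------------------------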
 373 
 374       switch (c) {
 375         case Bytecodes::_lookupswitch   : {
 376 #ifndef CC_INTERP


 377           Bytecode_lookupswitch bc(method, bcp);
 378           (*bcp) = (
 379             bc.number_of_pairs() < BinarySwitchThreshold
 380             ? Bytecodes::_fast_linearswitch
 381             : Bytecodes::_fast_binaryswitch
 382           );
 383 #endif
 384           break;
 385         }
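// --------------------------------------------------------------------------
// A standalone sketch (hypothetical threshold) of the choice above: with few
// (match, offset) pairs a linear scan is cheaper; with many, a binary search
// over the match values, which the classfile guarantees are sorted, wins.
#include <cstdio>

struct Pair { int match; int offset; };

static int find_offset(const Pair* pairs, int n, int key, int default_off) {
  if (n < 5) {  // stand-in for BinarySwitchThreshold: _fast_linearswitch
    for (int i = 0; i < n; i++)
      if (pairs[i].match == key) return pairs[i].offset;
    return default_off;
  }
  int lo = 0, hi = n - 1;  // _fast_binaryswitch
  while (lo <= hi) {
    int mid = lo + (hi - lo) / 2;
    if (pairs[mid].match == key) return pairs[mid].offset;
    if (pairs[mid].match < key) lo = mid + 1; else hi = mid - 1;
  }
  return default_off;
}

int main() {
  Pair pairs[] = { {1,10}, {4,20}, {9,30}, {16,40}, {25,50}, {36,60} };
  printf("%d\n", find_offset(pairs, 6, 16, -1));  // prints 40 via binary search
  return 0;
}
// --------------------------------------------------------------------------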
 386         case Bytecodes::_fast_linearswitch:
 387         case Bytecodes::_fast_binaryswitch: {
 388 #ifndef CC_INTERP
 389           (*bcp) = Bytecodes::_lookupswitch;
 390 #endif
 391           break;
 392         }
 393 
 394         case Bytecodes::_invokespecial  : {
 395           rewrite_invokespecial(bcp, prefix_length+1, reverse, invokespecial_error);
 396           break;
 397         }
 398 
 399         case Bytecodes::_getstatic      : // fall through
 400         case Bytecodes::_putstatic      : // fall through
 401         case Bytecodes::_getfield       : // fall through
 402         case Bytecodes::_putfield       : // fall through
 403         case Bytecodes::_invokevirtual  : // fall through



 404         case Bytecodes::_invokestatic   :
 405         case Bytecodes::_invokeinterface:
 406         case Bytecodes::_invokehandle   : // if reverse=true
 407           rewrite_member_reference(bcp, prefix_length+1, reverse);
 408           break;
 409         case Bytecodes::_invokedynamic:


 410           rewrite_invokedynamic(bcp, prefix_length+1, reverse);
 411           break;
 412         case Bytecodes::_ldc:
 413         case Bytecodes::_fast_aldc:  // if reverse=true
 414           maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
 415           break;
 416         case Bytecodes::_ldc_w:
 417         case Bytecodes::_fast_aldc_w:  // if reverse=true
 418           maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
 419           break;
 420         case Bytecodes::_jsr            : // fall through
 421         case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
 422         case Bytecodes::_monitorenter   : // fall through
 423         case Bytecodes::_monitorexit    : has_monitor_bytecodes = true; break;
 424       }
 425     }
 426   }
 427 
 428   // Update access flags
 429   if (has_monitor_bytecodes) {




   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "interpreter/bytecodes.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/rewriter.hpp"
  29 #include "memory/metaspaceShared.hpp"
  30 #include "memory/gcLocker.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "oops/generateOopMap.hpp"
  33 #include "prims/methodHandles.hpp"
  34 
  35 // Computes a CPC map (new_index -> original_index) for constant pool entries
  36 // that are referred to by the interpreter at runtime via the constant pool cache.
  37 // Also computes a CP map (original_index -> new_index).
  38 // Marks entries in CP which require additional processing.
  39 void Rewriter::compute_index_maps() {
  40   const int length  = _pool->length();
  41   init_maps(length);
  42   bool saw_mh_symbol = false;
  43   for (int i = 0; i < length; i++) {
  44     int tag = _pool->tag_at(i).value();
  45     switch (tag) {
  46       case JVM_CONSTANT_InterfaceMethodref:
  47       case JVM_CONSTANT_Fieldref          : // fall through
  48       case JVM_CONSTANT_Methodref         : // fall through
  49         add_cp_cache_entry(i);


 126         if (bcs.get_index() != 0) continue;
 127 
 128         // fall through
 129       case Bytecodes::_istore_0:
 130       case Bytecodes::_lstore_0:
 131       case Bytecodes::_fstore_0:
 132       case Bytecodes::_dstore_0:
 133       case Bytecodes::_astore_0:
 134         THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
 135                   "can't overwrite local 0 in Object.<init>");
 136         break;
 137     }
 138   }
 139 }
 140 
 141 
 142 // Rewrite a classfile-order CP index into a native-order CPC index.
 143 void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
 144   address p = bcp + offset;
 145   if (!reverse) {
 146     assert(DumpSharedSpaces || !MetaspaceShared::is_in_shared_space(bcp),
 147            "rewriting to _fast_xxxx for archived methods should only happen at dump time");
 148     int  cp_index    = Bytes::get_Java_u2(p);
 149     int  cache_index = cp_entry_to_cp_cache(cp_index);
 150     Bytes::put_native_u2(p, cache_index);
 151     if (!_method_handle_invokers.is_empty())
 152       maybe_rewrite_invokehandle(p - 1, cp_index, cache_index, reverse);
 153   } else {
 154     int cache_index = Bytes::get_native_u2(p);
 155     int pool_index = cp_cache_entry_pool_index(cache_index);
 156     Bytes::put_Java_u2(p, pool_index);
 157     if (!_method_handle_invokers.is_empty())
 158       maybe_rewrite_invokehandle(p - 1, pool_index, cache_index, reverse);
 159   }
 160 }
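// --------------------------------------------------------------------------
// A standalone sketch (assumed semantics, hypothetical names) of the kind of
// check the new assert relies on: a bytecode pointer is "in shared space" iff
// it lies inside the mapped CDS archive region [base, base + size).  Rewriting
// an archived method mutates that mapped memory, so it is only legal while
// dumping the archive (DumpSharedSpaces), which is what
// assert(DumpSharedSpaces || !is_in_shared_space(bcp), ...) enforces.
#include <cstdint>
#include <cstddef>

static const uint8_t* shared_base = nullptr;  // set when the archive is mapped
static size_t         shared_size = 0;

static bool is_in_shared_space(const void* p) {
  const uint8_t* q = static_cast<const uint8_t*>(p);
  return q >= shared_base && q < shared_base + shared_size;
}

int main() {
  uint8_t outside = 0;
  return is_in_shared_space(&outside) ? 1 : 0;  // 0: not in the (empty) region
}
// --------------------------------------------------------------------------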
 161 
 162 // If the constant pool entry for invokespecial is an InterfaceMethodref,
 163 // we need a separate cpCache entry for its resolution, because it resolves
 164 // differently from invokeinterface with the same InterfaceMethodref, and the
 165 // two cannot share a cpCache entry.  It is unclear whether all invokespecial
 166 // references to an InterfaceMethodref resolve to the same method, so a new
 167 // cpCache entry is created for each one.  This was added with lambda support.
 168 void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, bool* invokespecial_error) {
 169   address p = bcp + offset;
 170   if (!reverse) {
 171     assert(DumpSharedSpaces || !MetaspaceShared::is_in_shared_space(bcp),
 172            "rewriting to _fast_invokevfinal for archived methods should only happen at dump time");
 173     int cp_index = Bytes::get_Java_u2(p);
 174     if (_pool->tag_at(cp_index).is_interface_method()) {
 175       int cache_index = add_invokespecial_cp_cache_entry(cp_index);
 176       if (cache_index != (int)(jushort) cache_index) {
 177         *invokespecial_error = true;
 178       }
 179       Bytes::put_native_u2(p, cache_index);
 180     } else {
 181       rewrite_member_reference(bcp, offset, reverse);
 182     }
 183   } else {
 184     rewrite_member_reference(bcp, offset, reverse);
 185   }
 186 }
 187 
 188 
 189 // Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.)
 190 void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) {
 191   if (!reverse) {
 192     if ((*opc) == (u1)Bytecodes::_invokevirtual ||


 281 
 282       // invokedynamic resolved references map also points to cp cache and must
 283       // add delta to each.
 284       int resolved_index = _patch_invokedynamic_refs->at(i);
 285       for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
 286         assert(_invokedynamic_references_map[resolved_index+entry] == cache_index,
 287              "should be the same index");
 288         _invokedynamic_references_map.at_put(resolved_index+entry,
 289                                              cache_index + delta);
 290       }
 291     }
 292   }
 293 }
 294 
 295 
 296 // Rewrite some ldc bytecodes to _fast_aldc
 297 void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
 298                                  bool reverse) {
 299   if (!reverse) {
 300     assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");
 301     assert(DumpSharedSpaces || !MetaspaceShared::is_in_shared_space(bcp),
 302            "rewriting to _fast_aldc or _fast_aldc_w for archived methods should only happen at dump time");
 303     address p = bcp + offset;
 304     int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
 305     constantTag tag = _pool->tag_at(cp_index).value();
 306     if (tag.is_method_handle() || tag.is_method_type() || tag.is_string()) {
 307       int ref_index = cp_entry_to_resolved_references(cp_index);
 308       if (is_wide) {
 309         (*bcp) = Bytecodes::_fast_aldc_w;
 310         assert(ref_index == (u2)ref_index, "index overflow");
 311         Bytes::put_native_u2(p, ref_index);
 312       } else {
 313         (*bcp) = Bytecodes::_fast_aldc;
 314         assert(ref_index == (u1)ref_index, "index overflow");
 315         (*p) = (u1)ref_index;
 316       }
 317     }
 318   } else {
 319     Bytecodes::Code rewritten_bc =
 320               (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
 321     if ((*bcp) == rewritten_bc) {
 322       address p = bcp + offset;


 364       // a length of zero, meaning we need to make another method
 365       // call to calculate the length.
 366       bc_length = Bytecodes::length_for(c);
 367       if (bc_length == 0) {
 368         bc_length = Bytecodes::length_at(method, bcp);
 369 
 370         // length_at will put us at the bytecode after the one modified
 371         // by 'wide'. We don't currently examine any of the bytecodes
 372         // modified by wide, but in case we do in the future...
 373         if (c == Bytecodes::_wide) {
 374           prefix_length = 1;
 375           c = (Bytecodes::Code)bcp[1];
 376         }
 377       }
 378 
 379       assert(bc_length != 0, "impossible bytecode length");
 380 
 381       switch (c) {
 382         case Bytecodes::_lookupswitch   : {
 383 #ifndef CC_INTERP
 384           assert(DumpSharedSpaces || !MetaspaceShared::is_in_shared_space(bcp),
 385                  "rewriting to _fast_xxxswitch for archived methods should only happen at dump time");
 386           Bytecode_lookupswitch bc(method, bcp);
 387           (*bcp) = (
 388             bc.number_of_pairs() < BinarySwitchThreshold
 389             ? Bytecodes::_fast_linearswitch
 390             : Bytecodes::_fast_binaryswitch
 391           );
 392 #endif
 393           break;
 394         }
 395         case Bytecodes::_fast_linearswitch:
 396         case Bytecodes::_fast_binaryswitch: {
 397 #ifndef CC_INTERP
 398           (*bcp) = Bytecodes::_lookupswitch;
 399 #endif
 400           break;
 401         }
 402 
 403         case Bytecodes::_invokespecial  : {
 404           rewrite_invokespecial(bcp, prefix_length+1, reverse, invokespecial_error);
 405           break;
 406         }
 407 
 408         case Bytecodes::_getstatic      : // fall through
 409         case Bytecodes::_putstatic      : // fall through
 410         case Bytecodes::_getfield       : // fall through
 411         case Bytecodes::_putfield       : // fall through
 412         case Bytecodes::_invokevirtual  : // fall through
 413           assert(DumpSharedSpaces || !MetaspaceShared::is_in_shared_space(bcp),
 414                  "rewriting to _fast_getXXX/putXXX or _fast_invokeXXX for archived methods should"
 415                  " only happen at dump time");
 416         case Bytecodes::_invokestatic   :
 417         case Bytecodes::_invokeinterface:
 418         case Bytecodes::_invokehandle   : // if reverse=true
 419           rewrite_member_reference(bcp, prefix_length+1, reverse);
 420           break;
 421         case Bytecodes::_invokedynamic:
 422           assert(DumpSharedSpaces || !MetaspaceShared::is_in_shared_space(bcp),
 423                  "rewriting _invokedynamic for archived methods should only happen at dump time");
 424           rewrite_invokedynamic(bcp, prefix_length+1, reverse);
 425           break;
 426         case Bytecodes::_ldc:
 427         case Bytecodes::_fast_aldc:  // if reverse=true
 428           maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
 429           break;
 430         case Bytecodes::_ldc_w:
 431         case Bytecodes::_fast_aldc_w:  // if reverse=true
 432           maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
 433           break;
 434         case Bytecodes::_jsr            : // fall through
 435         case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
 436         case Bytecodes::_monitorenter   : // fall through
 437         case Bytecodes::_monitorexit    : has_monitor_bytecodes = true; break;
 438       }
 439     }
 440   }
 441 
 442   // Update access flags
 443   if (has_monitor_bytecodes) {