src/share/vm/opto/parse1.cpp

rev 5661 : 8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering.

--- old/src/share/vm/opto/parse1.cpp

  89     BytecodeParseHistogram::print();
  90   }
  91 }
  92 #endif
  93 
  94 //------------------------------ON STACK REPLACEMENT---------------------------
  95 
  96 // Construct a node which can be used to get incoming state for
  97 // on stack replacement.
  98 Node *Parse::fetch_interpreter_state(int index,
  99                                      BasicType bt,
 100                                      Node *local_addrs,
 101                                      Node *local_addrs_base) {
 102   Node *mem = memory(Compile::AliasIdxRaw);
 103   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 104   Node *ctl = control();
 105 
 106   // Very similar to LoadNode::make, except we handle un-aligned longs and
 107   // doubles on Sparc.  Intel can handle them just fine directly.
 108   Node *l;
 109   switch( bt ) {                // Signature is flattened
 110   case T_INT:     l = new (C) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
 111   case T_FLOAT:   l = new (C) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
 112   case T_ADDRESS: l = new (C) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
 113   case T_OBJECT:  l = new (C) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
 114   case T_LONG:
 115   case T_DOUBLE: {
 116     // Since arguments are in reverse order, the argument address 'adr'
 117     // refers to the back half of the long/double.  Recompute adr.
 118     adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
 119     if( Matcher::misaligned_doubles_ok ) {
 120       l = (bt == T_DOUBLE)
 121         ? (Node*)new (C) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
 122         : (Node*)new (C) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
 123     } else {
 124       l = (bt == T_DOUBLE)
 125         ? (Node*)new (C) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
 126         : (Node*)new (C) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
 127     }
 128     break;
 129   }
 130   default: ShouldNotReachHere();
 131   }
 132   return _gvn.transform(l);
 133 }
 134 
 135 // Helper routine to prevent the interpreter from handing
 136 // unexpected typestate to an OSR method.
 137 // The Node l is a value newly dug out of the interpreter frame.
 138 // The type is the type predicted by ciTypeFlow.  Note that it is
 139 // not a general type, but can only come from Type::get_typeflow_type.
 140 // The safepoint is a map which will feed an uncommon trap.
 141 Node* Parse::check_interpreter_type(Node* l, const Type* type,
 142                                     SafePointNode* &bad_type_exit) {
 143 
 144   const TypeOopPtr* tp = type->isa_oopptr();
 145 
 146   // TypeFlow may assert null-ness if a type appears unloaded.


 212     return;
 213   }
 214 
 215   // Commute monitors from interpreter frame to compiler frame.
 216   assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
 217   int mcnt = osr_block->flow()->monitor_count();
 218   Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
 219   for (index = 0; index < mcnt; index++) {
 220     // Make a BoxLockNode for the monitor.
 221     Node *box = _gvn.transform(new (C) BoxLockNode(next_monitor()));
 222 
 223 
 224     // Displaced headers and locked objects are interleaved in the
 225     // temp OSR buffer.  We only copy the locked objects out here.
 226     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
 227     Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
 228     // Try and copy the displaced header to the BoxNode
 229     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
 230 
 231 
 232     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw);
 233 
 234     // Build a bogus FastLockNode (no code will be generated) and push the
 235     // monitor into our debug info.
 236     const FastLockNode *flock = _gvn.transform(new (C) FastLockNode( 0, lock_object, box ))->as_FastLock();
 237     map()->push_monitor(flock);
 238 
 239     // If the lock is our method synchronization lock, tuck it away in
 240     // _sync_lock for return and rethrow exit paths.
 241     if (index == 0 && method()->is_synchronized()) {
 242       _synch_lock = flock;
 243     }
 244   }
 245 
 246   // Use the raw liveness computation to make sure that unexpected
 247   // values don't propagate into the OSR frame.
 248   MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
 249   if (!live_locals.is_valid()) {
 250     // Degenerate or breakpointed method.
 251     C->record_method_not_compilable("OSR in empty or breakpointed method");
 252     return;


1914          "must have non-null instance type");
1915 
1916   const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr();
1917   if (tinst != NULL && tinst->klass()->is_loaded() && !tinst->klass_is_exact()) {
1918     // The type isn't known exactly so see if CHA tells us anything.
1919     ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
1920     if (!Dependencies::has_finalizable_subclass(ik)) {
1921       // No finalizable subclasses so skip the dynamic check.
1922       C->dependencies()->assert_has_no_finalizable_subclasses(ik);
1923       return;
1924     }
1925   }
1926 
1927   // Insert a dynamic test for whether the instance needs
1928   // finalization.  In general this will fold up since the concrete
1929   // class is often visible so the access flags are constant.
1930   Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
1931   Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
1932 
1933   Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
1934   Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT);
1935 
1936   Node* mask  = _gvn.transform(new (C) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
1937   Node* check = _gvn.transform(new (C) CmpINode(mask, intcon(0)));
1938   Node* test  = _gvn.transform(new (C) BoolNode(check, BoolTest::ne));
1939 
1940   IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);
1941 
1942   RegionNode* result_rgn = new (C) RegionNode(3);
1943   record_for_igvn(result_rgn);
1944 
1945   Node *skip_register = _gvn.transform(new (C) IfFalseNode(iff));
1946   result_rgn->init_req(1, skip_register);
1947 
1948   Node *needs_register = _gvn.transform(new (C) IfTrueNode(iff));
1949   set_control(needs_register);
1950   if (stopped()) {
1951     // There is no slow path.
1952     result_rgn->init_req(2, top());
1953   } else {
1954     Node *call = make_runtime_call(RC_NO_LEAF,

+++ new/src/share/vm/opto/parse1.cpp


  89     BytecodeParseHistogram::print();
  90   }
  91 }
  92 #endif
  93 
  94 //------------------------------ON STACK REPLACEMENT---------------------------
  95 
  96 // Construct a node which can be used to get incoming state for
  97 // on stack replacement.
  98 Node *Parse::fetch_interpreter_state(int index,
  99                                      BasicType bt,
 100                                      Node *local_addrs,
 101                                      Node *local_addrs_base) {
 102   Node *mem = memory(Compile::AliasIdxRaw);
 103   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 104   Node *ctl = control();
 105 
 106   // Very similar to LoadNode::make, except we handle un-aligned longs and
 107   // doubles on Sparc.  Intel can handle them just fine directly.
 108   Node *l;
 109   switch (bt) {                // Signature is flattened
 110   case T_INT:     l = new (C) LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        LoadNode::unordered); break;
 111   case T_FLOAT:   l = new (C) LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         LoadNode::unordered); break;
 112   case T_ADDRESS: l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  LoadNode::unordered); break;
 113   case T_OBJECT:  l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, LoadNode::unordered); break;
 114   case T_LONG:
 115   case T_DOUBLE: {
 116     // Since arguments are in reverse order, the argument address 'adr'
 117     // refers to the back half of the long/double.  Recompute adr.
 118     adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
 119     if (Matcher::misaligned_doubles_ok) {
 120       l = (bt == T_DOUBLE)
 121         ? (Node*)new (C) LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, LoadNode::unordered)
 122         : (Node*)new (C) LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, false, LoadNode::unordered);
 123     } else {
 124       l = (bt == T_DOUBLE)
 125         ? (Node*)new (C) LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, LoadNode::unordered)
 126         : (Node*)new (C) LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, LoadNode::unordered);
 127     }
 128     break;
 129   }
 130   default: ShouldNotReachHere();
 131   }
 132   return _gvn.transform(l);
 133 }
 134 
 135 // Helper routine to prevent the interpreter from handing
 136 // unexpected typestate to an OSR method.
 137 // The Node l is a value newly dug out of the interpreter frame.
 138 // The type is the type predicted by ciTypeFlow.  Note that it is
 139 // not a general type, but can only come from Type::get_typeflow_type.
 140 // The safepoint is a map which will feed an uncommon trap.
 141 Node* Parse::check_interpreter_type(Node* l, const Type* type,
 142                                     SafePointNode* &bad_type_exit) {
 143 
 144   const TypeOopPtr* tp = type->isa_oopptr();
 145 
 146   // TypeFlow may assert null-ness if a type appears unloaded.

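The loads rewritten in fetch_interpreter_state above now take an explicit memory-ordering argument (LoadNode::unordered in this revision), so a back end for a weakly-ordered machine such as PPC64 can tell plain loads apart from loads that must provide acquire semantics. Below is a minimal standalone sketch of that idea, assuming nothing beyond what the hunk shows; it is not HotSpot code, and every name in it is hypothetical.

// Standalone C++ sketch (not HotSpot code; all names are hypothetical).
// It models the idea applied in the hunk above: each load node records the
// ordering it needs, and a weakly-ordered back end decides from that tag
// whether a barrier must follow the load.
#include <cstdio>

enum class Ordering { unordered, acquire };

struct LoadSketch {
  const char* slot;    // what is being loaded (for the trace only)
  Ordering    order;   // ordering requested when the node was built
};

static void emit_load(const LoadSketch& l) {
  if (l.order == Ordering::acquire) {
    // A PPC64-style back end would follow an acquiring load with a barrier.
    std::printf("  ld   %-14s ; lwsync   (acquire)\n", l.slot);
  } else {
    // Plain loads, like the raw OSR-buffer loads above, need no barrier.
    std::printf("  ld   %-14s            (unordered)\n", l.slot);
  }
}

int main() {
  emit_load({"osr_local_0",    Ordering::unordered});   // fetch_interpreter_state style
  emit_load({"volatile_field", Ordering::acquire});     // what a volatile read would request
  return 0;
}

The OSR-buffer reads built here are raw, compiler-internal accesses, which is why every rewritten constructor call in this hunk passes LoadNode::unordered rather than an acquiring variant.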

 212     return;
 213   }
 214 
 215   // Commute monitors from interpreter frame to compiler frame.
 216   assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
 217   int mcnt = osr_block->flow()->monitor_count();
 218   Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
 219   for (index = 0; index < mcnt; index++) {
 220     // Make a BoxLockNode for the monitor.
 221     Node *box = _gvn.transform(new (C) BoxLockNode(next_monitor()));
 222 
 223 
 224     // Displaced headers and locked objects are interleaved in the
 225     // temp OSR buffer.  We only copy the locked objects out here.
 226     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
 227     Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
 228     // Try and copy the displaced header to the BoxNode
 229     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
 230 
 231 
 232     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, false, StoreNode::unordered);
 233 
 234     // Build a bogus FastLockNode (no code will be generated) and push the
 235     // monitor into our debug info.
 236     const FastLockNode *flock = _gvn.transform(new (C) FastLockNode( 0, lock_object, box ))->as_FastLock();
 237     map()->push_monitor(flock);
 238 
 239     // If the lock is our method synchronization lock, tuck it away in
 240     // _sync_lock for return and rethrow exit paths.
 241     if (index == 0 && method()->is_synchronized()) {
 242       _synch_lock = flock;
 243     }
 244   }
 245 
 246   // Use the raw liveness computation to make sure that unexpected
 247   // values don't propagate into the OSR frame.
 248   MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
 249   if (!live_locals.is_valid()) {
 250     // Degenerate or breakpointed method.
 251     C->record_method_not_compilable("OSR in empty or breakpointed method");
 252     return;

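The loop above reuses fetch_interpreter_state to walk the monitor section of the temporary OSR buffer, so each monitor's lock object and its displaced header come from adjacent words below monitors_addr. A small standalone sketch of that address arithmetic follows, using hypothetical sizes (wordSize taken as 8 bytes, a method with 3 locals and 2 held monitors); it only mirrors the offset formulas visible in the hunk.

// Standalone sketch of the OSR-buffer offsets computed above (hypothetical sizes).
#include <cstdio>

int main() {
  const int wordSize   = 8;   // assumed: bytes per VM word on a 64-bit target
  const int max_locals = 3;   // hypothetical method with 3 locals
  const int mcnt       = 2;   // hypothetical: 2 monitors held at the OSR point

  // monitors_addr = osr_buf + (max_locals + mcnt*2 - 1) * wordSize
  const int monitors_off = (max_locals + mcnt*2 - 1) * wordSize;

  for (int index = 0; index < mcnt; index++) {
    // fetch_interpreter_state(i, ...) reads at monitors_addr - i*wordSize, so the
    // lock object (i = index*2) and its displaced header (i = index*2 + 1) are adjacent.
    int obj_off = monitors_off - (index*2)     * wordSize;
    int hdr_off = monitors_off - (index*2 + 1) * wordSize;
    std::printf("monitor %d: object at osr_buf+%d, displaced header at osr_buf+%d\n",
                index, obj_off, hdr_off);
  }
  return 0;
}

The displaced header fetched this way is then written into the BoxLock by the rewritten store_to_memory call, which now states its ordering explicitly (StoreNode::unordered), mirroring the load-side change.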

1914          "must have non-null instance type");
1915 
1916   const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr();
1917   if (tinst != NULL && tinst->klass()->is_loaded() && !tinst->klass_is_exact()) {
1918     // The type isn't known exactly so see if CHA tells us anything.
1919     ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
1920     if (!Dependencies::has_finalizable_subclass(ik)) {
1921       // No finalizable subclasses so skip the dynamic check.
1922       C->dependencies()->assert_has_no_finalizable_subclasses(ik);
1923       return;
1924     }
1925   }
1926 
1927   // Insert a dynamic test for whether the instance needs
1928   // finalization.  In general this will fold up since the concrete
1929   // class is often visible so the access flags are constant.
1930   Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
1931   Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
1932 
1933   Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
1934   Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, false, LoadNode::unordered);
1935 
1936   Node* mask  = _gvn.transform(new (C) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
1937   Node* check = _gvn.transform(new (C) CmpINode(mask, intcon(0)));
1938   Node* test  = _gvn.transform(new (C) BoolNode(check, BoolTest::ne));
1939 
1940   IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);
1941 
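The nodes built just above (LoadKlassNode, the access_flags load, AndI with JVM_ACC_HAS_FINALIZER, CmpI against zero, and a ne BoolNode feeding the If) are the graph form of an ordinary bit test on the receiver's class. Here is a standalone sketch of the equivalent source-level control flow; the flag value, the struct names, and the register_finalizer stand-in are illustrative only, not HotSpot definitions.

// Standalone sketch (not HotSpot code) of what the node graph above computes:
// load the receiver's klass, load its access flags, and take the slow path
// only when the has-finalizer bit is set.
#include <cstdio>

static const unsigned HAS_FINALIZER_BIT = 1u << 30;    // illustrative bit, not the real JVM_ACC value

struct KlassSketch { unsigned access_flags; };
struct OopSketch   { const KlassSketch* klass; };

static void register_finalizer(const OopSketch*) {     // stand-in for the runtime call
  std::puts("slow path: runtime registers the object for finalization");
}

static void finalizer_check_sketch(const OopSketch* receiver) {
  // Mirrors AndI(access_flags, JVM_ACC_HAS_FINALIZER); CmpI(..., 0); Bool(ne); If.
  if ((receiver->klass->access_flags & HAS_FINALIZER_BIT) != 0) {
    register_finalizer(receiver);   // IfTrue arm (needs_register)
  }                                 // IfFalse arm (skip_register) falls through
}

int main() {
  KlassSketch plain     = {0};
  KlassSketch finalized = {HAS_FINALIZER_BIT};
  OopSketch a = {&plain};
  OopSketch b = {&finalized};
  finalizer_check_sketch(&a);   // no output: the bit is clear, the test skips the call
  finalizer_check_sketch(&b);   // prints the slow-path message
  return 0;
}

When the receiver's concrete class is known to the compiler, the access_flags load folds to a constant and the whole If disappears, as the comment at line 1927 notes.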
1942   RegionNode* result_rgn = new (C) RegionNode(3);
1943   record_for_igvn(result_rgn);
1944 
1945   Node *skip_register = _gvn.transform(new (C) IfFalseNode(iff));
1946   result_rgn->init_req(1, skip_register);
1947 
1948   Node *needs_register = _gvn.transform(new (C) IfTrueNode(iff));
1949   set_control(needs_register);
1950   if (stopped()) {
1951     // There is no slow path.
1952     result_rgn->init_req(2, top());
1953   } else {
1954     Node *call = make_runtime_call(RC_NO_LEAF,