
src/share/vm/opto/macro.cpp

1109   if (_fallthroughcatchproj != NULL) {
1110     _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
1111   }
1112   if (_memproj_fallthrough != NULL) {
1113     _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
1114   }
1115   if (_memproj_catchall != NULL) {
1116     _igvn.replace_node(_memproj_catchall, C->top());
1117   }
1118   if (_ioproj_fallthrough != NULL) {
1119     _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
1120   }
1121   if (_ioproj_catchall != NULL) {
1122     _igvn.replace_node(_ioproj_catchall, C->top());
1123   }
1124   if (_catchallcatchproj != NULL) {
1125     _igvn.replace_node(_catchallcatchproj, C->top());
1126   }
1127 }
1128 
1129 void PhaseMacroExpand::conditional_sample(Node *should_sample,
1130                                           BoolTest::mask test,
1131                                           float probability,
1132                                           CallLeafNode *call,
1133                                           Node *thread,
1134                                           Node **fast_oop_ctrl,
1135                                           Node **fast_oop_rawmem,
1136                                           Node **fast_oop,
1137                                           Node *in_node) {
1138   Node* sample_cmp = new CmpXNode(should_sample, _igvn.MakeConX(0));
1139   transform_later(sample_cmp);
1140 
1141   Node *sample_bool = new BoolNode(sample_cmp, test);
1142   transform_later(sample_bool);
1143 
1144   IfNode *sample_if = new IfNode(*fast_oop_ctrl,
1145                                  sample_bool,
1146                                  probability,
1147                                  COUNT_UNKNOWN);
1148   transform_later(sample_if);
1149 
1150   // Slow-path call to sample
1151   Node *sample_true = new IfTrueNode(sample_if);
1152   transform_later(sample_true);
1153 
1154   // Fast path to no sample
1155   Node *sample_false = new IfFalseNode(sample_if);
1156   transform_later(sample_false);
1157 
1158   // Create postdominators for both the control and data flow paths.
1159   Node *sample_region = new RegionNode(3);
1160   Node *sample_phi_rawmem = new PhiNode(sample_region,
1161                                         Type::MEMORY,
1162                                         TypeRawPtr::BOTTOM);
1163 
1164   sample_region->init_req(1, sample_false);
1165   sample_phi_rawmem->init_req(1, *fast_oop_rawmem);
1166 
1167   // Invoke the sampling method on the slow path.
1168   int size = TypeFunc::Parms + 2;
1169 
1170   call->init_req(TypeFunc::Parms+0, thread);
1171   call->init_req(TypeFunc::Parms+1, *fast_oop);
1172   call->init_req( TypeFunc::Control, sample_true);
1173   call->init_req( TypeFunc::I_O    , top());   // does no i/o
1174   call->init_req( TypeFunc::Memory , *fast_oop_rawmem );
1175   call->init_req( TypeFunc::ReturnAdr, in_node->in(TypeFunc::ReturnAdr));
1176   call->init_req( TypeFunc::FramePtr, in_node->in(TypeFunc::FramePtr));
1177   transform_later(call);
1178   Node *sample_oop_rawmem = new ProjNode(call, TypeFunc::Memory);
1179   transform_later(sample_oop_rawmem);
1180 
1181   // Tie the slow path to the postdominating node.
1182   sample_region->init_req(2, sample_true);
1183   sample_phi_rawmem->init_req(2, sample_oop_rawmem);
1184   transform_later(sample_region);
1185 
1186   *fast_oop_ctrl = sample_region;
1187   *fast_oop_rawmem = sample_phi_rawmem;
1188   transform_later(*fast_oop_rawmem);
1189 }
1190 
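
The helper above only builds IR nodes, so its effect is easiest to read as ordinary control flow. The sketch below is an illustrative model, not HotSpot code: sample_call stands in for the CallLeafNode supplied by the caller, and the fall-through point models the RegionNode/PhiNode merge handed back through *fast_oop_ctrl and *fast_oop_rawmem.

      // Illustrative model of the branch shape conditional_sample() emits (assuming test = BoolTest::le):
      if (should_sample <= 0) {          // unlikely arm, taken with 'probability'
        sample_call(thread, fast_oop);   // the supplied CallLeafNode, e.g. object_alloc_do_sample
      }
      // both arms rejoin here: control merges in a RegionNode, raw memory in a PhiNode,
      // and the merged nodes are returned through *fast_oop_ctrl / *fast_oop_rawmem
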
1191 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
1192   // Don't do scalar replacement if the frame can be popped by JVMTI:
1193   // if reallocation fails during deoptimization we'll pop all
1194   // interpreter frames for this compiled frame and that won't play
1195   // nice with JVMTI popframe.
1196   if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) {
1197     return false;
1198   }
1199   Node* klass = alloc->in(AllocateNode::KlassNode);
1200   const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
1201   Node* res = alloc->result_cast();
1202   // Eliminate boxing allocations which are not used,
1203   // regardless of their scalar-replaceable status.
1204   bool boxing_alloc = C->eliminate_boxing() &&
1205                       tklass->klass()->is_instance_klass()  &&
1206                       tklass->klass()->as_instance_klass()->is_box_klass();
1207   if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) {
1208     return false;
1209   }
1210 


1679                                             CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
1680                                             "dtrace_object_alloc",
1681                                             TypeRawPtr::BOTTOM);
1682 
1683       // Get base of thread-local storage area
1684       Node* thread = new ThreadLocalNode();
1685       transform_later(thread);
1686 
1687       call->init_req(TypeFunc::Parms+0, thread);
1688       call->init_req(TypeFunc::Parms+1, fast_oop);
1689       call->init_req(TypeFunc::Control, fast_oop_ctrl);
1690       call->init_req(TypeFunc::I_O    , top()); // does no i/o
1691       call->init_req(TypeFunc::Memory , fast_oop_rawmem);
1692       call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
1693       call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
1694       transform_later(call);
1695       fast_oop_ctrl = new ProjNode(call,TypeFunc::Control);
1696       transform_later(fast_oop_ctrl);
1697       fast_oop_rawmem = new ProjNode(call,TypeFunc::Memory);
1698       transform_later(fast_oop_rawmem);
1699     }
1700 
1701     if (HeapMonitor) {
1702       // Inlined version of HeapMonitoring::object_alloc_base
1703       // Get base of thread-local storage area
1704       Node* thread = new ThreadLocalNode();
1705       transform_later(thread);
1706 
1707       ByteSize sample_offset = JavaThread::bytes_until_sample_offset();
1708 
1709       // Do test to see if we should sample.
1710       // Get bytes_until_sample from thread local storage.
1711       Node *bytes_until_sample = make_load(fast_oop_ctrl,
1712                                            fast_oop_rawmem,
1713                                            thread,
1714                                            in_bytes(sample_offset),
1715                                            TypeX_X,
1716                                            TypeX_X->basic_type());
1717 
1718       // new_bytes_until_sample = bytes_until_sample - size_in_bytes
1719       Node *new_bytes_until_sample =
1720           new SubXNode(bytes_until_sample, size_in_bytes);
1721       transform_later(new_bytes_until_sample);
1722 
1723       // bytes_until_sample = new_bytes_until_sample;
1724       fast_oop_rawmem = make_store(fast_oop_ctrl,
1725                                    fast_oop_rawmem,
1726                                    thread,
1727                                    in_bytes(sample_offset),
1728                                    new_bytes_until_sample,
1729                                    TypeX_X->basic_type());
1730 
1731       // Call to make if sampling succeeds
1732       int size = TypeFunc::Parms + 2;
1733       CallLeafNode *call = new CallLeafNode(
1734           OptoRuntime::heap_object_alloc_Type(),
1735           CAST_FROM_FN_PTR(address,
1736                            HeapMonitoring::object_alloc_do_sample),
1737           "object_alloc_do_sample",
1738           TypeRawPtr::BOTTOM);
1739 
1740       // if (new_bytes_until_sample <= 0)
1741       conditional_sample(new_bytes_until_sample,
1742                          BoolTest::le,
1743                          // Probability
1744                          // ~1/10000
1745                          PROB_UNLIKELY_MAG(4),
1746                          call,
1747                          thread,
1748                          &fast_oop_ctrl,
1749                          &fast_oop_rawmem,
1750                          &fast_oop,
1751                          alloc);
1752     }
1753 
1754     // Plug in the successful fast-path into the result merge point
1755     result_region    ->init_req(fast_result_path, fast_oop_ctrl);
1756     result_phi_rawoop->init_req(fast_result_path, fast_oop);
1757     result_phi_i_o   ->init_req(fast_result_path, i_o);
1758     result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
1759   } else {
1760     slow_region = ctrl;
1761     result_phi_i_o = i_o; // Rename it to use in the following code.
1762   }
1763 
1764   // Generate slow-path call
1765   CallNode *call = new CallStaticJavaNode(slow_call_type, slow_call_address,
1766                                OptoRuntime::stub_name(slow_call_address),
1767                                alloc->jvms()->bci(),
1768                                TypePtr::BOTTOM);
1769   call->init_req( TypeFunc::Control, slow_region );
1770   call->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
1771   call->init_req( TypeFunc::Memory , slow_mem ); // may gc ptrs
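
Read together with conditional_sample(), the HeapMonitor block expands the allocation fast path into IR that behaves roughly like the sketch below. This is an illustrative model, not HotSpot code: the bytes_until_sample accessors stand in for the raw make_load/make_store at JavaThread::bytes_until_sample_offset(), new_obj stands for the freshly allocated oop (fast_oop above), and only HeapMonitoring::object_alloc_do_sample and the ~1/10000 branch probability come from the patch itself.

      // Illustrative model of the inlined sampling check on the allocation fast path:
      intptr_t bytes_until_sample = thread->bytes_until_sample();  // load counter from thread-local storage
      bytes_until_sample -= size_in_bytes;                         // charge this allocation against the counter
      thread->set_bytes_until_sample(bytes_until_sample);          // store the updated counter back
      if (bytes_until_sample <= 0) {                               // unlikely arm, PROB_UNLIKELY_MAG(4) ~ 1/10000
        HeapMonitoring::object_alloc_do_sample(thread, new_obj);   // leaf call wired up by conditional_sample()
      }
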

