// Fragment: tail of allocation elimination. Rewires the projections of the
// allocation's InitializeNode (and the call's own projections) so the whole
// allocation can be removed from the graph.
789 assert(init->outcnt() <= 2, "only a control and memory projection expected");
790 Node *ctrl_proj = init->proj_out(TypeFunc::Control);
791 if (ctrl_proj != NULL) {
792 assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection");
// Control users of the Initialize move to the allocation's fall-through catch proj.
793 _igvn.replace_node(ctrl_proj, _fallthroughcatchproj);
794 }
795 Node *mem_proj = init->proj_out(TypeFunc::Memory);
796 if (mem_proj != NULL) {
797 Node *mem = init->in(TypeFunc::Memory);
798 #ifdef ASSERT
// Initialize's memory input may be a MergeMem; in either shape it must trace
// back to the allocation's fall-through memory projection.
799 if (mem->is_MergeMem()) {
800 assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection");
801 } else {
802 assert(mem == _memproj_fallthrough, "allocation memory projection");
803 }
804 #endif
// Memory users of the Initialize bypass it and use its memory input directly.
805 _igvn.replace_node(mem_proj, mem);
806 }
807 } else if (use->is_AddP()) {
808 // raw memory addresses used only by the initialization
// The initialization is going away, so these addresses are dead: kill with top.
809 _igvn.hash_delete(use);
810 _igvn.subsume_node(use, C->top());
811 } else {
812 assert(false, "only Initialize or AddP expected");
813 }
// Replacements above removed uses from _resproj; adjust the iteration index by
// the number of outputs that disappeared (oc1 was the out-count beforehand).
814 j -= (oc1 - _resproj->outcnt());
815 }
816 }
// Finally detach the allocation call itself: each surviving projection is
// replaced by the corresponding input of the call; the exceptional (catchall)
// paths become dead and are replaced with top.
817 if (_fallthroughcatchproj != NULL) {
818 _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
819 }
820 if (_memproj_fallthrough != NULL) {
821 _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
822 }
823 if (_memproj_catchall != NULL) {
824 _igvn.replace_node(_memproj_catchall, C->top());
825 }
826 if (_ioproj_fallthrough != NULL) {
827 _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
828 }
829 if (_ioproj_catchall != NULL) {
830 _igvn.replace_node(_ioproj_catchall, C->top());
// Fragment: tail of expand_allocate_common. Moves any remaining uses of the
// exceptional I/O projection onto the fall-through projection, then plugs the
// slow (runtime call) path into the fast/slow result merge point.
1274 // we end up with a call that has only 1 control projection
1275 if (_ioproj_catchall != NULL ) {
1276 for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
1277 Node *use = _ioproj_catchall->fast_out(i);
1278 _igvn.hash_delete(use);
// replace_input reports how many edges were removed; shrink imax accordingly
// so the fast iterator stays valid while we mutate the use list.
1279 imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough);
1280 _igvn._worklist.push(use);
1281 // back up iterator
1282 --i;
1283 }
1284 }
1285
1286 // if we generated only a slow call, we are done
1287 if (always_slow)
1288 return;
1289
1290
// Clone the old fall-through control projection to become the slow-path edge,
// then make all former users of it see the merged result_region instead.
1291 if (_fallthroughcatchproj != NULL) {
1292 ctrl = _fallthroughcatchproj->clone();
1293 transform_later(ctrl);
1294 _igvn.hash_delete(_fallthroughcatchproj);
1295 _igvn.subsume_node(_fallthroughcatchproj, result_region);
1296 } else {
1297 ctrl = top();
1298 }
1299 Node *slow_result;
1300 if (_resproj == NULL) {
1301 // no uses of the allocation result
1302 slow_result = top();
1303 } else {
// Same pattern for the result projection: the clone feeds the slow side of the
// phi, while all former users switch to the merged oop phi.
1304 slow_result = _resproj->clone();
1305 transform_later(slow_result);
1306 _igvn.hash_delete(_resproj);
1307 _igvn.subsume_node(_resproj, result_phi_rawoop);
1308 }
1309
1310 // Plug slow-path into result merge point
1311 result_region ->init_req( slow_result_path, ctrl );
1312 result_phi_rawoop->init_req( slow_result_path, slow_result);
1313 result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough );
1314 transform_later(result_region);
1315 transform_later(result_phi_rawoop);
1316 transform_later(result_phi_rawmem);
1317 transform_later(result_phi_i_o);
1318 // This completes all paths into the result merge point
1319 }
1320
1321
1322 // Helper for PhaseMacroExpand::expand_allocate_common.
1323 // Initializes the newly-allocated storage.
// NOTE(review): signature continues beyond this excerpt; remaining parameters
// and the body are not visible here.
1324 Node*
1325 PhaseMacroExpand::initialize_object(AllocateNode* alloc,
1326 Node* control, Node* rawmem, Node* object,
1327 Node* klass_node, Node* length,
// Fragment: tail of lock/unlock elimination. Detaches the Lock/Unlock call by
// routing its control and memory projections to the call's own inputs, and
// removes the adjacent memory barrier that was emitted with it.
1596 extract_call_projections(alock);
1597 // There are 2 projections from the lock. The lock node will
1598 // be deleted when its last use is subsumed below.
1599 assert(alock->outcnt() == 2 &&
1600 _fallthroughproj != NULL &&
1601 _memproj_fallthrough != NULL,
1602 "Unexpected projections from Lock/Unlock");
1603
1604 Node* fallthroughproj = _fallthroughproj;
1605 Node* memproj_fallthrough = _memproj_fallthrough;
1606
1607 // The memory projection from a lock/unlock is RawMem
1608 // The input to a Lock is merged memory, so extract its RawMem input
1609 // (unless the MergeMem has been optimized away.)
1610 if (alock->is_Lock()) {
1611 // Search for MemBarAcquire node and delete it also.
// A Lock's sole control user is the acquire barrier; splice its projections
// back onto the lock's fall-through projections so the barrier drops out.
1612 MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
1613 assert(membar != NULL && membar->Opcode() == Op_MemBarAcquire, "");
1614 Node* ctrlproj = membar->proj_out(TypeFunc::Control);
1615 Node* memproj = membar->proj_out(TypeFunc::Memory);
1616 _igvn.hash_delete(ctrlproj);
1617 _igvn.subsume_node(ctrlproj, fallthroughproj);
1618 _igvn.hash_delete(memproj);
1619 _igvn.subsume_node(memproj, memproj_fallthrough);
1620
1621 // Delete FastLock node also if this Lock node is unique user
1622 // (a loop peeling may clone a Lock node).
1623 Node* flock = alock->as_Lock()->fastlock_node();
1624 if (flock->outcnt() == 1) {
1625 assert(flock->unique_out() == alock, "sanity");
1626 _igvn.hash_delete(flock);
1627 _igvn.subsume_node(flock, top());
1628 }
1629 }
1630
1631 // Search for MemBarRelease node and delete it also.
// For an Unlock, the release barrier sits ABOVE the call: if the control/memory
// inputs come from a MemBar's projections, bypass the barrier entirely and
// retarget the fall-through locals to the barrier's own inputs.
1632 if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() &&
1633 ctrl->in(0)->is_MemBar()) {
1634 MemBarNode* membar = ctrl->in(0)->as_MemBar();
1635 assert(membar->Opcode() == Op_MemBarRelease &&
1636 mem->is_Proj() && membar == mem->in(0), "");
1637 _igvn.hash_delete(fallthroughproj);
1638 _igvn.subsume_node(fallthroughproj, ctrl);
1639 _igvn.hash_delete(memproj_fallthrough);
1640 _igvn.subsume_node(memproj_fallthrough, mem);
1641 fallthroughproj = ctrl;
1642 memproj_fallthrough = mem;
1643 ctrl = membar->in(TypeFunc::Control);
1644 mem = membar->in(TypeFunc::Memory);
1645 }
1646
// Final splice: users of the call's projections now see the call's inputs,
// making the Lock/Unlock node itself dead.
1647 _igvn.hash_delete(fallthroughproj);
1648 _igvn.subsume_node(fallthroughproj, ctrl);
1649 _igvn.hash_delete(memproj_fallthrough);
1650 _igvn.subsume_node(memproj_fallthrough, mem);
1651 return true;
1652 }
1653
1654
1655 //------------------------------expand_lock_node----------------------
// Expands a Lock macro node into an inline fast path plus a runtime slow call.
// NOTE(review): lines 1671-1861 are missing from this excerpt (the biased
// locking fast path and the slow-call construction); `call`, `region` and
// `mem_phi` used below are built in that missing section.
1656 void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
1657
1658 Node* ctrl = lock->in(TypeFunc::Control);
1659 Node* mem = lock->in(TypeFunc::Memory);
1660 Node* obj = lock->obj_node();
1661 Node* box = lock->box_node();
1662 Node* flock = lock->fastlock_node();
1663
1664 // Make the merge point
1665 Node *region;
1666 Node *mem_phi;
1667 Node *slow_path;
1668
1669 if (UseOptoBiasInlining) {
1670 /*
1862
1863 extract_call_projections(call);
1864
1865 // Slow path can only throw asynchronous exceptions, which are always
1866 // de-opted. So the compiler thinks the slow-call can never throw an
1867 // exception. If it DOES throw an exception we would need the debug
1868 // info removed first (since if it throws there is no monitor).
1869 assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
1870 _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
1871
1872 // Capture slow path
1873 // disconnect fall-through projection from call and create a new one
1874 // hook up users of fall-through projection to region
1875 Node *slow_ctrl = _fallthroughproj->clone();
1876 transform_later(slow_ctrl);
1877 _igvn.hash_delete(_fallthroughproj);
1878 _fallthroughproj->disconnect_inputs(NULL);
1879 region->init_req(1, slow_ctrl);
1880 // region inputs are now complete
1881 transform_later(region);
// All former users of the old fall-through control now see the merged region.
1882 _igvn.subsume_node(_fallthroughproj, region);
1883
// Fresh memory projection feeds the slow side of the memory phi; users of the
// old fall-through memory projection are switched to the phi.
1884 Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) );
1885 mem_phi->init_req(1, memproj );
1886 transform_later(mem_phi);
1887 _igvn.hash_delete(_memproj_fallthrough);
1888 _igvn.subsume_node(_memproj_fallthrough, mem_phi);
1889 }
1890
1891 //------------------------------expand_unlock_node----------------------
// Expands an Unlock macro node into an inline fast-unlock test plus a runtime
// slow call. NOTE(review): lines 1909-1925 are missing from this excerpt (the
// biased-locking region setup and mem_phi construction).
1892 void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
1893
1894 Node* ctrl = unlock->in(TypeFunc::Control);
1895 Node* mem = unlock->in(TypeFunc::Memory);
1896 Node* obj = unlock->obj_node();
1897 Node* box = unlock->box_node();
1898
1899 // No need for a null check on unlock
1900
1901 // Make the merge point
1902 Node *region;
1903 Node *mem_phi;
1904
1905 if (UseOptoBiasInlining) {
1906 // Check for biased locking unlock case, which is a no-op.
1907 // See the full description in MacroAssembler::biased_locking_exit().
1908 region = new (C, 4) RegionNode(4);
// Route failures of the inline FastUnlock test to the runtime call.
1926 Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);
1927
1928 CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box );
1929
1930 extract_call_projections(call);
1931
1932 assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
1933 _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
1934
1935 // No exceptions for unlocking
1936 // Capture slow path
1937 // disconnect fall-through projection from call and create a new one
1938 // hook up users of fall-through projection to region
1939 Node *slow_ctrl = _fallthroughproj->clone();
1940 transform_later(slow_ctrl);
1941 _igvn.hash_delete(_fallthroughproj);
1942 _fallthroughproj->disconnect_inputs(NULL);
1943 region->init_req(1, slow_ctrl);
1944 // region inputs are now complete
1945 transform_later(region);
// All former users of the old fall-through control now see the merged region.
1946 _igvn.subsume_node(_fallthroughproj, region);
1947
// Memory merge: slow path (req 1) gets the call's memory projection, fast path
// (req 2) keeps the incoming memory unchanged.
1948 Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) );
1949 mem_phi->init_req(1, memproj );
1950 mem_phi->init_req(2, mem);
1951 transform_later(mem_phi);
1952 _igvn.hash_delete(_memproj_fallthrough);
1953 _igvn.subsume_node(_memproj_fallthrough, mem_phi);
1954 }
1955
1956 //------------------------------expand_macro_nodes----------------------
1957 // Returns true if a failure occurred.
// Fragment: driver for macro-node elimination/expansion. Iterates the macro
// list to a fixed point, first removing eliminable locks and Opaque1/Opaque2
// nodes, then eliminable allocations. (Excerpt ends mid-switch.)
1958 bool PhaseMacroExpand::expand_macro_nodes() {
1959 if (C->macro_count() == 0)
1960 return false;
1961 // First, attempt to eliminate locks
1962 bool progress = true;
1963 while (progress) {
1964 progress = false;
// Walk backwards so removals from the macro list do not disturb the index.
1965 for (int i = C->macro_count(); i > 0; i--) {
1966 Node * n = C->macro_node(i-1);
1967 bool success = false;
1968 debug_only(int old_macro_count = C->macro_count(););
1969 if (n->is_AbstractLock()) {
1970 success = eliminate_locking_node(n->as_AbstractLock());
1971 } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
// Opaque nodes are pass-throughs at this point: splice in their input.
1972 _igvn.add_users_to_worklist(n);
1973 _igvn.hash_delete(n);
1974 _igvn.subsume_node(n, n->in(1));
1975 success = true;
1976 }
// Successful elimination must have shrunk the macro list (debug-only check).
1977 assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
1978 progress = progress || success;
1979 }
1980 }
1981 // Next, attempt to eliminate allocations
1982 progress = true;
1983 while (progress) {
1984 progress = false;
1985 for (int i = C->macro_count(); i > 0; i--) {
1986 Node * n = C->macro_node(i-1);
1987 bool success = false;
1988 debug_only(int old_macro_count = C->macro_count(););
1989 switch (n->class_id()) {
1990 case Node::Class_Allocate:
1991 case Node::Class_AllocateArray:
1992 success = eliminate_allocate_node(n->as_Allocate());
1993 break;
1994 case Node::Class_Lock:
|
// Fragment (refactored revision): tail of allocation elimination, same logic
// as the earlier chunk but using the combined _igvn.replace_node() helper in
// place of separate hash_delete()/subsume_node() calls.
789 assert(init->outcnt() <= 2, "only a control and memory projection expected");
790 Node *ctrl_proj = init->proj_out(TypeFunc::Control);
791 if (ctrl_proj != NULL) {
792 assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection");
// Control users of the Initialize move to the allocation's fall-through catch proj.
793 _igvn.replace_node(ctrl_proj, _fallthroughcatchproj);
794 }
795 Node *mem_proj = init->proj_out(TypeFunc::Memory);
796 if (mem_proj != NULL) {
797 Node *mem = init->in(TypeFunc::Memory);
798 #ifdef ASSERT
// Initialize's memory input may be a MergeMem; either way it must trace back
// to the allocation's fall-through memory projection.
799 if (mem->is_MergeMem()) {
800 assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection");
801 } else {
802 assert(mem == _memproj_fallthrough, "allocation memory projection");
803 }
804 #endif
// Memory users of the Initialize bypass it and use its memory input directly.
805 _igvn.replace_node(mem_proj, mem);
806 }
807 } else if (use->is_AddP()) {
808 // raw memory addresses used only by the initialization
// The initialization is going away, so these addresses are dead: kill with top.
809 _igvn.replace_node(use, C->top());
810 } else {
811 assert(false, "only Initialize or AddP expected");
812 }
// Replacements above removed uses from _resproj; adjust the iteration index by
// the number of outputs that disappeared (oc1 was the out-count beforehand).
813 j -= (oc1 - _resproj->outcnt());
814 }
815 }
// Finally detach the allocation call itself: each surviving projection is
// replaced by the corresponding input of the call; the exceptional (catchall)
// paths become dead and are replaced with top.
816 if (_fallthroughcatchproj != NULL) {
817 _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
818 }
819 if (_memproj_fallthrough != NULL) {
820 _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
821 }
822 if (_memproj_catchall != NULL) {
823 _igvn.replace_node(_memproj_catchall, C->top());
824 }
825 if (_ioproj_fallthrough != NULL) {
826 _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
827 }
828 if (_ioproj_catchall != NULL) {
829 _igvn.replace_node(_ioproj_catchall, C->top());
// Fragment (refactored revision): tail of expand_allocate_common, using
// _igvn.replace_node() instead of hash_delete()+subsume_node().
1273 // we end up with a call that has only 1 control projection
1274 if (_ioproj_catchall != NULL ) {
1275 for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
1276 Node *use = _ioproj_catchall->fast_out(i);
1277 _igvn.hash_delete(use);
// replace_input reports how many edges were removed; shrink imax accordingly
// so the fast iterator stays valid while we mutate the use list.
1278 imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough);
1279 _igvn._worklist.push(use);
1280 // back up iterator
1281 --i;
1282 }
1283 }
1284
1285 // if we generated only a slow call, we are done
1286 if (always_slow)
1287 return;
1288
1289
// Clone the old fall-through control projection to become the slow-path edge,
// then make all former users of it see the merged result_region instead.
1290 if (_fallthroughcatchproj != NULL) {
1291 ctrl = _fallthroughcatchproj->clone();
1292 transform_later(ctrl);
1293 _igvn.replace_node(_fallthroughcatchproj, result_region);
1294 } else {
1295 ctrl = top();
1296 }
1297 Node *slow_result;
1298 if (_resproj == NULL) {
1299 // no uses of the allocation result
1300 slow_result = top();
1301 } else {
// Same pattern for the result projection: the clone feeds the slow side of the
// phi, while all former users switch to the merged oop phi.
1302 slow_result = _resproj->clone();
1303 transform_later(slow_result);
1304 _igvn.replace_node(_resproj, result_phi_rawoop);
1305 }
1306
1307 // Plug slow-path into result merge point
1308 result_region ->init_req( slow_result_path, ctrl );
1309 result_phi_rawoop->init_req( slow_result_path, slow_result);
1310 result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough );
1311 transform_later(result_region);
1312 transform_later(result_phi_rawoop);
1313 transform_later(result_phi_rawmem);
1314 transform_later(result_phi_i_o);
1315 // This completes all paths into the result merge point
1316 }
1317
1318
1319 // Helper for PhaseMacroExpand::expand_allocate_common.
1320 // Initializes the newly-allocated storage.
// NOTE(review): signature continues beyond this excerpt; remaining parameters
// and the body are not visible here.
1321 Node*
1322 PhaseMacroExpand::initialize_object(AllocateNode* alloc,
1323 Node* control, Node* rawmem, Node* object,
1324 Node* klass_node, Node* length,
// Fragment (refactored revision): tail of lock/unlock elimination, same logic
// as the earlier chunk but using the combined _igvn.replace_node() helper.
1593 extract_call_projections(alock);
1594 // There are 2 projections from the lock. The lock node will
1595 // be deleted when its last use is subsumed below.
1596 assert(alock->outcnt() == 2 &&
1597 _fallthroughproj != NULL &&
1598 _memproj_fallthrough != NULL,
1599 "Unexpected projections from Lock/Unlock");
1600
1601 Node* fallthroughproj = _fallthroughproj;
1602 Node* memproj_fallthrough = _memproj_fallthrough;
1603
1604 // The memory projection from a lock/unlock is RawMem
1605 // The input to a Lock is merged memory, so extract its RawMem input
1606 // (unless the MergeMem has been optimized away.)
1607 if (alock->is_Lock()) {
1608 // Search for MemBarAcquire node and delete it also.
// A Lock's sole control user is the acquire barrier; splice its projections
// back onto the lock's fall-through projections so the barrier drops out.
1609 MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
1610 assert(membar != NULL && membar->Opcode() == Op_MemBarAcquire, "");
1611 Node* ctrlproj = membar->proj_out(TypeFunc::Control);
1612 Node* memproj = membar->proj_out(TypeFunc::Memory);
1613 _igvn.replace_node(ctrlproj, fallthroughproj);
1614 _igvn.replace_node(memproj, memproj_fallthrough);
1615
1616 // Delete FastLock node also if this Lock node is unique user
1617 // (a loop peeling may clone a Lock node).
1618 Node* flock = alock->as_Lock()->fastlock_node();
1619 if (flock->outcnt() == 1) {
1620 assert(flock->unique_out() == alock, "sanity");
1621 _igvn.replace_node(flock, top());
1622 }
1623 }
1624
1625 // Search for MemBarRelease node and delete it also.
// For an Unlock, the release barrier sits ABOVE the call: if the control/memory
// inputs come from a MemBar's projections, bypass the barrier entirely and
// retarget the fall-through locals to the barrier's own inputs.
1626 if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() &&
1627 ctrl->in(0)->is_MemBar()) {
1628 MemBarNode* membar = ctrl->in(0)->as_MemBar();
1629 assert(membar->Opcode() == Op_MemBarRelease &&
1630 mem->is_Proj() && membar == mem->in(0), "");
1631 _igvn.replace_node(fallthroughproj, ctrl);
1632 _igvn.replace_node(memproj_fallthrough, mem);
1633 fallthroughproj = ctrl;
1634 memproj_fallthrough = mem;
1635 ctrl = membar->in(TypeFunc::Control);
1636 mem = membar->in(TypeFunc::Memory);
1637 }
1638
// Final splice: users of the call's projections now see the call's inputs,
// making the Lock/Unlock node itself dead.
1639 _igvn.replace_node(fallthroughproj, ctrl);
1640 _igvn.replace_node(memproj_fallthrough, mem);
1641 return true;
1642 }
1643
1644
1645 //------------------------------expand_lock_node----------------------
// Expands a Lock macro node into an inline fast path plus a runtime slow call.
// NOTE(review): lines 1661-1851 are missing from this excerpt (the biased
// locking fast path and the slow-call construction); `call`, `region` and
// `mem_phi` used below are built in that missing section.
1646 void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
1647
1648 Node* ctrl = lock->in(TypeFunc::Control);
1649 Node* mem = lock->in(TypeFunc::Memory);
1650 Node* obj = lock->obj_node();
1651 Node* box = lock->box_node();
1652 Node* flock = lock->fastlock_node();
1653
1654 // Make the merge point
1655 Node *region;
1656 Node *mem_phi;
1657 Node *slow_path;
1658
1659 if (UseOptoBiasInlining) {
1660 /*
1852
1853 extract_call_projections(call);
1854
1855 // Slow path can only throw asynchronous exceptions, which are always
1856 // de-opted. So the compiler thinks the slow-call can never throw an
1857 // exception. If it DOES throw an exception we would need the debug
1858 // info removed first (since if it throws there is no monitor).
1859 assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
1860 _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
1861
1862 // Capture slow path
1863 // disconnect fall-through projection from call and create a new one
1864 // hook up users of fall-through projection to region
1865 Node *slow_ctrl = _fallthroughproj->clone();
1866 transform_later(slow_ctrl);
1867 _igvn.hash_delete(_fallthroughproj);
1868 _fallthroughproj->disconnect_inputs(NULL);
1869 region->init_req(1, slow_ctrl);
1870 // region inputs are now complete
1871 transform_later(region);
// All former users of the old fall-through control now see the merged region.
1872 _igvn.replace_node(_fallthroughproj, region);
1873
// Fresh memory projection feeds the slow side of the memory phi; users of the
// old fall-through memory projection are switched to the phi.
1874 Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) );
1875 mem_phi->init_req(1, memproj );
1876 transform_later(mem_phi);
1877 _igvn.replace_node(_memproj_fallthrough, mem_phi);
1878 }
1879
1880 //------------------------------expand_unlock_node----------------------
// Expands an Unlock macro node into an inline fast-unlock test plus a runtime
// slow call. NOTE(review): lines 1898-1914 are missing from this excerpt (the
// biased-locking region setup and mem_phi construction).
1881 void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
1882
1883 Node* ctrl = unlock->in(TypeFunc::Control);
1884 Node* mem = unlock->in(TypeFunc::Memory);
1885 Node* obj = unlock->obj_node();
1886 Node* box = unlock->box_node();
1887
1888 // No need for a null check on unlock
1889
1890 // Make the merge point
1891 Node *region;
1892 Node *mem_phi;
1893
1894 if (UseOptoBiasInlining) {
1895 // Check for biased locking unlock case, which is a no-op.
1896 // See the full description in MacroAssembler::biased_locking_exit().
1897 region = new (C, 4) RegionNode(4);
// Route failures of the inline FastUnlock test to the runtime call.
1915 Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);
1916
1917 CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box );
1918
1919 extract_call_projections(call);
1920
1921 assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
1922 _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
1923
1924 // No exceptions for unlocking
1925 // Capture slow path
1926 // disconnect fall-through projection from call and create a new one
1927 // hook up users of fall-through projection to region
1928 Node *slow_ctrl = _fallthroughproj->clone();
1929 transform_later(slow_ctrl);
1930 _igvn.hash_delete(_fallthroughproj);
1931 _fallthroughproj->disconnect_inputs(NULL);
1932 region->init_req(1, slow_ctrl);
1933 // region inputs are now complete
1934 transform_later(region);
// All former users of the old fall-through control now see the merged region.
1935 _igvn.replace_node(_fallthroughproj, region);
1936
// Memory merge: slow path (req 1) gets the call's memory projection, fast path
// (req 2) keeps the incoming memory unchanged.
1937 Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) );
1938 mem_phi->init_req(1, memproj );
1939 mem_phi->init_req(2, mem);
1940 transform_later(mem_phi);
1941 _igvn.replace_node(_memproj_fallthrough, mem_phi);
1942 }
1943
1944 //------------------------------expand_macro_nodes----------------------
1945 // Returns true if a failure occurred.
// Fragment (refactored revision): driver for macro-node elimination/expansion.
// Iterates the macro list to a fixed point, first removing eliminable locks
// and Opaque1/Opaque2 nodes, then eliminable allocations. (Ends mid-switch.)
1946 bool PhaseMacroExpand::expand_macro_nodes() {
1947 if (C->macro_count() == 0)
1948 return false;
1949 // First, attempt to eliminate locks
1950 bool progress = true;
1951 while (progress) {
1952 progress = false;
// Walk backwards so removals from the macro list do not disturb the index.
1953 for (int i = C->macro_count(); i > 0; i--) {
1954 Node * n = C->macro_node(i-1);
1955 bool success = false;
1956 debug_only(int old_macro_count = C->macro_count(););
1957 if (n->is_AbstractLock()) {
1958 success = eliminate_locking_node(n->as_AbstractLock());
1959 } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
// Opaque nodes are pass-throughs at this point: splice in their input.
// (replace_node also queues affected users for IGVN.)
1960 _igvn.replace_node(n, n->in(1));
1961 success = true;
1962 }
// Successful elimination must have shrunk the macro list (debug-only check).
1963 assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
1964 progress = progress || success;
1965 }
1966 }
1967 // Next, attempt to eliminate allocations
1968 progress = true;
1969 while (progress) {
1970 progress = false;
1971 for (int i = C->macro_count(); i > 0; i--) {
1972 Node * n = C->macro_node(i-1);
1973 bool success = false;
1974 debug_only(int old_macro_count = C->macro_count(););
1975 switch (n->class_id()) {
1976 case Node::Class_Allocate:
1977 case Node::Class_AllocateArray:
1978 success = eliminate_allocate_node(n->as_Allocate());
1979 break;
1980 case Node::Class_Lock:
|