void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // ... (method prologue elided; it sets up O2_bumped_count, used below) ...

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide) __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else         __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);
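  // The displacement sits right after the opcode in the bytecode stream:
  // two big-endian bytes for a plain branch, four for goto_w/jsr_w. It is
  // signed and relative to the branch opcode itself (e.g. a branch at
  // bci 20 with displacement -6 targets bci 14). set_CC makes the load set
  // the integer condition codes, so the branch-direction test below can
  // key off the sign of O1_disp without a separate compare.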

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
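    // In effect: Otos_i = (Lbcp - ConstMethod*) - codes_offset + (is_wide ? 5 : 3).
    // The first two terms give the bci of the jsr itself; adding the
    // instruction length (3 bytes for jsr, 5 for jsr_w) yields the bci of
    // the following bytecode, which is what a later 'ret' must return to.
    // E.g. a jsr at bci 10 pushes bci 13.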

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos, 0, true);
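    // The trailing 'true' is dispatch_next's generate_poll argument: it
    // emits a safepoint poll at the branch target, so a jsr cannot delay a
    // thread-local handshake indefinitely.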
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov( Lbcp, l_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false, Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
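    // The add sits in the branch's delay slot and is not annulled (second
    // argument 'false'), so it executes whether or not we jump to Lforward:
    // Lbcp is bumped to the target on both paths, and only the counter/OSR
    // bookkeeping below is skipped for forward (positive) displacements.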

    const Register G3_method_counters = G3_scratch;
    __ get_method_counters(Lmethod, G3_method_counters, Lforward);
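    // get_method_counters loads Lmethod's MethodCounters*, allocating it on
    // first use; if allocation fails it branches to Lforward and the branch
    // is taken without any counter bookkeeping.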

    if (TieredCompilation) {
      // ... (TieredCompilation counter-update path elided) ...
    } else { // not TieredCompilation
      // Update the backedge counter separately from invocations.
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
        }
      }
    }
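    // test_backedge_count_for_osr compares the bumped count against the
    // interpreter's backward-branch limit and, past it, calls into the
    // runtime to request an OSR compilation, passing l_cur_bcp (the
    // pre-branch bcp) so the runtime can identify the backedge.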

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp ); // add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos, 0, true);
}
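
// (dispatch_next loads the bytecode at Lbcp and jumps through the active
// dispatch table; the %%%%% note above suggests fetching the target bytecode
// during the branch arithmetic so the cheaper dispatch_only, which skips the
// load, could be used here instead.)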


// Note: the Condition argument is TemplateTable::Condition,
// whose scope is this class, not Assembler::Condition.

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}
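
// ccNot(cc) inverts the test because if_cmp (not shown here) branches
// *around* the taken path: it jumps to a not_taken label when the inverted
// condition holds and otherwise falls through into the branch() code above.
// For ifeq (cc == equal), e.g., the emitted test is
// "if (Otos_i != 0) goto not_taken".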

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}

// ... (if_nullcmp and if_acmp elided) ...

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result. The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz;
    __ set(65536, G3_scratch);
    __ cmp(Otos_i, G3_scratch);
    __ bp(Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind(zzz);
  }
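  // A method's bytecode array must be shorter than 65536 bytes (JVMS Code
  // attribute limit), so a valid BCI always fits in the low 16 bits; doing
  // the compare on xcc (the 64-bit condition codes) catches any stale bits
  // in the high half of the register.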
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
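  // Inverse of the bci computation in branch():
  // Lbcp = ConstMethod* + codes_offset + bci, i.e. the bcp of the bytecode
  // the jsr recorded as its return point (bci 13 in the example above).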
  __ dispatch_next(vtos, 0, true);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  // ... (remainder of tableswitch elided) ...