src/cpu/x86/vm/templateTable_x86_32.cpp

rev 5510 : 8027252: Crash in interpreter because get_unsigned_2_byte_index_at_bcp reads 4 bytes
Summary: Use 2-byte loads to load indexes from the byte code stream to avoid out of bounds reads.
Reviewed-by:


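The first copy of each region below is the code before the fix, which pulls operands out of the bytecode stream with 4-byte movl loads; the second copy is the patched version, which uses load_unsigned_short / load_signed_short so that only the operand's own two bytes are read. As a minimal standalone illustration of the summary above (not HotSpot code; the helper name and constants are made up), consider a method whose last instruction is a wide form ending in a 2-byte index:

#include <cstddef>
#include <cstdio>

// Whether a load of `load_size` bytes starting at `operand_off` stays inside a
// bytecode stream of `code_len` bytes.
static bool load_in_bounds(size_t code_len, size_t operand_off, size_t load_size) {
  return operand_off + load_size <= code_len;
}

int main() {
  // A method whose last instruction is "wide iload <u2 index>": the index is the
  // final two bytes of the stream, at offset code_len - 2.
  const size_t code_len    = 6;
  const size_t operand_off = code_len - 2;   // what at_bcp(2) points at here

  printf("2-byte load in bounds: %d\n", load_in_bounds(code_len, operand_off, 2)); // 1
  printf("4-byte load in bounds: %d\n", load_in_bounds(code_len, operand_off, 4)); // 0
  // The old code's 4-byte load reads two bytes past the stream; if those bytes fall
  // on an unmapped page, the interpreter faults -- the crash described above.
  return 0;
}
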
 541   locals_index(rbx);
 542   __ fld_s(faddress(rbx));
 543 }
 544 
 545 
 546 void TemplateTable::dload() {
 547   transition(vtos, dtos);
 548   locals_index(rbx);
 549   __ fld_d(daddress(rbx));
 550 }
 551 
 552 
 553 void TemplateTable::aload() {
 554   transition(vtos, atos);
 555   locals_index(rbx);
 556   __ movptr(rax, aaddress(rbx));
 557 }
 558 
 559 
 560 void TemplateTable::locals_index_wide(Register reg) {
 561   __ movl(reg, at_bcp(2));
 562   __ bswapl(reg);
 563   __ shrl(reg, 16);
 564   __ negptr(reg);
 565 }
 566 
 567 
 568 void TemplateTable::wide_iload() {
 569   transition(vtos, itos);
 570   locals_index_wide(rbx);
 571   __ movl(rax, iaddress(rbx));
 572 }
 573 
 574 
 575 void TemplateTable::wide_lload() {
 576   transition(vtos, ltos);
 577   locals_index_wide(rbx);
 578   __ movptr(rax, laddress(rbx));
 579   NOT_LP64(__ movl(rdx, haddress(rbx)));
 580 }
 581 


1535     __ fld_s(at_rsp());
1536   } else {
1537     __ fld_d(at_rsp());
1538     __ pop(rdx);
1539   }
1540   __ pop(rcx);
1541   __ fcmp2int(rax, unordered_result < 0);
1542 }
1543 
1544 
1545 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1546   __ get_method(rcx);           // ECX holds method
1547   __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
1548 
1549   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
1550                              InvocationCounter::counter_offset();
1551   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
1552                               InvocationCounter::counter_offset();
1553 
1554   // Load up EDX with the branch displacement
1555   __ movl(rdx, at_bcp(1));
1556   __ bswapl(rdx);
1557   if (!is_wide) __ sarl(rdx, 16);
1558   LP64_ONLY(__ movslq(rdx, rdx));
1559 
1560 
1561   // Handle all the JSR stuff here, then exit.
1562   // It's much shorter and cleaner than intermingling with the
1563   // non-JSR normal-branch stuff occurring below.
1564   if (is_jsr) {
1565     // Pre-load the next target bytecode into EBX
1566     __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
1567 
1568     // compute return address as bci in rax,
1569     __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset())));
1570     __ subptr(rax, Address(rcx, Method::const_offset()));
1571     // Adjust the bcp in RSI by the displacement in EDX
1572     __ addptr(rsi, rdx);
1573     // Push return address
1574     __ push_i(rax);
1575     // jsr returns vtos


 541   locals_index(rbx);
 542   __ fld_s(faddress(rbx));
 543 }
 544 
 545 
 546 void TemplateTable::dload() {
 547   transition(vtos, dtos);
 548   locals_index(rbx);
 549   __ fld_d(daddress(rbx));
 550 }
 551 
 552 
 553 void TemplateTable::aload() {
 554   transition(vtos, atos);
 555   locals_index(rbx);
 556   __ movptr(rax, aaddress(rbx));
 557 }
 558 
 559 
 560 void TemplateTable::locals_index_wide(Register reg) {
 561   __ load_unsigned_short(reg, at_bcp(2));
 562   __ bswapl(reg);
 563   __ shrl(reg, 16);
 564   __ negptr(reg);
 565 }
 566 
 567 
 568 void TemplateTable::wide_iload() {
 569   transition(vtos, itos);
 570   locals_index_wide(rbx);
 571   __ movl(rax, iaddress(rbx));
 572 }
 573 
 574 
 575 void TemplateTable::wide_lload() {
 576   transition(vtos, ltos);
 577   locals_index_wide(rbx);
 578   __ movptr(rax, laddress(rbx));
 579   NOT_LP64(__ movl(rdx, haddress(rbx)));
 580 }
 581 
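For reference, a standalone sketch (not HotSpot code; the helper name and test bytes are made up, little-endian host assumed, as on x86) of what the patched locals_index_wide above computes: load_unsigned_short (movzwl) touches only the two index bytes at bcp+2, bswapl plus shrl 16 recover the big-endian u2 index, and negptr negates it because locals are addressed downward from the locals base (rdi in this port).

#include <cstdint>
#include <cstdio>

// Host-side model of the patched locals_index_wide:
//   load_unsigned_short -> movzwl: reads only bcp[2] and bcp[3]
//   bswapl + shrl(16)   -> turn the big-endian u2 operand into the index
//   negptr              -> negate for downward indexing from the locals base
static intptr_t wide_local_index(const uint8_t* bcp) {
  uint32_t reg = static_cast<uint32_t>(bcp[2]) |
                 (static_cast<uint32_t>(bcp[3]) << 8);              // movzwl (2-byte load)
  reg = ((reg & 0x000000FFu) << 24) | ((reg & 0x0000FF00u) << 8) |
        ((reg & 0x00FF0000u) >> 8)  | ((reg & 0xFF000000u) >> 24);  // bswapl
  reg >>= 16;                                                       // shrl(reg, 16)
  return -static_cast<intptr_t>(reg);                               // negptr
}

int main() {
  const uint8_t wide_iload[] = { 0xC4, 0x15, 0x01, 0x2C };    // wide iload, index 0x012C == 300
  printf("%ld\n", (long)wide_local_index(wide_iload));        // prints -300
  return 0;
}
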


1535     __ fld_s(at_rsp());
1536   } else {
1537     __ fld_d(at_rsp());
1538     __ pop(rdx);
1539   }
1540   __ pop(rcx);
1541   __ fcmp2int(rax, unordered_result < 0);
1542 }
1543 
1544 
1545 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1546   __ get_method(rcx);           // ECX holds method
1547   __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
1548 
1549   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
1550                              InvocationCounter::counter_offset();
1551   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
1552                               InvocationCounter::counter_offset();
1553 
1554   // Load up EDX with the branch displacement
1555   if (is_wide) {
1556     __ movl(rdx, at_bcp(1));
1557   } else {
1558     __ load_signed_short(rdx, at_bcp(1));
1559   }
1560   __ bswapl(rdx);
1561   if (!is_wide) __ sarl(rdx, 16);
1562   LP64_ONLY(__ movslq(rdx, rdx));
1563 
1564 
1565   // Handle all the JSR stuff here, then exit.
1566   // It's much shorter and cleaner than intermingling with the
1567   // non-JSR normal-branch stuff occurring below.
1568   if (is_jsr) {
1569     // Pre-load the next target bytecode into EBX
1570     __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
1571 
1572     // compute return address as bci in rax,
1573     __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset())));
1574     __ subptr(rax, Address(rcx, Method::const_offset()));
1575     // Adjust the bcp in RSI by the displacement in EDX
1576     __ addptr(rsi, rdx);
1577     // Push return address
1578     __ push_i(rax);
1579     // jsr returns vtos
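
Similarly, a sketch (again not HotSpot code; names and bytes are made up, little-endian host assumed) of the non-wide path of the branch displacement load above: load_signed_short (movswl) touches only the two offset bytes at bcp+1, and bswapl followed by sarl 16 yields the sign-extended big-endian 16-bit branch offset. The wide path (goto_w / jsr_w) keeps the 4-byte movl because its operand really is four bytes, so that load never runs past the instruction.

#include <cstdint>
#include <cstdio>

// Host-side model of the non-wide displacement load in the patched branch():
//   load_signed_short -> movswl: reads only bcp[1] and bcp[2], sign-extended to 32 bits
//   bswapl + sarl(16) -> recover the big-endian signed 16-bit branch offset
static int32_t short_branch_offset(const uint8_t* bcp) {
  uint32_t lo16 = static_cast<uint32_t>(bcp[1]) |
                  (static_cast<uint32_t>(bcp[2]) << 8);                  // movswl: LE 2-byte load...
  uint32_t rdx  = (lo16 & 0x8000u) ? (lo16 | 0xFFFF0000u) : lo16;        // ...plus sign extension
  rdx = ((rdx & 0x000000FFu) << 24) | ((rdx & 0x0000FF00u) << 8) |
        ((rdx & 0x00FF0000u) >> 8)  | ((rdx & 0xFF000000u) >> 24);       // bswapl(rdx)
  uint32_t hi16 = rdx >> 16;                                             // sarl(rdx, 16):
  return (hi16 & 0x8000u) ? static_cast<int32_t>(hi16) - 0x10000        // keep the sign of the
                          : static_cast<int32_t>(hi16);                  // big-endian offset
}

int main() {
  const uint8_t goto_bc[] = { 0xA7, 0xFF, 0xF6 };   // goto with big-endian offset 0xFFF6 == -10
  printf("%d\n", short_branch_offset(goto_bc));     // prints -10
  return 0;
}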