
src/cpu/x86/vm/c1_Runtime1_x86.cpp

rev 11777 : [mq]: gcinterface.patch
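
The listing below shows the same hunk twice: first as it was before the patch (looking the barrier set up through Universe::heap()), then as patched (looking it up through GC::gc()->heap()). As a rough sketch only, the accessor chain the new code relies on could be shaped as follows; the class layout here is an assumption for illustration, not the actual patch, and the stand-in types exist only to keep the sketch self-contained:

// Hypothetical stand-ins so this sketch compiles on its own; the real
// HotSpot CollectedHeap and BarrierSet types are far richer than this.
class BarrierSet {};

class CollectedHeap {
 public:
  BarrierSet* barrier_set() const { return _bs; }
 private:
  BarrierSet* _bs = nullptr;
};

// Assumed shape of the GC interface object: the active collector owns the
// heap, and runtime stubs ask GC::gc() for it instead of calling
// Universe::heap() directly.
class GC {
 public:
  static GC* gc() { return &_instance; }      // global accessor used in the new frame
  CollectedHeap* heap() { return &_heap; }    // heap of the currently selected GC
 private:
  static GC _instance;
  CollectedHeap _heap;
};

GC GC::_instance;
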


1590         __ xorptr(rax, rax);
1591 #else
1592         __ xorptr(rax, rax);
1593 #endif // _LP64
1594 
1595         __ bind(do_return);
1596         __ addptr(rsp, 32);
1597         LP64_ONLY(__ pop(rdx);)
1598         __ pop(rcx);
1599         __ pop(rsi);
1600         __ ret(0);
1601       }
1602       break;
1603 
1604 #if INCLUDE_ALL_GCS
1605     case g1_pre_barrier_slow_id:
1606       {
1607         StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1608         // arg0 : previous value of memory
1609 
1610         BarrierSet* bs = Universe::heap()->barrier_set();
1611         if (bs->kind() != BarrierSet::G1SATBCTLogging) {
1612           __ movptr(rax, (int)id);
1613           __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1614           __ should_not_reach_here();
1615           break;
1616         }
1617         __ push(rax);
1618         __ push(rdx);
1619 
1620         const Register pre_val = rax;
1621         const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1622         const Register tmp = rdx;
1623 
1624         NOT_LP64(__ get_thread(thread);)
1625 
1626         Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1627                                              SATBMarkQueue::byte_offset_of_index()));
1628         Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1629                                         SATBMarkQueue::byte_offset_of_buf()));
1630 


1654         __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1655 
1656         restore_live_registers(sasm);
1657 
1658         __ bind(done);
1659 
1660         __ pop(rdx);
1661         __ pop(rax);
1662       }
1663       break;
1664 
1665     case g1_post_barrier_slow_id:
1666       {
1667         StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1668 
1669 
1670         // arg0: store_address
1671         Address store_addr(rbp, 2*BytesPerWord);
1672 
1673         CardTableModRefBS* ct =
1674           barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
1675         assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1676 
1677         Label done;
1678         Label enqueued;
1679         Label runtime;
1680 
1681         // At this point we know new_value is non-NULL and the new_value crosses regions.
1682         // Must check to see if card is already dirty
1683 
1684         const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1685 
1686         Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1687                                              DirtyCardQueue::byte_offset_of_index()));
1688         Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1689                                         DirtyCardQueue::byte_offset_of_buf()));
1690 
1691         __ push(rax);
1692         __ push(rcx);
1693 
1694         const Register cardtable = rax;




1590         __ xorptr(rax, rax);
1591 #else
1592         __ xorptr(rax, rax);
1593 #endif // _LP64
1594 
1595         __ bind(do_return);
1596         __ addptr(rsp, 32);
1597         LP64_ONLY(__ pop(rdx);)
1598         __ pop(rcx);
1599         __ pop(rsi);
1600         __ ret(0);
1601       }
1602       break;
1603 
1604 #if INCLUDE_ALL_GCS
1605     case g1_pre_barrier_slow_id:
1606       {
1607         StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1608         // arg0 : previous value of memory
1609 
1610         BarrierSet* bs = GC::gc()->heap()->barrier_set();
1611         if (bs->kind() != BarrierSet::G1SATBCTLogging) {
1612           __ movptr(rax, (int)id);
1613           __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1614           __ should_not_reach_here();
1615           break;
1616         }
1617         __ push(rax);
1618         __ push(rdx);
1619 
1620         const Register pre_val = rax;
1621         const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1622         const Register tmp = rdx;
1623 
1624         NOT_LP64(__ get_thread(thread);)
1625 
1626         Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1627                                              SATBMarkQueue::byte_offset_of_index()));
1628         Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1629                                         SATBMarkQueue::byte_offset_of_buf()));
1630 


1654         __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1655 
1656         restore_live_registers(sasm);
1657 
1658         __ bind(done);
1659 
1660         __ pop(rdx);
1661         __ pop(rax);
1662       }
1663       break;
1664 
1665     case g1_post_barrier_slow_id:
1666       {
1667         StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1668 
1669 
1670         // arg0: store_address
1671         Address store_addr(rbp, 2*BytesPerWord);
1672 
1673         CardTableModRefBS* ct =
1674           barrier_set_cast<CardTableModRefBS>(GC::gc()->heap()->barrier_set());
1675         assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1676 
1677         Label done;
1678         Label enqueued;
1679         Label runtime;
1680 
1681         // At this point we know new_value is non-NULL and the new_value crosses regions.
1682         // Must check to see if card is already dirty
1683 
1684         const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1685 
1686         Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1687                                              DirtyCardQueue::byte_offset_of_index()));
1688         Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1689                                         DirtyCardQueue::byte_offset_of_buf()));
1690 
1691         __ push(rax);
1692         __ push(rcx);
1693 
1694         const Register cardtable = rax;
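
Stripped of the unchanged context, the whole functional change in this hunk (new-frame lines 1610 and 1674) is the lookup shown below; the barrier stub code itself is untouched:

// before the patch
BarrierSet* bs = Universe::heap()->barrier_set();
// after the patch
BarrierSet* bs = GC::gc()->heap()->barrier_set();
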

