
src/hotspot/cpu/x86/c1_Runtime1_x86.cpp


old/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp

1546         __ xorptr(rax, rax);
1547 #else
1548         __ xorptr(rax, rax);
1549 #endif // _LP64
1550 
1551         __ bind(do_return);
1552         __ addptr(rsp, 32);
1553         LP64_ONLY(__ pop(rdx);)
1554         __ pop(rcx);
1555         __ pop(rsi);
1556         __ ret(0);
1557       }
1558       break;
1559 
1560 #if INCLUDE_ALL_GCS
1561     case g1_pre_barrier_slow_id:
1562       {
1563         StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1564         // arg0 : previous value of memory
1565 
1566         BarrierSet* bs = Universe::heap()->barrier_set();
1567         if (bs->kind() != BarrierSet::G1BarrierSet) {
1568           __ movptr(rax, (int)id);
1569           __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1570           __ should_not_reach_here();
1571           break;
1572         }
1573         __ push(rax);
1574         __ push(rdx);
1575 
1576         const Register pre_val = rax;
1577         const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1578         const Register tmp = rdx;
1579 
1580         NOT_LP64(__ get_thread(thread);)
1581 
1582         Address queue_active(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
1583         Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
1584         Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
1585 
1586         Label done;


1613 
1614         save_live_registers(sasm, 3);
1615 
1616         // load the pre-value
1617         f.load_argument(0, rcx);
1618         __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1619 
1620         restore_live_registers(sasm);
1621 
1622         __ bind(done);
1623 
1624         __ pop(rdx);
1625         __ pop(rax);
1626       }
1627       break;
1628 
1629     case g1_post_barrier_slow_id:
1630       {
1631         StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1632 
1633         BarrierSet* bs = Universe::heap()->barrier_set();
1634         if (bs->kind() != BarrierSet::G1BarrierSet) {
1635           __ movptr(rax, (int)id);
1636           __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1637           __ should_not_reach_here();
1638           break;
1639         }
1640 
1641         // arg0: store_address
1642         Address store_addr(rbp, 2*BytesPerWord);
1643 
1644         Label done;
1645         Label enqueued;
1646         Label runtime;
1647 
1648         // At this point we know new_value is non-NULL and the new_value crosses regions.
1649         // Must check to see if card is already dirty
1650 
1651         const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1652 
1653         Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));

new/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp

1546         __ xorptr(rax, rax);
1547 #else
1548         __ xorptr(rax, rax);
1549 #endif // _LP64
1550 
1551         __ bind(do_return);
1552         __ addptr(rsp, 32);
1553         LP64_ONLY(__ pop(rdx);)
1554         __ pop(rcx);
1555         __ pop(rsi);
1556         __ ret(0);
1557       }
1558       break;
1559 
1560 #if INCLUDE_ALL_GCS
1561     case g1_pre_barrier_slow_id:
1562       {
1563         StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1564         // arg0 : previous value of memory
1565 
1566         BarrierSet* bs = BarrierSet::barrier_set();
1567         if (bs->kind() != BarrierSet::G1BarrierSet) {
1568           __ movptr(rax, (int)id);
1569           __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1570           __ should_not_reach_here();
1571           break;
1572         }
1573         __ push(rax);
1574         __ push(rdx);
1575 
1576         const Register pre_val = rax;
1577         const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1578         const Register tmp = rdx;
1579 
1580         NOT_LP64(__ get_thread(thread);)
1581 
1582         Address queue_active(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
1583         Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
1584         Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
1585 
1586         Label done;


1613 
1614         save_live_registers(sasm, 3);
1615 
1616         // load the pre-value
1617         f.load_argument(0, rcx);
1618         __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1619 
1620         restore_live_registers(sasm);
1621 
1622         __ bind(done);
1623 
1624         __ pop(rdx);
1625         __ pop(rax);
1626       }
1627       break;
1628 
1629     case g1_post_barrier_slow_id:
1630       {
1631         StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1632 
1633         BarrierSet* bs = BarrierSet::barrier_set();
1634         if (bs->kind() != BarrierSet::G1BarrierSet) {
1635           __ movptr(rax, (int)id);
1636           __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1637           __ should_not_reach_here();
1638           break;
1639         }
1640 
1641         // arg0: store_address
1642         Address store_addr(rbp, 2*BytesPerWord);
1643 
1644         Label done;
1645         Label enqueued;
1646         Label runtime;
1647 
1648         // At this point we know new_value is non-NULL and the new_value crosses regions.
1649         // Must check to see if card is already dirty
1650 
1651         const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1652 
1653         Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
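
The only functional change visible in this hunk (lines 1566 and 1633) is how the stubs obtain the current barrier set: the lookup through the heap object, Universe::heap()->barrier_set(), is replaced by the static accessor BarrierSet::barrier_set(), while the kind() check against BarrierSet::G1BarrierSet stays the same. The sketch below is a minimal, self-contained model of that pattern, not HotSpot code; the class bodies, the set_barrier_set() hook, the CollectedHeap stand-in, and the CardTableBarrierSet enumerator are assumptions made purely for illustration.

    // Simplified model: replacing a barrier-set lookup through the heap object
    // (old: Universe::heap()->barrier_set()) with a static accessor on the
    // barrier-set class itself (new: BarrierSet::barrier_set()).
    // Class bodies are illustrative only, not HotSpot's real declarations.
    #include <cassert>
    #include <cstdio>

    class BarrierSet {
     public:
      enum Name { G1BarrierSet, CardTableBarrierSet };  // enumerator names follow the diff's kind() check

      explicit BarrierSet(Name kind) : _kind(kind) {}
      Name kind() const { return _kind; }

      // New style: one global barrier set, queried directly on the class.
      static BarrierSet* barrier_set() { return _barrier_set; }
      static void set_barrier_set(BarrierSet* bs) { _barrier_set = bs; }  // hypothetical setup hook

     private:
      Name _kind;
      static BarrierSet* _barrier_set;
    };

    BarrierSet* BarrierSet::_barrier_set = nullptr;

    // Old style: callers reached the barrier set through the heap object.
    class CollectedHeap {
     public:
      explicit CollectedHeap(BarrierSet* bs) : _barrier_set(bs) {}
      BarrierSet* barrier_set() const { return _barrier_set; }
     private:
      BarrierSet* _barrier_set;
    };

    int main() {
      BarrierSet g1(BarrierSet::G1BarrierSet);
      BarrierSet::set_barrier_set(&g1);
      CollectedHeap heap(&g1);

      // Old form, as removed by this patch:
      BarrierSet* bs_old = heap.barrier_set();
      // New form, as introduced by this patch:
      BarrierSet* bs_new = BarrierSet::barrier_set();
      assert(bs_old == bs_new);  // both paths name the same global barrier set

      // Same guard the g1_pre/post_barrier stubs emit before generating G1 code.
      if (bs_new->kind() != BarrierSet::G1BarrierSet) {
        std::printf("not a G1 barrier set\n");
      }
      return 0;
    }

Both stubs keep the same fallback behaviour on a kind() mismatch: load the stub id into rax, call unimplemented_entry through call_RT, and mark the path unreachable with should_not_reach_here().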

