src/share/vm/c1/c1_LIRGenerator.cpp
Sdiff for 6965570: the old version of the changed hunk is shown first, followed by the new version.

Old version:

1542     // we need to patch the offset in the instruction so don't allow
1543     // generate_address to try to be smart about emitting the -1.
1544     // Otherwise the patching code won't know how to find the
1545     // instruction to patch.
1546     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1547   } else {
1548     address = generate_address(object.result(), x->offset(), field_type);
1549   }
1550 
1551   if (is_volatile && os::is_MP()) {
1552     __ membar_release();
1553   }
1554 
1555   if (is_oop) {
1556     // Do the pre-write barrier, if any.
1557     pre_barrier(LIR_OprFact::address(address),
1558                 needs_patching,
1559                 (info ? new CodeEmitInfo(info) : NULL));
1560   }
1561 
1562   if (is_volatile) {
1563     assert(!needs_patching && x->is_loaded(),
1564            "how do we know it's volatile if it's not loaded");
1565     volatile_field_store(value.result(), address, info);
1566   } else {
1567     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1568     __ store(value.result(), address, info, patch_code);
1569   }
1570 
1571   if (is_oop) {
1572     // Store to object so mark the card of the header
1573     post_barrier(object.result(), value.result());
1574   }
1575 
1576   if (is_volatile && os::is_MP()) {
1577     __ membar();
1578   }
1579 }
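
For a volatile field, the store above is bracketed by membar_release before and a full membar after, and the matching do_LoadField below ends with membar_acquire; this is the standard barrier placement for Java volatile accesses on multiprocessor machines. As a rough analogue in standard C++ (an illustrative sketch only, not HotSpot code; std::atomic and the explicit fences stand in for the LIR membars):

    // Rough analogue of the barrier placement in do_StoreField / do_LoadField:
    //   membar_release ~ the release fence before the store
    //   membar         ~ the trailing full (StoreLoad) fence
    //   membar_acquire ~ the acquire fence after the load
    #include <atomic>
    #include <cstdio>

    std::atomic<int> field{0};   // stands in for a Java volatile int field

    void volatile_store(int v) {
      std::atomic_thread_fence(std::memory_order_release);  // membar_release
      field.store(v, std::memory_order_relaxed);             // the store itself
      std::atomic_thread_fence(std::memory_order_seq_cst);   // trailing full membar
    }

    int volatile_load() {
      int v = field.load(std::memory_order_relaxed);         // the load itself
      std::atomic_thread_fence(std::memory_order_acquire);   // membar_acquire
      return v;
    }

    int main() {
      volatile_store(42);
      std::printf("%d\n", volatile_load());
      return 0;
    }
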
1580 
1581 
1582 void LIRGenerator::do_LoadField(LoadField* x) {
1583   bool needs_patching = x->needs_patching();
1584   bool is_volatile = x->field()->is_volatile();


1610 
1611   if (x->needs_null_check() &&
1612       (needs_patching ||
1613        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1614     // emit an explicit null check because the offset is too large
1615     __ null_check(object.result(), new CodeEmitInfo(info));
1616   }
1617 
1618   LIR_Opr reg = rlock_result(x, field_type);
1619   LIR_Address* address;
1620   if (needs_patching) {
1621     // we need to patch the offset in the instruction so don't allow
1622     // generate_address to try to be smart about emitting the -1.
1623     // Otherwise the patching code won't know how to find the
1624     // instruction to patch.
1625     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1626   } else {
1627     address = generate_address(object.result(), x->offset(), field_type);
1628   }
1629 
1630   if (is_volatile) {
1631     assert(!needs_patching && x->is_loaded(),
1632            "how do we know it's volatile if it's not loaded");
1633     volatile_field_load(address, reg, info);
1634   } else {
1635     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1636     __ load(address, reg, info, patch_code);
1637   }
1638 
1639   if (is_volatile && os::is_MP()) {
1640     __ membar_acquire();
1641   }
1642 }
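
Both do_StoreField and do_LoadField emit an explicit null_check only when the access needs patching or when MacroAssembler::needs_explicit_null_check() says the field offset is too large; for ordinary small offsets the access itself doubles as the null check, because dereferencing a null oop plus a small offset faults in the protected page at the bottom of the address space. A simplified sketch of that kind of predicate (illustrative only; the page size and cutoff are assumptions here, and the real needs_explicit_null_check() is platform-specific):

    // Simplified sketch: an implicit (trap-based) null check only works while
    // null + offset still lands inside the protected page at address zero.
    #include <cstddef>
    #include <cstdio>

    static bool needs_explicit_null_check(std::ptrdiff_t offset) {
      const std::ptrdiff_t page_size = 4096;     // assumed value for illustration
      return offset < 0 || offset >= page_size;  // outside the trapping range
    }

    int main() {
      std::printf("%d\n", needs_explicit_null_check(16));       // small offset: implicit check suffices
      std::printf("%d\n", needs_explicit_null_check(1 << 20));  // huge offset: explicit check needed
      return 0;
    }
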
1643 
1644 
1645 //------------------------java.nio.Buffer.checkIndex------------------------
1646 
1647 // int java.nio.Buffer.checkIndex(int)
1648 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1649   // NOTE: by the time we are in checkIndex() we are guaranteed that
1650   // the buffer is non-null (because checkIndex is package-private and
1651   // only called from within other methods in the buffer).
1652   assert(x->number_of_arguments() == 2, "wrong type");


New version:

1542     // we need to patch the offset in the instruction so don't allow
1543     // generate_address to try to be smart about emitting the -1.
1544     // Otherwise the patching code won't know how to find the
1545     // instruction to patch.
1546     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1547   } else {
1548     address = generate_address(object.result(), x->offset(), field_type);
1549   }
1550 
1551   if (is_volatile && os::is_MP()) {
1552     __ membar_release();
1553   }
1554 
1555   if (is_oop) {
1556     // Do the pre-write barrier, if any.
1557     pre_barrier(LIR_OprFact::address(address),
1558                 needs_patching,
1559                 (info ? new CodeEmitInfo(info) : NULL));
1560   }
1561 
1562   if (is_volatile && !needs_patching) {
1563     volatile_field_store(value.result(), address, info);
1564   } else {
1565     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1566     __ store(value.result(), address, info, patch_code);
1567   }
1568 
1569   if (is_oop) {
1570     // Store to object so mark the card of the header
1571     post_barrier(object.result(), value.result());
1572   }
1573 
1574   if (is_volatile && os::is_MP()) {
1575     __ membar();
1576   }
1577 }
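
The functional change in this hunk is visible just above: the old version asserted !needs_patching for a volatile access and always used volatile_field_store() / volatile_field_load(), while the new version tests is_volatile && !needs_patching, so a volatile access to a field whose holder class is not yet resolved falls back to the ordinary patched store or load (the surrounding membars are still emitted, keyed only on is_volatile). The patched path exists because the field offset is unknown at compile time: the instruction is emitted with a fixed -1 placeholder (PATCHED_ADDR) that the runtime later overwrites, which is why the comment warns generate_address not to be smart about the -1. A toy illustration of that idea (not HotSpot code; the struct, offsets, and patch bookkeeping are made up):

    // Toy illustration of offset patching: emit an access with a dummy -1
    // displacement, remember where it is, and overwrite it once the field's
    // real offset becomes known after class resolution.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Access {
      std::int32_t displacement;         // -1 until patched, like PATCHED_ADDR
    };

    int main() {
      std::vector<Access> code;
      code.push_back(Access{-1});        // emitted before the class is loaded
      std::size_t patch_site = code.size() - 1;

      // ... later: class loading resolves the field to offset 24 (made-up) ...
      code[patch_site].displacement = 24;

      std::printf("patched displacement = %d\n", code[patch_site].displacement);
      return 0;
    }
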
1578 
1579 
1580 void LIRGenerator::do_LoadField(LoadField* x) {
1581   bool needs_patching = x->needs_patching();
1582   bool is_volatile = x->field()->is_volatile();


1608 
1609   if (x->needs_null_check() &&
1610       (needs_patching ||
1611        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1612     // emit an explicit null check because the offset is too large
1613     __ null_check(object.result(), new CodeEmitInfo(info));
1614   }
1615 
1616   LIR_Opr reg = rlock_result(x, field_type);
1617   LIR_Address* address;
1618   if (needs_patching) {
1619     // we need to patch the offset in the instruction so don't allow
1620     // generate_address to try to be smart about emitting the -1.
1621     // Otherwise the patching code won't know how to find the
1622     // instruction to patch.
1623     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1624   } else {
1625     address = generate_address(object.result(), x->offset(), field_type);
1626   }
1627 
1628   if (is_volatile && !needs_patching) {
1629     volatile_field_load(address, reg, info);
1630   } else {
1631     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1632     __ load(address, reg, info, patch_code);
1633   }
1634 
1635   if (is_volatile && os::is_MP()) {
1636     __ membar_acquire();
1637   }
1638 }
1639 
1640 
1641 //------------------------java.nio.Buffer.checkIndex------------------------
1642 
1643 // int java.nio.Buffer.checkIndex(int)
1644 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1645   // NOTE: by the time we are in checkIndex() we are guaranteed that
1646   // the buffer is non-null (because checkIndex is package-private and
1647   // only called from within other methods in the buffer).
1648   assert(x->number_of_arguments() == 2, "wrong type");
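
The do_NIOCheckIndex intrinsic inlines int java.nio.Buffer.checkIndex(int), whose job is to return the index when it is within the buffer's limit and to throw otherwise; the NOTE above explains why no null check of the buffer itself is needed. A rough sketch of the range check being intrinsified (not the actual Java or HotSpot source; the limit value and exception type are stand-ins):

    // Sketch of the bounds check java.nio.Buffer.checkIndex(int) performs:
    // the index is valid only when 0 <= i < limit.
    #include <cstdio>
    #include <stdexcept>

    static int check_index(int i, int limit) {
      if (i < 0 || i >= limit)
        throw std::out_of_range("index out of bounds");
      return i;                     // index is in range, pass it through
    }

    int main() {
      std::printf("%d\n", check_index(3, 10));   // in range: returns 3
      try {
        check_index(10, 10);                     // out of range: throws
      } catch (const std::out_of_range&) {
        std::puts("range check failed as expected");
      }
      return 0;
    }
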

