src/hotspot/share/gc/shared/c1/barrierSetC1.cpp

This hunk removes the os::is_MP() qualifiers from the volatile-access paths in BarrierSetC1::store_at_resolved and BarrierSetC1::load_at_resolved: the release/acquire membars around volatile stores and loads are now emitted unconditionally, presumably because HotSpot no longer specializes code generation for uniprocessor systems.
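
For context when reading the hunk, these are the DecoratorSet bits the code tests (paraphrased from the access/decorator headers; a reviewer's crib sheet, not normative documentation):

  // IN_HEAP            - access to an object in the Java heap
  // IN_NATIVE          - access to memory outside the Java heap
  // MO_SEQ_CST         - sequentially consistent ordering, i.e. a volatile access
  // C1_NEEDS_PATCHING  - the field is not yet resolved, so the access site must be patched at runtime
  // C1_MASK_BOOLEAN    - normalize the loaded value to a canonical 0/1 boolean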

--- a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp
+++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp
@@ -118,87 +118,87 @@
 
   LIR_Opr resolved = resolve_address(access, true);
   access.set_resolved_addr(resolved);
   return atomic_xchg_at_resolved(access, value);
 }
 
 LIR_Opr BarrierSetC1::atomic_add_at(LIRAccess& access, LIRItem& value) {
   DecoratorSet decorators = access.decorators();
   bool in_heap = (decorators & IN_HEAP) != 0;
   assert(in_heap, "not supported yet");
 
   access.load_address();
 
   LIR_Opr resolved = resolve_address(access, true);
   access.set_resolved_addr(resolved);
   return atomic_add_at_resolved(access, value);
 }
 
 void BarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
   DecoratorSet decorators = access.decorators();
-  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
+  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses);
   bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
   bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
   LIRGenerator* gen = access.gen();
 
   if (mask_boolean) {
     value = gen->mask_boolean(access.base().opr(), value, access.access_emit_info());
   }
 
-  if (is_volatile && os::is_MP()) {
+  if (is_volatile) {
     __ membar_release();
   }
 
   LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
   if (is_volatile && !needs_patching) {
     gen->volatile_field_store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info());
   } else {
     __ store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info(), patch_code);
   }
 
   if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
     __ membar();
   }
 }
 
 void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
   LIRGenerator *gen = access.gen();
   DecoratorSet decorators = access.decorators();
-  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
+  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses);
   bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
   bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
   bool in_native = (decorators & IN_NATIVE) != 0;
 
   if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
     __ membar();
   }
 
   LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
   if (in_native) {
     __ move_wide(access.resolved_addr()->as_address_ptr(), result);
   } else if (is_volatile && !needs_patching) {
     gen->volatile_field_load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info());
   } else {
     __ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code);
   }
 
-  if (is_volatile && os::is_MP()) {
+  if (is_volatile) {
     __ membar_acquire();
   }
 
   /* Normalize the boolean value returned by an unsafe operation, i.e. value != 0 ? value = true : value = false. */
   if (mask_boolean) {
     LabelObj* equalZeroLabel = new LabelObj();
     __ cmp(lir_cond_equal, result, 0);
     __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
     __ move(LIR_OprFact::intConst(1), result);
     __ branch_destination(equalZeroLabel->label());
   }
 }
 
 LIR_Opr BarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
   LIRGenerator *gen = access.gen();
   return gen->atomic_cmpxchg(access.type(), access.resolved_addr(), cmp_value, new_value);
 }
 
 LIR_Opr BarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
   LIRGenerator *gen = access.gen();
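As a reviewer's summary of the fence placement after this change (a restatement of store_at_resolved/load_at_resolved above, not new behavior):

  // Barriers emitted for a volatile (MO_SEQ_CST) access:
  //
  //   store, multiple-copy-atomic CPU:      membar_release; store; membar
  //   store, non-multiple-copy-atomic CPU:  membar_release; store
  //   load,  multiple-copy-atomic CPU:      load; membar_acquire
  //   load,  non-multiple-copy-atomic CPU:  membar; load; membar_acquire
  //
  // The split is controlled by support_IRIW_for_not_multiple_copy_atomic_cpu,
  // which is set on platforms such as PPC64.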

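The C1_MASK_BOOLEAN block at the end of load_at_resolved is easier to read in its scalar form; the following is an illustration of what the emitted LIR computes, using a hypothetical function name:

  // Illustration only: scalar equivalent of the masked-boolean LIR above.
  int normalize_boolean(int raw) {
    return raw != 0 ? 1 : 0;  // canonicalize any non-zero value to 1 (true)
  }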
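This file supplies the GC-independent defaults; a concrete collector's C1 support typically overrides the *_at_resolved hooks and delegates back to this shared code for the raw memory access. A minimal sketch, assuming LIRAccess::is_oop() and using made-up names (MyBarrierSetC1, pre_barrier, and post_barrier are hypothetical, not part of this change):

  #include "gc/shared/c1/barrierSetC1.hpp"

  // Hypothetical subclass, for illustration only: wrap the shared store
  // with GC-specific pre/post write barriers.
  class MyBarrierSetC1 : public BarrierSetC1 {
  protected:
    virtual void store_at_resolved(LIRAccess& access, LIR_Opr value) {
      if (access.is_oop()) {
        pre_barrier(access);                            // hypothetical helper: emit pre-write barrier LIR
      }
      BarrierSetC1::store_at_resolved(access, value);   // the shared code above
      if (access.is_oop()) {
        post_barrier(access, value);                    // hypothetical helper: emit post-write barrier LIR
      }
    }
  private:
    void pre_barrier(LIRAccess& access);
    void post_barrier(LIRAccess& access, LIR_Opr new_val);
  };

Real examples of this wrapping pattern live under the per-GC c1 directories (for instance gc/g1/c1).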