src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp

rev 13551 : imported patch gcinterface-aarch64-5.patch

 127     return false;
 128   }
 129 }
 130 
 131 
 132 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }
 133 
 134 
 135 LIR_Opr LIRGenerator::safepoint_poll_register() {
 136   return LIR_OprFact::illegalOpr;
 137 }
 138 
 139 
 140 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
 141                                             int shift, int disp, BasicType type) {
 142   assert(base->is_register(), "must be");
 143   intx large_disp = disp;
 144 
 145   // accumulate fixed displacements
 146   if (index->is_constant()) {
 147     large_disp += (intx)(index->as_constant_ptr()->as_jint()) << shift;
 148     index = LIR_OprFact::illegalOpr;
 149   }
 150 
 151   if (index->is_register()) {
 152     // apply the shift and accumulate the displacement
 153     if (shift > 0) {
 154       LIR_Opr tmp = new_pointer_register();
 155       __ shift_left(index, shift, tmp);
 156       index = tmp;
 157     }
 158     if (large_disp != 0) {
 159       LIR_Opr tmp = new_pointer_register();
 160       if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
 161         __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
 162         index = tmp;
 163       } else {
 164         __ move(LIR_OprFact::intptrConst(large_disp), tmp);
 165         __ add(tmp, index, tmp);
 166         index = tmp;
 167       }
 168       large_disp = 0;
 169     }
 170   } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
 171     // index is illegal so replace it with the displacement loaded into a register
 172     index = new_pointer_register();
 173     __ move(LIR_OprFact::intptrConst(large_disp), index);
 174     large_disp = 0;
 175   }
 176 
 177   // at this point we either have base + index or base + displacement
 178   if (large_disp == 0) {
 179     return new LIR_Address(base, index, type);
 180   } else {
 181     assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
 182     return new LIR_Address(base, large_disp, type);
 183   }
 184 }
 185 
 186 
 187 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
 188                                               BasicType type, bool needs_card_mark) {
 189   int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
 190   int elem_size = type2aelembytes(type);
 191   int shift = exact_log2(elem_size);
 192 
 193   LIR_Address* addr;
 194   if (index_opr->is_constant()) {
 195     addr = new LIR_Address(array_opr,
 196                            offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
 197   } else {
 198     if (offset_in_bytes) {
 199       LIR_Opr tmp = new_pointer_register();
 200       __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
 201       array_opr = tmp;
 202       offset_in_bytes = 0;
 203     }
 204     addr =  new LIR_Address(array_opr,
 205                             index_opr,
 206                             LIR_Address::scale(type),
 207                             offset_in_bytes, type);
 208   }
 209   if (needs_card_mark) {
 210     // This store will need a precise card mark, so go ahead and
 211     // compute the full address instead of computing it once for the
 212     // store and again for the card mark.
 213     LIR_Opr tmp = new_pointer_register();
 214     __ leal(LIR_OprFact::address(addr), tmp);
 215     return new LIR_Address(tmp, type);
 216   } else {
 217     return addr;
 218   }
 219 }
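
The precise card mark mentioned above dirties the card covering the exact slot that was stored to, which is why the full element address is materialized once and reused for both the store and the barrier. A standalone sketch, not HotSpot code, assuming HotSpot's default 512-byte cards (shift 9) and dirty value 0:

    #include <cstdint>

    // Illustrative only: a precise post-barrier marks the one card that
    // covers the element address, so the same address must feed both the
    // store and the barrier.
    static void precise_post_barrier(volatile uint8_t* card_table_base,
                                     uintptr_t element_addr) {
      card_table_base[element_addr >> 9] = 0;  // 0 == dirty card (assumed)
    }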
 220 
 221 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
 222   LIR_Opr r;
 223   if (type == T_LONG) {
 224     r = LIR_OprFact::longConst(x);
 225     if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
 226       LIR_Opr tmp = new_register(type);
 227       __ move(r, tmp);
 228       return tmp;
 229     }
 230   } else if (type == T_INT) {
 231     r = LIR_OprFact::intConst(x);
 232     if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
 233       // This is all rather nasty.  We don't know whether our constant
 234     // is required for a logical or an arithmetic operation, so we
 235       // don't know what the range of valid values is!!
 236       LIR_Opr tmp = new_register(type);
 237       __ move(r, tmp);
 238       return tmp;

 288 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
 289 
 290   if (is_power_of_2(c - 1)) {
 291     __ shift_left(left, exact_log2(c - 1), tmp);
 292     __ add(tmp, left, result);
 293     return true;
 294   } else if (is_power_of_2(c + 1)) {
 295     __ shift_left(left, exact_log2(c + 1), tmp);
 296     __ sub(tmp, left, result);
 297     return true;
 298   } else {
 299     return false;
 300   }
 301 }
 302 
 303 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
 304   BasicType type = item->type();
 305   __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
 306 }
 307 
 308 //----------------------------------------------------------------------
 309 //             visitor functions
 310 //----------------------------------------------------------------------
 311 
 312 
 313 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
 314   assert(x->is_pinned(),"");
 315   bool needs_range_check = x->compute_needs_range_check();
 316   bool use_length = x->length() != NULL;
 317   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
 318   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
 319                                          !get_jobject_constant(x->value())->is_null_object() ||
 320                                          x->should_profile());
 321 
 322   LIRItem array(x->array(), this);
 323   LIRItem index(x->index(), this);
 324   LIRItem value(x->value(), this);
 325   LIRItem length(this);
 326 
 327   array.load_item();
 328   index.load_nonconstant();
 329 
 330   if (use_length && needs_range_check) {
 331     length.set_instruction(x->length());
 332     length.load_item();
 333 
 334   }
 335   if (needs_store_check || x->check_boolean()) {
 336     value.load_item();
 337   } else {
 338     value.load_for_store(x->elt_type());
 339   }
 340 
 341   set_no_result(x);
 342 
 343   // the CodeEmitInfo must be duplicated for each different
 344   // LIR-instruction because spilling can occur anywhere between two
 345   // instructions and so the debug information must be different
 346   CodeEmitInfo* range_check_info = state_for(x);
 347   CodeEmitInfo* null_check_info = NULL;
 348   if (x->needs_null_check()) {
 349     null_check_info = new CodeEmitInfo(range_check_info);
 350   }
 351 
 352   // emit array address setup early so it schedules better
 353   // FIXME?  No harm in this on aarch64, and it might help
 354   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
 355 
 356   if (GenerateRangeChecks && needs_range_check) {
 357     if (use_length) {
 358       __ cmp(lir_cond_belowEqual, length.result(), index.result());
 359       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
 360     } else {
 361       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
 362       // range_check also does the null check
 363       null_check_info = NULL;
 364     }
 365   }
 366 
 367   if (GenerateArrayStoreCheck && needs_store_check) {
 368     LIR_Opr tmp1 = new_register(objectType);
 369     LIR_Opr tmp2 = new_register(objectType);
 370     LIR_Opr tmp3 = new_register(objectType);
 371 
 372     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
 373     __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
 374   }
 375 
 376   if (obj_store) {
 377     // Needs GC write barriers.
 378     pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
 379                 true /* do_load */, false /* patch */, NULL);
 380     __ move(value.result(), array_addr, null_check_info);
 381     // Seems to be a precise address
 382     post_barrier(LIR_OprFact::address(array_addr), value.result());
 383   } else {
 384     LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
 385     __ move(result, array_addr, null_check_info);
 386   }
 387 }
 388 
 389 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 390   assert(x->is_pinned(),"");
 391   LIRItem obj(x->obj(), this);
 392   obj.load_item();
 393 
 394   set_no_result(x);
 395 
 396   // "lock" stores the address of the monitor stack slot, so this is not an oop
 397   LIR_Opr lock = new_register(T_INT);
 398   // Need a scratch register for biased locking
 399   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 400   if (UseBiasedLocking) {
 401     scratch = new_register(T_INT);
 402   }
 403 
 404   CodeEmitInfo* info_for_exception = NULL;
 405   if (x->needs_null_check()) {
 406     info_for_exception = state_for(x);
 407   }
 408   // this CodeEmitInfo must not have the xhandlers because here the

 754   LIRItem left(x->x(), this);
 755   LIRItem right(x->y(), this);
 756   ValueTag tag = x->x()->type()->tag();
 757   if (tag == longTag) {
 758     left.set_destroys_register();
 759   }
 760   left.load_item();
 761   right.load_item();
 762   LIR_Opr reg = rlock_result(x);
 763 
 764   if (x->x()->type()->is_float_kind()) {
 765     Bytecodes::Code code = x->op();
 766     __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
 767   } else if (x->x()->type()->tag() == longTag) {
 768     __ lcmp2int(left.result(), right.result(), reg);
 769   } else {
 770     Unimplemented();
 771   }
 772 }
 773 
 774 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
 775   assert(x->number_of_arguments() == 4, "wrong type");
 776   LIRItem obj   (x->argument_at(0), this);  // object
 777   LIRItem offset(x->argument_at(1), this);  // offset of field
 778   LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
 779   LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
 780 
 781   assert(obj.type()->tag() == objectTag, "invalid type");
 782 
 783   // In 64-bit mode the type can be long; sparc doesn't have this assert
 784   // assert(offset.type()->tag() == intTag, "invalid type");
 785 
 786   assert(cmp.type()->tag() == type->tag(), "invalid type");
 787   assert(val.type()->tag() == type->tag(), "invalid type");
 788 
 789   // get address of field
 790   obj.load_item();
 791   offset.load_nonconstant();
 792   val.load_item();
 793   cmp.load_item();
 794 
 795   LIR_Address* a;
 796   if (offset.result()->is_constant()) {
 797     jlong c = offset.result()->as_jlong();
 798     if ((jlong)((jint)c) == c) {
 799       a = new LIR_Address(obj.result(),
 800                           (jint)c,
 801                           as_BasicType(type));
 802     } else {
 803       LIR_Opr tmp = new_register(T_LONG);
 804       __ move(offset.result(), tmp);
 805       a = new LIR_Address(obj.result(),
 806                           tmp,
 807                           as_BasicType(type));
 808     }
 809   } else {
 810     a = new LIR_Address(obj.result(),
 811                         offset.result(),
 812                         0,
 813                         as_BasicType(type));
 814   }
 815   LIR_Opr addr = new_pointer_register();
 816   __ leal(LIR_OprFact::address(a), addr);
 817 
 818   if (type == objectType) {  // Write-barrier needed for Object fields.
 819     // Do the pre-write barrier, if any.
 820     pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
 821                 true /* do_load */, false /* patch */, NULL);
 822   }
 823 
 824   LIR_Opr result = rlock_result(x);
 825 
 826   LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
 827   if (type == objectType)
 828     __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
 829                result);
 830   else if (type == intType)
 831     __ cas_int(addr, cmp.result(), val.result(), ill, ill);
 832   else if (type == longType)
 833     __ cas_long(addr, cmp.result(), val.result(), ill, ill);
 834   else {
 835     ShouldNotReachHere();
 836   }
 837 
 838   __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
 839 
 840   if (type == objectType) {   // Write-barrier needed for Object fields.
 841     // Seems to be precise
 842     post_barrier(addr, val.result());
 843   }
 844 }
 845 
 846 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
 847   switch (x->id()) {
 848     case vmIntrinsics::_dabs:
 849     case vmIntrinsics::_dsqrt: {
 850       assert(x->number_of_arguments() == 1, "wrong type");
 851       LIRItem value(x->argument_at(0), this);
 852       value.load_item();
 853       LIR_Opr dst = rlock_result(x);
 854 
 855       switch (x->id()) {
 856       case vmIntrinsics::_dsqrt: {
 857         __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
 858         break;
 859       }
 860       case vmIntrinsics::_dabs: {
 861         __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
 862         break;
 863       }

1341 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1342 
1343 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1344                                         CodeEmitInfo* info) {
1345   __ volatile_store_mem_reg(value, address, info);
1346 }
1347 
1348 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1349                                        CodeEmitInfo* info) {
1350   // 8179954: We need to make sure that the code generated for
1351   // volatile accesses forms a sequentially-consistent set of
1352   // operations when combined with STLR and LDAR.  Without a leading
1353   // membar it's possible for a simple Dekker test to fail if loads
1354   // use LD;DMB but stores use STLR.  This can happen if C2 compiles
1355   // the stores in one method and C1 compiles the loads in another.
1356   if (! UseBarriersForVolatile) {
1357     __ membar();
1358   }
1359 
1360   __ volatile_load_mem_reg(address, result, info);
1361 }
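
To make the comment concrete, here is an illustrative instruction mix; the per-compiler mappings are assumptions for the sketch, not verified compiler output:

    // Thread 1 runs C2 code, thread 2 runs C1 code; each does a volatile
    // store of its flag followed by a volatile load of the other flag.
    //
    //   C2 volatile store:   stlr  w8, [x0]     // store-release
    //   C1 volatile load:    ldr   w9, [x1]     // plain load
    //                        dmb   ish          // barrier only after the load
    //
    // The plain ldr can be satisfied before the stlr becomes visible to the
    // other thread, so both threads may read 0 and enter the critical
    // section together.  Emitting
    //
    //                        dmb   ish          // leading barrier
    //                        ldr   w9, [x1]
    //
    // restores the store->load ordering that a matched stlr/ldar pair would
    // provide, which is what the membar() above does.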
1362 
1363 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1364                                      BasicType type, bool is_volatile) {
1365   LIR_Address* addr = new LIR_Address(src, offset, type);
1366   __ load(addr, dst);
1367 }
1368 
1369 
1370 void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
1371                                      BasicType type, bool is_volatile) {
1372   LIR_Address* addr = new LIR_Address(src, offset, type);
1373   bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1374   if (is_obj) {
1375     // Do the pre-write barrier, if any.
1376     pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1377                 true /* do_load */, false /* patch */, NULL);
1378     __ move(data, addr);
1379     assert(src->is_register(), "must be register");
1380     // Seems to be a precise address
1381     post_barrier(LIR_OprFact::address(addr), data);
1382   } else {
1383     __ move(data, addr);
1384   }
1385 }
1386 
1387 void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
1388   BasicType type = x->basic_type();
1389   LIRItem src(x->object(), this);
1390   LIRItem off(x->offset(), this);
1391   LIRItem value(x->value(), this);
1392 
1393   src.load_item();
1394   off.load_nonconstant();
1395 
1396   // We can cope with a constant increment in an xadd
1397   if (! (x->is_add()
1398          && value.is_constant()
1399          && can_inline_as_constant(x->value()))) {
1400     value.load_item();
1401   }
1402 
1403   LIR_Opr dst = rlock_result(x, type);
1404   LIR_Opr data = value.result();
1405   bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1406   LIR_Opr offset = off.result();
1407 
1408   if (data == dst) {
1409     LIR_Opr tmp = new_register(data->type());
1410     __ move(data, tmp);
1411     data = tmp;
1412   }
1413 
1414   LIR_Address* addr;
1415   if (offset->is_constant()) {
1416     jlong l = offset->as_jlong();
1417     assert((jlong)((jint)l) == l, "offset too large for constant");
1418     jint c = (jint)l;
1419     addr = new LIR_Address(src.result(), c, type);
1420   } else {
1421     addr = new LIR_Address(src.result(), offset, type);
1422   }
1423 
1424   LIR_Opr tmp = new_register(T_INT);
1425   LIR_Opr ptr = LIR_OprFact::illegalOpr;
1426 
1427   if (x->is_add()) {
1428     __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
1429   } else {
1430     if (is_obj) {
1431       // Do the pre-write barrier, if any.
1432       ptr = new_pointer_register();
1433       __ add(src.result(), off.result(), ptr);
1434       pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
1435                   true /* do_load */, false /* patch */, NULL);
1436     }
1437     __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
1438     if (is_obj) {
1439       post_barrier(ptr, data);
1440     }
1441   }
1442 }
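
A standalone illustration, not HotSpot code, of the xadd/xchg semantics relied on above: both return the old value, which is also why data is copied to a fresh register when it aliases dst (writing the returned old value into dst would otherwise clobber the operand).

    #include <atomic>
    #include <cassert>

    int main() {
      std::atomic<int> v{10};
      int old_val = v.fetch_add(5);   // xadd: returns 10, v becomes 15
      assert(old_val == 10 && v.load() == 15);
      old_val = v.exchange(42);       // xchg: returns 15, v becomes 42
      assert(old_val == 15 && v.load() == 42);
      return 0;
    }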

 127     return false;
 128   }
 129 }
 130 
 131 
 132 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }
 133 
 134 
 135 LIR_Opr LIRGenerator::safepoint_poll_register() {
 136   return LIR_OprFact::illegalOpr;
 137 }
 138 
 139 
 140 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
 141                                             int shift, int disp, BasicType type) {
 142   assert(base->is_register(), "must be");
 143   intx large_disp = disp;
 144 
 145   // accumulate fixed displacements
 146   if (index->is_constant()) {
 147     LIR_Const *constant = index->as_constant_ptr();
 148     if (constant->type() == T_INT) {
 149       large_disp += ((intx)index->as_jint()) << shift;
 150     } else {
 151       assert(constant->type() == T_LONG, "should be");
 152       jlong c = index->as_jlong() << shift;
 153       if ((jlong)((jint)c) == c) {
 154         large_disp += c;
 155         index = LIR_OprFact::illegalOpr;
 156       } else {
 157         LIR_Opr tmp = new_register(T_LONG);
 158         __ move(index, tmp);
 159         index = tmp;
 160         // apply shift and displacement below
 161       }
 162     }
 163   }
 164 
 165   if (index->is_register()) {
 166     // apply the shift and accumulate the displacement
 167     if (shift > 0) {
 168       LIR_Opr tmp = new_pointer_register();
 169       __ shift_left(index, shift, tmp);
 170       index = tmp;
 171     }
 172     if (large_disp != 0) {
 173       LIR_Opr tmp = new_pointer_register();
 174       if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
 175         __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
 176         index = tmp;
 177       } else {
 178         __ move(LIR_OprFact::intptrConst(large_disp), tmp);
 179         __ add(tmp, index, tmp);
 180         index = tmp;
 181       }
 182       large_disp = 0;
 183     }
 184   } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
 185     // index is illegal so replace it with the displacement loaded into a register
 186     index = new_pointer_register();
 187     __ move(LIR_OprFact::intptrConst(large_disp), index);
 188     large_disp = 0;
 189   }
 190 
 191   // at this point we either have base + index or base + displacement
 192   if (large_disp == 0) {
 193     return new LIR_Address(base, index, type);
 194   } else {
 195     assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
 196     return new LIR_Address(base, large_disp, type);
 197   }
 198 }
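
A hypothetical standalone mirror of the constant-index folding above: the index is absorbed into the displacement as disp + (index << shift), and a 64-bit index only folds when the shifted value still fits in a jint, as checked above. The names here are illustrative, not HotSpot's:

    #include <cassert>
    #include <cstdint>

    // Sketch only: fold a constant index into the displacement, leaving a
    // plain base + displacement address.
    static intptr_t fold_disp(intptr_t disp, int32_t const_index, int shift) {
      return disp + ((intptr_t)const_index << shift);
    }

    int main() {
      // e.g. a long[] slot: header displacement 16, index 3, shift 3 -> base + 40
      assert(fold_disp(16, 3, 3) == 40);
      return 0;
    }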
 199 
 200 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
 201                                               BasicType type) {
 202   int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
 203   int elem_size = type2aelembytes(type);
 204   int shift = exact_log2(elem_size);
 205 
 206   LIR_Address* addr;
 207   if (index_opr->is_constant()) {
 208     addr = new LIR_Address(array_opr,
 209                            offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
 210   } else {
 211     if (offset_in_bytes) {
 212       LIR_Opr tmp = new_pointer_register();
 213       __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
 214       array_opr = tmp;
 215       offset_in_bytes = 0;
 216     }
 217     addr =  new LIR_Address(array_opr,
 218                             index_opr,
 219                             LIR_Address::scale(type),
 220                             offset_in_bytes, type);
 221   }
 222   return addr;
 223 }
 224 
 225 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
 226   LIR_Opr r;
 227   if (type == T_LONG) {
 228     r = LIR_OprFact::longConst(x);
 229     if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
 230       LIR_Opr tmp = new_register(type);
 231       __ move(r, tmp);
 232       return tmp;
 233     }
 234   } else if (type == T_INT) {
 235     r = LIR_OprFact::intConst(x);
 236     if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
 237       // This is all rather nasty.  We don't know whether our constant
 238     // is required for a logical or an arithmetic operation, so we
 239       // don't know what the range of valid values is!!
 240       LIR_Opr tmp = new_register(type);
 241       __ move(r, tmp);
 242       return tmp;
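
The two validity checks differ because AArch64 encodes add/sub immediates and logical immediates differently. A standalone sketch, not the Assembler's real predicate, of the add/sub rule (a 12-bit value, optionally shifted left by 12):

    #include <cassert>
    #include <cstdint>

    // Sketch of the add/sub immediate range; the real check lives in
    // Assembler::operand_valid_for_add_sub_immediate.
    static bool add_sub_imm_ok(uint64_t v) {
      return (v >> 24) == 0 && ((v & 0xfffULL) == 0 || (v & ~0xfffULL) == 0);
    }

    int main() {
      assert(add_sub_imm_ok(4095));       // fits in 12 bits
      assert(add_sub_imm_ok(0xabc000));   // 12-bit value shifted left by 12
      assert(!add_sub_imm_ok(0xabcd));    // 16 significant bits: not encodable
      return 0;
    }

Logical (and/orr/eor) immediates are instead replicated bitmask patterns, so neither valid range contains the other, hence the pessimistic load into a register.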

 292 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
 293 
 294   if (is_power_of_2(c - 1)) {
 295     __ shift_left(left, exact_log2(c - 1), tmp);
 296     __ add(tmp, left, result);
 297     return true;
 298   } else if (is_power_of_2(c + 1)) {
 299     __ shift_left(left, exact_log2(c + 1), tmp);
 300     __ sub(tmp, left, result);
 301     return true;
 302   } else {
 303     return false;
 304   }
 305 }
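
A standalone check, with illustrative names only, of the identities used above: x * c becomes (x << log2(c-1)) + x when c - 1 is a power of two, and (x << log2(c+1)) - x when c + 1 is.

    #include <cassert>

    static long mul9(long x) { return (x << 3) + x; }  // c = 9, c - 1 = 8 = 2^3
    static long mul7(long x) { return (x << 3) - x; }  // c = 7, c + 1 = 8 = 2^3

    int main() {
      assert(mul9(5) == 45);
      assert(mul7(5) == 35);
      return 0;
    }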
 306 
 307 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
 308   BasicType type = item->type();
 309   __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
 310 }
 311 
 312 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 313   LIR_Opr tmp1 = new_register(objectType);
 314   LIR_Opr tmp2 = new_register(objectType);
 315   LIR_Opr tmp3 = new_register(objectType);
 316   __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 317 }
 318 
 319 //----------------------------------------------------------------------
 320 //             visitor functions
 321 //----------------------------------------------------------------------
 322 
 323 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 324   assert(x->is_pinned(),"");
 325   LIRItem obj(x->obj(), this);
 326   obj.load_item();
 327 
 328   set_no_result(x);
 329 
 330   // "lock" stores the address of the monitor stack slot, so this is not an oop
 331   LIR_Opr lock = new_register(T_INT);
 332   // Need a scratch register for biased locking
 333   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 334   if (UseBiasedLocking) {
 335     scratch = new_register(T_INT);
 336   }
 337 
 338   CodeEmitInfo* info_for_exception = NULL;
 339   if (x->needs_null_check()) {
 340     info_for_exception = state_for(x);
 341   }
 342   // this CodeEmitInfo must not have the xhandlers because here the

 688   LIRItem left(x->x(), this);
 689   LIRItem right(x->y(), this);
 690   ValueTag tag = x->x()->type()->tag();
 691   if (tag == longTag) {
 692     left.set_destroys_register();
 693   }
 694   left.load_item();
 695   right.load_item();
 696   LIR_Opr reg = rlock_result(x);
 697 
 698   if (x->x()->type()->is_float_kind()) {
 699     Bytecodes::Code code = x->op();
 700     __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
 701   } else if (x->x()->type()->tag() == longTag) {
 702     __ lcmp2int(left.result(), right.result(), reg);
 703   } else {
 704     Unimplemented();
 705   }
 706 }
 707 
 708 LIR_Opr LIRGenerator::cas(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
 709   LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
 710   new_value.load_item();
 711   cmp_value.load_item();
 712   LIR_Opr result = new_register(T_INT);
 713   if (type == T_OBJECT || type == T_ARRAY) {
 714     __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
 715   } else if (type == T_INT) {
 716     __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
 717   } else if (type == T_LONG) {
 718     __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
 719   } else {
 720     ShouldNotReachHere();
 721     Unimplemented();
 722   }
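  // Comment added for clarity (assumed convention): the cas_* macros leave
  // their status in rscratch1 (r8), 0 on success, so the xor below yields
  // the Java-visible boolean, 1 on success.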
 723   __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
 724   return result;
 725 }
 726 
 727 LIR_Opr LIRGenerator::swap(BasicType type, LIR_Opr addr, LIRItem& value) {
 728   bool is_oop = type == T_OBJECT || type == T_ARRAY;
 729   LIR_Opr result = new_register(type);
 730   value.load_item();
 731   assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
 732   LIR_Opr tmp = new_register(T_INT);
 733   __ xchg(addr, value.result(), result, tmp);
 734   return result;
 735 }
 736 
 737 LIR_Opr LIRGenerator::add(BasicType type, LIR_Opr addr, LIRItem& value) {
 738   LIR_Opr result = new_register(type);
 739   value.load_item();
 740   assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
 741   LIR_Opr tmp = new_register(T_INT);
 742   __ xadd(addr, value.result(), result, tmp);
 743   return result;
 744 }
 745 
 746 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
 747   switch (x->id()) {
 748     case vmIntrinsics::_dabs:
 749     case vmIntrinsics::_dsqrt: {
 750       assert(x->number_of_arguments() == 1, "wrong type");
 751       LIRItem value(x->argument_at(0), this);
 752       value.load_item();
 753       LIR_Opr dst = rlock_result(x);
 754 
 755       switch (x->id()) {
 756       case vmIntrinsics::_dsqrt: {
 757         __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
 758         break;
 759       }
 760       case vmIntrinsics::_dabs: {
 761         __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
 762         break;
 763       }

1241 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1242 
1243 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1244                                         CodeEmitInfo* info) {
1245   __ volatile_store_mem_reg(value, address, info);
1246 }
1247 
1248 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1249                                        CodeEmitInfo* info) {
1250   // 8179954: We need to make sure that the code generated for
1251   // volatile accesses forms a sequentially-consistent set of
1252   // operations when combined with STLR and LDAR.  Without a leading
1253   // membar it's possible for a simple Dekker test to fail if loads
1254   // use LD;DMB but stores use STLR.  This can happen if C2 compiles
1255   // the stores in one method and C1 compiles the loads in another.
1256   if (! UseBarriersForVolatile) {
1257     __ membar();
1258   }
1259 
1260   __ volatile_load_mem_reg(address, result, info);
1261 }