/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_aarch64.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;          break;
    case objectTag:  opr = FrameMap::r0_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;       break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
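  // Note: unlike x86, AArch64 can store the low byte of any general-purpose
  // register, so the byte_reg flag below presumably only satisfies the shared
  // LinearScan interface rather than restricting allocation on this port.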
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  // accumulate fixed displacements
  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(disp)) {
        __ add(tmp, tmp, LIR_OprFact::intptrConst(disp));
        index = tmp;
      } else {
        __ move(tmp, LIR_OprFact::intptrConst(disp));
        __ add(tmp, index, tmp);
        index = tmp;
      }
      disp = 0;
    }
  } else if (disp != 0 && !Address::offset_ok_for_immed(disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(disp), index);
    disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(disp, 0), "must be");
    return new LIR_Address(base, disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + index_opr->as_jint() * elem_size, type);
  } else {
    if (offset_in_bytes) {
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes,
                           type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing it once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}

LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
  }
  return r;
}



void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch(addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {

  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}

void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

//----------------------------------------------------------------------
//               visitor functions
//----------------------------------------------------------------------


void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();

  }
  if (needs_store_check) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  // FIXME?  No harm in this on aarch64, and it might help
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
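    // Sketch of the barrier protocol (assuming a SATB collector such as G1):
    // the pre-barrier may record the value being overwritten at array_addr,
    // and the post-barrier after the store dirties the card for that address.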
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    __ move(value.result(), array_addr, null_check_info);
  }
}

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


void LIRGenerator::do_NegateOp(NegateOp* x) {

  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate (from.result(), result);

}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
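  // (AArch64 floating-point arithmetic instructions have no immediate forms,
  // so a floating-point right operand can never be used as an inline constant.)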
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), NULL);

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    left.load_item();
    right.load_item();

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem (left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div (left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }


  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register(not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    right_arg->load_item();
    rlock_result(x);

    CodeEmitInfo* info = state_for(x);
    LIR_Opr tmp = new_register(T_INT);
    __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    info = state_for(x);

    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant() &&
        Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      int c = right.get_jint_constant();
      if (! is_power_of_2(c) && ! is_power_of_2(c + 1) && ! is_power_of_2(c - 1)) {
        // Cannot use constant op.
        right.load_item();
      } else {
        right.dont_load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();
  val.load_item();
  cmp.load_item();

  LIR_Address* a;
  if(offset.result()->is_constant()) {
    jlong c = offset.result()->as_jlong();
    if ((jlong)((jint)c) == c) {
      a = new LIR_Address(obj.result(),
                          (jint)c,
                          as_BasicType(type));
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset.result(), tmp);
      a = new LIR_Address(obj.result(),
                          tmp,
                          as_BasicType(type));
    }
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        LIR_Address::times_1,
                        0,
                        as_BasicType(type));
  }
  LIR_Opr addr = new_pointer_register();
  __ leal(LIR_OprFact::address(a), addr);
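  // From here on the field address is materialised in 'addr'; the same
  // operand is used for the pre/post barriers and for the CAS itself.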

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr result = rlock_result(x);

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
               result);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  else {
    ShouldNotReachHere();
  }

  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
      case vmIntrinsics::_dsqrt: {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      case vmIntrinsics::_dabs: {
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      case vmIntrinsics::_dexp:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ?
          arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if(off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       LIR_Address::times_1,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception = state_for(x);

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  __ volatile_load_mem_reg(address, result, info);
}

void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  __ load(addr, dst);
}


void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  if (is_obj) {
    // Do the pre-write barrier, if any.
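    // As with oop array stores above, an Unsafe object store needs the
    // pre-write barrier before the move and a card-marking post-barrier after it.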
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    __ move(data, addr);
  }
}

void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  off.load_nonconstant();

  // We can cope with a constant increment in an xadd
  if (! (x->is_add()
         && value.is_constant()
         && can_inline_as_constant(x->value()))) {
    value.load_item();
  }

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  if (data == dst) {
    LIR_Opr tmp = new_register(data->type());
    __ move(data, tmp);
    data = tmp;
  }

  LIR_Address* addr;
  if (offset->is_constant()) {
    jlong l = offset->as_jlong();
    assert((jlong)((jint)l) == l, "offset too large for constant");
    jint c = (jint)l;
    addr = new LIR_Address(src.result(), c, type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  LIR_Opr tmp = new_register(T_INT);
  LIR_Opr ptr = LIR_OprFact::illegalOpr;

  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      ptr = new_pointer_register();
      __ add(src.result(), off.result(), ptr);
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
    if (is_obj) {
      post_barrier(ptr, data);
    }
  }
}