/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_x86.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
    // make sure that it is a byte register
    assert(!value()->type()->is_float() && !value()->type()->is_double(),
           "can't load floats in byte register");
    LIR_Opr reg = _gen->rlock_byte(T_BYTE);
    __ move(res, reg);

    _result = reg;
  }
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::rax_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { return FrameMap::rcx_opr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:    opr = FrameMap::rax_opr;     break;
    case objectTag: opr = FrameMap::rax_oop_opr; break;
    case longTag:   opr = FrameMap::long0_opr;   break;
    case floatTag:  opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
    case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


// i486 instructions can inline constants
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (type == T_SHORT || type == T_CHAR) {
    // there is no immediate move of word values in assembler_i486.?pp
    return false;
  }
  Constant* c = v->as_Constant();
  if (c && c->state_before() == NULL) {
    // constants of any type can be stored directly, except for
    // unloaded object constants.
    return true;
  }
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->tag() == longTag) return false;
  return v->type()->tag() != objectTag ||
         (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_LONG) return false;
  return c->type() != T_OBJECT || c->as_jobject() == NULL;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  NOT_LP64( if (SafepointMechanism::uses_thread_local_poll()) { return new_register(T_ADDRESS); } )
  return LIR_OprFact::illegalOpr;
}

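// Note on generate_address below: an x86 address displacement is a signed
// 32-bit immediate. On 64-bit, the folded constant (index << shift) + disp
// can therefore only be baked into the address when it fits in 32 bits
// (checked via (jlong)((jint)c) == c); otherwise the index is materialized
// in a fresh long register and used as a register index instead.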
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    LIR_Const* constant = index->as_constant_ptr();
#ifdef _LP64
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    if ((jlong)((jint)c) == c) {
      return new LIR_Address(base, (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
#else
    return new LIR_Address(base,
                           ((intx)(constant->as_jint()) << shift) + disp,
                           type);
#endif
  } else {
    return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
  }
}

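// Array element addresses have the form
//   base + arrayOopDesc::base_offset_in_bytes(type) + index * element_size.
// On 64-bit, a T_INT index is first widened with i2l so it can serve as a
// 64-bit register index; e.g. an int index into a long[] becomes roughly
//   movslq tmp, index; lea addr, [array + tmp*8 + base_offset]
// (illustrative sketch only; the exact instructions come from LIR_Assembler).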
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif // _LP64
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = NULL;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  return r;
}

void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ cmp_mem_int(condition, base, disp, c, info);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}

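// strength_reduce_multiply rewrites a multiply by a constant of the form
// 2^n +/- 1 as a shift plus one add or sub, e.g. (when tmp is valid):
//   left * 7 == (left << 3) - left   // 7 == 2^3 - 1
//   left * 9 == (left << 3) + left   // 9 == 2^3 + 1
// Note the operand order: left is shifted in place, so its original value
// has to be saved in tmp first.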
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid() && c > 0 && c < max_jint) {
    if (is_power_of_2(c + 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2_jint(c + 1), left);
      __ sub(left, tmp, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2_jint(c - 1), left);
      __ add(left, tmp, result);
      return true;
    }
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.set_destroys_register();
  value.load_item();
  LIR_Opr reg = rlock(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
#ifdef _LP64
  if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
    if (x->type()->tag() == doubleTag) {
      tmp = new_register(T_DOUBLE);
      __ move(LIR_OprFact::doubleConst(-0.0), tmp);
    }
    else if (x->type()->tag() == floatTag) {
      tmp = new_register(T_FLOAT);
      __ move(LIR_OprFact::floatConst(-0.0), tmp);
    }
  }
#endif
  __ negate(value.result(), reg, tmp);

  set_result(x, round_item(reg));
}

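// Note on do_NegateOp above: floating-point negation amounts to flipping the
// sign bit, and the -0.0 constant materialized into tmp is presumably the
// sign-bit mask; the extra register is only needed on AVX-512 machines
// without the VL extension, where the assembler cannot encode the mask
// operand directly.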
// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;
  assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");
  bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
  if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
    left.load_item();
  } else {
    left.dont_load_item();
  }

  // do not load right operand if it is a constant. Only 0 and 1 are
  // loaded because there are special instructions for loading them
  // without memory access (not needed for SSE2 instructions)
  bool must_load_right = false;
  if (right.is_constant()) {
    LIR_Const* c = right.result()->as_constant_ptr();
    assert(c != NULL, "invalid constant");
    assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");

    if (c->type() == T_FLOAT) {
      must_load_right = UseSSE < 1 && (c->is_one_float() || c->is_zero_float());
    } else {
      must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double());
    }
  }

  if (must_load_both) {
    // frem and drem also destroy the right operand, so move it to a new register
    right.set_destroys_register();
    right.load_item();
  } else if (right.is_register() || must_load_right) {
    right.load_item();
  } else {
    right.dont_load_item();
  }
  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  if ((UseSSE >= 1 && x->op() == Bytecodes::_frem) || (UseSSE >= 2 && x->op() == Bytecodes::_drem)) {
    // special handling for frem and drem: no SSE instruction available, so
    // we must use the FPU with temporary fpu stack slots
    LIR_Opr fpu0, fpu1;
    if (x->op() == Bytecodes::_frem) {
      fpu0 = LIR_OprFact::single_fpu(0);
      fpu1 = LIR_OprFact::single_fpu(1);
    } else {
      fpu0 = LIR_OprFact::double_fpu(0);
      fpu1 = LIR_OprFact::double_fpu(1);
    }
    __ move(right.result(), fpu1); // order of left and right operand is important!
    __ move(left.result(), fpu0);
    __ rem (fpu0, fpu1, fpu0);
    __ move(fpu0, reg);

  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp(), tmp);
  }

  set_result(x, round_item(reg));
}

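// 64-bit division and remainder on this platform are not open-coded:
// ldiv/lrem below become leaf calls into SharedRuntime. Because a call
// cannot take an implicit divide-by-zero trap, the zero check on the
// divisor is emitted explicitly (cmp + branch to a DivByZeroStub) before
// the call.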
// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem ) {
    // long division is implemented as a direct call into the runtime
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    address entry = NULL;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // the divisor-is-zero check is done above
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // the divisor-is-zero check is done above
    case Bytecodes::_lmul:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
      break;
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else if (x->op() == Bytecodes::_lmul) {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // right register is destroyed by the long mul, so it must be
    // copied to a new register.
    right.set_destroys_register();

    left.load_item();
    right.load_item();

    LIR_Opr reg = FrameMap::long0_opr;
    arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    // don't load constants to save register
    right.load_nonconstant();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

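// Integer division pins its operands to the registers the hardware idiv
// instruction requires: the dividend goes in rax (divInOpr()), the quotient
// comes back in rax (divOutOpr()) and the remainder in rdx (remOutOpr()).
// Roughly, the assembler ends up emitting something like
//   mov eax, left; cdq; idiv right   // cdq sign-extends eax into edx
// (a sketch of the hardware sequence, not the literal generated code).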
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // The requirements for division and modulo
    // input : rax: dividend                        min_int
    //         reg: divisor  (may not be rax/rdx)   -1
    //
    // output: rax: quotient  (= rax idiv reg)      min_int
    //         rdx: remainder (= rax irem reg)      0

    // rax and rdx will be destroyed

    // Note: does this invalidate the spec ???
    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // visit left second, so that the is_register test is valid

    // call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info. Otherwise the live range of the fixed
    // register might be too long.
    CodeEmitInfo* info = state_for(x);

    left.load_item_force(divInOpr());

    right.load_item();

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg;
    if (x->op() == Bytecodes::_idiv) {
      result_reg = divOutOpr();
    } else {
      result_reg = remOutOpr();
    }

    if (!ImplicitDiv0Checks) {
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = NULL;
    }
    LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    __ move(result_reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register (not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // do not need to load right, as we can handle stack and constants
    if (x->op() == Bytecodes::_imul ) {
      // check if we can use shift instead
      bool use_constant = false;
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        jint iconst = right_arg->get_jint_constant();
        if (iconst > 0 && iconst < max_jint) {
          if (is_power_of_2(iconst)) {
            use_constant = true;
          } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
            use_constant = true;
            use_tmp = true;
          }
        }
      }
      if (use_constant) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}

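// Variable shifts on x86 take their count in the cl register, which is why
// shiftCountOpr() is rcx and any non-constant count is force-loaded there.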
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  // count must always be in rcx
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  ValueTag elemType = x->type()->tag();
  bool must_load_count = !count.is_constant() || elemType == longTag;
  if (must_load_count) {
    // count for long must be in register
    count.load_item_force(shiftCountOpr());
  } else {
    count.dont_load_item();
  }
  value.load_item();
  LIR_Opr reg = rlock_result(x);

  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  right.load_nonconstant();
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

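// The compare-and-swap family maps onto lock cmpxchg, which implicitly uses
// rax (or rdx:rax for the 32-bit cmpxchg8b) as the expected old value. The
// compare value is therefore force-loaded into rax/long0, and the boolean
// result is recovered from the zero flag with a cmove.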
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == T_OBJECT || type == T_ARRAY) {
    cmp_value.load_item_force(FrameMap::rax_oop_opr);
    new_value.load_item();
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_INT) {
    cmp_value.load_item_force(FrameMap::rax_opr);
    new_value.load_item();
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    cmp_value.load_item_force(FrameMap::long0_opr);
    new_value.load_item_force(FrameMap::long1_opr);
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    Unimplemented();
  }
  LIR_Opr result = new_register(T_INT);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, T_INT);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value2.set_destroys_register();

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default: ShouldNotReachHere();
  }
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");

  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10) {
    do_LibmIntrinsic(x);
    return;
  }

  LIRItem value(x->argument_at(0), this);

  bool use_fpu = false;
  if (UseSSE < 2) {
    value.set_destroys_register();
  }
  value.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_result = rlock_result(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
#ifdef _LP64
  if (UseAVX > 2 && (!VM_Version::supports_avx512vl()) &&
      (x->id() == vmIntrinsics::_dabs)) {
    tmp = new_register(T_DOUBLE);
    __ move(LIR_OprFact::doubleConst(-0.0), tmp);
  }
#endif

  switch(x->id()) {
  case vmIntrinsics::_dabs:  __ abs (calc_input, calc_result, tmp);                     break;
  case vmIntrinsics::_dsqrt: __ sqrt(calc_input, calc_result, LIR_OprFact::illegalOpr); break;
  default: ShouldNotReachHere();
  }

  if (use_fpu) {
    __ move(calc_result, x->operand());
  }
}

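// For the libm-style intrinsics (exp, log, log10, pow, sin, cos, tan) the
// arguments are marshalled through the C calling convention and the work is
// done by a leaf call: a generated stub (StubRoutines::dexp() etc.) when one
// exists, otherwise the portable SharedRuntime C implementation. On 32-bit,
// the result comes back on the x87 stack in fpu0; on 64-bit, in xmm0.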
void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = NULL;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

#ifndef _LP64
  LIR_Opr tmp = FrameMap::fpu0_double_opr;
  result_reg = tmp;
  switch(x->id()) {
  case vmIntrinsics::_dexp:
    if (StubRoutines::dexp() != NULL) {
      __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog:
    if (StubRoutines::dlog() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog10:
    if (StubRoutines::dlog10() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dpow:
    if (StubRoutines::dpow() != NULL) {
      __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dsin:
    if (VM_Version::supports_sse2() && StubRoutines::dsin() != NULL) {
      __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dcos:
    if (VM_Version::supports_sse2() && StubRoutines::dcos() != NULL) {
      __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dtan:
    if (StubRoutines::dtan() != NULL) {
      __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
    }
    break;
  default: ShouldNotReachHere();
  }
#else
  switch (x->id()) {
  case vmIntrinsics::_dexp:
    if (StubRoutines::dexp() != NULL) {
      __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog:
    if (StubRoutines::dlog() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dlog10:
    if (StubRoutines::dlog10() != NULL) {
      __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dpow:
    if (StubRoutines::dpow() != NULL) {
      __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dsin:
    if (StubRoutines::dsin() != NULL) {
      __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dcos:
    if (StubRoutines::dcos() != NULL) {
      __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
    }
    break;
  case vmIntrinsics::_dtan:
    if (StubRoutines::dtan() != NULL) {
      __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
    } else {
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
    }
    break;
  default: ShouldNotReachHere();
  }
#endif // _LP64
  __ move(result_reg, calc_result);
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

#ifndef _LP64
  src.load_item_force     (FrameMap::rcx_oop_opr);
  src_pos.load_item_force (FrameMap::rdx_opr);
  dst.load_item_force     (FrameMap::rax_oop_opr);
  dst_pos.load_item_force (FrameMap::rbx_opr);
  length.load_item_force  (FrameMap::rdi_opr);
  LIR_Opr tmp =           (FrameMap::rsi_opr);
#else

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);
#endif // LP64

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

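// The CRC32 intrinsics below either update the running CRC with a single
// byte in-line (_updateCRC32) or hand a (crc, buffer address, length) triple
// to the StubRoutines::updateBytesCRC32() stub. A constant offset is folded
// into the address displacement; a variable offset stays as an index
// (widened to 64 bits on LP64).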
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

#ifndef _LP64
      if (!is_updateBytes) { // long b raw address
        base_op = new_register(T_INT);
        __ convert(Bytecodes::_l2i, buf.result(), base_op);
      }
#else
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }
#endif

      if (is_updateBytes) {
        base_op = access_resolve(IS_NOT_NULL | ACCESS_READ, base_op);
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  assert(UseVectorizedMismatchIntrinsic, "need AVX instruction support");

  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);

  LIRItem a(x->argument_at(0), this);                   // Object
  LIRItem aOffset(x->argument_at(1), this);             // long
  LIRItem b(x->argument_at(2), this);                   // Object
  LIRItem bOffset(x->argument_at(3), this);             // long
  LIRItem length(x->argument_at(4), this);              // int
  LIRItem log2ArrayIndexScale(x->argument_at(5), this); // int

  a.load_item();
  aOffset.load_nonconstant();
  b.load_item();
  bOffset.load_nonconstant();

  long constant_aOffset = 0;
  LIR_Opr result_aOffset = aOffset.result();
  if (result_aOffset->is_constant()) {
    constant_aOffset = result_aOffset->as_jlong();
    result_aOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_a = access_resolve(ACCESS_READ, a.result());

  long constant_bOffset = 0;
  LIR_Opr result_bOffset = bOffset.result();
  if (result_bOffset->is_constant()) {
    constant_bOffset = result_bOffset->as_jlong();
    result_bOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_b = access_resolve(ACCESS_READ, b.result());

#ifndef _LP64
  result_a = new_register(T_INT);
  __ convert(Bytecodes::_l2i, a.result(), result_a);
  result_b = new_register(T_INT);
  __ convert(Bytecodes::_l2i, b.result(), result_b);
#endif

  LIR_Address* addr_a = new LIR_Address(result_a,
                                        result_aOffset,
                                        constant_aOffset,
                                        T_BYTE);

  LIR_Address* addr_b = new LIR_Address(result_b,
                                        result_bOffset,
                                        constant_bOffset,
                                        T_BYTE);

  BasicTypeList signature(4);
  signature.append(T_ADDRESS);
  signature.append(T_ADDRESS);
  signature.append(T_INT);
  signature.append(T_INT);
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  const LIR_Opr result_reg = result_register_for(x->type());

  LIR_Opr ptr_addr_a = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_a), ptr_addr_a);

  LIR_Opr ptr_addr_b = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_b), ptr_addr_b);

  __ move(ptr_addr_a, cc->at(0));
  __ move(ptr_addr_b, cc->at(1));
  length.load_item_force(cc->at(2));
  log2ArrayIndexScale.load_item_force(cc->at(3));

  __ call_runtime_leaf(StubRoutines::vectorizedMismatch(), getThreadTemp(), result_reg, cc->args());
  __ move(result_reg, result);
}

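// do_Convert drives one lir_convert per conversion bytecode, parameterized
// by four flags: fixed_input/fixed_result mean the value must sit in a
// specific register (see fixed_register_for), round_result means an x87
// 80-bit result must be rounded through memory to get correct float/double
// precision, and needs_stub attaches a ConversionStub for f2i/d2i, where
// out-of-range inputs (e.g. NaN or values beyond int range) need fix-up that
// the truncating convert instruction itself does not provide.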
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
LIR_Opr fixed_register_for(BasicType type) {
  switch (type) {
    case T_FLOAT:  return FrameMap::fpu0_float_opr;
    case T_DOUBLE: return FrameMap::fpu0_double_opr;
    case T_INT:    return FrameMap::rax_opr;
    case T_LONG:   return FrameMap::long0_opr;
    default:       ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
}

void LIRGenerator::do_Convert(Convert* x) {
  // flags that vary for the different operations and different SSE-settings
  bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;

  switch (x->op()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;

    case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2f: fixed_input = false;       fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2f: fixed_input = false;       fixed_result = false;       round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2d: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_f2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_d2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_l2f: fixed_input = false;       fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_l2d: fixed_input = false;       fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break;
    case Bytecodes::_f2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    default: ShouldNotReachHere();
  }

  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  if (fixed_input) {
    conv_input = fixed_register_for(input->type());
    __ move(input, conv_input);
  }

  assert(fixed_result == false || round_result == false, "cannot set both");
  if (fixed_result) {
    conv_result = fixed_register_for(result->type());
  } else if (round_result) {
    result = new_register(result->type());
    set_vreg_flag(result, must_start_in_memory);
  }

  if (needs_stub) {
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }

  __ convert(x->op(), conv_input, conv_result, stub);

  if (result != conv_result) {
    __ move(conv_result, result);
  }

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::rbx_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

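// Multi-dimensional arrays are allocated entirely in the runtime. The
// dimension sizes are stored as int varargs on the stack (4 bytes apart),
// and Runtime1::new_multi_array_id receives the klass, the rank and a
// pointer (a copy of rsp) to that dimension block.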
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::rax_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

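// For float/double comparisons in do_If, the branch is emitted with both a
// true successor and an unordered successor (x->usux()): when either operand
// is NaN the compare is unordered, and control must still reach a correct
// target.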
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    // longs cannot handle constants at right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
                                             x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
#ifdef _LP64
  return FrameMap::as_pointer_opr(r15_thread);
#else
  LIR_Opr result = new_register(T_INT);
  __ get_thread(result);
  return result;
#endif // _LP64
}

void LIRGenerator::trace_block_entry(BlockBegin* block) {
  store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
  LIR_OprList* args = new LIR_OprList();
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}

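// The Java memory model requires volatile longs to be read and written
// atomically even on 32-bit x86, where an ordinary two-word move would not
// be. The two functions below get that atomicity by retyping the access as
// T_DOUBLE and funneling the value through an FPU/XMM register.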
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves. This means
    // the value has to be moved between CPU and FPU registers. It
    // always has to be moved through spill slot since there's no
    // quick way to pack the value into an SSE register.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(value, spill);
    __ volatile_move(spill, temp_double, T_LONG);
    __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
  } else {
    __ store(value, address, info);
  }
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves. This means
    // the value has to be moved between CPU and FPU registers. In
    // SSE0 and SSE1 mode it has to be moved through spill slot but in
    // SSE2+ mode it can be moved directly.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
    if (UseSSE < 2) {
      // no spill slot needed in SSE2 mode because xmm->cpu register move is possible
      set_vreg_flag(result, must_start_in_memory);
    }
  } else {
    __ load(address, result, info);
  }
}