Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/c1/c1_LIRGenerator.cpp
+++ new/src/share/vm/c1/c1_LIRGenerator.cpp
1 1 /*
2 2 * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 # include "incls/_precompiled.incl"
26 26 # include "incls/_c1_LIRGenerator.cpp.incl"
27 27
28 28 #ifdef ASSERT
29 29 #define __ gen()->lir(__FILE__, __LINE__)->
30 30 #else
31 31 #define __ gen()->lir()->
32 32 #endif
33 33
34 34
// Prepare the shared resolver state for a new resolution round.
// Each list is grown to hold max_vregs entries and then truncated to
// length 0: this clears stale contents while keeping the backing
// storage allocated, so the arrays can be reused across blocks.
void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}
44 44
45 45
46 46
47 47 //--------------------------------------------------------------
48 48 // PhiResolver
49 49
50 50 // Resolves cycles:
51 51 //
52 52 // r1 := r2 becomes temp := r1
53 53 // r2 := r1 r1 := r2
54 54 // r2 := temp
55 55 // and orders moves:
56 56 //
57 57 // r2 := r3 becomes r1 := r2
58 58 // r1 := r2 r2 := r3
59 59
// Construct a resolver for one block boundary.  The node/operand lists
// live in the LIRGenerator's shared resolver state (not per-instance),
// so they must be reset here; _temp stays illegal until a cycle is found.
PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}
68 68
69 69
// Emit a single move from src to dest into the generator's LIR list.
void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}
75 75
76 76
// Move the saved cycle value out of the temp register into dest,
// completing a cycle started by move_to_temp().  In debug builds the
// temp is reset to illegal so unmatched calls are caught by asserts.
void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}
82 82
83 83
// Save src into a freshly allocated temp register to break a cycle;
// must be balanced by a later move_temp_to() (asserted via _temp state).
void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}
89 89
90 90
91 91 // Traverse assignment graph in depth first order and generate moves in post order
92 92 // ie. two assignments: b := c, a := b start with node c:
93 93 // Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
94 94 // Generates moves in this order: move b to a and move c to b
95 95 // ie. cycle a := b, b := a start with node a
96 96 // Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
97 97 // Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    // first visit: recurse into all of dest's successors so that moves
    // come out in post order, then fall through to assign dest itself
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected: remember the node closing the cycle and
    // save src's value in a temp register; the cycle is completed below
    // when the recursion unwinds back to _loop
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      // close the detected cycle: dest receives the value saved in temp
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}
122 122
123 123
// The destructor performs the actual resolution: it walks the recorded
// assignment graph and emits all moves in an order that preserves values
// (including cycle breaking via a temp register).
PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate move for move from non virtual register to arbitrary destination
  // (non-virtual sources can never be the target of a move, so no cycle
  // handling is needed here)
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}
145 145
146 146
// Look up or create the ResolveNode for an operand.  Virtual registers
// are canonicalized through vreg_table() so each vreg has exactly one
// node; non-virtual operands (constants, physical registers) get a fresh
// node and may only appear as move sources.
ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}
169 169
170 170
// Record a pending move src -> dest in the assignment graph.  No code is
// emitted here; all moves are ordered and emitted by the destructor.
void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}
179 179
180 180
181 181 //--------------------------------------------------------------
182 182 // LIRItem
183 183
// Bind opr as the result operand of this item's instruction and record
// the vreg -> instruction mapping used elsewhere by the generator.
void LIRItem::set_result(LIR_Opr opr) {
  // an instruction's operand may only be set once (constants may be
  // re-set, e.g. when materialized into a register)
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}
194 194
// Ensure this item's value is available in a register, emitting a move
// from its current location (e.g. a constant) if necessary.
void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the items result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      // constants keep their constant operand on the instruction; only
      // this item's local result is redirected to the register
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}
210 210
211 211
// Load this item so it can be used as the value of a store of the given
// type.  Constants that can be stored directly are left unloaded; byte
// and boolean values go through load_byte_item() (presumably to satisfy
// platform-specific byte-register constraints — defined per platform).
void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      // materialize a constant operand for the value's type
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}
224 224
// Force this item's value into the specific register reg (used when an
// operation requires a fixed register).
void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      LIR_Opr tmp = _gen->force_to_spill(r, reg->type());
      __ move(tmp, reg);
    } else {
      __ move(r, reg);
    }
    _result = reg;
  }
}
238 238
239 239 ciObject* LIRItem::get_jobject_constant() const {
240 240 ObjectType* oc = type()->as_ObjectType();
241 241 if (oc) {
242 242 return oc->constant_value();
243 243 }
244 244 return NULL;
245 245 }
246 246
247 247
248 248 jint LIRItem::get_jint_constant() const {
249 249 assert(is_constant() && value() != NULL, "");
250 250 assert(type()->as_IntConstant() != NULL, "type check");
251 251 return type()->as_IntConstant()->value();
252 252 }
253 253
254 254
255 255 jint LIRItem::get_address_constant() const {
256 256 assert(is_constant() && value() != NULL, "");
257 257 assert(type()->as_AddressConstant() != NULL, "type check");
258 258 return type()->as_AddressConstant()->value();
259 259 }
260 260
261 261
262 262 jfloat LIRItem::get_jfloat_constant() const {
263 263 assert(is_constant() && value() != NULL, "");
264 264 assert(type()->as_FloatConstant() != NULL, "type check");
265 265 return type()->as_FloatConstant()->value();
266 266 }
267 267
268 268
269 269 jdouble LIRItem::get_jdouble_constant() const {
270 270 assert(is_constant() && value() != NULL, "");
271 271 assert(type()->as_DoubleConstant() != NULL, "type check");
272 272 return type()->as_DoubleConstant()->value();
273 273 }
274 274
275 275
276 276 jlong LIRItem::get_jlong_constant() const {
277 277 assert(is_constant() && value() != NULL, "");
278 278 assert(type()->as_LongConstant() != NULL, "type check");
279 279 return type()->as_LongConstant()->value();
280 280 }
281 281
282 282
283 283
284 284 //--------------------------------------------------------------
285 285
286 286
// One-time setup: cache the heap's barrier set for use when emitting
// GC write barriers.
void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}
290 290
291 291
// Per-block setup: create the block's LIR list and emit its label.
void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  // optionally emit tracing code; skipped for the start block and for
  // exception entries (their entry code must come first)
  if (LIRTraceExecution &&
      Compilation::current_compilation()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}
313 313
314 314
// Per-block teardown: drop per-block constant state so following blocks
// start clean.
void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}
333 333
334 334
// Generate LIR for one basic block: run the prolog, visit every pinned
// (root) instruction in order, then run the epilog.  Unpinned
// instructions are generated on demand via walk() from their uses.
void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}
348 348
349 349
350 350 //-------------------------LIRGenerator-----------------------------
351 351
352 352 // This is where the tree-walk starts; instr must be root;
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  // record the instruction for debug info / bailout reporting
  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  // dispatch to the do_* visitor for this instruction kind
  instr->visit(this);

  // after visiting, any instruction with uses must have produced a valid
  // operand (constants are materialized lazily; bailout exempts the rest)
  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}
366 366
367 367
368 368 // This is called for each node in tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop walk when encounter a root (note: && binds tighter than ||, so
  // this reads: (pinned non-phi) or (operand already valid))
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}
380 380
381 381
// Build a CodeEmitInfo snapshot of the given ValueStack for instruction x.
// All stack and live local values are forced to be evaluated (walked) so
// they have valid operands; dead locals are invalidated so linear scan
// can assume every non-NULL local is live.  Exception handlers are
// omitted when ignore_xhandler is true.
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  int index;
  Value value;
  // evaluate everything on the expression stack (constants and locals
  // are produced lazily and need no eager evaluation)
  for_each_stack_value(state, index, value) {
    assert(value->subst() == value, "missed substition");
    if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
      walk(value);
      assert(value->operand()->is_valid(), "must be evaluated now");
    }
  }
  // walk the state chain through all inlined scopes, filtering locals by
  // liveness at the bci belonging to each scope
  ValueStack* s = state;
  int bci = x->bci();
  for_each_state(s) {
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substition");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
    // continue in the caller's scope at the call site's bci
    bci = scope->caller_bci();
  }

  return new CodeEmitInfo(x->bci(), state, ignore_xhandler ? NULL : x->exception_handlers());
}
430 430
431 431
// Convenience overload: snapshot x's lock stack with exception handlers.
CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->lock_stack());
}
435 435
436 436
// Load the oop constant for obj into r.  If the class is not yet loaded
// (or PatchALot is stressing the patching path) a patchable load is
// emitted that the runtime fixes up on first execution.
void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ oop2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->constant_encoding(), r);
  }
}
446 446
447 447
// Emit an array bounds check: compare index against the array's length
// field and branch to a RangeCheckStub on failure.  The unsigned
// conditions (belowEqual/aboveEqual) also catch negative indices.
// The null_check_info (if set) makes the length load act as the null check.
void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                    CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    // constant index: compare length (memory) against the constant
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}
461 461
462 462
// Emit a java.nio.Buffer range check: compare index against the buffer's
// limit field, branch to a RangeCheckStub on failure, and copy index to
// result on success.
void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}
475 475
476 476
477 477 // increment a counter returning the incremented value
// Load the int counter at base+offset, add increment, store it back, and
// return a register holding the incremented value.  Note: plain
// load/add/store, so the update is not atomic.
LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) {
  LIR_Address* counter = new LIR_Address(base, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, LIR_OprFact::intConst(increment), result);
  __ store(result, counter);
  return result;
}
486 486
487 487
// Emit the LIR for a two-operand arithmetic bytecode.  Handles the
// two-operand-form constraint (x86-style: result must equal left),
// strictfp double mul/div, and strength reduction of integer multiply
// by constants.  ldiv/lrem/idiv/irem are not handled here (runtime call
// or platform code).  tmp_op is only used by strictfp ops and strength
// reduction; info is currently unused in this method.
void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op = left;
  LIR_Opr right_op = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    // satisfy the two-operand constraint: move left into result first
    // and operate on result in place
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        // double multiply may need strictfp rounding (tmp_op scratch)
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            // platform-specific shift/add decomposition for other constants
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        // double divide may need strictfp rounding (tmp_op scratch)
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}
562 562
563 563
// Integer arithmetic: never strictfp; tmp used for strength reduction.
void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}
567 567
568 568
// Long arithmetic: never strictfp and needs no tmp; info is forwarded.
void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}
572 572
573 573
// Floating-point arithmetic: strictfp flag and tmp are caller-provided.
void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}
577 577
578 578
// Emit the LIR for a shift bytecode (int or long, left/right/unsigned-
// right), honoring the two-operand-form constraint like arithmetic_op.
void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    // result must equal the first input: move value into result first
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}
597 597
598 598
// Emit the LIR for a bitwise logic bytecode (and/or/xor, int or long),
// honoring the two-operand-form constraint like arithmetic_op.
void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    // result must equal the first input: move left into result first
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor: __ logical_or(left_op, right_op, result_op); break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}
619 619
620 620
// Emit a monitorenter: compute the stack address of monitor slot
// monitor_no into lock, then emit the locking sequence with a slow-path
// stub.  No-op when synchronization code generation is disabled.
void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}
629 629
630 630
// Emit a monitorexit for monitor slot monitor_no.  The incoming lock
// operand is reused as the header register; new_hdr becomes the lock
// address register.  No-op when synchronization code is disabled.
void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, slow_path);
}
640 640
641 641
// Emit allocation of a new instance of klass into dst.  Fast inline
// allocation is used when the klass is loaded and its layout permits;
// otherwise (or when the stub must run, e.g. finalizers unknown) the
// allocation goes through the slow-path runtime stub.
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  jobject2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    // init-check variant of the stub is used when the klass may not yet
    // be initialized at run time
    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    // no fast path: branch unconditionally into the runtime stub
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}
664 664
665 665
666 666 static bool is_constant_zero(Instruction* inst) {
667 667 IntConstant* c = inst->type()->as_IntConstant();
668 668 if (c) {
669 669 return (c->value() == 0);
670 670 }
671 671 return false;
672 672 }
673 673
674 674
675 675 static bool positive_constant(Instruction* inst) {
676 676 IntConstant* c = inst->type()->as_IntConstant();
677 677 if (c) {
678 678 return (c->value() >= 0);
679 679 }
680 680 return false;
681 681 }
682 682
683 683
684 684 static ciArrayKlass* as_array_klass(ciType* type) {
685 685 if (type != NULL && type->is_array_klass() && type->is_loaded()) {
686 686 return (ciArrayKlass*)type;
687 687 } else {
688 688 return NULL;
689 689 }
690 690 }
691 691
// Analyze an arraycopy intrinsic call: infer the most likely array klass
// for src/dst and compute which of the LIR_OpArrayCopy runtime checks
// (null, positive-index, range, type) can be statically elided.
// Results are returned through *flagsp and *expected_typep.
void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  // argument layout matches System.arraycopy(src, srcPos, dst, dstPos, length)
  Instruction* src = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false;
  {
    ciArrayKlass* src_exact_type = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    ciArrayKlass* dst_exact_type = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      // object-array destination: the copy is statically type-safe when
      // the source element type is a subtype of the destination's
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;
  if (expected_type != NULL) {
    // try to skip null checks (a freshly allocated array is never null)
    if (src->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::src_null_check;
    if (dst->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::dst_null_check;

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}
783 783
784 784
// Round a single-precision FPU result by forcing it through memory when
// RoundFPResults is on and SSE is not in use (presumably to narrow the
// extended x87 precision — platform-dependent, confirm per port).
// Returns the original operand when no rounding is required.
LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    // must_start_in_memory forces a store/reload, realizing the rounding
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}
798 798
799 799
// Force value through a stack slot typed as t (same size as value's
// type) and return the spill-backed operand.  Used to move bits between
// registers of different types via memory.
LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()], "size mismatch");
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}
817 817
818 818
// Emit branch profiling for a two-way If: conditionally select the
// taken/not-taken counter offset in the methodDataOop and increment the
// chosen counter.  Bails out if no method data is available.
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data();
    if (md == NULL) {
      bailout("out of memory building methodDataOop");
      return;
    }
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    LIR_Opr md_reg = new_register(T_OBJECT);
    __ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg);
    LIR_Opr data_offset_reg = new_register(T_INT);
    // pick the counter offset based on the branch condition's outcome
    __ cmove(lir_cond(cond),
             LIR_OprFact::intConst(taken_count_offset),
             LIR_OprFact::intConst(not_taken_count_offset),
             data_offset_reg);
    LIR_Opr data_reg = new_register(T_INT);
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
    __ move(LIR_OprFact::address(data_addr), data_reg);
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    // Use leal instead of add to avoid destroying condition codes on x86
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, LIR_OprFact::address(data_addr));
  }
}
849 849
850 850
851 851 // Phi technique:
852 852 // This is about passing live values from one basic block to the other.
853 853 // In code generated with Java it is rather rare that more than one
854 854 // value is on the stack from one basic block to the other.
855 855 // We optimize our technique for efficient passing of one value
856 856 // (of type long, int, double..) but it can be extended.
857 857 // When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
859 859 // and spill slots to pass the live values from one block
860 860 // to the other. The topmost value, i.e., the value on TOS of expression
861 861 // stack is passed in registers. All other values are stored in spilling
862 862 // area. Every Phi has an index which designates its spill slot
863 863 // At exit of a basic block, we fill the register(s) and spill slots.
864 864 // At entry of a basic block, the block_prolog sets up the content of phi nodes
865 865 // and locks necessary registers and spilling slots.
866 866
867 867
868 868 // move current value to referenced phi function
869 869 void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
870 870 Phi* phi = sux_val->as_Phi();
871 871 // cur_val can be null without phi being null in conjunction with inlining
872 872 if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
873 873 LIR_Opr operand = cur_val->operand();
874 874 if (cur_val->operand()->is_illegal()) {
875 875 assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
876 876 "these can be produced lazily");
877 877 operand = operand_for_instruction(cur_val);
878 878 }
879 879 resolver->move(operand, operand_for_instruction(phi));
880 880 }
881 881 }
882 882
883 883
// Moves all stack values into their PHI position
//
// At a block exit with exactly one successor that has multiple predecessors,
// every live stack slot and local must be copied into the virtual register
// of the corresponding phi function of the successor.  The PhiResolver
// orders the moves so that no source is overwritten before it is read.
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      // headroom of two vregs per phi for the resolver's temporaries
      // (presumably for breaking move cycles - confirm in PhiResolver)
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      // stack slots correspond positionally between this state and the
      // successor's state
      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      // Inlining may cause the local state not to match up, so walk up
      // the caller state until we get to the same scope as the
      // successor and then start processing from there.
      while (cur_state->scope() != sux_state->scope()) {
        cur_state = cur_state->caller_state();
        assert(cur_state != NULL, "scopes don't match up");
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}
920 920
921 921
// Allocates the next free virtual register of the given type.  On exhaustion
// it requests a bailout; since the bailout is only acted on periodically, the
// counter is wrapped back to vreg_base shortly before the hard limit so that
// compilation can limp along until the bailout takes effect.
LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  // addresses are represented as plain ints at the LIR level
  if (type == T_ADDRESS) type = T_INT;
  return LIR_OprFact::virtual_register(vreg, type);
}
939 939
940 940
// Try to lock using register in hint
// NOTE(review): no register hint is actually consulted here - this simply
// allocates a fresh virtual register for the instruction's type.
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}
945 945
946 946
// does an rlock and sets result
// Allocates a result register for x, records it as x's operand, and returns it.
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}
953 953
954 954
955 955 // does an rlock and sets result
956 956 LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
957 957 LIR_Opr reg;
958 958 switch (type) {
959 959 case T_BYTE:
960 960 case T_BOOLEAN:
961 961 reg = rlock_byte(type);
962 962 break;
963 963 default:
964 964 reg = rlock(x);
965 965 break;
966 966 }
967 967
968 968 set_result(x, reg);
969 969 return reg;
970 970 }
971 971
972 972
973 973 //---------------------------------------------------------------------
974 974 ciObject* LIRGenerator::get_jobject_constant(Value value) {
975 975 ObjectType* oc = value->type()->as_ObjectType();
976 976 if (oc) {
977 977 return oc->constant_value();
978 978 }
979 979 return NULL;
980 980 }
981 981
982 982
// Materializes the incoming exception oop at the start of an exception
// handler: reads it out of the thread-local exception fields, clears those
// fields, and publishes the oop as this instruction's result.
void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the begin of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  // load the pending exception oop from the thread, then clear both the
  // oop and pc fields so the exception is not re-delivered
  LIR_Opr thread_reg = getThreadPointer();
  __ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
          exceptionOopOpr());
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  // copy out of the fixed exception register into a fresh vreg
  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}
1004 1004
1005 1005
1006 1006 //----------------------------------------------------------------------
1007 1007 //----------------------------------------------------------------------
1008 1008 //----------------------------------------------------------------------
1009 1009 //----------------------------------------------------------------------
1010 1010 // visitor functions
1011 1011 //----------------------------------------------------------------------
1012 1012 //----------------------------------------------------------------------
1013 1013 //----------------------------------------------------------------------
1014 1014 //----------------------------------------------------------------------
1015 1015
// Phi operands are assigned by move_to_phi()/operand_for_instruction(),
// never by direct visitation.
void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}
1020 1020
1021 1021
// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
// Three cases:
//   1. the constant carries a ValueStack  -> it needs patching, emit it now
//   2. used often and not inlinable       -> materialize it in a register
//   3. otherwise                          -> record only the constant operand
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      // pinned constant: reuse its operand if one was already assigned
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        // still a constant operand - force it into a fresh result register
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    // cheap or rarely used: leave it as a constant operand, no code emitted
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}
1052 1052
1053 1053
// Locals need only a virtual register assignment, no code.
void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}
1059 1059
1060 1060
// IfInstanceOf is not generated by the current front end.
void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}
1064 1064
1065 1065
// Emits a method return: optionally fires the dtrace method-exit probe, then
// forces the return value (if any) into the calling convention's result
// register and emits the return op.
void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    // call SharedRuntime::dtrace_method_exit(thread, methodOop)
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    // the value must end up in the platform's callee result register
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}
1090 1090
1091 1091
// Example: object.getClass ()
// Intrinsic for Object.getClass(): loads the klass pointer from the receiver
// header and then the java mirror out of the klass.
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x, x->state()->copy_locks());
  }
  // implicit null check folded into this klass load (via info)
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
                          klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}
1109 1109
1110 1110
// Example: Thread.currentThread()
// Intrinsic for Thread.currentThread(): loads the java.lang.Thread oop out
// of the current JavaThread.
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}
1117 1117
1118 1118
// Intrinsic for Object.<init>'s finalizer registration: calls the
// register_finalizer runtime stub with the receiver.
void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  // runtime call can deoptimize/throw, so it needs debug info
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}
1135 1135
1136 1136
1137 1137 //------------------------local access--------------------------------------
1138 1138
1139 1139 LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1140 1140 if (x->operand()->is_illegal()) {
1141 1141 Constant* c = x->as_Constant();
1142 1142 if (c != NULL) {
1143 1143 x->set_operand(LIR_OprFact::value_type(c->type()));
1144 1144 } else {
1145 1145 assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
1146 1146 // allocate a virtual register for this local or phi
1147 1147 x->set_operand(rlock(x));
1148 1148 _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
1149 1149 }
1150 1150 }
1151 1151 return x->operand();
1152 1152 }
1153 1153
1154 1154
1155 1155 Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
1156 1156 if (opr->is_virtual()) {
1157 1157 return instruction_for_vreg(opr->vreg_number());
1158 1158 }
1159 1159 return NULL;
1160 1160 }
1161 1161
1162 1162
1163 1163 Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1164 1164 if (reg_num < _instruction_for_operand.length()) {
1165 1165 return _instruction_for_operand.at(reg_num);
1166 1166 }
1167 1167 return NULL;
1168 1168 }
1169 1169
1170 1170
// Sets flag f for the given virtual register number.
void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  // lazily allocate the 2-D bitmap on first use; the initial capacity of
  // 100 vregs is grown on demand by at_put_grow below
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}
1179 1179
1180 1180 bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1181 1181 if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1182 1182 return false;
1183 1183 }
1184 1184 return _vreg_flags.at(vreg_num, f);
1185 1185 }
1186 1186
1187 1187
1188 1188 // Block local constant handling. This code is useful for keeping
1189 1189 // unpinned constants and constants which aren't exposed in the IR in
1190 1190 // registers. Unpinned Constant instructions have their operands
1191 1191 // cleared when the block is finished so that other blocks can't end
1192 1192 // up referring to their registers.
1193 1193
// Materializes an unpinned constant into a (possibly shared) register and
// remembers the instruction so its operand can be cleared when the block ends.
LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}
1199 1199
1200 1200
// Returns a register holding constant c, reusing the register of an equal
// constant already materialized in this block when possible; otherwise emits
// a move into a fresh register and caches it.
LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      // compare by bits so e.g. float NaNs with identical bit patterns match;
      // `continue` advances the outer candidate loop on mismatch
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      // NOTE(review): other types fall through with no value comparison and
      // would be treated as equal on type alone - presumably only the types
      // above ever reach this cache; confirm against callers
      }
      return _reg_for_constants.at(i);
    }
  }

  // not cached yet: emit the load and remember it for later reuse
  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}
1230 1230
1231 1231 // Various barriers
1232 1232
// Emits the GC pre-write barrier appropriate for the active barrier set.
// Only the G1 SATB barrier sets require any pre-write work; card-table and
// plain mod-ref barrier sets need nothing before the store.
void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default      :
      ShouldNotReachHere();

  }
}
1255 1255
// Emits the GC post-write barrier appropriate for the active barrier set
// after an oop store: G1 remembered-set logging or card-table dirtying.
void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr,  new_val);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr,  new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default      :
      ShouldNotReachHere();
  }
}
1276 1276
1277 1277 ////////////////////////////////////////////////////////////////////////
1278 1278 #ifndef SERIALGC
1279 1279
1280 1280 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
1281 1281 if (G1DisablePreBarrier) return;
1282 1282
1283 1283 // First we test whether marking is in progress.
1284 1284 BasicType flag_type;
1285 1285 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
1286 1286 flag_type = T_INT;
1287 1287 } else {
1288 1288 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
1289 1289 "Assumption");
1290 1290 flag_type = T_BYTE;
1291 1291 }
1292 1292 LIR_Opr thrd = getThreadPointer();
1293 1293 LIR_Address* mark_active_flag_addr =
1294 1294 new LIR_Address(thrd,
1295 1295 in_bytes(JavaThread::satb_mark_queue_offset() +
1296 1296 PtrQueue::byte_offset_of_active()),
1297 1297 flag_type);
1298 1298 // Read the marking-in-progress flag.
1299 1299 LIR_Opr flag_val = new_register(T_INT);
1300 1300 __ load(mark_active_flag_addr, flag_val);
1301 1301
1302 1302 LabelObj* start_store = new LabelObj();
1303 1303
1304 1304 LIR_PatchCode pre_val_patch_code =
1305 1305 patch ? lir_patch_normal : lir_patch_none;
1306 1306
1307 1307 LIR_Opr pre_val = new_register(T_OBJECT);
1308 1308
1309 1309 __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
1310 1310 if (!addr_opr->is_address()) {
1311 1311 assert(addr_opr->is_register(), "must be");
1312 1312 addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT));
1313 1313 }
1314 1314 CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
1315 1315 info);
1316 1316 __ branch(lir_cond_notEqual, T_INT, slow);
1317 1317 __ branch_destination(slow->continuation());
1318 1318 }
1319 1319
1320 1320 void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1321 1321 if (G1DisablePostBarrier) return;
1322 1322
1323 1323 // If the "new_val" is a constant NULL, no barrier is necessary.
1324 1324 if (new_val->is_constant() &&
1325 1325 new_val->as_constant_ptr()->as_jobject() == NULL) return;
1326 1326
1327 1327 if (!new_val->is_register()) {
1328 1328 LIR_Opr new_val_reg = new_pointer_register();
1329 1329 if (new_val->is_constant()) {
1330 1330 __ move(new_val, new_val_reg);
1331 1331 } else {
1332 1332 __ leal(new_val, new_val_reg);
1333 1333 }
1334 1334 new_val = new_val_reg;
1335 1335 }
1336 1336 assert(new_val->is_register(), "must be a register at this point");
1337 1337
1338 1338 if (addr->is_address()) {
1339 1339 LIR_Address* address = addr->as_address_ptr();
1340 1340 LIR_Opr ptr = new_pointer_register();
1341 1341 if (!address->index()->is_valid() && address->disp() == 0) {
1342 1342 __ move(address->base(), ptr);
1343 1343 } else {
1344 1344 assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1345 1345 __ leal(addr, ptr);
1346 1346 }
1347 1347 addr = ptr;
1348 1348 }
1349 1349 assert(addr->is_register(), "must be a register at this point");
1350 1350
1351 1351 LIR_Opr xor_res = new_pointer_register();
1352 1352 LIR_Opr xor_shift_res = new_pointer_register();
1353 1353
1354 1354 if (TwoOperandLIRForm ) {
1355 1355 __ move(addr, xor_res);
1356 1356 __ logical_xor(xor_res, new_val, xor_res);
1357 1357 __ move(xor_res, xor_shift_res);
1358 1358 __ unsigned_shift_right(xor_shift_res,
1359 1359 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1360 1360 xor_shift_res,
1361 1361 LIR_OprDesc::illegalOpr());
1362 1362 } else {
1363 1363 __ logical_xor(addr, new_val, xor_res);
1364 1364 __ unsigned_shift_right(xor_res,
1365 1365 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1366 1366 xor_shift_res,
1367 1367 LIR_OprDesc::illegalOpr());
1368 1368 }
1369 1369
1370 1370 if (!new_val->is_register()) {
1371 1371 LIR_Opr new_val_reg = new_pointer_register();
1372 1372 __ leal(new_val, new_val_reg);
1373 1373 new_val = new_val_reg;
1374 1374 }
1375 1375 assert(new_val->is_register(), "must be a register at this point");
1376 1376
1377 1377 __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
1378 1378
1379 1379 CodeStub* slow = new G1PostBarrierStub(addr, new_val);
1380 1380 __ branch(lir_cond_notEqual, T_INT, slow);
1381 1381 __ branch_destination(slow->continuation());
1382 1382 }
1383 1383
1384 1384 #endif // SERIALGC
1385 1385 ////////////////////////////////////////////////////////////////////////
1386 1386
// Card-table post-write barrier: dirties the card covering addr by storing 0
// into byte_map_base[addr >> card_shift].
void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  // flatten an address operand into a plain pointer register
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  // compute the card index: addr >> card_shift
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  // dirty the card: store 0 at byte_map_base + card index; the base is
  // either folded into the address as a displacement or loaded via the
  // block-local constant cache
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
}
1420 1420
1421 1421
1422 1422 //------------------------field access--------------------------------------
1423 1423
// Comment copied from templateTable_i486.cpp
1425 1425 // ----------------------------------------------------------------------------
1426 1426 // Volatile variables demand their effects be made known to all CPU's in
1427 1427 // order. Store buffers on most chips allow reads & writes to reorder; the
1428 1428 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1429 1429 // memory barrier (i.e., it's not sufficient that the interpreter does not
1430 1430 // reorder volatile references, the hardware also must not reorder them).
1431 1431 //
1432 1432 // According to the new Java Memory Model (JMM):
1433 1433 // (1) All volatiles are serialized wrt to each other.
// ALSO reads & writes act as acquire & release, so:
1435 1435 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1436 1436 // the read float up to before the read. It's OK for non-volatile memory refs
1437 1437 // that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1439 1439 // that happen BEFORE the write float down to after the write. It's OK for
1440 1440 // non-volatile memory refs that happen after the volatile write to float up
1441 1441 // before it.
1442 1442 //
1443 1443 // We only put in barriers around volatile refs (they are expensive), not
1444 1444 // _between_ memory refs (that would require us to track the flavor of the
1445 1445 // previous memory refs). Requirements (2) and (3) require some barriers
1446 1446 // before volatile stores and after volatile loads. These nearly cover
1447 1447 // requirement (1) but miss the volatile-store-volatile-load case. This final
1448 1448 // case is placed after volatile-stores although it could just as well go
1449 1449 // before volatile-loads.
1450 1450
1451 1451
// Generates code for a field store, handling null checks, field patching
// (class not yet loaded), volatile memory barriers, and GC write barriers
// for oop stores.  Emission order is significant: release membar before a
// volatile store, pre-barrier before the store, post-barrier after it,
// trailing membar after a volatile store.
void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);

  // debug info: patching needs the state before the store; otherwise an
  // (implicit or explicit) null check may need a state to deopt/throw from
  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x, x->lock_stack());
    } else {
      info = state_for(nc);
    }
  }


  LIRItem object(x->obj(), this);
  LIRItem value(x->value(), this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

  if (PrintNotLoaded && needs_patching) {
    tty->print_cr(" ###class not loaded at store_%s bci %d",
                  x->is_static() ? "static" : "field", x->bci());
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), max_jint, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  // JMM: release barrier before a volatile store
  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  if (is_oop) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(address),
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }

  if (is_volatile) {
    assert(!needs_patching && x->is_loaded(),
           "how do we know it's volatile if it's not loaded");
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }

  if (is_oop) {
    // Store to object so mark the card of the header
    post_barrier(object.result(), value.result());
  }

  // JMM: full barrier after a volatile store covers the
  // volatile-store-volatile-load case (see comment block above)
  if (is_volatile && os::is_MP()) {
    __ membar();
  }
}
1545 1545
1546 1546
// Generates code for a field load, handling null checks, field patching
// (class not yet loaded), and the acquire barrier required after a volatile
// load.
void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  // debug info: patching needs the state before the load; otherwise an
  // (implicit or explicit) null check may need a state to deopt/throw from
  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x, x->lock_stack());
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);

  object.load_item();

  if (PrintNotLoaded && needs_patching) {
    tty->print_cr(" ###class not loaded at load_%s bci %d",
                  x->is_static() ? "static" : "field", x->bci());
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Opr reg = rlock_result(x, field_type);
  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), max_jint, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile) {
    assert(!needs_patching && x->is_loaded(),
           "how do we know it's volatile if it's not loaded");
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }

  // JMM: acquire barrier after a volatile load (see comment block above)
  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }
}
1606 1606
1607 1607
1608 1608 //------------------------java.nio.Buffer.checkIndex------------------------
1609 1609
// int java.nio.Buffer.checkIndex(int)
// Intrinsic: compares the index against the buffer's limit field and
// branches to a RangeCheckStub when it is out of bounds; the checked index
// is the result.  With range checks disabled it degenerates to a move.
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem buf  (x->argument_at(0), this);
  LIRItem index(x->argument_at(1), this);
  buf.load_item();
  index.load_item();

  LIR_Opr result = rlock_result(x);
  if (GenerateRangeChecks) {
    CodeEmitInfo* info = state_for(x);
    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
    if (index.result()->is_constant()) {
      // constant index: compare it directly against the limit field
      // (unsigned belowEqual also catches limit < 0)
      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
      __ branch(lir_cond_belowEqual, T_INT, stub);
    } else {
      // unsigned aboveEqual catches both index >= limit and index < 0
      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
                  java_nio_Buffer::limit_offset(), T_INT, info);
      __ branch(lir_cond_aboveEqual, T_INT, stub);
    }
    __ move(index.result(), result);
  } else {
    // Just load the index into the result register
    __ move(index.result(), result);
  }
}
1639 1639
1640 1640
1641 1641 //------------------------array access--------------------------------------
1642 1642
1643 1643
1644 1644 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1645 1645 LIRItem array(x->array(), this);
1646 1646 array.load_item();
1647 1647 LIR_Opr reg = rlock_result(x);
1648 1648
1649 1649 CodeEmitInfo* info = NULL;
1650 1650 if (x->needs_null_check()) {
1651 1651 NullCheck* nc = x->explicit_null_check();
1652 1652 if (nc == NULL) {
1653 1653 info = state_for(x);
1654 1654 } else {
1655 1655 info = state_for(nc);
1656 1656 }
1657 1657 }
1658 1658 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1659 1659 }
1660 1660
1661 1661
void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
  // An explicit length operand may be attached to the instruction
  // (presumably provided by an earlier optimization pass — it is only
  // used here for the range check).
  bool use_length = x->length() != NULL;
  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem length(this);
  bool needs_range_check = true;

  if (use_length) {
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }

  array.load_item();
  if (index.is_constant() && can_inline_as_constant(x->index())) {
    // let it be a constant
    index.dont_load_item();
  } else {
    index.load_item();
  }

  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != NULL) {
      null_check_info = state_for(nc);
    } else {
      // No explicit check instruction: share the range check's state.
      null_check_info = range_check_info;
    }
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      // constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

  // The element load itself; any remaining null_check_info makes this
  // load double as the implicit null check.
  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
}
1714 1714
1715 1715
1716 1716 void LIRGenerator::do_NullCheck(NullCheck* x) {
1717 1717 if (x->can_trap()) {
1718 1718 LIRItem value(x->obj(), this);
1719 1719 value.load_item();
1720 1720 CodeEmitInfo* info = state_for(x);
1721 1721 __ null_check(value.result(), info);
1722 1722 }
1723 1723 }
1724 1724
1725 1725
void LIRGenerator::do_Throw(Throw* x) {
  // Load the exception oop and build debug info for the throw site.
  LIRItem exception(x->exception(), this);
  exception.load_item();
  set_no_result(x);
  LIR_Opr exception_opr = exception.result();
  CodeEmitInfo* info = state_for(x, x->state());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    increment_counter(Runtime1::throw_count_address());
  }
#endif

  // check if the instruction has an xhandler in any of the nested scopes
  bool unwind = false;
  if (info->exception_handlers()->length() == 0) {
    // this throw is not inside an xhandler
    unwind = true;
  } else {
    // get some idea of the throw type
    bool type_is_exact = true;
    ciType* throw_type = x->exception()->exact_type();
    if (throw_type == NULL) {
      type_is_exact = false;
      throw_type = x->exception()->declared_type();
    }
    if (throw_type != NULL && throw_type->is_instance_klass()) {
      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
      // If no enclosing handler could possibly catch this type, unwind
      // directly instead of dispatching through the handlers.
      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
    }
  }

  // do null check before moving exception oop into fixed register
  // to avoid a fixed interval with an oop during the null check.
  // Use a copy of the CodeEmitInfo because debug information is
  // different for null_check and throw.
  if (GenerateCompilerNullChecks &&
      (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
    // if the exception object wasn't created using new then it might be null.
    __ null_check(exception_opr, new CodeEmitInfo(info, true));
  }

  if (compilation()->env()->jvmti_can_post_on_exceptions() &&
      !block()->is_set(BlockBegin::default_exception_handler_flag)) {
    // we need to go through the exception lookup path to get JVMTI
    // notification done
    unwind = false;
  }

  assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind,
         "should be no more handlers to dispatch to");

  if (compilation()->env()->dtrace_method_probes() &&
      block()->is_set(BlockBegin::default_exception_handler_flag)) {
    // notify that this frame is unwinding
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  // move exception oop into fixed register
  __ move(exception_opr, exceptionOopOpr());

  // Either unwind straight out of the method or dispatch to handlers.
  if (unwind) {
    __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
  } else {
    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
  }
}
1801 1801
1802 1802
1803 1803 void LIRGenerator::do_RoundFP(RoundFP* x) {
1804 1804 LIRItem input(x->input(), this);
1805 1805 input.load_item();
1806 1806 LIR_Opr input_opr = input.result();
1807 1807 assert(input_opr->is_register(), "why round if value is not in a register?");
1808 1808 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
1809 1809 if (input_opr->is_single_fpu()) {
1810 1810 set_result(x, round_item(input_opr)); // This code path not currently taken
1811 1811 } else {
1812 1812 LIR_Opr result = new_register(T_DOUBLE);
1813 1813 set_vreg_flag(result, must_start_in_memory);
1814 1814 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
1815 1815 set_result(x, result);
1816 1816 }
1817 1817 }
1818 1818
void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
  // Raw memory read at base (+ optionally index << log2_scale); no null
  // or range checks are emitted.
  LIRItem base(x->base(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_nonconstant();
  }

  LIR_Opr reg = rlock_result(x, x->basic_type());

  int log2_scale = 0;
  if (x->has_index()) {
    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
    log2_scale = x->log2_scale();
  }

  assert(!x->has_index() || idx.value() == x->index(), "should match");

  LIR_Opr base_op = base.result();
#ifndef _LP64
  // On 32-bit VMs a long base pointer is narrowed to an int register.
  if (x->base()->type()->tag() == longTag) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  } else {
    assert(x->base()->type()->tag() == intTag, "must be");
  }
#endif

  BasicType dst_type = x->basic_type();
  LIR_Opr index_op = idx.result();

  LIR_Address* addr;
  if (index_op->is_constant()) {
    assert(log2_scale == 0, "must not have a scale");
    addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
  } else {
#ifdef X86
#ifdef _LP64
    // Widen a 32-bit index before using it in a 64-bit address.
    if (!index_op->is_illegal() && index_op->type() == T_INT) {
      LIR_Opr tmp = new_pointer_register();
      __ convert(Bytecodes::_i2l, index_op, tmp);
      index_op = tmp;
    }
#endif
    // x86 addressing modes can encode the scale directly.
    addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#else
    if (index_op->is_illegal() || log2_scale == 0) {
#ifdef _LP64
      if (!index_op->is_illegal() && index_op->type() == T_INT) {
        LIR_Opr tmp = new_pointer_register();
        __ convert(Bytecodes::_i2l, index_op, tmp);
        index_op = tmp;
      }
#endif
      addr = new LIR_Address(base_op, index_op, dst_type);
    } else {
      // No scaled addressing mode available: pre-shift the index.
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index_op, log2_scale, tmp);
      addr = new LIR_Address(base_op, tmp, dst_type);
    }
#endif
  }

  // 64-bit values at raw addresses may be unaligned.
  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
    __ unaligned_move(addr, reg);
  } else {
    __ move(addr, reg);
  }
}
1890 1890
1891 1891
void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
  // Raw memory write at base (+ optionally index << log2_scale); no
  // null or range checks are emitted.
  int log2_scale = 0;
  BasicType type = x->basic_type();

  if (x->has_index()) {
    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
    log2_scale = x->log2_scale();
  }

  LIRItem base(x->base(), this);
  LIRItem value(x->value(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_item();
  }

  if (type == T_BYTE || type == T_BOOLEAN) {
    // Byte-sized values go through load_byte_item (platform register
    // constraints for byte stores — see LIRItem).
    value.load_byte_item();
  } else {
    value.load_item();
  }

  set_no_result(x);

  LIR_Opr base_op = base.result();
#ifndef _LP64
  // On 32-bit VMs a long base pointer is narrowed to an int register.
  if (x->base()->type()->tag() == longTag) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  } else {
    assert(x->base()->type()->tag() == intTag, "must be");
  }
#endif

  LIR_Opr index_op = idx.result();
  if (log2_scale != 0) {
    // temporary fix (platform dependent code without shift on Intel would be better)
    index_op = new_pointer_register();
#ifdef _LP64
    if(idx.result()->type() == T_INT) {
      __ convert(Bytecodes::_i2l, idx.result(), index_op);
    } else {
#endif
      __ move(idx.result(), index_op);
#ifdef _LP64
    }
#endif
    __ shift_left(index_op, log2_scale, index_op);
  }
#ifdef _LP64
  else if(!index_op->is_illegal() && index_op->type() == T_INT) {
    // Widen an unscaled 32-bit index before using it in a 64-bit address.
    LIR_Opr tmp = new_pointer_register();
    __ convert(Bytecodes::_i2l, index_op, tmp);
    index_op = tmp;
  }
#endif

  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
  __ move(value.result(), addr);
}
1955 1955
1956 1956
1957 1957 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
1958 1958 BasicType type = x->basic_type();
1959 1959 LIRItem src(x->object(), this);
1960 1960 LIRItem off(x->offset(), this);
1961 1961
1962 1962 off.load_item();
1963 1963 src.load_item();
1964 1964
1965 1965 LIR_Opr reg = reg = rlock_result(x, x->basic_type());
1966 1966
1967 1967 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
1968 1968 get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
1969 1969 if (x->is_volatile() && os::is_MP()) __ membar();
1970 1970 }
1971 1971
1972 1972
1973 1973 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
1974 1974 BasicType type = x->basic_type();
1975 1975 LIRItem src(x->object(), this);
1976 1976 LIRItem off(x->offset(), this);
1977 1977 LIRItem data(x->value(), this);
1978 1978
1979 1979 src.load_item();
1980 1980 if (type == T_BOOLEAN || type == T_BYTE) {
1981 1981 data.load_byte_item();
1982 1982 } else {
1983 1983 data.load_item();
1984 1984 }
1985 1985 off.load_item();
1986 1986
1987 1987 set_no_result(x);
1988 1988
1989 1989 if (x->is_volatile() && os::is_MP()) __ membar_release();
1990 1990 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
1991 1991 }
1992 1992
1993 1993
1994 1994 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
1995 1995 LIRItem src(x->object(), this);
1996 1996 LIRItem off(x->offset(), this);
1997 1997
1998 1998 src.load_item();
1999 1999 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2000 2000 // let it be a constant
2001 2001 off.dont_load_item();
2002 2002 } else {
2003 2003 off.load_item();
2004 2004 }
2005 2005
2006 2006 set_no_result(x);
2007 2007
2008 2008 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2009 2009 __ prefetch(addr, is_store);
2010 2010 }
2011 2011
2012 2012
// Read-prefetch variant: delegates to the shared helper with is_store = false.
void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
  do_UnsafePrefetch(x, false);
}
2016 2016
2017 2017
// Write-prefetch variant: delegates to the shared helper with is_store = true.
void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
  do_UnsafePrefetch(x, true);
}
2021 2021
2022 2022
2023 2023 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2024 2024 int lng = x->length();
2025 2025
2026 2026 for (int i = 0; i < lng; i++) {
2027 2027 SwitchRange* one_range = x->at(i);
2028 2028 int low_key = one_range->low_key();
2029 2029 int high_key = one_range->high_key();
2030 2030 BlockBegin* dest = one_range->sux();
2031 2031 if (low_key == high_key) {
2032 2032 __ cmp(lir_cond_equal, value, low_key);
2033 2033 __ branch(lir_cond_equal, T_INT, dest);
2034 2034 } else if (high_key - low_key == 1) {
2035 2035 __ cmp(lir_cond_equal, value, low_key);
2036 2036 __ branch(lir_cond_equal, T_INT, dest);
2037 2037 __ cmp(lir_cond_equal, value, high_key);
2038 2038 __ branch(lir_cond_equal, T_INT, dest);
2039 2039 } else {
2040 2040 LabelObj* L = new LabelObj();
2041 2041 __ cmp(lir_cond_less, value, low_key);
2042 2042 __ branch(lir_cond_less, L->label());
2043 2043 __ cmp(lir_cond_lessEqual, value, high_key);
2044 2044 __ branch(lir_cond_lessEqual, T_INT, dest);
2045 2045 __ branch_destination(L->label());
2046 2046 }
2047 2047 }
2048 2048 __ jump(default_sux);
2049 2049 }
2050 2050
2051 2051
SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
  // Collapse consecutive tableswitch entries with the same successor
  // into SwitchRanges. Ranges that dispatch to the default successor
  // are dropped when the successor changes (see note on the tail below).
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* sux = x->sux_at(0);
    int key = x->lo_key();
    BlockBegin* default_sux = x->default_sux();
    SwitchRange* range = new SwitchRange(key, sux);
    for (int i = 0; i < len; i++, key++) {
      BlockBegin* new_sux = x->sux_at(i);
      if (sux == new_sux) {
        // still in same range
        range->set_high_key(key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (sux != default_sux) {
          res->append(range);
        }
        range = new SwitchRange(key, new_sux);
      }
      sux = new_sux;
    }
    // Append the trailing range unless it was already appended.
    // NOTE(review): a trailing range targeting the default successor is
    // still appended here (the default filter only runs on successor
    // changes) — presumably harmless, just a redundant test; confirm
    // before changing.
    if (res->length() == 0 || res->last() != range) res->append(range);
  }
  return res;
}
2078 2078
2079 2079
2080 2080 // we expect the keys to be sorted by increasing value
SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
  // Collapse lookupswitch entries with consecutive keys and the same
  // successor into SwitchRanges. Ranges dispatching to the default
  // successor are dropped when a new range starts (the trailing range
  // is appended unconditionally, like in the TableSwitch variant).
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* default_sux = x->default_sux();
    int key = x->key_at(0);
    BlockBegin* sux = x->sux_at(0);
    SwitchRange* range = new SwitchRange(key, sux);
    for (int i = 1; i < len; i++) {
      int new_key = x->key_at(i);
      BlockBegin* new_sux = x->sux_at(i);
      if (key+1 == new_key && sux == new_sux) {
        // still in same range
        range->set_high_key(new_key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (range->sux() != default_sux) {
          res->append(range);
        }
        range = new SwitchRange(new_key, new_sux);
      }
      key = new_key;
      sux = new_sux;
    }
    // Append the trailing range unless it was already appended.
    if (res->length() == 0 || res->last() != range) res->append(range);
  }
  return res;
}
2109 2109
2110 2110
2111 2111 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2112 2112 LIRItem tag(x->tag(), this);
2113 2113 tag.load_item();
2114 2114 set_no_result(x);
2115 2115
2116 2116 if (x->is_safepoint()) {
2117 2117 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2118 2118 }
2119 2119
2120 2120 // move values into phi locations
2121 2121 move_to_phi(x->state());
2122 2122
2123 2123 int lo_key = x->lo_key();
2124 2124 int hi_key = x->hi_key();
2125 2125 int len = x->length();
2126 2126 CodeEmitInfo* info = state_for(x, x->state());
2127 2127 LIR_Opr value = tag.result();
2128 2128 if (UseTableRanges) {
2129 2129 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2130 2130 } else {
2131 2131 for (int i = 0; i < len; i++) {
2132 2132 __ cmp(lir_cond_equal, value, i + lo_key);
2133 2133 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2134 2134 }
2135 2135 __ jump(x->default_sux());
2136 2136 }
2137 2137 }
2138 2138
2139 2139
2140 2140 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2141 2141 LIRItem tag(x->tag(), this);
2142 2142 tag.load_item();
2143 2143 set_no_result(x);
2144 2144
2145 2145 if (x->is_safepoint()) {
2146 2146 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2147 2147 }
2148 2148
2149 2149 // move values into phi locations
2150 2150 move_to_phi(x->state());
2151 2151
2152 2152 LIR_Opr value = tag.result();
2153 2153 if (UseTableRanges) {
2154 2154 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2155 2155 } else {
2156 2156 int len = x->length();
2157 2157 for (int i = 0; i < len; i++) {
2158 2158 __ cmp(lir_cond_equal, value, x->key_at(i));
2159 2159 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2160 2160 }
2161 2161 __ jump(x->default_sux());
2162 2162 }
2163 2163 }
2164 2164
2165 2165
void LIRGenerator::do_Goto(Goto* x) {
  set_no_result(x);

  if (block()->next()->as_OsrEntry()) {
    // need to free up storage used for OSR entry point
    LIR_Opr osrBuffer = block()->next()->operand();
    BasicTypeList signature;
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    __ move(osrBuffer, cc->args()->at(0));
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
                         getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
  }

  if (x->is_safepoint()) {
    // Use state_before when available, otherwise fall back to the
    // current state for the safepoint debug info.
    ValueStack* state = x->state_before() ? x->state_before() : x->state();

    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, state));

    CodeEmitInfo* safepoint_info = state_for(x, state);
    __ safepoint(safepoint_poll_register(), safepoint_info);
  }

  // emit phi-instruction move after safepoint since this simplifies
  // describing the state as the safepoint.
  move_to_phi(x->state());

  __ jump(x->default_sux());
}
2196 2196
2197 2197
void LIRGenerator::do_Base(Base* x) {
  // Method prologue: standard entry, incoming-argument moves, optional
  // dtrace entry probe, method synchronization, and invocation counting.
  __ std_entry(LIR_OprFact::illegalOpr);
  // Emit moves from physical registers / stack slots to virtual registers
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  // NOTE(review): irScope appears to be unused in this function —
  // confirm before removing.
  IRScope* irScope = compilation()->hir()->top_scope();
  int java_index = 0;
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr src = args->at(i);
    assert(!src->is_illegal(), "check");
    BasicType t = src->type();

    // Types which are smaller than int are passed as int, so
    // correct the type which passed.
    switch (t) {
      case T_BYTE:
      case T_BOOLEAN:
      case T_SHORT:
      case T_CHAR:
        t = T_INT;
        break;
    }

    LIR_Opr dest = new_register(t);
    __ move(src, dest);

    // Assign new location to Local instruction for this local
    Local* local = x->state()->local_at(java_index)->as_Local();
    assert(local != NULL, "Locals for incoming arguments must have been created");
    assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
    local->set_operand(dest);
    _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
    java_index += type2size[t];  // two slots for T_LONG/T_DOUBLE
  }

  if (compilation()->env()->dtrace_method_probes()) {
    // Emit the dtrace method-entry probe runtime call.
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
  }

  if (method()->is_synchronized()) {
    // Determine the object to lock: the class mirror for static
    // methods, otherwise the receiver.
    LIR_Opr obj;
    if (method()->is_static()) {
      obj = new_register(T_OBJECT);
      __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
    } else {
      Local* receiver = x->state()->local_at(0)->as_Local();
      assert(receiver != NULL, "must already exist");
      obj = receiver->operand();
    }
    assert(obj->is_valid(), "must be valid");

    // NOTE(review): the is_synchronized() re-test below is redundant
    // inside this block; only GenerateSynchronizationCode matters here.
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = new_register(T_INT);
      __ load_stack_address_monitor(0, lock);

      CodeEmitInfo* info = new CodeEmitInfo(SynchronizationEntryBCI, scope()->start()->state(), NULL);
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);

      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
    }
  }

  // increment invocation counters if needed
  increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL));

  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
  __ jump(x->default_sux());
}
2275 2275
2276 2276
↓ open down ↓ |
2276 lines elided |
↑ open up ↑ |
2277 2277 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2278 2278 // construct our frame and model the production of incoming pointer
2279 2279 // to the OSR buffer.
2280 2280 __ osr_entry(LIR_Assembler::osrBufferPointer());
2281 2281 LIR_Opr result = rlock_result(x);
2282 2282 __ move(LIR_Assembler::osrBufferPointer(), result);
2283 2283 }
2284 2284
2285 2285
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  // Slot 0 is skipped in the first pass: it holds the receiver (loaded
  // last, below) or, for invokedynamic, the dummy entry standing in for
  // the synthetic MethodHandle argument that do_Invoke materializes
  // directly into the receiver register.
  int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
  for (; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    if (loc->is_register()) {
      // Argument is passed in a register: force it there.
      param->load_item_force(loc);
    } else {
      // Argument is passed on the stack: store it to its stack slot.
      LIR_Address* addr = loc->as_address_ptr();
      param->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(param->result(), addr);
      } else {
        __ move(param->result(), addr);
      }
    }
  }

  // The receiver is handled last (see the ordering discussion above
  // do_Invoke).
  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move(receiver->result(), loc);
    }
  }
}
2316 2316
2317 2317
// Visits all arguments, returns appropriate items without loading them
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  LIRItemList* argument_items = new LIRItemList();
  if (x->has_receiver()) {
    LIRItem* receiver = new LIRItem(x->receiver(), this);
    argument_items->append(receiver);
  }
  if (x->is_invokedynamic()) {
    // Insert a dummy for the synthetic MethodHandle argument.
    argument_items->append(NULL);
  }
  // NOTE(review): idx is advanced per argument but never read afterwards
  // — presumably left over or kept for debugging; confirm before removing.
  int idx = x->has_receiver() ? 1 : 0;
  for (int i = 0; i < x->number_of_arguments(); i++) {
    LIRItem* param = new LIRItem(x->argument_at(i), this);
    argument_items->append(param);
    idx += (param->type()->is_double_word() ? 2 : 1);
  }
  return argument_items;
}
2333 2337
2334 2338
2335 2339 // The invoke with receiver has following phases:
2336 2340 // a) traverse and load/lock receiver;
2337 2341 // b) traverse all arguments -> item-array (invoke_visit_argument)
2338 2342 // c) push receiver on stack
2339 2343 // d) load each of the items and push on stack
2340 2344 // e) unlock receiver
2341 2345 // f) move receiver into receiver-register %o0
2342 2346 // g) lock result registers and emit call operation
2343 2347 //
2344 2348 // Before issuing a call, we must spill-save all values on stack
2345 2349 // that are in caller-save register. "spill-save" moves thos registers
2346 2350 // either in a free callee-save register or spills them if no free
2347 2351 // callee save register is available.
2348 2352 //
2349 2353 // The problem is where to invoke spill-save.
2350 2354 // - if invoked between e) and f), we may lock callee save
2351 2355 // register in "spill-save" that destroys the receiver register
2352 2356 // before f) is executed
2353 2357 // - if we rearange the f) to be earlier, by loading %o0, it
2354 2358 // may destroy a value on the stack that is currently in %o0
2355 2359 // and is waiting to be spilled
2356 2360 // - if we keep the receiver locked while doing spill-save,
2357 2361 // we cannot spill it as it is spill-locked
2358 2362 //
void LIRGenerator::do_Invoke(Invoke* x) {
  // Lay out the outgoing arguments according to the Java calling
  // convention.
  CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);

  LIR_OprList* arg_list = cc->args();
  LIRItemList* args = invoke_visit_arguments(x);
  LIR_Opr receiver = LIR_OprFact::illegalOpr;

  // setup result register
  LIR_Opr result_register = LIR_OprFact::illegalOpr;
  if (x->type() != voidType) {
    result_register = result_register_for(x->type());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  // invokedynamics can deoptimize.
  bool is_invokedynamic = x->code() == Bytecodes::_invokedynamic;
  CodeEmitInfo* deopt_info = is_invokedynamic ? state_for(x, x->state_before()) : NULL;

  invoke_load_arguments(x, args, arg_list);

  if (x->has_receiver()) {
    // The receiver goes into its fixed calling-convention register.
    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
    receiver = args->at(0)->result();
  }

  // emit invoke code
  bool optimized = x->target_is_loaded() && x->target_is_final();
  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");

  switch (x->code()) {
    case Bytecodes::_invokestatic:
      __ call_static(x->target(), result_register,
                     SharedRuntime::get_resolve_static_call_stub(),
                     arg_list, info);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface:
      // for final target we still produce an inline cache, in order
      // to be able to call mixed mode
      if (x->code() == Bytecodes::_invokespecial || optimized) {
        __ call_opt_virtual(x->target(), receiver, result_register,
                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
                            arg_list, info);
      } else if (x->vtable_index() < 0) {
        // No usable vtable index: dispatch through an inline cache.
        __ call_icvirtual(x->target(), receiver, result_register,
                          SharedRuntime::get_resolve_virtual_call_stub(),
                          arg_list, info);
      } else {
        // Known vtable index: dispatch directly through the vtable.
        int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
        __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
      }
      break;
    case Bytecodes::_invokedynamic: {
      // Re-scan the bytecode to locate the constant pool cache entry
      // for this call site.
      ciBytecodeStream bcs(x->scope()->method());
      bcs.force_bci(x->bci());
      assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
      ciCPCache* cpcache = bcs.get_cpcache();

      // Get CallSite offset from constant pool cache pointer.
      int index = bcs.get_method_index();
      size_t call_site_offset = cpcache->get_f1_offset(index);

      // If this invokedynamic call site hasn't been executed yet in
      // the interpreter, the CallSite object in the constant pool
      // cache is still null and we need to deoptimize.
      if (cpcache->is_f1_null_at(index)) {
        // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
        // clone all handlers. This is handled transparently in other
        // places by the CodeEmitInfo cloning logic but is handled
        // specially here because a stub isn't being used.
        x->set_exception_handlers(new XHandlers(x->exception_handlers()));

        DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
        __ jump(deopt_stub);
      }

      // Use the receiver register for the synthetic MethodHandle
      // argument.
      receiver = LIR_Assembler::receiverOpr();
      LIR_Opr tmp = new_register(objectType);

      // Load CallSite object from constant pool cache.
      __ oop2reg(cpcache->constant_encoding(), tmp);
      __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);

      // Load target MethodHandle from CallSite object.
      __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);

      __ call_dynamic(x->target(), receiver, result_register,
                      SharedRuntime::get_resolve_opt_virtual_call_stub(),
                      arg_list, info);
      break;
    }
    default:
      ShouldNotReachHere();
      break;
  }

  if (x->type()->is_float() || x->type()->is_double()) {
    // Force rounding of results from non-strictfp when in strictfp
    // scope (or when we don't know the strictness of the callee, to
    // be safe.)
    if (method()->is_strict()) {
      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
        result_register = round_item(result_register);
      }
    }
  }

  if (result_register->is_valid()) {
    // Move the fixed return register into the node's virtual result.
    LIR_Opr result = rlock_result(x);
    __ move(result_register, result);
  }
}
2431 2480
2432 2481
2433 2482 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2434 2483 assert(x->number_of_arguments() == 1, "wrong type");
2435 2484 LIRItem value (x->argument_at(0), this);
2436 2485 LIR_Opr reg = rlock_result(x);
2437 2486 value.load_item();
2438 2487 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2439 2488 __ move(tmp, reg);
2440 2489 }
2441 2490
2442 2491
2443 2492
2444 2493 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2445 2494 void LIRGenerator::do_IfOp(IfOp* x) {
2446 2495 #ifdef ASSERT
2447 2496 {
2448 2497 ValueTag xtag = x->x()->type()->tag();
2449 2498 ValueTag ttag = x->tval()->type()->tag();
2450 2499 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2451 2500 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2452 2501 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2453 2502 }
2454 2503 #endif
2455 2504
2456 2505 LIRItem left(x->x(), this);
2457 2506 LIRItem right(x->y(), this);
2458 2507 left.load_item();
2459 2508 if (can_inline_as_constant(right.value())) {
2460 2509 right.dont_load_item();
2461 2510 } else {
2462 2511 right.load_item();
2463 2512 }
2464 2513
2465 2514 LIRItem t_val(x->tval(), this);
2466 2515 LIRItem f_val(x->fval(), this);
2467 2516 t_val.dont_load_item();
2468 2517 f_val.dont_load_item();
2469 2518 LIR_Opr reg = rlock_result(x);
2470 2519
2471 2520 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2472 2521 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg);
2473 2522 }
2474 2523
2475 2524
// Dispatch an Intrinsic node to the LIR generator routine for its
// vmIntrinsics id.  Any id reaching the default case was admitted by
// the front end but has no back-end support, which is a bug.
void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
  // Raw bit-pattern conversions; all four share the spill-and-reload
  // implementation in do_FPIntrinsics.
  case vmIntrinsics::_intBitsToFloat :
  case vmIntrinsics::_doubleToRawLongBits :
  case vmIntrinsics::_longBitsToDouble :
  case vmIntrinsics::_floatToRawIntBits : {
    do_FPIntrinsics(x);
    break;
  }

  // System.currentTimeMillis(): leaf call into the VM's time source;
  // no arguments, no safepoint/deopt info needed.
  case vmIntrinsics::_currentTimeMillis: {
    assert(x->number_of_arguments() == 0, "wrong type");
    LIR_Opr reg = result_register_for(x->type());
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
                         reg, new LIR_OprList());
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
    break;
  }

  // System.nanoTime(): same shape as currentTimeMillis, different entry.
  case vmIntrinsics::_nanoTime: {
    assert(x->number_of_arguments() == 0, "wrong type");
    LIR_Opr reg = result_register_for(x->type());
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
                         reg, new LIR_OprList());
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
    break;
  }

  case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
  case vmIntrinsics::_getClass:       do_getClass(x);       break;
  case vmIntrinsics::_currentThread:  do_currentThread(x);  break;

  // java.lang.Math intrinsics all funnel into one handler.
  case vmIntrinsics::_dlog: // fall through
  case vmIntrinsics::_dlog10: // fall through
  case vmIntrinsics::_dabs: // fall through
  case vmIntrinsics::_dsqrt: // fall through
  case vmIntrinsics::_dtan: // fall through
  case vmIntrinsics::_dsin : // fall through
  case vmIntrinsics::_dcos : do_MathIntrinsic(x); break;
  case vmIntrinsics::_arraycopy:     do_ArrayCopy(x);     break;

  // java.nio.Buffer.checkIndex
  case vmIntrinsics::_checkIndex:    do_NIOCheckIndex(x); break;

  // sun.misc.Unsafe compare-and-swap family, parameterized by type.
  case vmIntrinsics::_compareAndSwapObject:
    do_CompareAndSwap(x, objectType);
    break;
  case vmIntrinsics::_compareAndSwapInt:
    do_CompareAndSwap(x, intType);
    break;
  case vmIntrinsics::_compareAndSwapLong:
    do_CompareAndSwap(x, longType);
    break;

  // sun.misc.AtomicLongCSImpl.attemptUpdate
  case vmIntrinsics::_attemptUpdate:
    do_AttemptUpdate(x);
    break;

  default: ShouldNotReachHere(); break;
  }
}
2540 2589
2541 2590
2542 2591 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
2543 2592 // Need recv in a temporary register so it interferes with the other temporaries
2544 2593 LIR_Opr recv = LIR_OprFact::illegalOpr;
2545 2594 LIR_Opr mdo = new_register(T_OBJECT);
2546 2595 LIR_Opr tmp = new_register(T_INT);
2547 2596 if (x->recv() != NULL) {
2548 2597 LIRItem value(x->recv(), this);
2549 2598 value.load_item();
2550 2599 recv = new_register(T_OBJECT);
2551 2600 __ move(value.result(), recv);
2552 2601 }
2553 2602 __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
2554 2603 }
2555 2604
2556 2605
2557 2606 void LIRGenerator::do_ProfileCounter(ProfileCounter* x) {
2558 2607 LIRItem mdo(x->mdo(), this);
2559 2608 mdo.load_item();
2560 2609
2561 2610 increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment());
2562 2611 }
2563 2612
2564 2613
2565 2614 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
2566 2615 LIRItemList args(1);
2567 2616 LIRItem value(arg1, this);
2568 2617 args.append(&value);
2569 2618 BasicTypeList signature;
2570 2619 signature.append(as_BasicType(arg1->type()));
2571 2620
2572 2621 return call_runtime(&signature, &args, entry, result_type, info);
2573 2622 }
2574 2623
2575 2624
2576 2625 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
2577 2626 LIRItemList args(2);
2578 2627 LIRItem value1(arg1, this);
2579 2628 LIRItem value2(arg2, this);
2580 2629 args.append(&value1);
2581 2630 args.append(&value2);
2582 2631 BasicTypeList signature;
2583 2632 signature.append(as_BasicType(arg1->type()));
2584 2633 signature.append(as_BasicType(arg2->type()));
2585 2634
2586 2635 return call_runtime(&signature, &args, entry, result_type, info);
2587 2636 }
2588 2637
2589 2638
// Emit a call to a VM runtime entry whose arguments are already LIR
// operands.  Arguments are shuffled into the C calling convention;
// the result (if any) is moved from the convention's physical return
// register into a fresh virtual register, which is returned.
// Passing a non-NULL info makes this a call that can safepoint/deopt;
// NULL selects the cheaper leaf-call form.
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      // Argument goes on the stack.
      LIR_Address* addr = loc->as_address_ptr();
//           if (!can_store_as_constant(arg)) {
//             LIR_Opr tmp = new_register(arg->type());
//             __ move(arg, tmp);
//             arg = tmp;
//           }
      // Outgoing stack slots for 64-bit values may not be 8-byte
      // aligned, so use the unaligned move form for long/double.
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg, addr);
      } else {
        __ move(arg, addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  // Copy the physical return register into the virtual result register.
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}
2633 2682
2634 2683
// Emit a call to a VM runtime entry whose arguments are still LIRItems.
// Unlike the LIR_OprList overload above, each item is loaded directly
// into the location the C calling convention demands (register or
// stack slot), avoiding a separate shuffle.  Result handling and the
// info/leaf distinction match the other overload.
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);

  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      // Force the item into the exact register the convention requires.
      arg->load_item_force(loc);
    } else {
      // Stack argument: load the item in a form suitable for storing,
      // then store it into its outgoing slot.
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      // Outgoing stack slots for 64-bit values may not be 8-byte
      // aligned, so use the unaligned move form for long/double.
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg->result(), addr);
      } else {
        __ move(arg->result(), addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  // Copy the physical return register into the virtual result register.
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}
2675 2724
2676 2725
2677 2726
// Emit code that bumps the method's invocation counter (or, for
// backedge == true, its backedge counter) and branches to a
// CounterOverflowStub once the tier-1 limit is reached, triggering
// recompilation at a higher tier.  No-op outside TIERED builds; also
// skipped for small methods unless this is a backedge.
void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) {
#ifdef TIERED
  if (_compilation->env()->comp_level() == CompLevel_fast_compile &&
      (method()->code_size() >= Tier1BytecodeLimit || backedge)) {
    // Select which counter to bump and which limit triggers overflow.
    int limit = InvocationCounter::Tier1InvocationLimit;
    int offset = in_bytes(methodOopDesc::invocation_counter_offset() +
                          InvocationCounter::counter_offset());
    if (backedge) {
      limit = InvocationCounter::Tier1BackEdgeLimit;
      offset = in_bytes(methodOopDesc::backedge_counter_offset() +
                        InvocationCounter::counter_offset());
    }

    // Load the methodOop, bump its counter, and compare against limit.
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->constant_encoding(), meth);
    LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
    __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
    // On overflow, branch to the stub; execution resumes at its
    // continuation label either way.
    CodeStub* overflow = new CounterOverflowStub(info, info->bci());
    __ branch(lir_cond_aboveEqual, T_INT, overflow);
    __ branch_destination(overflow->continuation());
  }
#endif
}
↓ open down ↓ |
281 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX