--- old/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
+++ new/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
1 1 /*
2 2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "c1/c1_Compilation.hpp"
27 27 #include "c1/c1_LIRAssembler.hpp"
28 28 #include "c1/c1_MacroAssembler.hpp"
29 29 #include "c1/c1_Runtime1.hpp"
30 30 #include "c1/c1_ValueStack.hpp"
31 31 #include "ci/ciArrayKlass.hpp"
32 32 #include "ci/ciInstance.hpp"
33 33 #include "gc_interface/collectedHeap.hpp"
34 34 #include "memory/barrierSet.hpp"
35 35 #include "memory/cardTableModRefBS.hpp"
36 36 #include "nativeInst_sparc.hpp"
37 37 #include "oops/objArrayKlass.hpp"
38 38 #include "runtime/sharedRuntime.hpp"
39 39
40 40 #define __ _masm->
41 41
42 42
43 43 //------------------------------------------------------------
44 44
45 45
46 46 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
47 47 if (opr->is_constant()) {
48 48 LIR_Const* constant = opr->as_constant_ptr();
49 49 switch (constant->type()) {
50 50 case T_INT: {
51 51 jint value = constant->as_jint();
52 52 return Assembler::is_simm13(value);
53 53 }
54 54
55 55 default:
56 56 return false;
57 57 }
58 58 }
59 59 return false;
60 60 }
61 61
62 62
63 63 bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
64 64 switch (op->code()) {
65 65 case lir_null_check:
66 66 return true;
67 67
68 68
69 69 case lir_add:
70 70 case lir_ushr:
71 71 case lir_shr:
72 72 case lir_shl:
73 73 // integer shifts and adds are always one instruction
74 74 return op->result_opr()->is_single_cpu();
75 75
76 76
77 77 case lir_move: {
78 78 LIR_Op1* op1 = op->as_Op1();
79 79 LIR_Opr src = op1->in_opr();
80 80 LIR_Opr dst = op1->result_opr();
81 81
82 82 if (src == dst) {
83 83 NEEDS_CLEANUP;
  84  84        // this works around a problem where moves with the same src and dst
  85  85        // end up in the delay slot; the assembler then swallows the mov
  86  86        // since it has no effect, and complains because the delay slot
  87  87        // is empty. Returning false stops the optimizer from putting this
  88  88        // move in the delay slot
89 89 return false;
90 90 }
91 91
92 92 // don't put moves involving oops into the delay slot since the VerifyOops code
93 93 // will make it much larger than a single instruction.
94 94 if (VerifyOops) {
95 95 return false;
96 96 }
97 97
98 98 if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
99 99 ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
100 100 return false;
101 101 }
102 102
103 103 if (UseCompressedOops) {
104 104 if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
105 105 if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
106 106 }
107 107
108 108 if (dst->is_register()) {
109 109 if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
110 110 return !PatchALot;
111 111 } else if (src->is_single_stack()) {
112 112 return true;
113 113 }
114 114 }
115 115
116 116 if (src->is_register()) {
117 117 if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
118 118 return !PatchALot;
119 119 } else if (dst->is_single_stack()) {
120 120 return true;
121 121 }
122 122 }
123 123
124 124 if (dst->is_register() &&
125 125 ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
126 126 (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
127 127 return true;
128 128 }
129 129
130 130 return false;
131 131 }
132 132
133 133 default:
134 134 return false;
135 135 }
136 136 ShouldNotReachHere();
137 137 }
138 138
139 139
140 140 LIR_Opr LIR_Assembler::receiverOpr() {
141 141 return FrameMap::O0_oop_opr;
142 142 }
143 143
144 144
145 145 LIR_Opr LIR_Assembler::incomingReceiverOpr() {
146 146 return FrameMap::I0_oop_opr;
147 147 }
148 148
149 149
150 150 LIR_Opr LIR_Assembler::osrBufferPointer() {
151 151 return FrameMap::I0_opr;
152 152 }
153 153
154 154
155 155 int LIR_Assembler::initial_frame_size_in_bytes() {
156 156 return in_bytes(frame_map()->framesize_in_bytes());
157 157 }
158 158
159 159
160 160 // inline cache check: the inline cached class is in G5_inline_cache_reg(G5);
161 161 // we fetch the class of the receiver (O0) and compare it with the cached class.
 162 162 // If they do not match, we jump to the slow case.
163 163 int LIR_Assembler::check_icache() {
164 164 int offset = __ offset();
165 165 __ inline_cache_check(O0, G5_inline_cache_reg);
166 166 return offset;
167 167 }
168 168
169 169
170 170 void LIR_Assembler::osr_entry() {
171 171 // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
172 172 //
173 173 // 1. Create a new compiled activation.
174 174 // 2. Initialize local variables in the compiled activation. The expression stack must be empty
175 175 // at the osr_bci; it is not initialized.
176 176 // 3. Jump to the continuation address in compiled code to resume execution.
177 177
178 178 // OSR entry point
179 179 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
180 180 BlockBegin* osr_entry = compilation()->hir()->osr_entry();
181 181 ValueStack* entry_state = osr_entry->end()->state();
182 182 int number_of_locks = entry_state->locks_size();
183 183
184 184 // Create a frame for the compiled activation.
185 185 __ build_frame(initial_frame_size_in_bytes());
186 186
187 187 // OSR buffer is
188 188 //
189 189 // locals[nlocals-1..0]
190 190 // monitors[number_of_locks-1..0]
191 191 //
 192 192   // locals is a direct copy of the interpreter frame, so in the osr buffer
 193 193   // the first slot in the local array is the last local from the interpreter
 194 194   // and the last slot is local[0] (the receiver) from the interpreter
195 195 //
196 196 // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 197 197   // from the interpreter frame, and the nth lock slot in the osr buffer is the 0th lock
 198 198   // in the interpreter frame (the method lock if a sync method)
199 199
200 200 // Initialize monitors in the compiled activation.
201 201 // I0: pointer to osr buffer
202 202 //
203 203 // All other registers are dead at this point and the locals will be
204 204 // copied into place by code emitted in the IR.
205 205
206 206 Register OSR_buf = osrBufferPointer()->as_register();
207 207 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
208 208 int monitor_offset = BytesPerWord * method()->max_locals() +
209 209 (2 * BytesPerWord) * (number_of_locks - 1);
210 210 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
211 211 // the OSR buffer using 2 word entries: first the lock and then
212 212 // the oop.
213 213 for (int i = 0; i < number_of_locks; i++) {
214 214 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
215 215 #ifdef ASSERT
216 216 // verify the interpreter's monitor has a non-null object
217 217 {
218 218 Label L;
219 219 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
220 220 __ cmp(G0, O7);
221 221 __ br(Assembler::notEqual, false, Assembler::pt, L);
222 222 __ delayed()->nop();
223 223 __ stop("locked object is NULL");
224 224 __ bind(L);
225 225 }
226 226 #endif // ASSERT
227 227 // Copy the lock field into the compiled activation.
228 228 __ ld_ptr(OSR_buf, slot_offset + 0, O7);
229 229 __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
230 230 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
231 231 __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
232 232 }
233 233 }
234 234 }
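
As a worked example of the offset arithmetic above, here is a standalone sketch (assuming a 64-bit VM with BytesPerWord == 8, and hypothetical values max_locals == 3 and number_of_locks == 2; none of these values come from a real frame):

    #include <cstdio>

    int main() {
      const int BytesPerWord    = 8;   // assumption: 64-bit VM
      const int max_locals      = 3;   // hypothetical method
      const int number_of_locks = 2;   // hypothetical lock count

      // monitor_offset as computed in osr_entry() above
      int monitor_offset = BytesPerWord * max_locals +
                           (2 * BytesPerWord) * (number_of_locks - 1);
      for (int i = 0; i < number_of_locks; i++) {
        int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
        // slot_offset + 0            -> BasicLock (displaced header)
        // slot_offset + BytesPerWord -> locked oop
        printf("monitor %d: lock at %d, oop at %d\n",
               i, slot_offset, slot_offset + BytesPerWord);
      }
      return 0;  // prints offsets 40/48 for monitor 0 and 24/32 for monitor 1
    }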
235 235
236 236
237 237 // Optimized Library calls
 238 238 // This is the fast version of java.lang.String.compare; it has no
 239 239 // OSR entry and therefore we generate a slow version for OSRs
240 240 void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
241 241 Register str0 = left->as_register();
242 242 Register str1 = right->as_register();
243 243
244 244 Label Ldone;
245 245
246 246 Register result = dst->as_register();
247 247 {
248 248 // Get a pointer to the first character of string0 in tmp0 and get string0.count in str0
249 249 // Get a pointer to the first character of string1 in tmp1 and get string1.count in str1
 250 250     // Also, get string0.count - string1.count in O7 and set the condition codes
251 251 // Note: some instructions have been hoisted for better instruction scheduling
252 252
253 253 Register tmp0 = L0;
254 254 Register tmp1 = L1;
255 255 Register tmp2 = L2;
256 256
257 257 int value_offset = java_lang_String:: value_offset_in_bytes(); // char array
258 258 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
259 259 int count_offset = java_lang_String:: count_offset_in_bytes();
260 260
261 261 __ load_heap_oop(str0, value_offset, tmp0);
262 262 __ ld(str0, offset_offset, tmp2);
263 263 __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
264 264 __ ld(str0, count_offset, str0);
265 265 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
266 266
267 267 // str1 may be null
268 268 add_debug_info_for_null_check_here(info);
269 269
270 270 __ load_heap_oop(str1, value_offset, tmp1);
271 271 __ add(tmp0, tmp2, tmp0);
272 272
273 273 __ ld(str1, offset_offset, tmp2);
274 274 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
275 275 __ ld(str1, count_offset, str1);
276 276 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
277 277 __ subcc(str0, str1, O7);
278 278 __ add(tmp1, tmp2, tmp1);
279 279 }
280 280
281 281 {
282 282 // Compute the minimum of the string lengths, scale it and store it in limit
283 283 Register count0 = I0;
284 284 Register count1 = I1;
285 285 Register limit = L3;
286 286
287 287 Label Lskip;
288 288 __ sll(count0, exact_log2(sizeof(jchar)), limit); // string0 is shorter
289 289 __ br(Assembler::greater, true, Assembler::pt, Lskip);
290 290 __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter
291 291 __ bind(Lskip);
292 292
293 293 // If either string is empty (or both of them) the result is the difference in lengths
294 294 __ cmp(limit, 0);
295 295 __ br(Assembler::equal, true, Assembler::pn, Ldone);
296 296 __ delayed()->mov(O7, result); // result is difference in lengths
297 297 }
298 298
299 299 {
300 300 // Neither string is empty
301 301 Label Lloop;
302 302
303 303 Register base0 = L0;
304 304 Register base1 = L1;
305 305 Register chr0 = I0;
306 306 Register chr1 = I1;
307 307 Register limit = L3;
308 308
309 309 // Shift base0 and base1 to the end of the arrays, negate limit
310 310 __ add(base0, limit, base0);
311 311 __ add(base1, limit, base1);
 312 312     __ neg(limit);  // limit = -min{string0.count, string1.count}
313 313
314 314 __ lduh(base0, limit, chr0);
315 315 __ bind(Lloop);
316 316 __ lduh(base1, limit, chr1);
317 317 __ subcc(chr0, chr1, chr0);
318 318 __ br(Assembler::notZero, false, Assembler::pn, Ldone);
319 319 assert(chr0 == result, "result must be pre-placed");
320 320 __ delayed()->inccc(limit, sizeof(jchar));
321 321 __ br(Assembler::notZero, true, Assembler::pt, Lloop);
322 322 __ delayed()->lduh(base0, limit, chr0);
323 323 }
324 324
325 325 // If strings are equal up to min length, return the length difference.
326 326 __ mov(O7, result);
327 327
328 328 // Otherwise, return the difference between the first mismatched chars.
329 329 __ bind(Ldone);
330 330 }
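
The comparison loop above uses a common SPARC idiom: both base pointers are advanced past the end of the compared range and a single negative index is incremented toward zero, so one inccc both advances the cursor and produces the loop-exit condition. A minimal C++ sketch of the same idiom (hypothetical helper; element indexing stands in for the byte offsets used in the assembly):

    #include <cstdio>

    // Compare len UTF-16 units; bases point one past the end and
    // idx runs from -len up to 0, mirroring the negated limit above.
    int compare_prefix(const unsigned short* s0,
                       const unsigned short* s1, int len) {
      const unsigned short* base0 = s0 + len;
      const unsigned short* base1 = s1 + len;
      for (int idx = -len; idx != 0; idx++) {  // one increment advances and tests
        int diff = base0[idx] - base1[idx];
        if (diff != 0) return diff;            // difference of first mismatch
      }
      return 0;                                // equal up to len
    }

    int main() {
      unsigned short a[] = { 'a', 'b', 'c' };
      unsigned short b[] = { 'a', 'b', 'd' };
      printf("%d\n", compare_prefix(a, b, 3)); // prints -1
      return 0;
    }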
331 331
332 332
333 333 // --------------------------------------------------------------------------------------------
334 334
335 335 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
336 336 if (!GenerateSynchronizationCode) return;
337 337
338 338 Register obj_reg = obj_opr->as_register();
339 339 Register lock_reg = lock_opr->as_register();
340 340
341 341 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
342 342 Register reg = mon_addr.base();
343 343 int offset = mon_addr.disp();
344 344 // compute pointer to BasicLock
345 345 if (mon_addr.is_simm13()) {
346 346 __ add(reg, offset, lock_reg);
347 347 }
348 348 else {
349 349 __ set(offset, lock_reg);
350 350 __ add(reg, lock_reg, lock_reg);
351 351 }
352 352 // unlock object
353 353 MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
354 354 // _slow_case_stubs->append(slow_case);
 355 355   // temporary fix: must be created after the exception handler, therefore as a call stub
356 356 _slow_case_stubs->append(slow_case);
357 357 if (UseFastLocking) {
358 358 // try inlined fast unlocking first, revert to slow locking if it fails
359 359 // note: lock_reg points to the displaced header since the displaced header offset is 0!
360 360 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
361 361 __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
362 362 } else {
363 363 // always do slow unlocking
364 364 // note: the slow unlocking code could be inlined here, however if we use
365 365 // slow unlocking, speed doesn't matter anyway and this solution is
366 366 // simpler and requires less duplicated code - additionally, the
367 367 // slow unlocking code is the same in either case which simplifies
368 368 // debugging
369 369 __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
370 370 __ delayed()->nop();
371 371 }
372 372 // done
373 373 __ bind(*slow_case->continuation());
374 374 }
375 375
376 376
377 377 int LIR_Assembler::emit_exception_handler() {
378 378 // if the last instruction is a call (typically to do a throw which
379 379 // is coming at the end after block reordering) the return address
380 380 // must still point into the code area in order to avoid assertion
381 381 // failures when searching for the corresponding bci => add a nop
382 382 // (was bug 5/14/1999 - gri)
383 383 __ nop();
384 384
385 385 // generate code for exception handler
386 386 ciMethod* method = compilation()->method();
387 387
388 388 address handler_base = __ start_a_stub(exception_handler_size);
389 389
390 390 if (handler_base == NULL) {
391 391 // not enough space left for the handler
392 392 bailout("exception handler overflow");
393 393 return -1;
394 394 }
395 395
396 396 int offset = code_offset();
397 397
398 - __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
398 + __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
399 399 __ delayed()->nop();
400 - debug_only(__ stop("should have gone to the caller");)
400 + __ should_not_reach_here();
401 401 assert(code_offset() - offset <= exception_handler_size, "overflow");
402 402 __ end_a_stub();
403 403
404 404 return offset;
405 405 }
406 406
407 407
408 408 // Emit the code to remove the frame from the stack in the exception
409 409 // unwind path.
410 410 int LIR_Assembler::emit_unwind_handler() {
411 411 #ifndef PRODUCT
412 412 if (CommentedAssembly) {
413 413 _masm->block_comment("Unwind handler");
414 414 }
415 415 #endif
416 416
417 417 int offset = code_offset();
418 418
419 419 // Fetch the exception from TLS and clear out exception related thread state
420 420 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
421 421 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
422 422 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
423 423
424 424 __ bind(_unwind_handler_entry);
425 425 __ verify_not_null_oop(O0);
426 426 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
427 427 __ mov(O0, I0); // Preserve the exception
428 428 }
429 429
 430 430   // Perform needed unlocking
431 431 MonitorExitStub* stub = NULL;
432 432 if (method()->is_synchronized()) {
433 433 monitor_address(0, FrameMap::I1_opr);
434 434 stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
435 435 __ unlock_object(I3, I2, I1, *stub->entry());
436 436 __ bind(*stub->continuation());
437 437 }
438 438
439 439 if (compilation()->env()->dtrace_method_probes()) {
440 440 __ mov(G2_thread, O0);
441 441 jobject2reg(method()->constant_encoding(), O1);
442 442 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
443 443 __ delayed()->nop();
444 444 }
445 445
446 446 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
447 447 __ mov(I0, O0); // Restore the exception
448 448 }
449 449
450 450 // dispatch to the unwind logic
451 451 __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
452 452 __ delayed()->nop();
453 453
454 454 // Emit the slow path assembly
455 455 if (stub != NULL) {
456 456 stub->emit_code(this);
457 457 }
458 458
459 459 return offset;
460 460 }
461 461
462 462
463 463 int LIR_Assembler::emit_deopt_handler() {
464 464 // if the last instruction is a call (typically to do a throw which
465 465 // is coming at the end after block reordering) the return address
466 466 // must still point into the code area in order to avoid assertion
467 467 // failures when searching for the corresponding bci => add a nop
468 468 // (was bug 5/14/1999 - gri)
469 469 __ nop();
470 470
471 471 // generate code for deopt handler
472 472 ciMethod* method = compilation()->method();
473 473 address handler_base = __ start_a_stub(deopt_handler_size);
474 474 if (handler_base == NULL) {
475 475 // not enough space left for the handler
476 476 bailout("deopt handler overflow");
477 477 return -1;
478 478 }
479 479
480 480 int offset = code_offset();
481 481 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
482 482 __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
483 483 __ delayed()->nop();
484 484 assert(code_offset() - offset <= deopt_handler_size, "overflow");
485 485 debug_only(__ stop("should have gone to the caller");)
486 486 __ end_a_stub();
487 487
488 488 return offset;
489 489 }
490 490
491 491
492 492 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
493 493 if (o == NULL) {
494 494 __ set(NULL_WORD, reg);
495 495 } else {
496 496 int oop_index = __ oop_recorder()->find_index(o);
497 497 RelocationHolder rspec = oop_Relocation::spec(oop_index);
498 498 __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
499 499 }
500 500 }
501 501
502 502
503 503 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
504 504 // Allocate a new index in oop table to hold the oop once it's been patched
505 505 int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
506 506 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);
507 507
508 508 AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
509 509 assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
510 510 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
511 511 // NULL will be dynamically patched later and the patched value may be large. We must
 512 512   // therefore generate the sethi/add as placeholders
513 513 __ patchable_set(addrlit, reg);
514 514
515 515 patching_epilog(patch, lir_patch_normal, reg, info);
516 516 }
517 517
518 518
519 519 void LIR_Assembler::emit_op3(LIR_Op3* op) {
520 520 Register Rdividend = op->in_opr1()->as_register();
521 521 Register Rdivisor = noreg;
522 522 Register Rscratch = op->in_opr3()->as_register();
523 523 Register Rresult = op->result_opr()->as_register();
524 524 int divisor = -1;
525 525
526 526 if (op->in_opr2()->is_register()) {
527 527 Rdivisor = op->in_opr2()->as_register();
528 528 } else {
529 529 divisor = op->in_opr2()->as_constant_ptr()->as_jint();
530 530 assert(Assembler::is_simm13(divisor), "can only handle simm13");
531 531 }
532 532
533 533 assert(Rdividend != Rscratch, "");
534 534 assert(Rdivisor != Rscratch, "");
535 535 assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");
536 536
537 537 if (Rdivisor == noreg && is_power_of_2(divisor)) {
538 538 // convert division by a power of two into some shifts and logical operations
539 539 if (op->code() == lir_idiv) {
540 540 if (divisor == 2) {
541 541 __ srl(Rdividend, 31, Rscratch);
542 542 } else {
543 543 __ sra(Rdividend, 31, Rscratch);
544 544 __ and3(Rscratch, divisor - 1, Rscratch);
545 545 }
546 546 __ add(Rdividend, Rscratch, Rscratch);
547 547 __ sra(Rscratch, log2_intptr(divisor), Rresult);
548 548 return;
549 549 } else {
550 550 if (divisor == 2) {
551 551 __ srl(Rdividend, 31, Rscratch);
552 552 } else {
553 553 __ sra(Rdividend, 31, Rscratch);
554 554 __ and3(Rscratch, divisor - 1,Rscratch);
555 555 }
556 556 __ add(Rdividend, Rscratch, Rscratch);
557 557 __ andn(Rscratch, divisor - 1,Rscratch);
558 558 __ sub(Rdividend, Rscratch, Rresult);
559 559 return;
560 560 }
561 561 }
562 562
563 563 __ sra(Rdividend, 31, Rscratch);
564 564 __ wry(Rscratch);
565 565 if (!VM_Version::v9_instructions_work()) {
566 566 // v9 doesn't require these nops
567 567 __ nop();
568 568 __ nop();
569 569 __ nop();
570 570 __ nop();
571 571 }
572 572
573 573 add_debug_info_for_div0_here(op->info());
574 574
575 575 if (Rdivisor != noreg) {
576 576 __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
577 577 } else {
578 578 assert(Assembler::is_simm13(divisor), "can only handle simm13");
579 579 __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
580 580 }
581 581
582 582 Label skip;
583 583 __ br(Assembler::overflowSet, true, Assembler::pn, skip);
584 584 __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
585 585 __ bind(skip);
586 586
587 587 if (op->code() == lir_irem) {
588 588 if (Rdivisor != noreg) {
589 589 __ smul(Rscratch, Rdivisor, Rscratch);
590 590 } else {
591 591 __ smul(Rscratch, divisor, Rscratch);
592 592 }
593 593 __ sub(Rdividend, Rscratch, Rresult);
594 594 }
595 595 }
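
In the power-of-two path above, the sra/and3/add sequence adds divisor - 1 to negative dividends before the arithmetic shift so the quotient rounds toward zero as Java's idiv requires, and the irem path masks the bias back off with andn. A C++ sketch of the same transformation (assumes two's-complement arithmetic right shift; __builtin_ctz is a GCC/Clang builtin standing in for log2_intptr):

    #include <cstdint>
    #include <cstdio>

    int32_t idiv_pow2(int32_t dividend, int32_t divisor) {
      int32_t scratch = dividend >> 31;          // sra: 0 or -1
      scratch &= divisor - 1;                    // and3: bias negatives only
      scratch += dividend;                       // add
      return scratch >> __builtin_ctz(divisor);  // sra by log2(divisor)
    }

    int32_t irem_pow2(int32_t dividend, int32_t divisor) {
      int32_t scratch = dividend >> 31;
      scratch &= divisor - 1;
      scratch += dividend;
      scratch &= ~(divisor - 1);                 // andn: strip the biased low bits
      return dividend - scratch;                 // sub
    }

    int main() {
      printf("%d %d\n", idiv_pow2(-7, 4), irem_pow2(-7, 4));  // prints -1 -3
      return 0;
    }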
596 596
597 597
598 598 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
599 599 #ifdef ASSERT
600 600 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
601 601 if (op->block() != NULL) _branch_target_blocks.append(op->block());
602 602 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
603 603 #endif
604 604 assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
605 605
606 606 if (op->cond() == lir_cond_always) {
607 607 __ br(Assembler::always, false, Assembler::pt, *(op->label()));
608 608 } else if (op->code() == lir_cond_float_branch) {
609 609 assert(op->ublock() != NULL, "must have unordered successor");
610 610 bool is_unordered = (op->ublock() == op->block());
611 611 Assembler::Condition acond;
612 612 switch (op->cond()) {
613 613 case lir_cond_equal: acond = Assembler::f_equal; break;
614 614 case lir_cond_notEqual: acond = Assembler::f_notEqual; break;
615 615 case lir_cond_less: acond = (is_unordered ? Assembler::f_unorderedOrLess : Assembler::f_less); break;
616 616 case lir_cond_greater: acond = (is_unordered ? Assembler::f_unorderedOrGreater : Assembler::f_greater); break;
617 617 case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break;
618 618 case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
619 619 default : ShouldNotReachHere();
620 620 };
621 621
622 622 if (!VM_Version::v9_instructions_work()) {
623 623 __ nop();
624 624 }
625 625 __ fb( acond, false, Assembler::pn, *(op->label()));
626 626 } else {
627 627 assert (op->code() == lir_branch, "just checking");
628 628
629 629 Assembler::Condition acond;
630 630 switch (op->cond()) {
631 631 case lir_cond_equal: acond = Assembler::equal; break;
632 632 case lir_cond_notEqual: acond = Assembler::notEqual; break;
633 633 case lir_cond_less: acond = Assembler::less; break;
634 634 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
635 635 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
636 636 case lir_cond_greater: acond = Assembler::greater; break;
637 637 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
638 638 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
639 639 default: ShouldNotReachHere();
640 640 };
641 641
642 642 // sparc has different condition codes for testing 32-bit
 643 643     // vs. 64-bit values. We could always test xcc if we could
 644 644     // guarantee that 32-bit loads always sign extend, but that isn't
 645 645     // true, and since sign extension isn't free, it would impose a
 646 646     // slight cost.
647 647 #ifdef _LP64
648 648 if (op->type() == T_INT) {
649 649 __ br(acond, false, Assembler::pn, *(op->label()));
650 650 } else
651 651 #endif
652 652 __ brx(acond, false, Assembler::pn, *(op->label()));
653 653 }
654 654 // The peephole pass fills the delay slot
655 655 }
656 656
657 657
658 658 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
659 659 Bytecodes::Code code = op->bytecode();
660 660 LIR_Opr dst = op->result_opr();
661 661
662 662 switch(code) {
663 663 case Bytecodes::_i2l: {
664 664 Register rlo = dst->as_register_lo();
665 665 Register rhi = dst->as_register_hi();
666 666 Register rval = op->in_opr()->as_register();
667 667 #ifdef _LP64
668 668 __ sra(rval, 0, rlo);
669 669 #else
670 670 __ mov(rval, rlo);
671 671 __ sra(rval, BitsPerInt-1, rhi);
672 672 #endif
673 673 break;
674 674 }
675 675 case Bytecodes::_i2d:
676 676 case Bytecodes::_i2f: {
677 677 bool is_double = (code == Bytecodes::_i2d);
678 678 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
679 679 FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
680 680 FloatRegister rsrc = op->in_opr()->as_float_reg();
681 681 if (rsrc != rdst) {
682 682 __ fmov(FloatRegisterImpl::S, rsrc, rdst);
683 683 }
684 684 __ fitof(w, rdst, rdst);
685 685 break;
686 686 }
687 687 case Bytecodes::_f2i:{
688 688 FloatRegister rsrc = op->in_opr()->as_float_reg();
689 689 Address addr = frame_map()->address_for_slot(dst->single_stack_ix());
690 690 Label L;
691 691 // result must be 0 if value is NaN; test by comparing value to itself
692 692 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
693 693 if (!VM_Version::v9_instructions_work()) {
694 694 __ nop();
695 695 }
696 696 __ fb(Assembler::f_unordered, true, Assembler::pn, L);
 697 697       __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
698 698 __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
699 699 // move integer result from float register to int register
700 700 __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
701 701 __ bind (L);
702 702 break;
703 703 }
704 704 case Bytecodes::_l2i: {
705 705 Register rlo = op->in_opr()->as_register_lo();
706 706 Register rhi = op->in_opr()->as_register_hi();
707 707 Register rdst = dst->as_register();
708 708 #ifdef _LP64
709 709 __ sra(rlo, 0, rdst);
710 710 #else
711 711 __ mov(rlo, rdst);
712 712 #endif
713 713 break;
714 714 }
715 715 case Bytecodes::_d2f:
716 716 case Bytecodes::_f2d: {
717 717 bool is_double = (code == Bytecodes::_f2d);
718 718 assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
719 719 LIR_Opr val = op->in_opr();
720 720 FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
721 721 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
722 722 FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
723 723 FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
724 724 __ ftof(vw, dw, rval, rdst);
725 725 break;
726 726 }
727 727 case Bytecodes::_i2s:
728 728 case Bytecodes::_i2b: {
729 729 Register rval = op->in_opr()->as_register();
730 730 Register rdst = dst->as_register();
731 731 int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
732 732 __ sll (rval, shift, rdst);
733 733 __ sra (rdst, shift, rdst);
734 734 break;
735 735 }
736 736 case Bytecodes::_i2c: {
737 737 Register rval = op->in_opr()->as_register();
738 738 Register rdst = dst->as_register();
739 739 int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
740 740 __ sll (rval, shift, rdst);
741 741 __ srl (rdst, shift, rdst);
742 742 break;
743 743 }
744 744
745 745 default: ShouldNotReachHere();
746 746 }
747 747 }
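
The _i2b/_i2s and _i2c cases above narrow a value by shifting it to the top of the word and back down: an arithmetic right shift sign-extends (byte, short) while a logical right shift zero-extends (char). The same trick in plain C++ (a sketch assuming 32-bit int; the unsigned casts sidestep signed-shift pitfalls):

    #include <cstdint>
    #include <cstdio>

    int32_t i2b(int32_t x) {  // sll 24; sra 24 -> sign-extend low byte
      return (int32_t)((uint32_t)x << 24) >> 24;
    }
    int32_t i2c(int32_t x) {  // sll 16; srl 16 -> zero-extend low 16 bits
      return (int32_t)(((uint32_t)x << 16) >> 16);
    }

    int main() {
      printf("%d %d\n", i2b(0x1FF), i2c(-1));  // prints -1 65535
      return 0;
    }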
748 748
749 749
750 750 void LIR_Assembler::align_call(LIR_Code) {
751 751 // do nothing since all instructions are word aligned on sparc
752 752 }
753 753
754 754
755 755 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
756 756 __ call(op->addr(), rtype);
757 757 // The peephole pass fills the delay slot, add_call_info is done in
758 758 // LIR_Assembler::emit_delay.
759 759 }
760 760
761 761
762 762 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
763 763 RelocationHolder rspec = virtual_call_Relocation::spec(pc());
764 764 __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
765 765 __ relocate(rspec);
766 766 __ call(op->addr(), relocInfo::none);
767 767 // The peephole pass fills the delay slot, add_call_info is done in
768 768 // LIR_Assembler::emit_delay.
769 769 }
770 770
771 771
772 772 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
773 773 add_debug_info_for_null_check_here(op->info());
774 774 __ load_klass(O0, G3_scratch);
775 775 if (__ is_simm13(op->vtable_offset())) {
776 776 __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
777 777 } else {
778 778 // This will generate 2 instructions
779 779 __ set(op->vtable_offset(), G5_method);
780 780 // ld_ptr, set_hi, set
781 781 __ ld_ptr(G3_scratch, G5_method, G5_method);
782 782 }
783 783 __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
784 784 __ callr(G3_scratch, G0);
785 785 // the peephole pass fills the delay slot
786 786 }
787 787
788 788 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
789 789 int store_offset;
 790 790   if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
791 791 assert(!unaligned, "can't handle this");
792 792 // for offsets larger than a simm13 we setup the offset in O7
793 793 __ set(offset, O7);
794 794 store_offset = store(from_reg, base, O7, type, wide);
795 795 } else {
796 796 if (type == T_ARRAY || type == T_OBJECT) {
797 797 __ verify_oop(from_reg->as_register());
798 798 }
799 799 store_offset = code_offset();
800 800 switch (type) {
801 801 case T_BOOLEAN: // fall through
802 802 case T_BYTE : __ stb(from_reg->as_register(), base, offset); break;
803 803 case T_CHAR : __ sth(from_reg->as_register(), base, offset); break;
804 804 case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
805 805 case T_INT : __ stw(from_reg->as_register(), base, offset); break;
806 806 case T_LONG :
807 807 #ifdef _LP64
808 808 if (unaligned || PatchALot) {
809 809 __ srax(from_reg->as_register_lo(), 32, O7);
810 810 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
811 811 __ stw(O7, base, offset + hi_word_offset_in_bytes);
812 812 } else {
813 813 __ stx(from_reg->as_register_lo(), base, offset);
814 814 }
815 815 #else
816 816 assert(Assembler::is_simm13(offset + 4), "must be");
817 817 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
818 818 __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
819 819 #endif
820 820 break;
821 821 case T_ADDRESS:
822 822 __ st_ptr(from_reg->as_register(), base, offset);
823 823 break;
824 824 case T_ARRAY : // fall through
825 825 case T_OBJECT:
826 826 {
827 827 if (UseCompressedOops && !wide) {
828 828 __ encode_heap_oop(from_reg->as_register(), G3_scratch);
829 829 store_offset = code_offset();
830 830 __ stw(G3_scratch, base, offset);
831 831 } else {
832 832 __ st_ptr(from_reg->as_register(), base, offset);
833 833 }
834 834 break;
835 835 }
836 836
837 837 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
838 838 case T_DOUBLE:
839 839 {
840 840 FloatRegister reg = from_reg->as_double_reg();
841 841 // split unaligned stores
842 842 if (unaligned || PatchALot) {
843 843 assert(Assembler::is_simm13(offset + 4), "must be");
844 844 __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
845 845 __ stf(FloatRegisterImpl::S, reg, base, offset);
846 846 } else {
847 847 __ stf(FloatRegisterImpl::D, reg, base, offset);
848 848 }
849 849 break;
850 850 }
851 851 default : ShouldNotReachHere();
852 852 }
853 853 }
854 854 return store_offset;
855 855 }
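
The is_simm13 checks above guard SPARC's 13-bit signed immediate displacement field; any displacement outside that range must first be materialized in a scratch register (O7 here). A one-line sketch of the predicate (the range is a SPARC ISA fact; the free-standing helper is illustrative):

    #include <cstdint>

    // 13-bit signed immediate: [-4096, 4095]
    static bool is_simm13(intptr_t v) { return -4096 <= v && v <= 4095; }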
856 856
857 857
858 858 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
859 859 if (type == T_ARRAY || type == T_OBJECT) {
860 860 __ verify_oop(from_reg->as_register());
861 861 }
862 862 int store_offset = code_offset();
863 863 switch (type) {
864 864 case T_BOOLEAN: // fall through
865 865 case T_BYTE : __ stb(from_reg->as_register(), base, disp); break;
866 866 case T_CHAR : __ sth(from_reg->as_register(), base, disp); break;
867 867 case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
868 868 case T_INT : __ stw(from_reg->as_register(), base, disp); break;
869 869 case T_LONG :
870 870 #ifdef _LP64
871 871 __ stx(from_reg->as_register_lo(), base, disp);
872 872 #else
873 873 assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
874 874 __ std(from_reg->as_register_hi(), base, disp);
875 875 #endif
876 876 break;
877 877 case T_ADDRESS:
878 878 __ st_ptr(from_reg->as_register(), base, disp);
879 879 break;
880 880 case T_ARRAY : // fall through
881 881 case T_OBJECT:
882 882 {
883 883 if (UseCompressedOops && !wide) {
884 884 __ encode_heap_oop(from_reg->as_register(), G3_scratch);
885 885 store_offset = code_offset();
886 886 __ stw(G3_scratch, base, disp);
887 887 } else {
888 888 __ st_ptr(from_reg->as_register(), base, disp);
889 889 }
890 890 break;
891 891 }
892 892 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
893 893 case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
894 894 default : ShouldNotReachHere();
895 895 }
896 896 return store_offset;
897 897 }
898 898
899 899
900 900 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
901 901 int load_offset;
 902 902   if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
903 903 assert(base != O7, "destroying register");
904 904 assert(!unaligned, "can't handle this");
905 905 // for offsets larger than a simm13 we setup the offset in O7
906 906 __ set(offset, O7);
907 907 load_offset = load(base, O7, to_reg, type, wide);
908 908 } else {
909 909 load_offset = code_offset();
910 910 switch(type) {
911 911 case T_BOOLEAN: // fall through
912 912 case T_BYTE : __ ldsb(base, offset, to_reg->as_register()); break;
913 913 case T_CHAR : __ lduh(base, offset, to_reg->as_register()); break;
914 914 case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
915 915 case T_INT : __ ld(base, offset, to_reg->as_register()); break;
916 916 case T_LONG :
917 917 if (!unaligned) {
918 918 #ifdef _LP64
919 919 __ ldx(base, offset, to_reg->as_register_lo());
920 920 #else
921 921 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
922 922 "must be sequential");
923 923 __ ldd(base, offset, to_reg->as_register_hi());
924 924 #endif
925 925 } else {
926 926 #ifdef _LP64
927 927 assert(base != to_reg->as_register_lo(), "can't handle this");
928 928 assert(O7 != to_reg->as_register_lo(), "can't handle this");
929 929 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
930 930 __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
931 931 __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
932 932 __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
933 933 #else
934 934 if (base == to_reg->as_register_lo()) {
935 935 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
936 936 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
937 937 } else {
938 938 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
939 939 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
940 940 }
941 941 #endif
942 942 }
943 943 break;
944 944 case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
945 945 case T_ARRAY : // fall through
946 946 case T_OBJECT:
947 947 {
948 948 if (UseCompressedOops && !wide) {
949 949 __ lduw(base, offset, to_reg->as_register());
950 950 __ decode_heap_oop(to_reg->as_register());
951 951 } else {
952 952 __ ld_ptr(base, offset, to_reg->as_register());
953 953 }
954 954 break;
955 955 }
956 956 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
957 957 case T_DOUBLE:
958 958 {
959 959 FloatRegister reg = to_reg->as_double_reg();
960 960 // split unaligned loads
961 961 if (unaligned || PatchALot) {
962 962 __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
963 963 __ ldf(FloatRegisterImpl::S, base, offset, reg);
964 964 } else {
965 965 __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
966 966 }
967 967 break;
968 968 }
969 969 default : ShouldNotReachHere();
970 970 }
971 971 if (type == T_ARRAY || type == T_OBJECT) {
972 972 __ verify_oop(to_reg->as_register());
973 973 }
974 974 }
975 975 return load_offset;
976 976 }
977 977
978 978
979 979 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
980 980 int load_offset = code_offset();
981 981 switch(type) {
982 982 case T_BOOLEAN: // fall through
983 983 case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break;
984 984 case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break;
985 985 case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
986 986 case T_INT : __ ld(base, disp, to_reg->as_register()); break;
987 987 case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
988 988 case T_ARRAY : // fall through
989 989 case T_OBJECT:
990 990 {
991 991 if (UseCompressedOops && !wide) {
992 992 __ lduw(base, disp, to_reg->as_register());
993 993 __ decode_heap_oop(to_reg->as_register());
994 994 } else {
995 995 __ ld_ptr(base, disp, to_reg->as_register());
996 996 }
997 997 break;
998 998 }
999 999 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
1000 1000 case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
1001 1001 case T_LONG :
1002 1002 #ifdef _LP64
1003 1003 __ ldx(base, disp, to_reg->as_register_lo());
1004 1004 #else
1005 1005 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
1006 1006 "must be sequential");
1007 1007 __ ldd(base, disp, to_reg->as_register_hi());
1008 1008 #endif
1009 1009 break;
1010 1010 default : ShouldNotReachHere();
1011 1011 }
1012 1012 if (type == T_ARRAY || type == T_OBJECT) {
1013 1013 __ verify_oop(to_reg->as_register());
1014 1014 }
1015 1015 return load_offset;
1016 1016 }
1017 1017
1018 1018 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
1019 1019 LIR_Const* c = src->as_constant_ptr();
1020 1020 switch (c->type()) {
1021 1021 case T_INT:
1022 1022 case T_FLOAT: {
1023 1023 Register src_reg = O7;
1024 1024 int value = c->as_jint_bits();
1025 1025 if (value == 0) {
1026 1026 src_reg = G0;
1027 1027 } else {
1028 1028 __ set(value, O7);
1029 1029 }
1030 1030 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1031 1031 __ stw(src_reg, addr.base(), addr.disp());
1032 1032 break;
1033 1033 }
1034 1034 case T_ADDRESS: {
1035 1035 Register src_reg = O7;
1036 1036 int value = c->as_jint_bits();
1037 1037 if (value == 0) {
1038 1038 src_reg = G0;
1039 1039 } else {
1040 1040 __ set(value, O7);
1041 1041 }
1042 1042 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1043 1043 __ st_ptr(src_reg, addr.base(), addr.disp());
1044 1044 break;
1045 1045 }
1046 1046 case T_OBJECT: {
1047 1047 Register src_reg = O7;
1048 1048 jobject2reg(c->as_jobject(), src_reg);
1049 1049 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1050 1050 __ st_ptr(src_reg, addr.base(), addr.disp());
1051 1051 break;
1052 1052 }
1053 1053 case T_LONG:
1054 1054 case T_DOUBLE: {
1055 1055 Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
1056 1056
1057 1057 Register tmp = O7;
1058 1058 int value_lo = c->as_jint_lo_bits();
1059 1059 if (value_lo == 0) {
1060 1060 tmp = G0;
1061 1061 } else {
1062 1062 __ set(value_lo, O7);
1063 1063 }
1064 1064 __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
1065 1065 int value_hi = c->as_jint_hi_bits();
1066 1066 if (value_hi == 0) {
1067 1067 tmp = G0;
1068 1068 } else {
1069 1069 __ set(value_hi, O7);
1070 1070 }
1071 1071 __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
1072 1072 break;
1073 1073 }
1074 1074 default:
1075 1075 Unimplemented();
1076 1076 }
1077 1077 }
1078 1078
1079 1079
1080 1080 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
1081 1081 LIR_Const* c = src->as_constant_ptr();
1082 1082 LIR_Address* addr = dest->as_address_ptr();
1083 1083 Register base = addr->base()->as_pointer_register();
1084 1084 int offset = -1;
1085 1085
1086 1086 switch (c->type()) {
1087 1087 case T_INT:
1088 1088 case T_FLOAT:
1089 1089 case T_ADDRESS: {
1090 1090 LIR_Opr tmp = FrameMap::O7_opr;
1091 1091 int value = c->as_jint_bits();
1092 1092 if (value == 0) {
1093 1093 tmp = FrameMap::G0_opr;
1094 1094 } else if (Assembler::is_simm13(value)) {
1095 1095 __ set(value, O7);
1096 1096 }
1097 1097 if (addr->index()->is_valid()) {
1098 1098 assert(addr->disp() == 0, "must be zero");
1099 1099 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
1100 1100 } else {
1101 1101 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
1102 1102 offset = store(tmp, base, addr->disp(), type, wide, false);
1103 1103 }
1104 1104 break;
1105 1105 }
1106 1106 case T_LONG:
1107 1107 case T_DOUBLE: {
1108 1108 assert(!addr->index()->is_valid(), "can't handle reg reg address here");
1109 1109 assert(Assembler::is_simm13(addr->disp()) &&
1110 1110 Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");
1111 1111
1112 1112 LIR_Opr tmp = FrameMap::O7_opr;
1113 1113 int value_lo = c->as_jint_lo_bits();
1114 1114 if (value_lo == 0) {
1115 1115 tmp = FrameMap::G0_opr;
1116 1116 } else {
1117 1117 __ set(value_lo, O7);
1118 1118 }
1119 1119 offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
1120 1120 int value_hi = c->as_jint_hi_bits();
1121 1121 if (value_hi == 0) {
1122 1122 tmp = FrameMap::G0_opr;
1123 1123 } else {
1124 1124 __ set(value_hi, O7);
1125 1125 }
1126 1126 offset = store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
1127 1127 break;
1128 1128 }
1129 1129 case T_OBJECT: {
1130 1130 jobject obj = c->as_jobject();
1131 1131 LIR_Opr tmp;
1132 1132 if (obj == NULL) {
1133 1133 tmp = FrameMap::G0_opr;
1134 1134 } else {
1135 1135 tmp = FrameMap::O7_opr;
1136 1136 jobject2reg(c->as_jobject(), O7);
1137 1137 }
1138 1138 // handle either reg+reg or reg+disp address
1139 1139 if (addr->index()->is_valid()) {
1140 1140 assert(addr->disp() == 0, "must be zero");
1141 1141 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
1142 1142 } else {
1143 1143 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
1144 1144 offset = store(tmp, base, addr->disp(), type, wide, false);
1145 1145 }
1146 1146
1147 1147 break;
1148 1148 }
1149 1149 default:
1150 1150 Unimplemented();
1151 1151 }
1152 1152 if (info != NULL) {
1153 1153 assert(offset != -1, "offset should've been set");
1154 1154 add_debug_info_for_null_check(offset, info);
1155 1155 }
1156 1156 }
1157 1157
1158 1158
1159 1159 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
1160 1160 LIR_Const* c = src->as_constant_ptr();
1161 1161 LIR_Opr to_reg = dest;
1162 1162
1163 1163 switch (c->type()) {
1164 1164 case T_INT:
1165 1165 case T_ADDRESS:
1166 1166 {
1167 1167 jint con = c->as_jint();
1168 1168 if (to_reg->is_single_cpu()) {
1169 1169 assert(patch_code == lir_patch_none, "no patching handled here");
1170 1170 __ set(con, to_reg->as_register());
1171 1171 } else {
1172 1172 ShouldNotReachHere();
1173 1173 assert(to_reg->is_single_fpu(), "wrong register kind");
1174 1174
1175 1175 __ set(con, O7);
1176 1176 Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
1177 1177 __ st(O7, temp_slot);
1178 1178 __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
1179 1179 }
1180 1180 }
1181 1181 break;
1182 1182
1183 1183 case T_LONG:
1184 1184 {
1185 1185 jlong con = c->as_jlong();
1186 1186
1187 1187 if (to_reg->is_double_cpu()) {
1188 1188 #ifdef _LP64
1189 1189 __ set(con, to_reg->as_register_lo());
1190 1190 #else
1191 1191 __ set(low(con), to_reg->as_register_lo());
1192 1192 __ set(high(con), to_reg->as_register_hi());
1193 1193 #endif
1194 1194 #ifdef _LP64
1195 1195 } else if (to_reg->is_single_cpu()) {
1196 1196 __ set(con, to_reg->as_register());
1197 1197 #endif
1198 1198 } else {
1199 1199 ShouldNotReachHere();
1200 1200 assert(to_reg->is_double_fpu(), "wrong register kind");
1201 1201 Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS);
1202 1202 Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
1203 1203 __ set(low(con), O7);
1204 1204 __ st(O7, temp_slot_lo);
1205 1205 __ set(high(con), O7);
1206 1206 __ st(O7, temp_slot_hi);
1207 1207 __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
1208 1208 }
1209 1209 }
1210 1210 break;
1211 1211
1212 1212 case T_OBJECT:
1213 1213 {
1214 1214 if (patch_code == lir_patch_none) {
1215 1215 jobject2reg(c->as_jobject(), to_reg->as_register());
1216 1216 } else {
1217 1217 jobject2reg_with_patching(to_reg->as_register(), info);
1218 1218 }
1219 1219 }
1220 1220 break;
1221 1221
1222 1222 case T_FLOAT:
1223 1223 {
1224 1224 address const_addr = __ float_constant(c->as_jfloat());
1225 1225 if (const_addr == NULL) {
1226 1226 bailout("const section overflow");
1227 1227 break;
1228 1228 }
1229 1229 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1230 1230 AddressLiteral const_addrlit(const_addr, rspec);
1231 1231 if (to_reg->is_single_fpu()) {
1232 1232 __ patchable_sethi(const_addrlit, O7);
1233 1233 __ relocate(rspec);
1234 1234 __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());
1235 1235
1236 1236 } else {
1237 1237 assert(to_reg->is_single_cpu(), "Must be a cpu register.");
1238 1238
1239 1239 __ set(const_addrlit, O7);
1240 1240 __ ld(O7, 0, to_reg->as_register());
1241 1241 }
1242 1242 }
1243 1243 break;
1244 1244
1245 1245 case T_DOUBLE:
1246 1246 {
1247 1247 address const_addr = __ double_constant(c->as_jdouble());
1248 1248 if (const_addr == NULL) {
1249 1249 bailout("const section overflow");
1250 1250 break;
1251 1251 }
1252 1252 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1253 1253
1254 1254 if (to_reg->is_double_fpu()) {
1255 1255 AddressLiteral const_addrlit(const_addr, rspec);
1256 1256 __ patchable_sethi(const_addrlit, O7);
1257 1257 __ relocate(rspec);
1258 1258 __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
1259 1259 } else {
1260 1260 assert(to_reg->is_double_cpu(), "Must be a long register.");
1261 1261 #ifdef _LP64
1262 1262 __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
1263 1263 #else
1264 1264 __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
1265 1265 __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
1266 1266 #endif
1267 1267 }
1268 1268
1269 1269 }
1270 1270 break;
1271 1271
1272 1272 default:
1273 1273 ShouldNotReachHere();
1274 1274 }
1275 1275 }
1276 1276
1277 1277 Address LIR_Assembler::as_Address(LIR_Address* addr) {
1278 1278 Register reg = addr->base()->as_register();
1279 1279 return Address(reg, addr->disp());
1280 1280 }
1281 1281
1282 1282
1283 1283 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1284 1284 switch (type) {
1285 1285 case T_INT:
1286 1286 case T_FLOAT: {
1287 1287 Register tmp = O7;
1288 1288 Address from = frame_map()->address_for_slot(src->single_stack_ix());
1289 1289 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
1290 1290 __ lduw(from.base(), from.disp(), tmp);
1291 1291 __ stw(tmp, to.base(), to.disp());
1292 1292 break;
1293 1293 }
1294 1294 case T_OBJECT: {
1295 1295 Register tmp = O7;
1296 1296 Address from = frame_map()->address_for_slot(src->single_stack_ix());
1297 1297 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
1298 1298 __ ld_ptr(from.base(), from.disp(), tmp);
1299 1299 __ st_ptr(tmp, to.base(), to.disp());
1300 1300 break;
1301 1301 }
1302 1302 case T_LONG:
1303 1303 case T_DOUBLE: {
1304 1304 Register tmp = O7;
1305 1305 Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
1306 1306 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix());
1307 1307 __ lduw(from.base(), from.disp(), tmp);
1308 1308 __ stw(tmp, to.base(), to.disp());
1309 1309 __ lduw(from.base(), from.disp() + 4, tmp);
1310 1310 __ stw(tmp, to.base(), to.disp() + 4);
1311 1311 break;
1312 1312 }
1313 1313
1314 1314 default:
1315 1315 ShouldNotReachHere();
1316 1316 }
1317 1317 }
1318 1318
1319 1319
1320 1320 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
1321 1321 Address base = as_Address(addr);
1322 1322 return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
1323 1323 }
1324 1324
1325 1325
1326 1326 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
1327 1327 Address base = as_Address(addr);
1328 1328 return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
1329 1329 }
1330 1330
1331 1331
1332 1332 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
1333 1333 LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
1334 1334
1335 1335 LIR_Address* addr = src_opr->as_address_ptr();
1336 1336 LIR_Opr to_reg = dest;
1337 1337
1338 1338 Register src = addr->base()->as_pointer_register();
1339 1339 Register disp_reg = noreg;
1340 1340 int disp_value = addr->disp();
1341 1341 bool needs_patching = (patch_code != lir_patch_none);
1342 1342
1343 1343 if (addr->base()->type() == T_OBJECT) {
1344 1344 __ verify_oop(src);
1345 1345 }
1346 1346
1347 1347 PatchingStub* patch = NULL;
1348 1348 if (needs_patching) {
1349 1349 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1350 1350 assert(!to_reg->is_double_cpu() ||
1351 1351 patch_code == lir_patch_none ||
1352 1352 patch_code == lir_patch_normal, "patching doesn't match register");
1353 1353 }
1354 1354
1355 1355 if (addr->index()->is_illegal()) {
1356 1356 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
1357 1357 if (needs_patching) {
1358 1358 __ patchable_set(0, O7);
1359 1359 } else {
1360 1360 __ set(disp_value, O7);
1361 1361 }
1362 1362 disp_reg = O7;
1363 1363 }
1364 1364 } else if (unaligned || PatchALot) {
1365 1365 __ add(src, addr->index()->as_register(), O7);
1366 1366 src = O7;
1367 1367 } else {
1368 1368 disp_reg = addr->index()->as_pointer_register();
1369 1369 assert(disp_value == 0, "can't handle 3 operand addresses");
1370 1370 }
1371 1371
1372 1372 // remember the offset of the load. The patching_epilog must be done
1373 1373 // before the call to add_debug_info, otherwise the PcDescs don't get
1374 1374 // entered in increasing order.
1375 1375 int offset = code_offset();
1376 1376
1377 1377 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
1378 1378 if (disp_reg == noreg) {
1379 1379 offset = load(src, disp_value, to_reg, type, wide, unaligned);
1380 1380 } else {
1381 1381 assert(!unaligned, "can't handle this");
1382 1382 offset = load(src, disp_reg, to_reg, type, wide);
1383 1383 }
1384 1384
1385 1385 if (patch != NULL) {
1386 1386 patching_epilog(patch, patch_code, src, info);
1387 1387 }
1388 1388 if (info != NULL) add_debug_info_for_null_check(offset, info);
1389 1389 }
1390 1390
1391 1391
1392 1392 void LIR_Assembler::prefetchr(LIR_Opr src) {
1393 1393 LIR_Address* addr = src->as_address_ptr();
1394 1394 Address from_addr = as_Address(addr);
1395 1395
1396 1396 if (VM_Version::has_v9()) {
1397 1397 __ prefetch(from_addr, Assembler::severalReads);
1398 1398 }
1399 1399 }
1400 1400
1401 1401
1402 1402 void LIR_Assembler::prefetchw(LIR_Opr src) {
1403 1403 LIR_Address* addr = src->as_address_ptr();
1404 1404 Address from_addr = as_Address(addr);
1405 1405
1406 1406 if (VM_Version::has_v9()) {
1407 1407 __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
1408 1408 }
1409 1409 }
1410 1410
1411 1411
1412 1412 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1413 1413 Address addr;
1414 1414 if (src->is_single_word()) {
1415 1415 addr = frame_map()->address_for_slot(src->single_stack_ix());
1416 1416 } else if (src->is_double_word()) {
1417 1417 addr = frame_map()->address_for_double_slot(src->double_stack_ix());
1418 1418 }
1419 1419
1420 1420 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
1421 1421 load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
1422 1422 }
1423 1423
1424 1424
1425 1425 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
1426 1426 Address addr;
1427 1427 if (dest->is_single_word()) {
1428 1428 addr = frame_map()->address_for_slot(dest->single_stack_ix());
1429 1429 } else if (dest->is_double_word()) {
1430 1430 addr = frame_map()->address_for_slot(dest->double_stack_ix());
1431 1431 }
1432 1432 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
1433 1433 store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
1434 1434 }
1435 1435
1436 1436
1437 1437 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
1438 1438 if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
1439 1439 if (from_reg->is_double_fpu()) {
1440 1440 // double to double moves
1441 1441 assert(to_reg->is_double_fpu(), "should match");
1442 1442 __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
1443 1443 } else {
1444 1444 // float to float moves
1445 1445 assert(to_reg->is_single_fpu(), "should match");
1446 1446 __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
1447 1447 }
1448 1448 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1449 1449 if (from_reg->is_double_cpu()) {
1450 1450 #ifdef _LP64
1451 1451 __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
1452 1452 #else
1453 1453 assert(to_reg->is_double_cpu() &&
1454 1454 from_reg->as_register_hi() != to_reg->as_register_lo() &&
1455 1455 from_reg->as_register_lo() != to_reg->as_register_hi(),
1456 1456 "should both be long and not overlap");
1457 1457 // long to long moves
1458 1458 __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
1459 1459 __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
1460 1460 #endif
1461 1461 #ifdef _LP64
1462 1462 } else if (to_reg->is_double_cpu()) {
1463 1463 // int to int moves
1464 1464 __ mov(from_reg->as_register(), to_reg->as_register_lo());
1465 1465 #endif
1466 1466 } else {
1467 1467 // int to int moves
1468 1468 __ mov(from_reg->as_register(), to_reg->as_register());
1469 1469 }
1470 1470 } else {
1471 1471 ShouldNotReachHere();
1472 1472 }
1473 1473 if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
1474 1474 __ verify_oop(to_reg->as_register());
1475 1475 }
1476 1476 }
1477 1477
1478 1478
1479 1479 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
1480 1480 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1481 1481 bool wide, bool unaligned) {
1482 1482 LIR_Address* addr = dest->as_address_ptr();
1483 1483
1484 1484 Register src = addr->base()->as_pointer_register();
1485 1485 Register disp_reg = noreg;
1486 1486 int disp_value = addr->disp();
1487 1487 bool needs_patching = (patch_code != lir_patch_none);
1488 1488
1489 1489 if (addr->base()->is_oop_register()) {
1490 1490 __ verify_oop(src);
1491 1491 }
1492 1492
1493 1493 PatchingStub* patch = NULL;
1494 1494 if (needs_patching) {
1495 1495 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1496 1496 assert(!from_reg->is_double_cpu() ||
1497 1497 patch_code == lir_patch_none ||
1498 1498 patch_code == lir_patch_normal, "patching doesn't match register");
1499 1499 }
1500 1500
1501 1501 if (addr->index()->is_illegal()) {
1502 1502 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
1503 1503 if (needs_patching) {
1504 1504 __ patchable_set(0, O7);
1505 1505 } else {
1506 1506 __ set(disp_value, O7);
1507 1507 }
1508 1508 disp_reg = O7;
1509 1509 }
1510 1510 } else if (unaligned || PatchALot) {
1511 1511 __ add(src, addr->index()->as_register(), O7);
1512 1512 src = O7;
1513 1513 } else {
1514 1514 disp_reg = addr->index()->as_pointer_register();
1515 1515 assert(disp_value == 0, "can't handle 3 operand addresses");
1516 1516 }
1517 1517
1518 1518 // remember the offset of the store. The patching_epilog must be done
1519 1519 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
1520 1520 // entered in increasing order.
1521 1521 int offset;
1522 1522
1523 1523 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
1524 1524 if (disp_reg == noreg) {
1525 1525 offset = store(from_reg, src, disp_value, type, wide, unaligned);
1526 1526 } else {
1527 1527 assert(!unaligned, "can't handle this");
1528 1528 offset = store(from_reg, src, disp_reg, type, wide);
1529 1529 }
1530 1530
1531 1531 if (patch != NULL) {
1532 1532 patching_epilog(patch, patch_code, src, info);
1533 1533 }
1534 1534
1535 1535 if (info != NULL) add_debug_info_for_null_check(offset, info);
1536 1536 }
1537 1537
1538 1538
1539 1539 void LIR_Assembler::return_op(LIR_Opr result) {
1540 1540 // the poll may need a register so just pick one that isn't the return register
1541 1541 #if defined(TIERED) && !defined(_LP64)
1542 1542 if (result->type_field() == LIR_OprDesc::long_type) {
1543 1543 // Must move the result to G1
1544 1544 // Must leave proper result in O0,O1 and G1 (TIERED only)
1545 1545 __ sllx(I0, 32, G1); // Shift bits into high G1
1546 1546 __ srl (I1, 0, I1); // Zero extend I1 (the caller sees it as O1 after the restore)
1547 1547 __ or3 (I1, G1, G1); // OR 64 bits into G1
1548 1548 #ifdef ASSERT
1549 1549 // mangle it so any problems will show up
1550 1550 __ set(0xdeadbeef, I0);
1551 1551 __ set(0xdeadbeef, I1);
1552 1552 #endif
1553 1553 }
1554 1554 #endif // TIERED
1555 1555 __ set((intptr_t)os::get_polling_page(), L0);
1556 1556 __ relocate(relocInfo::poll_return_type);
1557 1557 __ ld_ptr(L0, 0, G0);
1558 1558 __ ret();
1559 1559 __ delayed()->restore();
1560 1560 }
1561 1561
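// The return poll relies on the VM arming the polling page rather than on
// any state in the generated code: the ld into G0 is a dead load until a
// safepoint is requested, at which point the page is protected and the load
// traps. A sketch of the VM side of the protocol (simplified; the real code
// lives in the os layer and the names below are illustrative):
//
//   #include <sys/mman.h>
//   mprotect(polling_page, page_size, PROT_NONE);  // arm: polls now fault
//   mprotect(polling_page, page_size, PROT_READ);  // disarm: polls are no-ops
//
// The SEGV handler maps the faulting pc back to this poll site via the
// relocation recorded just before the ld_ptr.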
1562 1562
1563 1563 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
1564 1564 __ set((intptr_t)os::get_polling_page(), tmp->as_register());
1565 1565 if (info != NULL) {
1566 1566 add_debug_info_for_branch(info);
1567 1567 } else {
1568 1568 __ relocate(relocInfo::poll_type);
1569 1569 }
1570 1570
1571 1571 int offset = __ offset();
1572 1572 __ ld_ptr(tmp->as_register(), 0, G0);
1573 1573
1574 1574 return offset;
1575 1575 }
1576 1576
1577 1577
1578 1578 void LIR_Assembler::emit_static_call_stub() {
1579 1579 address call_pc = __ pc();
1580 1580 address stub = __ start_a_stub(call_stub_size);
1581 1581 if (stub == NULL) {
1582 1582 bailout("static call stub overflow");
1583 1583 return;
1584 1584 }
1585 1585
1586 1586 int start = __ offset();
1587 1587 __ relocate(static_stub_Relocation::spec(call_pc));
1588 1588
1589 1589 __ set_oop(NULL, G5);
1590 1590 // must be set to -1 at code generation time
1591 1591 AddressLiteral addrlit(-1);
1592 1592 __ jump_to(addrlit, G3);
1593 1593 __ delayed()->nop();
1594 1594
1595 1595 assert(__ offset() - start <= call_stub_size, "stub too big");
1596 1596 __ end_a_stub();
1597 1597 }
1598 1598
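// For reference, the stub body laid out above (both constants are patched
// by the runtime when the call is resolved):
//
//   set_oop  NULL -> G5      // methodOop, patched at resolution time
//   jump_to  -1   -> via G3  // resolved entry point, patched likewise
//   nop                      // delay slot of the jump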
1599 1599
1600 1600 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1601 1601 if (opr1->is_single_fpu()) {
1602 1602 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
1603 1603 } else if (opr1->is_double_fpu()) {
1604 1604 __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
1605 1605 } else if (opr1->is_single_cpu()) {
1606 1606 if (opr2->is_constant()) {
1607 1607 switch (opr2->as_constant_ptr()->type()) {
1608 1608 case T_INT:
1609 1609 { jint con = opr2->as_constant_ptr()->as_jint();
1610 1610 if (Assembler::is_simm13(con)) {
1611 1611 __ cmp(opr1->as_register(), con);
1612 1612 } else {
1613 1613 __ set(con, O7);
1614 1614 __ cmp(opr1->as_register(), O7);
1615 1615 }
1616 1616 }
1617 1617 break;
1618 1618
1619 1619 case T_OBJECT:
1620 1620 // there are only equal/notequal comparisons on objects
1621 1621 { jobject con = opr2->as_constant_ptr()->as_jobject();
1622 1622 if (con == NULL) {
1623 1623 __ cmp(opr1->as_register(), 0);
1624 1624 } else {
1625 1625 jobject2reg(con, O7);
1626 1626 __ cmp(opr1->as_register(), O7);
1627 1627 }
1628 1628 }
1629 1629 break;
1630 1630
1631 1631 default:
1632 1632 ShouldNotReachHere();
1633 1633 break;
1634 1634 }
1635 1635 } else {
1636 1636 if (opr2->is_address()) {
1637 1637 LIR_Address * addr = opr2->as_address_ptr();
1638 1638 BasicType type = addr->type();
1639 1639 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
1640 1640 else __ ld(as_Address(addr), O7);
1641 1641 __ cmp(opr1->as_register(), O7);
1642 1642 } else {
1643 1643 __ cmp(opr1->as_register(), opr2->as_register());
1644 1644 }
1645 1645 }
1646 1646 } else if (opr1->is_double_cpu()) {
1647 1647 Register xlo = opr1->as_register_lo();
1648 1648 Register xhi = opr1->as_register_hi();
1649 1649 if (opr2->is_constant() && opr2->as_jlong() == 0) {
1650 1650 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
1651 1651 #ifdef _LP64
1652 1652 __ orcc(xhi, G0, G0);
1653 1653 #else
1654 1654 __ orcc(xhi, xlo, G0);
1655 1655 #endif
1656 1656 } else if (opr2->is_register()) {
1657 1657 Register ylo = opr2->as_register_lo();
1658 1658 Register yhi = opr2->as_register_hi();
1659 1659 #ifdef _LP64
1660 1660 __ cmp(xlo, ylo);
1661 1661 #else
1662 1662 __ subcc(xlo, ylo, xlo);
1663 1663 __ subccc(xhi, yhi, xhi);
1664 1664 if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
1665 1665 __ orcc(xhi, xlo, G0);
1666 1666 }
1667 1667 #endif
1668 1668 } else {
1669 1669 ShouldNotReachHere();
1670 1670 }
1671 1671 } else if (opr1->is_address()) {
1672 1672 LIR_Address * addr = opr1->as_address_ptr();
1673 1673 BasicType type = addr->type();
1674 1674 assert (opr2->is_constant(), "Checking");
1675 1675 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
1676 1676 else __ ld(as_Address(addr), O7);
1677 1677 __ cmp(O7, opr2->as_constant_ptr()->as_jint());
1678 1678 } else {
1679 1679 ShouldNotReachHere();
1680 1680 }
1681 1681 }
1682 1682
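// On 32-bit, the double_cpu arm of comp_op synthesizes a 64-bit compare from
// two 32-bit halves: subcc subtracts the low words and records the borrow,
// and subccc folds that borrow into the high-word subtract. Worked instance
// for x = 1, y = 2 (as longs):
//
//   subcc  xlo(1), ylo(2)  -> 0xFFFFFFFF, borrow set
//   subccc xhi(0), yhi(0)  -> 0 - 0 - borrow = 0xFFFFFFFF
//
// The condition codes now describe x - y == -1, i.e. x < y; for eq/ne the
// extra orcc collapses both result halves into a single zero test.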
1683 1683
1684 1684 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1685 1685 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1686 1686 bool is_unordered_less = (code == lir_ucmp_fd2i);
1687 1687 if (left->is_single_fpu()) {
1688 1688 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1689 1689 } else if (left->is_double_fpu()) {
1690 1690 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1691 1691 } else {
1692 1692 ShouldNotReachHere();
1693 1693 }
1694 1694 } else if (code == lir_cmp_l2i) {
1695 1695 #ifdef _LP64
1696 1696 __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
1697 1697 #else
1698 1698 __ lcmp(left->as_register_hi(), left->as_register_lo(),
1699 1699 right->as_register_hi(), right->as_register_lo(),
1700 1700 dst->as_register());
1701 1701 #endif
1702 1702 } else {
1703 1703 ShouldNotReachHere();
1704 1704 }
1705 1705 }
1706 1706
1707 1707
1708 1708 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1709 1709 Assembler::Condition acond;
1710 1710 switch (condition) {
1711 1711 case lir_cond_equal: acond = Assembler::equal; break;
1712 1712 case lir_cond_notEqual: acond = Assembler::notEqual; break;
1713 1713 case lir_cond_less: acond = Assembler::less; break;
1714 1714 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
1715 1715 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
1716 1716 case lir_cond_greater: acond = Assembler::greater; break;
1717 1717 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
1718 1718 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
1719 1719 default: ShouldNotReachHere();
1720 1720 };
1721 1721
1722 1722 if (opr1->is_constant() && opr1->type() == T_INT) {
1723 1723 Register dest = result->as_register();
1724 1724 // load up first part of constant before branch
1725 1725 // and do the rest in the delay slot.
1726 1726 if (!Assembler::is_simm13(opr1->as_jint())) {
1727 1727 __ sethi(opr1->as_jint(), dest);
1728 1728 }
1729 1729 } else if (opr1->is_constant()) {
1730 1730 const2reg(opr1, result, lir_patch_none, NULL);
1731 1731 } else if (opr1->is_register()) {
1732 1732 reg2reg(opr1, result);
1733 1733 } else if (opr1->is_stack()) {
1734 1734 stack2reg(opr1, result, result->type());
1735 1735 } else {
1736 1736 ShouldNotReachHere();
1737 1737 }
1738 1738 Label skip;
1739 1739 #ifdef _LP64
1740 1740 if (type == T_INT) {
1741 1741 __ br(acond, false, Assembler::pt, skip);
1742 1742 } else
1743 1743 #endif
1744 1744 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
1745 1745 if (opr1->is_constant() && opr1->type() == T_INT) {
1746 1746 Register dest = result->as_register();
1747 1747 if (Assembler::is_simm13(opr1->as_jint())) {
1748 1748 __ delayed()->or3(G0, opr1->as_jint(), dest);
1749 1749 } else {
1750 1750 // the sethi has been done above, so just put in the low 10 bits
1751 1751 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
1752 1752 }
1753 1753 } else {
1754 1754 // can't do anything useful in the delay slot
1755 1755 __ delayed()->nop();
1756 1756 }
1757 1757 if (opr2->is_constant()) {
1758 1758 const2reg(opr2, result, lir_patch_none, NULL);
1759 1759 } else if (opr2->is_register()) {
1760 1760 reg2reg(opr2, result);
1761 1761 } else if (opr2->is_stack()) {
1762 1762 stack2reg(opr2, result, result->type());
1763 1763 } else {
1764 1764 ShouldNotReachHere();
1765 1765 }
1766 1766 __ bind(skip);
1767 1767 }
1768 1768
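// The constant path in cmove uses the classic SPARC two-instruction split of
// a 32-bit immediate: sethi installs the upper 22 bits before the branch and
// the or3 in the delay slot supplies the low 10 (con & 0x3ff). Worked
// instance for con = 0x12345678:
//
//   sethi %hi(0x12345678), dest   // dest = 0x12345400
//   or3   dest, 0x278, dest       // 0x12345678 & 0x3ff == 0x278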
1769 1769
1770 1770 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1771 1771 assert(info == NULL, "unused on this code path");
1772 1772 assert(left->is_register(), "wrong items state");
1773 1773 assert(dest->is_register(), "wrong items state");
1774 1774
1775 1775 if (right->is_register()) {
1776 1776 if (dest->is_float_kind()) {
1777 1777
1778 1778 FloatRegister lreg, rreg, res;
1779 1779 FloatRegisterImpl::Width w;
1780 1780 if (right->is_single_fpu()) {
1781 1781 w = FloatRegisterImpl::S;
1782 1782 lreg = left->as_float_reg();
1783 1783 rreg = right->as_float_reg();
1784 1784 res = dest->as_float_reg();
1785 1785 } else {
1786 1786 w = FloatRegisterImpl::D;
1787 1787 lreg = left->as_double_reg();
1788 1788 rreg = right->as_double_reg();
1789 1789 res = dest->as_double_reg();
1790 1790 }
1791 1791
1792 1792 switch (code) {
1793 1793 case lir_add: __ fadd(w, lreg, rreg, res); break;
1794 1794 case lir_sub: __ fsub(w, lreg, rreg, res); break;
1795 1795 case lir_mul: // fall through
1796 1796 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
1797 1797 case lir_div: // fall through
1798 1798 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
1799 1799 default: ShouldNotReachHere();
1800 1800 }
1801 1801
1802 1802 } else if (dest->is_double_cpu()) {
1803 1803 #ifdef _LP64
1804 1804 Register dst_lo = dest->as_register_lo();
1805 1805 Register op1_lo = left->as_pointer_register();
1806 1806 Register op2_lo = right->as_pointer_register();
1807 1807
1808 1808 switch (code) {
1809 1809 case lir_add:
1810 1810 __ add(op1_lo, op2_lo, dst_lo);
1811 1811 break;
1812 1812
1813 1813 case lir_sub:
1814 1814 __ sub(op1_lo, op2_lo, dst_lo);
1815 1815 break;
1816 1816
1817 1817 default: ShouldNotReachHere();
1818 1818 }
1819 1819 #else
1820 1820 Register op1_lo = left->as_register_lo();
1821 1821 Register op1_hi = left->as_register_hi();
1822 1822 Register op2_lo = right->as_register_lo();
1823 1823 Register op2_hi = right->as_register_hi();
1824 1824 Register dst_lo = dest->as_register_lo();
1825 1825 Register dst_hi = dest->as_register_hi();
1826 1826
1827 1827 switch (code) {
1828 1828 case lir_add:
1829 1829 __ addcc(op1_lo, op2_lo, dst_lo);
1830 1830 __ addc (op1_hi, op2_hi, dst_hi);
1831 1831 break;
1832 1832
1833 1833 case lir_sub:
1834 1834 __ subcc(op1_lo, op2_lo, dst_lo);
1835 1835 __ subc (op1_hi, op2_hi, dst_hi);
1836 1836 break;
1837 1837
1838 1838 default: ShouldNotReachHere();
1839 1839 }
1840 1840 #endif
1841 1841 } else {
1842 1842 assert (right->is_single_cpu(), "Just Checking");
1843 1843
1844 1844 Register lreg = left->as_register();
1845 1845 Register res = dest->as_register();
1846 1846 Register rreg = right->as_register();
1847 1847 switch (code) {
1848 1848 case lir_add: __ add (lreg, rreg, res); break;
1849 1849 case lir_sub: __ sub (lreg, rreg, res); break;
1850 1850 case lir_mul: __ mult (lreg, rreg, res); break;
1851 1851 default: ShouldNotReachHere();
1852 1852 }
1853 1853 }
1854 1854 } else {
1855 1855 assert (right->is_constant(), "must be constant");
1856 1856
1857 1857 if (dest->is_single_cpu()) {
1858 1858 Register lreg = left->as_register();
1859 1859 Register res = dest->as_register();
1860 1860 int simm13 = right->as_constant_ptr()->as_jint();
1861 1861
1862 1862 switch (code) {
1863 1863 case lir_add: __ add (lreg, simm13, res); break;
1864 1864 case lir_sub: __ sub (lreg, simm13, res); break;
1865 1865 case lir_mul: __ mult (lreg, simm13, res); break;
1866 1866 default: ShouldNotReachHere();
1867 1867 }
1868 1868 } else {
1869 1869 Register lreg = left->as_pointer_register();
1870 1870 Register res = dest->as_register_lo();
1871 1871 long con = right->as_constant_ptr()->as_jlong();
1872 1872 assert(Assembler::is_simm13(con), "must be simm13");
1873 1873
1874 1874 switch (code) {
1875 1875 case lir_add: __ add (lreg, (int)con, res); break;
1876 1876 case lir_sub: __ sub (lreg, (int)con, res); break;
1877 1877 case lir_mul: __ mult (lreg, (int)con, res); break;
1878 1878 default: ShouldNotReachHere();
1879 1879 }
1880 1880 }
1881 1881 }
1882 1882 }
1883 1883
1884 1884
1885 1885 void LIR_Assembler::fpop() {
1886 1886 // nothing to do: SPARC has flat FPU registers, not an x87-style register stack
1887 1887 }
1888 1888
1889 1889
1890 1890 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
1891 1891 switch (code) {
1892 1892 case lir_sin:
1893 1893 case lir_tan:
1894 1894 case lir_cos: {
1895 1895 assert(thread->is_valid(), "preserve the thread object for performance reasons");
1896 1896 assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
1897 1897 break;
1898 1898 }
1899 1899 case lir_sqrt: {
1900 1900 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
1901 1901 FloatRegister src_reg = value->as_double_reg();
1902 1902 FloatRegister dst_reg = dest->as_double_reg();
1903 1903 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
1904 1904 break;
1905 1905 }
1906 1906 case lir_abs: {
1907 1907 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
1908 1908 FloatRegister src_reg = value->as_double_reg();
1909 1909 FloatRegister dst_reg = dest->as_double_reg();
1910 1910 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
1911 1911 break;
1912 1912 }
1913 1913 default: {
1914 1914 ShouldNotReachHere();
1915 1915 break;
1916 1916 }
1917 1917 }
1918 1918 }
1919 1919
1920 1920
1921 1921 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
1922 1922 if (right->is_constant()) {
1923 1923 if (dest->is_single_cpu()) {
1924 1924 int simm13 = right->as_constant_ptr()->as_jint();
1925 1925 switch (code) {
1926 1926 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break;
1927 1927 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break;
1928 1928 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break;
1929 1929 default: ShouldNotReachHere();
1930 1930 }
1931 1931 } else {
1932 1932 long c = right->as_constant_ptr()->as_jlong();
1933 1933 assert(c == (int)c && Assembler::is_simm13(c), "out of range");
1934 1934 int simm13 = (int)c;
1935 1935 switch (code) {
1936 1936 case lir_logic_and:
1937 1937 #ifndef _LP64
1938 1938 __ and3 (left->as_register_hi(), 0, dest->as_register_hi());
1939 1939 #endif
1940 1940 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
1941 1941 break;
1942 1942
1943 1943 case lir_logic_or:
1944 1944 #ifndef _LP64
1945 1945 __ or3 (left->as_register_hi(), 0, dest->as_register_hi());
1946 1946 #endif
1947 1947 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
1948 1948 break;
1949 1949
1950 1950 case lir_logic_xor:
1951 1951 #ifndef _LP64
1952 1952 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
1953 1953 #endif
1954 1954 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
1955 1955 break;
1956 1956
1957 1957 default: ShouldNotReachHere();
1958 1958 }
1959 1959 }
1960 1960 } else {
1961 1961 assert(right->is_register(), "right should be in register");
1962 1962
1963 1963 if (dest->is_single_cpu()) {
1964 1964 switch (code) {
1965 1965 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
1966 1966 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
1967 1967 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
1968 1968 default: ShouldNotReachHere();
1969 1969 }
1970 1970 } else {
1971 1971 #ifdef _LP64
1972 1972 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
1973 1973 left->as_register_lo();
1974 1974 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
1975 1975 right->as_register_lo();
1976 1976
1977 1977 switch (code) {
1978 1978 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
1979 1979 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break;
1980 1980 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
1981 1981 default: ShouldNotReachHere();
1982 1982 }
1983 1983 #else
1984 1984 switch (code) {
1985 1985 case lir_logic_and:
1986 1986 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
1987 1987 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
1988 1988 break;
1989 1989
1990 1990 case lir_logic_or:
1991 1991 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
1992 1992 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
1993 1993 break;
1994 1994
1995 1995 case lir_logic_xor:
1996 1996 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
1997 1997 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
1998 1998 break;
1999 1999
2000 2000 default: ShouldNotReachHere();
2001 2001 }
2002 2002 #endif
2003 2003 }
2004 2004 }
2005 2005 }
2006 2006
2007 2007
2008 2008 int LIR_Assembler::shift_amount(BasicType t) {
2009 2009 int elem_size = type2aelembytes(t);
2010 2010 switch (elem_size) {
2011 2011 case 1 : return 0;
2012 2012 case 2 : return 1;
2013 2013 case 4 : return 2;
2014 2014 case 8 : return 3;
2015 2015 }
2016 2016 ShouldNotReachHere();
2017 2017 return -1;
2018 2018 }
2019 2019
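// shift_amount() is just log2 of the element size; emit_arraycopy below uses
// it to scale an element index into a byte offset:
//
//   byte_offset = arrayOopDesc::base_offset_in_bytes(t) + (index << shift_amount(t));
//
// e.g. for T_INT (4-byte elements) the shift is 2, so index 10 lands 40
// bytes past the first element.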
2020 2020
2021 2021 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2022 2022 assert(exceptionOop->as_register() == Oexception, "should match");
2023 2023 assert(exceptionPC->as_register() == Oissuing_pc, "should match");
2024 2024
2025 2025 info->add_register_oop(exceptionOop);
2026 2026
2027 2027 // reuse the debug info from the safepoint poll for the throw op itself
2028 2028 address pc_for_athrow = __ pc();
2029 2029 int pc_for_athrow_offset = __ offset();
2030 2030 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
2031 2031 __ set(pc_for_athrow, Oissuing_pc, rspec);
2032 2032 add_call_info(pc_for_athrow_offset, info); // for exception handler
2033 2033
2034 2034 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
2035 2035 __ delayed()->nop();
2036 2036 }
2037 2037
2038 2038
2039 2039 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2040 2040 assert(exceptionOop->as_register() == Oexception, "should match");
2041 2041
2042 2042 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
2043 2043 __ delayed()->nop();
2044 2044 }
2045 2045
2046 2046
2047 2047 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2048 2048 Register src = op->src()->as_register();
2049 2049 Register dst = op->dst()->as_register();
2050 2050 Register src_pos = op->src_pos()->as_register();
2051 2051 Register dst_pos = op->dst_pos()->as_register();
2052 2052 Register length = op->length()->as_register();
2053 2053 Register tmp = op->tmp()->as_register();
2054 2054 Register tmp2 = O7;
2055 2055
2056 2056 int flags = op->flags();
2057 2057 ciArrayKlass* default_type = op->expected_type();
2058 2058 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2059 2059 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2060 2060
2061 2061 // set up the arraycopy stub information
2062 2062 ArrayCopyStub* stub = op->stub();
2063 2063
2064 2064 // always do the stub if no type information is available; it's ok if
2065 2065 // the known type isn't loaded, since the code sanity-checks it in debug
2066 2066 // mode, and the exact type isn't required once it is known. also check
2067 2067 // that the type is an array type.
2068 2068 // We also, for now, always call the stub if the barrier set requires a
2069 2069 // write_ref_pre barrier (which the stub does, but none of the optimized
2070 2070 // cases currently do).
2071 2071 if (op->expected_type() == NULL ||
2072 2072 Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) {
2073 2073 __ mov(src, O0);
2074 2074 __ mov(src_pos, O1);
2075 2075 __ mov(dst, O2);
2076 2076 __ mov(dst_pos, O3);
2077 2077 __ mov(length, O4);
2078 2078 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
2079 2079
2080 2080 __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
2081 2081 __ delayed()->nop();
2082 2082 __ bind(*stub->continuation());
2083 2083 return;
2084 2084 }
2085 2085
2086 2086 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
2087 2087
2088 2088 // make sure src and dst are non-null and load array length
2089 2089 if (flags & LIR_OpArrayCopy::src_null_check) {
2090 2090 __ tst(src);
2091 2091 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
2092 2092 __ delayed()->nop();
2093 2093 }
2094 2094
2095 2095 if (flags & LIR_OpArrayCopy::dst_null_check) {
2096 2096 __ tst(dst);
2097 2097 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
2098 2098 __ delayed()->nop();
2099 2099 }
2100 2100
2101 2101 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2102 2102 // test src_pos register
2103 2103 __ tst(src_pos);
2104 2104 __ br(Assembler::less, false, Assembler::pn, *stub->entry());
2105 2105 __ delayed()->nop();
2106 2106 }
2107 2107
2108 2108 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2109 2109 // test dst_pos register
2110 2110 __ tst(dst_pos);
2111 2111 __ br(Assembler::less, false, Assembler::pn, *stub->entry());
2112 2112 __ delayed()->nop();
2113 2113 }
2114 2114
2115 2115 if (flags & LIR_OpArrayCopy::length_positive_check) {
2116 2116 // make sure length isn't negative
2117 2117 __ tst(length);
2118 2118 __ br(Assembler::less, false, Assembler::pn, *stub->entry());
2119 2119 __ delayed()->nop();
2120 2120 }
2121 2121
2122 2122 if (flags & LIR_OpArrayCopy::src_range_check) {
2123 2123 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
2124 2124 __ add(length, src_pos, tmp);
2125 2125 __ cmp(tmp2, tmp);
2126 2126 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2127 2127 __ delayed()->nop();
2128 2128 }
2129 2129
2130 2130 if (flags & LIR_OpArrayCopy::dst_range_check) {
2131 2131 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
2132 2132 __ add(length, dst_pos, tmp);
2133 2133 __ cmp(tmp2, tmp);
2134 2134 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2135 2135 __ delayed()->nop();
2136 2136 }
2137 2137
2138 2138 if (flags & LIR_OpArrayCopy::type_check) {
2139 2139 if (UseCompressedOops) {
2140 2140 // We don't need to decode because we only need to compare the raw klass words
2141 2141 __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
2142 2142 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2143 2143 __ cmp(tmp, tmp2);
2144 2144 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2145 2145 } else {
2146 2146 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
2147 2147 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2148 2148 __ cmp(tmp, tmp2);
2149 2149 __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2150 2150 }
2151 2151 __ delayed()->nop();
2152 2152 }
2153 2153
2154 2154 #ifdef ASSERT
2155 2155 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2156 2156 // Sanity check the known type with the incoming class. For the
2157 2157 // primitive case the types must match exactly with src.klass and
2158 2158 // dst.klass each exactly matching the default type. For the
2159 2159 // object array case, if no type check is needed then either the
2160 2160 // dst type is exactly the expected type and the src type is a
2161 2161 // subtype which we can't check or src is the same array as dst
2162 2162 // but not necessarily exactly of type default_type.
2163 2163 Label known_ok, halt;
2164 2164 jobject2reg(op->expected_type()->constant_encoding(), tmp);
2165 2165 if (UseCompressedOops) {
2166 2166 // tmp holds the default type. It currently comes uncompressed after the
2167 2167 // load of a constant, so encode it.
2168 2168 __ encode_heap_oop(tmp);
2169 2169 // load the raw value of the dst klass, since we will be comparing
2170 2170 // uncompressed values directly.
2171 2171 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2172 2172 if (basic_type != T_OBJECT) {
2173 2173 __ cmp(tmp, tmp2);
2174 2174 __ br(Assembler::notEqual, false, Assembler::pn, halt);
2175 2175 // load the raw value of the src klass.
2176 2176 __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
2177 2177 __ cmp(tmp, tmp2);
2178 2178 __ br(Assembler::equal, false, Assembler::pn, known_ok);
2179 2179 __ delayed()->nop();
2180 2180 } else {
2181 2181 __ cmp(tmp, tmp2);
2182 2182 __ br(Assembler::equal, false, Assembler::pn, known_ok);
2183 2183 __ delayed()->cmp(src, dst);
2184 2184 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2185 2185 __ delayed()->nop();
2186 2186 }
2187 2187 } else {
2188 2188 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2189 2189 if (basic_type != T_OBJECT) {
2190 2190 __ cmp(tmp, tmp2);
2191 2191 __ brx(Assembler::notEqual, false, Assembler::pn, halt);
2192 2192 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
2193 2193 __ cmp(tmp, tmp2);
2194 2194 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2195 2195 __ delayed()->nop();
2196 2196 } else {
2197 2197 __ cmp(tmp, tmp2);
2198 2198 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2199 2199 __ delayed()->cmp(src, dst);
2200 2200 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
2201 2201 __ delayed()->nop();
2202 2202 }
2203 2203 }
2204 2204 __ bind(halt);
2205 2205 __ stop("incorrect type information in arraycopy");
2206 2206 __ bind(known_ok);
2207 2207 }
2208 2208 #endif
2209 2209
2210 2210 int shift = shift_amount(basic_type);
2211 2211
2212 2212 Register src_ptr = O0;
2213 2213 Register dst_ptr = O1;
2214 2214 Register len = O2;
2215 2215
2216 2216 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2217 2217 LP64_ONLY(__ sra(src_pos, 0, src_pos);) // the higher 32 bits must be zero
2218 2218 if (shift == 0) {
2219 2219 __ add(src_ptr, src_pos, src_ptr);
2220 2220 } else {
2221 2221 __ sll(src_pos, shift, tmp);
2222 2222 __ add(src_ptr, tmp, src_ptr);
2223 2223 }
2224 2224
2225 2225 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2226 2226 LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) // the higher 32 bits must be zero
2227 2227 if (shift == 0) {
2228 2228 __ add(dst_ptr, dst_pos, dst_ptr);
2229 2229 } else {
2230 2230 __ sll(dst_pos, shift, tmp);
2231 2231 __ add(dst_ptr, tmp, dst_ptr);
2232 2232 }
2233 2233
2234 2234 if (basic_type != T_OBJECT) {
2235 2235 if (shift == 0) {
2236 2236 __ mov(length, len);
2237 2237 } else {
2238 2238 __ sll(length, shift, len);
2239 2239 }
2240 2240 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy));
2241 2241 } else {
2242 2242 // oop_arraycopy takes a length in number of elements, so don't scale it.
2243 2243 __ mov(length, len);
2244 2244 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy));
2245 2245 }
2246 2246
2247 2247 __ bind(*stub->continuation());
2248 2248 }
2249 2249
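// A note on the range checks above: cmp followed by a branch on carrySet is
// an unsigned less-than, so each check reads as
//
//   if ((unsigned)(pos + length) > (unsigned)array.length) goto stub;
//
// which also catches a wrapped pos + length. Worked instance:
//   array.length = 10, src_pos = 7, length = 5  ->  12 > 10  ->  stub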
2250 2250
2251 2251 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2252 2252 if (dest->is_single_cpu()) {
2253 2253 #ifdef _LP64
2254 2254 if (left->type() == T_OBJECT) {
2255 2255 switch (code) {
2256 2256 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
2257 2257 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break;
2258 2258 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
2259 2259 default: ShouldNotReachHere();
2260 2260 }
2261 2261 } else
2262 2262 #endif
2263 2263 switch (code) {
2264 2264 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
2265 2265 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
2266 2266 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
2267 2267 default: ShouldNotReachHere();
2268 2268 }
2269 2269 } else {
2270 2270 #ifdef _LP64
2271 2271 switch (code) {
2272 2272 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2273 2273 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2274 2274 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2275 2275 default: ShouldNotReachHere();
2276 2276 }
2277 2277 #else
2278 2278 switch (code) {
2279 2279 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2280 2280 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2281 2281 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2282 2282 default: ShouldNotReachHere();
2283 2283 }
2284 2284 #endif
2285 2285 }
2286 2286 }
2287 2287
2288 2288
2289 2289 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2290 2290 #ifdef _LP64
2291 2291 if (left->type() == T_OBJECT) {
2292 2292 count = count & 63; // shouldn't shift by more than the width of intptr_t in bits
2293 2293 Register l = left->as_register();
2294 2294 Register d = dest->as_register_lo();
2295 2295 switch (code) {
2296 2296 case lir_shl: __ sllx (l, count, d); break;
2297 2297 case lir_shr: __ srax (l, count, d); break;
2298 2298 case lir_ushr: __ srlx (l, count, d); break;
2299 2299 default: ShouldNotReachHere();
2300 2300 }
2301 2301 return;
2302 2302 }
2303 2303 #endif
2304 2304
2305 2305 if (dest->is_single_cpu()) {
2306 2306 count = count & 0x1F; // Java spec
2307 2307 switch (code) {
2308 2308 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break;
2309 2309 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break;
2310 2310 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break;
2311 2311 default: ShouldNotReachHere();
2312 2312 }
2313 2313 } else if (dest->is_double_cpu()) {
2314 2314 count = count & 63; // Java spec
2315 2315 switch (code) {
2316 2316 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2317 2317 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2318 2318 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2319 2319 default: ShouldNotReachHere();
2320 2320 }
2321 2321 } else {
2322 2322 ShouldNotReachHere();
2323 2323 }
2324 2324 }
2325 2325
2326 2326
2327 2327 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2328 2328 assert(op->tmp1()->as_register() == G1 &&
2329 2329 op->tmp2()->as_register() == G3 &&
2330 2330 op->tmp3()->as_register() == G4 &&
2331 2331 op->obj()->as_register() == O0 &&
2332 2332 op->klass()->as_register() == G5, "must be");
2333 2333 if (op->init_check()) {
2334 2334 __ ld(op->klass()->as_register(),
2335 2335 instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc),
2336 2336 op->tmp1()->as_register());
2337 2337 add_debug_info_for_null_check_here(op->stub()->info());
2338 2338 __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
2339 2339 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
2340 2340 __ delayed()->nop();
2341 2341 }
2342 2342 __ allocate_object(op->obj()->as_register(),
2343 2343 op->tmp1()->as_register(),
2344 2344 op->tmp2()->as_register(),
2345 2345 op->tmp3()->as_register(),
2346 2346 op->header_size(),
2347 2347 op->object_size(),
2348 2348 op->klass()->as_register(),
2349 2349 *op->stub()->entry());
2350 2350 __ bind(*op->stub()->continuation());
2351 2351 __ verify_oop(op->obj()->as_register());
2352 2352 }
2353 2353
2354 2354
2355 2355 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2356 2356 assert(op->tmp1()->as_register() == G1 &&
2357 2357 op->tmp2()->as_register() == G3 &&
2358 2358 op->tmp3()->as_register() == G4 &&
2359 2359 op->tmp4()->as_register() == O1 &&
2360 2360 op->klass()->as_register() == G5, "must be");
2361 2361
2362 2362 LP64_ONLY( __ signx(op->len()->as_register()); )
2363 2363 if (UseSlowPath ||
2364 2364 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
2365 2365 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
2366 2366 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2367 2367 __ delayed()->nop();
2368 2368 } else {
2369 2369 __ allocate_array(op->obj()->as_register(),
2370 2370 op->len()->as_register(),
2371 2371 op->tmp1()->as_register(),
2372 2372 op->tmp2()->as_register(),
2373 2373 op->tmp3()->as_register(),
2374 2374 arrayOopDesc::header_size(op->type()),
2375 2375 type2aelembytes(op->type()),
2376 2376 op->klass()->as_register(),
2377 2377 *op->stub()->entry());
2378 2378 }
2379 2379 __ bind(*op->stub()->continuation());
2380 2380 }
2381 2381
2382 2382
2383 2383 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
2384 2384 ciMethodData *md, ciProfileData *data,
2385 2385 Register recv, Register tmp1, Label* update_done) {
2386 2386 uint i;
2387 2387 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2388 2388 Label next_test;
2389 2389 // See if the receiver is receiver[n].
2390 2390 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
2391 2391 mdo_offset_bias);
2392 2392 __ ld_ptr(receiver_addr, tmp1);
2393 2393 __ verify_oop(tmp1);
2394 2394 __ cmp(recv, tmp1);
2395 2395 __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
2396 2396 __ delayed()->nop();
2397 2397 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
2398 2398 mdo_offset_bias);
2399 2399 __ ld_ptr(data_addr, tmp1);
2400 2400 __ add(tmp1, DataLayout::counter_increment, tmp1);
2401 2401 __ st_ptr(tmp1, data_addr);
2402 2402 __ ba(false, *update_done);
2403 2403 __ delayed()->nop();
2404 2404 __ bind(next_test);
2405 2405 }
2406 2406
2407 2407 // Didn't find receiver; find next empty slot and fill it in
2408 2408 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2409 2409 Label next_test;
2410 2410 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
2411 2411 mdo_offset_bias);
2412 2412 __ ld_ptr(recv_addr, tmp1);
2413 2413 __ br_notnull(tmp1, false, Assembler::pt, next_test);
2414 2414 __ delayed()->nop();
2415 2415 __ st_ptr(recv, recv_addr);
2416 2416 __ set(DataLayout::counter_increment, tmp1);
2417 2417 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
2418 2418 mdo_offset_bias);
2419 2419 __ ba(false, *update_done);
2420 2420 __ delayed()->nop();
2421 2421 __ bind(next_test);
2422 2422 }
2423 2423 }
2424 2424
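// The two assembly loops above restated as a sketch in plain C++ (field and
// variable names illustrative, not the actual ciMethodData layout):
//
//   for (i = 0; i < row_limit; i++)     // pass 1: bump a matching row
//     if (row[i].receiver == recv) { row[i].count += increment; goto done; }
//   for (i = 0; i < row_limit; i++)     // pass 2: claim the first empty row
//     if (row[i].receiver == NULL) { row[i].receiver = recv;
//                                    row[i].count  = increment; goto done; }
//
// If every row is already taken by another receiver, the update is dropped.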
2425 2425
2426 2426 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
2427 2427 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
2428 2428 md = method->method_data_or_null();
2429 2429 assert(md != NULL, "Sanity");
2430 2430 data = md->bci_to_data(bci);
2431 2431 assert(data != NULL, "need data for checkcast");
2432 2432 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2433 2433 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2434 2434 // The offset is large so bias the mdo by the base of the slot so
2435 2435 // that the ld can use simm13s to reference the slots of the data
2436 2436 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2437 2437 }
2438 2438 }
2439 2439
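// Why the bias exists: slot offsets into a large methodDataOop can exceed
// the simm13 displacement range, so the mdo pointer is advanced once by the
// bias and every later Address uses (slot_offset - mdo_offset_bias), which
// fits again. Worked instance, offsets illustrative:
//
//   data header at byte 20000 (> 4095): bias = 20000, so a slot at byte
//   20016 is addressed as [mdo + 16] after the add.
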
2440 2440 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2441 2441 // we always need a stub for the failure case.
2442 2442 CodeStub* stub = op->stub();
2443 2443 Register obj = op->object()->as_register();
2444 2444 Register k_RInfo = op->tmp1()->as_register();
2445 2445 Register klass_RInfo = op->tmp2()->as_register();
2446 2446 Register dst = op->result_opr()->as_register();
2447 2447 Register Rtmp1 = op->tmp3()->as_register();
2448 2448 ciKlass* k = op->klass();
2449 2449
2450 2450
2451 2451 if (obj == k_RInfo) {
2452 2452 k_RInfo = klass_RInfo;
2453 2453 klass_RInfo = obj;
2454 2454 }
2455 2455
2456 2456 ciMethodData* md;
2457 2457 ciProfileData* data;
2458 2458 int mdo_offset_bias = 0;
2459 2459 if (op->should_profile()) {
2460 2460 ciMethod* method = op->profiled_method();
2461 2461 assert(method != NULL, "Should have method");
2462 2462 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2463 2463
2464 2464 Label not_null;
2465 2465 __ br_notnull(obj, false, Assembler::pn, not_null);
2466 2466 __ delayed()->nop();
2467 2467 Register mdo = k_RInfo;
2468 2468 Register data_val = Rtmp1;
2469 2469 jobject2reg(md->constant_encoding(), mdo);
2470 2470 if (mdo_offset_bias > 0) {
2471 2471 __ set(mdo_offset_bias, data_val);
2472 2472 __ add(mdo, data_val, mdo);
2473 2473 }
2474 2474 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
2475 2475 __ ldub(flags_addr, data_val);
2476 2476 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
2477 2477 __ stb(data_val, flags_addr);
2478 2478 __ ba(false, *obj_is_null);
2479 2479 __ delayed()->nop();
2480 2480 __ bind(not_null);
2481 2481 } else {
2482 2482 __ br_null(obj, false, Assembler::pn, *obj_is_null);
2483 2483 __ delayed()->nop();
2484 2484 }
2485 2485
2486 2486 Label profile_cast_failure, profile_cast_success;
2487 2487 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
2488 2488 Label *success_target = op->should_profile() ? &profile_cast_success : success;
2489 2489
2490 2490 // patching may screw with our temporaries on sparc,
2491 2491 // so let's do it before loading the class
2492 2492 if (k->is_loaded()) {
2493 2493 jobject2reg(k->constant_encoding(), k_RInfo);
2494 2494 } else {
2495 2495 jobject2reg_with_patching(k_RInfo, op->info_for_patch());
2496 2496 }
2497 2497 assert(obj != k_RInfo, "must be different");
2498 2498
2499 2499 // get object class
2500 2500 // not a safepoint as obj null check happens earlier
2501 2501 __ load_klass(obj, klass_RInfo);
2502 2502 if (op->fast_check()) {
2503 2503 assert_different_registers(klass_RInfo, k_RInfo);
2504 2504 __ cmp(k_RInfo, klass_RInfo);
2505 2505 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
2506 2506 __ delayed()->nop();
2507 2507 } else {
2508 2508 bool need_slow_path = true;
2509 2509 if (k->is_loaded()) {
2510 2510 if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
2511 2511 need_slow_path = false;
2512 2512 // perform the fast part of the checking logic
2513 2513 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
2514 2514 (need_slow_path ? success_target : NULL),
2515 2515 failure_target, NULL,
2516 2516 RegisterOrConstant(k->super_check_offset()));
2517 2517 } else {
2518 2518 // perform the fast part of the checking logic
2519 2519 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
2520 2520 failure_target, NULL);
2521 2521 }
2522 2522 if (need_slow_path) {
2523 2523 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2524 2524 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2525 2525 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2526 2526 __ delayed()->nop();
2527 2527 __ cmp(G3, 0);
2528 2528 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
2529 2529 __ delayed()->nop();
2530 2530 // Fall through to success case
2531 2531 }
2532 2532 }
2533 2533
2534 2534 if (op->should_profile()) {
2535 2535 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
2536 2536 assert_different_registers(obj, mdo, recv, tmp1);
2537 2537 __ bind(profile_cast_success);
2538 2538 jobject2reg(md->constant_encoding(), mdo);
2539 2539 if (mdo_offset_bias > 0) {
2540 2540 __ set(mdo_offset_bias, tmp1);
2541 2541 __ add(mdo, tmp1, mdo);
2542 2542 }
2543 2543 __ load_klass(obj, recv);
2544 2544 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
2545 2545 // Jump over the failure case
2546 2546 __ ba(false, *success);
2547 2547 __ delayed()->nop();
2548 2548 // Cast failure case
2549 2549 __ bind(profile_cast_failure);
2550 2550 jobject2reg(md->constant_encoding(), mdo);
2551 2551 if (mdo_offset_bias > 0) {
2552 2552 __ set(mdo_offset_bias, tmp1);
2553 2553 __ add(mdo, tmp1, mdo);
2554 2554 }
2555 2555 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2556 2556 __ ld_ptr(data_addr, tmp1);
2557 2557 __ sub(tmp1, DataLayout::counter_increment, tmp1);
2558 2558 __ st_ptr(tmp1, data_addr);
2559 2559 __ ba(false, *failure);
2560 2560 __ delayed()->nop();
2561 2561 }
2562 2562 __ ba(false, *success);
2563 2563 __ delayed()->nop();
2564 2564 }
2565 2565
2566 2566 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2567 2567 LIR_Code code = op->code();
2568 2568 if (code == lir_store_check) {
2569 2569 Register value = op->object()->as_register();
2570 2570 Register array = op->array()->as_register();
2571 2571 Register k_RInfo = op->tmp1()->as_register();
2572 2572 Register klass_RInfo = op->tmp2()->as_register();
2573 2573 Register Rtmp1 = op->tmp3()->as_register();
2574 2574
2575 2575 __ verify_oop(value);
2576 2576 CodeStub* stub = op->stub();
2577 2577 // check if it needs to be profiled
2578 2578 ciMethodData* md;
2579 2579 ciProfileData* data;
2580 2580 int mdo_offset_bias = 0;
2581 2581 if (op->should_profile()) {
2582 2582 ciMethod* method = op->profiled_method();
2583 2583 assert(method != NULL, "Should have method");
2584 2584 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2585 2585 }
2586 2586 Label profile_cast_success, profile_cast_failure, done;
2587 2587 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
2588 2588 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
2589 2589
2590 2590 if (op->should_profile()) {
2591 2591 Label not_null;
2592 2592 __ br_notnull(value, false, Assembler::pn, not_null);
2593 2593 __ delayed()->nop();
2594 2594 Register mdo = k_RInfo;
2595 2595 Register data_val = Rtmp1;
2596 2596 jobject2reg(md->constant_encoding(), mdo);
2597 2597 if (mdo_offset_bias > 0) {
2598 2598 __ set(mdo_offset_bias, data_val);
2599 2599 __ add(mdo, data_val, mdo);
2600 2600 }
2601 2601 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
2602 2602 __ ldub(flags_addr, data_val);
2603 2603 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
2604 2604 __ stb(data_val, flags_addr);
2605 2605 __ ba(false, done);
2606 2606 __ delayed()->nop();
2607 2607 __ bind(not_null);
2608 2608 } else {
2609 2609 __ br_null(value, false, Assembler::pn, done);
2610 2610 __ delayed()->nop();
2611 2611 }
2612 2612 add_debug_info_for_null_check_here(op->info_for_exception());
2613 2613 __ load_klass(array, k_RInfo);
2614 2614 __ load_klass(value, klass_RInfo);
2615 2615
2616 2616 // get instance klass
2617 2617 __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo);
2618 2618 // perform the fast part of the checking logic
2619 2619 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
2620 2620
2621 2621 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2622 2622 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2623 2623 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2624 2624 __ delayed()->nop();
2625 2625 __ cmp(G3, 0);
2626 2626 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
2627 2627 __ delayed()->nop();
2628 2628 // fall through to the success case
2629 2629
2630 2630 if (op->should_profile()) {
2631 2631 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
2632 2632 assert_different_registers(value, mdo, recv, tmp1);
2633 2633 __ bind(profile_cast_success);
2634 2634 jobject2reg(md->constant_encoding(), mdo);
2635 2635 if (mdo_offset_bias > 0) {
2636 2636 __ set(mdo_offset_bias, tmp1);
2637 2637 __ add(mdo, tmp1, mdo);
2638 2638 }
2639 2639 __ load_klass(value, recv);
2640 2640 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
2641 2641 __ ba(false, done);
2642 2642 __ delayed()->nop();
2643 2643 // Cast failure case
2644 2644 __ bind(profile_cast_failure);
2645 2645 jobject2reg(md->constant_encoding(), mdo);
2646 2646 if (mdo_offset_bias > 0) {
2647 2647 __ set(mdo_offset_bias, tmp1);
2648 2648 __ add(mdo, tmp1, mdo);
2649 2649 }
2650 2650 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2651 2651 __ ld_ptr(data_addr, tmp1);
2652 2652 __ sub(tmp1, DataLayout::counter_increment, tmp1);
2653 2653 __ st_ptr(tmp1, data_addr);
2654 2654 __ ba(false, *stub->entry());
2655 2655 __ delayed()->nop();
2656 2656 }
2657 2657 __ bind(done);
2658 2658 } else if (code == lir_checkcast) {
2659 2659 Register obj = op->object()->as_register();
2660 2660 Register dst = op->result_opr()->as_register();
2661 2661 Label success;
2662 2662 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
2663 2663 __ bind(success);
2664 2664 __ mov(obj, dst);
2665 2665 } else if (code == lir_instanceof) {
2666 2666 Register obj = op->object()->as_register();
2667 2667 Register dst = op->result_opr()->as_register();
2668 2668 Label success, failure, done;
2669 2669 emit_typecheck_helper(op, &success, &failure, &failure);
2670 2670 __ bind(failure);
2671 2671 __ set(0, dst);
2672 2672 __ ba(false, done);
2673 2673 __ delayed()->nop();
2674 2674 __ bind(success);
2675 2675 __ set(1, dst);
2676 2676 __ bind(done);
2677 2677 } else {
2678 2678 ShouldNotReachHere();
2679 2679 }
2680 2680
2681 2681 }
2682 2682
2683 2683
2684 2684 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2685 2685 if (op->code() == lir_cas_long) {
2686 2686 assert(VM_Version::supports_cx8(), "wrong machine");
2687 2687 Register addr = op->addr()->as_pointer_register();
2688 2688 Register cmp_value_lo = op->cmp_value()->as_register_lo();
2689 2689 Register cmp_value_hi = op->cmp_value()->as_register_hi();
2690 2690 Register new_value_lo = op->new_value()->as_register_lo();
2691 2691 Register new_value_hi = op->new_value()->as_register_hi();
2692 2692 Register t1 = op->tmp1()->as_register();
2693 2693 Register t2 = op->tmp2()->as_register();
2694 2694 #ifdef _LP64
2695 2695 __ mov(cmp_value_lo, t1);
2696 2696 __ mov(new_value_lo, t2);
2697 2697 // perform the compare and swap operation
2698 2698 __ casx(addr, t1, t2);
2699 2699 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
2700 2700 // overwritten with the original value in "addr" and will be equal to t1.
2701 2701 __ cmp(t1, t2);
2702 2702 #else
2703 2703 // move high and low halves of long values into single registers
2704 2704 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
2705 2705 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
2706 2706 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
2707 2707 __ sllx(new_value_hi, 32, t2);
2708 2708 __ srl(new_value_lo, 0, new_value_lo);
2709 2709 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
2710 2710 // perform the compare and swap operation
2711 2711 __ casx(addr, t1, t2);
2712 2712 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
2713 2713 // overwritten with the original value in "addr" and will be equal to t1.
2714 2714 // Produce icc flag for 32bit.
2715 2715 __ sub(t1, t2, t2);
2716 2716 __ srlx(t2, 32, t1);
2717 2717 __ orcc(t2, t1, G0);
2718 2718 #endif
2719 2719 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2720 2720 Register addr = op->addr()->as_pointer_register();
2721 2721 Register cmp_value = op->cmp_value()->as_register();
2722 2722 Register new_value = op->new_value()->as_register();
2723 2723 Register t1 = op->tmp1()->as_register();
2724 2724 Register t2 = op->tmp2()->as_register();
2725 2725 __ mov(cmp_value, t1);
2726 2726 __ mov(new_value, t2);
2727 2727 if (op->code() == lir_cas_obj) {
2728 2728 if (UseCompressedOops) {
2729 2729 __ encode_heap_oop(t1);
2730 2730 __ encode_heap_oop(t2);
2731 2731 __ cas(addr, t1, t2);
2732 2732 } else {
2733 2733 __ cas_ptr(addr, t1, t2);
2734 2734 }
2735 2735 } else {
2736 2736 __ cas(addr, t1, t2);
2737 2737 }
2738 2738 __ cmp(t1, t2);
2739 2739 } else {
2740 2740 Unimplemented();
2741 2741 }
2742 2742 }
2743 2743
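// casx/cas follow the usual compare-and-swap contract, which is why success
// is tested with a plain cmp(t1, t2) afterwards: the instruction writes the
// memory's previous value back into the "swap" register. In C++ terms (a
// sketch of the semantics, not the VM's wrapper):
//
//   intptr_t casx(intptr_t* addr, intptr_t cmp, intptr_t swap) {  // atomic
//     intptr_t old = *addr;
//     if (old == cmp) *addr = swap;
//     return old;             // SPARC leaves this in the swap register
//   }
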
2744 2744 void LIR_Assembler::set_24bit_FPU() {
2745 2745 Unimplemented();
2746 2746 }
2747 2747
2748 2748
2749 2749 void LIR_Assembler::reset_FPU() {
2750 2750 Unimplemented();
2751 2751 }
2752 2752
2753 2753
2754 2754 void LIR_Assembler::breakpoint() {
2755 2755 __ breakpoint_trap();
2756 2756 }
2757 2757
2758 2758
2759 2759 void LIR_Assembler::push(LIR_Opr opr) {
2760 2760 Unimplemented();
2761 2761 }
2762 2762
2763 2763
2764 2764 void LIR_Assembler::pop(LIR_Opr opr) {
2765 2765 Unimplemented();
2766 2766 }
2767 2767
2768 2768
2769 2769 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2770 2770 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2771 2771 Register dst = dst_opr->as_register();
2772 2772 Register reg = mon_addr.base();
2773 2773 int offset = mon_addr.disp();
2774 2774 // compute pointer to BasicLock
2775 2775 if (mon_addr.is_simm13()) {
2776 2776 __ add(reg, offset, dst);
2777 2777 } else {
2778 2778 __ set(offset, dst);
2779 2779 __ add(dst, reg, dst);
2780 2780 }
2781 2781 }
2782 2782
2783 2783
2784 2784 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2785 2785 Register obj = op->obj_opr()->as_register();
2786 2786 Register hdr = op->hdr_opr()->as_register();
2787 2787 Register lock = op->lock_opr()->as_register();
2788 2788
2789 2789 // obj may not be an oop
2790 2790 if (op->code() == lir_lock) {
2791 2791 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2792 2792 if (UseFastLocking) {
2793 2793 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2794 2794 // add debug info for NullPointerException only if one is possible
2795 2795 if (op->info() != NULL) {
2796 2796 add_debug_info_for_null_check_here(op->info());
2797 2797 }
2798 2798 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2799 2799 } else {
2800 2800 // always do slow locking
2801 2801 // note: the slow locking code could be inlined here, however if we use
2802 2802 // slow locking, speed doesn't matter anyway and this solution is
2803 2803 // simpler and requires less duplicated code - additionally, the
2804 2804 // slow locking code is the same in either case which simplifies
2805 2805 // debugging
2806 2806 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2807 2807 __ delayed()->nop();
2808 2808 }
2809 2809 } else {
2810 2810 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2811 2811 if (UseFastLocking) {
2812 2812 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2813 2813 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2814 2814 } else {
2815 2815 // always do slow unlocking
2816 2816 // note: the slow unlocking code could be inlined here, however if we use
2817 2817 // slow unlocking, speed doesn't matter anyway and this solution is
2818 2818 // simpler and requires less duplicated code - additionally, the
2819 2819 // slow unlocking code is the same in either case which simplifies
2820 2820 // debugging
2821 2821 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2822 2822 __ delayed()->nop();
2823 2823 }
2824 2824 }
2825 2825 __ bind(*op->stub()->continuation());
2826 2826 }
2827 2827
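// The fast-locking asserts above encode a layout assumption: the displaced
// header is the first field of BasicLock, so the lock register doubles as a
// pointer to the displaced markOop. Roughly:
//
//   class BasicLock {              // sketch of the relevant part only
//     markOop _displaced_header;   // offset 0, per the assert
//     // ...
//   };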
2828 2828
2829 2829 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2830 2830 ciMethod* method = op->profiled_method();
2831 2831 int bci = op->profiled_bci();
2832 2832
2833 2833 // Update counter for all call types
2834 2834 ciMethodData* md = method->method_data_or_null();
2835 2835 assert(md != NULL, "Sanity");
2836 2836 ciProfileData* data = md->bci_to_data(bci);
2837 2837 assert(data->is_CounterData(), "need CounterData for calls");
2838 2838 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
2839 2839 Register mdo = op->mdo()->as_register();
2840 2840 #ifdef _LP64
2841 2841 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
2842 2842 Register tmp1 = op->tmp1()->as_register_lo();
2843 2843 #else
2844 2844 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
2845 2845 Register tmp1 = op->tmp1()->as_register();
2846 2846 #endif
2847 2847 jobject2reg(md->constant_encoding(), mdo);
2848 2848 int mdo_offset_bias = 0;
2849 2849 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
2850 2850 data->size_in_bytes())) {
2851 2851 // The offset is large, so bias the mdo by the base of the slot so
2852 2852 // that the ld can use a simm13 displacement to reference the slots of the data.
2853 2853 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
2854 2854 __ set(mdo_offset_bias, O7);
2855 2855 __ add(mdo, O7, mdo);
2856 2856 }
2857 2857
2858 2858 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2859 2859 Bytecodes::Code bc = method->java_code_at_bci(bci);
2860 2860 // Perform additional virtual call profiling for invokevirtual and
2861 2861 // invokeinterface bytecodes
2862 2862 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
2863 2863 C1ProfileVirtualCalls) {
2864 2864 assert(op->recv()->is_single_cpu(), "recv must be allocated");
2865 2865 Register recv = op->recv()->as_register();
2866 2866 assert_different_registers(mdo, tmp1, recv);
2867 2867 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2868 2868 ciKlass* known_klass = op->known_holder();
2869 2869 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2870 2870 // We know the type that will be seen at this call site; we can
2871 2871 // statically update the methodDataOop rather than needing to do
2872 2872 // dynamic tests on the receiver type
2873 2873
2874 2874 // NOTE: we should probably put a lock around this search to
2875 2875 // avoid collisions by concurrent compilations
2876 2876 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2877 2877 uint i;
2878 2878 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2879 2879 ciKlass* receiver = vc_data->receiver(i);
2880 2880 if (known_klass->equals(receiver)) {
2881 2881 Address data_addr(mdo, md->byte_offset_of_slot(data,
2882 2882 VirtualCallData::receiver_count_offset(i)) -
2883 2883 mdo_offset_bias);
2884 2884 __ ld_ptr(data_addr, tmp1);
2885 2885 __ add(tmp1, DataLayout::counter_increment, tmp1);
2886 2886 __ st_ptr(tmp1, data_addr);
2887 2887 return;
2888 2888 }
2889 2889 }
2890 2890
2891 2891 // Receiver type not found in profile data; select an empty slot
2892 2892
2893 2893 // Note that this is less efficient than it should be because it
2894 2894 // always writes to the receiver part of the VirtualCallData
2895 2895 // rather than only the first time.
2896 2896 for (i = 0; i < VirtualCallData::row_limit(); i++) {
2897 2897 ciKlass* receiver = vc_data->receiver(i);
2898 2898 if (receiver == NULL) {
2899 2899 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
2900 2900 mdo_offset_bias);
2901 2901 jobject2reg(known_klass->constant_encoding(), tmp1);
2902 2902 __ st_ptr(tmp1, recv_addr);
2903 2903 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
2904 2904 mdo_offset_bias);
2905 2905 __ ld_ptr(data_addr, tmp1);
2906 2906 __ add(tmp1, DataLayout::counter_increment, tmp1);
2907 2907 __ st_ptr(tmp1, data_addr);
2908 2908 return;
2909 2909 }
2910 2910 }
2911 2911 } else {
2912 2912 __ load_klass(recv, recv);
2913 2913 Label update_done;
2914 2914 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
2915 2915 // Receiver did not match any saved receiver and there is no empty row for it.
2916 2916 // Increment total counter to indicate polymorphic case.
2917 2917 __ ld_ptr(counter_addr, tmp1);
2918 2918 __ add(tmp1, DataLayout::counter_increment, tmp1);
2919 2919 __ st_ptr(tmp1, counter_addr);
2920 2920
2921 2921 __ bind(update_done);
2922 2922 }
2923 2923 } else {
2924 2924 // Static call
2925 2925 __ ld_ptr(counter_addr, tmp1);
2926 2926 __ add(tmp1, DataLayout::counter_increment, tmp1);
2927 2927 __ st_ptr(tmp1, counter_addr);
2928 2928 }
2929 2929 }
2930 2930
2931 2931 void LIR_Assembler::align_backward_branch_target() {
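// Backward branch targets are typically loop headers; padding them to
// an OptoLoopAlignment byte boundary keeps the loop entry aligned for
// instruction fetch.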
2932 2932 __ align(OptoLoopAlignment);
2933 2933 }
2934 2934
2935 2935
2936 2936 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
2937 2937 // make sure we are expecting a delay
2938 2938 // this has the side effect of clearing the delay state
2939 2939 // so we can use _masm instead of _masm->delayed() to do the
2940 2940 // code generation.
2941 2941 __ delayed();
2942 2942
2943 2943 // make sure we only emit one instruction
2944 2944 int offset = code_offset();
2945 2945 op->delay_op()->emit_code(this);
2946 2946 #ifdef ASSERT
2947 2947 if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
2948 2948 op->delay_op()->print();
2949 2949 }
2950 2950 assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
2951 2951 "only one instruction can go in a delay slot");
2952 2952 #endif
2953 2953
2954 2954 // we may also need to emit the call info for the instruction
2955 2955 // whose delay slot we are filling.
2956 2956 CodeEmitInfo* call_info = op->call_info();
2957 2957 if (call_info) {
2958 2958 add_call_info(code_offset(), call_info);
2959 2959 }
2960 2960
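// When VerifyStackAtCalls is set, check that FP - SP still matches the
// frame size established in the prologue, and trap if the stack has
// been corrupted.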
2961 2961 if (VerifyStackAtCalls) {
2962 2962 _masm->sub(FP, SP, O7);
2963 2963 _masm->cmp(O7, initial_frame_size_in_bytes());
2964 2964 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 );
2965 2965 }
2966 2966 }
2967 2967
2968 2968
2969 2969 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
2970 2970 assert(left->is_register(), "can only handle registers");
2971 2971
2972 2972 if (left->is_single_cpu()) {
2973 2973 __ neg(left->as_register(), dest->as_register());
2974 2974 } else if (left->is_single_fpu()) {
2975 2975 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
2976 2976 } else if (left->is_double_fpu()) {
2977 2977 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
2978 2978 } else {
2979 2979 assert (left->is_double_cpu(), "Must be a long");
2980 2980 Register Rlow = left->as_register_lo();
2981 2981 Register Rhi = left->as_register_hi();
2982 2982 #ifdef _LP64
2983 2983 __ sub(G0, Rlow, dest->as_register_lo());
2984 2984 #else
2985 2985 __ subcc(G0, Rlow, dest->as_register_lo());
2986 2986 __ subc (G0, Rhi, dest->as_register_hi());
2987 2987 #endif
2988 2988 }
2989 2989 }
2990 2990
2991 2991
2992 2992 void LIR_Assembler::fxch(int i) {
2993 2993 Unimplemented();
2994 2994 }
2995 2995
2996 2996 void LIR_Assembler::fld(int i) {
2997 2997 Unimplemented();
2998 2998 }
2999 2999
3000 3000 void LIR_Assembler::ffree(int i) {
3001 3001 Unimplemented();
3002 3002 }
3003 3003
3004 3004 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
3005 3005 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3006 3006
3007 3007 // if tmp is invalid, then the function being called doesn't destroy the thread
3008 3008 if (tmp->is_valid()) {
3009 3009 __ save_thread(tmp->as_register());
3010 3010 }
3011 3011 __ call(dest, relocInfo::runtime_call_type);
3012 3012 __ delayed()->nop();
3013 3013 if (info != NULL) {
3014 3014 add_call_info_here(info);
3015 3015 }
3016 3016 if (tmp->is_valid()) {
3017 3017 __ restore_thread(tmp->as_register());
3018 3018 }
3019 3019
3020 3020 #ifdef ASSERT
3021 3021 __ verify_thread();
3022 3022 #endif // ASSERT
3023 3023 }
3024 3024
3025 3025
3026 3026 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3027 3027 #ifdef _LP64
3028 3028 ShouldNotReachHere();
3029 3029 #endif
3030 3030
3031 3031 NEEDS_CLEANUP;
3032 3032 if (type == T_LONG) {
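// On 32-bit SPARC a volatile long must be read or written with a single
// 64-bit memory instruction (ldx/stx, or ldd/std pre-V9) to remain
// atomic; the two 32-bit halves are staged through G4/G5 below.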
3033 3033 LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();
3034 3034
3035 3035 // (extended to allow indexed as well as constant displaced for JSR-166)
3036 3036 Register idx = noreg; // contains either constant offset or index
3037 3037
3038 3038 int disp = mem_addr->disp();
3039 3039 if (mem_addr->index() == LIR_OprFact::illegalOpr) {
3040 3040 if (!Assembler::is_simm13(disp)) {
3041 3041 idx = O7;
3042 3042 __ set(disp, idx);
3043 3043 }
3044 3044 } else {
3045 3045 assert(disp == 0, "not both indexed and disp");
3046 3046 idx = mem_addr->index()->as_register();
3047 3047 }
3048 3048
3049 3049 int null_check_offset = -1;
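// null_check_offset records the PC offset of the memory instruction
// that can take the implicit null-check trap; it is reported to the
// debug info below when info != NULL.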
3050 3050
3051 3051 Register base = mem_addr->base()->as_register();
3052 3052 if (src->is_register() && dest->is_address()) {
3053 3053 // G4 is high half, G5 is low half
3054 3054 if (VM_Version::v9_instructions_work()) {
3055 3055 // clear the top bits of G5, and scale up G4
3056 3056 __ srl (src->as_register_lo(), 0, G5);
3057 3057 __ sllx(src->as_register_hi(), 32, G4);
3058 3058 // combine the two halves into the 64 bits of G4
3059 3059 __ or3(G4, G5, G4);
3060 3060 null_check_offset = __ offset();
3061 3061 if (idx == noreg) {
3062 3062 __ stx(G4, base, disp);
3063 3063 } else {
3064 3064 __ stx(G4, base, idx);
3065 3065 }
3066 3066 } else {
3067 3067 __ mov (src->as_register_hi(), G4);
3068 3068 __ mov (src->as_register_lo(), G5);
3069 3069 null_check_offset = __ offset();
3070 3070 if (idx == noreg) {
3071 3071 __ std(G4, base, disp);
3072 3072 } else {
3073 3073 __ std(G4, base, idx);
3074 3074 }
3075 3075 }
3076 3076 } else if (src->is_address() && dest->is_register()) {
3077 3077 null_check_offset = __ offset();
3078 3078 if (VM_Version::v9_instructions_work()) {
3079 3079 if (idx == noreg) {
3080 3080 __ ldx(base, disp, G5);
3081 3081 } else {
3082 3082 __ ldx(base, idx, G5);
3083 3083 }
3084 3084 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
3085 3085 __ mov (G5, dest->as_register_lo()); // copy low half into lo
3086 3086 } else {
3087 3087 if (idx == noreg) {
3088 3088 __ ldd(base, disp, G4);
3089 3089 } else {
3090 3090 __ ldd(base, idx, G4);
3091 3091 }
3092 3092 // G4 is high half, G5 is low half
3093 3093 __ mov (G4, dest->as_register_hi());
3094 3094 __ mov (G5, dest->as_register_lo());
3095 3095 }
3096 3096 } else {
3097 3097 Unimplemented();
3098 3098 }
3099 3099 if (info != NULL) {
3100 3100 add_debug_info_for_null_check(null_check_offset, info);
3101 3101 }
3102 3102
3103 3103 } else {
3104 3104 // use normal move for all other volatiles since they don't need
3105 3105 // special handling to remain atomic.
3106 3106 move_op(src, dest, type, lir_patch_none, info, false, false, false);
3107 3107 }
3108 3108 }
3109 3109
3110 3110 void LIR_Assembler::membar() {
3111 3111 // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
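// (TSO already forbids load-load, load-store and store-store
// reordering, so only a store followed by a load needs an explicit
// barrier)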
3112 3112 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3113 3113 }
3114 3114
3115 3115 void LIR_Assembler::membar_acquire() {
3116 3116 // no-op on TSO
3117 3117 }
3118 3118
3119 3119 void LIR_Assembler::membar_release() {
3120 3120 // no-op on TSO
3121 3121 }
3122 3122
3123 3123 // Pack two sequential registers containing 32 bit values
3124 3124 // into a single 64 bit register.
3125 3125 // src and src->successor() are packed into dst
3126 3126 // src and dst may be the same register.
3127 3127 // Note: src is destroyed
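// e.g. src = 0x11111111, src->successor() = 0x22222222
//      => dst = 0x1111111122222222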
3128 3128 void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
3129 3129 Register rs = src->as_register();
3130 3130 Register rd = dst->as_register_lo();
3131 3131 __ sllx(rs, 32, rs);
3132 3132 __ srl(rs->successor(), 0, rs->successor());
3133 3133 __ or3(rs, rs->successor(), rd);
3134 3134 }
3135 3135
3136 3136 // Unpack a 64 bit value in a register into
3137 3137 // two sequential registers.
3138 3138 // src is unpacked into dst and dst->successor()
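// e.g. src = 0x1111111122222222
//      => dst = 0x11111111, dst->successor() = 0x22222222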
3139 3139 void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
3140 3140 Register rs = src->as_register_lo();
3141 3141 Register rd = dst->as_register_hi();
3142 3142 assert_different_registers(rs, rd, rd->successor());
3143 3143 __ srlx(rs, 32, rd);
3144 3144 __ srl (rs, 0, rd->successor());
3145 3145 }
3146 3146
3147 3147
3148 3148 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
3149 3149 LIR_Address* addr = addr_opr->as_address_ptr();
3150 3150 assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
3151 3151
3152 3152 __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
3153 3153 }
3154 3154
3155 3155
3156 3156 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3157 3157 assert(result_reg->is_register(), "check");
3158 3158 __ mov(G2_thread, result_reg->as_register());
3159 3159 }
3160 3160
3161 3161
3162 3162 void LIR_Assembler::peephole(LIR_List* lir) {
3163 3163 LIR_OpList* inst = lir->instructions_list();
3164 3164 for (int i = 0; i < inst->length(); i++) {
3165 3165 LIR_Op* op = inst->at(i);
3166 3166 switch (op->code()) {
3167 3167 case lir_cond_float_branch:
3168 3168 case lir_branch: {
3169 3169 LIR_OpBranch* branch = op->as_OpBranch();
3170 3170 assert(branch->info() == NULL, "shouldn't be state on branches anymore");
3171 3171 LIR_Op* delay_op = NULL;
3172 3172 // we'd like to be able to pull following instructions into
3173 3173 // this slot, but we don't know enough to do it safely yet, so we
3174 3174 // only optimize block-to-block control flow.
3175 3175 if (LIRFillDelaySlots && branch->block()) {
3176 3176 LIR_Op* prev = inst->at(i - 1);
3177 3177 if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
3178 3178 // swap previous instruction into delay slot
3179 3179 inst->at_put(i - 1, op);
3180 3180 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3181 3181 #ifndef PRODUCT
3182 3182 if (LIRTracePeephole) {
3183 3183 tty->print_cr("delayed");
3184 3184 inst->at(i - 1)->print();
3185 3185 inst->at(i)->print();
3186 3186 tty->cr();
3187 3187 }
3188 3188 #endif
3189 3189 continue;
3190 3190 }
3191 3191 }
3192 3192
3193 3193 if (!delay_op) {
3194 3194 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
3195 3195 }
3196 3196 inst->insert_before(i + 1, delay_op);
3197 3197 break;
3198 3198 }
3199 3199 case lir_static_call:
3200 3200 case lir_virtual_call:
3201 3201 case lir_icvirtual_call:
3202 3202 case lir_optvirtual_call:
3203 3203 case lir_dynamic_call: {
3204 3204 LIR_Op* prev = inst->at(i - 1);
3205 3205 if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
3206 3206 (op->code() != lir_virtual_call ||
3207 3207 !prev->result_opr()->is_single_cpu() ||
3208 3208 prev->result_opr()->as_register() != O0) &&
3209 3209 LIR_Assembler::is_single_instruction(prev)) {
3210 3210 // Only moves without info can be put into the delay slot.
3211 3211 // Also don't allow the setup of the receiver in the delay
3212 3212 // slot for vtable calls.
3213 3213 inst->at_put(i - 1, op);
3214 3214 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3215 3215 #ifndef PRODUCT
3216 3216 if (LIRTracePeephole) {
3217 3217 tty->print_cr("delayed");
3218 3218 inst->at(i - 1)->print();
3219 3219 inst->at(i)->print();
3220 3220 tty->cr();
3221 3221 }
3222 3222 #endif
3223 3223 } else {
3224 3224 LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
3225 3225 inst->insert_before(i + 1, delay_op);
3226 3226 i++;
3227 3227 }
3228 3228
3229 3229 #if defined(TIERED) && !defined(_LP64)
3230 3230 // Fix up the return value from G1 to O0/O1 for long returns.
3231 3231 // It's done here instead of in LIRGenerator because there's
3232 3232 // such a mismatch between the single-reg and double-reg
3233 3233 // calling conventions.
3234 3234 LIR_OpJavaCall* callop = op->as_OpJavaCall();
3235 3235 if (callop->result_opr() == FrameMap::out_long_opr) {
3236 3236 LIR_OpJavaCall* call;
3237 3237 LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
3238 3238 for (int a = 0; a < callop->arguments()->length(); a++) {
3239 3239 arguments->append(callop->arguments()->at(a));
3240 3240 }
3241 3241 if (op->code() == lir_virtual_call) {
3242 3242 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
3243 3243 callop->vtable_offset(), arguments, callop->info());
3244 3244 } else {
3245 3245 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
3246 3246 callop->addr(), arguments, callop->info());
3247 3247 }
3248 3248 inst->at_put(i - 1, call);
3249 3249 inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
3250 3250 T_LONG, lir_patch_none, NULL));
3251 3251 }
3252 3252 #endif
3253 3253 break;
3254 3254 }
3255 3255 }
3256 3256 }
3257 3257 }
3258 3258
3259 3259
3260 3260
3261 3261
3262 3262 #undef __