--- old/src/cpu/x86/vm/templateTable_x86_32.cpp
+++ new/src/cpu/x86/vm/templateTable_x86_32.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "asm/assembler.hpp"
27 27 #include "interpreter/interpreter.hpp"
28 28 #include "interpreter/interpreterRuntime.hpp"
29 29 #include "interpreter/templateTable.hpp"
30 30 #include "memory/universe.inline.hpp"
31 31 #include "oops/methodDataOop.hpp"
32 32 #include "oops/objArrayKlass.hpp"
33 33 #include "oops/oop.inline.hpp"
34 34 #include "prims/methodHandles.hpp"
35 35 #include "runtime/sharedRuntime.hpp"
36 36 #include "runtime/stubRoutines.hpp"
37 37 #include "runtime/synchronizer.hpp"
38 38
39 39 #ifndef CC_INTERP
40 40 #define __ _masm->
41 41
42 42 //----------------------------------------------------------------------------------------------------
43 43 // Platform-dependent initialization
44 44
45 45 void TemplateTable::pd_initialize() {
46 46 // No i486 specific initialization
47 47 }
48 48
49 49 //----------------------------------------------------------------------------------------------------
50 50 // Address computation
51 51
52 52 // local variables
53 53 static inline Address iaddress(int n) {
54 54 return Address(rdi, Interpreter::local_offset_in_bytes(n));
55 55 }
56 56
57 57 static inline Address laddress(int n) { return iaddress(n + 1); }
58 58 static inline Address haddress(int n) { return iaddress(n + 0); }
59 59 static inline Address faddress(int n) { return iaddress(n); }
60 60 static inline Address daddress(int n) { return laddress(n); }
61 61 static inline Address aaddress(int n) { return iaddress(n); }
62 62
63 63 static inline Address iaddress(Register r) {
64 64 return Address(rdi, r, Interpreter::stackElementScale());
65 65 }
66 66 static inline Address laddress(Register r) {
67 67 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
68 68 }
69 69 static inline Address haddress(Register r) {
70 70 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
71 71 }
72 72
73 73 static inline Address faddress(Register r) { return iaddress(r); }
74 74 static inline Address daddress(Register r) { return laddress(r); }
75 75 static inline Address aaddress(Register r) { return iaddress(r); }
76 76
77 77 // expression stack
78 78 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
79 79 // data beyond the rsp which is potentially unsafe in an MT environment;
80 80 // an interrupt may overwrite that data.)
81 81 static inline Address at_rsp () {
82 82 return Address(rsp, 0);
83 83 }
84 84
85 85 // At top of Java expression stack which may be different from rsp(). It
86 86 // isn't for category 1 objects.
87 87 static inline Address at_tos () {
88 88 Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
89 89 return tos;
90 90 }
91 91
92 92 static inline Address at_tos_p1() {
93 93 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
94 94 }
95 95
96 96 static inline Address at_tos_p2() {
97 97 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
98 98 }
99 99
100 100 // Condition conversion
101 101 static Assembler::Condition j_not(TemplateTable::Condition cc) {
102 102 switch (cc) {
103 103 case TemplateTable::equal : return Assembler::notEqual;
104 104 case TemplateTable::not_equal : return Assembler::equal;
105 105 case TemplateTable::less : return Assembler::greaterEqual;
106 106 case TemplateTable::less_equal : return Assembler::greater;
107 107 case TemplateTable::greater : return Assembler::lessEqual;
108 108 case TemplateTable::greater_equal: return Assembler::less;
109 109 }
110 110 ShouldNotReachHere();
111 111 return Assembler::zero;
112 112 }
113 113
114 114
115 115 //----------------------------------------------------------------------------------------------------
116 116 // Miscellaneous helper routines
117 117
118 118 // Store an oop (or NULL) at the address described by obj.
119 119 // If val == noreg this means store a NULL
120 120
121 121 static void do_oop_store(InterpreterMacroAssembler* _masm,
122 122 Address obj,
123 123 Register val,
124 124 BarrierSet::Name barrier,
125 125 bool precise) {
126 126 assert(val == noreg || val == rax, "parameter is just for looks");
127 127 switch (barrier) {
128 128 #ifndef SERIALGC
129 129 case BarrierSet::G1SATBCT:
130 130 case BarrierSet::G1SATBCTLogging:
131 131 {
132 132 // flatten object address if needed
133 133 // We do it regardless of precise because we need the registers
134 134 if (obj.index() == noreg && obj.disp() == 0) {
135 135 if (obj.base() != rdx) {
136 136 __ movl(rdx, obj.base());
137 137 }
138 138 } else {
139 139 __ leal(rdx, obj);
140 140 }
141 141 __ get_thread(rcx);
142 142 __ save_bcp();
143 143 __ g1_write_barrier_pre(rdx /* obj */,
144 144 rbx /* pre_val */,
145 145 rcx /* thread */,
146 146 rsi /* tmp */,
147 147 val != noreg /* tosca_live */,
148 148 false /* expand_call */);
149 149
150 150 // Do the actual store
151 151 // noreg means NULL
152 152 if (val == noreg) {
153 153 __ movptr(Address(rdx, 0), NULL_WORD);
154 154 // No post barrier for NULL
155 155 } else {
156 156 __ movl(Address(rdx, 0), val);
157 157 __ g1_write_barrier_post(rdx /* store_adr */,
158 158 val /* new_val */,
159 159 rcx /* thread */,
160 160 rbx /* tmp */,
161 161 rsi /* tmp2 */);
162 162 }
163 163 __ restore_bcp();
164 164
165 165 }
166 166 break;
167 167 #endif // SERIALGC
168 168 case BarrierSet::CardTableModRef:
169 169 case BarrierSet::CardTableExtension:
170 170 {
171 171 if (val == noreg) {
172 172 __ movptr(obj, NULL_WORD);
173 173 } else {
174 174 __ movl(obj, val);
175 175 // flatten object address if needed
176 176 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
177 177 __ store_check(obj.base());
178 178 } else {
179 179 __ leal(rdx, obj);
180 180 __ store_check(rdx);
181 181 }
182 182 }
183 183 }
184 184 break;
185 185 case BarrierSet::ModRef:
186 186 case BarrierSet::Other:
187 187 if (val == noreg) {
188 188 __ movptr(obj, NULL_WORD);
189 189 } else {
190 190 __ movl(obj, val);
191 191 }
192 192 break;
193 193 default :
194 194 ShouldNotReachHere();
195 195
196 196 }
197 197 }
198 198
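A note on the barrier dispatch in do_oop_store() above: on the CardTableModRef
path, store_check() dirties the card covering the updated field so the
collector rescans that region. A minimal C++ sketch of that post-barrier, with
illustrative names (card_table and card_shift are assumptions here, not the
VM's fields):

    #include <cstddef>
    #include <cstdint>

    // Hedged model of __ store_check(addr): dirty the card for 'field_addr'.
    static void card_table_post_barrier(uint8_t* card_table, size_t card_shift,
                                        const void* field_addr) {
      size_t card_index = reinterpret_cast<uintptr_t>(field_addr) >> card_shift;
      card_table[card_index] = 0;  // 0 == dirty; the GC rescans dirty cards
    }
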
199 199 Address TemplateTable::at_bcp(int offset) {
200 200 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
201 201 return Address(rsi, offset);
202 202 }
203 203
204 204
205 -void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
206 - Register scratch,
207 - bool load_bc_into_scratch/*=true*/) {
208 -
209 - if (!RewriteBytecodes) return;
210 - // the pair bytecodes have already done the load.
211 - if (load_bc_into_scratch) {
212 - __ movl(bc, bytecode);
205 +void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
206 + Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
207 + int byte_no) {
208 + if (!RewriteBytecodes) return;
209 + Label L_patch_done;
210 +
211 + switch (bc) {
212 + case Bytecodes::_fast_aputfield:
213 + case Bytecodes::_fast_bputfield:
214 + case Bytecodes::_fast_cputfield:
215 + case Bytecodes::_fast_dputfield:
216 + case Bytecodes::_fast_fputfield:
217 + case Bytecodes::_fast_iputfield:
218 + case Bytecodes::_fast_lputfield:
219 + case Bytecodes::_fast_sputfield:
220 + {
221 + // We skip bytecode quickening for putfield instructions when
222 + // the put_code written to the constant pool cache is zero.
223 + // This is required so that every execution of this instruction
224 + // calls out to InterpreterRuntime::resolve_get_put to do
225 + // additional, required work.
226 + assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
227 + assert(load_bc_into_bc_reg, "we use bc_reg as temp");
228 + __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
229 + __ movl(bc_reg, bc);
230 + __ cmpl(temp_reg, (int) 0);
231 + __ jcc(Assembler::zero, L_patch_done); // don't patch
232 + }
233 + break;
234 + default:
235 + assert(byte_no == -1, "sanity");
236 + // the pair bytecodes have already done the load.
237 + if (load_bc_into_bc_reg) {
238 + __ movl(bc_reg, bc);
239 + }
213 240 }
214 - Label patch_done;
241 +
215 242 if (JvmtiExport::can_post_breakpoint()) {
216 - Label fast_patch;
243 + Label L_fast_patch;
217 244 // if a breakpoint is present we can't rewrite the stream directly
218 - __ movzbl(scratch, at_bcp(0));
219 - __ cmpl(scratch, Bytecodes::_breakpoint);
220 - __ jcc(Assembler::notEqual, fast_patch);
221 - __ get_method(scratch);
245 + __ movzbl(temp_reg, at_bcp(0));
246 + __ cmpl(temp_reg, Bytecodes::_breakpoint);
247 + __ jcc(Assembler::notEqual, L_fast_patch);
248 + __ get_method(temp_reg);
222 249 // Let breakpoint table handling rewrite to quicker bytecode
223 - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
250 + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rsi, bc_reg);
224 251 #ifndef ASSERT
225 - __ jmpb(patch_done);
252 + __ jmpb(L_patch_done);
226 253 #else
227 - __ jmp(patch_done);
254 + __ jmp(L_patch_done);
228 255 #endif
229 - __ bind(fast_patch);
256 + __ bind(L_fast_patch);
230 257 }
258 +
231 259 #ifdef ASSERT
232 - Label okay;
233 - __ load_unsigned_byte(scratch, at_bcp(0));
234 - __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
235 - __ jccb(Assembler::equal, okay);
236 - __ cmpl(scratch, bc);
237 - __ jcc(Assembler::equal, okay);
260 + Label L_okay;
261 + __ load_unsigned_byte(temp_reg, at_bcp(0));
262 + __ cmpl(temp_reg, (int)Bytecodes::java_code(bc));
263 + __ jccb(Assembler::equal, L_okay);
264 + __ cmpl(temp_reg, bc_reg);
265 + __ jcc(Assembler::equal, L_okay);
238 266 __ stop("patching the wrong bytecode");
239 - __ bind(okay);
267 + __ bind(L_okay);
240 268 #endif
269 +
241 270 // patch bytecode
242 - __ movb(at_bcp(0), bc);
243 - __ bind(patch_done);
271 + __ movb(at_bcp(0), bc_reg);
272 + __ bind(L_patch_done);
244 273 }
245 274
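At the bytecode level, patch_bytecode() boils down to overwriting one byte of
the method's code stream, guarded by the breakpoint and putfield checks above,
so later executions dispatch straight to the fast template. A hedged sketch
(code and bci are illustrative names, not VM fields):

    #include <cstdint>

    // Model of the final movb(at_bcp(0), bc_reg): rewrite in place.
    static void quicken(uint8_t* code, int bci, uint8_t fast_bc) {
      code[bci] = fast_bc;  // subsequent executions use the quickened bytecode
    }
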
246 275 //----------------------------------------------------------------------------------------------------
247 276 // Individual instructions
248 277
249 278 void TemplateTable::nop() {
250 279 transition(vtos, vtos);
251 280 // nothing to do
252 281 }
253 282
254 283 void TemplateTable::shouldnotreachhere() {
255 284 transition(vtos, vtos);
256 285 __ stop("shouldnotreachhere bytecode");
257 286 }
258 287
259 288
260 289
261 290 void TemplateTable::aconst_null() {
262 291 transition(vtos, atos);
263 292 __ xorptr(rax, rax);
264 293 }
265 294
266 295
267 296 void TemplateTable::iconst(int value) {
268 297 transition(vtos, itos);
269 298 if (value == 0) {
270 299 __ xorptr(rax, rax);
271 300 } else {
272 301 __ movptr(rax, value);
273 302 }
274 303 }
275 304
276 305
277 306 void TemplateTable::lconst(int value) {
278 307 transition(vtos, ltos);
279 308 if (value == 0) {
280 309 __ xorptr(rax, rax);
281 310 } else {
282 311 __ movptr(rax, value);
283 312 }
284 313 assert(value >= 0, "check this code");
285 314 __ xorptr(rdx, rdx);
286 315 }
287 316
288 317
289 318 void TemplateTable::fconst(int value) {
290 319 transition(vtos, ftos);
291 320 if (value == 0) { __ fldz();
292 321 } else if (value == 1) { __ fld1();
293 322 } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should find a better solution here
294 323 } else { ShouldNotReachHere();
295 324 }
296 325 }
297 326
298 327
299 328 void TemplateTable::dconst(int value) {
300 329 transition(vtos, dtos);
301 330 if (value == 0) { __ fldz();
302 331 } else if (value == 1) { __ fld1();
303 332 } else { ShouldNotReachHere();
304 333 }
305 334 }
306 335
307 336
308 337 void TemplateTable::bipush() {
309 338 transition(vtos, itos);
310 339 __ load_signed_byte(rax, at_bcp(1));
311 340 }
312 341
313 342
314 343 void TemplateTable::sipush() {
315 344 transition(vtos, itos);
316 345 __ load_unsigned_short(rax, at_bcp(1));
317 346 __ bswapl(rax);
318 347 __ sarl(rax, 16);
319 348 }
320 349
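The load_unsigned_short/bswapl/sarl sequence in sipush() decodes the
big-endian two-byte operand into a sign-extended int. A hedged C++ equivalent
(buf points at the operand bytes at BCP+1; assumes two's complement):

    #include <cstdint>

    static int32_t read_s2_operand(const uint8_t* buf) {
      // bswapl leaves the operand in the high 16 bits; sarl(16) sign-extends.
      return static_cast<int16_t>((buf[0] << 8) | buf[1]);
    }
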
321 350 void TemplateTable::ldc(bool wide) {
322 351 transition(vtos, vtos);
323 352 Label call_ldc, notFloat, notClass, Done;
324 353
325 354 if (wide) {
326 355 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
327 356 } else {
328 357 __ load_unsigned_byte(rbx, at_bcp(1));
329 358 }
330 359 __ get_cpool_and_tags(rcx, rax);
331 360 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
332 361 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
333 362
334 363 // get type
335 364 __ xorptr(rdx, rdx);
336 365 __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));
337 366
338 367 // unresolved string - get the resolved string
339 368 __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
340 369 __ jccb(Assembler::equal, call_ldc);
341 370
342 371 // unresolved class - get the resolved class
343 372 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
344 373 __ jccb(Assembler::equal, call_ldc);
345 374
346 375 // unresolved class in error (resolution failed) - call into runtime
347 376 // so that the same error from first resolution attempt is thrown.
348 377 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
349 378 __ jccb(Assembler::equal, call_ldc);
350 379
351 380 // resolved class - need to call vm to get java mirror of the class
352 381 __ cmpl(rdx, JVM_CONSTANT_Class);
353 382 __ jcc(Assembler::notEqual, notClass);
354 383
355 384 __ bind(call_ldc);
356 385 __ movl(rcx, wide);
357 386 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
358 387 __ push(atos);
359 388 __ jmp(Done);
360 389
361 390 __ bind(notClass);
362 391 __ cmpl(rdx, JVM_CONSTANT_Float);
363 392 __ jccb(Assembler::notEqual, notFloat);
364 393 // ftos
365 394 __ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset));
366 395 __ push(ftos);
367 396 __ jmp(Done);
368 397
369 398 __ bind(notFloat);
370 399 #ifdef ASSERT
371 400 { Label L;
372 401 __ cmpl(rdx, JVM_CONSTANT_Integer);
373 402 __ jcc(Assembler::equal, L);
374 403 __ cmpl(rdx, JVM_CONSTANT_String);
375 404 __ jcc(Assembler::equal, L);
376 405 __ cmpl(rdx, JVM_CONSTANT_Object);
377 406 __ jcc(Assembler::equal, L);
378 407 __ stop("unexpected tag type in ldc");
379 408 __ bind(L);
380 409 }
381 410 #endif
382 411 Label isOop;
383 412 // atos and itos
384 413 // Integer is only non-oop type we will see here
385 414 __ cmpl(rdx, JVM_CONSTANT_Integer);
386 415 __ jccb(Assembler::notEqual, isOop);
387 416 __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
388 417 __ push(itos);
389 418 __ jmp(Done);
390 419 __ bind(isOop);
391 420 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
392 421 __ push(atos);
393 422
394 423 if (VerifyOops) {
395 424 __ verify_oop(rax);
396 425 }
397 426 __ bind(Done);
398 427 }
399 428
400 429 // Fast path for caching oop constants.
401 430 // %%% We should use this to handle Class and String constants also.
402 431 // %%% It will simplify the ldc/primitive path considerably.
403 432 void TemplateTable::fast_aldc(bool wide) {
404 433 transition(vtos, atos);
405 434
406 435 if (!EnableInvokeDynamic) {
407 436 // We should not encounter this bytecode if !EnableInvokeDynamic.
408 437 // The verifier will stop it. However, if we get past the verifier,
409 438 // this will stop the thread in a reasonable way, without crashing the JVM.
410 439 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
411 440 InterpreterRuntime::throw_IncompatibleClassChangeError));
412 441 // the call_VM checks for exception, so we should never return here.
413 442 __ should_not_reach_here();
414 443 return;
415 444 }
416 445
417 446 const Register cache = rcx;
418 447 const Register index = rdx;
419 448
420 449 resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
421 450 if (VerifyOops) {
422 451 __ verify_oop(rax);
423 452 }
424 453
425 454 Label L_done, L_throw_exception;
426 455 const Register con_klass_temp = rcx; // same as Rcache
427 456 __ load_klass(con_klass_temp, rax);
428 457 __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
429 458 __ jcc(Assembler::notEqual, L_done);
430 459 __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
431 460 __ jcc(Assembler::notEqual, L_throw_exception);
432 461 __ xorptr(rax, rax);
433 462 __ jmp(L_done);
434 463
435 464 // Load the exception from the system-array which wraps it:
436 465 __ bind(L_throw_exception);
437 466 __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
438 467 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
439 468
440 469 __ bind(L_done);
441 470 }
442 471
443 472 void TemplateTable::ldc2_w() {
444 473 transition(vtos, vtos);
445 474 Label Long, Done;
446 475 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
447 476
448 477 __ get_cpool_and_tags(rcx, rax);
449 478 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
450 479 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
451 480
452 481 // get type
453 482 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
454 483 __ jccb(Assembler::notEqual, Long);
455 484 // dtos
456 485 __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset));
457 486 __ push(dtos);
458 487 __ jmpb(Done);
459 488
460 489 __ bind(Long);
461 490 // ltos
462 491 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
463 492 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
464 493
465 494 __ push(ltos);
466 495
467 496 __ bind(Done);
468 497 }
469 498
470 499
471 500 void TemplateTable::locals_index(Register reg, int offset) {
472 501 __ load_unsigned_byte(reg, at_bcp(offset));
473 502 __ negptr(reg);
474 503 }
475 504
476 505
477 506 void TemplateTable::iload() {
478 507 transition(vtos, itos);
479 508 if (RewriteFrequentPairs) {
480 509 Label rewrite, done;
481 510
482 511 // get next byte
483 512 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
484 513 // if _iload, wait to rewrite to iload2. We only want to rewrite the
485 514 // last two iloads in a pair. Comparing against fast_iload means that
486 515 // the next bytecode is neither an iload nor a caload, and therefore
487 516 // an iload pair.
488 517 __ cmpl(rbx, Bytecodes::_iload);
489 518 __ jcc(Assembler::equal, done);
490 519
491 520 __ cmpl(rbx, Bytecodes::_fast_iload);
492 521 __ movl(rcx, Bytecodes::_fast_iload2);
493 522 __ jccb(Assembler::equal, rewrite);
494 523
495 524 // if _caload, rewrite to fast_icaload
496 525 __ cmpl(rbx, Bytecodes::_caload);
497 526 __ movl(rcx, Bytecodes::_fast_icaload);
498 527 __ jccb(Assembler::equal, rewrite);
499 528
500 529 // rewrite so iload doesn't check again.
501 530 __ movl(rcx, Bytecodes::_fast_iload);
502 531
503 532 // rewrite
504 533 // rcx: fast bytecode
505 534 __ bind(rewrite);
506 535 patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
507 536 __ bind(done);
508 537 }
509 538
510 539 // Get the local value into tos
511 540 locals_index(rbx);
512 541 __ movl(rax, iaddress(rbx));
513 542 }
514 543
515 544
516 545 void TemplateTable::fast_iload2() {
517 546 transition(vtos, itos);
518 547 locals_index(rbx);
519 548 __ movl(rax, iaddress(rbx));
520 549 __ push(itos);
521 550 locals_index(rbx, 3);
522 551 __ movl(rax, iaddress(rbx));
523 552 }
524 553
525 554 void TemplateTable::fast_iload() {
526 555 transition(vtos, itos);
527 556 locals_index(rbx);
528 557 __ movl(rax, iaddress(rbx));
529 558 }
530 559
531 560
532 561 void TemplateTable::lload() {
533 562 transition(vtos, ltos);
534 563 locals_index(rbx);
535 564 __ movptr(rax, laddress(rbx));
536 565 NOT_LP64(__ movl(rdx, haddress(rbx)));
537 566 }
538 567
539 568
540 569 void TemplateTable::fload() {
541 570 transition(vtos, ftos);
542 571 locals_index(rbx);
543 572 __ fld_s(faddress(rbx));
544 573 }
545 574
546 575
547 576 void TemplateTable::dload() {
548 577 transition(vtos, dtos);
549 578 locals_index(rbx);
550 579 __ fld_d(daddress(rbx));
551 580 }
552 581
553 582
554 583 void TemplateTable::aload() {
555 584 transition(vtos, atos);
556 585 locals_index(rbx);
557 586 __ movptr(rax, aaddress(rbx));
558 587 }
559 588
560 589
561 590 void TemplateTable::locals_index_wide(Register reg) {
562 591 __ movl(reg, at_bcp(2));
563 592 __ bswapl(reg);
564 593 __ shrl(reg, 16);
565 594 __ negptr(reg);
566 595 }
567 596
568 597
569 598 void TemplateTable::wide_iload() {
570 599 transition(vtos, itos);
571 600 locals_index_wide(rbx);
572 601 __ movl(rax, iaddress(rbx));
573 602 }
574 603
575 604
576 605 void TemplateTable::wide_lload() {
577 606 transition(vtos, ltos);
578 607 locals_index_wide(rbx);
579 608 __ movptr(rax, laddress(rbx));
580 609 NOT_LP64(__ movl(rdx, haddress(rbx)));
581 610 }
582 611
583 612
584 613 void TemplateTable::wide_fload() {
585 614 transition(vtos, ftos);
586 615 locals_index_wide(rbx);
587 616 __ fld_s(faddress(rbx));
588 617 }
589 618
590 619
591 620 void TemplateTable::wide_dload() {
592 621 transition(vtos, dtos);
593 622 locals_index_wide(rbx);
594 623 __ fld_d(daddress(rbx));
595 624 }
596 625
597 626
598 627 void TemplateTable::wide_aload() {
599 628 transition(vtos, atos);
600 629 locals_index_wide(rbx);
601 630 __ movptr(rax, aaddress(rbx));
602 631 }
603 632
604 633 void TemplateTable::index_check(Register array, Register index) {
605 634 // Pop ptr into array
606 635 __ pop_ptr(array);
607 636 index_check_without_pop(array, index);
608 637 }
609 638
610 639 void TemplateTable::index_check_without_pop(Register array, Register index) {
611 640 // destroys rbx,
612 641 // check array
613 642 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
614 643 LP64_ONLY(__ movslq(index, index));
615 644 // check index
616 645 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
617 646 if (index != rbx) {
618 647 // ??? convention: move aberrant index into rbx, for exception message
619 648 assert(rbx != array, "different registers");
620 649 __ mov(rbx, index);
621 650 }
622 651 __ jump_cc(Assembler::aboveEqual,
623 652 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
624 653 }
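
The cmpl/jump_cc(aboveEqual) pair above is a single unsigned comparison that
rejects both index >= length and negative indices, since a negative index
wraps above any valid array length when viewed unsigned. A hedged sketch:

    #include <cstdint>

    static bool index_in_bounds(int32_t index, int32_t length) {
      // One unsigned compare covers index < 0 and index >= length.
      return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
    }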
625 654
626 655
627 656 void TemplateTable::iaload() {
628 657 transition(itos, itos);
629 658 // rdx: array
630 659 index_check(rdx, rax); // kills rbx,
631 660 // rax,: index
632 661 __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
633 662 }
634 663
635 664
636 665 void TemplateTable::laload() {
637 666 transition(itos, ltos);
638 667 // rax,: index
639 668 // rdx: array
640 669 index_check(rdx, rax);
641 670 __ mov(rbx, rax);
642 671 // rbx,: index
643 672 __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
644 673 NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
645 674 }
646 675
647 676
648 677 void TemplateTable::faload() {
649 678 transition(itos, ftos);
650 679 // rdx: array
651 680 index_check(rdx, rax); // kills rbx,
652 681 // rax,: index
653 682 __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
654 683 }
655 684
656 685
657 686 void TemplateTable::daload() {
658 687 transition(itos, dtos);
659 688 // rdx: array
660 689 index_check(rdx, rax); // kills rbx,
661 690 // rax,: index
662 691 __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
663 692 }
664 693
665 694
666 695 void TemplateTable::aaload() {
667 696 transition(itos, atos);
668 697 // rdx: array
669 698 index_check(rdx, rax); // kills rbx,
670 699 // rax,: index
671 700 __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
672 701 }
673 702
674 703
675 704 void TemplateTable::baload() {
676 705 transition(itos, itos);
677 706 // rdx: array
678 707 index_check(rdx, rax); // kills rbx,
679 708 // rax,: index
680 709 // can do better code for P5 - fix this at some point
681 710 __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
682 711 __ mov(rax, rbx);
683 712 }
684 713
685 714
686 715 void TemplateTable::caload() {
687 716 transition(itos, itos);
688 717 // rdx: array
689 718 index_check(rdx, rax); // kills rbx,
690 719 // rax,: index
691 720 // can do better code for P5 - may want to improve this at some point
692 721 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
693 722 __ mov(rax, rbx);
694 723 }
695 724
696 725 // iload followed by caload frequent pair
697 726 void TemplateTable::fast_icaload() {
698 727 transition(vtos, itos);
699 728 // load index out of locals
700 729 locals_index(rbx);
701 730 __ movl(rax, iaddress(rbx));
702 731
703 732 // rdx: array
704 733 index_check(rdx, rax);
705 734 // rax,: index
706 735 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
707 736 __ mov(rax, rbx);
708 737 }
709 738
710 739 void TemplateTable::saload() {
711 740 transition(itos, itos);
712 741 // rdx: array
713 742 index_check(rdx, rax); // kills rbx,
714 743 // rax,: index
715 744 // can do better code for P5 - may want to improve this at some point
716 745 __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
717 746 __ mov(rax, rbx);
718 747 }
719 748
720 749
721 750 void TemplateTable::iload(int n) {
722 751 transition(vtos, itos);
723 752 __ movl(rax, iaddress(n));
724 753 }
725 754
726 755
727 756 void TemplateTable::lload(int n) {
728 757 transition(vtos, ltos);
729 758 __ movptr(rax, laddress(n));
730 759 NOT_LP64(__ movptr(rdx, haddress(n)));
731 760 }
732 761
733 762
734 763 void TemplateTable::fload(int n) {
735 764 transition(vtos, ftos);
736 765 __ fld_s(faddress(n));
737 766 }
738 767
739 768
740 769 void TemplateTable::dload(int n) {
741 770 transition(vtos, dtos);
742 771 __ fld_d(daddress(n));
743 772 }
744 773
745 774
746 775 void TemplateTable::aload(int n) {
747 776 transition(vtos, atos);
748 777 __ movptr(rax, aaddress(n));
749 778 }
750 779
751 780
752 781 void TemplateTable::aload_0() {
753 782 transition(vtos, atos);
754 783 // According to bytecode histograms, the pairs:
755 784 //
756 785 // _aload_0, _fast_igetfield
757 786 // _aload_0, _fast_agetfield
758 787 // _aload_0, _fast_fgetfield
759 788 //
760 789 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
761 790 // bytecode checks if the next bytecode is either _fast_igetfield,
762 791 // _fast_agetfield or _fast_fgetfield and then rewrites the
763 792 // current bytecode into a pair bytecode; otherwise it rewrites the current
764 793 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
765 794 //
766 795 // Note: If the next bytecode is _getfield, the rewrite must be delayed,
767 796 // otherwise we may miss an opportunity for a pair.
768 797 //
769 798 // Also rewrite frequent pairs
770 799 // aload_0, aload_1
771 800 // aload_0, iload_1
772 801 // These bytecodes with a small amount of code are most profitable to rewrite
773 802 if (RewriteFrequentPairs) {
774 803 Label rewrite, done;
775 804 // get next byte
776 805 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
777 806
778 807 // do actual aload_0
779 808 aload(0);
780 809
781 810 // if _getfield then wait with rewrite
782 811 __ cmpl(rbx, Bytecodes::_getfield);
783 812 __ jcc(Assembler::equal, done);
784 813
785 814 // if _igetfield then rewrite to _fast_iaccess_0
786 815 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
787 816 __ cmpl(rbx, Bytecodes::_fast_igetfield);
788 817 __ movl(rcx, Bytecodes::_fast_iaccess_0);
789 818 __ jccb(Assembler::equal, rewrite);
790 819
791 820 // if _agetfield then rewrite to _fast_aaccess_0
792 821 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
793 822 __ cmpl(rbx, Bytecodes::_fast_agetfield);
794 823 __ movl(rcx, Bytecodes::_fast_aaccess_0);
795 824 __ jccb(Assembler::equal, rewrite);
796 825
797 826 // if _fgetfield then rewrite to _fast_faccess_0
798 827 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
799 828 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
800 829 __ movl(rcx, Bytecodes::_fast_faccess_0);
801 830 __ jccb(Assembler::equal, rewrite);
802 831
803 832 // else rewrite to _fast_aload0
804 833 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
805 834 __ movl(rcx, Bytecodes::_fast_aload_0);
806 835
807 836 // rewrite
808 837 // rcx: fast bytecode
809 838 __ bind(rewrite);
810 839 patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);
811 840
812 841 __ bind(done);
813 842 } else {
814 843 aload(0);
815 844 }
816 845 }
817 846
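The compare chain in aload_0() selects the replacement bytecode from the
successor bytecode. A hedged C++ model of that selection (the enumerator
names follow the VM's Bytecodes enum; the helper itself is illustrative):

    // Returns what _aload_0 is rewritten to, given the next bytecode;
    // returning _aload_0 itself models the "wait with rewrite" case.
    static Bytecodes::Code aload_0_rewrite(Bytecodes::Code next) {
      switch (next) {
        case Bytecodes::_getfield:       return Bytecodes::_aload_0;  // delay
        case Bytecodes::_fast_igetfield: return Bytecodes::_fast_iaccess_0;
        case Bytecodes::_fast_agetfield: return Bytecodes::_fast_aaccess_0;
        case Bytecodes::_fast_fgetfield: return Bytecodes::_fast_faccess_0;
        default:                         return Bytecodes::_fast_aload_0;
      }
    }
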
818 847 void TemplateTable::istore() {
819 848 transition(itos, vtos);
820 849 locals_index(rbx);
821 850 __ movl(iaddress(rbx), rax);
822 851 }
823 852
824 853
825 854 void TemplateTable::lstore() {
826 855 transition(ltos, vtos);
827 856 locals_index(rbx);
828 857 __ movptr(laddress(rbx), rax);
829 858 NOT_LP64(__ movptr(haddress(rbx), rdx));
830 859 }
831 860
832 861
833 862 void TemplateTable::fstore() {
834 863 transition(ftos, vtos);
835 864 locals_index(rbx);
836 865 __ fstp_s(faddress(rbx));
837 866 }
838 867
839 868
840 869 void TemplateTable::dstore() {
841 870 transition(dtos, vtos);
842 871 locals_index(rbx);
843 872 __ fstp_d(daddress(rbx));
844 873 }
845 874
846 875
847 876 void TemplateTable::astore() {
848 877 transition(vtos, vtos);
849 878 __ pop_ptr(rax);
850 879 locals_index(rbx);
851 880 __ movptr(aaddress(rbx), rax);
852 881 }
853 882
854 883
855 884 void TemplateTable::wide_istore() {
856 885 transition(vtos, vtos);
857 886 __ pop_i(rax);
858 887 locals_index_wide(rbx);
859 888 __ movl(iaddress(rbx), rax);
860 889 }
861 890
862 891
863 892 void TemplateTable::wide_lstore() {
864 893 transition(vtos, vtos);
865 894 __ pop_l(rax, rdx);
866 895 locals_index_wide(rbx);
867 896 __ movptr(laddress(rbx), rax);
868 897 NOT_LP64(__ movl(haddress(rbx), rdx));
869 898 }
870 899
871 900
872 901 void TemplateTable::wide_fstore() {
873 902 wide_istore();
874 903 }
875 904
876 905
877 906 void TemplateTable::wide_dstore() {
878 907 wide_lstore();
879 908 }
880 909
881 910
882 911 void TemplateTable::wide_astore() {
883 912 transition(vtos, vtos);
884 913 __ pop_ptr(rax);
885 914 locals_index_wide(rbx);
886 915 __ movptr(aaddress(rbx), rax);
887 916 }
888 917
889 918
890 919 void TemplateTable::iastore() {
891 920 transition(itos, vtos);
892 921 __ pop_i(rbx);
893 922 // rax,: value
894 923 // rdx: array
895 924 index_check(rdx, rbx); // prefer index in rbx,
896 925 // rbx,: index
897 926 __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
898 927 }
899 928
900 929
901 930 void TemplateTable::lastore() {
902 931 transition(ltos, vtos);
903 932 __ pop_i(rbx);
904 933 // rax,: low(value)
905 934 // rcx: array
906 935 // rdx: high(value)
907 936 index_check(rcx, rbx); // prefer index in rbx,
908 937 // rbx,: index
909 938 __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
910 939 NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
911 940 }
912 941
913 942
914 943 void TemplateTable::fastore() {
915 944 transition(ftos, vtos);
916 945 __ pop_i(rbx);
917 946 // rdx: array
918 947 // st0: value
919 948 index_check(rdx, rbx); // prefer index in rbx,
920 949 // rbx,: index
921 950 __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
922 951 }
923 952
924 953
925 954 void TemplateTable::dastore() {
926 955 transition(dtos, vtos);
927 956 __ pop_i(rbx);
928 957 // rdx: array
929 958 // st0: value
930 959 index_check(rdx, rbx); // prefer index in rbx,
931 960 // rbx,: index
932 961 __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
933 962 }
934 963
935 964
936 965 void TemplateTable::aastore() {
937 966 Label is_null, ok_is_subtype, done;
938 967 transition(vtos, vtos);
939 968 // stack: ..., array, index, value
940 969 __ movptr(rax, at_tos()); // Value
941 970 __ movl(rcx, at_tos_p1()); // Index
942 971 __ movptr(rdx, at_tos_p2()); // Array
943 972
944 973 Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
945 974 index_check_without_pop(rdx, rcx); // kills rbx,
946 975 // do array store check - check for NULL value first
947 976 __ testptr(rax, rax);
948 977 __ jcc(Assembler::zero, is_null);
949 978
950 979 // Move subklass into EBX
951 980 __ load_klass(rbx, rax);
952 981 // Move superklass into EAX
953 982 __ load_klass(rax, rdx);
954 983 __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
955 984 // Compress array+index*wordSize+12 into a single register. Frees ECX.
956 985 __ lea(rdx, element_address);
957 986
958 987 // Generate subtype check. Blows ECX. Resets EDI to locals.
959 988 // Superklass in EAX. Subklass in EBX.
960 989 __ gen_subtype_check( rbx, ok_is_subtype );
961 990
962 991 // Come here on failure
963 992 // object is at TOS
964 993 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
965 994
966 995 // Come here on success
967 996 __ bind(ok_is_subtype);
968 997
969 998 // Get the value to store
970 999 __ movptr(rax, at_rsp());
971 1000 // and store it with appropriate barrier
972 1001 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
973 1002
974 1003 __ jmp(done);
975 1004
976 1005 // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
977 1006 __ bind(is_null);
978 1007 __ profile_null_seen(rbx);
979 1008
980 1009 // Store NULL, (noreg means NULL to do_oop_store)
981 1010 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
982 1011
983 1012 // Pop stack arguments
984 1013 __ bind(done);
985 1014 __ addptr(rsp, 3 * Interpreter::stackElementSize);
986 1015 }
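
The store check in aastore() reduces to: NULL stores always succeed (with a
profile update but no type test); otherwise the value's class must be a
subtype of the array's element class, else ArrayStoreException. A hedged,
simplified model (the real gen_subtype_check uses the super-cache and
secondary-supers list, not a linear walk):

    struct KlassModel { const KlassModel* super; };  // illustrative, not the VM type

    static bool aastore_allowed(const KlassModel* value_klass,
                                const KlassModel* element_klass) {
      for (const KlassModel* k = value_klass; k != nullptr; k = k->super) {
        if (k == element_klass) return true;  // ok_is_subtype path
      }
      return false;                           // ArrayStoreException path
    }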
987 1016
988 1017
989 1018 void TemplateTable::bastore() {
990 1019 transition(itos, vtos);
991 1020 __ pop_i(rbx);
992 1021 // rax,: value
993 1022 // rdx: array
994 1023 index_check(rdx, rbx); // prefer index in rbx,
995 1024 // rbx,: index
996 1025 __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
997 1026 }
998 1027
999 1028
1000 1029 void TemplateTable::castore() {
1001 1030 transition(itos, vtos);
1002 1031 __ pop_i(rbx);
1003 1032 // rax,: value
1004 1033 // rdx: array
1005 1034 index_check(rdx, rbx); // prefer index in rbx,
1006 1035 // rbx,: index
1007 1036 __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
1008 1037 }
1009 1038
1010 1039
1011 1040 void TemplateTable::sastore() {
1012 1041 castore();
1013 1042 }
1014 1043
1015 1044
1016 1045 void TemplateTable::istore(int n) {
1017 1046 transition(itos, vtos);
1018 1047 __ movl(iaddress(n), rax);
1019 1048 }
1020 1049
1021 1050
1022 1051 void TemplateTable::lstore(int n) {
1023 1052 transition(ltos, vtos);
1024 1053 __ movptr(laddress(n), rax);
1025 1054 NOT_LP64(__ movptr(haddress(n), rdx));
1026 1055 }
1027 1056
1028 1057
1029 1058 void TemplateTable::fstore(int n) {
1030 1059 transition(ftos, vtos);
1031 1060 __ fstp_s(faddress(n));
1032 1061 }
1033 1062
1034 1063
1035 1064 void TemplateTable::dstore(int n) {
1036 1065 transition(dtos, vtos);
1037 1066 __ fstp_d(daddress(n));
1038 1067 }
1039 1068
1040 1069
1041 1070 void TemplateTable::astore(int n) {
1042 1071 transition(vtos, vtos);
1043 1072 __ pop_ptr(rax);
1044 1073 __ movptr(aaddress(n), rax);
1045 1074 }
1046 1075
1047 1076
1048 1077 void TemplateTable::pop() {
1049 1078 transition(vtos, vtos);
1050 1079 __ addptr(rsp, Interpreter::stackElementSize);
1051 1080 }
1052 1081
1053 1082
1054 1083 void TemplateTable::pop2() {
1055 1084 transition(vtos, vtos);
1056 1085 __ addptr(rsp, 2*Interpreter::stackElementSize);
1057 1086 }
1058 1087
1059 1088
1060 1089 void TemplateTable::dup() {
1061 1090 transition(vtos, vtos);
1062 1091 // stack: ..., a
1063 1092 __ load_ptr(0, rax);
1064 1093 __ push_ptr(rax);
1065 1094 // stack: ..., a, a
1066 1095 }
1067 1096
1068 1097
1069 1098 void TemplateTable::dup_x1() {
1070 1099 transition(vtos, vtos);
1071 1100 // stack: ..., a, b
1072 1101 __ load_ptr( 0, rax); // load b
1073 1102 __ load_ptr( 1, rcx); // load a
1074 1103 __ store_ptr(1, rax); // store b
1075 1104 __ store_ptr(0, rcx); // store a
1076 1105 __ push_ptr(rax); // push b
1077 1106 // stack: ..., b, a, b
1078 1107 }
1079 1108
1080 1109
1081 1110 void TemplateTable::dup_x2() {
1082 1111 transition(vtos, vtos);
1083 1112 // stack: ..., a, b, c
1084 1113 __ load_ptr( 0, rax); // load c
1085 1114 __ load_ptr( 2, rcx); // load a
1086 1115 __ store_ptr(2, rax); // store c in a
1087 1116 __ push_ptr(rax); // push c
1088 1117 // stack: ..., c, b, c, c
1089 1118 __ load_ptr( 2, rax); // load b
1090 1119 __ store_ptr(2, rcx); // store a in b
1091 1120 // stack: ..., c, a, c, c
1092 1121 __ store_ptr(1, rax); // store b in c
1093 1122 // stack: ..., c, a, b, c
1094 1123 }
1095 1124
1096 1125
1097 1126 void TemplateTable::dup2() {
1098 1127 transition(vtos, vtos);
1099 1128 // stack: ..., a, b
1100 1129 __ load_ptr(1, rax); // load a
1101 1130 __ push_ptr(rax); // push a
1102 1131 __ load_ptr(1, rax); // load b
1103 1132 __ push_ptr(rax); // push b
1104 1133 // stack: ..., a, b, a, b
1105 1134 }
1106 1135
1107 1136
1108 1137 void TemplateTable::dup2_x1() {
1109 1138 transition(vtos, vtos);
1110 1139 // stack: ..., a, b, c
1111 1140 __ load_ptr( 0, rcx); // load c
1112 1141 __ load_ptr( 1, rax); // load b
1113 1142 __ push_ptr(rax); // push b
1114 1143 __ push_ptr(rcx); // push c
1115 1144 // stack: ..., a, b, c, b, c
1116 1145 __ store_ptr(3, rcx); // store c in b
1117 1146 // stack: ..., a, c, c, b, c
1118 1147 __ load_ptr( 4, rcx); // load a
1119 1148 __ store_ptr(2, rcx); // store a in 2nd c
1120 1149 // stack: ..., a, c, a, b, c
1121 1150 __ store_ptr(4, rax); // store b in a
1122 1151 // stack: ..., b, c, a, b, c
1123 1152 // stack: ..., b, c, a, b, c
1124 1153 }
1125 1154
1126 1155
1127 1156 void TemplateTable::dup2_x2() {
1128 1157 transition(vtos, vtos);
1129 1158 // stack: ..., a, b, c, d
1130 1159 __ load_ptr( 0, rcx); // load d
1131 1160 __ load_ptr( 1, rax); // load c
1132 1161 __ push_ptr(rax); // push c
1133 1162 __ push_ptr(rcx); // push d
1134 1163 // stack: ..., a, b, c, d, c, d
1135 1164 __ load_ptr( 4, rax); // load b
1136 1165 __ store_ptr(2, rax); // store b in d
1137 1166 __ store_ptr(4, rcx); // store d in b
1138 1167 // stack: ..., a, d, c, b, c, d
1139 1168 __ load_ptr( 5, rcx); // load a
1140 1169 __ load_ptr( 3, rax); // load c
1141 1170 __ store_ptr(3, rcx); // store a in c
1142 1171 __ store_ptr(5, rax); // store c in a
1143 1172 // stack: ..., c, d, a, b, c, d
1144 1173 // stack: ..., c, d, a, b, c, d
1145 1174 }
1146 1175
1147 1176
1148 1177 void TemplateTable::swap() {
1149 1178 transition(vtos, vtos);
1150 1179 // stack: ..., a, b
1151 1180 __ load_ptr( 1, rcx); // load a
1152 1181 __ load_ptr( 0, rax); // load b
1153 1182 __ store_ptr(0, rcx); // store a in b
1154 1183 __ store_ptr(1, rax); // store b in a
1155 1184 // stack: ..., b, a
1156 1185 }
1157 1186
1158 1187
1159 1188 void TemplateTable::iop2(Operation op) {
1160 1189 transition(itos, itos);
1161 1190 switch (op) {
1162 1191 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1163 1192 case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1164 1193 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1165 1194 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1166 1195 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1167 1196 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1168 1197 case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1169 1198 case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1170 1199 case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
1171 1200 default : ShouldNotReachHere();
1172 1201 }
1173 1202 }
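
The "implicit masking" comments above rely on x86 shift instructions using
only the low 5 bits of CL, which happens to match the JLS rule for 32-bit
shifts, so no explicit andl is needed. A hedged equivalent:

    #include <cstdint>

    static int32_t java_ishl(int32_t x, int32_t count) {
      return x << (count & 31);  // x86 'shll %cl' masks the count the same way
    }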
1174 1203
1175 1204
1176 1205 void TemplateTable::lop2(Operation op) {
1177 1206 transition(ltos, ltos);
1178 1207 __ pop_l(rbx, rcx);
1179 1208 switch (op) {
1180 1209 case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1181 1210 case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1182 1211 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1183 1212 case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1184 1213 case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1185 1214 case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1186 1215 default : ShouldNotReachHere();
1187 1216 }
1188 1217 }
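
On 32-bit x86 a long add is the addl/adcl pair emitted above; a hedged C++
model of how the carry propagates from the low word to the high word:

    #include <cstdint>

    static void ladd(uint32_t& lo, uint32_t& hi, uint32_t lo2, uint32_t hi2) {
      uint32_t old_lo = lo;
      lo += lo2;                          // addl
      hi += hi2 + (lo < old_lo ? 1 : 0);  // adcl: add with carry from low word
    }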
1189 1218
1190 1219
1191 1220 void TemplateTable::idiv() {
1192 1221 transition(itos, itos);
1193 1222 __ mov(rcx, rax);
1194 1223 __ pop_i(rax);
1195 1224 // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If
1196 1225 // they are not equal, one could do a normal division (no correction
1197 1226 // needed), which may speed up this implementation for the common case.
1198 1227 // (see also JVM spec., p.243 & p.271)
1199 1228 __ corrected_idivl(rcx);
1200 1229 }
1201 1230
1202 1231
1203 1232 void TemplateTable::irem() {
1204 1233 transition(itos, itos);
1205 1234 __ mov(rcx, rax);
1206 1235 __ pop_i(rax);
1207 1236 // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If
1208 1237 // they are not equal, one could do a normal division (no correction
1209 1238 // needed), which may speed up this implementation for the common case.
1210 1239 // (see also JVM spec., p.243 & p.271)
1211 1240 __ corrected_idivl(rcx);
1212 1241 __ mov(rax, rdx);
1213 1242 }
1214 1243
1215 1244
1216 1245 void TemplateTable::lmul() {
1217 1246 transition(ltos, ltos);
1218 1247 __ pop_l(rbx, rcx);
1219 1248 __ push(rcx); __ push(rbx);
1220 1249 __ push(rdx); __ push(rax);
1221 1250 __ lmul(2 * wordSize, 0);
1222 1251 __ addptr(rsp, 4 * wordSize); // take off temporaries
1223 1252 }
1224 1253
1225 1254
1226 1255 void TemplateTable::ldiv() {
1227 1256 transition(ltos, ltos);
1228 1257 __ pop_l(rbx, rcx);
1229 1258 __ push(rcx); __ push(rbx);
1230 1259 __ push(rdx); __ push(rax);
1231 1260 // check if y = 0
1232 1261 __ orl(rax, rdx);
1233 1262 __ jump_cc(Assembler::zero,
1234 1263 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1235 1264 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1236 1265 __ addptr(rsp, 4 * wordSize); // take off temporaries
1237 1266 }
1238 1267
1239 1268
1240 1269 void TemplateTable::lrem() {
1241 1270 transition(ltos, ltos);
1242 1271 __ pop_l(rbx, rcx);
1243 1272 __ push(rcx); __ push(rbx);
1244 1273 __ push(rdx); __ push(rax);
1245 1274 // check if y = 0
1246 1275 __ orl(rax, rdx);
1247 1276 __ jump_cc(Assembler::zero,
1248 1277 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1249 1278 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1250 1279 __ addptr(rsp, 4 * wordSize);
1251 1280 }
1252 1281
1253 1282
1254 1283 void TemplateTable::lshl() {
1255 1284 transition(itos, ltos);
1256 1285 __ movl(rcx, rax); // get shift count
1257 1286 __ pop_l(rax, rdx); // get shift value
1258 1287 __ lshl(rdx, rax);
1259 1288 }
1260 1289
1261 1290
1262 1291 void TemplateTable::lshr() {
1263 1292 transition(itos, ltos);
1264 1293 __ mov(rcx, rax); // get shift count
1265 1294 __ pop_l(rax, rdx); // get shift value
1266 1295 __ lshr(rdx, rax, true);
1267 1296 }
1268 1297
1269 1298
1270 1299 void TemplateTable::lushr() {
1271 1300 transition(itos, ltos);
1272 1301 __ mov(rcx, rax); // get shift count
1273 1302 __ pop_l(rax, rdx); // get shift value
1274 1303 __ lshr(rdx, rax);
1275 1304 }
1276 1305
1277 1306
1278 1307 void TemplateTable::fop2(Operation op) {
1279 1308 transition(ftos, ftos);
1280 1309 switch (op) {
1281 1310 case add: __ fadd_s (at_rsp()); break;
1282 1311 case sub: __ fsubr_s(at_rsp()); break;
1283 1312 case mul: __ fmul_s (at_rsp()); break;
1284 1313 case div: __ fdivr_s(at_rsp()); break;
1285 1314 case rem: __ fld_s (at_rsp()); __ fremr(rax); break;
1286 1315 default : ShouldNotReachHere();
1287 1316 }
1288 1317 __ f2ieee();
1289 1318 __ pop(rax); // pop float thing off
1290 1319 }
1291 1320
1292 1321
1293 1322 void TemplateTable::dop2(Operation op) {
1294 1323 transition(dtos, dtos);
1295 1324
1296 1325 switch (op) {
1297 1326 case add: __ fadd_d (at_rsp()); break;
1298 1327 case sub: __ fsubr_d(at_rsp()); break;
1299 1328 case mul: {
1300 1329 Label L_strict;
1301 1330 Label L_join;
1302 1331 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
1303 1332 __ get_method(rcx);
1304 1333 __ movl(rcx, access_flags);
1305 1334 __ testl(rcx, JVM_ACC_STRICT);
1306 1335 __ jccb(Assembler::notZero, L_strict);
1307 1336 __ fmul_d (at_rsp());
1308 1337 __ jmpb(L_join);
1309 1338 __ bind(L_strict);
1310 1339 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1311 1340 __ fmulp();
1312 1341 __ fmul_d (at_rsp());
1313 1342 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1314 1343 __ fmulp();
1315 1344 __ bind(L_join);
1316 1345 break;
1317 1346 }
1318 1347 case div: {
1319 1348 Label L_strict;
1320 1349 Label L_join;
1321 1350 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
1322 1351 __ get_method(rcx);
1323 1352 __ movl(rcx, access_flags);
1324 1353 __ testl(rcx, JVM_ACC_STRICT);
1325 1354 __ jccb(Assembler::notZero, L_strict);
1326 1355 __ fdivr_d(at_rsp());
1327 1356 __ jmp(L_join);
1328 1357 __ bind(L_strict);
1329 1358 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1330 1359 __ fmul_d (at_rsp());
1331 1360 __ fdivrp();
1332 1361 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1333 1362 __ fmulp();
1334 1363 __ bind(L_join);
1335 1364 break;
1336 1365 }
1337 1366 case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
1338 1367 default : ShouldNotReachHere();
1339 1368 }
1340 1369 __ d2ieee();
1341 1370 // Pop double precision number from rsp.
1342 1371 __ pop(rax);
1343 1372 __ pop(rdx);
1344 1373 }
1345 1374
1346 1375
1347 1376 void TemplateTable::ineg() {
1348 1377 transition(itos, itos);
1349 1378 __ negl(rax);
1350 1379 }
1351 1380
1352 1381
1353 1382 void TemplateTable::lneg() {
1354 1383 transition(ltos, ltos);
1355 1384 __ lneg(rdx, rax);
1356 1385 }
1357 1386
1358 1387
1359 1388 void TemplateTable::fneg() {
1360 1389 transition(ftos, ftos);
1361 1390 __ fchs();
1362 1391 }
1363 1392
1364 1393
1365 1394 void TemplateTable::dneg() {
1366 1395 transition(dtos, dtos);
1367 1396 __ fchs();
1368 1397 }
1369 1398
1370 1399
1371 1400 void TemplateTable::iinc() {
1372 1401 transition(vtos, vtos);
1373 1402 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1374 1403 locals_index(rbx);
1375 1404 __ addl(iaddress(rbx), rdx);
1376 1405 }
1377 1406
1378 1407
1379 1408 void TemplateTable::wide_iinc() {
1380 1409 transition(vtos, vtos);
1381 1410 __ movl(rdx, at_bcp(4)); // get constant
1382 1411 locals_index_wide(rbx);
1383 1412 __ bswapl(rdx); // swap bytes & sign-extend constant
1384 1413 __ sarl(rdx, 16);
1385 1414 __ addl(iaddress(rbx), rdx);
1386 1415 // Note: should probably use only one movl to get both
1387 1416 // the index and the constant -> fix this
1388 1417 }
1389 1418
1390 1419
1391 1420 void TemplateTable::convert() {
1392 1421 // Checking
1393 1422 #ifdef ASSERT
1394 1423 { TosState tos_in = ilgl;
1395 1424 TosState tos_out = ilgl;
1396 1425 switch (bytecode()) {
1397 1426 case Bytecodes::_i2l: // fall through
1398 1427 case Bytecodes::_i2f: // fall through
1399 1428 case Bytecodes::_i2d: // fall through
1400 1429 case Bytecodes::_i2b: // fall through
1401 1430 case Bytecodes::_i2c: // fall through
1402 1431 case Bytecodes::_i2s: tos_in = itos; break;
1403 1432 case Bytecodes::_l2i: // fall through
1404 1433 case Bytecodes::_l2f: // fall through
1405 1434 case Bytecodes::_l2d: tos_in = ltos; break;
1406 1435 case Bytecodes::_f2i: // fall through
1407 1436 case Bytecodes::_f2l: // fall through
1408 1437 case Bytecodes::_f2d: tos_in = ftos; break;
1409 1438 case Bytecodes::_d2i: // fall through
1410 1439 case Bytecodes::_d2l: // fall through
1411 1440 case Bytecodes::_d2f: tos_in = dtos; break;
1412 1441 default : ShouldNotReachHere();
1413 1442 }
1414 1443 switch (bytecode()) {
1415 1444 case Bytecodes::_l2i: // fall through
1416 1445 case Bytecodes::_f2i: // fall through
1417 1446 case Bytecodes::_d2i: // fall through
1418 1447 case Bytecodes::_i2b: // fall through
1419 1448 case Bytecodes::_i2c: // fall through
1420 1449 case Bytecodes::_i2s: tos_out = itos; break;
1421 1450 case Bytecodes::_i2l: // fall through
1422 1451 case Bytecodes::_f2l: // fall through
1423 1452 case Bytecodes::_d2l: tos_out = ltos; break;
1424 1453 case Bytecodes::_i2f: // fall through
1425 1454 case Bytecodes::_l2f: // fall through
1426 1455 case Bytecodes::_d2f: tos_out = ftos; break;
1427 1456 case Bytecodes::_i2d: // fall through
1428 1457 case Bytecodes::_l2d: // fall through
1429 1458 case Bytecodes::_f2d: tos_out = dtos; break;
1430 1459 default : ShouldNotReachHere();
1431 1460 }
1432 1461 transition(tos_in, tos_out);
1433 1462 }
1434 1463 #endif // ASSERT
1435 1464
1436 1465 // Conversion
1437 1466 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1438 1467 switch (bytecode()) {
1439 1468 case Bytecodes::_i2l:
1440 1469 __ extend_sign(rdx, rax);
1441 1470 break;
1442 1471 case Bytecodes::_i2f:
1443 1472 __ push(rax); // store int on tos
1444 1473 __ fild_s(at_rsp()); // load int to ST0
1445 1474 __ f2ieee(); // truncate to float size
1446 1475 __ pop(rcx); // adjust rsp
1447 1476 break;
1448 1477 case Bytecodes::_i2d:
1449 1478 __ push(rax); // add one slot for d2ieee()
1450 1479 __ push(rax); // store int on tos
1451 1480 __ fild_s(at_rsp()); // load int to ST0
1452 1481 __ d2ieee(); // truncate to double size
1453 1482 __ pop(rcx); // adjust rsp
1454 1483 __ pop(rcx);
1455 1484 break;
1456 1485 case Bytecodes::_i2b:
1457 1486 __ shll(rax, 24); // truncate upper 24 bits
1458 1487 __ sarl(rax, 24); // and sign-extend byte
1459 1488 LP64_ONLY(__ movsbl(rax, rax));
1460 1489 break;
1461 1490 case Bytecodes::_i2c:
1462 1491 __ andl(rax, 0xFFFF); // truncate upper 16 bits
1463 1492 LP64_ONLY(__ movzwl(rax, rax));
1464 1493 break;
1465 1494 case Bytecodes::_i2s:
1466 1495 __ shll(rax, 16); // truncate upper 16 bits
1467 1496 __ sarl(rax, 16); // and sign-extend short
1468 1497 LP64_ONLY(__ movswl(rax, rax));
1469 1498 break;
1470 1499 case Bytecodes::_l2i:
1471 1500 /* nothing to do */
1472 1501 break;
1473 1502 case Bytecodes::_l2f:
1474 1503 __ push(rdx); // store long on tos
1475 1504 __ push(rax);
1476 1505 __ fild_d(at_rsp()); // load long to ST0
1477 1506 __ f2ieee(); // truncate to float size
1478 1507 __ pop(rcx); // adjust rsp
1479 1508 __ pop(rcx);
1480 1509 break;
1481 1510 case Bytecodes::_l2d:
1482 1511 __ push(rdx); // store long on tos
1483 1512 __ push(rax);
1484 1513 __ fild_d(at_rsp()); // load long to ST0
1485 1514 __ d2ieee(); // truncate to double size
1486 1515 __ pop(rcx); // adjust rsp
1487 1516 __ pop(rcx);
1488 1517 break;
1489 1518 case Bytecodes::_f2i:
1490 1519 __ push(rcx); // reserve space for argument
1491 1520 __ fstp_s(at_rsp()); // pass float argument on stack
1492 1521 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1493 1522 break;
1494 1523 case Bytecodes::_f2l:
1495 1524 __ push(rcx); // reserve space for argument
1496 1525 __ fstp_s(at_rsp()); // pass float argument on stack
1497 1526 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1498 1527 break;
1499 1528 case Bytecodes::_f2d:
1500 1529 /* nothing to do */
1501 1530 break;
1502 1531 case Bytecodes::_d2i:
1503 1532 __ push(rcx); // reserve space for argument
1504 1533 __ push(rcx);
1505 1534 __ fstp_d(at_rsp()); // pass double argument on stack
1506 1535 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
1507 1536 break;
1508 1537 case Bytecodes::_d2l:
1509 1538 __ push(rcx); // reserve space for argument
1510 1539 __ push(rcx);
1511 1540 __ fstp_d(at_rsp()); // pass double argument on stack
1512 1541 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
1513 1542 break;
1514 1543 case Bytecodes::_d2f:
1515 1544 __ push(rcx); // reserve space for f2ieee()
1516 1545 __ f2ieee(); // truncate to float size
1517 1546 __ pop(rcx); // adjust rsp
1518 1547 break;
1519 1548 default :
1520 1549 ShouldNotReachHere();
1521 1550 }
1522 1551 }
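
The shll/sarl (and andl) sequences in the _i2b/_i2c/_i2s cases are the usual
truncate-then-extend idioms; hedged C++ equivalents (assuming two's
complement):

    #include <cstdint>

    static int32_t i2b(int32_t x) { return static_cast<int8_t>(x);  }  // shll 24; sarl 24
    static int32_t i2c(int32_t x) { return x & 0xFFFF;              }  // andl 0xFFFF
    static int32_t i2s(int32_t x) { return static_cast<int16_t>(x); }  // shll 16; sarl 16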
1523 1552
1524 1553
1525 1554 void TemplateTable::lcmp() {
1526 1555 transition(ltos, itos);
1527 1556 // y = rdx:rax
1528 1557 __ pop_l(rbx, rcx); // get x = rcx:rbx
1529 1558 __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
1530 1559 __ mov(rax, rcx);
1531 1560 }
1532 1561
1533 1562
1534 1563 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1535 1564 if (is_float) {
1536 1565 __ fld_s(at_rsp());
1537 1566 } else {
1538 1567 __ fld_d(at_rsp());
1539 1568 __ pop(rdx);
1540 1569 }
1541 1570 __ pop(rcx);
1542 1571 __ fcmp2int(rax, unordered_result < 0);
1543 1572 }
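
lcmp2int/fcmp2int produce the three-way result the comparison bytecodes push;
for floats and doubles a NaN operand yields unordered_result (+1 for
fcmpg/dcmpg, -1 for fcmpl/dcmpl). A hedged model of both:

    #include <cstdint>

    static int32_t lcmp(int64_t x, int64_t y) {
      return x < y ? -1 : (x == y ? 0 : 1);
    }

    static int32_t fcmp(float x, float y, int32_t unordered_result) {
      if (x != x || y != y) return unordered_result;  // NaN involved
      return x < y ? -1 : (x == y ? 0 : 1);
    }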
1544 1573
1545 1574
1546 1575 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1547 1576 __ get_method(rcx); // ECX holds method
1548 1577 __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
1549 1578
1550 1579 const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
1551 1580 const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
1552 1581 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
1553 1582
1554 1583 // Load up EDX with the branch displacement
1555 1584 __ movl(rdx, at_bcp(1));
1556 1585 __ bswapl(rdx);
1557 1586 if (!is_wide) __ sarl(rdx, 16);
1558 1587 LP64_ONLY(__ movslq(rdx, rdx));
1559 1588
1560 1589
1561 1590 // Handle all the JSR stuff here, then exit.
1562 1591 // It's much shorter and cleaner than intermingling with the
1563 1592 // non-JSR normal-branch stuff occurring below.
1564 1593 if (is_jsr) {
1565 1594 // Pre-load the next target bytecode into EBX
1566 1595 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
1567 1596
1568 1597 // compute return address as bci in rax,
1569 1598 __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
1570 1599 __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
1571 1600 // Adjust the bcp in RSI by the displacement in EDX
1572 1601 __ addptr(rsi, rdx);
1573 1602 // Push return address
1574 1603 __ push_i(rax);
1575 1604 // jsr returns vtos
1576 1605 __ dispatch_only_noverify(vtos);
1577 1606 return;
1578 1607 }
1579 1608
1580 1609 // Normal (non-jsr) branch handling
1581 1610
1582 1611 // Adjust the bcp in RSI by the displacement in EDX
1583 1612 __ addptr(rsi, rdx);
1584 1613
1585 1614 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
1586 1615 Label backedge_counter_overflow;
1587 1616 Label profile_method;
1588 1617 Label dispatch;
1589 1618 if (UseLoopCounter) {
1590 1619 // increment backedge counter for backward branches
1591 1620 // rax,: MDO
1592 1621 // rbx,: MDO bumped taken-count
1593 1622 // rcx: method
1594 1623 // rdx: target offset
1595 1624 // rsi: target bcp
1596 1625 // rdi: locals pointer
1597 1626 __ testl(rdx, rdx); // check if forward or backward branch
1598 1627 __ jcc(Assembler::positive, dispatch); // count only if backward branch
1599 1628
1600 1629 if (TieredCompilation) {
1601 1630 Label no_mdo;
1602 1631 int increment = InvocationCounter::count_increment;
1603 1632 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1604 1633 if (ProfileInterpreter) {
1605 1634 // Are we profiling?
1606 1635 __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
1607 1636 __ testptr(rbx, rbx);
1608 1637 __ jccb(Assembler::zero, no_mdo);
1609 1638 // Increment the MDO backedge counter
1610 1639 const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
1611 1640 in_bytes(InvocationCounter::counter_offset()));
1612 1641 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1613 1642 rax, false, Assembler::zero, &backedge_counter_overflow);
1614 1643 __ jmp(dispatch);
1615 1644 }
1616 1645 __ bind(no_mdo);
1617 1646 // Increment backedge counter in methodOop
1618 1647 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
1619 1648 rax, false, Assembler::zero, &backedge_counter_overflow);
1620 1649 } else {
1621 1650 // increment counter
1622 1651 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
1623 1652 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
1624 1653 __ movl(Address(rcx, be_offset), rax); // store counter
1625 1654
1626 1655 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
1627 1656 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
1628 1657 __ addl(rax, Address(rcx, be_offset)); // add both counters
1629 1658
1630 1659 if (ProfileInterpreter) {
1631 1660 // Test to see if we should create a method data oop
1632 1661 __ cmp32(rax,
1633 1662 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
1634 1663 __ jcc(Assembler::less, dispatch);
1635 1664
1636 1665 // if no method data exists, go to profile method
1637 1666 __ test_method_data_pointer(rax, profile_method);
1638 1667
1639 1668 if (UseOnStackReplacement) {
1640 1669 // check for overflow against rbx, which is the MDO taken count
1641 1670 __ cmp32(rbx,
1642 1671 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1643 1672 __ jcc(Assembler::below, dispatch);
1644 1673
1645 1674 // When ProfileInterpreter is on, the backedge_count comes from the
1646 1675 // methodDataOop, whose value does not get reset on the call to
1647 1676 // frequency_counter_overflow(). To avoid excessive calls to the overflow
1648 1677 // routine while the method is being compiled, add a second test to make
1649 1678 // sure the overflow function is called only once every overflow_frequency.
1650 1679 const int overflow_frequency = 1024;
1651 1680 __ andptr(rbx, overflow_frequency-1);
1652 1681 __ jcc(Assembler::zero, backedge_counter_overflow);
1653 1682 }
1654 1683 } else {
1655 1684 if (UseOnStackReplacement) {
1656 1685 // check for overflow against rax, which is the sum of the counters
1657 1686 __ cmp32(rax,
1658 1687 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1659 1688 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
1660 1689
1661 1690 }
1662 1691 }
1663 1692 }
1664 1693 __ bind(dispatch);
1665 1694 }
1666 1695
1667 1696 // Pre-load the next target bytecode into EBX
1668 1697 __ load_unsigned_byte(rbx, Address(rsi, 0));
1669 1698
1670 1699 // continue with the bytecode @ target
1671 1700 // rax,: return bci for jsr's, unused otherwise
1672 1701 // rbx,: target bytecode
1673 1702 // rsi: target bcp
1674 1703 __ dispatch_only(vtos);
1675 1704
1676 1705 if (UseLoopCounter) {
1677 1706 if (ProfileInterpreter) {
1678 1707 // Out-of-line code to allocate method data oop.
1679 1708 __ bind(profile_method);
1680 1709 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1681 1710 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1682 1711 __ set_method_data_pointer_for_bcp();
1683 1712 __ jmp(dispatch);
1684 1713 }
1685 1714
1686 1715 if (UseOnStackReplacement) {
1687 1716
1688 1717 // invocation counter overflow
1689 1718 __ bind(backedge_counter_overflow);
1690 1719 __ negptr(rdx);
1691 1720 __ addptr(rdx, rsi); // branch bcp
1692 1721 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
1693 1722 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
1694 1723
1695 1724 // rax,: osr nmethod (osr ok) or NULL (osr not possible)
1696 1725 // rbx,: target bytecode
1697 1726 // rdx: scratch
1698 1727 // rdi: locals pointer
1699 1728 // rsi: bcp
1700 1729 __ testptr(rax, rax); // test result
1701 1730 __ jcc(Assembler::zero, dispatch); // no osr if null
1702 1731 // nmethod may have been invalidated (VM may block upon call_VM return)
1703 1732 __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
1704 1733 __ cmpl(rcx, InvalidOSREntryBci);
1705 1734 __ jcc(Assembler::equal, dispatch);
1706 1735
1707 1736 // We have the address of an on stack replacement routine in rax,
1708 1737 // We need to prepare to execute the OSR method. First we must
1709 1738 // migrate the locals and monitors off of the stack.
1710 1739
1711 1740 __ mov(rbx, rax); // save the nmethod
1712 1741
1713 1742 const Register thread = rcx;
1714 1743 __ get_thread(thread);
1715 1744 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1716 1745 // rax, is OSR buffer, move it to expected parameter location
1717 1746 __ mov(rcx, rax);
1718 1747
1719 1748 // pop the interpreter frame
1720 1749 __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1721 1750 __ leave(); // remove frame anchor
1722 1751 __ pop(rdi); // get return address
1723 1752 __ mov(rsp, rdx); // set sp to sender sp
1724 1753
1725 1754 // Align stack pointer for compiled code (note that caller is
1726 1755 // responsible for undoing this fixup by remembering the old SP
1727 1756 // in an rbp,-relative location)
1728 1757 __ andptr(rsp, -(StackAlignmentInBytes));
1729 1758
1730 1759 // push the (possibly adjusted) return address
1731 1760 __ push(rdi);
1732 1761
1733 1762 // and begin the OSR nmethod
1734 1763 __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
1735 1764 }
1736 1765 }
1737 1766 }
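// Sketch of the non-tiered backedge-counting path above (illustrative
// pseudocode only; names abbreviated):
//
//   bcp += displacement;
//   if (UseLoopCounter && displacement <= 0) {      // backward branch
//     backedge_counter += InvocationCounter::count_increment;
//     int sum = backedge_counter + (invocation_counter & count_mask);
//     if (UseOnStackReplacement && sum >= InterpreterBackwardBranchLimit) {
//       nmethod* osr = frequency_counter_overflow(branch_bcp);
//       if (osr != NULL && osr->entry_bci() != InvalidOSREntryBci) {
//         // migrate locals/monitors, pop the frame, jump to the OSR entry
//       }
//     }
//   }
//   dispatch(next_bytecode);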
1738 1767
1739 1768
1740 1769 void TemplateTable::if_0cmp(Condition cc) {
1741 1770 transition(itos, vtos);
1742 1771 // assume branch is more often taken than not (loops use backward branches)
1743 1772 Label not_taken;
1744 1773 __ testl(rax, rax);
1745 1774 __ jcc(j_not(cc), not_taken);
1746 1775 branch(false, false);
1747 1776 __ bind(not_taken);
1748 1777 __ profile_not_taken_branch(rax);
1749 1778 }
1750 1779
1751 1780
1752 1781 void TemplateTable::if_icmp(Condition cc) {
1753 1782 transition(itos, vtos);
1754 1783 // assume branch is more often taken than not (loops use backward branches)
1755 1784 Label not_taken;
1756 1785 __ pop_i(rdx);
1757 1786 __ cmpl(rdx, rax);
1758 1787 __ jcc(j_not(cc), not_taken);
1759 1788 branch(false, false);
1760 1789 __ bind(not_taken);
1761 1790 __ profile_not_taken_branch(rax);
1762 1791 }
1763 1792
1764 1793
1765 1794 void TemplateTable::if_nullcmp(Condition cc) {
1766 1795 transition(atos, vtos);
1767 1796 // assume branch is more often taken than not (loops use backward branches)
1768 1797 Label not_taken;
1769 1798 __ testptr(rax, rax);
1770 1799 __ jcc(j_not(cc), not_taken);
1771 1800 branch(false, false);
1772 1801 __ bind(not_taken);
1773 1802 __ profile_not_taken_branch(rax);
1774 1803 }
1775 1804
1776 1805
1777 1806 void TemplateTable::if_acmp(Condition cc) {
1778 1807 transition(atos, vtos);
1779 1808 // assume branch is more often taken than not (loops use backward branches)
1780 1809 Label not_taken;
1781 1810 __ pop_ptr(rdx);
1782 1811 __ cmpptr(rdx, rax);
1783 1812 __ jcc(j_not(cc), not_taken);
1784 1813 branch(false, false);
1785 1814 __ bind(not_taken);
1786 1815 __ profile_not_taken_branch(rax);
1787 1816 }
1788 1817
1789 1818
1790 1819 void TemplateTable::ret() {
1791 1820 transition(vtos, vtos);
1792 1821 locals_index(rbx);
1793 1822 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1794 1823 __ profile_ret(rbx, rcx);
1795 1824 __ get_method(rax);
1796 1825 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1797 1826 __ lea(rsi, Address(rsi, rbx, Address::times_1,
1798 1827 constMethodOopDesc::codes_offset()));
1799 1828 __ dispatch_next(vtos);
1800 1829 }
1801 1830
1802 1831
1803 1832 void TemplateTable::wide_ret() {
1804 1833 transition(vtos, vtos);
1805 1834 locals_index_wide(rbx);
1806 1835 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
1807 1836 __ profile_ret(rbx, rcx);
1808 1837 __ get_method(rax);
1809 1838 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
1810 1839 __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
1811 1840 __ dispatch_next(vtos);
1812 1841 }
1813 1842
1814 1843
1815 1844 void TemplateTable::tableswitch() {
1816 1845 Label default_case, continue_execution;
1817 1846 transition(itos, vtos);
1818 1847 // compute the word-aligned operand address (from rsi) in rbx
1819 1848 __ lea(rbx, at_bcp(wordSize));
1820 1849 __ andptr(rbx, -wordSize);
1821 1850 // load lo & hi
1822 1851 __ movl(rcx, Address(rbx, 1 * wordSize));
1823 1852 __ movl(rdx, Address(rbx, 2 * wordSize));
1824 1853 __ bswapl(rcx);
1825 1854 __ bswapl(rdx);
1826 1855 // check against lo & hi
1827 1856 __ cmpl(rax, rcx);
1828 1857 __ jccb(Assembler::less, default_case);
1829 1858 __ cmpl(rax, rdx);
1830 1859 __ jccb(Assembler::greater, default_case);
1831 1860 // lookup dispatch offset
1832 1861 __ subl(rax, rcx);
1833 1862 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1834 1863 __ profile_switch_case(rax, rbx, rcx);
1835 1864 // continue execution
1836 1865 __ bind(continue_execution);
1837 1866 __ bswapl(rdx);
1838 1867 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1839 1868 __ addptr(rsi, rdx);
1840 1869 __ dispatch_only(vtos);
1841 1870 // handle default
1842 1871 __ bind(default_case);
1843 1872 __ profile_switch_default(rax);
1844 1873 __ movl(rdx, Address(rbx, 0));
1845 1874 __ jmp(continue_execution);
1846 1875 }
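// Operand layout that the aligned loads above depend on (tableswitch, per
// the JVM specification; all words are big-endian, hence the bswapl calls):
//
//   [0-3 pad bytes to a 4-byte boundary]          // rbx points here
//   int32 default_offset                          // rbx + 0
//   int32 low                                     // rbx + 1*wordSize
//   int32 high                                    // rbx + 2*wordSize
//   int32 offsets[high - low + 1]                 // rbx + 3*BytesPerInt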
1847 1876
1848 1877
1849 1878 void TemplateTable::lookupswitch() {
1850 1879 transition(itos, itos);
1851 1880 __ stop("lookupswitch bytecode should have been rewritten");
1852 1881 }
1853 1882
1854 1883
1855 1884 void TemplateTable::fast_linearswitch() {
1856 1885 transition(itos, vtos);
1857 1886 Label loop_entry, loop, found, continue_execution;
1858 1887 // bswapl rax, so we can avoid bswapping the table entries
1859 1888 __ bswapl(rax);
1860 1889 // compute the word-aligned operand address (from rsi) in rbx
1861 1890 __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1862 1891 __ andptr(rbx, -wordSize);
1863 1892 // set counter
1864 1893 __ movl(rcx, Address(rbx, wordSize));
1865 1894 __ bswapl(rcx);
1866 1895 __ jmpb(loop_entry);
1867 1896 // table search
1868 1897 __ bind(loop);
1869 1898 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
1870 1899 __ jccb(Assembler::equal, found);
1871 1900 __ bind(loop_entry);
1872 1901 __ decrementl(rcx);
1873 1902 __ jcc(Assembler::greaterEqual, loop);
1874 1903 // default case
1875 1904 __ profile_switch_default(rax);
1876 1905 __ movl(rdx, Address(rbx, 0));
1877 1906 __ jmpb(continue_execution);
1878 1907 // entry found -> get offset
1879 1908 __ bind(found);
1880 1909 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
1881 1910 __ profile_switch_case(rcx, rax, rbx);
1882 1911 // continue execution
1883 1912 __ bind(continue_execution);
1884 1913 __ bswapl(rdx);
1885 1914 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1886 1915 __ addptr(rsi, rdx);
1887 1916 __ dispatch_only(vtos);
1888 1917 }
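// In effect the loop above performs (sketch; the pairs are scanned from the
// last down to the first, each pair being two big-endian 32-bit words, which
// is why the table is indexed with Address::times_8):
//
//   for (int i = npairs - 1; i >= 0; i--) {
//     if (key == pairs[i].match) return pairs[i].offset;
//   }
//   return default_offset;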
1889 1918
1890 1919
1891 1920 void TemplateTable::fast_binaryswitch() {
1892 1921 transition(itos, vtos);
1893 1922 // Implementation using the following core algorithm:
1894 1923 //
1895 1924 // int binary_search(int key, LookupswitchPair* array, int n) {
1896 1925 // // Binary search according to "Methodik des Programmierens" by
1897 1926 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1898 1927 // int i = 0;
1899 1928 // int j = n;
1900 1929 // while (i+1 < j) {
1901 1930 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1902 1931 // // with Q: for all i: 0 <= i < n: key < a[i]
1903 1932 // // where a stands for the array and assuming that the (nonexistent)
1904 1933 // // element a[n] is infinitely big.
1905 1934 // int h = (i + j) >> 1;
1906 1935 // // i < h < j
1907 1936 // if (key < array[h].fast_match()) {
1908 1937 // j = h;
1909 1938 // } else {
1910 1939 // i = h;
1911 1940 // }
1912 1941 // }
1913 1942 // // R: a[i] <= key < a[i+1] or Q
1914 1943 // // (i.e., if key is within array, i is the correct index)
1915 1944 // return i;
1916 1945 // }
1917 1946
1918 1947 // register allocation
1919 1948 const Register key = rax; // already set (tosca)
1920 1949 const Register array = rbx;
1921 1950 const Register i = rcx;
1922 1951 const Register j = rdx;
1923 1952 const Register h = rdi; // needs to be restored
1924 1953 const Register temp = rsi;
1925 1954 // setup array
1926 1955 __ save_bcp();
1927 1956
1928 1957 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1929 1958 __ andptr(array, -wordSize);
1930 1959 // initialize i & j
1931 1960 __ xorl(i, i); // i = 0;
1932 1961 __ movl(j, Address(array, -wordSize)); // j = length(array);
1933 1962 // Convert j into native byteordering
1934 1963 __ bswapl(j);
1935 1964 // and start
1936 1965 Label entry;
1937 1966 __ jmp(entry);
1938 1967
1939 1968 // binary search loop
1940 1969 { Label loop;
1941 1970 __ bind(loop);
1942 1971 // int h = (i + j) >> 1;
1943 1972 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1944 1973 __ sarl(h, 1); // h = (i + j) >> 1;
1945 1974 // if (key < array[h].fast_match()) {
1946 1975 // j = h;
1947 1976 // } else {
1948 1977 // i = h;
1949 1978 // }
1950 1979 // Convert array[h].match to native byte-ordering before compare
1951 1980 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1952 1981 __ bswapl(temp);
1953 1982 __ cmpl(key, temp);
1954 1983 // j = h if (key < array[h].fast_match())
1955 1984 __ cmov32(Assembler::less , j, h);
1956 1985 // i = h if (key >= array[h].fast_match())
1957 1986 __ cmov32(Assembler::greaterEqual, i, h);
1958 1987 // while (i+1 < j)
1959 1988 __ bind(entry);
1960 1989 __ leal(h, Address(i, 1)); // i+1
1961 1990 __ cmpl(h, j); // i+1 < j
1962 1991 __ jcc(Assembler::less, loop);
1963 1992 }
1964 1993
1965 1994 // end of binary search, result index is i (must check again!)
1966 1995 Label default_case;
1967 1996 // Convert array[i].match to native byte-ordering before compare
1968 1997 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
1969 1998 __ bswapl(temp);
1970 1999 __ cmpl(key, temp);
1971 2000 __ jcc(Assembler::notEqual, default_case);
1972 2001
1973 2002 // entry found -> j = offset
1974 2003 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
1975 2004 __ profile_switch_case(i, key, array);
1976 2005 __ bswapl(j);
1977 2006 LP64_ONLY(__ movslq(j, j));
1978 2007 __ restore_bcp();
1979 2008 __ restore_locals(); // restore rdi
1980 2009 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1981 2010
1982 2011 __ addptr(rsi, j);
1983 2012 __ dispatch_only(vtos);
1984 2013
1985 2014 // default case -> j = default offset
1986 2015 __ bind(default_case);
1987 2016 __ profile_switch_default(i);
1988 2017 __ movl(j, Address(array, -2*wordSize));
1989 2018 __ bswapl(j);
1990 2019 LP64_ONLY(__ movslq(j, j));
1991 2020 __ restore_bcp();
1992 2021 __ restore_locals(); // restore rdi
1993 2022 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
1994 2023 __ addptr(rsi, j);
1995 2024 __ dispatch_only(vtos);
1996 2025 }
1997 2026
1998 2027
1999 2028 void TemplateTable::_return(TosState state) {
2000 2029 transition(state, state);
2001 2030 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2002 2031
2003 2032 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2004 2033 assert(state == vtos, "only valid state");
2005 2034 __ movptr(rax, aaddress(0));
2006 2035 __ load_klass(rdi, rax);
2007 2036 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
2008 2037 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2009 2038 Label skip_register_finalizer;
2010 2039 __ jcc(Assembler::zero, skip_register_finalizer);
2011 2040
2012 2041 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2013 2042
2014 2043 __ bind(skip_register_finalizer);
2015 2044 }
2016 2045
2017 2046 __ remove_activation(state, rsi);
2018 2047 __ jmp(rsi);
2019 2048 }
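// The finalizer check above corresponds to (pseudocode, not actual HotSpot
// API calls):
//
//   if (receiver->klass()->access_flags() & JVM_ACC_HAS_FINALIZER)
//     InterpreterRuntime::register_finalizer(receiver);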
2020 2049
2021 2050
2022 2051 // ----------------------------------------------------------------------------
2023 2052 // Volatile variables demand their effects be made known to all CPUs in
2024 2053 // order. Store buffers on most chips allow reads & writes to reorder; the
2025 2054 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2026 2055 // memory barrier (i.e., it's not sufficient that the interpreter does not
2027 2056 // reorder volatile references; the hardware must also not reorder them).
2028 2057 //
2029 2058 // According to the new Java Memory Model (JMM):
2030 2059 // (1) All volatiles are serialized wrt each other.
2031 2060 // ALSO reads & writes act as acquire & release, so:
2032 2061 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2033 2062 // the read float up to before the read. It's OK for non-volatile memory refs
2034 2063 // that happen before the volatile read to float down below it.
2035 2064 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2036 2065 // that happen BEFORE the write float down to after the write. It's OK for
2037 2066 // non-volatile memory refs that happen after the volatile write to float up
2038 2067 // before it.
2039 2068 //
2040 2069 // We only put in barriers around volatile refs (they are expensive), not
2041 2070 // _between_ memory refs (that would require us to track the flavor of the
2042 2071 // previous memory refs). Requirements (2) and (3) require some barriers
2043 2072 // before volatile stores and after volatile loads. These nearly cover
2044 2073 // requirement (1) but miss the volatile-store-volatile-load case. This final
2045 2074 // case is placed after volatile-stores although it could just as well go
2046 2075 // before volatile-loads.
2047 2076 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2048 2077 // Helper function to insert an is-volatile test and memory barrier
2049 2078 if( !os::is_MP() ) return; // Not needed on single CPU
2050 2079 __ membar(order_constraint);
2051 2080 }
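// On x86 only the StoreLoad case requires a real fence, so callers wrap
// volatile stores roughly like this (illustrative sketch):
//
//   store_field(obj, off, value);                       // the volatile store
//   volatile_barrier(Assembler::Membar_mask_bits(
//       Assembler::StoreLoad | Assembler::StoreStore)); // no-op on UP
//
// Volatile loads get no explicit barrier here (see the commented-out
// volatile_barrier() calls below).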
2052 2081
2053 2082 void TemplateTable::resolve_cache_and_index(int byte_no,
2054 2083 Register result,
2055 2084 Register Rcache,
2056 2085 Register index,
2057 2086 size_t index_size) {
2058 2087 Register temp = rbx;
2059 2088
2060 2089 assert_different_registers(result, Rcache, index, temp);
2061 2090
2062 2091 Label resolved;
2063 - __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2064 2092 if (byte_no == f1_oop) {
2065 2093 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2066 2094 // This kind of CP cache entry does not need to match the flags byte, because
2067 2095 // there is a 1-1 relation between bytecode type and CP entry type.
2068 2096 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
2097 + __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2069 2098 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2070 2099 __ testptr(result, result);
2071 2100 __ jcc(Assembler::notEqual, resolved);
2072 2101 } else {
2073 2102 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2074 2103 assert(result == noreg, ""); //else change code for setting result
2075 - const int shift_count = (1 + byte_no)*BitsPerByte;
2076 - __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
2077 - __ shrl(temp, shift_count);
2078 - // have we resolved this bytecode?
2079 - __ andl(temp, 0xFF);
2080 - __ cmpl(temp, (int)bytecode());
2104 + __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2105 + __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2081 2106 __ jcc(Assembler::equal, resolved);
2082 2107 }
2083 2108
2084 2109 // resolve first time through
2085 2110 address entry;
2086 2111 switch (bytecode()) {
2087 2112 case Bytecodes::_getstatic : // fall through
2088 2113 case Bytecodes::_putstatic : // fall through
2089 2114 case Bytecodes::_getfield : // fall through
2090 2115 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2091 2116 case Bytecodes::_invokevirtual : // fall through
2092 2117 case Bytecodes::_invokespecial : // fall through
2093 2118 case Bytecodes::_invokestatic : // fall through
2094 2119 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2095 2120 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2096 2121 case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2097 2122 case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2098 2123 default : ShouldNotReachHere(); break;
2099 2124 }
2100 2125 __ movl(temp, (int)bytecode());
2101 2126 __ call_VM(noreg, entry, temp);
2102 2127 // Update registers with resolved info
2103 2128 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2104 2129 if (result != noreg)
2105 2130 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
2106 2131 __ bind(resolved);
2107 2132 }
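// Conceptual equivalent of the resolved-check above (a sketch; the entry
// accessor shown is illustrative, and the real resolution work happens in
// the InterpreterRuntime entry points):
//
//   ConstantPoolCacheEntry* e = &cp_cache[index];
//   if (e->bytecode(byte_no) != bytecode()) {          // not yet resolved
//     InterpreterRuntime::resolve_xxx(bytecode());     // fills in the entry
//     e = &cp_cache[index];                            // reload
//   }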
2108 2133
2109 2134
2110 2135 // The cache and index registers must be set before the call
2111 2136 void TemplateTable::load_field_cp_cache_entry(Register obj,
2112 2137 Register cache,
2113 2138 Register index,
2114 2139 Register off,
2115 2140 Register flags,
2116 2141 bool is_static = false) {
2117 2142 assert_different_registers(cache, index, flags, off);
2118 2143
2119 2144 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2120 2145 // Field offset
2121 2146 __ movptr(off, Address(cache, index, Address::times_ptr,
2122 2147 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2123 2148 // Flags
2124 2149 __ movl(flags, Address(cache, index, Address::times_ptr,
2125 2150 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2126 2151
2127 2152 // klass overwrite register
2128 2153 if (is_static) {
2129 2154 __ movptr(obj, Address(cache, index, Address::times_ptr,
2130 2155 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2131 2156 }
2132 2157 }
2133 2158
2134 2159 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2135 2160 Register method,
2136 2161 Register itable_index,
2137 2162 Register flags,
2138 2163 bool is_invokevirtual,
2139 2164 bool is_invokevfinal /*unused*/,
2140 2165 bool is_invokedynamic) {
2141 2166 // setup registers
2142 2167 const Register cache = rcx;
2143 2168 const Register index = rdx;
2144 2169 assert_different_registers(method, flags);
2145 2170 assert_different_registers(method, cache, index);
2146 2171 assert_different_registers(itable_index, flags);
2147 2172 assert_different_registers(itable_index, cache, index);
2148 2173 // determine constant pool cache field offsets
2149 2174 const int method_offset = in_bytes(
2150 2175 constantPoolCacheOopDesc::base_offset() +
2151 2176 (is_invokevirtual
2152 2177 ? ConstantPoolCacheEntry::f2_offset()
2153 2178 : ConstantPoolCacheEntry::f1_offset()
2154 2179 )
2155 2180 );
2156 2181 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2157 2182 ConstantPoolCacheEntry::flags_offset());
2158 2183 // access constant pool cache fields
2159 2184 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
2160 2185 ConstantPoolCacheEntry::f2_offset());
2161 2186
2162 2187 if (byte_no == f1_oop) {
2163 2188 // Resolved f1_oop goes directly into 'method' register.
2164 2189 assert(is_invokedynamic, "");
2165 2190 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
2166 2191 } else {
2167 2192 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2168 2193 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2169 2194 }
2170 2195 if (itable_index != noreg) {
2171 2196 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2172 2197 }
2173 2198 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2174 2199 }
2175 2200
2176 2201
2177 2202 // The cache and index registers are expected to be set before the call.
2178 2203 // Correct values of the cache and index registers are preserved.
2179 2204 void TemplateTable::jvmti_post_field_access(Register cache,
2180 2205 Register index,
2181 2206 bool is_static,
2182 2207 bool has_tos) {
2183 2208 if (JvmtiExport::can_post_field_access()) {
2184 2209 // Check to see if a field access watch has been set before we take
2185 2210 // the time to call into the VM.
2186 2211 Label L1;
2187 2212 assert_different_registers(cache, index, rax);
2188 2213 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2189 2214 __ testl(rax,rax);
2190 2215 __ jcc(Assembler::zero, L1);
2191 2216
2192 2217 // cache entry pointer
2193 2218 __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
2194 2219 __ shll(index, LogBytesPerWord);
2195 2220 __ addptr(cache, index);
2196 2221 if (is_static) {
2197 2222 __ xorptr(rax, rax); // NULL object reference
2198 2223 } else {
2199 2224 __ pop(atos); // Get the object
2200 2225 __ verify_oop(rax);
2201 2226 __ push(atos); // Restore stack state
2202 2227 }
2203 2228 // rax,: object pointer or NULL
2204 2229 // cache: cache entry pointer
2205 2230 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2206 2231 rax, cache);
2207 2232 __ get_cache_and_index_at_bcp(cache, index, 1);
2208 2233 __ bind(L1);
2209 2234 }
2210 2235 }
2211 2236
2212 2237 void TemplateTable::pop_and_check_object(Register r) {
2213 2238 __ pop_ptr(r);
2214 2239 __ null_check(r); // for field access must check obj.
2215 2240 __ verify_oop(r);
2216 2241 }
2217 2242
2218 2243 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2219 2244 transition(vtos, vtos);
2220 2245
2221 2246 const Register cache = rcx;
2222 2247 const Register index = rdx;
2223 2248 const Register obj = rcx;
2224 2249 const Register off = rbx;
2225 2250 const Register flags = rax;
2226 2251
2227 2252 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2228 2253 jvmti_post_field_access(cache, index, is_static, false);
2229 2254 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2230 2255
2231 2256 if (!is_static) pop_and_check_object(obj);
2232 2257
2233 2258 const Address lo(obj, off, Address::times_1, 0*wordSize);
2234 2259 const Address hi(obj, off, Address::times_1, 1*wordSize);
2235 2260
2236 2261 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2237 2262
2238 2263 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2239 2264 assert(btos == 0, "change code, btos != 0");
2240 2265 // btos
2241 2266 __ andptr(flags, 0x0f);
2242 2267 __ jcc(Assembler::notZero, notByte);
2243 2268
2244 2269 __ load_signed_byte(rax, lo );
2245 2270 __ push(btos);
2246 2271 // Rewrite bytecode to be faster
2247 2272 if (!is_static) {
2248 2273 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2249 2274 }
2250 2275 __ jmp(Done);
2251 2276
2252 2277 __ bind(notByte);
2253 2278 // itos
2254 2279 __ cmpl(flags, itos );
2255 2280 __ jcc(Assembler::notEqual, notInt);
2256 2281
2257 2282 __ movl(rax, lo );
2258 2283 __ push(itos);
2259 2284 // Rewrite bytecode to be faster
2260 2285 if (!is_static) {
2261 2286 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2262 2287 }
2263 2288 __ jmp(Done);
2264 2289
2265 2290 __ bind(notInt);
2266 2291 // atos
2267 2292 __ cmpl(flags, atos );
2268 2293 __ jcc(Assembler::notEqual, notObj);
2269 2294
2270 2295 __ movl(rax, lo );
2271 2296 __ push(atos);
2272 2297 if (!is_static) {
2273 2298 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2274 2299 }
2275 2300 __ jmp(Done);
2276 2301
2277 2302 __ bind(notObj);
2278 2303 // ctos
2279 2304 __ cmpl(flags, ctos );
2280 2305 __ jcc(Assembler::notEqual, notChar);
2281 2306
2282 2307 __ load_unsigned_short(rax, lo );
2283 2308 __ push(ctos);
2284 2309 if (!is_static) {
2285 2310 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2286 2311 }
2287 2312 __ jmp(Done);
2288 2313
2289 2314 __ bind(notChar);
2290 2315 // stos
2291 2316 __ cmpl(flags, stos );
2292 2317 __ jcc(Assembler::notEqual, notShort);
2293 2318
2294 2319 __ load_signed_short(rax, lo );
2295 2320 __ push(stos);
2296 2321 if (!is_static) {
2297 2322 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2298 2323 }
2299 2324 __ jmp(Done);
2300 2325
2301 2326 __ bind(notShort);
2302 2327 // ltos
2303 2328 __ cmpl(flags, ltos );
2304 2329 __ jcc(Assembler::notEqual, notLong);
2305 2330
2306 2331 // Generate code as if volatile. There just aren't enough registers to
2307 2332 // save that information and this code is faster than the test.
2308 2333 __ fild_d(lo); // Must load atomically
2309 2334 __ subptr(rsp,2*wordSize); // Make space for store
2310 2335 __ fistp_d(Address(rsp,0));
2311 2336 __ pop(rax);
2312 2337 __ pop(rdx);
2313 2338
2314 2339 __ push(ltos);
2315 2340 // Don't rewrite to _fast_lgetfield for potential volatile case.
2316 2341 __ jmp(Done);
2317 2342
2318 2343 __ bind(notLong);
2319 2344 // ftos
2320 2345 __ cmpl(flags, ftos );
2321 2346 __ jcc(Assembler::notEqual, notFloat);
2322 2347
2323 2348 __ fld_s(lo);
2324 2349 __ push(ftos);
2325 2350 if (!is_static) {
2326 2351 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2327 2352 }
2328 2353 __ jmp(Done);
2329 2354
2330 2355 __ bind(notFloat);
2331 2356 // dtos
2332 2357 __ cmpl(flags, dtos );
2333 2358 __ jcc(Assembler::notEqual, notDouble);
2334 2359
2335 2360 __ fld_d(lo);
2336 2361 __ push(dtos);
2337 2362 if (!is_static) {
2338 2363 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2339 2364 }
2340 2365 __ jmpb(Done);
2341 2366
2342 2367 __ bind(notDouble);
2343 2368
2344 2369 __ stop("Bad state");
2345 2370
2346 2371 __ bind(Done);
2347 2372 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2348 2373 // volatile_barrier( );
2349 2374 }
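// Net effect for a resolved, non-volatile int field (illustrative sketch of
// the itos arm above):
//
//   jint value = *(jint*)((intptr_t)obj + off);   // then push(itos)
//
// 64-bit (ltos) loads go through fild/fistp so they remain atomic on 32-bit
// x86, and are never rewritten to _fast_lgetfield in case the field is
// volatile.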
2350 2375
2351 2376
2352 2377 void TemplateTable::getfield(int byte_no) {
2353 2378 getfield_or_static(byte_no, false);
2354 2379 }
2355 2380
2356 2381
2357 2382 void TemplateTable::getstatic(int byte_no) {
2358 2383 getfield_or_static(byte_no, true);
2359 2384 }
2360 2385
2361 2386 // The cache and index registers are expected to be set before the call.
2362 2387 // The function may destroy various registers, just not the cache and index registers.
2363 2388 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2364 2389
2365 2390 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2366 2391
2367 2392 if (JvmtiExport::can_post_field_modification()) {
2368 2393 // Check to see if a field modification watch has been set before we take
2369 2394 // the time to call into the VM.
2370 2395 Label L1;
2371 2396 assert_different_registers(cache, index, rax);
2372 2397 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2373 2398 __ testl(rax, rax);
2374 2399 __ jcc(Assembler::zero, L1);
2375 2400
2376 2401 // The cache and index registers have already been set.
2377 2402 // This would allow us to eliminate this call, but the cache and index
2378 2403 // registers would then have to be used consistently after this line.
2379 2404 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2380 2405
2381 2406 if (is_static) {
2382 2407 // Life is simple. Null out the object pointer.
2383 2408 __ xorptr(rbx, rbx);
2384 2409 } else {
2385 2410 // Life is harder. The stack holds the value on top, followed by the object.
2386 2411 // We don't know the size of the value, though; it could be one or two words
2387 2412 // depending on its type. As a result, we must find the type to determine where
2388 2413 // the object is.
2389 2414 Label two_word, valsize_known;
2390 2415 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2391 2416 ConstantPoolCacheEntry::flags_offset())));
2392 2417 __ mov(rbx, rsp);
2393 2418 __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
2394 2419 // Make sure we don't need to mask rcx for tosBits after the above shift
2395 2420 ConstantPoolCacheEntry::verify_tosBits();
2396 2421 __ cmpl(rcx, ltos);
2397 2422 __ jccb(Assembler::equal, two_word);
2398 2423 __ cmpl(rcx, dtos);
2399 2424 __ jccb(Assembler::equal, two_word);
2400 2425 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2401 2426 __ jmpb(valsize_known);
2402 2427
2403 2428 __ bind(two_word);
2404 2429 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2405 2430
2406 2431 __ bind(valsize_known);
2407 2432 // setup object pointer
2408 2433 __ movptr(rbx, Address(rbx, 0));
2409 2434 }
2410 2435 // cache entry pointer
2411 2436 __ addptr(rax, in_bytes(cp_base_offset));
2412 2437 __ shll(rdx, LogBytesPerWord);
2413 2438 __ addptr(rax, rdx);
2414 2439 // object (tos)
2415 2440 __ mov(rcx, rsp);
2416 2441 // rbx,: object pointer set up above (NULL if static)
2417 2442 // rax,: cache entry pointer
2418 2443 // rcx: jvalue object on the stack
2419 2444 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2420 2445 rbx, rax, rcx);
2421 2446 __ get_cache_and_index_at_bcp(cache, index, 1);
2422 2447 __ bind(L1);
2423 2448 }
2424 2449 }
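// Finding the object under the value (sketch of the size logic above;
// names illustrative):
//
//   int slots = (tos_state == ltos || tos_state == dtos) ? 2 : 1;
//   oop obj = *(oop*)(rsp + Interpreter::expr_offset_in_bytes(slots));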
2425 2450
2426 2451
2427 2452 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2428 2453 transition(vtos, vtos);
2429 2454
2430 2455 const Register cache = rcx;
2431 2456 const Register index = rdx;
2432 2457 const Register obj = rcx;
2433 2458 const Register off = rbx;
2434 2459 const Register flags = rax;
2435 2460
2436 2461 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
2437 2462 jvmti_post_field_mod(cache, index, is_static);
2438 2463 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2439 2464
2440 2465 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2441 2466 // volatile_barrier( );
2442 2467
2443 2468 Label notVolatile, Done;
2444 2469 __ movl(rdx, flags);
2445 2470 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2446 2471 __ andl(rdx, 0x1);
2447 2472
2448 2473 // field addresses
2449 2474 const Address lo(obj, off, Address::times_1, 0*wordSize);
2450 2475 const Address hi(obj, off, Address::times_1, 1*wordSize);
2451 2476
2452 2477 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2453 2478
2454 2479 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2455 2480 assert(btos == 0, "change code, btos != 0");
2456 - // btos
2457 2481 __ andl(flags, 0x0f);
2458 2482 __ jcc(Assembler::notZero, notByte);
2459 2483
2460 - __ pop(btos);
2461 - if (!is_static) pop_and_check_object(obj);
2462 - __ movb(lo, rax );
2463 - if (!is_static) {
2464 - patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
2484 + // btos
2485 + {
2486 + __ pop(btos);
2487 + if (!is_static) pop_and_check_object(obj);
2488 + __ movb(lo, rax);
2489 + if (!is_static) {
2490 + patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
2491 + }
2492 + __ jmp(Done);
2465 2493 }
2466 - __ jmp(Done);
2467 2494
2468 2495 __ bind(notByte);
2469 - // itos
2470 - __ cmpl(flags, itos );
2496 + __ cmpl(flags, itos);
2471 2497 __ jcc(Assembler::notEqual, notInt);
2472 2498
2473 - __ pop(itos);
2474 - if (!is_static) pop_and_check_object(obj);
2475 -
2476 - __ movl(lo, rax );
2477 - if (!is_static) {
2478 - patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
2499 + // itos
2500 + {
2501 + __ pop(itos);
2502 + if (!is_static) pop_and_check_object(obj);
2503 + __ movl(lo, rax);
2504 + if (!is_static) {
2505 + patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
2506 + }
2507 + __ jmp(Done);
2479 2508 }
2480 - __ jmp(Done);
2481 2509
2482 2510 __ bind(notInt);
2483 - // atos
2484 - __ cmpl(flags, atos );
2511 + __ cmpl(flags, atos);
2485 2512 __ jcc(Assembler::notEqual, notObj);
2486 2513
2487 - __ pop(atos);
2488 - if (!is_static) pop_and_check_object(obj);
2489 -
2490 - do_oop_store(_masm, lo, rax, _bs->kind(), false);
2491 -
2492 - if (!is_static) {
2493 - patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
2514 + // atos
2515 + {
2516 + __ pop(atos);
2517 + if (!is_static) pop_and_check_object(obj);
2518 + do_oop_store(_masm, lo, rax, _bs->kind(), false);
2519 + if (!is_static) {
2520 + patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
2521 + }
2522 + __ jmp(Done);
2494 2523 }
2495 2524
2496 - __ jmp(Done);
2497 -
2498 2525 __ bind(notObj);
2499 - // ctos
2500 - __ cmpl(flags, ctos );
2526 + __ cmpl(flags, ctos);
2501 2527 __ jcc(Assembler::notEqual, notChar);
2502 2528
2503 - __ pop(ctos);
2504 - if (!is_static) pop_and_check_object(obj);
2505 - __ movw(lo, rax );
2506 - if (!is_static) {
2507 - patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
2529 + // ctos
2530 + {
2531 + __ pop(ctos);
2532 + if (!is_static) pop_and_check_object(obj);
2533 + __ movw(lo, rax);
2534 + if (!is_static) {
2535 + patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
2536 + }
2537 + __ jmp(Done);
2508 2538 }
2509 - __ jmp(Done);
2510 2539
2511 2540 __ bind(notChar);
2512 - // stos
2513 - __ cmpl(flags, stos );
2541 + __ cmpl(flags, stos);
2514 2542 __ jcc(Assembler::notEqual, notShort);
2515 2543
2516 - __ pop(stos);
2517 - if (!is_static) pop_and_check_object(obj);
2518 - __ movw(lo, rax );
2519 - if (!is_static) {
2520 - patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
2544 + // stos
2545 + {
2546 + __ pop(stos);
2547 + if (!is_static) pop_and_check_object(obj);
2548 + __ movw(lo, rax);
2549 + if (!is_static) {
2550 + patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
2551 + }
2552 + __ jmp(Done);
2521 2553 }
2522 - __ jmp(Done);
2523 2554
2524 2555 __ bind(notShort);
2525 - // ltos
2526 - __ cmpl(flags, ltos );
2556 + __ cmpl(flags, ltos);
2527 2557 __ jcc(Assembler::notEqual, notLong);
2528 2558
2529 - Label notVolatileLong;
2530 - __ testl(rdx, rdx);
2531 - __ jcc(Assembler::zero, notVolatileLong);
2532 -
2533 - __ pop(ltos); // overwrites rdx, do this after testing volatile.
2534 - if (!is_static) pop_and_check_object(obj);
2535 -
2536 - // Replace with real volatile test
2537 - __ push(rdx);
2538 - __ push(rax); // Must update atomically with FIST
2539 - __ fild_d(Address(rsp,0)); // So load into FPU register
2540 - __ fistp_d(lo); // and put into memory atomically
2541 - __ addptr(rsp, 2*wordSize);
2542 - // volatile_barrier();
2543 - volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2544 - Assembler::StoreStore));
2545 - // Don't rewrite volatile version
2546 - __ jmp(notVolatile);
2547 -
2548 - __ bind(notVolatileLong);
2549 -
2550 - __ pop(ltos); // overwrites rdx
2551 - if (!is_static) pop_and_check_object(obj);
2552 - NOT_LP64(__ movptr(hi, rdx));
2553 - __ movptr(lo, rax);
2554 - if (!is_static) {
2555 - patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
2559 + // ltos
2560 + {
2561 + Label notVolatileLong;
2562 + __ testl(rdx, rdx);
2563 + __ jcc(Assembler::zero, notVolatileLong);
2564 +
2565 + __ pop(ltos); // overwrites rdx, do this after testing volatile.
2566 + if (!is_static) pop_and_check_object(obj);
2567 +
2568 + // Replace with real volatile test
2569 + __ push(rdx);
2570 + __ push(rax); // Must update atomically with FIST
2571 + __ fild_d(Address(rsp,0)); // So load into FPU register
2572 + __ fistp_d(lo); // and put into memory atomically
2573 + __ addptr(rsp, 2*wordSize);
2574 + // volatile_barrier();
2575 + volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2576 + Assembler::StoreStore));
2577 + // Don't rewrite volatile version
2578 + __ jmp(notVolatile);
2579 +
2580 + __ bind(notVolatileLong);
2581 +
2582 + __ pop(ltos); // overwrites rdx
2583 + if (!is_static) pop_and_check_object(obj);
2584 + NOT_LP64(__ movptr(hi, rdx));
2585 + __ movptr(lo, rax);
2586 + if (!is_static) {
2587 + patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
2588 + }
2589 + __ jmp(notVolatile);
2556 2590 }
2557 - __ jmp(notVolatile);
2558 2591
2559 2592 __ bind(notLong);
2560 - // ftos
2561 - __ cmpl(flags, ftos );
2593 + __ cmpl(flags, ftos);
2562 2594 __ jcc(Assembler::notEqual, notFloat);
2563 2595
2564 - __ pop(ftos);
2565 - if (!is_static) pop_and_check_object(obj);
2566 - __ fstp_s(lo);
2567 - if (!is_static) {
2568 - patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
2596 + // ftos
2597 + {
2598 + __ pop(ftos);
2599 + if (!is_static) pop_and_check_object(obj);
2600 + __ fstp_s(lo);
2601 + if (!is_static) {
2602 + patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
2603 + }
2604 + __ jmp(Done);
2569 2605 }
2570 - __ jmp(Done);
2571 2606
2572 2607 __ bind(notFloat);
2573 - // dtos
2574 - __ cmpl(flags, dtos );
2608 +#ifdef ASSERT
2609 + __ cmpl(flags, dtos);
2575 2610 __ jcc(Assembler::notEqual, notDouble);
2611 +#endif
2576 2612
2577 - __ pop(dtos);
2578 - if (!is_static) pop_and_check_object(obj);
2579 - __ fstp_d(lo);
2580 - if (!is_static) {
2581 - patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
2613 + // dtos
2614 + {
2615 + __ pop(dtos);
2616 + if (!is_static) pop_and_check_object(obj);
2617 + __ fstp_d(lo);
2618 + if (!is_static) {
2619 + patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
2620 + }
2621 + __ jmp(Done);
2582 2622 }
2583 - __ jmp(Done);
2584 2623
2624 +#ifdef ASSERT
2585 2625 __ bind(notDouble);
2586 -
2587 2626 __ stop("Bad state");
2627 +#endif
2588 2628
2589 2629 __ bind(Done);
2590 2630
2591 2631 // Check for volatile store
2592 2632 __ testl(rdx, rdx);
2593 2633 __ jcc(Assembler::zero, notVolatile);
2594 2634 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2595 2635 Assembler::StoreStore));
2596 2636 __ bind(notVolatile);
2597 2637 }
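// For a volatile long the store above stays atomic on 32-bit x86 by
// bouncing through the FPU, roughly (pseudo-assembly; see the ltos arm
// above for the emitted sequence):
//
//   push rdx; push rax;             // the 64-bit value
//   fild_d [rsp];                   // one 64-bit load into st(0)
//   fistp_d field;                  // one 64-bit store to the field
//   membar(StoreLoad|StoreStore);   // order wrt subsequent loads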
2598 2638
2599 2639
2600 2640 void TemplateTable::putfield(int byte_no) {
2601 2641 putfield_or_static(byte_no, false);
2602 2642 }
2603 2643
2604 2644
2605 2645 void TemplateTable::putstatic(int byte_no) {
2606 2646 putfield_or_static(byte_no, true);
2607 2647 }
2608 2648
2609 2649 void TemplateTable::jvmti_post_fast_field_mod() {
2610 2650 if (JvmtiExport::can_post_field_modification()) {
2611 2651 // Check to see if a field modification watch has been set before we take
2612 2652 // the time to call into the VM.
2613 2653 Label L2;
2614 2654 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2615 2655 __ testl(rcx,rcx);
2616 2656 __ jcc(Assembler::zero, L2);
2617 2657 __ pop_ptr(rbx); // copy the object pointer from tos
2618 2658 __ verify_oop(rbx);
2619 2659 __ push_ptr(rbx); // put the object pointer back on tos
2620 2660 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
2621 2661 __ mov(rcx, rsp);
2622 2662 __ push_ptr(rbx); // save object pointer so we can steal rbx,
2623 2663 __ xorptr(rbx, rbx);
2624 2664 const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
2625 2665 const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
2626 2666 switch (bytecode()) { // load values into the jvalue object
2627 2667 case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
2628 2668 case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
2629 2669 case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
2630 2670 case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
2631 2671 case Bytecodes::_fast_lputfield:
2632 2672 NOT_LP64(__ movptr(hi_value, rdx));
2633 2673 __ movptr(lo_value, rax);
2634 2674 break;
2635 2675
2636 2676 // need to call fld_s() after fstp_s() to restore the value for below
2637 2677 case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
2638 2678
2639 2679 // need to call fld_d() after fstp_d() to restore the value for below
2640 2680 case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
2641 2681
2642 2682 // since rcx is not an object we don't call store_check() here
2643 2683 case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
2644 2684
2645 2685 default: ShouldNotReachHere();
2646 2686 }
2647 2687 __ pop_ptr(rbx); // restore copy of object pointer
2648 2688
2649 2689 // Save rax, and sometimes rdx because call_VM() will clobber them,
2650 2690 // then use them for JVM/DI purposes
2651 2691 __ push(rax);
2652 2692 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2653 2693 // access constant pool cache entry
2654 2694 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2655 2695 __ verify_oop(rbx);
2656 2696 // rbx,: object pointer copied above
2657 2697 // rax,: cache entry pointer
2658 2698 // rcx: jvalue object on the stack
2659 2699 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2660 2700 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
2661 2701 __ pop(rax); // restore lower value
2662 2702 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
2663 2703 __ bind(L2);
2664 2704 }
2665 2705 }
2666 2706
2667 2707 void TemplateTable::fast_storefield(TosState state) {
2668 2708 transition(state, vtos);
2669 2709
2670 2710 ByteSize base = constantPoolCacheOopDesc::base_offset();
2671 2711
2672 2712 jvmti_post_fast_field_mod();
2673 2713
2674 2714 // access constant pool cache
2675 2715 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2676 2716
2677 2717 // test for volatile with rdx but rdx is tos register for lputfield.
2678 2718 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2679 2719 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2680 2720 ConstantPoolCacheEntry::flags_offset())));
2681 2721
2682 2722 // replace index with field offset from cache entry
2683 2723 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2684 2724
2685 2725 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2686 2726 // volatile_barrier( );
2687 2727
2688 2728 Label notVolatile, Done;
2689 2729 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
2690 2730 __ andl(rdx, 0x1);
2691 2731 // Check for volatile store
2692 2732 __ testl(rdx, rdx);
2693 2733 __ jcc(Assembler::zero, notVolatile);
2694 2734
2695 2735 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2696 2736
2697 2737 // Get object from stack
2698 2738 pop_and_check_object(rcx);
2699 2739
2700 2740 // field addresses
2701 2741 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2702 2742 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2703 2743
2704 2744 // access field
2705 2745 switch (bytecode()) {
2706 2746 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2707 2747 case Bytecodes::_fast_sputfield: // fall through
2708 2748 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2709 2749 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2710 2750 case Bytecodes::_fast_lputfield:
2711 2751 NOT_LP64(__ movptr(hi, rdx));
2712 2752 __ movptr(lo, rax);
2713 2753 break;
2714 2754 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2715 2755 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2716 2756 case Bytecodes::_fast_aputfield: {
2717 2757 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2718 2758 break;
2719 2759 }
2720 2760 default:
2721 2761 ShouldNotReachHere();
2722 2762 }
2723 2763
2724 2764 Label done;
2725 2765 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2726 2766 Assembler::StoreStore));
2727 2767 // Barriers are so large that a short branch doesn't reach!
2728 2768 __ jmp(done);
2729 2769
2730 2770 // Same code as above, but don't need rdx to test for volatile.
2731 2771 __ bind(notVolatile);
2732 2772
2733 2773 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2734 2774
2735 2775 // Get object from stack
2736 2776 pop_and_check_object(rcx);
2737 2777
2738 2778 // access field
2739 2779 switch (bytecode()) {
2740 2780 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2741 2781 case Bytecodes::_fast_sputfield: // fall through
2742 2782 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2743 2783 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2744 2784 case Bytecodes::_fast_lputfield:
2745 2785 NOT_LP64(__ movptr(hi, rdx));
2746 2786 __ movptr(lo, rax);
2747 2787 break;
2748 2788 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2749 2789 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2750 2790 case Bytecodes::_fast_aputfield: {
2751 2791 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2752 2792 break;
2753 2793 }
2754 2794 default:
2755 2795 ShouldNotReachHere();
2756 2796 }
2757 2797 __ bind(done);
2758 2798 }
2759 2799
2760 2800
2761 2801 void TemplateTable::fast_accessfield(TosState state) {
2762 2802 transition(atos, state);
2763 2803
2764 2804 // do the JVMTI work here to avoid disturbing the register state below
2765 2805 if (JvmtiExport::can_post_field_access()) {
2766 2806 // Check to see if a field access watch has been set before we take
2767 2807 // the time to call into the VM.
2768 2808 Label L1;
2769 2809 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2770 2810 __ testl(rcx,rcx);
2771 2811 __ jcc(Assembler::zero, L1);
2772 2812 // access constant pool cache entry
2773 2813 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2774 2814 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2775 2815 __ verify_oop(rax);
2776 2816 // rax,: object pointer copied above
2777 2817 // rcx: cache entry pointer
2778 2818 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2779 2819 __ pop_ptr(rax); // restore object pointer
2780 2820 __ bind(L1);
2781 2821 }
2782 2822
2783 2823 // access constant pool cache
2784 2824 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2785 2825 // replace index with field offset from cache entry
2786 2826 __ movptr(rbx, Address(rcx,
2787 2827 rbx,
2788 2828 Address::times_ptr,
2789 2829 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2790 2830
2791 2831
2792 2832 // rax,: object
2793 2833 __ verify_oop(rax);
2794 2834 __ null_check(rax);
2795 2835 // field addresses
2796 2836 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2797 2837 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2798 2838
2799 2839 // access field
2800 2840 switch (bytecode()) {
2801 2841 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2802 2842 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2803 2843 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2804 2844 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2805 2845 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2806 2846 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2807 2847 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2808 2848 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2809 2849 default:
2810 2850 ShouldNotReachHere();
2811 2851 }
2812 2852
2813 2853 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2814 2854 // volatile_barrier( );
2815 2855 }
2816 2856
2817 2857 void TemplateTable::fast_xaccess(TosState state) {
2818 2858 transition(vtos, state);
2819 2859 // get receiver
2820 2860 __ movptr(rax, aaddress(0));
2821 2861 // access constant pool cache
2822 2862 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2823 2863 __ movptr(rbx, Address(rcx,
2824 2864 rdx,
2825 2865 Address::times_ptr,
2826 2866 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2827 2867 // make sure exception is reported in correct bcp range (getfield is next instruction)
2828 2868 __ increment(rsi);
2829 2869 __ null_check(rax);
2830 2870 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2831 2871 if (state == itos) {
2832 2872 __ movl(rax, lo);
2833 2873 } else if (state == atos) {
2834 2874 __ movptr(rax, lo);
2835 2875 __ verify_oop(rax);
2836 2876 } else if (state == ftos) {
2837 2877 __ fld_s(lo);
2838 2878 } else {
2839 2879 ShouldNotReachHere();
2840 2880 }
2841 2881 __ decrement(rsi);
2842 2882 }
2843 2883
2844 2884
2845 2885
2846 2886 //----------------------------------------------------------------------------------------------------
2847 2887 // Calls
2848 2888
2849 2889 void TemplateTable::count_calls(Register method, Register temp) {
2850 2890 // implemented elsewhere
2851 2891 ShouldNotReachHere();
2852 2892 }
2853 2893
2854 2894
2855 2895 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
2856 2896 // determine flags
2857 2897 Bytecodes::Code code = bytecode();
2858 2898 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2859 2899 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2860 2900 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2861 2901 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2862 2902 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
2863 2903 const bool receiver_null_check = is_invokespecial;
2864 2904 const bool save_flags = is_invokeinterface || is_invokevirtual;
2865 2905 // setup registers & access constant pool cache
2866 2906 const Register recv = rcx;
2867 2907 const Register flags = rdx;
2868 2908 assert_different_registers(method, index, recv, flags);
2869 2909
2870 2910 // save 'interpreter return address'
2871 2911 __ save_bcp();
2872 2912
2873 2913 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2874 2914
2875 2915 // load receiver if needed (note: no return address pushed yet)
2876 2916 if (load_receiver) {
2877 2917 assert(!is_invokedynamic, "");
2878 2918 __ movl(recv, flags);
2879 2919 __ andl(recv, 0xFF);
2880 2920 // recv count is 0 based?
2881 2921 Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
2882 2922 __ movptr(recv, recv_addr);
2883 2923 __ verify_oop(recv);
2884 2924 }
2885 2925
2886 2926 // do null check if needed
2887 2927 if (receiver_null_check) {
2888 2928 __ null_check(recv);
2889 2929 }
2890 2930
2891 2931 if (save_flags) {
2892 2932 __ mov(rsi, flags);
2893 2933 }
2894 2934
2895 2935 // compute return type
2896 2936 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
2897 2937 // Make sure we don't need to mask flags for tosBits after the above shift
2898 2938 ConstantPoolCacheEntry::verify_tosBits();
2899 2939 // load return address
2900 2940 {
2901 2941 address table_addr;
2902 2942 if (is_invokeinterface || is_invokedynamic)
2903 2943 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
2904 2944 else
2905 2945 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
2906 2946 ExternalAddress table(table_addr);
2907 2947 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2908 2948 }
2909 2949
2910 2950 // push return address
2911 2951 __ push(flags);
2912 2952
2913 2953 // Restore the saved flags value (parked in rsi above) back into rdx, and
2914 2954 // restore rsi itself as the bytecode pointer for later null checks
2915 2955 if (save_flags) {
2916 2956 __ mov(flags, rsi);
2917 2957 __ restore_bcp();
2918 2958 }
2919 2959 }
2920 2960
2921 2961
2922 2962 void TemplateTable::invokevirtual_helper(Register index, Register recv,
2923 2963 Register flags) {
2924 2964
2925 2965 // Uses temporary registers rax, rdx
2926 2966 assert_different_registers(index, recv, rax, rdx);
2927 2967
2928 2968 // Test for an invoke of a final method
2929 2969 Label notFinal;
2930 2970 __ movl(rax, flags);
2931 2971 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
2932 2972 __ jcc(Assembler::zero, notFinal);
2933 2973
2934 2974 Register method = index; // method must be rbx,
2935 2975 assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
2936 2976
2937 2977 // do the call - the index is actually the method to call
2938 2978 __ verify_oop(method);
2939 2979
2940 2980 // It's final, need a null check here!
2941 2981 __ null_check(recv);
2942 2982
2943 2983 // profile this call
2944 2984 __ profile_final_call(rax);
2945 2985
2946 2986 __ jump_from_interpreted(method, rax);
2947 2987
2948 2988 __ bind(notFinal);
2949 2989
2950 2990 // get receiver klass
2951 2991 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2952 2992 // Keep recv in rcx; the callee expects it there
2953 2993 __ load_klass(rax, recv);
2954 2994 __ verify_oop(rax);
2955 2995
2956 2996 // profile this call
2957 2997 __ profile_virtual_call(rax, rdi, rdx);
2958 2998
2959 2999 // get target methodOop & entry point
2960 3000 const int base = instanceKlass::vtable_start_offset() * wordSize;
2961 3001 assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
2962 3002 __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
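          // (conceptually: method = recv->klass()->vtable()[index].method(); the
          //  vtable begins at a fixed offset inside the klassOop)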
2963 3003 __ jump_from_interpreted(method, rdx);
2964 3004 }
2965 3005
2966 3006
2967 3007 void TemplateTable::invokevirtual(int byte_no) {
2968 3008 transition(vtos, vtos);
2969 3009 assert(byte_no == f2_byte, "use this argument");
2970 3010 prepare_invoke(rbx, noreg, byte_no);
2971 3011
2972 3012 // rbx,: index
2973 3013 // rcx: receiver
2974 3014 // rdx: flags
2975 3015
2976 3016 invokevirtual_helper(rbx, rcx, rdx);
2977 3017 }
2978 3018
2979 3019
2980 3020 void TemplateTable::invokespecial(int byte_no) {
2981 3021 transition(vtos, vtos);
2982 3022 assert(byte_no == f1_byte, "use this argument");
2983 3023 prepare_invoke(rbx, noreg, byte_no);
2984 3024 // do the call
2985 3025 __ verify_oop(rbx);
2986 3026 __ profile_call(rax);
2987 3027 __ jump_from_interpreted(rbx, rax);
2988 3028 }
2989 3029
2990 3030
2991 3031 void TemplateTable::invokestatic(int byte_no) {
2992 3032 transition(vtos, vtos);
2993 3033 assert(byte_no == f1_byte, "use this argument");
2994 3034 prepare_invoke(rbx, noreg, byte_no);
2995 3035 // do the call
2996 3036 __ verify_oop(rbx);
2997 3037 __ profile_call(rax);
2998 3038 __ jump_from_interpreted(rbx, rax);
2999 3039 }
3000 3040
3001 3041
3002 3042 void TemplateTable::fast_invokevfinal(int byte_no) {
3003 3043 transition(vtos, vtos);
3004 3044 assert(byte_no == f2_byte, "use this argument");
3005 3045 __ stop("fast_invokevfinal not used on x86");
3006 3046 }
3007 3047
3008 3048
3009 3049 void TemplateTable::invokeinterface(int byte_no) {
3010 3050 transition(vtos, vtos);
3011 3051 assert(byte_no == f1_byte, "use this argument");
3012 3052 prepare_invoke(rax, rbx, byte_no);
3013 3053
3014 3054 // rax,: Interface
3015 3055 // rbx,: index
3016 3056 // rcx: receiver
3017 3057 // rdx: flags
3018 3058
3019 3059 // Special case of invokeinterface called for virtual method of
3020 3060 // java.lang.Object. See cpCacheOop.cpp for details.
3021 3061 // This code isn't produced by javac, but could be produced by
3022 3062 // another compliant java compiler.
3023 3063 Label notMethod;
3024 3064 __ movl(rdi, rdx);
3025 3065 __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
3026 3066 __ jcc(Assembler::zero, notMethod);
3027 3067
3028 3068 invokevirtual_helper(rbx, rcx, rdx);
3029 3069 __ bind(notMethod);
3030 3070
3031 3071 // Get receiver klass into rdx - also a null check
3032 3072 __ restore_locals(); // restore rdi
3033 3073 __ load_klass(rdx, rcx);
3034 3074 __ verify_oop(rdx);
3035 3075
3036 3076 // profile this call
3037 3077 __ profile_virtual_call(rdx, rsi, rdi);
3038 3078
3039 3079 Label no_such_interface, no_such_method;
3040 3080
3041 3081 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3042 3082 rdx, rax, rbx,
3043 3083 // outputs: method, scan temp. reg
3044 3084 rbx, rsi,
3045 3085 no_such_interface);
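          // (conceptually: walk the receiver klass's itable, which begins with
          //  (interface, offset) pairs; on finding the interface in rax, load the
          //  methodOop at that offset plus the itable index, otherwise jump to
          //  no_such_interface)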
3046 3086
3047 3087 // rbx,: methodOop to call
3048 3088 // rcx: receiver
3049 3089 // Check for abstract method error
3050 3090 // Note: This should be done more efficiently via a throw_abstract_method_error
3051 3091 // interpreter entry point and a conditional jump to it in case of a null
3052 3092 // method.
3053 3093 __ testptr(rbx, rbx);
3054 3094 __ jcc(Assembler::zero, no_such_method);
3055 3095
3056 3096 // do the call
3057 3097 // rcx: receiver
3058 3098 // rbx,: methodOop
3059 3099 __ jump_from_interpreted(rbx, rdx);
3060 3100 __ should_not_reach_here();
3061 3101
3062 3102 // exception handling code follows...
3063 3103 // note: must restore interpreter registers to canonical
3064 3104 // state for exception handling to work correctly!
3065 3105
3066 3106 __ bind(no_such_method);
3067 3107 // throw exception
3068 3108 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3069 3109 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3070 3110 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3071 3111 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3072 3112 // the call_VM checks for exception, so we should never return here.
3073 3113 __ should_not_reach_here();
3074 3114
3075 3115 __ bind(no_such_interface);
3076 3116 // throw exception
3077 3117 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3078 3118 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3079 3119 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3080 3120 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3081 3121 InterpreterRuntime::throw_IncompatibleClassChangeError));
3082 3122 // the call_VM checks for exception, so we should never return here.
3083 3123 __ should_not_reach_here();
3084 3124 }
3085 3125
3086 3126 void TemplateTable::invokedynamic(int byte_no) {
3087 3127 transition(vtos, vtos);
3088 3128 assert(byte_no == f1_oop, "use this argument");
3089 3129
3090 3130 if (!EnableInvokeDynamic) {
3091 3131 // We should not encounter this bytecode if !EnableInvokeDynamic.
3092 3132 // The verifier will stop it. However, if we get past the verifier,
3093 3133 // this will stop the thread in a reasonable way, without crashing the JVM.
3094 3134 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3095 3135 InterpreterRuntime::throw_IncompatibleClassChangeError));
3096 3136 // the call_VM checks for exception, so we should never return here.
3097 3137 __ should_not_reach_here();
3098 3138 return;
3099 3139 }
3100 3140
3101 3141 prepare_invoke(rax, rbx, byte_no);
3102 3142
3103 3143 // rax: CallSite object (f1)
3104 3144 // rbx: unused (f2)
3105 3145 // rcx: receiver address
3106 3146 // rdx: flags (unused)
3107 3147
3108 3148 Register rax_callsite = rax;
3109 3149 Register rcx_method_handle = rcx;
3110 3150
3111 3151 // %%% should make a type profile for any invokedynamic that takes a ref argument
3112 3152 // profile this call
3113 3153 __ profile_call(rsi);
3114 3154
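          // Dispatch goes through the CallSite's current target MethodHandle;
          // the target may be relinked between calls, so it is reloaded every time.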
3115 3155 __ verify_oop(rax_callsite);
3116 3156 __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
3117 3157 __ null_check(rcx_method_handle);
3118 3158 __ verify_oop(rcx_method_handle);
3119 3159 __ prepare_to_jump_from_interpreted();
3120 3160 __ jump_to_method_handle_entry(rcx_method_handle, rdx);
3121 3161 }
3122 3162
3123 3163 //----------------------------------------------------------------------------------------------------
3124 3164 // Allocation
3125 3165
3126 3166 void TemplateTable::_new() {
3127 3167 transition(vtos, atos);
3128 3168 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3129 3169 Label slow_case;
3130 3170 Label slow_case_no_pop;
3131 3171 Label done;
3132 3172 Label initialize_header;
3133 3173 Label initialize_object; // including clearing the fields
3134 3174 Label allocate_shared;
3135 3175
3136 3176 __ get_cpool_and_tags(rcx, rax);
3137 3177
3138 3178 // Make sure the class we're about to instantiate has been resolved.
3139 3179 // This is done before loading instanceKlass to be consistent with the order
3140 3180 // in which the constant pool is updated (see constantPoolOopDesc::klass_at_put)
3141 3181 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
3142 3182 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3143 3183 __ jcc(Assembler::notEqual, slow_case_no_pop);
3144 3184
3145 3185 // get instanceKlass
3146 3186 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3147 3187 __ push(rcx); // save the klass, for initializing the object header later
3148 3188
3149 3189 // make sure klass is initialized & doesn't have finalizer
3150 3190 // make sure klass is fully initialized
3151 3191 __ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
3152 3192 __ jcc(Assembler::notEqual, slow_case);
3153 3193
3154 3194 // get instance_size in instanceKlass (scaled to a count of bytes)
3155 3195 __ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
3156 3196 // test to see if it has a finalizer or is malformed in some way
3157 3197 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3158 3198 __ jcc(Assembler::notZero, slow_case);
3159 3199
3160 3200 //
3161 3201 // Allocate the instance
3162 3202 // 1) Try to allocate in the TLAB
3163 3203 // 2) if fail and the object is large allocate in the shared Eden
3164 3204 // 3) if the above fails (or is not applicable), go to a slow case
3165 3205 // (creates a new TLAB, etc.)
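          // In outline, a sketch of the fast paths generated below:
          //   if (UseTLAB && thread->tlab_top + size <= thread->tlab_end) {
          //     obj = tlab_top; tlab_top += size;                // thread-local bump
          //   } else if (allow_shared_alloc) {
          //     obj = CAS-bump of Universe::heap()->top_addr();  // retried on contention
          //   } else {
          //     obj = InterpreterRuntime::_new(...);             // slow call into the VM
          //   }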
3166 3206
3167 3207 const bool allow_shared_alloc =
3168 3208 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3169 3209
3170 3210 const Register thread = rcx;
3171 3211 if (UseTLAB || allow_shared_alloc) {
3172 3212 __ get_thread(thread);
3173 3213 }
3174 3214
3175 3215 if (UseTLAB) {
3176 3216 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3177 3217 __ lea(rbx, Address(rax, rdx, Address::times_1));
3178 3218 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3179 3219 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3180 3220 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3181 3221 if (ZeroTLAB) {
3182 3222 // the fields have been already cleared
3183 3223 __ jmp(initialize_header);
3184 3224 } else {
3185 3225 // initialize both the header and fields
3186 3226 __ jmp(initialize_object);
3187 3227 }
3188 3228 }
3189 3229
3190 3230 // Allocation in the shared Eden, if allowed.
3191 3231 //
3192 3232 // rdx: instance size in bytes
3193 3233 if (allow_shared_alloc) {
3194 3234 __ bind(allocate_shared);
3195 3235
3196 3236 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3197 3237
3198 3238 Label retry;
3199 3239 __ bind(retry);
3200 3240 __ movptr(rax, heap_top);
3201 3241 __ lea(rbx, Address(rax, rdx, Address::times_1));
3202 3242 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3203 3243 __ jcc(Assembler::above, slow_case);
3204 3244
3205 3245 // Compare rax, with the top addr, and if still equal, store the new
3206 3246 // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
3207 3247 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3208 3248 //
3209 3249 // rax,: object begin
3210 3250 // rbx,: object end
3211 3251 // rdx: instance size in bytes
3212 3252 __ locked_cmpxchgptr(rbx, heap_top);
3213 3253
3214 3254 // if someone beat us on the allocation, try again, otherwise continue
3215 3255 __ jcc(Assembler::notEqual, retry);
3216 3256
3217 3257 __ incr_allocated_bytes(thread, rdx, 0);
3218 3258 }
3219 3259
3220 3260 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3221 3261 // The object is initialized before the header. If the object size is
3222 3262 // zero, go directly to the header initialization.
3223 3263 __ bind(initialize_object);
3224 3264 __ decrement(rdx, sizeof(oopDesc));
3225 3265 __ jcc(Assembler::zero, initialize_header);
3226 3266
3227 3267 // Clear the remaining object fields. Divide rdx by 8; the shift sets the
3228 3268 // carry flag if the size was not a multiple of 8 (checked in the ASSERT below).
3229 3269 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3230 3270 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3231 3271
3232 3272 // rdx must have been a multiple of 8
3233 3273 #ifdef ASSERT
3234 3274 // make sure rdx was a multiple of 8
3235 3275 Label L;
3236 3276 // Ignore partial flag stall after shrl() since this is a debug VM
3237 3277 __ jccb(Assembler::carryClear, L);
3238 3278 __ stop("object size is not multiple of 2 - adjust this code");
3239 3279 __ bind(L);
3240 3280 // rdx must be > 0, no extra check needed here
3241 3281 #endif
3242 3282
3243 3283 // initialize remaining object fields: rdx was a multiple of 8
3244 3284 { Label loop;
3245 3285 __ bind(loop);
3246 3286 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3247 3287 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
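          // (each loop iteration clears one 8-byte chunk; on 32-bit that is the
          //  two word stores above)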
3248 3288 __ decrement(rdx);
3249 3289 __ jcc(Assembler::notZero, loop);
3250 3290 }
3251 3291
3252 3292 // initialize object header only.
3253 3293 __ bind(initialize_header);
3254 3294 if (UseBiasedLocking) {
3255 3295 __ pop(rcx); // get saved klass back in the register.
3256 3296 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
3257 3297 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3258 3298 } else {
3259 3299 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3260 3300 (int32_t)markOopDesc::prototype()); // header
3261 3301 __ pop(rcx); // get saved klass back in the register.
3262 3302 }
3263 3303 __ store_klass(rax, rcx); // klass
3264 3304
3265 3305 {
3266 3306 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3267 3307 // Trigger dtrace event for fastpath
3268 3308 __ push(atos);
3269 3309 __ call_VM_leaf(
3270 3310 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3271 3311 __ pop(atos);
3272 3312 }
3273 3313
3274 3314 __ jmp(done);
3275 3315 }
3276 3316
3277 3317 // slow case
3278 3318 __ bind(slow_case);
3279 3319 __ pop(rcx); // restore stack pointer to what it was when we came in.
3280 3320 __ bind(slow_case_no_pop);
3281 3321 __ get_constant_pool(rax);
3282 3322 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3283 3323 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3284 3324
3285 3325 // continue
3286 3326 __ bind(done);
3287 3327 }
3288 3328
3289 3329
3290 3330 void TemplateTable::newarray() {
3291 3331 transition(itos, atos);
3292 3332 __ push_i(rax); // make sure everything is on the stack
3293 3333 __ load_unsigned_byte(rdx, at_bcp(1));
3294 3334 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3295 3335 __ pop_i(rdx); // discard size
3296 3336 }
3297 3337
3298 3338
3299 3339 void TemplateTable::anewarray() {
3300 3340 transition(itos, atos);
3301 3341 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3302 3342 __ get_constant_pool(rcx);
3303 3343 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3304 3344 }
3305 3345
3306 3346
3307 3347 void TemplateTable::arraylength() {
3308 3348 transition(atos, itos);
3309 3349 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3310 3350 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3311 3351 }
3312 3352
3313 3353
3314 3354 void TemplateTable::checkcast() {
3315 3355 transition(atos, atos);
3316 3356 Label done, is_null, ok_is_subtype, quicked, resolved;
3317 3357 __ testptr(rax, rax); // Object is in EAX
3318 3358 __ jcc(Assembler::zero, is_null);
3319 3359
3320 3360 // Get cpool & tags index
3321 3361 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3322 3362 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3323 3363 // See if the bytecode has already been quickened
3324 3364 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3325 3365 __ jcc(Assembler::equal, quicked);
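          // ('quickened' means the constant pool tag is already JVM_CONSTANT_Class,
          //  so the resolved klassOop can be read straight out of the constant pool)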
3326 3366
3327 3367 __ push(atos);
3328 3368 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3329 3369 __ pop_ptr(rdx);
3330 3370 __ jmpb(resolved);
3331 3371
3332 3372 // Get superklass in EAX and subklass in EBX
3333 3373 __ bind(quicked);
3334 3374 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3335 3375 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3336 3376
3337 3377 __ bind(resolved);
3338 3378 __ load_klass(rbx, rdx);
3339 3379
3340 3380 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3341 3381 // Superklass in EAX. Subklass in EBX.
3342 3382 __ gen_subtype_check( rbx, ok_is_subtype );
3343 3383
3344 3384 // Come here on failure
3345 3385 __ push(rdx);
3346 3386 // object is at TOS
3347 3387 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3348 3388
3349 3389 // Come here on success
3350 3390 __ bind(ok_is_subtype);
3351 3391 __ mov(rax,rdx); // Restore object in EDX
3352 3392
3353 3393 // Collect counts on whether this check-cast sees NULLs a lot or not.
3354 3394 if (ProfileInterpreter) {
3355 3395 __ jmp(done);
3356 3396 __ bind(is_null);
3357 3397 __ profile_null_seen(rcx);
3358 3398 } else {
3359 3399 __ bind(is_null); // same as 'done'
3360 3400 }
3361 3401 __ bind(done);
3362 3402 }
3363 3403
3364 3404
3365 3405 void TemplateTable::instanceof() {
3366 3406 transition(atos, itos);
3367 3407 Label done, is_null, ok_is_subtype, quicked, resolved;
3368 3408 __ testptr(rax, rax);
3369 3409 __ jcc(Assembler::zero, is_null);
3370 3410
3371 3411 // Get cpool & tags index
3372 3412 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3373 3413 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3374 3414 // See if the bytecode has already been quickened
3375 3415 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
3376 3416 __ jcc(Assembler::equal, quicked);
3377 3417
3378 3418 __ push(atos);
3379 3419 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3380 3420 __ pop_ptr(rdx);
3381 3421 __ load_klass(rdx, rdx);
3382 3422 __ jmp(resolved);
3383 3423
3384 3424 // Get superklass in EAX and subklass in EDX
3385 3425 __ bind(quicked);
3386 3426 __ load_klass(rdx, rax);
3387 3427 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
3388 3428
3389 3429 __ bind(resolved);
3390 3430
3391 3431 // Generate subtype check. Blows ECX. Resets EDI.
3392 3432 // Superklass in EAX. Subklass in EDX.
3393 3433 __ gen_subtype_check( rdx, ok_is_subtype );
3394 3434
3395 3435 // Come here on failure
3396 3436 __ xorl(rax,rax);
3397 3437 __ jmpb(done);
3398 3438 // Come here on success
3399 3439 __ bind(ok_is_subtype);
3400 3440 __ movl(rax, 1);
3401 3441
3402 3442 // Collect counts on whether this test sees NULLs a lot or not.
3403 3443 if (ProfileInterpreter) {
3404 3444 __ jmp(done);
3405 3445 __ bind(is_null);
3406 3446 __ profile_null_seen(rcx);
3407 3447 } else {
3408 3448 __ bind(is_null); // same as 'done'
3409 3449 }
3410 3450 __ bind(done);
3411 3451 // rax, = 0: obj == NULL or obj is not an instanceof the specified klass
3412 3452 // rax, = 1: obj != NULL and obj is an instanceof the specified klass
3413 3453 }
3414 3454
3415 3455
3416 3456 //----------------------------------------------------------------------------------------------------
3417 3457 // Breakpoints
3418 3458 void TemplateTable::_breakpoint() {
3419 3459
3420 3460 // Note: We get here even if we are single stepping;
3421 3461 // jbug insists on setting breakpoints at every bytecode
3422 3462 // even if we are in single step mode.
3423 3463
3424 3464 transition(vtos, vtos);
3425 3465
3426 3466 // get the unpatched byte code
3427 3467 __ get_method(rcx);
3428 3468 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3429 3469 __ mov(rbx, rax);
3430 3470
3431 3471 // post the breakpoint event
3432 3472 __ get_method(rcx);
3433 3473 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3434 3474
3435 3475 // complete the execution of original bytecode
3436 3476 __ dispatch_only_normal(vtos);
3437 3477 }
3438 3478
3439 3479
3440 3480 //----------------------------------------------------------------------------------------------------
3441 3481 // Exceptions
3442 3482
3443 3483 void TemplateTable::athrow() {
3444 3484 transition(atos, vtos);
3445 3485 __ null_check(rax);
3446 3486 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3447 3487 }
3448 3488
3449 3489
3450 3490 //----------------------------------------------------------------------------------------------------
3451 3491 // Synchronization
3452 3492 //
3453 3493 // Note: monitorenter & exit are symmetric routines; which is reflected
3454 3494 // in the assembly code structure as well
3455 3495 //
3456 3496 // Stack layout:
3457 3497 //
3458 3498 // [expressions ] <--- rsp = expression stack top
3459 3499 // ..
3460 3500 // [expressions ]
3461 3501 // [monitor entry] <--- monitor block top = expression stack bot
3462 3502 // ..
3463 3503 // [monitor entry]
3464 3504 // [frame data ] <--- monitor block bot
3465 3505 // ...
3466 3506 // [saved rbp, ] <--- rbp,
3467 3507
3468 3508
3469 3509 void TemplateTable::monitorenter() {
3470 3510 transition(atos, vtos);
3471 3511
3472 3512 // check for NULL object
3473 3513 __ null_check(rax);
3474 3514
3475 3515 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3476 3516 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3477 3517 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3478 3518 Label allocated;
3479 3519
3480 3520 // initialize entry pointer
3481 3521 __ xorl(rdx, rdx); // points to free slot or NULL
3482 3522
3483 3523 // find a free slot in the monitor block (result in rdx)
3484 3524 { Label entry, loop, exit;
3485 3525 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
3486 3526
3487 3527 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3488 3528 __ jmpb(entry);
3489 3529
3490 3530 __ bind(loop);
3491 3531 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
3492 3532 __ cmovptr(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
3493 3533 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3494 3534 __ jccb(Assembler::equal, exit); // if same object then stop searching
3495 3535 __ addptr(rcx, entry_size); // otherwise advance to next entry
3496 3536 __ bind(entry);
3497 3537 __ cmpptr(rcx, rbx); // check if bottom reached
3498 3538 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3499 3539 __ bind(exit);
3500 3540 }
3501 3541
3502 3542 __ testptr(rdx, rdx); // check if a slot has been found
3503 3543 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
3504 3544
3505 3545 // allocate one if there's no free slot
3506 3546 { Label entry, loop;
3507 3547 // 1. compute new pointers // rsp: old expression stack top
3508 3548 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
3509 3549 __ subptr(rsp, entry_size); // move expression stack top
3510 3550 __ subptr(rdx, entry_size); // move expression stack bottom
3511 3551 __ mov(rcx, rsp); // set start value for copy loop
3512 3552 __ movptr(monitor_block_bot, rdx); // set new monitor block top
3513 3553 __ jmp(entry);
3514 3554 // 2. move expression stack contents
3515 3555 __ bind(loop);
3516 3556 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
3517 3557 __ movptr(Address(rcx, 0), rbx); // and store it at new location
3518 3558 __ addptr(rcx, wordSize); // advance to next word
3519 3559 __ bind(entry);
3520 3560 __ cmpptr(rcx, rdx); // check if bottom reached
3521 3561 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
3522 3562 }
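          // (net effect: the expression stack slid down entry_size bytes and rdx now
          //  points at the freshly opened monitor slot at the top of the monitor block)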
3523 3563
3524 3564 // call run-time routine
3525 3565 // rdx: points to monitor entry
3526 3566 __ bind(allocated);
3527 3567
3528 3568 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3529 3569 // The object has already been popped from the stack, so the expression stack looks correct.
3530 3570 __ increment(rsi);
3531 3571
3532 3572 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
3533 3573 __ lock_object(rdx);
3534 3574
3535 3575 // check to make sure this monitor doesn't cause stack overflow after locking
3536 3576 __ save_bcp(); // in case of exception
3537 3577 __ generate_stack_overflow_check(0);
3538 3578
3539 3579 // The bcp has already been incremented. Just need to dispatch to next instruction.
3540 3580 __ dispatch_next(vtos);
3541 3581 }
3542 3582
3543 3583
3544 3584 void TemplateTable::monitorexit() {
3545 3585 transition(atos, vtos);
3546 3586
3547 3587 // check for NULL object
3548 3588 __ null_check(rax);
3549 3589
3550 3590 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3551 3591 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
3552 3592 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
3553 3593 Label found;
3554 3594
3555 3595 // find matching slot
3556 3596 { Label entry, loop;
3557 3597 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
3558 3598 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
3559 3599 __ jmpb(entry);
3560 3600
3561 3601 __ bind(loop);
3562 3602 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
3563 3603 __ jcc(Assembler::equal, found); // if same object then stop searching
3564 3604 __ addptr(rdx, entry_size); // otherwise advance to next entry
3565 3605 __ bind(entry);
3566 3606 __ cmpptr(rdx, rbx); // check if bottom reached
3567 3607 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
3568 3608 }
3569 3609
3570 3610 // Error handling: no matching entry was found, i.e. unlocking was not block-structured
3571 3611 Label end;
3572 3612 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3573 3613 __ should_not_reach_here();
3574 3614
3575 3615 // call run-time routine
3576 3616 // rdx: points to monitor entry
3577 3617 __ bind(found);
3578 3618 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
3579 3619 __ unlock_object(rdx);
3580 3620 __ pop_ptr(rax); // discard object
3581 3621 __ bind(end);
3582 3622 }
3583 3623
3584 3624
3585 3625 //----------------------------------------------------------------------------------------------------
3586 3626 // Wide instructions
3587 3627
3588 3628 void TemplateTable::wide() {
3589 3629 transition(vtos, vtos);
3590 3630 __ load_unsigned_byte(rbx, at_bcp(1));
3591 3631 ExternalAddress wtable((address)Interpreter::_wentry_point);
3592 3632 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3593 3633 // Note: the rsi increment step is part of the individual wide bytecode implementations
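          // (e.g. for 'wide iload', rbx holds the iload opcode and the wide entry
          //  re-reads the operand as a 2-byte local index at bcp + 2)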
3594 3634 }
3595 3635
3596 3636
3597 3637 //----------------------------------------------------------------------------------------------------
3598 3638 // Multi arrays
3599 3639
3600 3640 void TemplateTable::multianewarray() {
3601 3641 transition(vtos, atos);
3602 3642 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3603 3643 // last dim is on top of stack; we want address of first one:
3604 3644 // first_addr = rsp + ndims * stackElementSize - 1*wordSize
3605 3645 // (= last_addr + (ndims - 1) * stackElementSize, since stackElementSize == wordSize here)
3606 3646 __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
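          // e.g. ndims = 3 on 32-bit: rax = rsp + 3*4 - 4 = rsp + 8, the expression
          // stack slot holding the first (outermost) dimension count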
3607 3647 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax,
3608 3648 __ load_unsigned_byte(rbx, at_bcp(3));
3609 3649 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
3610 3650 }
3611 3651
3612 3652 #endif /* !CC_INTERP */
(1015 lines elided)