54 assert(number_of_arguments >= 0, "cannot have negative number of arguments");
55
56 set_last_Java_frame(SP, noreg);
57 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
58 save_thread(L7_thread_cache);
59 // do the call
60 call(entry_point, relocInfo::runtime_call_type);
61 if (!VerifyThread) {
62 delayed()->mov(G2_thread, O0); // pass thread as first argument
63 } else {
64 delayed()->nop(); // (thread already passed)
65 }
66 int call_offset = offset(); // offset of return address
67 restore_thread(L7_thread_cache);
68 reset_last_Java_frame();
69
70 // check for pending exceptions
71 { Label L;
72 Address exception_addr(G2_thread, Thread::pending_exception_offset());
73 ld_ptr(exception_addr, Gtemp);
74 br_null(Gtemp, false, pt, L);
75 delayed()->nop();
76 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
77 st_ptr(G0, vm_result_addr);
78 Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
79 st_ptr(G0, vm_result_addr_2);
80
81 if (frame_size() == no_frame_size) {
82 // we use O7 linkage so that forward_exception_entry has the issuing PC
83 call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
84 delayed()->restore();
85 } else if (_stub_id == Runtime1::forward_exception_id) {
86 should_not_reach_here();
87 } else {
88 AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
89 jump_to(exc, G4);
90 delayed()->nop();
91 }
92 bind(L);
93 }
94
95 // get oop result if there is one and reset the value in the thread
316
317 return oop_maps;
318 }
319
320
321 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
322 // make a frame and preserve the caller's caller-save registers
323 OopMap* oop_map = save_live_registers(sasm);
324
325 // call the runtime patching routine; it returns non-zero if the nmethod was deoptimized.
326 int call_offset = __ call_RT(noreg, noreg, target);
327 OopMapSet* oop_maps = new OopMapSet();
328 oop_maps->add_gc_map(call_offset, oop_map);
329
330 // re-execute the patched instruction or, if the nmethod was deoptimized, return to the
331 // deoptimization handler entry that will cause re-execution of the current bytecode
332 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
333 assert(deopt_blob != NULL, "deoptimization blob must have been created");
334
335 Label no_deopt;
336 __ tst(O0);
337 __ brx(Assembler::equal, false, Assembler::pt, no_deopt);
338 __ delayed()->nop();
339
340 // return to the deoptimization handler entry for unpacking and re-execution;
341 // if we simply returned, then we'd deopt as if any call we patched had just
342 // returned.
343
344 restore_live_registers(sasm);
345
346 AddressLiteral dest(deopt_blob->unpack_with_reexecution());
347 __ jump_to(dest, O0);
348 __ delayed()->restore();
349
350 __ bind(no_deopt);
351 restore_live_registers(sasm);
352 __ ret();
353 __ delayed()->restore();
354
355 return oop_maps;
356 }
357
358 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
385 assert(id == fast_new_instance_init_check_id, "bad StubID");
386 __ set_info("fast new_instance init check", dont_gc_arguments);
387 }
388
389 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
390 UseTLAB && FastTLABRefill) {
391 Label slow_path;
392 Register G1_obj_size = G1;
393 Register G3_t1 = G3;
394 Register G4_t2 = G4;
395 assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);
396
397 // Push a frame, since we may do dtrace notification for the
398 // allocation, which requires calling out, and we don't want
399 // to stomp the real return address.
400 __ save_frame(0);
401
402 if (id == fast_new_instance_init_check_id) {
403 // make sure the klass is initialized
404 __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
405 __ cmp(G3_t1, instanceKlass::fully_initialized);
406 __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
407 __ delayed()->nop();
408 }
409 #ifdef ASSERT
410 // assert object can be fast path allocated
411 {
412 Label ok, not_ok;
413 __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
414 __ cmp(G1_obj_size, 0); // make sure it's an instance (LH > 0)
415 __ br(Assembler::lessEqual, false, Assembler::pn, not_ok);
416 __ delayed()->nop();
417 __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
418 __ br(Assembler::zero, false, Assembler::pn, ok);
419 __ delayed()->nop();
420 __ bind(not_ok);
421 __ stop("assert(can be fast path allocated)");
422 __ should_not_reach_here();
423 __ bind(ok);
424 }
425 #endif // ASSERT
426 // if we got here then the TLAB allocation failed, so try
427 // refilling the TLAB or allocating directly from eden.
428 Label retry_tlab, try_eden;
429 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass
430
431 __ bind(retry_tlab);
432
433 // get the instance size
434 __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
435
436 __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
484 // Use this offset to pick out an individual byte of the layout_helper:
485 const int klass_lh_header_size_offset = ((BytesPerInt - 1) // 3 - 2 selects byte {0,1,0,0}
486 - Klass::_lh_header_size_shift / BitsPerByte);
487
488 if (id == new_type_array_id) {
489 __ set_info("new_type_array", dont_gc_arguments);
490 } else {
491 __ set_info("new_object_array", dont_gc_arguments);
492 }
493
494 #ifdef ASSERT
495 // assert object type is really an array of the proper kind
496 {
497 Label ok;
498 Register G3_t1 = G3;
499 __ ld(klass_lh, G3_t1);
500 __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
501 int tag = ((id == new_type_array_id)
502 ? Klass::_lh_array_tag_type_value
503 : Klass::_lh_array_tag_obj_value);
504 __ cmp(G3_t1, tag);
505 __ brx(Assembler::equal, false, Assembler::pt, ok);
506 __ delayed()->nop();
507 __ stop("assert(is an array klass)");
508 __ should_not_reach_here();
509 __ bind(ok);
510 }
511 #endif // ASSERT
512
513 if (UseTLAB && FastTLABRefill) {
514 Label slow_path;
515 Register G1_arr_size = G1;
516 Register G3_t1 = G3;
517 Register O1_t2 = O1;
518 assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);
519
520 // check that array length is small enough for fast path
521 __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
522 __ cmp(G4_length, G3_t1);
523 __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
524 __ delayed()->nop();
525
526 // if we got here then the TLAB allocation failed, so try
527 // refilling the TLAB or allocating directly from eden.
528 Label retry_tlab, try_eden;
529 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass
530
531 __ bind(retry_tlab);
532
533 // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
534 __ ld(klass_lh, G3_t1);
535 __ sll(G4_length, G3_t1, G1_arr_size);
536 __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
537 __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
538 __ add(G1_arr_size, G3_t1, G1_arr_size);
539 __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); // align up
540 __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
541
542 __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path); // preserves G1_arr_size
543
544 __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
// ==== updated version of the same section: explicit branch/delay-slot sequences replaced by the *_short macros ====
54 assert(number_of_arguments >= 0, "cannot have negative number of arguments");
55
56 set_last_Java_frame(SP, noreg);
57 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
58 save_thread(L7_thread_cache);
59 // do the call
60 call(entry_point, relocInfo::runtime_call_type);
61 if (!VerifyThread) {
62 delayed()->mov(G2_thread, O0); // pass thread as first argument
63 } else {
64 delayed()->nop(); // (thread already passed)
65 }
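// For reference, a sketch of the common (!VerifyThread) sequence emitted above,
// in SPARC assembly (the delay-slot instruction executes before the callee):
//   call  entry_point        ! O7 <- address of the call instruction itself
//   mov   %g2, %o0           ! delay slot: pass the thread as the first C argument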
66 int call_offset = offset(); // offset of return address
67 restore_thread(L7_thread_cache);
68 reset_last_Java_frame();
69
70 // check for pending exceptions
71 { Label L;
72 Address exception_addr(G2_thread, Thread::pending_exception_offset());
73 ld_ptr(exception_addr, Gtemp);
74 br_null_short(Gtemp, pt, L);
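// (br_null_short and the other *_short macros used from here on bundle the
// compare, the branch, and the delay slot into one call; on processors with a
// compare-and-branch instruction they can emit a single cbcond. That is why
// the explicit delayed()->nop() lines from the previous version are gone.)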
75 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
76 st_ptr(G0, vm_result_addr);
77 Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
78 st_ptr(G0, vm_result_addr_2);
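// vm_result and vm_result_2 may still hold raw results of the interrupted
// runtime call; with an exception pending those results are dead, so they are
// cleared here (G0 reads as zero, i.e. null) rather than left for GC to misread.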
79
80 if (frame_size() == no_frame_size) {
81 // we use O7 linkage so that forward_exception_entry has the issuing PC
82 call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
83 delayed()->restore();
84 } else if (_stub_id == Runtime1::forward_exception_id) {
85 should_not_reach_here();
86 } else {
87 AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
88 jump_to(exc, G4);
89 delayed()->nop();
90 }
91 bind(L);
92 }
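// A sketch of the O7 linkage used above: `call` writes its own PC into O7, and
// the `restore` in its delay slot then pops the register window, so by the time
// forward_exception_entry executes, the visible O7 is the previous window's I7,
// i.e. the return address of whoever issued the runtime call. The exception is
// thus forwarded as if it had been raised at that original call site.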
93
94 // get oop result if there is one and reset the value in the thread
315
316 return oop_maps;
317 }
318
319
320 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
321 // make a frame and preserve the caller's caller-save registers
322 OopMap* oop_map = save_live_registers(sasm);
323
324 // call the runtime patching routine; it returns non-zero if the nmethod was deoptimized.
325 int call_offset = __ call_RT(noreg, noreg, target);
326 OopMapSet* oop_maps = new OopMapSet();
327 oop_maps->add_gc_map(call_offset, oop_map);
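// The offset recorded by call_RT is the PC offset of the runtime call's return
// address; registering the OopMap at that offset lets the GC find every live
// oop in the saved registers if a collection happens inside `target`.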
328
329 // re-execute the patched instruction or, if the nmethod was deoptimized, return to the
330 // deoptimization handler entry that will cause re-execution of the current bytecode
331 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
332 assert(deopt_blob != NULL, "deoptimization blob must have been created");
333
334 Label no_deopt;
335 __ br_null_short(O0, Assembler::pt, no_deopt);
336
337 // return to the deoptimization handler entry for unpacking and re-execution;
338 // if we simply returned, then we'd deopt as if any call we patched had just
339 // returned.
340
341 restore_live_registers(sasm);
342
343 AddressLiteral dest(deopt_blob->unpack_with_reexecution());
344 __ jump_to(dest, O0);
345 __ delayed()->restore();
346
347 __ bind(no_deopt);
348 restore_live_registers(sasm);
349 __ ret();
350 __ delayed()->restore();
351
352 return oop_maps;
353 }
354
355 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
382 assert(id == fast_new_instance_init_check_id, "bad StubID");
383 __ set_info("fast new_instance init check", dont_gc_arguments);
384 }
385
386 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
387 UseTLAB && FastTLABRefill) {
388 Label slow_path;
389 Register G1_obj_size = G1;
390 Register G3_t1 = G3;
391 Register G4_t2 = G4;
392 assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);
393
394 // Push a frame, since we may do dtrace notification for the
395 // allocation, which requires calling out, and we don't want
396 // to stomp the real return address.
397 __ save_frame(0);
398
399 if (id == fast_new_instance_init_check_id) {
400 // make sure the klass is initialized
401 __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
402 __ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
403 }
404 #ifdef ASSERT
405 // assert object can be fast path allocated
406 {
407 Label ok, not_ok;
408 __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
409 // make sure it's an instance (LH > 0)
410 __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
411 __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
412 __ br(Assembler::zero, false, Assembler::pn, ok);
413 __ delayed()->nop();
414 __ bind(not_ok);
415 __ stop("assert(can be fast path allocated)");
416 __ should_not_reach_here();
417 __ bind(ok);
418 }
419 #endif // ASSERT
420 // if we got here then the TLAB allocation failed, so try
421 // refilling the TLAB or allocating directly from eden.
422 Label retry_tlab, try_eden;
423 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass
424
425 __ bind(retry_tlab);
426
427 // get the instance size
428 __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
429
430 __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
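// tlab_allocate is essentially bump-the-pointer allocation. A C-level sketch
// of what the emitted code does (simplified; names from ThreadLocalAllocBuffer):
//   HeapWord* obj = thread->tlab().top();
//   HeapWord* end = obj + size;
//   if (end > thread->tlab().end()) goto slow_path;
//   thread->tlab().set_top(end);        // O0_obj now points at the new object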
478 // Use this offset to pick out an individual byte of the layout_helper:
479 const int klass_lh_header_size_offset = ((BytesPerInt - 1) // 3 - 2 selects byte {0,1,0,0}
480 - Klass::_lh_header_size_shift / BitsPerByte);
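// Worked example, assuming _lh_header_size_shift == BitsPerByte * 2 == 16 as
// in this era's klass.hpp: the offset is (4 - 1) - 16 / 8 == 1, i.e. byte 1 of
// the big-endian 4-byte layout_helper, which is exactly the 8-bit header-size
// field occupying bits 16..23.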
481
482 if (id == new_type_array_id) {
483 __ set_info("new_type_array", dont_gc_arguments);
484 } else {
485 __ set_info("new_object_array", dont_gc_arguments);
486 }
487
488 #ifdef ASSERT
489 // assert object type is really an array of the proper kind
490 {
491 Label ok;
492 Register G3_t1 = G3;
493 __ ld(klass_lh, G3_t1);
494 __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
495 int tag = ((id == new_type_array_id)
496 ? Klass::_lh_array_tag_type_value
497 : Klass::_lh_array_tag_obj_value);
498 __ cmp_and_brx_short(G3_t1, tag, Assembler::equal, Assembler::pt, ok);
499 __ stop("assert(is an array klass)");
500 __ should_not_reach_here();
501 __ bind(ok);
502 }
503 #endif // ASSERT
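// The tag compared above is the top two bits of layout_helper: after the
// arithmetic shift by _lh_array_tag_shift (BitsPerInt - 2), type arrays
// sign-extend to _lh_array_tag_type_value (~0x00) and object arrays to
// _lh_array_tag_obj_value (~0x01), per the layout_helper encoding in Klass.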
504
505 if (UseTLAB && FastTLABRefill) {
506 Label slow_path;
507 Register G1_arr_size = G1;
508 Register G3_t1 = G3;
509 Register O1_t2 = O1;
510 assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);
511
512 // check that array length is small enough for fast path
513 __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
514 __ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path);
515
516 // if we got here then the TLAB allocation failed, so try
517 // refilling the TLAB or allocating directly from eden.
518 Label retry_tlab, try_eden;
519 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass
520
521 __ bind(retry_tlab);
522
523 // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
524 __ ld(klass_lh, G3_t1);
525 __ sll(G4_length, G3_t1, G1_arr_size);
526 __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
527 __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
528 __ add(G1_arr_size, G3_t1, G1_arr_size);
529 __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); // align up
530 __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
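// Worked example with assumed values: for an int[] of length 10, the low byte
// of layout_helper gives log2_element_size == 2 and the header-size byte, say,
// 16. Then (10 << 2) + 16 == 56, and with MinObjAlignmentInBytesMask == 7 the
// align-up is (56 + 7) & ~7 == 56. Note that sll shifts by the low 5 bits of
// G3_t1, which is what implements the "& 0x1F" in the comment above.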
531
532 __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path); // preserves G1_arr_size
533
534 __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);