src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
(webrev 7063628_1, Sdiff)

Old version:

  54   assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  55 
  56   set_last_Java_frame(SP, noreg);
  57   if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  58   save_thread(L7_thread_cache);
  59   // do the call
  60   call(entry_point, relocInfo::runtime_call_type);
  61   if (!VerifyThread) {
  62     delayed()->mov(G2_thread, O0);  // pass thread as first argument
  63   } else {
  64     delayed()->nop();             // (thread already passed)
  65   }
  66   int call_offset = offset();  // offset of return address
  67   restore_thread(L7_thread_cache);
  68   reset_last_Java_frame();
  69 
  70   // check for pending exceptions
  71   { Label L;
  72     Address exception_addr(G2_thread, Thread::pending_exception_offset());
  73     ld_ptr(exception_addr, Gtemp);
  74     br_null(Gtemp, false, pt, L);
  75     Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  76     st_ptr(G0, vm_result_addr);
  77     Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  78     st_ptr(G0, vm_result_addr_2);
  79 
  80     if (frame_size() == no_frame_size) {
  81       // we use O7 linkage so that forward_exception_entry has the issuing PC
  82       call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  83       delayed()->restore();
  84     } else if (_stub_id == Runtime1::forward_exception_id) {
  85       should_not_reach_here();
  86     } else {
  87       AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
  88       jump_to(exc, G4);
  89       delayed()->nop();
  90     }
  91     bind(L);
  92   }
  93 
  94   // get oop result if there is one and reset the value in the thread
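
After the runtime call returns, this epilogue checks the thread for a pending
exception; if one is set it clears vm_result and vm_result_2 and then forwards
the exception (via O7 linkage or the forward_exception stub) instead of
returning normally. As a rough guide, here is a minimal, self-contained C++
sketch of that check; the struct and values below are simplified stand-ins for
illustration, not the real Thread/JavaThread layout:

    // Sketch only: stand-in types, not HotSpot code.
    #include <cstdio>

    struct ToyThread {            // stand-in for the JavaThread reached via G2_thread
      void* pending_exception;    // Thread::pending_exception_offset()
      void* vm_result;            // JavaThread::vm_result_offset()
      void* vm_result_2;          // JavaThread::vm_result_2_offset()
    };

    // Returns true when the stub must forward an exception instead of
    // returning its oop results.
    bool must_forward_exception(ToyThread* t) {
      if (t->pending_exception == nullptr) return false;  // br_null(Gtemp, ...) fast path
      t->vm_result   = nullptr;                           // st_ptr(G0, vm_result_addr)
      t->vm_result_2 = nullptr;                           // st_ptr(G0, vm_result_addr_2)
      return true;                                        // then branch to forward_exception
    }

    int main() {
      int ex = 0;
      ToyThread a = { nullptr, &ex, &ex };
      ToyThread b = { &ex,     &ex, &ex };
      printf("no pending exception -> forward? %d\n", must_forward_exception(&a));
      printf("pending exception    -> forward? %d, results cleared? %d\n",
             must_forward_exception(&b),
             b.vm_result == nullptr && b.vm_result_2 == nullptr);
      return 0;
    }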


 315 
 316   return oop_maps;
 317 }
 318 
 319 
 320 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 321   // make a frame and preserve the caller's caller-save registers
 322   OopMap* oop_map = save_live_registers(sasm);
 323 
 324   // call the runtime patching routine, returns non-zero if nmethod got deopted.
 325   int call_offset = __ call_RT(noreg, noreg, target);
 326   OopMapSet* oop_maps = new OopMapSet();
 327   oop_maps->add_gc_map(call_offset, oop_map);
 328 
 329   // re-execute the patched instruction or, if the nmethod was deoptimized, return to the
 330   // deoptimization handler entry that will cause re-execution of the current bytecode
 331   DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
 332   assert(deopt_blob != NULL, "deoptimization blob must have been created");
 333 
 334   Label no_deopt;
 335   __ br_null(O0, false, Assembler::pt, no_deopt);
 336 
 337   // return to the deoptimization handler entry for unpacking and re-execution;
 338   // if we simply returned, then we'd deopt as if any call we patched had just
 339   // returned.
 340 
 341   restore_live_registers(sasm);
 342 
 343   AddressLiteral dest(deopt_blob->unpack_with_reexecution());
 344   __ jump_to(dest, O0);
 345   __ delayed()->restore();
 346 
 347   __ bind(no_deopt);
 348   restore_live_registers(sasm);
 349   __ ret();
 350   __ delayed()->restore();
 351 
 352   return oop_maps;
 353 }
 354 
 355 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
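
generate_patching has a single decision point: the value the runtime patching
routine leaves in O0. Non-zero means the nmethod was deoptimized while patching,
so instead of returning (which would look like a normal return from whatever
call got patched) the stub jumps to the deoptimization blob's re-execute entry;
zero means it simply returns so the patched instruction runs again. A compact
C++ rendering of that decision, with stand-in names, purely for illustration:

    // Sketch only: simplified stand-ins, not HotSpot code.
    #include <cstdio>

    enum Outcome { REEXECUTE_PATCHED_SITE, ENTER_DEOPT_HANDLER };

    // 'o0' stands in for the value tested by br_null(O0, ...).
    Outcome after_patching_call(long o0) {
      if (o0 != 0) {
        // jump_to(deopt_blob->unpack_with_reexecution()): unwind to the
        // interpreter and re-execute the current bytecode there.
        return ENTER_DEOPT_HANDLER;
      }
      // ret / restore: go back and re-run the now-patched instruction.
      return REEXECUTE_PATCHED_SITE;
    }

    int main() {
      printf("O0 == 0 -> %d, O0 != 0 -> %d\n",
             after_patching_call(0), after_patching_call(1));
      return 0;
    }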


 382           assert(id == fast_new_instance_init_check_id, "bad StubID");
 383           __ set_info("fast new_instance init check", dont_gc_arguments);
 384         }
 385 
 386         if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
 387             UseTLAB && FastTLABRefill) {
 388           Label slow_path;
 389           Register G1_obj_size = G1;
 390           Register G3_t1 = G3;
 391           Register G4_t2 = G4;
 392           assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);
 393 
 394           // Push a frame since we may do dtrace notification for the
 395           // allocation which requires calling out and we don't want
 396           // to stomp the real return address.
 397           __ save_frame(0);
 398 
 399           if (id == fast_new_instance_init_check_id) {
 400             // make sure the klass is initialized
 401             __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
 402             __ cmp_and_br(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, false, Assembler::pn, slow_path);
 403           }
 404 #ifdef ASSERT
 405           // assert object can be fast path allocated
 406           {
 407             Label ok, not_ok;
 408           __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
 409           // make sure it's an instance (LH > 0)
 410           __ cmp_and_br(G1_obj_size, 0, Assembler::lessEqual, false, Assembler::pn, not_ok);
 411           __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
 412           __ br(Assembler::zero, false, Assembler::pn, ok);
 413           __ delayed()->nop();
 414           __ bind(not_ok);
 415           __ stop("assert(can be fast path allocated)");
 416           __ should_not_reach_here();
 417           __ bind(ok);
 418           }
 419 #endif // ASSERT
 420           // if we got here then the TLAB allocation failed, so try
 421           // refilling the TLAB or allocating directly from eden.
 422           Label retry_tlab, try_eden;
 423           __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass
 424 
 425           __ bind(retry_tlab);
 426 
 427           // get the instance size
 428           __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
 429 
 430           __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
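
The ASSERT block above spells out the fast-path-allocatable test for an
instance klass: the layout_helper must be positive ("make sure it's an instance
(LH > 0)") and the instance slow-path bit must be clear. A self-contained
sketch of that test follows; the bit value is an assumed placeholder, not the
real Klass::_lh_instance_slow_path_bit definition:

    // Sketch only: the bit below is an assumed placeholder, chosen just
    // for illustration.
    #include <cstdio>

    const int lh_instance_slow_path_bit = 0x01;   // assumption, not the HotSpot constant

    bool can_fast_path_allocate(int layout_helper) {
      if (layout_helper <= 0) return false;                      // fails the LH > 0 check
      return (layout_helper & lh_instance_slow_path_bit) == 0;   // btst(slow_path_bit, LH)
    }

    int main() {
      printf("%d %d %d\n",
             can_fast_path_allocate(16),        // plain instance size         -> 1
             can_fast_path_allocate(16 | 0x01), // slow-path bit set           -> 0
             can_fast_path_allocate(-8));       // LH not positive (not an instance) -> 0
      return 0;
    }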


 478         // Use this offset to pick out an individual byte of the layout_helper:
 479         const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
 480                                                  - Klass::_lh_header_size_shift / BitsPerByte);
 481 
 482         if (id == new_type_array_id) {
 483           __ set_info("new_type_array", dont_gc_arguments);
 484         } else {
 485           __ set_info("new_object_array", dont_gc_arguments);
 486         }
 487 
 488 #ifdef ASSERT
 489         // assert object type is really an array of the proper kind
 490         {
 491           Label ok;
 492           Register G3_t1 = G3;
 493           __ ld(klass_lh, G3_t1);
 494           __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
 495           int tag = ((id == new_type_array_id)
 496                      ? Klass::_lh_array_tag_type_value
 497                      : Klass::_lh_array_tag_obj_value);
 498           __ cmp_and_brx(G3_t1, tag, Assembler::equal, false, Assembler::pt, ok);
 499           __ stop("assert(is an array klass)");
 500           __ should_not_reach_here();
 501           __ bind(ok);
 502         }
 503 #endif // ASSERT
 504 
 505         if (UseTLAB && FastTLABRefill) {
 506           Label slow_path;
 507           Register G1_arr_size = G1;
 508           Register G3_t1 = G3;
 509           Register O1_t2 = O1;
 510           assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);
 511 
 512           // check that array length is small enough for fast path
 513           __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
 514           __ cmp_and_br(G4_length, G3_t1, Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
 515 
 516           // if we got here then the TLAB allocation failed, so try
 517           // refilling the TLAB or allocating directly from eden.
 518           Label retry_tlab, try_eden;
 519           __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass
 520 
 521           __ bind(retry_tlab);
 522 
 523           // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
 524           __ ld(klass_lh, G3_t1);
 525           __ sll(G4_length, G3_t1, G1_arr_size);
 526           __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
 527           __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
 528           __ add(G1_arr_size, G3_t1, G1_arr_size);
 529           __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);  // align up
 530           __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
 531 
 532           __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path);  // preserves G1_arr_size
 533 
 534           __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
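
The size computation above follows the comment in this hunk, (length <<
(layout_helper & 0x1F)) + header_size: shift the length left by the low bits of
the layout_helper (sll only uses the low 5 bits of a register shift count), add
the header size encoded in the layout_helper, and round the sum up to the object
alignment. The klass_lh_header_size_offset constant near the top of the hunk
does a related job, picking the big-endian byte offset of that header-size field
within the 32-bit layout_helper. The worked example below redoes the arithmetic
in plain C++; the shift, mask, and alignment values are assumptions for
illustration, not the real Klass constants:

    // Worked example only: the values below stand in for
    // Klass::_lh_header_size_shift, Klass::_lh_header_size_mask and
    // MinObjAlignmentInBytesMask; they are not the HotSpot definitions.
    #include <cstdint>
    #include <cstdio>

    const int lh_header_size_shift   = 16;    // assumed: header-size byte at bits 23..16
    const int lh_header_size_mask    = 0xFF;  // assumed
    const int min_obj_alignment_mask = 7;     // assumed 8-byte object alignment

    int main() {
      // Assumed layout: header size 16 bytes, log2(element size) = 2 (4-byte elements).
      uint32_t layout_helper = (16u << lh_header_size_shift) | 2u;
      uint32_t length = 10;

      uint32_t size = length << (layout_helper & 0x1F);                         // sll, low 5 bits
      size += (layout_helper >> lh_header_size_shift) & lh_header_size_mask;    // + header size
      size  = (size + min_obj_alignment_mask) & ~min_obj_alignment_mask;        // align up

      // klass_lh_header_size_offset: which big-endian byte of the 32-bit word
      // holds the header size (with these assumed values: 3 - 16/8 = 1).
      int header_size_byte = (4 - 1) - lh_header_size_shift / 8;

      printf("allocation size = %u bytes, header-size byte offset = %d\n",
             size, header_size_byte);   // 10*4 + 16 = 56 bytes, byte offset 1
      return 0;
    }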

New version:

  54   assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  55 
  56   set_last_Java_frame(SP, noreg);
  57   if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  58   save_thread(L7_thread_cache);
  59   // do the call
  60   call(entry_point, relocInfo::runtime_call_type);
  61   if (!VerifyThread) {
  62     delayed()->mov(G2_thread, O0);  // pass thread as first argument
  63   } else {
  64     delayed()->nop();             // (thread already passed)
  65   }
  66   int call_offset = offset();  // offset of return address
  67   restore_thread(L7_thread_cache);
  68   reset_last_Java_frame();
  69 
  70   // check for pending exceptions
  71   { Label L;
  72     Address exception_addr(G2_thread, Thread::pending_exception_offset());
  73     ld_ptr(exception_addr, Gtemp);
  74     br_null_short(Gtemp, pt, L);
  75     Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  76     st_ptr(G0, vm_result_addr);
  77     Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  78     st_ptr(G0, vm_result_addr_2);
  79 
  80     if (frame_size() == no_frame_size) {
  81       // we use O7 linkage so that forward_exception_entry has the issuing PC
  82       call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  83       delayed()->restore();
  84     } else if (_stub_id == Runtime1::forward_exception_id) {
  85       should_not_reach_here();
  86     } else {
  87       AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
  88       jump_to(exc, G4);
  89       delayed()->nop();
  90     }
  91     bind(L);
  92   }
  93 
  94   // get oop result if there is one and reset the value in the thread
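
The only change in this hunk (and in the ones below) is the branch idiom:
br_null(Gtemp, false, pt, L) becomes br_null_short(Gtemp, pt, L), and the
cmp_and_br / cmp_and_brx calls likewise gain a _short suffix and drop the
boolean (annul) argument, presumably so a more compact, short-range branch
encoding can be chosen when the target is near. The toy below only contrasts
the two call shapes visible in the diff; it is not the MacroAssembler, and the
real helpers are implemented differently:

    // Toy illustration only -- NOT the HotSpot MacroAssembler.
    #include <cstdio>

    struct ToyAssembler {
      // old shape: an explicit bool (the annul hint in the old signatures)
      void br_null(const char* reg, bool annul, const char* predict, const char* label) {
        printf("br_null       %s, annul=%d, %s, %s\n", reg, annul, predict, label);
      }
      // new shape: no bool; the helper is free to pick a short encoding
      void br_null_short(const char* reg, const char* predict, const char* label) {
        printf("br_null_short %s, %s, %s\n", reg, predict, label);
      }
    };

    int main() {
      ToyAssembler a;
      a.br_null("Gtemp", false, "pt", "L");   // before this change
      a.br_null_short("Gtemp", "pt", "L");    // after this change
      return 0;
    }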


 315 
 316   return oop_maps;
 317 }
 318 
 319 
 320 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 321   // make a frame and preserve the caller's caller-save registers
 322   OopMap* oop_map = save_live_registers(sasm);
 323 
 324   // call the runtime patching routine, returns non-zero if nmethod got deopted.
 325   int call_offset = __ call_RT(noreg, noreg, target);
 326   OopMapSet* oop_maps = new OopMapSet();
 327   oop_maps->add_gc_map(call_offset, oop_map);
 328 
 329   // re-execute the patched instruction or, if the nmethod was deoptimized, return to the
 330   // deoptimization handler entry that will cause re-execution of the current bytecode
 331   DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
 332   assert(deopt_blob != NULL, "deoptimization blob must have been created");
 333 
 334   Label no_deopt;
 335   __ br_null_short(O0, Assembler::pt, no_deopt);
 336 
 337   // return to the deoptimization handler entry for unpacking and re-execution;
 338   // if we simply returned, then we'd deopt as if any call we patched had just
 339   // returned.
 340 
 341   restore_live_registers(sasm);
 342 
 343   AddressLiteral dest(deopt_blob->unpack_with_reexecution());
 344   __ jump_to(dest, O0);
 345   __ delayed()->restore();
 346 
 347   __ bind(no_deopt);
 348   restore_live_registers(sasm);
 349   __ ret();
 350   __ delayed()->restore();
 351 
 352   return oop_maps;
 353 }
 354 
 355 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {


 382           assert(id == fast_new_instance_init_check_id, "bad StubID");
 383           __ set_info("fast new_instance init check", dont_gc_arguments);
 384         }
 385 
 386         if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
 387             UseTLAB && FastTLABRefill) {
 388           Label slow_path;
 389           Register G1_obj_size = G1;
 390           Register G3_t1 = G3;
 391           Register G4_t2 = G4;
 392           assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);
 393 
 394           // Push a frame since we may do dtrace notification for the
 395           // allocation which requires calling out and we don't want
 396           // to stomp the real return address.
 397           __ save_frame(0);
 398 
 399           if (id == fast_new_instance_init_check_id) {
 400             // make sure the klass is initialized
 401             __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
 402             __ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
 403           }
 404 #ifdef ASSERT
 405           // assert object can be fast path allocated
 406           {
 407             Label ok, not_ok;
 408           __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
 409           // make sure it's an instance (LH > 0)
 410           __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
 411           __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
 412           __ br(Assembler::zero, false, Assembler::pn, ok);
 413           __ delayed()->nop();
 414           __ bind(not_ok);
 415           __ stop("assert(can be fast path allocated)");
 416           __ should_not_reach_here();
 417           __ bind(ok);
 418           }
 419 #endif // ASSERT
 420           // if we got here then the TLAB allocation failed, so try
 421           // refilling the TLAB or allocating directly from eden.
 422           Label retry_tlab, try_eden;
 423           __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass
 424 
 425           __ bind(retry_tlab);
 426 
 427           // get the instance size
 428           __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
 429 
 430           __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);


 478         // Use this offset to pick out an individual byte of the layout_helper:
 479         const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
 480                                                  - Klass::_lh_header_size_shift / BitsPerByte);
 481 
 482         if (id == new_type_array_id) {
 483           __ set_info("new_type_array", dont_gc_arguments);
 484         } else {
 485           __ set_info("new_object_array", dont_gc_arguments);
 486         }
 487 
 488 #ifdef ASSERT
 489         // assert object type is really an array of the proper kind
 490         {
 491           Label ok;
 492           Register G3_t1 = G3;
 493           __ ld(klass_lh, G3_t1);
 494           __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
 495           int tag = ((id == new_type_array_id)
 496                      ? Klass::_lh_array_tag_type_value
 497                      : Klass::_lh_array_tag_obj_value);
 498           __ cmp_and_brx_short(G3_t1, tag, Assembler::equal, Assembler::pt, ok);
 499           __ stop("assert(is an array klass)");
 500           __ should_not_reach_here();
 501           __ bind(ok);
 502         }
 503 #endif // ASSERT
 504 
 505         if (UseTLAB && FastTLABRefill) {
 506           Label slow_path;
 507           Register G1_arr_size = G1;
 508           Register G3_t1 = G3;
 509           Register O1_t2 = O1;
 510           assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);
 511 
 512           // check that array length is small enough for fast path
 513           __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
 514           __ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path);
 515 
 516           // if we got here then the TLAB allocation failed, so try
 517           // refilling the TLAB or allocating directly from eden.
 518           Label retry_tlab, try_eden;
 519           __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass
 520 
 521           __ bind(retry_tlab);
 522 
 523           // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
 524           __ ld(klass_lh, G3_t1);
 525           __ sll(G4_length, G3_t1, G1_arr_size);
 526           __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
 527           __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
 528           __ add(G1_arr_size, G3_t1, G1_arr_size);
 529           __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);  // align up
 530           __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
 531 
 532           __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path);  // preserves G1_arr_size
 533 
 534           __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);

