src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
Sdiff for bug 7118863

Old:
 381         } else {
 382           assert(id == fast_new_instance_init_check_id, "bad StubID");
 383           __ set_info("fast new_instance init check", dont_gc_arguments);
 384         }
 385 
 386         if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
 387             UseTLAB && FastTLABRefill) {
 388           Label slow_path;
 389           Register G1_obj_size = G1;
 390           Register G3_t1 = G3;
 391           Register G4_t2 = G4;
 392           assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);
 393 
 394           // Push a frame since we may do dtrace notification for the
 395           // allocation which requires calling out and we don't want
 396           // to stomp the real return address.
 397           __ save_frame(0);
 398 
 399           if (id == fast_new_instance_init_check_id) {
 400             // make sure the klass is initialized
 401             __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
 402             __ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
 403           }
 404 #ifdef ASSERT
 405           // assert object can be fast path allocated
 406           {
 407             Label ok, not_ok;
 408             __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
 409             // make sure it's an instance (LH > 0)
 410             __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
 411             __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
 412             __ br(Assembler::zero, false, Assembler::pn, ok);
 413             __ delayed()->nop();
 414             __ bind(not_ok);
 415             __ stop("assert(can be fast path allocated)");
 416             __ should_not_reach_here();
 417             __ bind(ok);
 418           }
 419 #endif // ASSERT
 420           // if we got here then the TLAB allocation failed, so try
 421           // refilling the TLAB or allocating directly from eden.
 422           Label retry_tlab, try_eden;
 423           __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass
 424 
 425           __ bind(retry_tlab);
 426 
 427           // get the instance size
 428           __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
 429 
 430           __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
 431 
 432           __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
 433           __ verify_oop(O0_obj);
 434           __ mov(O0, I0);
 435           __ ret();
 436           __ delayed()->restore();
 437 
 438           __ bind(try_eden);
 439           // get the instance size
 440           __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
 441           __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
 442           __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);
 443 
 444           __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
 445           __ verify_oop(O0_obj);
 446           __ mov(O0, I0);
 447           __ ret();
 448           __ delayed()->restore();
 449 
 450           __ bind(slow_path);
 451 
 452           // pop this frame so generate_stub_call can push its own
 453           __ restore();
 454         }
 455 
 456         oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
 457         // I0->O0: new instance
 458       }
 459 
 460       break;
 461 
 462     case counter_overflow_id:
 463       // G4 contains bci, G5 contains method
 464       oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
 465       break;
 466 
 467     case new_type_array_id:
 468     case new_object_array_id:
 469       {
 470         Register G5_klass = G5; // Incoming
 471         Register G4_length = G4; // Incoming
 472         Register O0_obj   = O0; // Outgoing
 473 
 474         Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize)
 475                                     + Klass::layout_helper_offset_in_bytes()));
 476         assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
 477         assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
 478         // Use this offset to pick out an individual byte of the layout_helper:
 479         const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
 480                                                  - Klass::_lh_header_size_shift / BitsPerByte);
 481 
 482         if (id == new_type_array_id) {
 483           __ set_info("new_type_array", dont_gc_arguments);
 484         } else {
 485           __ set_info("new_object_array", dont_gc_arguments);
 486         }
 487 
 488 #ifdef ASSERT
 489         // assert object type is really an array of the proper kind
 490         {
 491           Label ok;
 492           Register G3_t1 = G3;
 493           __ ld(klass_lh, G3_t1);
 494           __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
 495           int tag = ((id == new_type_array_id)


 575       break;
 576 
 577     case new_multi_array_id:
 578       { // O0: klass
 579         // O1: rank
 580         // O2: address of 1st dimension
 581         __ set_info("new_multi_array", dont_gc_arguments);
 582         oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
 583         // I0 -> O0: new multi array
 584       }
 585       break;
 586 
 587     case register_finalizer_id:
 588       {
 589         __ set_info("register_finalizer", dont_gc_arguments);
 590 
 591         // load the klass and check the has-finalizer flag
 592         Label register_finalizer;
 593         Register t = O1;
 594         __ load_klass(O0, t);
 595         __ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
 596         __ set(JVM_ACC_HAS_FINALIZER, G3);
 597         __ andcc(G3, t, G0);
 598         __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
 599         __ delayed()->nop();
 600 
 601         // do a leaf return
 602         __ retl();
 603         __ delayed()->nop();
 604 
 605         __ bind(register_finalizer);
 606         OopMap* oop_map = save_live_registers(sasm);
 607         int call_offset = __ call_RT(noreg, noreg,
 608                                      CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
 609         oop_maps = new OopMapSet();
 610         oop_maps->add_gc_map(call_offset, oop_map);
 611 
 612         // Now restore all the live registers
 613         restore_live_registers(sasm);
 614 
 615         __ ret();




 381         } else {
 382           assert(id == fast_new_instance_init_check_id, "bad StubID");
 383           __ set_info("fast new_instance init check", dont_gc_arguments);
 384         }
 385 
 386         if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
 387             UseTLAB && FastTLABRefill) {
 388           Label slow_path;
 389           Register G1_obj_size = G1;
 390           Register G3_t1 = G3;
 391           Register G4_t2 = G4;
 392           assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);
 393 
 394           // Push a frame since we may do dtrace notification for the
 395           // allocation which requires calling out and we don't want
 396           // to stomp the real return address.
 397           __ save_frame(0);
 398 
 399           if (id == fast_new_instance_init_check_id) {
 400             // make sure the klass is initialized
 401             __ ld(G5_klass, in_bytes(instanceKlass::init_state_offset()), G3_t1);
 402             __ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
 403           }
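
The load above is the new spelling of the pattern this webrev changes: the old code (first listing) adds the klassOop header to a Klass-relative offset at every call site, as "+ sizeof(oopDesc)" or equivalently "klassOopDesc::header_size() * HeapWordSize", while the new accessors return a ByteSize that folds the header in once. A standalone sketch of the equivalence, with all struct bodies invented for illustration:

    #include <cstddef>

    // Illustrative layouts only -- not the real HotSpot definitions.
    struct oopDesc      { void* mark; void* klass; };     // object header
    struct klassOopDesc : oopDesc {};                     // klassOop: header only, Klass part follows
    struct Klass        { int _layout_helper; };

    // Old style: offset relative to the Klass part; callers add the header.
    static size_t layout_helper_offset_in_bytes() { return offsetof(Klass, _layout_helper); }
    // New style: the accessor already spans the klassOop header.
    static size_t layout_helper_offset() {
      return sizeof(klassOopDesc) + offsetof(Klass, _layout_helper);
    }

    int main() {
      // old call site vs. new call site: same byte offset from the klassOop
      return layout_helper_offset_in_bytes() + sizeof(oopDesc)
                 == layout_helper_offset() ? 0 : 1;
    }

Since klassOopDesc adds no fields of its own, sizeof(klassOopDesc) == sizeof(oopDesc), which is why the two old spellings of the header size agree.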
 404 #ifdef ASSERT
 405           // assert object can be fast path allocated
 406           {
 407             Label ok, not_ok;
 408             __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
 409             // make sure it's an instance (LH > 0)
 410             __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
 411             __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
 412             __ br(Assembler::zero, false, Assembler::pn, ok);
 413             __ delayed()->nop();
 414             __ bind(not_ok);
 415             __ stop("assert(can be fast path allocated)");
 416             __ should_not_reach_here();
 417             __ bind(ok);
 418           }
 419 #endif // ASSERT
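
The debug-only block above restates an invariant of the layout helper: a positive value means an instance (arrays use a negative, tagged encoding), and Klass::_lh_instance_slow_path_bit marks instances that must not be allocated on the fast path. Roughly, in C++ (the bit value is assumed to be the low bit, for illustration):

    #include <cassert>

    const int lh_instance_slow_path_bit = 1;   // stands in for Klass::_lh_instance_slow_path_bit

    static bool can_be_fast_path_allocated(int layout_helper) {
      // LH > 0 means "instance of this fixed size in bytes"; a set slow-path
      // bit means the instance still needs the runtime (e.g. it is finalizable).
      return layout_helper > 0 && (layout_helper & lh_instance_slow_path_bit) == 0;
    }

    int main() {
      assert( can_be_fast_path_allocated(64));       // plain 64-byte instance
      assert(!can_be_fast_path_allocated(64 | 1));   // slow-path bit set
      assert(!can_be_fast_path_allocated(-1));       // array-style tagged value
      return 0;
    }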
 420           // if we got here then the TLAB allocation failed, so try
 421           // refilling the TLAB or allocating directly from eden.
 422           Label retry_tlab, try_eden;
 423           __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass
 424 
 425           __ bind(retry_tlab);
 426 
 427           // get the instance size
 428           __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
 429 
 430           __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
 431 
 432           __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
 433           __ verify_oop(O0_obj);
 434           __ mov(O0, I0);
 435           __ ret();
 436           __ delayed()->restore();
 437 
 438           __ bind(try_eden);
 439           // get the instance size
 440           __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
 441           __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
 442           __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);
 443 
 444           __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
 445           __ verify_oop(O0_obj);
 446           __ mov(O0, I0);
 447           __ ret();
 448           __ delayed()->restore();
 449 
 450           __ bind(slow_path);
 451 
 452           // pop this frame so generate_stub_call can push its own
 453           __ restore();
 454         }
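
The retry_tlab / try_eden split is the point of this stub: tlab_allocate bumps a thread-private TLAB top with no synchronization, while eden_allocate bumps a top shared by all threads, which is why it takes the extra temp for an atomic update and is followed by an explicit incr_allocated_bytes (TLAB allocations are presumably accounted when the TLAB itself is retired, so only the eden path counts bytes here). A minimal standalone model of the two paths, with buffer names and sizes invented for illustration:

    #include <atomic>
    #include <cstddef>

    struct Tlab { char* top; char* end; };               // thread-private, no atomics needed
    static char eden_buf[1 << 20];                       // shared "eden" space
    static std::atomic<char*> eden_top{eden_buf};
    static char* const eden_end = eden_buf + sizeof(eden_buf);

    // Models __ tlab_allocate: plain bump of the private top.
    static void* tlab_allocate(Tlab& t, size_t size) {
      if (t.top + size > t.end) return nullptr;          // the stub branches to slow_path
      char* obj = t.top; t.top += size; return obj;
    }

    // Models __ eden_allocate: the shared top must be bumped atomically.
    static void* eden_allocate(size_t size) {
      char* old = eden_top.load();
      do {
        if (old + size > eden_end) return nullptr;       // the stub branches to slow_path
      } while (!eden_top.compare_exchange_weak(old, old + size));
      return old;
    }

    int main() {
      static char tlab_buf[1024];
      Tlab t{tlab_buf, tlab_buf + sizeof(tlab_buf)};
      void* a = tlab_allocate(t, 64);                    // retry_tlab path
      void* b = eden_allocate(64);                       // try_eden path
      return (a && b) ? 0 : 1;
    }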
 455 
 456         oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
 457         // I0->O0: new instance
 458       }
 459 
 460       break;
 461 
 462     case counter_overflow_id:
 463       // G4 contains bci, G5 contains method
 464       oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
 465       break;
 466 
 467     case new_type_array_id:
 468     case new_object_array_id:
 469       {
 470         Register G5_klass = G5; // Incoming
 471         Register G4_length = G4; // Incoming
 472         Register O0_obj   = O0; // Outgoing
 473 
 474         Address klass_lh(G5_klass, Klass::layout_helper_offset());

 475         assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
 476         assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
 477         // Use this offset to pick out an individual byte of the layout_helper:
 478         const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
 479                                                  - Klass::_lh_header_size_shift / BitsPerByte);
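
The two asserts above are what make the byte trick legal: the header-size field must start on a byte boundary and span exactly one byte of the 32-bit layout_helper. On big-endian SPARC, byte (BytesPerInt - 1) - _lh_header_size_shift / BitsPerByte of the in-memory int is then precisely that field; with a 16-bit shift (the value the "3 - 2" comment implies) the index is 1. A self-contained check of the arithmetic, with the field value chosen arbitrarily:

    #include <cstdint>

    int main() {
      const int BitsPerByte = 8, BytesPerInt = 4;
      const int lh_header_size_shift = 16;               // assumed: header size in bits 16..23
      const int byte_index =
          (BytesPerInt - 1) - lh_header_size_shift / BitsPerByte;   // 3 - 2 = 1

      const uint32_t layout_helper = 0x00AB0000;         // header-size byte = 0xAB
      // Big-endian image of layout_helper, most significant byte first:
      const unsigned char bytes[4] = { 0x00, 0xAB, 0x00, 0x00 };
      return bytes[byte_index] ==
                 ((layout_helper >> lh_header_size_shift) & 0xFF) ? 0 : 1;
    }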
 480 
 481         if (id == new_type_array_id) {
 482           __ set_info("new_type_array", dont_gc_arguments);
 483         } else {
 484           __ set_info("new_object_array", dont_gc_arguments);
 485         }
 486 
 487 #ifdef ASSERT
 488         // assert object type is really an array of the proper kind
 489         {
 490           Label ok;
 491           Register G3_t1 = G3;
 492           __ ld(klass_lh, G3_t1);
 493           __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
 494           int tag = ((id == new_type_array_id)


 574       break;
 575 
 576     case new_multi_array_id:
 577       { // O0: klass
 578         // O1: rank
 579         // O2: address of 1st dimension
 580         __ set_info("new_multi_array", dont_gc_arguments);
 581         oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
 582         // I0 -> O0: new multi array
 583       }
 584       break;
 585 
 586     case register_finalizer_id:
 587       {
 588         __ set_info("register_finalizer", dont_gc_arguments);
 589 
 590         // load the klass and check the has-finalizer flag
 591         Label register_finalizer;
 592         Register t = O1;
 593         __ load_klass(O0, t);
 594         __ ld(t, in_bytes(Klass::access_flags_offset()), t);
 595         __ set(JVM_ACC_HAS_FINALIZER, G3);
 596         __ andcc(G3, t, G0);
 597         __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
 598         __ delayed()->nop();
 599 
 600         // do a leaf return
 601         __ retl();
 602         __ delayed()->nop();
 603 
 604         __ bind(register_finalizer);
 605         OopMap* oop_map = save_live_registers(sasm);
 606         int call_offset = __ call_RT(noreg, noreg,
 607                                      CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
 608         oop_maps = new OopMapSet();
 609         oop_maps->add_gc_map(call_offset, oop_map);
 610 
 611         // Now restore all the live registers
 612         restore_live_registers(sasm);
 613 
 614         __ ret();
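
Worth noting in this stub is that it leaf-returns with retl before ever building a frame, so a non-finalizable object pays only a klass load, a bit test, and a branch; only the rare finalizable case saves live registers, records an oop map for GC, and calls into the runtime. The same shape in C++ (names and the flag value are assumptions for illustration):

    #include <cstdint>
    #include <cstdio>

    const uint32_t ACC_HAS_FINALIZER = 0x40000000;       // stands in for JVM_ACC_HAS_FINALIZER

    struct Klass { uint32_t access_flags; };

    static void runtime_register_finalizer(void* obj) {  // stands in for SharedRuntime::register_finalizer
      std::printf("queued %p for finalization\n", obj);
    }

    static void register_finalizer_stub(void* obj, const Klass* k) {
      if ((k->access_flags & ACC_HAS_FINALIZER) == 0)
        return;                                          // the stub's leaf return (retl): no frame built
      // Only finalizable classes reach the expensive path: save registers,
      // add a GC map at the call offset, and call out to the runtime.
      runtime_register_finalizer(obj);
    }

    int main() {
      Klass plain{0}, finalizable{ACC_HAS_FINALIZER};
      int x;
      register_finalizer_stub(&x, &plain);               // fast path: immediate return
      register_finalizer_stub(&x, &finalizable);         // slow path: runtime call
      return 0;
    }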

