// --- Interpreter fast-path allocation (excerpt). The enclosing function and
// --- the labels slow_case / initialize_header it jumps to are declared
// --- outside this view. Registers as established by the code below:
// --- rcx = InstanceKlass*, rdx = layout helper (instance size in bytes on
// --- the fast path), rax/rbx = allocation begin/end pointers.
// NOTE(review): every line carries an embedded original source line number
// (3855..3950) from the paste that produced this file; they are not code.
3855
3856 // make sure klass is initialized & doesn't have finalizer
3857 // make sure klass is fully initialized
// Any init_state other than fully_initialized (e.g. class still being
// initialized by another thread) takes the slow path.
3858 __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3859 __ jcc(Assembler::notEqual, slow_case);
3860
3861 // get instance_size in InstanceKlass (scaled to a count of bytes)
3862 __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3863 // test to see if it has a finalizer or is malformed in some way
3864 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3865 __ jcc(Assembler::notZero, slow_case);
3866
3867 //
3868 // Allocate the instance
3869 // 1) Try to allocate in the TLAB
3870 // 2) if fail and the object is large allocate in the shared Eden
3871 // 3) if the above fails (or is not applicable), go to a slow case
3872 // (creates a new TLAB, etc.)
3873
// Cache whether the heap supports lock-free bump-the-pointer allocation in a
// shared Eden; used both to pick jump targets and to guard allocate_shared.
3874 const bool allow_shared_alloc =
3875 Universe::heap()->supports_inline_contig_alloc();
3876
// LP64 uses the dedicated r15_thread register and needs no get_thread() call.
// NOTE(review): on 32-bit this clobbers rcx (the klass register); later code
// only reuses rcx as a zero register (see the xorl below), so that appears
// intentional — confirm against the rest of the function.
3877 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
3878 #ifndef _LP64
3879 if (UseTLAB || allow_shared_alloc) {
3880 __ get_thread(thread);
3881 }
3882 #endif // _LP64
3883
3884 if (UseTLAB) {
// rax = current TLAB top, rbx = top + size (prospective new top).
3885 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3886 __ lea(rbx, Address(rax, rdx, Address::times_1));
// If the new top would pass tlab_end the object does not fit: fall back to
// the shared Eden when available, otherwise the slow path.
3887 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3888 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
// Object fits: commit the allocation by bumping the TLAB top pointer.
3889 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3890 if (ZeroTLAB) {
3891 // the fields have been already cleared
3892 __ jmp(initialize_header);
3893 } else {
3894 // initialize both the header and fields
3895 __ jmp(initialize_object);
3896 }
3897 }
3898
3899 // Allocation in the shared Eden, if allowed.
3900 //
3901 // rdx: instance size in bytes
3902 if (allow_shared_alloc) {
3903 __ bind(allocate_shared);
3904
3905 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3906 ExternalAddress heap_end((address)Universe::heap()->end_addr());
3907
// CAS loop: reload top, compute object end, bail out past heap_end, then
// try to install the new top atomically; retry on contention.
3908 Label retry;
3909 __ bind(retry);
3910 __ movptr(rax, heap_top);
3911 __ lea(rbx, Address(rax, rdx, Address::times_1));
3912 __ cmpptr(rbx, heap_end);
3913 __ jcc(Assembler::above, slow_case);
3914
3915 // Compare rax, with the top addr, and if still equal, store the new
3916 // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
3917 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3918 //
3919 // rax,: object begin
3920 // rbx,: object end
3921 // rdx: instance size in bytes
3922 __ locked_cmpxchgptr(rbx, heap_top);
3923
3924 // if someone beat us on the allocation, try again, otherwise continue
3925 __ jcc(Assembler::notEqual, retry);
3926
// Account the allocation against the thread's allocated-bytes counter.
3927 __ incr_allocated_bytes(thread, rdx, 0);
3928 }
3929
// NOTE(review): this re-queries the heap; the condition is equivalent to
// (UseTLAB || allow_shared_alloc) computed above — consider reusing the local.
3930 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3931 // The object is initialized before the header. If the object size is
3932 // zero, go directly to the header initialization.
3933 __ bind(initialize_object);
// Strip the header size; if nothing remains there are no fields to clear.
3934 __ decrement(rdx, sizeof(oopDesc));
3935 __ jcc(Assembler::zero, initialize_header);
3936
3937 // Initialize topmost object field, divide rdx by 8, check if odd and
3938 // test if zero.
3939 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3940 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3941
3942 // rdx must have been multiple of 8
3943 #ifdef ASSERT
3944 // make sure rdx was multiple of 8
3945 Label L;
3946 // Ignore partial flag stall after shrl() since it is debug VM
// SHR leaves the last bit shifted out in CF, so carry set here means the
// size was not 8-byte aligned.
3947 __ jccb(Assembler::carryClear, L);
3948 __ stop("object size is not multiple of 2 - adjust this code");
3949 __ bind(L);
3950 // rdx must be > 0, no extra check needed here
// (excerpt ends mid-#ifdef ASSERT; the field-zeroing loop and the header
// initialization continue outside this view)
|
// --- Interpreter fast-path allocation (excerpt). Second copy of the same
// --- fragment found earlier in this file; it differs only in reaching the
// --- heap via GC::gc()->heap() instead of Universe::heap(). The enclosing
// --- function and the labels slow_case / initialize_header are declared
// --- outside this view. Registers per the code below: rcx = InstanceKlass*,
// --- rdx = layout helper (instance size in bytes on the fast path),
// --- rax/rbx = allocation begin/end pointers.
// NOTE(review): every line carries an embedded original source line number
// (3855..3950) from the paste that produced this file; they are not code.
3855
3856 // make sure klass is initialized & doesn't have finalizer
3857 // make sure klass is fully initialized
// Any init_state other than fully_initialized (e.g. class still being
// initialized by another thread) takes the slow path.
3858 __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3859 __ jcc(Assembler::notEqual, slow_case);
3860
3861 // get instance_size in InstanceKlass (scaled to a count of bytes)
3862 __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3863 // test to see if it has a finalizer or is malformed in some way
3864 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3865 __ jcc(Assembler::notZero, slow_case);
3866
3867 //
3868 // Allocate the instance
3869 // 1) Try to allocate in the TLAB
3870 // 2) if fail and the object is large allocate in the shared Eden
3871 // 3) if the above fails (or is not applicable), go to a slow case
3872 // (creates a new TLAB, etc.)
3873
// Cache whether the heap supports lock-free bump-the-pointer allocation in a
// shared Eden; used both to pick jump targets and to guard allocate_shared.
3874 const bool allow_shared_alloc =
3875 GC::gc()->heap()->supports_inline_contig_alloc();
3876
// LP64 uses the dedicated r15_thread register and needs no get_thread() call.
// NOTE(review): on 32-bit this clobbers rcx (the klass register); later code
// only reuses rcx as a zero register (see the xorl below), so that appears
// intentional — confirm against the rest of the function.
3877 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
3878 #ifndef _LP64
3879 if (UseTLAB || allow_shared_alloc) {
3880 __ get_thread(thread);
3881 }
3882 #endif // _LP64
3883
3884 if (UseTLAB) {
// rax = current TLAB top, rbx = top + size (prospective new top).
3885 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3886 __ lea(rbx, Address(rax, rdx, Address::times_1));
// If the new top would pass tlab_end the object does not fit: fall back to
// the shared Eden when available, otherwise the slow path.
3887 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3888 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
// Object fits: commit the allocation by bumping the TLAB top pointer.
3889 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3890 if (ZeroTLAB) {
3891 // the fields have been already cleared
3892 __ jmp(initialize_header);
3893 } else {
3894 // initialize both the header and fields
3895 __ jmp(initialize_object);
3896 }
3897 }
3898
3899 // Allocation in the shared Eden, if allowed.
3900 //
3901 // rdx: instance size in bytes
3902 if (allow_shared_alloc) {
3903 __ bind(allocate_shared);
3904
3905 ExternalAddress heap_top((address)GC::gc()->heap()->top_addr());
3906 ExternalAddress heap_end((address)GC::gc()->heap()->end_addr());
3907
// CAS loop: reload top, compute object end, bail out past heap_end, then
// try to install the new top atomically; retry on contention.
3908 Label retry;
3909 __ bind(retry);
3910 __ movptr(rax, heap_top);
3911 __ lea(rbx, Address(rax, rdx, Address::times_1));
3912 __ cmpptr(rbx, heap_end);
3913 __ jcc(Assembler::above, slow_case);
3914
3915 // Compare rax, with the top addr, and if still equal, store the new
3916 // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
3917 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3918 //
3919 // rax,: object begin
3920 // rbx,: object end
3921 // rdx: instance size in bytes
3922 __ locked_cmpxchgptr(rbx, heap_top);
3923
3924 // if someone beat us on the allocation, try again, otherwise continue
3925 __ jcc(Assembler::notEqual, retry);
3926
// Account the allocation against the thread's allocated-bytes counter.
3927 __ incr_allocated_bytes(thread, rdx, 0);
3928 }
3929
// NOTE(review): this re-queries the heap; the condition is equivalent to
// (UseTLAB || allow_shared_alloc) computed above — consider reusing the local.
3930 if (UseTLAB || GC::gc()->heap()->supports_inline_contig_alloc()) {
3931 // The object is initialized before the header. If the object size is
3932 // zero, go directly to the header initialization.
3933 __ bind(initialize_object);
// Strip the header size; if nothing remains there are no fields to clear.
3934 __ decrement(rdx, sizeof(oopDesc));
3935 __ jcc(Assembler::zero, initialize_header);
3936
3937 // Initialize topmost object field, divide rdx by 8, check if odd and
3938 // test if zero.
3939 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3940 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3941
3942 // rdx must have been multiple of 8
3943 #ifdef ASSERT
3944 // make sure rdx was multiple of 8
3945 Label L;
3946 // Ignore partial flag stall after shrl() since it is debug VM
// SHR leaves the last bit shifted out in CF, so carry set here means the
// size was not 8-byte aligned.
3947 __ jccb(Assembler::carryClear, L);
3948 __ stop("object size is not multiple of 2 - adjust this code");
3949 __ bind(L);
3950 // rdx must be > 0, no extra check needed here
// (excerpt ends mid-#ifdef ASSERT; the field-zeroing loop and the header
// initialization continue outside this view)
|