void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj, Register obj_end, Register tmp1, Register tmp2,
                                        RegisterOrConstant size_expression, Label& slow_case) {
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
    return;
  }

  CollectedHeap* ch = Universe::heap();

  const Register top_addr = tmp1;
  const Register heap_end = tmp2;

  if (size_expression.is_register()) {
    assert_different_registers(obj, obj_end, top_addr, heap_end, size_expression.as_register());
  } else {
    assert_different_registers(obj, obj_end, top_addr, heap_end);
  }

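  // Materialize the address of the eden top pointer in top_addr: with movw/movt support it is
  // encoded directly (and tracked through the eden_top relocation); otherwise it is loaded from
  // a slot cached in the current JavaThread.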
  bool load_const = VM_Version::supports_movw();
  if (load_const) {
    __ mov_address(top_addr, (address)Universe::heap()->top_addr(), symbolic_Relocation::eden_top_reference);
  } else {
    __ ldr(top_addr, Address(Rthread, JavaThread::heap_top_addr_offset()));
  }
  // Calculate new heap_top by adding the size of the object
  Label retry;
  __ bind(retry);
  __ ldr(obj, Address(top_addr));
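  // heap_end is loaded relative to top_addr using the constant offset between end_addr() and
  // top_addr(), so a single base register covers both heap globals.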
  __ ldr(heap_end, Address(top_addr, (intptr_t)ch->end_addr() - (intptr_t)ch->top_addr()));
  __ add_rc(obj_end, obj, size_expression);
  // Check if obj_end wrapped around, i.e., obj_end < obj. If yes, jump to the slow case.
  __ cmp(obj_end, obj);
  __ b(slow_case, lo);
  // Take the slow case if the new top would exceed the heap end; otherwise try to update heap_top.
  __ cmp(obj_end, heap_end);
  __ b(slow_case, hi);

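  // Atomically publish obj_end as the new heap top; if another thread changed the top since it
  // was read, the CAS fails and the allocation is retried.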
  __ atomic_cas_bool(obj, obj_end, top_addr, 0, heap_end/*scratched*/);
  __ b(retry, ne);

  incr_allocated_bytes(masm, size_expression, tmp1);
}
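
// For reference, a minimal C-like sketch of the fast path emitted above (illustrative pseudocode
// only; heap_top and heap_end stand for the locations returned by top_addr() and end_addr()):
//
//   do {
//     obj     = *heap_top;
//     obj_end = obj + size;
//     if (obj_end < obj)       goto slow_case;   // size wrapped around the address space
//     if (obj_end > *heap_end) goto slow_case;   // eden exhausted
//   } while (!CAS(heap_top, obj, obj_end));      // lost the race: reload and retry
//   incr_allocated_bytes(size);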