332 // explicit NULL check not needed since load from [klass_offset] causes a trap
333 // check against inline cache
334 assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
335 int start_offset = offset();
336
337 if (UseCompressedClassPointers) {
338 load_klass(rscratch1, receiver);
339 cmpptr(rscratch1, iCache);
340 } else {
341 cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
342 }
343 // if icache check fails, then jump to runtime routine
344 // Note: RECEIVER must still contain the receiver!
345 jump_cc(Assembler::notEqual,
346 RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
347 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
348 assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
349 }
350
351
352 void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
// Emit the C1 method prologue: bang the stack to surface any overflow
// up-front, save the caller's rbp, optionally clean a dirty x87 stack
// left behind by C2, then allocate the fixed frame.
353   // Make sure there is enough stack space for this method's activation.
354   // Note that we do this before doing an enter(). This matches the
355   // ordering of C2's stack overflow check / rsp decrement and allows
356   // the SharedRuntime stack overflow handling to be consistent
357   // between the two compilers.
// NOTE(review): this bangs only frame_size_in_bytes; if the activation
// needs extra space beyond the fixed frame (e.g. outgoing arguments),
// that portion is not banged here — confirm against callers.
358   generate_stack_overflow_check(frame_size_in_bytes);
359
// Save the caller's frame pointer. Together with the decrement below this
// matches the layout enter() would have produced.
360   push(rbp);
361 #ifdef TIERED
362   // c2 leaves fpu stack dirty. Clean it on entry
363   if (UseSSE < 2 ) {
364     empty_FPU_stack();
365   }
366 #endif // TIERED
// Allocate the fixed portion of the frame below the saved rbp.
367   decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0
368 }
369
370
371 void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
// Emit the epilogue counterpart of build_frame: release the fixed frame
// and restore the caller's saved rbp. frame_size_in_bytes must match the
// value used when the frame was built.
372   increment(rsp, frame_size_in_bytes); // Does not emit code for frame_size == 0
373   pop(rbp);
374 }
375
376
377 void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
378 if (C1Breakpoint) int3();
|
332 // explicit NULL check not needed since load from [klass_offset] causes a trap
333 // check against inline cache
334 assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
335 int start_offset = offset();
336
337 if (UseCompressedClassPointers) {
338 load_klass(rscratch1, receiver);
339 cmpptr(rscratch1, iCache);
340 } else {
341 cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
342 }
343 // if icache check fails, then jump to runtime routine
344 // Note: RECEIVER must still contain the receiver!
345 jump_cc(Assembler::notEqual,
346 RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
347 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
348 assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
349 }
350
351
352 void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
// Emit the C1 method prologue: bang bang_size_in_bytes of stack to surface
// any overflow up-front, save the caller's rbp, optionally clean a dirty
// x87 stack left behind by C2, then allocate the fixed frame.
// bang_size_in_bytes must cover at least the frame allocated below.
353   assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
354   // Make sure there is enough stack space for this method's activation.
355   // Note that we do this before doing an enter(). This matches the
356   // ordering of C2's stack overflow check / rsp decrement and allows
357   // the SharedRuntime stack overflow handling to be consistent
358   // between the two compilers.
359   generate_stack_overflow_check(bang_size_in_bytes);
360
// Save the caller's frame pointer. Together with the decrement below this
// matches the layout enter() would have produced.
361   push(rbp);
362 #ifdef TIERED
363   // c2 leaves fpu stack dirty. Clean it on entry
364   if (UseSSE < 2 ) {
365     empty_FPU_stack();
366   }
367 #endif // TIERED
// Allocate only the fixed frame; the banged-but-unallocated remainder is
// left for later pushes/calls.
368   decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0
369 }
370
371
372 void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
// Emit the epilogue counterpart of build_frame: release the fixed frame
// and restore the caller's saved rbp. frame_size_in_bytes must match the
// value used when the frame was built.
373   increment(rsp, frame_size_in_bytes); // Does not emit code for frame_size == 0
374   pop(rbp);
375 }
376
377
378 void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
379 if (C1Breakpoint) int3();
|