// check info (currently consumed only by C1). If
// swap_reg_contains_mark is true then returns -1 as it is assumed
// the calling code has already passed any potential faults.
// Notes:
// - swap_reg and tmp_reg are scratched
// - Rtemp was (implicitly) scratched and can now be specified as tmp2
int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
                         bool swap_reg_contains_mark,
                         Register tmp2,
                         Label& done, Label& slow_case,
                         BiasedLockingCounters* counters = NULL);
void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
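
// Illustrative sketch (an assumption, not code from this file): how a
// lock-enter fast path might drive these helpers. Register choices
// (Robj, Rmark, R2) and the label wiring are examples only.
//
//   Label done, slow_case;
//   if (UseBiasedLocking) {
//     // Branches to done on bias success, to slow_case on contention;
//     // falls through to the thin-lock attempt otherwise.
//     biased_locking_enter(Robj, Rmark, R2, false, Rtemp, done, slow_case);
//   }
//   // ... thin-lock CAS attempt ...
//   bind(slow_case);
//   // ... runtime call ...
//   bind(done);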

// Building block for the CAS cases of biased locking: performs the CAS and
// records statistics. The optional slow_case label is used to transfer
// control if the CAS fails; otherwise leaves the condition codes set.
void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
                                   Register tmp, Label& slow_case, int* counter_addr);
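
// A minimal sketch of one such CAS case (register names and the counter
// slot are assumptions for illustration):
//
//   // old_mark_reg holds the current mark word, new_mark_reg the biased
//   // replacement; on CAS failure control transfers to slow_case.
//   biased_locking_enter_with_cas(Robj, Rold_mark, Rnew_mark, Rtemp,
//                                 slow_case, counter_addr /* statistics slot */);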

void resolve_jobject(Register value, Register tmp1, Register tmp2);

#if INCLUDE_ALL_GCS
// G1 pre-barrier.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
// If store_addr != noreg, then the previous value is loaded from [store_addr];
// in that case the store_addr and new_val registers are preserved;
// otherwise the pre_val register is preserved.
void g1_write_barrier_pre(Register store_addr,
                          Register new_val,
                          Register pre_val,
                          Register tmp1,
                          Register tmp2);

// G1 post-barrier.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
void g1_write_barrier_post(Register store_addr,
                           Register new_val,
                           Register tmp1,
                           Register tmp2,
                           Register tmp3);
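
// Illustrative sketch (an assumption, not code from this file): wrapping a
// reference store with the barrier pair. Register names are examples only.
//
//   // Pre-barrier: with store_addr != noreg the previous value is loaded
//   // from [Rstore_addr] internally, so pre_val can be noreg here.
//   g1_write_barrier_pre(Rstore_addr, Rnew_val, noreg, Rtmp1, Rtmp2);
//   str(Rnew_val, Address(Rstore_addr));
//   // Post-barrier: dirties the card for the cross-region reference.
//   g1_write_barrier_post(Rstore_addr, Rnew_val, Rtmp1, Rtmp2, Rtmp3);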
#endif // INCLUDE_ALL_GCS

#ifndef AARCH64
// Canonical 32-bit ARM no-op encoding.
void nop() {
  mov(R0, R0);
}

// Push one register: pre-indexed store that decrements SP by a word.
void push(Register rd, AsmCondition cond = al) {
  assert(rd != SP, "unpredictable instruction");
  str(rd, Address(SP, -wordSize, pre_indexed), cond);
}

// Push a register set with STMDB (store multiple, decrement before).
void push(RegisterSet reg_set, AsmCondition cond = al) {
  assert(!reg_set.contains(SP), "unpredictable instruction");
  stmdb(SP, reg_set, writeback, cond);
}

// Pop one register: post-indexed load that increments SP by a word.
void pop(Register rd, AsmCondition cond = al) {
  assert(rd != SP, "unpredictable instruction");
  ldr(rd, Address(SP, wordSize, post_indexed), cond);
}
#endif
}


// Klass oop manipulations (compressed class pointers are handled where enabled).

#ifdef AARCH64
void load_klass(Register dst_klass, Register src_oop);
#else
void load_klass(Register dst_klass, Register src_oop, AsmCondition cond = al);
#endif // AARCH64

void store_klass(Register src_klass, Register dst_oop);

#ifdef AARCH64
void store_klass_gap(Register dst);
#endif // AARCH64
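
// Illustrative sketch (an assumption, not code from this file): a type
// check loading the receiver's klass for comparison. Robj, Rklass and
// slow_path are example names.
//
//   load_klass(Rtemp, Robj);      // Rtemp := klass of the object in Robj
//   cmp(Rtemp, Rklass);
//   b(slow_path, ne);             // bail out on mismatch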

// oop manipulations

void load_heap_oop(Register dst, Address src);
void store_heap_oop(Register src, Address dst);
void store_heap_oop(Address dst, Register src) {
  store_heap_oop(src, dst);
}
void store_heap_oop_null(Register src, Address dst);

#ifdef AARCH64
void encode_heap_oop(Register dst, Register src);
void encode_heap_oop(Register r) {
  encode_heap_oop(r, r);
}
void decode_heap_oop(Register dst, Register src);
void decode_heap_oop(Register r) {
  decode_heap_oop(r, r);
}
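
// For reference, the compressed-oops arithmetic these helpers implement
// (a sketch; the actual base and shift come from the current compressed
// oops mode, and a zero base or shift lets the matching term drop out):
//
//   narrow  = (uint32_t)((address - heap_base) >> shift);   // encode
//   address = heap_base + ((uintptr_t)narrow << shift);     // decode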

#ifdef COMPILER2
void encode_heap_oop_not_null(Register dst, Register src);
void decode_heap_oop_not_null(Register dst, Register src);

void set_narrow_klass(Register dst, Klass* k);
void set_narrow_oop(Register dst, jobject obj);
#endif

void encode_klass_not_null(Register r);