src/cpu/x86/vm/assembler_x86.cpp

--- old version (before 7118863) ---

4615     pop(tmp_reg);
4616   }
4617   jcc(Assembler::notEqual, cas_label);
4618   // The bias pattern is present in the object's header. Need to check
4619   // whether the bias owner and the epoch are both still current.
4620   // Note that because there is no current thread register on x86 we
4621   // need to store off the mark word we read out of the object to
4622   // avoid reloading it and needing to recheck invariants below. This
4623   // store is unfortunate but it makes the overall code shorter and
4624   // simpler.
4625   movl(saved_mark_addr, swap_reg);
4626   if (need_tmp_reg) {
4627     push(tmp_reg);
4628   }
4629   get_thread(tmp_reg);
4630   xorl(swap_reg, tmp_reg);
4631   if (swap_reg_contains_mark) {
4632     null_check_offset = offset();
4633   }
4634   movl(tmp_reg, klass_addr);
4635   xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
4636   andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
4637   if (need_tmp_reg) {
4638     pop(tmp_reg);
4639   }
4640   if (counters != NULL) {
4641     cond_inc32(Assembler::zero,
4642                ExternalAddress((address)counters->biased_lock_entry_count_addr()));
4643   }
4644   jcc(Assembler::equal, done);
4645 
4646   Label try_revoke_bias;
4647   Label try_rebias;
4648 
4649   // At this point we know that the header has the bias pattern and
4650   // that we are not the bias owner in the current epoch. We need to
4651   // figure out more details about the state of the header in order to
4652   // know what operations can be legally performed on the object's
4653   // header.
4654 
4655   // If the low three bits in the xor result aren't clear, that means


4702   if (slow_case != NULL) {
4703     jcc(Assembler::notZero, *slow_case);
4704   }
4705   jmp(done);
4706 
4707   bind(try_rebias);
4708   // At this point we know the epoch has expired, meaning that the
4709   // current "bias owner", if any, is actually invalid. Under these
4710   // circumstances _only_, we are allowed to use the current header's
4711   // value as the comparison value when doing the cas to acquire the
4712   // bias in the current epoch. In other words, we allow transfer of
4713   // the bias from one thread to another directly in this situation.
4714   //
4715   // FIXME: due to a lack of registers we currently blow away the age
4716   // bits in this situation. Should attempt to preserve them.
4717   if (need_tmp_reg) {
4718     push(tmp_reg);
4719   }
4720   get_thread(tmp_reg);
4721   movl(swap_reg, klass_addr);
4722   orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
4723   movl(swap_reg, saved_mark_addr);
4724   if (os::is_MP()) {
4725     lock();
4726   }
4727   cmpxchgptr(tmp_reg, Address(obj_reg, 0));
4728   if (need_tmp_reg) {
4729     pop(tmp_reg);
4730   }
4731   // If the biasing toward our thread failed, then another thread
4732   // succeeded in biasing it toward itself and we need to revoke that
4733   // bias. The revocation will occur in the runtime in the slow case.
4734   if (counters != NULL) {
4735     cond_inc32(Assembler::zero,
4736                ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
4737   }
4738   if (slow_case != NULL) {
4739     jcc(Assembler::notZero, *slow_case);
4740   }
4741   jmp(done);
4742 
4743   bind(try_revoke_bias);
4744   // The prototype mark in the klass doesn't have the bias bit set any
4745   // more, indicating that objects of this data type are not supposed
4746   // to be biased any more. We are going to try to reset the mark of
4747   // this object to the prototype value and fall through to the
4748   // CAS-based locking scheme. Note that if our CAS fails, it means
4749   // that another thread raced us for the privilege of revoking the
4750   // bias of this particular object, so it's okay to continue in the
4751   // normal locking code.
4752   //
4753   // FIXME: due to a lack of registers we currently blow away the age
4754   // bits in this situation. Should attempt to preserve them.
4755   movl(swap_reg, saved_mark_addr);
4756   if (need_tmp_reg) {
4757     push(tmp_reg);
4758   }
4759   movl(tmp_reg, klass_addr);
4760   movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
4761   if (os::is_MP()) {
4762     lock();
4763   }
4764   cmpxchgptr(tmp_reg, Address(obj_reg, 0));
4765   if (need_tmp_reg) {
4766     pop(tmp_reg);
4767   }
4768   // Fall through to the normal CAS-based lock, because no matter what
4769   // the result of the above CAS, some thread must have succeeded in
4770   // removing the bias bit from the object's header.
4771   if (counters != NULL) {
4772     cond_inc32(Assembler::zero,
4773                ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
4774   }
4775 
4776   bind(cas_label);
4777 
4778   return null_check_offset;
4779 }
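
// ---------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of this file): the test
// ending at the jcc(equal, done) above relies on a bit trick.  A biased
// mark word has the layout [thread | epoch | age | biased_lock | lock],
// and the klass's prototype header carries the same bias pattern and
// current epoch with a null thread.  XORing the mark with the current
// thread and with the prototype therefore cancels everything except the
// age bits, which are masked off because they may legitimately differ.
// A hedged C++ model of the test (markword_t is hypothetical):

#include <cstdint>

typedef uintptr_t markword_t;
const markword_t age_mask_in_place = markword_t(0xF) << 3; // 4 age bits

// True iff 'mark' is biased to 'thread' in the epoch recorded in the
// klass 'prototype' header; this is the zero-flag test the assembly
// performs before cond_inc32 and the jcc to 'done'.
bool bias_owner_and_epoch_current(markword_t mark, markword_t thread,
                                  markword_t prototype) {
  markword_t x = (mark ^ thread) ^ prototype; // same xor sequence as above
  return (x & ~age_mask_in_place) == 0;       // ignore only the age bits
}
// ---------------------------------------------------------------------
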
4780 void MacroAssembler::call_VM_leaf_base(address entry_point,


7666                                                    Label* L_success,
7667                                                    Label* L_failure,
7668                                                    Label* L_slow_path,
7669                                         RegisterOrConstant super_check_offset) {
7670   assert_different_registers(sub_klass, super_klass, temp_reg);
7671   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
7672   if (super_check_offset.is_register()) {
7673     assert_different_registers(sub_klass, super_klass,
7674                                super_check_offset.as_register());
7675   } else if (must_load_sco) {
7676     assert(temp_reg != noreg, "supply either a temp or a register offset");
7677   }
7678 
7679   Label L_fallthrough;
7680   int label_nulls = 0;
7681   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
7682   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
7683   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
7684   assert(label_nulls <= 1, "at most one NULL in the batch");
7685 
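
// Editor's note (illustrative sketch, not part of this file): the block
// above is a recurring HotSpot idiom.  A caller may pass NULL for at
// most one of the outcome labels, meaning "fall through on that
// outcome"; the NULL is patched to the local L_fallthrough label bound
// at the end of the routine.  Two NULLs are forbidden because the two
// outcomes would then become indistinguishable.  A hedged sketch of a
// call site (label and register names hypothetical):
//
//   Label L_ok, L_slow;
//   // branch to L_ok on success, fall through into failure handling:
//   check_klass_subtype_fast_path(sub, super, temp,
//                                 &L_ok, /*L_failure*/ NULL, &L_slow);
//   ... failure code ...
//   bind(L_ok);
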
7686   int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
7687                    Klass::secondary_super_cache_offset_in_bytes());
7688   int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
7689                     Klass::super_check_offset_offset_in_bytes());
7690   Address super_check_offset_addr(super_klass, sco_offset);
7691 
7692   // Hacked jcc, which "knows" that L_fallthrough, at least, is in
7693   // range of a jccb.  If this routine grows larger, reconsider at
7694   // least some of these.
7695 #define local_jcc(assembler_cond, label)                                \
7696   if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
7697   else                             jcc( assembler_cond, label) /*omit semi*/
7698 
7699   // Hacked jmp, which may only be used just before L_fallthrough.
7700 #define final_jmp(label)                                                \
7701   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
7702   else                            jmp(label)                /*omit semi*/
7703 
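
// Editor's note (illustrative sketch, not part of this file): jccb emits
// the 2-byte short form of a conditional jump (opcode plus an 8-bit
// signed displacement), while jcc falls back to the 6-byte near form
// with a 32-bit displacement.  The macro is "hacked" in that it simply
// assumes L_fallthrough stays within the +/-127 byte reach of the short
// form, which holds only while this routine stays small.  A hedged
// sketch of the reachability test an assembler would otherwise apply:

#include <cstdint>

// True iff a short jump ending at branch_pc + 2 can reach 'target'.
bool fits_in_rel8(intptr_t branch_pc, intptr_t target) {
  intptr_t disp = target - (branch_pc + 2); // rel8 is from the next insn
  return disp >= -128 && disp <= 127;
}
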
7704   // If the pointers are equal, we are done (e.g., String[] elements).
7705   // This self-check enables sharing of secondary supertype arrays among
7706   // non-primary types such as array-of-interface.  Otherwise, each such
7707   // type would need its own customized SSA.
7708   // We move this check to the front of the fast path because many
7709   // type checks are in fact trivially successful in this manner,


7767 
7768 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
7769                                                    Register super_klass,
7770                                                    Register temp_reg,
7771                                                    Register temp2_reg,
7772                                                    Label* L_success,
7773                                                    Label* L_failure,
7774                                                    bool set_cond_codes) {
7775   assert_different_registers(sub_klass, super_klass, temp_reg);
7776   if (temp2_reg != noreg)
7777     assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
7778 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
7779 
7780   Label L_fallthrough;
7781   int label_nulls = 0;
7782   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
7783   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
7784   assert(label_nulls <= 1, "at most one NULL in the batch");
7785 
7786   // a couple of useful fields in sub_klass:
7787   int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
7788                    Klass::secondary_supers_offset_in_bytes());
7789   int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
7790                    Klass::secondary_super_cache_offset_in_bytes());
7791   Address secondary_supers_addr(sub_klass, ss_offset);
7792   Address super_cache_addr(     sub_klass, sc_offset);
7793 
7794   // Do a linear scan of the secondary super-klass chain.
7795   // This code is rarely used, so simplicity is a virtue here.
7796   // The repne_scan instruction uses fixed registers, which we must spill.
7797   // Don't worry too much about pre-existing connections with the input regs.
7798 
7799   assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
7800   assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
7801 
7802   // Get super_klass value into rax (even if it was in rdi or rcx).
7803   bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
7804   if (super_klass != rax || UseCompressedOops) {
7805     if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
7806     mov(rax, super_klass);
7807   }
7808   if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
7809   if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
7810 
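
// Editor's note (descriptive, not part of this file): repne scas
// implicitly uses rax (the value compared), rcx (the remaining count)
// and rdi (the scan pointer), so the code above must spill whichever of
// those still carry caller state; registers already donated as scratch
// (IS_A_TEMP) need no saving.  The pushed_* flags let the epilogue,
// elided here, restore in reverse order on every exit path, along the
// lines of:
//
//   if (pushed_rdi) pop(rdi);
//   if (pushed_rcx) pop(rcx);
//   if (pushed_rax) pop(rax);
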


8468 }
8469 
8470 void MacroAssembler::load_klass(Register dst, Register src) {
8471 #ifdef _LP64
8472   if (UseCompressedOops) {
8473     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
8474     decode_heap_oop_not_null(dst);
8475   } else
8476 #endif
8477     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
8478 }
8479 
8480 void MacroAssembler::load_prototype_header(Register dst, Register src) {
8481 #ifdef _LP64
8482   if (UseCompressedOops) {
8483     assert (Universe::heap() != NULL, "java heap should be initialized");
8484     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
8485     if (Universe::narrow_oop_shift() != 0) {
8486       assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
8487       if (LogMinObjAlignmentInBytes == Address::times_8) {
8488         movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
8489       } else {
8490         // OK to use shift since we don't need to preserve flags.
8491         shlq(dst, LogMinObjAlignmentInBytes);
8492         movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
8493       }
8494     } else {
8495       movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
8496     }
8497   } else
8498 #endif
8499   {
8500     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
8501     movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
8502   }
8503 }
8504 
8505 void MacroAssembler::store_klass(Register dst, Register src) {
8506 #ifdef _LP64
8507   if (UseCompressedOops) {
8508     encode_heap_oop_not_null(src);
8509     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
8510   } else
8511 #endif
8512     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
8513 }
8514 
8515 void MacroAssembler::load_heap_oop(Register dst, Address src) {
8516 #ifdef _LP64
8517   if (UseCompressedOops) {
8518     movl(dst, src);
8519     decode_heap_oop(dst);
8520   } else
8521 #endif


--- new version (after 7118863) ---

4615     pop(tmp_reg);
4616   }
4617   jcc(Assembler::notEqual, cas_label);
4618   // The bias pattern is present in the object's header. Need to check
4619   // whether the bias owner and the epoch are both still current.
4620   // Note that because there is no current thread register on x86 we
4621   // need to store off the mark word we read out of the object to
4622   // avoid reloading it and needing to recheck invariants below. This
4623   // store is unfortunate but it makes the overall code shorter and
4624   // simpler.
4625   movl(saved_mark_addr, swap_reg);
4626   if (need_tmp_reg) {
4627     push(tmp_reg);
4628   }
4629   get_thread(tmp_reg);
4630   xorl(swap_reg, tmp_reg);
4631   if (swap_reg_contains_mark) {
4632     null_check_offset = offset();
4633   }
4634   movl(tmp_reg, klass_addr);
4635   xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes()));
4636   andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
4637   if (need_tmp_reg) {
4638     pop(tmp_reg);
4639   }
4640   if (counters != NULL) {
4641     cond_inc32(Assembler::zero,
4642                ExternalAddress((address)counters->biased_lock_entry_count_addr()));
4643   }
4644   jcc(Assembler::equal, done);
4645 
4646   Label try_revoke_bias;
4647   Label try_rebias;
4648 
4649   // At this point we know that the header has the bias pattern and
4650   // that we are not the bias owner in the current epoch. We need to
4651   // figure out more details about the state of the header in order to
4652   // know what operations can be legally performed on the object's
4653   // header.
4654 
4655   // If the low three bits in the xor result aren't clear, that means


4702   if (slow_case != NULL) {
4703     jcc(Assembler::notZero, *slow_case);
4704   }
4705   jmp(done);
4706 
4707   bind(try_rebias);
4708   // At this point we know the epoch has expired, meaning that the
4709   // current "bias owner", if any, is actually invalid. Under these
4710   // circumstances _only_, we are allowed to use the current header's
4711   // value as the comparison value when doing the cas to acquire the
4712   // bias in the current epoch. In other words, we allow transfer of
4713   // the bias from one thread to another directly in this situation.
4714   //
4715   // FIXME: due to a lack of registers we currently blow away the age
4716   // bits in this situation. Should attempt to preserve them.
4717   if (need_tmp_reg) {
4718     push(tmp_reg);
4719   }
4720   get_thread(tmp_reg);
4721   movl(swap_reg, klass_addr);
4722   orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes()));
4723   movl(swap_reg, saved_mark_addr);
4724   if (os::is_MP()) {
4725     lock();
4726   }
4727   cmpxchgptr(tmp_reg, Address(obj_reg, 0));
4728   if (need_tmp_reg) {
4729     pop(tmp_reg);
4730   }
4731   // If the biasing toward our thread failed, then another thread
4732   // succeeded in biasing it toward itself and we need to revoke that
4733   // bias. The revocation will occur in the runtime in the slow case.
4734   if (counters != NULL) {
4735     cond_inc32(Assembler::zero,
4736                ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
4737   }
4738   if (slow_case != NULL) {
4739     jcc(Assembler::notZero, *slow_case);
4740   }
4741   jmp(done);
4742 
4743   bind(try_revoke_bias);
4744   // The prototype mark in the klass doesn't have the bias bit set any
4745   // more, indicating that objects of this data type are not supposed
4746   // to be biased any more. We are going to try to reset the mark of
4747   // this object to the prototype value and fall through to the
4748   // CAS-based locking scheme. Note that if our CAS fails, it means
4749   // that another thread raced us for the privilege of revoking the
4750   // bias of this particular object, so it's okay to continue in the
4751   // normal locking code.
4752   //
4753   // FIXME: due to a lack of registers we currently blow away the age
4754   // bits in this situation. Should attempt to preserve them.
4755   movl(swap_reg, saved_mark_addr);
4756   if (need_tmp_reg) {
4757     push(tmp_reg);
4758   }
4759   movl(tmp_reg, klass_addr);
4760   movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes()));
4761   if (os::is_MP()) {
4762     lock();
4763   }
4764   cmpxchgptr(tmp_reg, Address(obj_reg, 0));
4765   if (need_tmp_reg) {
4766     pop(tmp_reg);
4767   }
4768   // Fall through to the normal CAS-based lock, because no matter what
4769   // the result of the above CAS, some thread must have succeeded in
4770   // removing the bias bit from the object's header.
4771   if (counters != NULL) {
4772     cond_inc32(Assembler::zero,
4773                ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
4774   }
4775 
4776   bind(cas_label);
4777 
4778   return null_check_offset;
4779 }
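
// ---------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of this file): both the
// try_rebias and try_revoke_bias paths above funnel into one
// compare-and-swap on the object's mark word.  Rebias installs
// (current thread | prototype header) and may legally use the stale
// saved mark as the expected value, because an expired epoch means no
// thread validly owns the bias; revoke installs the unbiased prototype
// so every racer falls through to ordinary CAS locking.  A hedged C++
// model of the two attempts (names hypothetical):

#include <atomic>
#include <cstdint>

typedef uintptr_t markword_t;

// True if we acquired the bias; false sends the caller to the slow path
// (the notZero jcc above), where the runtime revokes the rival's bias.
bool try_rebias(std::atomic<markword_t>& mark, markword_t saved_mark,
                markword_t thread, markword_t prototype) {
  markword_t expected = saved_mark;              // stale mark is OK here
  return mark.compare_exchange_strong(expected, thread | prototype);
}

// Result deliberately ignored: whether we or a racing thread wrote the
// prototype, the bias bit is gone afterwards, so plain locking works.
void try_revoke_bias(std::atomic<markword_t>& mark, markword_t saved_mark,
                     markword_t prototype) {
  markword_t expected = saved_mark;
  mark.compare_exchange_strong(expected, prototype);
}
// ---------------------------------------------------------------------
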
4780 void MacroAssembler::call_VM_leaf_base(address entry_point,


7666                                                    Label* L_success,
7667                                                    Label* L_failure,
7668                                                    Label* L_slow_path,
7669                                         RegisterOrConstant super_check_offset) {
7670   assert_different_registers(sub_klass, super_klass, temp_reg);
7671   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
7672   if (super_check_offset.is_register()) {
7673     assert_different_registers(sub_klass, super_klass,
7674                                super_check_offset.as_register());
7675   } else if (must_load_sco) {
7676     assert(temp_reg != noreg, "supply either a temp or a register offset");
7677   }
7678 
7679   Label L_fallthrough;
7680   int label_nulls = 0;
7681   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
7682   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
7683   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
7684   assert(label_nulls <= 1, "at most one NULL in the batch");
7685 
7686   int sc_offset = Klass::secondary_super_cache_offset_in_bytes();
7687   int sco_offset = Klass::super_check_offset_offset_in_bytes();
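
// Editor's note: this hunk is the substance of change 7118863.  The old
// code addressed Klass fields relative to the Klass "part" embedded in a
// klassOop, so each use added the klassOopDesc header size (or, at the
// biased-locking sites, klassOopDesc::klass_part_offset_in_bytes(),
// which appears to be the same quantity) by hand:
//
//   // before
//   int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
//                    Klass::secondary_super_cache_offset_in_bytes());
//   // after
//   int sc_offset = Klass::secondary_super_cache_offset_in_bytes();
//
// The *_offset_in_bytes() accessors now fold that header size in, so the
// same simplification repeats at every klass-field access in this file.
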


7688   Address super_check_offset_addr(super_klass, sco_offset);
7689 
7690   // Hacked jcc, which "knows" that L_fallthrough, at least, is in
7691   // range of a jccb.  If this routine grows larger, reconsider at
7692   // least some of these.
7693 #define local_jcc(assembler_cond, label)                                \
7694   if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
7695   else                             jcc( assembler_cond, label) /*omit semi*/
7696 
7697   // Hacked jmp, which may only be used just before L_fallthrough.
7698 #define final_jmp(label)                                                \
7699   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
7700   else                            jmp(label)                /*omit semi*/
7701 
7702   // If the pointers are equal, we are done (e.g., String[] elements).
7703   // This self-check enables sharing of secondary supertype arrays among
7704   // non-primary types such as array-of-interface.  Otherwise, each such
7705   // type would need its own customized SSA.
7706   // We move this check to the front of the fast path because many
7707   // type checks are in fact trivially successful in this manner,


7765 
7766 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
7767                                                    Register super_klass,
7768                                                    Register temp_reg,
7769                                                    Register temp2_reg,
7770                                                    Label* L_success,
7771                                                    Label* L_failure,
7772                                                    bool set_cond_codes) {
7773   assert_different_registers(sub_klass, super_klass, temp_reg);
7774   if (temp2_reg != noreg)
7775     assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
7776 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
7777 
7778   Label L_fallthrough;
7779   int label_nulls = 0;
7780   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
7781   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
7782   assert(label_nulls <= 1, "at most one NULL in the batch");
7783 
7784   // a couple of useful fields in sub_klass:
7785   int ss_offset = Klass::secondary_supers_offset_in_bytes();
7786   int sc_offset = Klass::secondary_super_cache_offset_in_bytes();


7787   Address secondary_supers_addr(sub_klass, ss_offset);
7788   Address super_cache_addr(     sub_klass, sc_offset);
7789 
7790   // Do a linear scan of the secondary super-klass chain.
7791   // This code is rarely used, so simplicity is a virtue here.
7792   // The repne_scan instruction uses fixed registers, which we must spill.
7793   // Don't worry too much about pre-existing connections with the input regs.
7794 
7795   assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
7796   assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
7797 
7798   // Get super_klass value into rax (even if it was in rdi or rcx).
7799   bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
7800   if (super_klass != rax || UseCompressedOops) {
7801     if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
7802     mov(rax, super_klass);
7803   }
7804   if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
7805   if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
7806 
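
// Editor's note (illustrative sketch, not part of this file): the
// repne_scan that follows this spill sequence implements, in those fixed
// registers, the loop below: scan the secondary-supers array for
// super_klass and, on a hit, record it in the one-element
// secondary_super_cache so the next fast-path check succeeds without
// scanning.  A hedged C++ equivalent with simplified, hypothetical types:

#include <cstddef>

struct KlassModel {                      // stand-in for the real Klass
  KlassModel** secondary_supers;         // array of secondary supertypes
  size_t       secondary_supers_len;
  KlassModel*  secondary_super_cache;    // 1-entry cache the fast path reads
};

bool slow_path_scan(KlassModel* sub, KlassModel* super) {
  for (size_t i = 0; i < sub->secondary_supers_len; i++) { // repne scan
    if (sub->secondary_supers[i] == super) {
      sub->secondary_super_cache = super;  // cache the hit for next time
      return true;                         // subtype confirmed
    }
  }
  return false;                            // branch to *L_failure
}
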


8464 }
8465 
8466 void MacroAssembler::load_klass(Register dst, Register src) {
8467 #ifdef _LP64
8468   if (UseCompressedOops) {
8469     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
8470     decode_heap_oop_not_null(dst);
8471   } else
8472 #endif
8473     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
8474 }
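
// Editor's note (illustrative sketch, not part of this file): with
// UseCompressedOops the klass field is a 32-bit "narrow oop", so
// load_klass reads it with movl and widens it via
// decode_heap_oop_not_null.  A hedged sketch of the decode, assuming the
// usual base-plus-shift compressed-oop scheme:

#include <cstdint>

uintptr_t decode_not_null(uint32_t narrow, uintptr_t heap_base,
                          unsigned shift) {
  // No null check needed on the not-null path: base + (narrow << shift).
  return heap_base + ((uintptr_t)narrow << shift);
}

// store_klass below runs the inverse, (addr - base) >> shift truncated
// to 32 bits, via encode_heap_oop_not_null before its movl store.
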
8475 
8476 void MacroAssembler::load_prototype_header(Register dst, Register src) {
8477 #ifdef _LP64
8478   if (UseCompressedOops) {
8479     assert (Universe::heap() != NULL, "java heap should be initialized");
8480     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
8481     if (Universe::narrow_oop_shift() != 0) {
8482       assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
8483       if (LogMinObjAlignmentInBytes == Address::times_8) {
8484         movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes()));
8485       } else {
8486         // OK to use shift since we don't need to preserve flags.
8487         shlq(dst, LogMinObjAlignmentInBytes);
8488         movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset_in_bytes()));
8489       }
8490     } else {
8491       movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes()));
8492     }
8493   } else
8494 #endif
8495   {
8496     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
8497     movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes()));
8498   }
8499 }
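
// Editor's note (descriptive, not part of this file): when the
// narrow-oop shift equals 3, load_prototype_header folds the decode into
// the x86 scaled-index addressing mode:
//
//   movq(dst, Address(r12_heapbase, dst, Address::times_8, offset));
//
// computes r12 + dst*8 + offset in a single load, performing the oop
// decode and the field read together.  For other shifts the decode must
// be materialized first with shlq (which does clobber the flags; the
// in-line comment notes that is acceptable here) before a times_1 load.
// Either way the effective address works out to
// heap_base + (narrow << shift) + field_offset.
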
8500 
8501 void MacroAssembler::store_klass(Register dst, Register src) {
8502 #ifdef _LP64
8503   if (UseCompressedOops) {
8504     encode_heap_oop_not_null(src);
8505     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
8506   } else
8507 #endif
8508     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
8509 }
8510 
8511 void MacroAssembler::load_heap_oop(Register dst, Address src) {
8512 #ifdef _LP64
8513   if (UseCompressedOops) {
8514     movl(dst, src);
8515     decode_heap_oop(dst);
8516   } else
8517 #endif

