379 case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
380 case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
381 case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
382 case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
383 break;
384
385 case instruction_code_float_s: // 0xd9 fld_s a
386 case instruction_code_float_d: // 0xdd fld_d a
387 case instruction_code_xmm_load: // 0x10 movsd xmm, a
388 case instruction_code_xmm_store: // 0x11 movsd a, xmm
389 case instruction_code_xmm_lpd: // 0x12 movlpd xmm, a
390 break;
391
392 default:
393 fatal ("not a mov [reg+offs], reg instruction");
394 }
395 }
396
397
398 void NativeMovRegMem::print() {
399 tty->print_cr("0x%x: mov reg, [reg + %x]", instruction_address(), offset());
400 }
401
402 //-------------------------------------------------------------------
403
404 void NativeLoadAddress::verify() {
405 // make sure code pattern is actually a mov [reg+offset], reg instruction
406 u_char test_byte = *(u_char*)instruction_address();
407 #ifdef _LP64
408 if ( (test_byte == instruction_prefix_wide ||
409 test_byte == instruction_prefix_wide_extended) ) {
410 test_byte = *(u_char*)(instruction_address() + 1);
411 }
412 #endif // _LP64
413 if ( ! ((test_byte == lea_instruction_code)
414 LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
415 fatal ("not a lea reg, [reg+offs] instruction");
416 }
417 }
418
419
420 void NativeLoadAddress::print() {
421 tty->print_cr("0x%x: lea [reg + %x], reg", instruction_address(), offset());
422 }
423
424 //--------------------------------------------------------------------------------
425
426 void NativeJump::verify() {
427 if (*(u_char*)instruction_address() != instruction_code) {
428 fatal("not a jump instruction");
429 }
430 }
431
432
433 void NativeJump::insert(address code_pos, address entry) {
434 intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
435 #ifdef AMD64
436 guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
437 #endif // AMD64
438
439 *code_pos = instruction_code;
440 *((int32_t*)(code_pos + 1)) = (int32_t)disp;
441
|
379 case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
380 case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
381 case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
382 case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
383 break;
384
385 case instruction_code_float_s: // 0xd9 fld_s a
386 case instruction_code_float_d: // 0xdd fld_d a
387 case instruction_code_xmm_load: // 0x10 movsd xmm, a
388 case instruction_code_xmm_store: // 0x11 movsd a, xmm
389 case instruction_code_xmm_lpd: // 0x12 movlpd xmm, a
390 break;
391
392 default:
393 fatal ("not a mov [reg+offs], reg instruction");
394 }
395 }
396
397
// Print this instruction's address and its decoded displacement field.
void NativeMovRegMem::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, [reg + %x]", p2i(instruction_address()), offset());
}
401
402 //-------------------------------------------------------------------
403
void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offset] instruction
  u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
  // On 64-bit a REX prefix may precede the opcode; skip it so we test
  // the opcode byte itself.
  if ( (test_byte == instruction_prefix_wide ||
        test_byte == instruction_prefix_wide_extended) ) {
    test_byte = *(u_char*)(instruction_address() + 1);
  }
#endif // _LP64
  // Accept lea; on 64-bit also accept the mov64 encoding.
  if ( ! ((test_byte == lea_instruction_code)
          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
    fatal ("not a lea reg, [reg+offs] instruction");
  }
}
418
419
420 void NativeLoadAddress::print() {
421 tty->print_cr(PTR_FORMAT ": lea [reg + %x], reg", instruction_address(), offset());
422 }
423
424 //--------------------------------------------------------------------------------
425
void NativeJump::verify() {
  // The byte at the instruction address must be the jump opcode.
  if (*(u_char*)instruction_address() != instruction_code) {
    fatal("not a jump instruction");
  }
}
431
432
433 void NativeJump::insert(address code_pos, address entry) {
434 intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
435 #ifdef AMD64
436 guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
437 #endif // AMD64
438
439 *code_pos = instruction_code;
440 *((int32_t*)(code_pos + 1)) = (int32_t)disp;
441
|