/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>
# include <fpu_control.h>

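// REG_FP is the index of the AArch64 frame pointer register (x29) within
// uc_mcontext.regs[].  The stack-walking entry points below are marked
// NOINLINE so that each gets a real frame of its own and
// __builtin_frame_address(0) refers to that frame rather than the caller's.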
#define REG_FP 29

#define NOINLINE __attribute__ ((noinline))

NOINLINE address os::current_stack_pointer() {
  return (address)__builtin_frame_address(0);
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).

  return (char*) 0xffffffffffff;
}

void os::initialize_thread(Thread *thr) {
}

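// The accessors below extract the saved pc, sp and fp from a signal
// ucontext.  They are used by the signal handler and by the fatal error
// reporter to reconstruct the interrupted frame.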
address os::Linux::ucontext_get_pc(ucontext_t * uc) {
  return (address)uc->uc_mcontext.pc;
}

intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.sp;
}

intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.regs[REG_FP];
}

// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
// frames. Currently we don't do that on Linux, so it's the same as
// os::fetch_frame_from_context().
ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
}

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  ucontext_t* uc = (ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// By default, gcc always saves the frame pointer rfp on the stack. This
// may get turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->link(), fr->link(), fr->sender_pc());
}

NOINLINE frame os::current_frame() {
  intptr_t *fp = *(intptr_t **)__builtin_frame_address(0);
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

// Utility functions

// An operation in Unsafe has faulted.  We're going to return to the
// instruction after the faulting load or store.  We also set
// pending_unsafe_access_error so that at some point in the future our
// user will get a helpful message.
static address handle_unsafe_access(JavaThread* thread, address pc) {
  // pc is the instruction which we must emulate
  // doing a no-op is fine:  return garbage from the load
  // therefore, compute npc
  address npc = pc + NativeCall::instruction_size;

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}

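// Signal handler entry point called by the VM's installed signal handler.
// The general flow is: handle signals that need no context first, then try
// to turn the trap into a stub continuation (stack overflow, safepoint poll,
// implicit null check, divide by zero, unsafe access), then give any chained
// handler a chance, and finally either return false (if abort_if_unrecognized
// is zero) or report a fatal error.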
extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  // Must do this before SignalHandlerMark: if crash protection is installed
  // we will longjmp away (no destructors can be run).
  os::ThreadCrashProtection::check_crash_protection(sig, t);

  SignalHandlerMark shm(t);

  // Note: it's not uncommon that JNI code uses signal/sigset to install
  // and then restore certain signal handlers (e.g. to temporarily block
  // SIGPIPE, or to have a SIGILL handler when detecting CPU type). When that
  // happens, JVM_handle_linux_signal() might be invoked with junk info/ucVoid.
  // To avoid an unnecessary crash when libjsig is not preloaded, try to
  // handle signals that do not require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see bugs 4229104 or 646499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL) {
      if (t->is_Java_thread()) {
        thread = (JavaThread*)t;
      } else if (t->is_VM_thread()) {
        vmthread = (VMThread *)t;
      }
    }
  }
/*
  NOTE: does not seem to work on linux.
  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }
*/
  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc          = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Linux::ucontext_get_pc(uc);

    if (StubRoutines::is_safefetch_fault(pc)) {
      uc->uc_mcontext.pc = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
      return 1;
    }

#ifndef AMD64
    // Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs
    // This can happen in any running code (currently more frequently in
    // interpreter code but has been seen in compiled code)
    if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) {
      fatal("An irrecoverable SI_KERNEL SIGSEGV has occurred due "
            "to unstable signal handling in this distribution.");
    }
#endif // AMD64

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception.  Guard pages will be reenabled
            // while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the vm or native code.  Return and try to finish.
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {
          // Fatal red zone violation.  Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");

          // This is a likely cause, but hard to verify. Let's just print
          // it as a hint.
          tty->print_raw_cr("Please check if any of your loaded .so files has "
                            "enabled executable stack (see man page execstack(8))");
        } else {
          // Accessing stack address below sp may cause SEGV if current
          // thread has MAP_GROWSDOWN stack. This should only happen when
          // current thread was created by user code with MAP_GROWSDOWN flag
          // and then attached to VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
             thread->osthread()->set_expanding_stack();
             if (os::Linux::manually_expand_stack(thread, addr)) {
               thread->osthread()->clear_expanding_stack();
               return 1;
             }
             thread->osthread()->clear_expanding_stack();
          } else {
             fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // Handle signal from NativeJump::patch_verified_entry().
      if ((sig == SIGILL || sig == SIGTRAP)
          && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        }
        stub = SharedRuntime::get_handle_wrong_method_stub();
      } else if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          stub = handle_unsafe_access(thread, pc);
        }
      } else if (sig == SIGFPE &&
                 (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub =
          SharedRuntime::continuation_for_implicit_exception(thread,
                                                             pc,
                                                             SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
      } else if (sig == SIGSEGV &&
                 !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
      stub = handle_unsafe_access(thread, pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if ((sig == SIGSEGV) &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    uc->uc_mcontext.pc = (__u64)stub;
    return true;
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
    return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (pc == NULL && uc != NULL) {
    pc = os::Linux::ucontext_get_pc(uc);
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError err(t, sig, pc, info, ucVoid);
  err.report_and_die();

  ShouldNotReachHere();
  return true; // Mute compiler
}

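// The FPU control-word hooks below are effectively no-ops on AArch64;
// unlike x86, no x87 control word needs to be set up per thread.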
void os::Linux::init_thread_fpu_state(void) {
}

int os::Linux::get_fpu_control_word(void) {
  return 0;
}

void os::Linux::set_fpu_control_word(int fpu_control) {
}

// Check that the Linux kernel version is 2.4 or higher since earlier
// versions do not support SSE without patches.  (That check applies to the
// x86 port; here we simply report support.)
bool os::supports_sse() {
  return true;
}

bool os::is_allocatable(size_t bytes) {
  return true;
}

////////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Linux::min_stack_allowed  = 64 * K;

// pthread on this platform is always in floating stack mode, so variable
// stack sizes are supported
bool os::Linux::supports_variable_stack_size() { return true; }

// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
  return s;
}

size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Creating a guard page is very expensive. Java threads have HotSpot
  // guard pages, so only enable the glibc guard page for non-Java threads.
  return (thr_type == java_thread ? 0 : page_size());
}

// Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\  JavaThread created by VM does not have glibc
//    |    glibc guard page    | - guard, attached Java thread usually has
//    |                        |/  1 page glibc guard.
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |  HotSpot Guard Pages   | - red and yellow pages
//    |                        |/
//    +------------------------+ JavaThread::stack_yellow_zone_base()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// Non-Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\
//    |  glibc guard page      | - usually 1 page
//    |                        |/
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size
//    returned from pthread_attr_getstack()

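// Returns the bottom (P1 in the diagram above) and size of the current
// thread's stack region.  The primordial thread is handled specially
// because pthread_getattr_np() may not report its stack correctly.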
static void current_stack_region(address * bottom, size_t * size) {
  if (os::is_primordial_thread()) {
    // primordial thread needs special handling because pthread_getattr_np()
    // may return bogus value.
    *bottom = os::Linux::initial_thread_stack_bottom();
    *size   = os::Linux::initial_thread_stack_size();
  } else {
    pthread_attr_t attr;

    int rslt = pthread_getattr_np(pthread_self(), &attr);

    // JVM needs to know exact stack location, abort if it fails
    if (rslt != 0) {
      if (rslt == ENOMEM) {
        vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
      } else {
        fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
      }
    }

    if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
      fatal("Can not locate current stack attributes!");
    }

    pthread_attr_destroy(&attr);
  }
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}

address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return (bottom + size);
}

size_t os::current_stack_size() {
  // stack size includes normal stack and HotSpot guard pages
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}

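// Dump the 31 general-purpose registers, the words at the top of the stack,
// and the instruction bytes around pc for the hs_err crash report.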
void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;
  st->print_cr("Registers:");
  for (int r = 0; r < 31; r++) {
    st->print_cr("R%d=" INTPTR_FORMAT, r, (int64_t)uc->uc_mcontext.regs[r]);
  }
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp));
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Linux::ucontext_get_pc(uc);
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc));
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}

void os::print_register_info(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is horrendously verbose but the layout of the registers in the
  // context does not match how we defined our abstract Register set, so
  // we can't just iterate through the gregs area

  // this is only for the "general purpose" registers

  for (int r = 0; r < 31; r++) {
    st->print_cr("R%d=" INTPTR_FORMAT, r, (int64_t)uc->uc_mcontext.regs[r]);
  }
  st->cr();
}

void os::setup_fpu() {
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif
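
// The extern "C" stubs below are referenced from shared runtime code.  The
// conjoint copy routines pick a copy direction based on how the ranges
// overlap, and the jlong variant copies element by element through
// os::atomic_copy64() so each 64-bit element is transferred atomically.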

extern "C" {
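  // SpinPause is a hint used in spin loops; returning 0 indicates that no
  // architecture-specific pause instruction was executed here.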
  int SpinPause() {
    return 0;
  }

  void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
    if (from > to) {
      const jshort *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      const jshort *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
  void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
    if (from > to) {
      const jint *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      const jint *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
  void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
    if (from > to) {
      const jlong *end = from + count;
      while (from < end)
        os::atomic_copy64(from++, to++);
    }
    else if (from < to) {
      const jlong *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        os::atomic_copy64(from--, to--);
    }
  }

  void _Copy_arrayof_conjoint_bytes(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count);
  }
  void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
    memmove(to, from, count * 2);
  }
  void _Copy_arrayof_conjoint_jints(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count * 4);
  }
  void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
    memmove(to, from, count * 8);
  }
};