/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/copy.hpp"


// Copy bytes; larger units are copied atomically if everything is aligned.
void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) {
  address src = (address) from;
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size;
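  // OR-ing the source, destination, and size together lets each alignment
  // test below check all three values with a single modulus operation.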

  // (Note:  We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope.  For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation.  There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.
  // A sketch of the cleanup-loop variant follows this function.)

  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort));
  } else {
    // Not aligned, so the elements can only be bytes, and single bytes are
    // always copied atomically.
    Copy::conjoint_jbytes((void*) src, (void*) dst, size);
  }
}
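
// (A minimal sketch of the cleanup-loop variant mentioned in the note above.
// The entry point conjoint_memory_atomic_with_tail is hypothetical and not
// part of this file; the sketch ignores the low bits of size for the bulk
// copy and finishes with a short byte loop.  Note how overlap handling
// already complicates it, which is the "slippery slope" the note warns about.)
#if 0
static void conjoint_memory_atomic_with_tail(void* from, void* to, size_t size) {
  address src = (address) from;
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) src | (uintptr_t) dst;
  if (bits % sizeof(jlong) != 0) {
    Copy::conjoint_jbytes(from, to, size);     // unaligned: plain byte copy
    return;
  }
  size_t bulk = size & ~(sizeof(jlong) - 1);   // whole jlongs
  size_t tail = size - bulk;                   // 0..7 leftover bytes
  if (dst <= src) {
    // Copying toward lower addresses (or disjoint): bulk first, then the
    // tail, so the tail's source bytes are still intact when it runs.
    Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, bulk / sizeof(jlong));
    Copy::conjoint_jbytes(src + bulk, dst + bulk, tail);
  } else {
    // Copying toward higher addresses with possible overlap: tail first,
    // for the same reason.
    Copy::conjoint_jbytes(src + bulk, dst + bulk, tail);
    Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, bulk / sizeof(jlong));
  }
}
#endif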


// Fill bytes; larger units are filled atomically if everything is aligned.
void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    // Replicate the byte into all eight lanes, e.g. 0x2a -> 0x2a2a2a2a2a2a2a2a.
    // The lanes never carry into one another, so each addition acts as a
    // bitwise OR; doing the shifts in julong keeps them out of the sign bit.
    julong fill = (jubyte) value; // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
      fill += fill << 32;
    }
    //Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
    for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
      *(jlong*)(dst + off) = (jlong) fill;
    }
  } else if (bits % sizeof(jint) == 0) {
    // Same replication into four lanes, e.g. 0x2a -> 0x2a2a2a2a.
    juint fill = (jubyte) value; // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
    }
    //Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
    for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
      *(jint*)(dst + off) = (jint) fill;
    }
  } else if (bits % sizeof(jshort) == 0) {
    // Two lanes, e.g. 0x2a -> 0x2a2a.  A zero byte replicates to zero anyway,
    // so the single shift needs no zero test.
    jushort fill = (jubyte) value; // zero-extend
    fill += fill << 8;
    //Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
    for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
      *(jshort*)(dst + off) = (jshort) fill;
    }
  } else {
    // Not aligned, so the fill unit can only be a byte; single bytes are
    // written atomically by definition.
    Copy::fill_to_bytes(dst, size, value);
  }
}
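
// (A minimal usage sketch, assuming an 8-byte-aligned caller buffer and the
// HotSpot CONST64 macro; illustrative only, not part of this file.)
#if 0
static void fill_to_memory_atomic_example() {
  jlong buf[4];                                  // 8-byte aligned, 32 bytes
  Copy::fill_to_memory_atomic(buf, sizeof(buf), 0xAB);
  // The jlong path writes one whole unit at a time, so a concurrent reader
  // sees either the old contents or 0xABABABABABABABAB, never a torn value.
  assert(buf[0] == (jlong) CONST64(0xABABABABABABABAB), "byte replication");
}
#endif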