/*
 * Copyright (c) 2011, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package jdk.vm.ci.code;

/**
 * Constants and intrinsic definitions for memory barriers.
 *
 * The documentation for each constant is taken from Doug Lea's <a
 * href="http://gee.cs.oswego.edu/dl/jmm/cookbook.html">The JSR-133 Cookbook for Compiler
 * Writers</a>.
 * <p>
 * The {@code JMM_*} constants capture the memory barriers necessary to implement the Java Memory
 * Model with respect to volatile field accesses. Their values are explained by this comment from
 * templateTable_i486.cpp in the HotSpot source code:
 *
 * <pre>
 * Volatile variables demand their effects be made known to all CPU's in
 * order.  Store buffers on most chips allow reads &amp; writes to reorder; the
 * JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
 * memory barrier (i.e., it's not sufficient that the interpreter does not
 * reorder volatile references, the hardware also must not reorder them).
 *
 * According to the new Java Memory Model (JMM):
 * (1) All volatiles are serialized wrt to each other.
 * ALSO reads &amp; writes act as acquire &amp; release, so:
 * (2) A read cannot let unrelated NON-volatile memory refs that happen after
 * the read float up to before the read.  It's OK for non-volatile memory refs
 * that happen before the volatile read to float down below it.
 * (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
 * that happen BEFORE the write float down to after the write.  It's OK for
 * non-volatile memory refs that happen after the volatile write to float up
 * before it.
 *
 * We only put in barriers around volatile refs (they are expensive), not
 * _between_ memory refs (which would require us to track the flavor of the
 * previous memory refs).  Requirements (2) and (3) require some barriers
 * before volatile stores and after volatile loads.  These nearly cover
 * requirement (1) but miss the volatile-store-volatile-load case.  This final
 * case is placed after volatile-stores although it could just as well go
 * before volatile-loads.
 * </pre>
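 * <p>
 * As an illustrative sketch only (not the exact HotSpot or JVMCI lowering), a compiler using the
 * {@code JMM_*} constants might bracket a volatile field write as follows, where
 * {@code emitBarriers} and {@code emitStore} are hypothetical back-end hooks:
 *
 * <pre>
 * emitBarriers(MemoryBarriers.JMM_PRE_VOLATILE_WRITE);  // LOAD_STORE | STORE_STORE
 * emitStore(object, fieldOffset, value);                // the volatile store itself
 * emitBarriers(MemoryBarriers.JMM_POST_VOLATILE_WRITE); // STORE_LOAD | STORE_STORE
 * </pre>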
 */
public class MemoryBarriers {

    /**
     * The sequence {@code Load1; LoadLoad; Load2} ensures that {@code Load1}'s data are loaded
     * before data accessed by {@code Load2} and all subsequent load instructions are loaded. In
     * general, explicit {@code LoadLoad} barriers are needed on processors that perform speculative
     * loads and/or out-of-order processing in which waiting load instructions can bypass waiting
     * stores. On processors that guarantee to always preserve load ordering, these barriers amount
     * to no-ops.
     */
    public static final int LOAD_LOAD = 0x0001;

    /**
     * The sequence {@code Load1; LoadStore; Store2} ensures that {@code Load1}'s data are loaded
     * before all data associated with {@code Store2} and subsequent store instructions are flushed.
     * {@code LoadStore} barriers are needed only on those out-of-order processors in which waiting
     * store instructions can bypass loads.
     */
    public static final int LOAD_STORE = 0x0002;

    /**
     * The sequence {@code Store1; StoreLoad; Load2} ensures that {@code Store1}'s data are made
     * visible to other processors (i.e., flushed to main memory) before data accessed by
     * {@code Load2} and all subsequent load instructions are loaded. {@code StoreLoad} barriers
     * protect against a subsequent load incorrectly using {@code Store1}'s data value rather than
     * that from a more recent store to the same location performed by a different processor.
     * Because of this, on the processors discussed in the cookbook, a {@code StoreLoad} is strictly
     * necessary only for separating stores from subsequent loads of the same location(s) as were
     * stored before the barrier. {@code StoreLoad} barriers are needed on nearly all recent
     * multiprocessors, and are usually the most expensive kind. Part of the reason they are
     * expensive is that they must disable mechanisms that ordinarily bypass cache to satisfy loads
     * from write-buffers. This might be implemented by letting the buffer fully flush, among other
     * possible stalls.
     */
    public static final int STORE_LOAD = 0x0004;

    /**
     * The sequence {@code Store1; StoreStore; Store2} ensures that {@code Store1}'s data are
     * visible to other processors (i.e., flushed to memory) before the data associated with
     * {@code Store2} and all subsequent store instructions. In general, {@code StoreStore} barriers
     * are needed on processors that do not otherwise guarantee strict ordering of flushes from
     * write buffers and/or caches to other processors or main memory.
     */
    public static final int STORE_STORE = 0x0008;

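    /**
     * Barriers to be emitted immediately before a volatile write: {@link #LOAD_STORE} and
     * {@link #STORE_STORE} keep memory accesses that happen before the write from floating down
     * below it (requirement (3) in the HotSpot comment above).
     */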
    public static final int JMM_PRE_VOLATILE_WRITE = LOAD_STORE | STORE_STORE;
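
    /**
     * Barriers to be emitted immediately after a volatile write: {@link #STORE_LOAD} and
     * {@link #STORE_STORE} keep the write ordered before subsequent loads and stores; the
     * {@code StoreLoad} component covers the volatile-store-volatile-load case of requirement (1)
     * in the HotSpot comment above.
     */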
    public static final int JMM_POST_VOLATILE_WRITE = STORE_LOAD | STORE_STORE;
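
    /**
     * Barriers to be emitted immediately before a volatile read: none are required, since the
     * volatile-store-volatile-load case is already covered by {@link #JMM_POST_VOLATILE_WRITE}.
     */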
    public static final int JMM_PRE_VOLATILE_READ = 0;
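
    /**
     * Barriers to be emitted immediately after a volatile read: {@link #LOAD_LOAD} and
     * {@link #LOAD_STORE} keep memory accesses that happen after the read from floating up above
     * it (requirement (2) in the HotSpot comment above).
     */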
    public static final int JMM_POST_VOLATILE_READ = LOAD_LOAD | LOAD_STORE;

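    /**
     * Produces a string describing a barrier mask composed of the constants defined in this class.
     * For example, {@code barriersString(JMM_POST_VOLATILE_READ)} returns
     * {@code "LOAD_LOAD LOAD_STORE"}.
     *
     * @param barriers a mask of the barrier constants defined in this class
     * @return the names of the barriers present in {@code barriers}, separated by spaces
     */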
    public static String barriersString(int barriers) {
        StringBuilder sb = new StringBuilder();
        sb.append((barriers & LOAD_LOAD) != 0 ? "LOAD_LOAD " : "");
        sb.append((barriers & LOAD_STORE) != 0 ? "LOAD_STORE " : "");
        sb.append((barriers & STORE_LOAD) != 0 ? "STORE_LOAD " : "");
        sb.append((barriers & STORE_STORE) != 0 ? "STORE_STORE " : "");
        return sb.toString().trim();
    }
}