
src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp

Old version:

  33 // Implementation of class OrderAccess.
  34 // - we define the high level barriers below and use the general
  35 //   implementation in orderAccess.hpp, with customizations
  36 //   on AARCH64 via the specialized_* template functions
  37 
  38 // Memory Ordering on ARM is weak.
  39 //
  40 // Implement all four memory ordering barriers with DMB, since it is a
  41 // lighter-weight barrier than DSB.
  42 // dmb_sy covers the full-system shareability domain, RD/WR access types.
  43 // dmb_st covers the full-system shareability domain, WR-only access type.
  44 //
  45 // NOP on < ARMv6 (MP not supported)
  46 //
  47 // Non-mcr instructions can be used when building for ARMv7 or a newer arch:
  48 //    __asm__ __volatile__ ("dmb" : : : "memory");
  49 //    __asm__ __volatile__ ("dsb" : : : "memory");
  50 //
  51 // inline void _OrderAccess_dsb() {
  52 //    volatile intptr_t dummy = 0;
  53 //    if (os::is_MP()) {
  54 //      __asm__ volatile (
  55 //        "mcr p15, 0, %0, c7, c10, 4"
  56 //        : : "r" (dummy) : "memory");
  57 //    }
  58 // }
  59 
  60 inline static void dmb_sy() {
  61    if (!os::is_MP()) {
  62      return;
  63    }
  64 #ifdef AARCH64
  65    __asm__ __volatile__ ("dmb sy" : : : "memory");
  66 #else
  67    if (VM_Version::arm_arch() >= 7) {
  68 #ifdef __thumb__
  69      __asm__ volatile (
  70      "dmb sy": : : "memory");
  71 #else
  72      __asm__ volatile (
  73      ".word 0xF57FF050 | 0xf" : : : "memory");
  74 #endif
  75    } else {
  76      intptr_t zero = 0;
  77      __asm__ volatile (
  78        "mcr p15, 0, %0, c7, c10, 5"
  79        : : "r" (zero) : "memory");
  80    }
  81 #endif
  82 }
  83 
  84 inline static void dmb_st() {
  85    if (!os::is_MP()) {
  86      return;
  87    }
  88 #ifdef AARCH64
  89    __asm__ __volatile__ ("dmb st" : : : "memory");
  90 #else
  91    if (VM_Version::arm_arch() >= 7) {
  92 #ifdef __thumb__
  93      __asm__ volatile (
  94      "dmb st": : : "memory");
  95 #else
  96      __asm__ volatile (
  97      ".word 0xF57FF050 | 0xe" : : : "memory");
  98 #endif
  99    } else {
 100      intptr_t zero = 0;
 101      __asm__ volatile (
 102        "mcr p15, 0, %0, c7, c10, 5"
 103        : : "r" (zero) : "memory");
 104    }
 105 #endif
 106 }
 107 
  108 // Load-Load/Store barrier (pre-ARMv8 has no load-only DMB option, so 32-bit falls back to dmb_sy)
 109 inline static void dmb_ld() {
 110 #ifdef AARCH64
 111    if (!os::is_MP()) {
 112      return;
 113    }
 114    __asm__ __volatile__ ("dmb ld" : : : "memory");
 115 #else
 116    dmb_sy();
 117 #endif
 118 }
 119 
 120 
 121 inline void OrderAccess::loadload()   { dmb_ld(); }
 122 inline void OrderAccess::loadstore()  { dmb_ld(); }
 123 inline void OrderAccess::acquire()    { dmb_ld(); }
 124 inline void OrderAccess::storestore() { dmb_st(); }
 125 inline void OrderAccess::storeload()  { dmb_sy(); }
 126 inline void OrderAccess::release()    { dmb_sy(); }
 127 inline void OrderAccess::fence()      { dmb_sy(); }
 128 
  129 // Specializations for AArch64
 130 // TODO-AARCH64: evaluate effectiveness of ldar*/stlr* implementations compared to 32-bit ARM approach
 131 
 132 #ifdef AARCH64
  133 

New version (with the os::is_MP() checks removed):

  33 // Implementation of class OrderAccess.
  34 // - we define the high level barriers below and use the general
  35 //   implementation in orderAccess.hpp, with customizations
  36 //   on AARCH64 via the specialized_* template functions
  37 
  38 // Memory Ordering on ARM is weak.
  39 //
  40 // Implement all four memory ordering barriers with DMB, since it is a
  41 // lighter-weight barrier than DSB.
  42 // dmb_sy covers the full-system shareability domain, RD/WR access types.
  43 // dmb_st covers the full-system shareability domain, WR-only access type.
  44 //
  45 // NOP on < ARMv6 (MP not supported)
  46 //
  47 // Non-mcr instructions can be used when building for ARMv7 or a newer arch:
  48 //    __asm__ __volatile__ ("dmb" : : : "memory");
  49 //    __asm__ __volatile__ ("dsb" : : : "memory");
  50 //
  51 // inline void _OrderAccess_dsb() {
  52 //    volatile intptr_t dummy = 0;

  53 //    __asm__ volatile (
  54 //      "mcr p15, 0, %0, c7, c10, 4"
  55 //      : : "r" (dummy) : "memory");
  56 // }

  57 
  58 inline static void dmb_sy() {



  59 #ifdef AARCH64
  60    __asm__ __volatile__ ("dmb sy" : : : "memory");
  61 #else
  62    if (VM_Version::arm_arch() >= 7) {
  63 #ifdef __thumb__
  64      __asm__ volatile (
  65      "dmb sy": : : "memory");
  66 #else
  67      __asm__ volatile (
  68      ".word 0xF57FF050 | 0xf" : : : "memory");
  69 #endif
  70    } else {
  71      intptr_t zero = 0;
  72      __asm__ volatile (
  73        "mcr p15, 0, %0, c7, c10, 5"
  74        : : "r" (zero) : "memory");
  75    }
  76 #endif
  77 }
  78 
  79 inline static void dmb_st() {



  80 #ifdef AARCH64
  81    __asm__ __volatile__ ("dmb st" : : : "memory");
  82 #else
  83    if (VM_Version::arm_arch() >= 7) {
  84 #ifdef __thumb__
  85      __asm__ volatile (
  86      "dmb st": : : "memory");
  87 #else
  88      __asm__ volatile (
  89      ".word 0xF57FF050 | 0xe" : : : "memory");
  90 #endif
  91    } else {
  92      intptr_t zero = 0;
  93      __asm__ volatile (
  94        "mcr p15, 0, %0, c7, c10, 5"
  95        : : "r" (zero) : "memory");
  96    }
  97 #endif
  98 }
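
A note on the magic constants above (my reading of the ARM encodings, not text from this file): 0xF57FF050 is the base ARM-mode encoding of DMB, with the low four bits selecting the barrier option (0xF = SY, 0xE = ST), so the .word form can emit the instruction even when the assembler is not targeting ARMv7; the mcr p15, 0, <r>, c7, c10, 5 fallback is the ARMv6 CP15 data memory barrier (c7, c10, 4 being the corresponding DSB, as in the commented-out _OrderAccess_dsb above). A minimal compile-time sketch of the encodings:

#include <cstdint>

// Assumed ARM-mode DMB encoding: a fixed base word plus a 4-bit option field.
constexpr uint32_t DMB_BASE = 0xF57FF050;
constexpr uint32_t DMB_SY   = DMB_BASE | 0xF;   // "dmb sy"
constexpr uint32_t DMB_ST   = DMB_BASE | 0xE;   // "dmb st"
static_assert(DMB_SY == 0xF57FF05F, "dmb sy encodes as 0xF57FF05F");
static_assert(DMB_ST == 0xF57FF05E, "dmb st encodes as 0xF57FF05E");
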
  99 
  100 // Load-Load/Store barrier (pre-ARMv8 has no load-only DMB option, so 32-bit falls back to dmb_sy)
 101 inline static void dmb_ld() {
 102 #ifdef AARCH64



 103    __asm__ __volatile__ ("dmb ld" : : : "memory");
 104 #else
 105    dmb_sy();
 106 #endif
 107 }
 108 
 109 
 110 inline void OrderAccess::loadload()   { dmb_ld(); }
 111 inline void OrderAccess::loadstore()  { dmb_ld(); }
 112 inline void OrderAccess::acquire()    { dmb_ld(); }
 113 inline void OrderAccess::storestore() { dmb_st(); }
 114 inline void OrderAccess::storeload()  { dmb_sy(); }
 115 inline void OrderAccess::release()    { dmb_sy(); }
 116 inline void OrderAccess::fence()      { dmb_sy(); }
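
To illustrate how this mapping is meant to be used, here is a minimal publication sketch (the fields, writer, reader, and the value 42 are hypothetical, not from this file): the writer orders its payload store before the flag store with release(), and the reader orders the flag load before the payload load with acquire().

volatile int  _payload = 0;       // hypothetical published value
volatile bool _ready   = false;   // hypothetical guard flag

void writer() {
  _payload = 42;
  OrderAccess::release();         // dmb sy: payload store ordered before flag store
  _ready = true;
}

void reader() {
  if (_ready) {
    OrderAccess::acquire();       // dmb ld: flag load ordered before payload load
    int v = _payload;             // observes 42 once _ready is seen as true
    (void)v;
  }
}
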
 117 
  118 // Specializations for AArch64
 119 // TODO-AARCH64: evaluate effectiveness of ldar*/stlr* implementations compared to 32-bit ARM approach
 120 
 121 #ifdef AARCH64
 122 
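
The listing is truncated at the start of the AARCH64 specializations, so the following is not this file's code; it is only a hedged sketch of the kind of ldar-based acquiring load the TODO above refers to, assuming GCC-style inline asm:

// Hypothetical sketch of an acquiring 32-bit load on AARCH64.
static inline int load_acquire_int(const volatile int* p) {
  int v;
  __asm__ volatile ("ldar %w0, [%1]"   // single-copy-atomic load with acquire semantics
                    : "=r" (v)
                    : "r"  (p)
                    : "memory");
  return v;
}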

