1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #ifndef SHARE_VM_UTILITIES_GLOBALDEFINITIONS_HPP 26 #define SHARE_VM_UTILITIES_GLOBALDEFINITIONS_HPP 27 28 #ifndef __STDC_FORMAT_MACROS 29 #define __STDC_FORMAT_MACROS 30 #endif 31 32 #ifdef TARGET_COMPILER_gcc 33 # include "utilities/globalDefinitions_gcc.hpp" 34 #endif 35 #ifdef TARGET_COMPILER_visCPP 36 # include "utilities/globalDefinitions_visCPP.hpp" 37 #endif 38 #ifdef TARGET_COMPILER_sparcWorks 39 # include "utilities/globalDefinitions_sparcWorks.hpp" 40 #endif 41 #ifdef TARGET_COMPILER_xlc 42 # include "utilities/globalDefinitions_xlc.hpp" 43 #endif 44 45 #ifndef PRAGMA_DIAG_PUSH 46 #define PRAGMA_DIAG_PUSH 47 #endif 48 #ifndef PRAGMA_DIAG_POP 49 #define PRAGMA_DIAG_POP 50 #endif 51 #ifndef PRAGMA_FORMAT_NONLITERAL_IGNORED 52 #define PRAGMA_FORMAT_NONLITERAL_IGNORED 53 #endif 54 #ifndef PRAGMA_FORMAT_IGNORED 55 #define PRAGMA_FORMAT_IGNORED 56 #endif 57 #ifndef PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL 58 #define PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL 59 #endif 60 #ifndef PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL 61 #define PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL 62 #endif 63 #ifndef PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC 64 #define PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC 65 #endif 66 #ifndef ATTRIBUTE_PRINTF 67 #define ATTRIBUTE_PRINTF(fmt, vargs) 68 #endif 69 #ifndef ATTRIBUTE_SCANF 70 #define ATTRIBUTE_SCANF(fmt, vargs) 71 #endif 72 73 74 #include "utilities/macros.hpp" 75 76 // This file holds all globally used constants & types, class (forward) 77 // declarations and a few frequently used utility functions. 
//----------------------------------------------------------------------------------------------------
// Constants

// log2 of the size, in bytes, of the basic machine types.
const int LogBytesPerShort   = 1;
const int LogBytesPerInt     = 2;
#ifdef _LP64
const int LogBytesPerWord    = 3;
#else
const int LogBytesPerWord    = 2;
#endif
const int LogBytesPerLong    = 3;

// Byte sizes, derived from the logs above so the two sets cannot diverge.
const int BytesPerShort      = 1 << LogBytesPerShort;
const int BytesPerInt        = 1 << LogBytesPerInt;
const int BytesPerWord       = 1 << LogBytesPerWord;
const int BytesPerLong       = 1 << LogBytesPerLong;

const int LogBitsPerByte     = 3;
const int LogBitsPerShort    = LogBitsPerByte + LogBytesPerShort;
const int LogBitsPerInt      = LogBitsPerByte + LogBytesPerInt;
const int LogBitsPerWord     = LogBitsPerByte + LogBytesPerWord;
const int LogBitsPerLong     = LogBitsPerByte + LogBytesPerLong;

const int BitsPerByte        = 1 << LogBitsPerByte;
const int BitsPerShort       = 1 << LogBitsPerShort;
const int BitsPerInt         = 1 << LogBitsPerInt;
const int BitsPerWord        = 1 << LogBitsPerWord;
const int BitsPerLong        = 1 << LogBitsPerLong;

// Masks for testing whether an address/size is word- or long-aligned.
const int WordAlignmentMask  = (1 << LogBytesPerWord) - 1;
const int LongAlignmentMask  = (1 << LogBytesPerLong) - 1;

const int WordsPerLong       = 2;             // Number of stack entries for longs

const int oopSize            = sizeof(char*); // Full-width oop
extern int heapOopSize;                       // Oop within a java object
const int wordSize           = sizeof(char*);
const int longSize           = sizeof(jlong);
const int jintSize           = sizeof(jint);
const int size_tSize         = sizeof(size_t);

const int BytesPerOop        = BytesPerWord;  // Full-width oop

// Runtime-initialized (extern): a heap oop can be narrower than a full-width
// oop, so these are set up at VM startup rather than being compile-time consts.
extern int LogBytesPerHeapOop;                // Oop within a java object
extern int LogBitsPerHeapOop;
extern int BytesPerHeapOop;
extern int BitsPerHeapOop;

// Oop encoding heap max
extern uint64_t OopEncodingHeapMax;

const int BitsPerJavaInteger = 32;
const int BitsPerJavaLong    = 64;
const int BitsPerSize_t      = size_tSize * BitsPerByte;
// Size of a char[] needed to represent a jint as a string in decimal.
const int jintAsStringSize = 12;

// In fact this should be
// log2_intptr(sizeof(class JavaThread)) - log2_intptr(64);
// see os::set_memory_serialize_page()
#ifdef _LP64
const int SerializePageShiftCount = 4;
#else
const int SerializePageShiftCount = 3;
#endif

// An opaque struct of heap-word width, so that HeapWord* can be a generic
// pointer into the heap.  We require that object sizes be measured in
// units of heap words, so that
//   HeapWord* hw;
//   hw += oop(hw)->foo();
// works, where foo is a method (like size or scavenge) that returns the
// object size.
class HeapWord {
  friend class VMStructs;
 private:
  char* i;  // opaque payload; never accessed directly in product builds
#ifndef PRODUCT
 public:
  // Debug-only accessor for the raw pointer value.
  char* value() { return i; }
#endif
};

// Analogous opaque struct for metadata allocated from
// metaspaces.
class MetaWord {
  friend class VMStructs;
 private:
  char* i;  // opaque payload
};

// HeapWordSize must be 2^LogHeapWordSize.
const int HeapWordSize        = sizeof(HeapWord);
#ifdef _LP64
const int LogHeapWordSize     = 3;
#else
const int LogHeapWordSize     = 2;
#endif
const int HeapWordsPerLong    = BytesPerLong / HeapWordSize;
const int LogHeapWordsPerLong = LogBytesPerLong - LogHeapWordSize;

// The larger HeapWordSize for 64bit requires larger heaps
// for the same application running in 64bit.  See bug 4967770.
// The minimum alignment to a heap word size is done.  Other
// parts of the memory system may require additional alignment
// and are responsible for those alignments.
#ifdef _LP64
#define ScaleForWordSize(x) align_size_down_((x) * 13 / 10, HeapWordSize)
#else
#define ScaleForWordSize(x) (x)
#endif

// The minimum number of native machine words necessary to contain "byte_size"
// bytes.
194 inline size_t heap_word_size(size_t byte_size) { 195 return (byte_size + (HeapWordSize-1)) >> LogHeapWordSize; 196 } 197 198 199 const size_t K = 1024; 200 const size_t M = K*K; 201 const size_t G = M*K; 202 const size_t HWperKB = K / sizeof(HeapWord); 203 204 const jint min_jint = (jint)1 << (sizeof(jint)*BitsPerByte-1); // 0x80000000 == smallest jint 205 const jint max_jint = (juint)min_jint - 1; // 0x7FFFFFFF == largest jint 206 207 // Constants for converting from a base unit to milli-base units. For 208 // example from seconds to milliseconds and microseconds 209 210 const int MILLIUNITS = 1000; // milli units per base unit 211 const int MICROUNITS = 1000000; // micro units per base unit 212 const int NANOUNITS = 1000000000; // nano units per base unit 213 214 const jlong NANOSECS_PER_SEC = CONST64(1000000000); 215 const jint NANOSECS_PER_MILLISEC = 1000000; 216 217 inline const char* proper_unit_for_byte_size(size_t s) { 218 #ifdef _LP64 219 if (s >= 10*G) { 220 return "G"; 221 } 222 #endif 223 if (s >= 10*M) { 224 return "M"; 225 } else if (s >= 10*K) { 226 return "K"; 227 } else { 228 return "B"; 229 } 230 } 231 232 template <class T> 233 inline T byte_size_in_proper_unit(T s) { 234 #ifdef _LP64 235 if (s >= 10*G) { 236 return (T)(s/G); 237 } 238 #endif 239 if (s >= 10*M) { 240 return (T)(s/M); 241 } else if (s >= 10*K) { 242 return (T)(s/K); 243 } else { 244 return s; 245 } 246 } 247 248 //---------------------------------------------------------------------------------------------------- 249 // VM type definitions 250 251 // intx and uintx are the 'extended' int and 'extended' unsigned int types; 252 // they are 32bit wide on a 32-bit platform, and 64bit wide on a 64bit platform. 
253 254 typedef intptr_t intx; 255 typedef uintptr_t uintx; 256 257 const intx min_intx = (intx)1 << (sizeof(intx)*BitsPerByte-1); 258 const intx max_intx = (uintx)min_intx - 1; 259 const uintx max_uintx = (uintx)-1; 260 261 // Table of values: 262 // sizeof intx 4 8 263 // min_intx 0x80000000 0x8000000000000000 264 // max_intx 0x7FFFFFFF 0x7FFFFFFFFFFFFFFF 265 // max_uintx 0xFFFFFFFF 0xFFFFFFFFFFFFFFFF 266 267 typedef unsigned int uint; NEEDS_CLEANUP 268 269 270 //---------------------------------------------------------------------------------------------------- 271 // Java type definitions 272 273 // All kinds of 'plain' byte addresses 274 typedef signed char s_char; 275 typedef unsigned char u_char; 276 typedef u_char* address; 277 typedef uintptr_t address_word; // unsigned integer which will hold a pointer 278 // except for some implementations of a C++ 279 // linkage pointer to function. Should never 280 // need one of those to be placed in this 281 // type anyway. 282 283 // Utility functions to "portably" (?) bit twiddle pointers 284 // Where portable means keep ANSI C++ compilers quiet 285 286 inline address set_address_bits(address x, int m) { return address(intptr_t(x) | m); } 287 inline address clear_address_bits(address x, int m) { return address(intptr_t(x) & ~m); } 288 289 // Utility functions to "portably" make cast to/from function pointers. 290 291 inline address_word mask_address_bits(address x, int m) { return address_word(x) & m; } 292 inline address_word castable_address(address x) { return address_word(x) ; } 293 inline address_word castable_address(void* x) { return address_word(x) ; } 294 295 // Pointer subtraction. 296 // The idea here is to avoid ptrdiff_t, which is signed and so doesn't have 297 // the range we might need to find differences from one end of the heap 298 // to the other. 299 // A typical use might be: 300 // if (pointer_delta(end(), top()) >= size) { 301 // // enough room for an object of size 302 // ... 
303 // and then additions like 304 // ... top() + size ... 305 // are safe because we know that top() is at least size below end(). 306 inline size_t pointer_delta(const void* left, 307 const void* right, 308 size_t element_size) { 309 return (((uintptr_t) left) - ((uintptr_t) right)) / element_size; 310 } 311 // A version specialized for HeapWord*'s. 312 inline size_t pointer_delta(const HeapWord* left, const HeapWord* right) { 313 return pointer_delta(left, right, sizeof(HeapWord)); 314 } 315 // A version specialized for MetaWord*'s. 316 inline size_t pointer_delta(const MetaWord* left, const MetaWord* right) { 317 return pointer_delta(left, right, sizeof(MetaWord)); 318 } 319 320 // 321 // ANSI C++ does not allow casting from one pointer type to a function pointer 322 // directly without at best a warning. This macro accomplishes it silently 323 // In every case that is present at this point the value be cast is a pointer 324 // to a C linkage function. In somecase the type used for the cast reflects 325 // that linkage and a picky compiler would not complain. In other cases because 326 // there is no convenient place to place a typedef with extern C linkage (i.e 327 // a platform dependent header file) it doesn't. At this point no compiler seems 328 // picky enough to catch these instances (which are few). It is possible that 329 // using templates could fix these for all cases. This use of templates is likely 330 // so far from the middle of the road that it is likely to be problematic in 331 // many C++ compilers. 332 // 333 #define CAST_TO_FN_PTR(func_type, value) ((func_type)(castable_address(value))) 334 #define CAST_FROM_FN_PTR(new_type, func_ptr) ((new_type)((address_word)(func_ptr))) 335 336 // Unsigned byte types for os and stream.hpp 337 338 // Unsigned one, two, four and eigth byte quantities used for describing 339 // the .class file format. See JVM book chapter 4. 
typedef jubyte  u1;
typedef jushort u2;
typedef juint   u4;
typedef julong  u8;

const jubyte  max_jubyte  = (jubyte)-1;  // 0xFF       largest jubyte
const jushort max_jushort = (jushort)-1; // 0xFFFF     largest jushort
const juint   max_juint   = (juint)-1;   // 0xFFFFFFFF largest juint
const julong  max_julong  = (julong)-1;  // 0xFF....FF largest julong

// Signed counterparts of u1..u8, sized per the class file format.
typedef jbyte  s1;
typedef jshort s2;
typedef jint   s4;
typedef jlong  s8;

//----------------------------------------------------------------------------------------------------
// JVM spec restrictions

const int max_method_code_size = 64*K - 1;  // JVM spec, 2nd ed. section 4.8.1 (p.134)

// Default ProtectionDomainCacheSize values

const int defaultProtectionDomainCacheSize = NOT_LP64(137) LP64_ONLY(2017);

//----------------------------------------------------------------------------------------------------
// Default and minimum StringTableSize values
// NOTE: the sizes below are prime numbers (presumably chosen to spread the
// hash buckets evenly) — keep that property if they are ever retuned.

const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013);
const int minimumStringTableSize = 1009;

const int defaultSymbolTableSize = 20011;
const int minimumSymbolTableSize = 1009;


//----------------------------------------------------------------------------------------------------
// HotSwap - for JVMTI aka Class File Replacement and PopFrame
//
// Determines whether on-the-fly class replacement and frame popping are enabled.

#define HOTSWAP

//----------------------------------------------------------------------------------------------------
// Object alignment, in units of HeapWords.
//
// Minimum is max(BytesPerLong, BytesPerDouble, BytesPerOop) / HeapWordSize, so jlong, jdouble and
// reference fields can be naturally aligned.
// Runtime-initialized (extern) object-alignment values; see their definition
// site for how they are computed from the alignment flags at VM startup.
extern int MinObjAlignment;
extern int MinObjAlignmentInBytes;
extern int MinObjAlignmentInBytesMask;

extern int LogMinObjAlignment;
extern int LogMinObjAlignmentInBytes;

// Klass pointers are aligned to 1 << LogKlassAlignmentInBytes (= 8) bytes.
const int LogKlassAlignmentInBytes = 3;
const int LogKlassAlignment        = LogKlassAlignmentInBytes - LogHeapWordSize;
const int KlassAlignmentInBytes    = 1 << LogKlassAlignmentInBytes;
const int KlassAlignment           = KlassAlignmentInBytes / HeapWordSize;

// Klass encoding metaspace max: the largest metaspace addressable with a
// 32-bit (juint) offset scaled by the Klass alignment.
const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;

// Machine dependent stuff

#if defined(X86) && defined(COMPILER2) && !defined(JAVASE_EMBEDDED)
// Include Restricted Transactional Memory lock eliding optimization
#define INCLUDE_RTM_OPT 1
#define RTM_OPT_ONLY(code) code
#else
#define INCLUDE_RTM_OPT 0
#define RTM_OPT_ONLY(code)
#endif
// States of Restricted Transactional Memory usage.
enum RTMState {
  NoRTM      = 0x2, // Don't use RTM
  UseRTM     = 0x1, // Use RTM
  ProfileRTM = 0x0  // Use RTM with abort ratio calculation
};

// The maximum size of the code cache.  Can be overridden by targets.
#define CODE_CACHE_SIZE_LIMIT (2*G)
// Allow targets to reduce the default size of the code cache.
#define CODE_CACHE_DEFAULT_LIMIT CODE_CACHE_SIZE_LIMIT

// Pull in the CPU-specific definitions for the configured target architecture.
#ifdef TARGET_ARCH_x86
# include "globalDefinitions_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "globalDefinitions_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "globalDefinitions_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "globalDefinitions_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "globalDefinitions_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "globalDefinitions_aarch64.hpp"
#endif

/*
 * If a platform does not support native stack walking
 * the platform specific globalDefinitions (above)
 * can set PLATFORM_NATIVE_STACK_WALKING_SUPPORTED to 0
 */
#ifndef PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
#define PLATFORM_NATIVE_STACK_WALKING_SUPPORTED 1
#endif

// To assure the IRIW property on processors that are not multiple copy
// atomic, sync instructions must be issued between volatile reads to
// assure their ordering, instead of after volatile stores.
// (See "A Tutorial Introduction to the ARM and POWER Relaxed Memory Models"
// by Luc Maranget, Susmit Sarkar and Peter Sewell, INRIA/Cambridge)
#ifdef CPU_NOT_MULTIPLE_COPY_ATOMIC
const bool support_IRIW_for_not_multiple_copy_atomic_cpu = true;
#else
const bool support_IRIW_for_not_multiple_copy_atomic_cpu = false;
#endif

// The byte alignment to be used by Arena::Amalloc.  See bugid 4169348.
// Note: this value must be a power of 2

#define ARENA_AMALLOC_ALIGNMENT (2*BytesPerWord)

// Signed variants of alignment helpers.  There are two versions of each, a macro
// for use in places like enum definitions that require compile-time constant
// expressions and a function for all other places so as to get type checking.
472 473 #define align_size_up_(size, alignment) (((size) + ((alignment) - 1)) & ~((alignment) - 1)) 474 475 inline bool is_size_aligned(size_t size, size_t alignment) { 476 return align_size_up_(size, alignment) == size; 477 } 478 479 inline bool is_ptr_aligned(void* ptr, size_t alignment) { 480 return align_size_up_((intptr_t)ptr, (intptr_t)alignment) == (intptr_t)ptr; 481 } 482 483 inline intptr_t align_size_up(intptr_t size, intptr_t alignment) { 484 return align_size_up_(size, alignment); 485 } 486 487 #define align_size_down_(size, alignment) ((size) & ~((alignment) - 1)) 488 489 inline intptr_t align_size_down(intptr_t size, intptr_t alignment) { 490 return align_size_down_(size, alignment); 491 } 492 493 #define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment))) 494 495 inline void* align_ptr_up(void* ptr, size_t alignment) { 496 return (void*)align_size_up((intptr_t)ptr, (intptr_t)alignment); 497 } 498 499 inline void* align_ptr_down(void* ptr, size_t alignment) { 500 return (void*)align_size_down((intptr_t)ptr, (intptr_t)alignment); 501 } 502 503 // Align objects by rounding up their size, in HeapWord units. 504 505 #define align_object_size_(size) align_size_up_(size, MinObjAlignment) 506 507 inline intptr_t align_object_size(intptr_t size) { 508 return align_size_up(size, MinObjAlignment); 509 } 510 511 inline bool is_object_aligned(intptr_t addr) { 512 return addr == align_object_size(addr); 513 } 514 515 // Pad out certain offsets to jlong alignment, in HeapWord units. 516 517 inline intptr_t align_object_offset(intptr_t offset) { 518 return align_size_up(offset, HeapWordsPerLong); 519 } 520 521 inline void* align_pointer_up(const void* addr, size_t size) { 522 return (void*) align_size_up_((uintptr_t)addr, size); 523 } 524 525 // Align down with a lower bound. If the aligning results in 0, return 'alignment'. 
526 527 inline size_t align_size_down_bounded(size_t size, size_t alignment) { 528 size_t aligned_size = align_size_down_(size, alignment); 529 return aligned_size > 0 ? aligned_size : alignment; 530 } 531 532 // Clamp an address to be within a specific page 533 // 1. If addr is on the page it is returned as is 534 // 2. If addr is above the page_address the start of the *next* page will be returned 535 // 3. Otherwise, if addr is below the page_address the start of the page will be returned 536 inline address clamp_address_in_page(address addr, address page_address, intptr_t page_size) { 537 if (align_size_down(intptr_t(addr), page_size) == align_size_down(intptr_t(page_address), page_size)) { 538 // address is in the specified page, just return it as is 539 return addr; 540 } else if (addr > page_address) { 541 // address is above specified page, return start of next page 542 return (address)align_size_down(intptr_t(page_address), page_size) + page_size; 543 } else { 544 // address is below specified page, return start of page 545 return (address)align_size_down(intptr_t(page_address), page_size); 546 } 547 } 548 549 550 // The expected size in bytes of a cache line, used to pad data structures. 551 #ifndef DEFAULT_CACHE_LINE_SIZE 552 #define DEFAULT_CACHE_LINE_SIZE 64 553 #endif 554 555 556 //---------------------------------------------------------------------------------------------------- 557 // Utility macros for compilers 558 // used to silence compiler warnings 559 560 #define Unused_Variable(var) var 561 562 563 //---------------------------------------------------------------------------------------------------- 564 // Miscellaneous 565 566 // 6302670 Eliminate Hotspot __fabsf dependency 567 // All fabs() callers should call this function instead, which will implicitly 568 // convert the operand to double, avoiding a dependency on __fabsf which 569 // doesn't exist in early versions of Solaris 8. 
inline double fabsd(double value) {
  return fabs(value);
}

//----------------------------------------------------------------------------------------------------
// Special casts
// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
typedef union {
  jfloat f;
  jint i;
} FloatIntConv;

typedef union {
  jdouble d;
  jlong l;
  julong ul;
} DoubleLongConv;

inline jint    jint_cast   (jfloat  x)  { return ((FloatIntConv*)&x)->i; }
inline jfloat  jfloat_cast (jint    x)  { return ((FloatIntConv*)&x)->f; }

inline jlong   jlong_cast  (jdouble x)  { return ((DoubleLongConv*)&x)->l;  }
inline julong  julong_cast (jdouble x)  { return ((DoubleLongConv*)&x)->ul; }
inline jdouble jdouble_cast(jlong   x)  { return ((DoubleLongConv*)&x)->d;  }

// Extract the low/high 32-bit halves of a jlong.
inline jint low (jlong value)                    { return jint(value); }
inline jint high(jlong value)                    { return jint(value >> 32); }

// the fancy casts are a hopefully portable way
// to do unsigned 32 to 64 bit type conversion
inline void set_low (jlong* value, jint low )    { *value &= (jlong)0xffffffff << 32;
                                                   *value |= (jlong)(julong)(juint)low; }

inline void set_high(jlong* value, jint high)    { *value &= (jlong)(julong)(juint)0xffffffff;
                                                   *value |= (jlong)high       << 32; }

// Assemble a jlong from its high and low 32-bit halves.
inline jlong jlong_from(jint h, jint l) {
  jlong result = 0; // initialization to avoid warning
  set_high(&result, h);
  set_low(&result,  l);
  return result;
}

union jlong_accessor {
  jint  words[2];
  jlong long_value;
};

void basic_types_init(); // cannot define here; uses assert


// NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/runtime/BasicType.java
enum BasicType {
  T_BOOLEAN     =  4,
  T_CHAR        =  5,
  T_FLOAT       =  6,
  T_DOUBLE      =  7,
  T_BYTE        =  8,
  T_SHORT       =  9,
  T_INT         = 10,
  T_LONG        = 11,
  T_OBJECT      = 12,
  T_ARRAY       = 13,
  T_VOID        = 14,
  T_ADDRESS     = 15,
  T_NARROWOOP   = 16,
  T_METADATA    = 17,
  T_NARROWKLASS = 18,
  T_CONFLICT    = 19, // for stack value type with conflicting contents
  T_ILLEGAL     = 99
};

// True for the eight primitive Java types (T_BOOLEAN .. T_LONG).
inline bool is_java_primitive(BasicType t) {
  return T_BOOLEAN <= t && t <= T_LONG;
}

inline bool is_subword_type(BasicType t) {
  // these guys are processed exactly like T_INT in calling sequences:
  return (t == T_BOOLEAN || t == T_CHAR || t == T_BYTE || t == T_SHORT);
}

inline bool is_signed_subword_type(BasicType t) {
  return (t == T_BYTE || t == T_SHORT);
}

// Convert a char from a classfile signature to a BasicType
inline BasicType char2type(char c) {
  switch( c ) {
  case 'B': return T_BYTE;
  case 'C': return T_CHAR;
  case 'D': return T_DOUBLE;
  case 'F': return T_FLOAT;
  case 'I': return T_INT;
  case 'J': return T_LONG;
  case 'S': return T_SHORT;
  case 'Z': return T_BOOLEAN;
  case 'V': return T_VOID;
  case 'L': return T_OBJECT;
  case '[': return T_ARRAY;
  }
  return T_ILLEGAL;
}

extern char type2char_tab[T_CONFLICT+1];        // Map a BasicType to a jchar
inline char type2char(BasicType t) { return (uint)t < T_CONFLICT+1 ? type2char_tab[t] : 0; }
extern int type2size[T_CONFLICT+1];             // Map BasicType to result stack elements
extern const char* type2name_tab[T_CONFLICT+1]; // Map a BasicType to its name string
inline const char* type2name(BasicType t) { return (uint)t < T_CONFLICT+1 ? type2name_tab[t] : NULL; }
extern BasicType name2type(const char* name);

// Auxiliary math routines
// least common multiple
extern size_t lcm(size_t a, size_t b);


// Size of each BasicType in stack slots/words.
// NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/runtime/BasicType.java
enum BasicTypeSize {
  T_BOOLEAN_size     = 1,
  T_CHAR_size        = 1,
  T_FLOAT_size       = 1,
  T_DOUBLE_size      = 2,
  T_BYTE_size        = 1,
  T_SHORT_size       = 1,
  T_INT_size         = 1,
  T_LONG_size        = 2,
  T_OBJECT_size      = 1,
  T_ARRAY_size       = 1,
  T_NARROWOOP_size   = 1,
  T_NARROWKLASS_size = 1,
  T_VOID_size        = 0
};


// maps a BasicType to its instance field storage type:
// all sub-word integral types are widened to T_INT
extern BasicType type2field[T_CONFLICT+1];
extern BasicType type2wfield[T_CONFLICT+1];


// size in bytes
enum ArrayElementSize {
  T_BOOLEAN_aelem_bytes     = 1,
  T_CHAR_aelem_bytes        = 2,
  T_FLOAT_aelem_bytes       = 4,
  T_DOUBLE_aelem_bytes      = 8,
  T_BYTE_aelem_bytes        = 1,
  T_SHORT_aelem_bytes       = 2,
  T_INT_aelem_bytes         = 4,
  T_LONG_aelem_bytes        = 8,
#ifdef _LP64
  T_OBJECT_aelem_bytes      = 8,
  T_ARRAY_aelem_bytes       = 8,
#else
  T_OBJECT_aelem_bytes      = 4,
  T_ARRAY_aelem_bytes       = 4,
#endif
  T_NARROWOOP_aelem_bytes   = 4,
  T_NARROWKLASS_aelem_bytes = 4,
  T_VOID_aelem_bytes        = 0
};

extern int _type2aelembytes[T_CONFLICT+1]; // maps a BasicType to nof bytes used by its array element
#ifdef ASSERT
extern int type2aelembytes(BasicType t, bool allow_address = false); // asserts
#else
inline int type2aelembytes(BasicType t, bool allow_address = false) { return _type2aelembytes[t]; }
#endif


// JavaValue serves as a container for arbitrary Java values.
// JavaValue: a tagged container for an arbitrary Java value — the payload
// union plus the BasicType that says which member is live.
class JavaValue {

 public:
  typedef union JavaCallValue {
    jfloat   f;
    jdouble  d;
    jint     i;
    jlong    l;
    jobject  h;
  } JavaCallValue;

 private:
  BasicType         _type;   // which union member is valid
  JavaCallValue     _value;  // the payload

 public:
  JavaValue(BasicType t = T_ILLEGAL) { _type = t; }

  JavaValue(jfloat value) {
    _type    = T_FLOAT;
    _value.f = value;
  }

  JavaValue(jdouble value) {
    _type    = T_DOUBLE;
    _value.d = value;
  }

 jfloat get_jfloat() const { return _value.f; }
 jdouble get_jdouble() const { return _value.d; }
 jint get_jint() const { return _value.i; }
 jlong get_jlong() const { return _value.l; }
 jobject get_jobject() const { return _value.h; }
 JavaCallValue* get_value_addr() { return &_value; }
 BasicType get_type() const { return _type; }

 void set_jfloat(jfloat f) { _value.f = f;}
 void set_jdouble(jdouble d) { _value.d = d;}
 void set_jint(jint i) { _value.i = i;}
 void set_jlong(jlong l) { _value.l = l;}
 void set_jobject(jobject h) { _value.h = h;}
 void set_type(BasicType t) { _type = t; }

 // Subword accessors: subword values are stored widened in the jint slot.
 jboolean get_jboolean() const { return (jboolean) (_value.i);}
 jbyte get_jbyte() const { return (jbyte) (_value.i);}
 jchar get_jchar() const { return (jchar) (_value.i);}
 jshort get_jshort() const { return (jshort) (_value.i);}

};


#define STACK_BIAS      0
// V9 Sparc CPU's running in 64 Bit mode use a stack bias of 7ff
// in order to extend the reach of the stack pointer.
#if defined(SPARC) && defined(_LP64)
#undef STACK_BIAS
#define STACK_BIAS      0x7ff
#endif


// TosState describes the top-of-stack state before and after the execution of
// a bytecode or method. The top-of-stack value may be cached in one or more CPU
// registers. The TosState corresponds to the 'machine representation' of this cached
// value. There are 4 states corresponding to the JAVA types int, long, float & double
// as well as a 5th state in case the top-of-stack value is actually on the top
// of stack (in memory) and thus not cached. The atos state corresponds to the itos
// state when it comes to machine representation but is used separately for (oop)
// type specific operations (e.g. verification code).

enum TosState {         // describes the tos cache contents
  btos = 0,             // byte, bool tos cached
  ctos = 1,             // char tos cached
  stos = 2,             // short tos cached
  itos = 3,             // int tos cached
  ltos = 4,             // long tos cached
  ftos = 5,             // float tos cached
  dtos = 6,             // double tos cached
  atos = 7,             // object cached
  vtos = 8,             // tos not cached
  number_of_states,
  ilgl                  // illegal state: should not occur
};


// Map a BasicType to the TosState used to cache a value of that type;
// subword types share btos/ctos/stos, arrays and objects share atos.
inline TosState as_TosState(BasicType type) {
  switch (type) {
    case T_BYTE   : return btos;
    case T_BOOLEAN: return btos; // FIXME: Add ztos
    case T_CHAR   : return ctos;
    case T_SHORT  : return stos;
    case T_INT    : return itos;
    case T_LONG   : return ltos;
    case T_FLOAT  : return ftos;
    case T_DOUBLE : return dtos;
    case T_VOID   : return vtos;
    case T_ARRAY  : // fall through
    case T_OBJECT : return atos;
  }
  return ilgl;
}

// Inverse of as_TosState (modulo the many-to-one mappings above).
inline BasicType as_BasicType(TosState state) {
  switch (state) {
    //case ztos: return T_BOOLEAN;//FIXME
    case btos : return T_BYTE;
    case ctos : return T_CHAR;
    case stos : return T_SHORT;
    case itos : return T_INT;
    case ltos : return T_LONG;
    case ftos : return T_FLOAT;
    case dtos : return T_DOUBLE;
    case atos : return T_OBJECT;
    case vtos : return T_VOID;
  }
  return T_ILLEGAL;
}


// Helper function to convert BasicType info into TosState
// Note: Cannot define here as it uses global constant at the time being.
// Out-of-line BasicType -> TosState conversion (see note above its inline twin).
TosState as_TosState(BasicType type);


// JavaThreadState keeps track of which part of the code a thread is executing in. This
// information is needed by the safepoint code.
//
// There are 4 essential states:
//
//  _thread_new         : Just started, but not executed init. code yet (most likely still in OS init code)
//  _thread_in_native   : In native code. This is a safepoint region, since all oops will be in jobject handles
//  _thread_in_vm       : Executing in the vm
//  _thread_in_Java     : Executing either interpreted or compiled Java code (or could be in a stub)
//
// Each state has an associated xxxx_trans state, which is an intermediate state used when a thread is in
// a transition from one state to another. These extra states make it possible for the safepoint code to
// handle certain thread_states without having to suspend the thread - making the safepoint code faster.
//
// Given a state, the xxx_trans state can always be found by adding 1.
//
enum JavaThreadState {
  _thread_uninitialized     =  0, // should never happen (missing initialization)
  _thread_new               =  2, // just starting up, i.e., in process of being initialized
  _thread_new_trans         =  3, // corresponding transition state (not used, included for completeness)
  _thread_in_native         =  4, // running in native code
  _thread_in_native_trans   =  5, // corresponding transition state
  _thread_in_vm             =  6, // running in VM
  _thread_in_vm_trans       =  7, // corresponding transition state
  _thread_in_Java           =  8, // running in Java or in stub code
  _thread_in_Java_trans     =  9, // corresponding transition state (not used, included for completeness)
  _thread_blocked           = 10, // blocked in vm
  _thread_blocked_trans     = 11, // corresponding transition state
  _thread_max_state         = 12  // maximum thread state+1 - used for statistics allocation
};


// Handy constants for deciding which compiler mode to use.
enum MethodCompilation {
  InvocationEntryBci = -1     // i.e., not an on-stack replacement compilation
};

// Enumeration to distinguish tiers of compilation
enum CompLevel {
  CompLevel_any               = -1,
  CompLevel_all               = -1,
  CompLevel_none              = 0,         // Interpreter
  CompLevel_simple            = 1,         // C1
  CompLevel_limited_profile   = 2,         // C1, invocation & backedge counters
  CompLevel_full_profile      = 3,         // C1, invocation & backedge counters + mdo
  CompLevel_full_optimization = 4,         // C2 or Shark

#if defined(COMPILER2) || defined(SHARK)
  CompLevel_highest_tier      = CompLevel_full_optimization,  // pure C2 and tiered
#elif defined(COMPILER1)
  CompLevel_highest_tier      = CompLevel_simple,             // pure C1
#else
  CompLevel_highest_tier      = CompLevel_none,
#endif

#if defined(TIERED)
  CompLevel_initial_compile   = CompLevel_full_profile        // tiered
#elif defined(COMPILER1)
  CompLevel_initial_compile   = CompLevel_simple              // pure C1
#elif defined(COMPILER2) || defined(SHARK)
  CompLevel_initial_compile   = CompLevel_full_optimization   // pure C2
#else
  CompLevel_initial_compile   = CompLevel_none
#endif
};

// True iff comp_level denotes one of the C1 tiers, i.e. it lies strictly
// between interpreter-only and full optimization.
inline bool is_c1_compile(int comp_level) {
  return (CompLevel_none < comp_level) && (comp_level < CompLevel_full_optimization);
}

// True iff comp_level is the fully optimizing tier (C2 or Shark).
inline bool is_c2_compile(int comp_level) {
  return (comp_level == CompLevel_full_optimization);
}

// True iff comp_level is the best tier available in this build configuration.
inline bool is_highest_tier_compile(int comp_level) {
  return (comp_level == CompLevel_highest_tier);
}

// True iff comp_level denotes an actual compilation (any C1 tier or C2).
inline bool is_compile(int comp_level) {
  if (is_c1_compile(comp_level)) {
    return true;
  }
  return is_c2_compile(comp_level);
}

//----------------------------------------------------------------------------------------------------
// 'Forward' declarations of frequently used classes
// (in order to reduce interface dependencies & reduce
// number of unnecessary compilations after changes)

class symbolTable;
class ClassFileStream;

class Event;

// Threading
class Thread;
class VMThread;
class JavaThread;
class Threads;

class VM_Operation;
class VMOperationQueue;

// Generated code / compiler output
class CodeBlob;
class nmethod;
class OSRAdapter;
class I2CAdapter;
class C2IAdapter;
class CompiledIC;
class relocInfo;
class ScopeDesc;
class PcDesc;

class Recompiler;
class Recompilee;
class RecompilationPolicy;
class RFrame;
class CompiledRFrame;
class InterpretedRFrame;

// Stack frames and their virtual-frame views
class frame;

class vframe;
class javaVFrame;
class interpretedVFrame;
class compiledVFrame;
class deoptimizedVFrame;
class externalVFrame;
class entryVFrame;

class RegisterMap;

// Synchronization
class Mutex;
class Monitor;
class BasicLock;
class BasicObjectLock;

class PeriodicTask;

class JavaCallWrapper;

class oopDesc;
class metaDataOopDesc;

class NativeCall;

class zone;

class StubQueue;

class outputStream;

class ResourceArea;

// Debug information recording/reading
class DebugInformationRecorder;
class ScopeValue;
class CompressedStream;
class DebugInfoReadStream;
class DebugInfoWriteStream;
class LocationValue;
class ConstantValue;
class IllegalValue;

class PrivilegedElement;
class MonitorArray;

class MonitorInfo;

class OffsetClosure;
class OopMapCache;
class InterpreterOopMap;
class OopMapCacheEntry;
class OSThread;

// Entry-point signature for OS-level thread start routines.
typedef int (*OSThreadStartFunc)(void*);

class Space;

class JavaValue;
class methodHandle;
class JavaCallArguments;

// Basic support for errors (general debug facilities not defined at this point of the include phase)

extern void basic_fatal(const char* msg);


//----------------------------------------------------------------------------------------------------
// Special constants for debugging
// Values used to fill ("zap") freed or uninitialized memory so that stale
// uses show up as recognizable patterns in crash dumps and debuggers.
const jint     badInt           = -3;                       // generic "bad int" value
const long     badAddressVal    = -2;                       // generic "bad address" value
const long     badOopVal        = -1;                       // generic "bad oop" value
const intptr_t badHeapOopVal    = (intptr_t) CONST64(0x2BAD4B0BBAADBABE); // value used to zap heap after GC
const int      badHandleValue   = 0xBC;                     // value used to zap vm handle area
const int      badResourceValue = 0xAB;                     // value used to zap resource area
const int      freeBlockPad     = 0xBA;                     // value used to pad freed blocks.
const int      uninitBlockPad   = 0xF1;                     // value used to zap newly malloc'd blocks.
const intptr_t badJNIHandleVal  = (intptr_t) CONST64(0xFEFEFEFEFEFEFEFE); // value used to zap jni handle area
const juint    badHeapWordVal   = 0xBAADBABE;               // value used to zap heap after GC
const juint    badMetaWordVal   = 0xBAADFADE;               // value used to zap metadata heap after GC
const int      badCodeHeapNewVal= 0xCC;                     // value used to zap Code heap at allocation
const int      badCodeHeapFreeVal = 0xDD;                   // value used to zap Code heap at deallocation


// (These must be implemented as #defines because C++ compilers are
// not obligated to inline non-integral constants!)
1068 #define badAddress ((address)::badAddressVal) 1069 #define badOop (cast_to_oop(::badOopVal)) 1070 #define badHeapWord (::badHeapWordVal) 1071 #define badJNIHandle (cast_to_oop(::badJNIHandleVal)) 1072 1073 // Default TaskQueue size is 16K (32-bit) or 128K (64-bit) 1074 #define TASKQUEUE_SIZE (NOT_LP64(1<<14) LP64_ONLY(1<<17)) 1075 1076 //---------------------------------------------------------------------------------------------------- 1077 // Utility functions for bitfield manipulations 1078 1079 const intptr_t AllBits = ~0; // all bits set in a word 1080 const intptr_t NoBits = 0; // no bits set in a word 1081 const jlong NoLongBits = 0; // no bits set in a long 1082 const intptr_t OneBit = 1; // only right_most bit set in a word 1083 1084 // get a word with the n.th or the right-most or left-most n bits set 1085 // (note: #define used only so that they can be used in enum constant definitions) 1086 #define nth_bit(n) (n >= BitsPerWord ? 0 : OneBit << (n)) 1087 #define right_n_bits(n) (nth_bit(n) - 1) 1088 #define left_n_bits(n) (right_n_bits(n) << (n >= BitsPerWord ? 0 : (BitsPerWord - n))) 1089 1090 // bit-operations using a mask m 1091 inline void set_bits (intptr_t& x, intptr_t m) { x |= m; } 1092 inline void clear_bits (intptr_t& x, intptr_t m) { x &= ~m; } 1093 inline intptr_t mask_bits (intptr_t x, intptr_t m) { return x & m; } 1094 inline jlong mask_long_bits (jlong x, jlong m) { return x & m; } 1095 inline bool mask_bits_are_true (intptr_t flags, intptr_t mask) { return (flags & mask) == mask; } 1096 1097 // bit-operations using the n.th bit 1098 inline void set_nth_bit(intptr_t& x, int n) { set_bits (x, nth_bit(n)); } 1099 inline void clear_nth_bit(intptr_t& x, int n) { clear_bits(x, nth_bit(n)); } 1100 inline bool is_set_nth_bit(intptr_t x, int n) { return mask_bits (x, nth_bit(n)) != NoBits; } 1101 1102 // returns the bitfield of x starting at start_bit_no with length field_length (no sign-extension!) 
inline intptr_t bitfield(intptr_t x, int start_bit_no, int field_length) {
  // shift the field down to bit 0, then mask to field_length bits
  return mask_bits(x >> start_bit_no, right_n_bits(field_length));
}


//----------------------------------------------------------------------------------------------------
// Utility functions for integers

// Avoid use of global min/max macros which may cause unwanted double
// evaluation of arguments.
#ifdef max
#undef max
#endif

#ifdef min
#undef min
#endif

// Poison the names so accidental uses of min/max fail to compile with a
// self-explanatory identifier in the error message.
#define max(a,b) Do_not_use_max_use_MAX2_instead
#define min(a,b) Do_not_use_min_use_MIN2_instead

// It is necessary to use templates here. Having normal overloaded
// functions does not work because it is necessary to provide both 32-
// and 64-bit overloaded functions, which does not work, and having
// explicitly-typed versions of these routines (i.e., MAX2I, MAX2L)
// will be even more error-prone than macros.
template<class T> inline T MAX2(T a, T b) { return (a > b) ? a : b; }
template<class T> inline T MIN2(T a, T b) { return (a < b) ? a : b; }
template<class T> inline T MAX3(T a, T b, T c) { return MAX2(MAX2(a, b), c); }
template<class T> inline T MIN3(T a, T b, T c) { return MIN2(MIN2(a, b), c); }
template<class T> inline T MAX4(T a, T b, T c, T d) { return MAX2(MAX3(a, b, c), d); }
template<class T> inline T MIN4(T a, T b, T c, T d) { return MIN2(MIN3(a, b, c), d); }

// NOTE(review): for signed integral T, ABS of the most negative value
// overflows (undefined behavior) since -x is not representable.
template<class T> inline T ABS(T x) { return (x > 0) ? x : -x; }

// true if x is a power of 2, false otherwise
// (x & (x-1)) clears the lowest set bit; a power of two has exactly one bit set.
inline bool is_power_of_2(intptr_t x) {
  return ((x != NoBits) && (mask_bits(x, x - 1) == NoBits));
}

// long version of is_power_of_2
inline bool is_power_of_2_long(jlong x) {
  return ((x != NoLongBits) && (mask_long_bits(x, x - 1) == NoLongBits));
}

//* largest i such that 2^i <= x
//  A negative value of 'x' will return '31'
//  (the cast to uintptr_t makes a negative x compare larger than every
//  power of two, so the loop runs until p wraps to 0)
inline int log2_intptr(intptr_t x) {
  int i = -1;
  uintptr_t p =  1;
  while (p != 0 && p <= (uintptr_t)x) {
    // p = 2^(i+1) && p <= x (i.e., 2^(i+1) <= x)
    i++; p *= 2;
  }
  // p = 2^(i+1) && x < p (i.e., 2^i <= x < 2^(i+1))
  // (if p = 0 then overflow occurred and i = 31)
  return i;
}

//* largest i such that 2^i <= x
//  A negative value of 'x' will return '63'
inline int log2_long(jlong x) {
  int i = -1;
  julong p =  1;
  while (p != 0 && p <= (julong)x) {
    // p = 2^(i+1) && p <= x (i.e., 2^(i+1) <= x)
    i++; p *= 2;
  }
  // p = 2^(i+1) && x < p (i.e., 2^i <= x < 2^(i+1))
  // (if p = 0 then overflow occurred and i = 63)
  return i;
}

//* the argument must be exactly a power of 2
inline int exact_log2(intptr_t x) {
  #ifdef ASSERT
    if (!is_power_of_2(x)) basic_fatal("x must be a power of 2");
  #endif
  return log2_intptr(x);
}

//* the argument must be exactly a power of 2
inline int exact_log2_long(jlong x) {
  #ifdef ASSERT
    if (!is_power_of_2_long(x)) basic_fatal("x must be a power of 2");
  #endif
  return log2_long(x);
}


// returns integer round-up to the nearest multiple of s (s must be a power of two)
// e.g. round_to(17, 8) == 24: add s-1, then mask away the low bits.
inline intptr_t round_to(intptr_t x, uintx s) {
  #ifdef ASSERT
    if (!is_power_of_2(s)) basic_fatal("s must be a power of 2");
  #endif
  const uintx m = s - 1;
  return mask_bits(x + m, ~m);
}

// returns integer round-down to the nearest multiple of s (s must be a power of two)
// e.g. round_down(17, 8) == 16: mask away the low bits.
inline intptr_t round_down(intptr_t x, uintx s) {
  #ifdef ASSERT
    if (!is_power_of_2(s)) basic_fatal("s must be a power of 2");
  #endif
  const uintx m = s - 1;
  return mask_bits(x, ~m);
}


inline bool is_odd (intx x) { return x & 1; }
inline bool is_even(intx x) { return !is_odd(x); }

// "to" should be greater than "from."
inline intx byte_size(void* from, void* to) {
  return (address)to - (address)from;
}

//----------------------------------------------------------------------------------------------------
// Avoid non-portable casts with these routines (DEPRECATED)

// NOTE: USE Bytes class INSTEAD WHERE POSSIBLE
//       Bytes is optimized machine-specifically and may be much faster than the portable routines below.

// Given sequence of four bytes, build into a 32-bit word
// following the conventions used in class files.
// On the 386, this could be realized with a simple address cast.
//

// Assemble a big-endian (class-file order) value from individual bytes:
// c1 is the most significant byte, c8/c4/c2 the least significant.

// This routine takes eight bytes:
inline u8 build_u8_from( u1 c1, u1 c2, u1 c3, u1 c4, u1 c5, u1 c6, u1 c7, u1 c8 ) {
  return  (( u8(c1) << 56 )  &  ( u8(0xff) << 56 ))
       |  (( u8(c2) << 48 )  &  ( u8(0xff) << 48 ))
       |  (( u8(c3) << 40 )  &  ( u8(0xff) << 40 ))
       |  (( u8(c4) << 32 )  &  ( u8(0xff) << 32 ))
       |  (( u8(c5) << 24 )  &  ( u8(0xff) << 24 ))
       |  (( u8(c6) << 16 )  &  ( u8(0xff) << 16 ))
       |  (( u8(c7) <<  8 )  &  ( u8(0xff) <<  8 ))
       |  (( u8(c8) <<  0 )  &  ( u8(0xff) <<  0 ));
}

// This routine takes four bytes:
inline u4 build_u4_from( u1 c1, u1 c2, u1 c3, u1 c4 ) {
  return  (( u4(c1) << 24 )  &  0xff000000)
       |  (( u4(c2) << 16 )  &  0x00ff0000)
       |  (( u4(c3) <<  8 )  &  0x0000ff00)
       |  (( u4(c4) <<  0 )  &  0x000000ff);
}

// And this one works if the four bytes are contiguous in memory:
inline u4 build_u4_from( u1* p ) {
  return  build_u4_from( p[0], p[1], p[2], p[3] );
}

// Ditto for two-byte ints:
inline u2 build_u2_from( u1 c1, u1 c2 ) {
  return  u2((( u2(c1) <<  8 )  &  0xff00)
          |  (( u2(c2) <<  0 )  &  0x00ff));
}

// And this one works if the two bytes are contiguous in memory:
inline u2 build_u2_from( u1* p ) {
  return  build_u2_from( p[0], p[1] );
}

// Ditto for floats:
// NOTE(review): reinterprets the u4 bit pattern via a pointer cast
// (*(jfloat*)&u), which violates C++ strict-aliasing rules; presumably the
// build disables the relevant optimizations — confirm before reusing elsewhere.
inline jfloat build_float_from( u1 c1, u1 c2, u1 c3, u1 c4 ) {
  u4 u = build_u4_from( c1, c2, c3, c4 );
  return  *(jfloat*)&u;
}

inline jfloat build_float_from( u1* p ) {
  u4 u = build_u4_from( p );
  return  *(jfloat*)&u;
}


// now (64-bit) longs

inline jlong build_long_from( u1 c1, u1 c2, u1 c3, u1 c4, u1 c5, u1 c6, u1 c7, u1 c8 ) {
  return  (( jlong(c1) << 56 )  &  ( jlong(0xff) << 56 ))
       |  (( jlong(c2) << 48 )  &  ( jlong(0xff) << 48 ))
       |  (( jlong(c3) << 40 )  &  ( jlong(0xff) << 40 ))
       |  (( jlong(c4) << 32 )  &  ( jlong(0xff) << 32 ))
       |  (( jlong(c5) << 24 )  &  ( jlong(0xff) << 24 ))
       |  (( jlong(c6) << 16 )  &  ( jlong(0xff) << 16 ))
       |  (( jlong(c7) <<  8 )  &  ( jlong(0xff) <<  8 ))
       |  (( jlong(c8) <<  0 )  &  ( jlong(0xff) <<  0 ));
}

inline jlong build_long_from( u1* p ) {
  return  build_long_from( p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7] );
}


// Doubles, too!
// NOTE(review): same strict-aliasing caveat as build_float_from above.
inline jdouble build_double_from( u1 c1, u1 c2, u1 c3, u1 c4, u1 c5, u1 c6, u1 c7, u1 c8 ) {
  jlong u = build_long_from( c1, c2, c3, c4, c5, c6, c7, c8 );
  return  *(jdouble*)&u;
}

inline jdouble build_double_from( u1* p ) {
  jlong u = build_long_from( p );
  return  *(jdouble*)&u;
}


// Portable routines to go the other way:
// (split a value into its big-endian byte sequence)

inline void explode_short_to( u2 x, u1& c1, u1& c2 ) {
  c1 = u1(x >> 8);
  c2 = u1(x);
}

inline void explode_short_to( u2 x, u1* p ) {
  explode_short_to( x, p[0], p[1]);
}

inline void explode_int_to( u4 x, u1& c1, u1& c2, u1& c3, u1& c4 ) {
  c1 = u1(x >> 24);
  c2 = u1(x >> 16);
  c3 = u1(x >>  8);
  c4 = u1(x);
}

inline void explode_int_to( u4 x, u1* p ) {
  explode_int_to( x, p[0], p[1], p[2], p[3]);
}


// Pack and extract shorts to/from ints:

inline int extract_low_short_from_int(jint x) {
  return x & 0xffff;
}

// NOTE(review): right-shifting a negative jint is implementation-defined in
// pre-C++20; this relies on the arithmetic-shift behavior of supported compilers.
inline int extract_high_short_from_int(jint x) {
  return (x >> 16) & 0xffff;
}

inline int build_int_from_shorts( jushort low, jushort high ) {
  return ((int)((unsigned int)high << 16) | (unsigned int)low);
}

// Convert pointer to intptr_t, for use in printing pointers.
inline intptr_t p2i(const void * p) {
  return (intptr_t) p;
}

// Printf-style formatters for fixed- and variable-width types as pointers and
// integers. These are derived from the definitions in inttypes.h.
// If the platform doesn't provide appropriate definitions, they should be
// provided in the compiler-specific definitions file
// (e.g., globalDefinitions_gcc.hpp).
// The _W(width) variants take a literal field width; #width stringizes it
// into the format, so the argument must be a plain token (e.g. 10), not an
// expression.

#define BOOL_TO_STR(_b_) ((_b_) ? "true" : "false")

// Format 32-bit quantities.
#define INT32_FORMAT           "%" PRId32
#define UINT32_FORMAT          "%" PRIu32
#define INT32_FORMAT_W(width)  "%" #width PRId32
#define UINT32_FORMAT_W(width) "%" #width PRIu32

#define PTR32_FORMAT           "0x%08" PRIx32

// Format 64-bit quantities.
#define INT64_FORMAT           "%" PRId64
#define UINT64_FORMAT          "%" PRIu64
#define UINT64_FORMAT_X        "%" PRIx64
#define INT64_FORMAT_W(width)  "%" #width PRId64
#define UINT64_FORMAT_W(width) "%" #width PRIu64

#define PTR64_FORMAT           "0x%016" PRIx64

// Format jlong, if necessary
#ifndef JLONG_FORMAT
#define JLONG_FORMAT           INT64_FORMAT
#endif
#ifndef JULONG_FORMAT
#define JULONG_FORMAT          UINT64_FORMAT
#endif
#ifndef JULONG_FORMAT_X
#define JULONG_FORMAT_X        UINT64_FORMAT_X
#endif

// Format pointers which change size between 32- and 64-bit.
#ifdef  _LP64
#define INTPTR_FORMAT "0x%016" PRIxPTR
#define PTR_FORMAT    "0x%016" PRIxPTR
#else   // !_LP64
#define INTPTR_FORMAT "0x%08"  PRIxPTR
#define PTR_FORMAT    "0x%08"  PRIxPTR
#endif  // _LP64

#define INTPTR_FORMAT_W(width)   "%" #width PRIxPTR

#define SSIZE_FORMAT             "%"   PRIdPTR
#define SIZE_FORMAT              "%"   PRIuPTR
#define SIZE_FORMAT_HEX          "0x%" PRIxPTR
#define SSIZE_FORMAT_W(width)    "%"   #width PRIdPTR
#define SIZE_FORMAT_W(width)     "%"   #width PRIuPTR
#define SIZE_FORMAT_HEX_W(width) "0x%" #width PRIxPTR

#define INTX_FORMAT           "%" PRIdPTR
#define UINTX_FORMAT          "%" PRIuPTR
#define INTX_FORMAT_W(width)  "%" #width PRIdPTR
#define UINTX_FORMAT_W(width) "%" #width PRIuPTR


// Enable zap-a-lot if in debug version.

// Zap-dead-locals support is only compiled into debug builds that include C2.
# ifdef ASSERT
# ifdef COMPILER2
#   define ENABLE_ZAP_DEAD_LOCALS
#endif /* COMPILER2 */
# endif /* ASSERT */

// Number of elements in a true C array.
// NOTE: only valid for actual arrays; applied to a pointer it compiles but
// yields sizeof(pointer)/sizeof(element), which is wrong.
#define ARRAY_SIZE(array) (sizeof(array)/sizeof((array)[0]))

// Dereference vptr
// All C++ compilers that we know of have the vtbl pointer in the first
// word. If there are exceptions, this function needs to be made compiler
// specific.
static inline void* dereference_vptr(const void* addr) {
  return *(void**)addr;
}

#ifndef PRODUCT

// For unit testing only
class GlobalDefinitions {
 public:
  static void test_globals();
};

#endif // PRODUCT

#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_HPP