src/share/vm/runtime/thread.hpp

1395   //
1396   //  (large addresses)
1397   //
1398 
1399  private:
1400   // These values are derived from flags StackRedPages, StackYellowPages,
1401   // StackReservedPages and StackShadowPages. The zone sizes are determined
1402   // ergonomically if page_size > 4K.
1403   static size_t _stack_red_zone_size;
1404   static size_t _stack_yellow_zone_size;
1405   static size_t _stack_reserved_zone_size;
1406   static size_t _stack_shadow_zone_size;
1407  public:
1408   inline size_t stack_available(address cur_sp);
1409 
1410   static size_t stack_red_zone_size() {
1411     assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized.");
1412     return _stack_red_zone_size;
1413   }
1414   static void set_stack_red_zone_size(size_t s) {
1415     assert(is_size_aligned(s, os::vm_page_size()),
1416            "We cannot protect if the red zone size is not page aligned.");
1417     assert(_stack_red_zone_size == 0, "This should be called only once.");
1418     _stack_red_zone_size = s;
1419   }
1420   address stack_red_zone_base() {
1421     return (address)(stack_end() + stack_red_zone_size());
1422   }
1423   bool in_stack_red_zone(address a) {
1424     return a <= stack_red_zone_base() && a >= stack_end();
1425   }
1426 
1427   static size_t stack_yellow_zone_size() {
1428     assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
1429     return _stack_yellow_zone_size;
1430   }
1431   static void set_stack_yellow_zone_size(size_t s) {
1432     assert(is_size_aligned(s, os::vm_page_size()),
1433            "We cannot protect if the yellow zone size is not page aligned.");
1434     assert(_stack_yellow_zone_size == 0, "This should be called only once.");
1435     _stack_yellow_zone_size = s;
1436   }
1437 
1438   static size_t stack_reserved_zone_size() {
1439     // _stack_reserved_zone_size may be 0. This indicates the feature is off.
1440     return _stack_reserved_zone_size;
1441   }
1442   static void set_stack_reserved_zone_size(size_t s) {
1443     assert(is_size_aligned(s, os::vm_page_size()),
1444            "We cannot protect if the reserved zone size is not page aligned.");
1445     assert(_stack_reserved_zone_size == 0, "This should be called only once.");
1446     _stack_reserved_zone_size = s;
1447   }
1448   address stack_reserved_zone_base() {
1449     return (address)(stack_end() +
1450                      (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
1451   }
1452   bool in_stack_reserved_zone(address a) {
1453     return (a <= stack_reserved_zone_base()) &&
1454            (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
1455   }
1456 
1457   static size_t stack_yellow_reserved_zone_size() {
1458     return _stack_yellow_zone_size + _stack_reserved_zone_size;
1459   }
1460   bool in_stack_yellow_reserved_zone(address a) {
1461     return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
1462   }
1463 
1464   // Size of red + yellow + reserved zones.
1465   static size_t stack_guard_zone_size() {
1466     return stack_red_zone_size() + stack_yellow_reserved_zone_size();
1467   }
1468 
1469   static size_t stack_shadow_zone_size() {
1470     assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized.");
1471     return _stack_shadow_zone_size;
1472   }
1473   static void set_stack_shadow_zone_size(size_t s) {
1474     // The shadow area is not allocated or protected, so
1475     // it need not be page aligned.
1476     // But the stack bang currently assumes that it is a
1477     // multiple of page size. This guarantees that the bang
1478     // loop touches all pages in the shadow zone.
1479     // This could also be guaranteed differently.  E.g., if
1480     // the page size is a multiple of 4K, banging in 4K steps
1481     // suffices to touch all pages. (Some pages are banged
1482     // several times, though.)
1483     assert(is_size_aligned(s, os::vm_page_size()),
1484            "Stack bang assumes multiple of page size.");
1485     assert(_stack_shadow_zone_size == 0, "This should be called only once.");
1486     _stack_shadow_zone_size = s;
1487   }
1488 
1489   void create_stack_guard_pages();
1490   void remove_stack_guard_pages();
1491 
1492   void enable_stack_reserved_zone();
1493   void disable_stack_reserved_zone();
1494   void enable_stack_yellow_reserved_zone();
1495   void disable_stack_yellow_reserved_zone();
1496   void enable_stack_red_zone();
1497   void disable_stack_red_zone();
1498 
1499   inline bool stack_guard_zone_unused();
1500   inline bool stack_yellow_reserved_zone_disabled();
1501   inline bool stack_reserved_zone_disabled();
1502   inline bool stack_guards_enabled();
1503 
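
For reference, the layout these accessors describe: the red zone starts at stack_end(), the yellow zone sits directly above it, and the reserved zone above that, so every zone base is stack_end() plus the sizes of the zones below it, growing toward the "(large addresses)" end of the diagram. The following standalone sketch is not HotSpot code; the page size, the zone sizes, and the static buffer standing in for the thread stack are illustrative assumptions. It only mirrors the base arithmetic and the inclusive membership checks seen above.

#include <cstddef>
#include <cstdio>

typedef unsigned char* address;

const size_t page_size          = 4096;          // assumed 4K pages
const size_t red_zone_size      = 1 * page_size; // illustrative zone sizes
const size_t yellow_zone_size   = 2 * page_size;
const size_t reserved_zone_size = 1 * page_size;

static unsigned char stack_memory[64 * 4096];    // stands in for the thread stack
address stack_end_addr = stack_memory;           // lowest stack address

// Each base is stack_end plus the sizes of all zones below it.
address red_zone_base()      { return stack_end_addr + red_zone_size; }
address reserved_zone_base() { return stack_end_addr + red_zone_size
                                                     + yellow_zone_size
                                                     + reserved_zone_size; }

bool in_red_zone(address a) {
  return a <= red_zone_base() && a >= stack_end_addr;
}
bool in_reserved_zone(address a) {
  return a <= reserved_zone_base() &&
         a >= reserved_zone_base() - reserved_zone_size;
}

int main() {
  address a = stack_end_addr + 100;   // 100 bytes above stack_end
  printf("red zone base:      %p\n", (void*)red_zone_base());
  printf("reserved zone base: %p\n", (void*)reserved_zone_base());
  printf("in red zone:        %d\n", in_red_zone(a));        // 1: inside the red zone
  printf("in reserved zone:   %d\n", in_reserved_zone(a));   // 0: below the yellow zone
  return 0;
}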




1395   //
1396   //  (large addresses)
1397   //
1398 
1399  private:
1400   // These values are derived from flags StackRedPages, StackYellowPages,
1401   // StackReservedPages and StackShadowPages. The zone sizes are determined
1402   // ergonomically if page_size > 4K.
1403   static size_t _stack_red_zone_size;
1404   static size_t _stack_yellow_zone_size;
1405   static size_t _stack_reserved_zone_size;
1406   static size_t _stack_shadow_zone_size;
1407  public:
1408   inline size_t stack_available(address cur_sp);
1409 
1410   static size_t stack_red_zone_size() {
1411     assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized.");
1412     return _stack_red_zone_size;
1413   }
1414   static void set_stack_red_zone_size(size_t s) {
1415     assert(is_aligned(s, os::vm_page_size()),
1416            "We cannot protect if the red zone size is not page aligned.");
1417     assert(_stack_red_zone_size == 0, "This should be called only once.");
1418     _stack_red_zone_size = s;
1419   }
1420   address stack_red_zone_base() {
1421     return (address)(stack_end() + stack_red_zone_size());
1422   }
1423   bool in_stack_red_zone(address a) {
1424     return a <= stack_red_zone_base() && a >= stack_end();
1425   }
1426 
1427   static size_t stack_yellow_zone_size() {
1428     assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
1429     return _stack_yellow_zone_size;
1430   }
1431   static void set_stack_yellow_zone_size(size_t s) {
1432     assert(is_aligned(s, os::vm_page_size()),
1433            "We cannot protect if the yellow zone size is not page aligned.");
1434     assert(_stack_yellow_zone_size == 0, "This should be called only once.");
1435     _stack_yellow_zone_size = s;
1436   }
1437 
1438   static size_t stack_reserved_zone_size() {
1439     // _stack_reserved_zone_size may be 0. This indicates the feature is off.
1440     return _stack_reserved_zone_size;
1441   }
1442   static void set_stack_reserved_zone_size(size_t s) {
1443     assert(is_aligned(s, os::vm_page_size()),
1444            "We cannot protect if the reserved zone size is not page aligned.");
1445     assert(_stack_reserved_zone_size == 0, "This should be called only once.");
1446     _stack_reserved_zone_size = s;
1447   }
1448   address stack_reserved_zone_base() {
1449     return (address)(stack_end() +
1450                      (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
1451   }
1452   bool in_stack_reserved_zone(address a) {
1453     return (a <= stack_reserved_zone_base()) &&
1454            (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
1455   }
1456 
1457   static size_t stack_yellow_reserved_zone_size() {
1458     return _stack_yellow_zone_size + _stack_reserved_zone_size;
1459   }
1460   bool in_stack_yellow_reserved_zone(address a) {
1461     return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
1462   }
1463 
1464   // Size of red + yellow + reserved zones.
1465   static size_t stack_guard_zone_size() {
1466     return stack_red_zone_size() + stack_yellow_reserved_zone_size();
1467   }
1468 
1469   static size_t stack_shadow_zone_size() {
1470     assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized.");
1471     return _stack_shadow_zone_size;
1472   }
1473   static void set_stack_shadow_zone_size(size_t s) {
1474     // The shadow area is not allocated or protected, so
1475     // it need not be page aligned.
1476     // But the stack bang currently assumes that it is a
1477     // multiple of page size. This guarantees that the bang
1478     // loop touches all pages in the shadow zone.
1479     // This could also be guaranteed differently.  E.g., if
1480     // the page size is a multiple of 4K, banging in 4K steps
1481     // suffices to touch all pages. (Some pages are banged
1482     // several times, though.)
1483     assert(is_aligned(s, os::vm_page_size()),
1484            "Stack bang assumes multiple of page size.");
1485     assert(_stack_shadow_zone_size == 0, "This should be called only once.");
1486     _stack_shadow_zone_size = s;
1487   }
1488 
1489   void create_stack_guard_pages();
1490   void remove_stack_guard_pages();
1491 
1492   void enable_stack_reserved_zone();
1493   void disable_stack_reserved_zone();
1494   void enable_stack_yellow_reserved_zone();
1495   void disable_stack_yellow_reserved_zone();
1496   void enable_stack_red_zone();
1497   void disable_stack_red_zone();
1498 
1499   inline bool stack_guard_zone_unused();
1500   inline bool stack_yellow_reserved_zone_disabled();
1501   inline bool stack_reserved_zone_disabled();
1502   inline bool stack_guards_enabled();
1503 
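
The comment in set_stack_shadow_zone_size ties the page-alignment assert to the bang loop: the bang steps through the shadow zone in page-size increments, and keeping the zone size a multiple of the page size guarantees such a loop lands in every page (or, as the comment notes, the same guarantee follows if the step itself divides the page size, e.g. 4K steps when the page size is a multiple of 4K). A minimal standalone sketch of such a bang loop follows; it is not the HotSpot implementation, and the 4K page size, the chosen zone size, and the heap buffer standing in for the thread stack are assumptions made for illustration.

#include <cstddef>
#include <vector>

const size_t page_size = 4096;                     // assumed 4K pages

// Touch one byte in every page of [base, base + size).
// One write per page_size step is enough to land in each page of the zone.
void bang_shadow_zone(volatile char* base, size_t size) {
  for (size_t offset = 0; offset < size; offset += page_size) {
    base[offset] = 0;                              // one touch per page
  }
}

int main() {
  const size_t shadow_zone_size = 8 * page_size;   // page aligned, per the assert
  std::vector<char> zone(shadow_zone_size);        // stands in for the stack area
  bang_shadow_zone(zone.data(), zone.size());
  return 0;
}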

