1 /*
   2  * Copyright © 2007,2008,2009,2010  Red Hat, Inc.
   3  * Copyright © 2012  Google, Inc.
   4  *
   5  *  This is part of HarfBuzz, a text shaping library.
   6  *
   7  * Permission is hereby granted, without written agreement and without
   8  * license or royalty fees, to use, copy, modify, and distribute this
   9  * software and its documentation for any purpose, provided that the
  10  * above copyright notice and the following two paragraphs appear in
  11  * all copies of this software.
  12  *
  13  * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
  14  * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
  15  * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
  16  * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
  17  * DAMAGE.
  18  *
  19  * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
  20  * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  21  * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
  22  * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
  23  * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
  24  *
  25  * Red Hat Author(s): Behdad Esfahbod
  26  * Google Author(s): Behdad Esfahbod
  27  */
  28 
  29 #ifndef HB_OPEN_TYPE_PRIVATE_HH
  30 #define HB_OPEN_TYPE_PRIVATE_HH
  31 
  32 #include "hb-private.hh"
  33 #include "hb-debug.hh"
  34 #include "hb-face-private.hh"
  35 
  36 
  37 namespace OT {
  38 
  39 
  40 
  41 /*
  42  * Casts
  43  */
  44 
  45 /* Cast to struct T, reference to reference */
  46 template<typename Type, typename TObject>
  47 static inline const Type& CastR(const TObject &X)
  48 { return reinterpret_cast<const Type&> (X); }
  49 template<typename Type, typename TObject>
  50 static inline Type& CastR(TObject &X)
  51 { return reinterpret_cast<Type&> (X); }
  52 
  53 /* Cast to struct T, pointer to pointer */
  54 template<typename Type, typename TObject>
  55 static inline const Type* CastP(const TObject *X)
  56 { return reinterpret_cast<const Type*> (X); }
  57 template<typename Type, typename TObject>
  58 static inline Type* CastP(TObject *X)
  59 { return reinterpret_cast<Type*> (X); }
  60 
  61 /* StructAtOffset<T>(P,Ofs) returns the struct T& that is placed at memory
  62  * location pointed to by P plus Ofs bytes. */
  63 template<typename Type>
  64 static inline const Type& StructAtOffset(const void *P, unsigned int offset)
  65 { return * reinterpret_cast<const Type*> ((const char *) P + offset); }
  66 template<typename Type>
  67 static inline Type& StructAtOffset(void *P, unsigned int offset)
  68 { return * reinterpret_cast<Type*> ((char *) P + offset); }
  69 
  70 /* StructAfter<T>(X) returns the struct T& that is placed after X.
  71  * Works with X of variable size also.  X must implement get_size() */
  72 template<typename Type, typename TObject>
  73 static inline const Type& StructAfter(const TObject &X)
  74 { return StructAtOffset<Type>(&X, X.get_size()); }
  75 template<typename Type, typename TObject>
  76 static inline Type& StructAfter(TObject &X)
  77 { return StructAtOffset<Type>(&X, X.get_size()); }
  78 
  79 
  80 
  81 /*
  82  * Size checking
  83  */
  84 
/* Check _assertion in a method environment */
/* Two-level expansion so that __LINE__ is expanded before token-pasting,
 * giving each generated assertion method a unique name. */
#define _DEFINE_INSTANCE_ASSERTION1(_line, _assertion) \
  inline void _instance_assertion_on_line_##_line (void) const \
  { \
    static_assert ((_assertion), ""); \
    ASSERT_INSTANCE_POD (*this); /* Make sure it's POD. */ \
  }
# define _DEFINE_INSTANCE_ASSERTION0(_line, _assertion) _DEFINE_INSTANCE_ASSERTION1 (_line, _assertion)
# define DEFINE_INSTANCE_ASSERTION(_assertion) _DEFINE_INSTANCE_ASSERTION0 (__LINE__, _assertion)

/* Check that _code compiles in a method environment */
#define _DEFINE_COMPILES_ASSERTION1(_line, _code) \
  inline void _compiles_assertion_on_line_##_line (void) const \
  { _code; }
# define _DEFINE_COMPILES_ASSERTION0(_line, _code) _DEFINE_COMPILES_ASSERTION1 (_line, _code)
# define DEFINE_COMPILES_ASSERTION(_code) _DEFINE_COMPILES_ASSERTION0 (__LINE__, _code)


/* For structs of fixed size: asserts sizeof at compile time and defines
 * static_size, min_size, and get_size(). */
#define DEFINE_SIZE_STATIC(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size)); \
  static const unsigned int static_size = (size); \
  static const unsigned int min_size = (size); \
  inline unsigned int get_size (void) const { return (size); }

/* For unions: asserts the named member fills the union exactly. */
#define DEFINE_SIZE_UNION(size, _member) \
  DEFINE_INSTANCE_ASSERTION (0*sizeof(this->u._member.static_size) + sizeof(this->u._member) == (size)); \
  static const unsigned int min_size = (size)

/* For structs with a variable-sized tail: only a minimum size is known. */
#define DEFINE_SIZE_MIN(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) >= (size)); \
  static const unsigned int min_size = (size)

/* For structs ending in a variable-length array (declared with one
 * element); `size` is the fixed header part. */
#define DEFINE_SIZE_ARRAY(size, array) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size) + sizeof (array[0])); \
  DEFINE_COMPILES_ASSERTION ((void) array[0].static_size) \
  static const unsigned int min_size = (size)

/* Like DEFINE_SIZE_ARRAY, for structs ending in two variable-length arrays. */
#define DEFINE_SIZE_ARRAY2(size, array1, array2) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size) + sizeof (this->array1[0]) + sizeof (this->array2[0])); \
  DEFINE_COMPILES_ASSERTION ((void) array1[0].static_size; (void) array2[0].static_size) \
  static const unsigned int min_size = (size)
 126 
 127 
 128 
 129 /*
 130  * Null objects
 131  */
 132 
 133 /* Global nul-content Null pool.  Enlarge as necessary. */
 134 
 135 #define HB_NULL_POOL_SIZE 264
 136 static_assert (HB_NULL_POOL_SIZE % sizeof (void *) == 0, "Align HB_NULL_POOL_SIZE.");
 137 extern HB_INTERNAL const void * const _hb_NullPool[HB_NULL_POOL_SIZE / sizeof (void *)];
 138 
 139 /* Generic nul-content Null objects. */
 140 template <typename Type>
 141 static inline const Type& Null (void) {
 142   static_assert (sizeof (Type) <= HB_NULL_POOL_SIZE, "Increase HB_NULL_POOL_SIZE.");
 143   return *CastP<Type> (_hb_NullPool);
 144 }
 145 
 146 /* Specializaiton for arbitrary-content arbitrary-sized Null objects. */
 147 #define DEFINE_NULL_DATA(Type, data) \
 148 static const char _Null##Type[sizeof (Type) + 1] = data; /* +1 is for nul-termination in data */ \
 149 template <> \
 150 /*static*/ inline const Type& Null<Type> (void) { \
 151   return *CastP<Type> (_Null##Type); \
 152 } /* The following line really exists such that we end in a place needing semicolon */ \
 153 static_assert (Type::min_size + 1 <= sizeof (_Null##Type), "Null pool too small.  Enlarge.")
 154 
 155 /* Accessor macro. */
 156 #define Null(Type) Null<Type>()
 157 
 158 
 159 /*
 160  * Dispatch
 161  */
 162 
/* CRTP base for dispatch contexts (sanitize, serialize, ...).
 * Context is the deriving class, Return the per-dispatch result type,
 * MaxDebugDepth the bound on debug-trace nesting. */
template <typename Context, typename Return, unsigned int MaxDebugDepth>
struct hb_dispatch_context_t
{
  static const unsigned int max_debug_depth = MaxDebugDepth;
  typedef Return return_t;
  /* Default filter: dispatch everything.  Deriving contexts may shadow
   * this (e.g. sanitize validates the format field first). */
  template <typename T, typename F>
  inline bool may_dispatch (const T *obj, const F *format) { return true; }
  /* Result used when may_dispatch() refuses; the deriving class supplies
   * default_return_value(). */
  static return_t no_dispatch_return_value (void) { return Context::default_return_value (); }
};
 172 
 173 
 174 /*
 175  * Sanitize
 176  */
 177 
 178 /* This limits sanitizing time on really broken fonts. */
 179 #ifndef HB_SANITIZE_MAX_EDITS
 180 #define HB_SANITIZE_MAX_EDITS 32
 181 #endif
 182 
/* Sanitize context: validates that font data structures lie inside the
 * blob's bounds before they are dereferenced.  May request limited
 * in-place edits (e.g. neutering a bad offset) when the blob is writable. */
struct hb_sanitize_context_t :
       hb_dispatch_context_t<hb_sanitize_context_t, bool, HB_DEBUG_SANITIZE>
{
  inline hb_sanitize_context_t (void) :
        debug_depth (0),
        start (nullptr), end (nullptr),
        writable (false), edit_count (0),
        blob (nullptr) {}

  inline const char *get_name (void) { return "SANITIZE"; }
  /* Validate the format selector before dispatching on it. */
  template <typename T, typename F>
  inline bool may_dispatch (const T *obj, const F *format)
  { return format->sanitize (this); }
  template <typename T>
  inline return_t dispatch (const T &obj) { return obj.sanitize (this); }
  static return_t default_return_value (void) { return true; }
  static return_t no_dispatch_return_value (void) { return false; }
  /* Stop iterating sublookups as soon as one fails. */
  bool stop_sublookup_iteration (const return_t r) const { return !r; }

  /* Take a reference on the blob; paired with end_processing(). */
  inline void init (hb_blob_t *b)
  {
    this->blob = hb_blob_reference (b);
    this->writable = false;
  }

  /* Cache the blob's [start,end) data range and reset counters. */
  inline void start_processing (void)
  {
    this->start = hb_blob_get_data (this->blob, nullptr);
    this->end = this->start + hb_blob_get_length (this->blob);
    assert (this->start <= this->end); /* Must not overflow. */
    this->edit_count = 0;
    this->debug_depth = 0;

    DEBUG_MSG_LEVEL (SANITIZE, start, 0, +1,
                     "start [%p..%p] (%lu bytes)",
                     this->start, this->end,
                     (unsigned long) (this->end - this->start));
  }

  /* Drop the blob reference and clear the cached range. */
  inline void end_processing (void)
  {
    DEBUG_MSG_LEVEL (SANITIZE, this->start, 0, -1,
                     "end [%p..%p] %u edit requests",
                     this->start, this->end, this->edit_count);

    hb_blob_destroy (this->blob);
    this->blob = nullptr;
    this->start = this->end = nullptr;
  }

  /* True iff [base, base+len) lies entirely within the blob's data. */
  inline bool check_range (const void *base, unsigned int len) const
  {
    const char *p = (const char *) base;
    bool ok = this->start <= p && p <= this->end && (unsigned int) (this->end - p) >= len;

    DEBUG_MSG_LEVEL (SANITIZE, p, this->debug_depth+1, 0,
       "check_range [%p..%p] (%d bytes) in [%p..%p] -> %s",
       p, p + len, len,
       this->start, this->end,
       ok ? "OK" : "OUT-OF-RANGE");

    return likely (ok);
  }

  /* check_range for an array, guarding the len*record_size multiply
   * against unsigned overflow. */
  inline bool check_array (const void *base, unsigned int record_size, unsigned int len) const
  {
    const char *p = (const char *) base;
    bool overflows = _hb_unsigned_int_mul_overflows (len, record_size);
    unsigned int array_size = record_size * len;
    bool ok = !overflows && this->check_range (base, array_size);

    DEBUG_MSG_LEVEL (SANITIZE, p, this->debug_depth+1, 0,
       "check_array [%p..%p] (%d*%d=%d bytes) in [%p..%p] -> %s",
       p, p + (record_size * len), record_size, len, (unsigned int) array_size,
       this->start, this->end,
       overflows ? "OVERFLOWS" : ok ? "OK" : "OUT-OF-RANGE");

    return likely (ok);
  }

  /* Bounds-check the fixed-size (min_size) prefix of a struct. */
  template <typename Type>
  inline bool check_struct (const Type *obj) const
  {
    return likely (this->check_range (obj, obj->min_size));
  }

  /* Whether an in-place edit is permitted.  Always counts the request;
   * capped at HB_SANITIZE_MAX_EDITS to bound time on broken fonts. */
  inline bool may_edit (const void *base HB_UNUSED, unsigned int len HB_UNUSED)
  {
    if (this->edit_count >= HB_SANITIZE_MAX_EDITS)
      return false;

    const char *p = (const char *) base;
    this->edit_count++;

    DEBUG_MSG_LEVEL (SANITIZE, p, this->debug_depth+1, 0,
       "may_edit(%u) [%p..%p] (%d bytes) in [%p..%p] -> %s",
       this->edit_count,
       p, p + len, len,
       this->start, this->end,
       this->writable ? "GRANTED" : "DENIED");

    return this->writable;
  }

  /* Overwrite *obj with v iff editing is permitted. */
  template <typename Type, typename ValueType>
  inline bool try_set (const Type *obj, const ValueType &v) {
    if (this->may_edit (obj, obj->static_size)) {
      const_cast<Type *> (obj)->set (v);
      return true;
    }
    return false;
  }

  mutable unsigned int debug_depth; /* Current trace nesting level. */
  const char *start, *end;          /* Blob data range being sanitized. */
  bool writable;                    /* Whether in-place edits are allowed. */
  unsigned int edit_count;          /* Edit requests made this pass. */
  hb_blob_t *blob;                  /* Referenced blob under sanitization. */
};
 302 
 303 
 304 
 305 /* Template to sanitize an object. */
 306 template <typename Type>
 307 struct Sanitizer
 308 {
 309   static hb_blob_t *sanitize (hb_blob_t *blob) {
 310     hb_sanitize_context_t c[1];
 311     bool sane;
 312 
 313     /* TODO is_sane() stuff */
 314 
 315     c->init (blob);
 316 
 317   retry:
 318     DEBUG_MSG_FUNC (SANITIZE, c->start, "start");
 319 
 320     c->start_processing ();
 321 
 322     if (unlikely (!c->start)) {
 323       c->end_processing ();
 324       return blob;
 325     }
 326 
 327     Type *t = CastP<Type> (const_cast<char *> (c->start));
 328 
 329     sane = t->sanitize (c);
 330     if (sane) {
 331       if (c->edit_count) {
 332         DEBUG_MSG_FUNC (SANITIZE, c->start, "passed first round with %d edits; going for second round", c->edit_count);
 333 
 334         /* sanitize again to ensure no toe-stepping */
 335         c->edit_count = 0;
 336         sane = t->sanitize (c);
 337         if (c->edit_count) {
 338           DEBUG_MSG_FUNC (SANITIZE, c->start, "requested %d edits in second round; FAILLING", c->edit_count);
 339           sane = false;
 340         }
 341       }
 342     } else {
 343       unsigned int edit_count = c->edit_count;
 344       if (edit_count && !c->writable) {
 345         c->start = hb_blob_get_data_writable (blob, nullptr);
 346         c->end = c->start + hb_blob_get_length (blob);
 347 
 348         if (c->start) {
 349           c->writable = true;
 350           /* ok, we made it writable by relocating.  try again */
 351           DEBUG_MSG_FUNC (SANITIZE, c->start, "retry");
 352           goto retry;
 353         }
 354       }
 355     }
 356 
 357     c->end_processing ();
 358 
 359     DEBUG_MSG_FUNC (SANITIZE, c->start, sane ? "PASSED" : "FAILED");
 360     if (sane)
 361       return blob;
 362     else {
 363       hb_blob_destroy (blob);
 364       return hb_blob_get_empty ();
 365     }
 366   }
 367 
 368   static const Type* lock_instance (hb_blob_t *blob) {
 369     hb_blob_make_immutable (blob);
 370     const char *base = hb_blob_get_data (blob, nullptr);
 371     return unlikely (!base) ? &Null(Type) : CastP<Type> (base);
 372   }
 373 };
 374 
 375 
 376 
 377 /*
 378  * Serialize
 379  */
 380 
 381 
 382 struct hb_serialize_context_t
 383 {
 384   inline hb_serialize_context_t (void *start_, unsigned int size)
 385   {
 386     this->start = (char *) start_;
 387     this->end = this->start + size;
 388 
 389     this->ran_out_of_room = false;
 390     this->head = this->start;
 391     this->debug_depth = 0;
 392   }
 393 
 394   template <typename Type>
 395   inline Type *start_serialize (void)
 396   {
 397     DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, +1,
 398                      "start [%p..%p] (%lu bytes)",
 399                      this->start, this->end,
 400                      (unsigned long) (this->end - this->start));
 401 
 402     return start_embed<Type> ();
 403   }
 404 
 405   inline void end_serialize (void)
 406   {
 407     DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, -1,
 408                      "end [%p..%p] serialized %d bytes; %s",
 409                      this->start, this->end,
 410                      (int) (this->head - this->start),
 411                      this->ran_out_of_room ? "RAN OUT OF ROOM" : "did not ran out of room");
 412 
 413   }
 414 
 415   template <typename Type>
 416   inline Type *copy (void)
 417   {
 418     assert (!this->ran_out_of_room);
 419     unsigned int len = this->head - this->start;
 420     void *p = malloc (len);
 421     if (p)
 422       memcpy (p, this->start, len);
 423     return reinterpret_cast<Type *> (p);
 424   }
 425 
 426   template <typename Type>
 427   inline Type *allocate_size (unsigned int size)
 428   {
 429     if (unlikely (this->ran_out_of_room || this->end - this->head < ptrdiff_t (size))) {
 430       this->ran_out_of_room = true;
 431       return nullptr;
 432     }
 433     memset (this->head, 0, size);
 434     char *ret = this->head;
 435     this->head += size;
 436     return reinterpret_cast<Type *> (ret);
 437   }
 438 
 439   template <typename Type>
 440   inline Type *allocate_min (void)
 441   {
 442     return this->allocate_size<Type> (Type::min_size);
 443   }
 444 
 445   template <typename Type>
 446   inline Type *start_embed (void)
 447   {
 448     Type *ret = reinterpret_cast<Type *> (this->head);
 449     return ret;
 450   }
 451 
 452   template <typename Type>
 453   inline Type *embed (const Type &obj)
 454   {
 455     unsigned int size = obj.get_size ();
 456     Type *ret = this->allocate_size<Type> (size);
 457     if (unlikely (!ret)) return nullptr;
 458     memcpy (ret, obj, size);
 459     return ret;
 460   }
 461 
 462   template <typename Type>
 463   inline Type *extend_min (Type &obj)
 464   {
 465     unsigned int size = obj.min_size;
 466     assert (this->start <= (char *) &obj && (char *) &obj <= this->head && (char *) &obj + size >= this->head);
 467     if (unlikely (!this->allocate_size<Type> (((char *) &obj) + size - this->head))) return nullptr;
 468     return reinterpret_cast<Type *> (&obj);
 469   }
 470 
 471   template <typename Type>
 472   inline Type *extend (Type &obj)
 473   {
 474     unsigned int size = obj.get_size ();
 475     assert (this->start < (char *) &obj && (char *) &obj <= this->head && (char *) &obj + size >= this->head);
 476     if (unlikely (!this->allocate_size<Type> (((char *) &obj) + size - this->head))) return nullptr;
 477     return reinterpret_cast<Type *> (&obj);
 478   }
 479 
 480   inline void truncate (void *new_head)
 481   {
 482     assert (this->start < new_head && new_head <= this->head);
 483     this->head = (char *) new_head;
 484   }
 485 
 486   unsigned int debug_depth;
 487   char *start, *end, *head;
 488   bool ran_out_of_room;
 489 };
 490 
 491 template <typename Type>
 492 struct Supplier
 493 {
 494   inline Supplier (const Type *array, unsigned int len_)
 495   {
 496     head = array;
 497     len = len_;
 498   }
 499   inline const Type operator [] (unsigned int i) const
 500   {
 501     if (unlikely (i >= len)) return Type ();
 502     return head[i];
 503   }
 504 
 505   inline void advance (unsigned int count)
 506   {
 507     if (unlikely (count > len))
 508       count = len;
 509     len -= count;
 510     head += count;
 511   }
 512 
 513   private:
 514   inline Supplier (const Supplier<Type> &); /* Disallow copy */
 515   inline Supplier<Type>& operator= (const Supplier<Type> &); /* Disallow copy */
 516 
 517   unsigned int len;
 518   const Type *head;
 519 };
 520 
 521 
 522 
 523 
 524 /*
 525  *
 526  * The OpenType Font File: Data Types
 527  */
 528 
 529 
 530 /* "The following data types are used in the OpenType font file.
 531  *  All OpenType fonts use Motorola-style byte ordering (Big Endian):" */
 532 
 533 /*
 534  * Int types
 535  */
 536 
 537 
 538 template <typename Type, int Bytes> struct BEInt;
 539 
 540 template <typename Type>
 541 struct BEInt<Type, 1>
 542 {
 543   public:
 544   inline void set (Type V)
 545   {
 546     v = V;
 547   }
 548   inline operator Type (void) const
 549   {
 550     return v;
 551   }
 552   private: uint8_t v;
 553 };
 554 template <typename Type>
 555 struct BEInt<Type, 2>
 556 {
 557   public:
 558   inline void set (Type V)
 559   {
 560     v[0] = (V >>  8) & 0xFF;
 561     v[1] = (V      ) & 0xFF;
 562   }
 563   inline operator Type (void) const
 564   {
 565     return (v[0] <<  8)
 566          + (v[1]      );
 567   }
 568   private: uint8_t v[2];
 569 };
 570 template <typename Type>
 571 struct BEInt<Type, 3>
 572 {
 573   public:
 574   inline void set (Type V)
 575   {
 576     v[0] = (V >> 16) & 0xFF;
 577     v[1] = (V >>  8) & 0xFF;
 578     v[2] = (V      ) & 0xFF;
 579   }
 580   inline operator Type (void) const
 581   {
 582     return (v[0] << 16)
 583          + (v[1] <<  8)
 584          + (v[2]      );
 585   }
 586   private: uint8_t v[3];
 587 };
 588 template <typename Type>
 589 struct BEInt<Type, 4>
 590 {
 591   public:
 592   inline void set (Type V)
 593   {
 594     v[0] = (V >> 24) & 0xFF;
 595     v[1] = (V >> 16) & 0xFF;
 596     v[2] = (V >>  8) & 0xFF;
 597     v[3] = (V      ) & 0xFF;
 598   }
 599   inline operator Type (void) const
 600   {
 601     return (v[0] << 24)
 602          + (v[1] << 16)
 603          + (v[2] <<  8)
 604          + (v[3]      );
 605   }
 606   private: uint8_t v[4];
 607 };
 608 
 609 /* Integer types in big-endian order and no alignment requirement */
template <typename Type, unsigned int Size>
struct IntType
{
  inline void set (Type i) { v.set (i); }
  inline operator Type(void) const { return v; }
  inline bool operator == (const IntType<Type,Size> &o) const { return (Type) v == (Type) o.v; }
  inline bool operator != (const IntType<Type,Size> &o) const { return !(*this == o); }
  /* bsearch/qsort-style comparator; note it delegates with swapped
   * arguments: b->cmp(*a). */
  static inline int cmp (const IntType<Type,Size> *a, const IntType<Type,Size> *b) { return b->cmp (*a); }
  /* Three-way compare of a against this value.  When both types are
   * narrower than int, plain int subtraction cannot overflow after
   * integer promotion, so it is used as a shortcut. */
  template <typename Type2>
  inline int cmp (Type2 a) const
  {
    Type b = v;
    if (sizeof (Type) < sizeof (int) && sizeof (Type2) < sizeof (int))
      return (int) a - (int) b;
    else
      return a < b ? -1 : a == b ? 0 : +1;
  }
  /* Bounds-check this integer's storage. */
  inline bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (likely (c->check_struct (this)));
  }
  protected:
  BEInt<Type, Size> v; /* Big-endian byte storage. */
  public:
  DEFINE_SIZE_STATIC (Size);
};
 637 
/* Basic big-endian integer wrappers; names follow the OpenType spec. */
typedef IntType<int8_t,   1> CHAR;      /* 8-bit signed integer. */
typedef IntType<uint8_t,  1> BYTE;      /* 8-bit unsigned integer. */
typedef IntType<int8_t,   1> INT8;      /* 8-bit signed integer. */
typedef IntType<uint16_t, 2> USHORT;    /* 16-bit unsigned integer. */
typedef IntType<int16_t,  2> SHORT;     /* 16-bit signed integer. */
typedef IntType<uint32_t, 4> ULONG;     /* 32-bit unsigned integer. */
typedef IntType<int32_t,  4> LONG;      /* 32-bit signed integer. */
typedef IntType<uint32_t, 3> UINT24;    /* 24-bit unsigned integer, stored in 3 bytes. */

/* 16-bit signed integer (SHORT) that describes a quantity in FUnits. */
typedef SHORT FWORD;

/* 16-bit unsigned integer (USHORT) that describes a quantity in FUnits. */
typedef USHORT UFWORD;
 652 
/* 16-bit signed fixed number with the low 14 bits of fraction (2.14). */
struct F2DOT14 : SHORT
{
  /* Float conversion helpers are not implemented yet; the raw 2.14
   * value is accessible through the SHORT base. */
  //inline float to_float (void) const { return ???; }
  //inline void set_float (float f) { v.set (f * ???); }
  public:
  DEFINE_SIZE_STATIC (2);
};
 661 
/* 32-bit signed fixed-point number (16.16). */
struct Fixed: LONG
{
  /* Float conversion helpers are not implemented yet; the raw 16.16
   * value is accessible through the LONG base. */
  //inline float to_float (void) const { return ???; }
  //inline void set_float (float f) { v.set (f * ???); }
  public:
  DEFINE_SIZE_STATIC (4);
};
 670 
/* Date represented in number of seconds since 12:00 midnight, January 1,
 * 1904. The value is represented as a signed 64-bit integer. */
struct LONGDATETIME
{
  /* Bounds-check the full 8-byte struct. */
  inline bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (likely (c->check_struct (this)));
  }
  protected:
  LONG major;  /* High 32 bits of the big-endian 64-bit value. */
  ULONG minor; /* Low 32 bits of the big-endian 64-bit value. */
  public:
  DEFINE_SIZE_STATIC (8);
};
 686 
/* Array of four uint8s (length = 32 bits) used to identify a script, language
 * system, feature, or baseline */
struct Tag : ULONG
{
  /* What the char* converters return is NOT nul-terminated.  Print using "%.4s" */
  inline operator const char* (void) const { return reinterpret_cast<const char *> (&this->v); }
  inline operator char* (void) { return reinterpret_cast<char *> (&this->v); }
  public:
  DEFINE_SIZE_STATIC (4);
};
/* The Null Tag is four spaces (the string literal also supplies the
 * terminating nul that DEFINE_NULL_DATA expects). */
DEFINE_NULL_DATA (Tag, "    ");
 698 
/* Glyph index number, same as uint16 (length = 16 bits) */
typedef USHORT GlyphID;

/* Script/language-system/feature index */
struct Index : USHORT {
  static const unsigned int NOT_FOUND_INDEX = 0xFFFFu;
};
/* The Null Index is 0xFFFF, i.e. NOT_FOUND_INDEX. */
DEFINE_NULL_DATA (Index, "\xff\xff");
 707 
/* Offset, Null offset = 0 */
template <typename Type=USHORT>
struct Offset : Type
{
  /* A zero offset encodes "no object". */
  inline bool is_null (void) const { return 0 == *this; }
  public:
  DEFINE_SIZE_STATIC (sizeof(Type));
};
 716 
 717 
 718 /* CheckSum */
 719 struct CheckSum : ULONG
 720 {
 721   /* This is reference implementation from the spec. */
 722   static inline uint32_t CalcTableChecksum (const ULONG *Table, uint32_t Length)
 723   {
 724     uint32_t Sum = 0L;
 725     const ULONG *EndPtr = Table+((Length+3) & ~3) / ULONG::static_size;
 726 
 727     while (Table < EndPtr)
 728       Sum += *Table++;
 729     return Sum;
 730   }
 731 
 732   /* Note: data should be 4byte aligned and have 4byte padding at the end. */
 733   inline void set_for_data (const void *data, unsigned int length)
 734   { set (CalcTableChecksum ((const ULONG *) data, length)); }
 735 
 736   public:
 737   DEFINE_SIZE_STATIC (4);
 738 };
 739 
 740 
 741 /*
 742  * Version Numbers
 743  */
 744 
/* Two-part (major.minor) version number; FixedType is the storage type
 * of each half (USHORT by default). */
template <typename FixedType=USHORT>
struct FixedVersion
{
  /* Pack major/minor into a single integer, major in the high half. */
  inline uint32_t to_int (void) const { return (major << (sizeof(FixedType) * 8)) + minor; }

  inline bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this));
  }

  FixedType major;
  FixedType minor;
  public:
  DEFINE_SIZE_STATIC (2 * sizeof(FixedType));
};
 761 
 762 
 763 
 764 /*
 765  * Template subclasses of Offset that do the dereferencing.
 766  * Use: (base+offset)
 767  */
 768 
/* Offset relative to `base` that dereferences to a Type.
 * A null (zero) offset dereferences to Null(Type). */
template <typename Type, typename OffsetType=USHORT>
struct OffsetTo : Offset<OffsetType>
{
  inline const Type& operator () (const void *base) const
  {
    unsigned int offset = *this;
    if (unlikely (!offset)) return Null(Type);
    return StructAtOffset<Type> (base, offset);
  }

  /* Point this offset at the serializer's current head, relative to base. */
  inline Type& serialize (hb_serialize_context_t *c, const void *base)
  {
    Type *t = c->start_embed<Type> ();
    this->set ((char *) t - (char *) base); /* TODO(serialize) Overflow? */
    return *t;
  }

  /* Check the offset field itself, then recurse into the pointed-to
   * object; if the target fails, try neutering the offset instead of
   * failing the whole sanitize. */
  inline bool sanitize (hb_sanitize_context_t *c, const void *base) const
  {
    TRACE_SANITIZE (this);
    if (unlikely (!c->check_struct (this))) return_trace (false);
    unsigned int offset = *this;
    if (unlikely (!offset)) return_trace (true);
    if (unlikely (!c->check_range (base, offset))) return_trace (false);
    const Type &obj = StructAtOffset<Type> (base, offset);
    return_trace (likely (obj.sanitize (c)) || neuter (c));
  }
  /* Same, forwarding user_data to the target's sanitize(). */
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, const void *base, T user_data) const
  {
    TRACE_SANITIZE (this);
    if (unlikely (!c->check_struct (this))) return_trace (false);
    unsigned int offset = *this;
    if (unlikely (!offset)) return_trace (true);
    if (unlikely (!c->check_range (base, offset))) return_trace (false);
    const Type &obj = StructAtOffset<Type> (base, offset);
    return_trace (likely (obj.sanitize (c, user_data)) || neuter (c));
  }

  /* Set the offset to Null */
  inline bool neuter (hb_sanitize_context_t *c) const {
    return c->try_set (this, 0);
  }
  DEFINE_SIZE_STATIC (sizeof(OffsetType));
};
/* 32-bit-offset variant. */
template <typename Type> struct LOffsetTo : OffsetTo<Type, ULONG> {};
/* Syntactic sugar: (base+offset) dereferences `offset` against `base`. */
template <typename Base, typename OffsetType, typename Type>
static inline const Type& operator + (const Base &base, const OffsetTo<Type, OffsetType> &offset) { return offset (base); }
template <typename Base, typename OffsetType, typename Type>
static inline Type& operator + (Base &base, OffsetTo<Type, OffsetType> &offset) { return offset (base); }
 819 
 820 
 821 /*
 822  * Array Types
 823  */
 824 
 825 /* An array with a number of elements. */
template <typename Type, typename LenType=USHORT>
struct ArrayOf
{
  /* Pointer to the sub-span starting at start_offset; clamps *pcount to
   * the number of elements actually available there. */
  const Type *sub_array (unsigned int start_offset, unsigned int *pcount /* IN/OUT */) const
  {
    unsigned int count = len;
    if (unlikely (start_offset > count))
      count = 0;
    else
      count -= start_offset;
    count = MIN (count, *pcount);
    *pcount = count;
    return array + start_offset;
  }

  /* Bounds-checked read access; out-of-range returns Null(Type). */
  inline const Type& operator [] (unsigned int i) const
  {
    if (unlikely (i >= len)) return Null(Type);
    return array[i];
  }
  /* NOTE: write access is NOT bounds-checked; callers must stay < len. */
  inline Type& operator [] (unsigned int i)
  {
    return array[i];
  }
  inline unsigned int get_size (void) const
  { return len.static_size + len * Type::static_size; }

  /* Allocate room for items_len elements and record the length. */
  inline bool serialize (hb_serialize_context_t *c,
                         unsigned int items_len)
  {
    TRACE_SERIALIZE (this);
    if (unlikely (!c->extend_min (*this))) return_trace (false);
    len.set (items_len); /* TODO(serialize) Overflow? */
    if (unlikely (!c->extend (*this))) return_trace (false);
    return_trace (true);
  }

  /* Allocate room and copy items_len elements from the supplier. */
  inline bool serialize (hb_serialize_context_t *c,
                         Supplier<Type> &items,
                         unsigned int items_len)
  {
    TRACE_SERIALIZE (this);
    if (unlikely (!serialize (c, items_len))) return_trace (false);
    for (unsigned int i = 0; i < items_len; i++)
      array[i] = items[i];
    items.advance (items_len);
    return_trace (true);
  }

  /* Shallow sanitize: bounds-check length field and aggregate array. */
  inline bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    if (unlikely (!sanitize_shallow (c))) return_trace (false);

    /* Note: for structs that do not reference other structs,
     * we do not need to call their sanitize() as we already did
     * a bound check on the aggregate array size.  We just include
     * a small unreachable expression to make sure the structs
     * pointed to do have a simple sanitize(), ie. they do not
     * reference other structs via offsets.
     */
    (void) (false && array[0].sanitize (c));

    return_trace (true);
  }
  /* Deep sanitize: each element references other structs relative to base. */
  inline bool sanitize (hb_sanitize_context_t *c, const void *base) const
  {
    TRACE_SANITIZE (this);
    if (unlikely (!sanitize_shallow (c))) return_trace (false);
    unsigned int count = len;
    for (unsigned int i = 0; i < count; i++)
      if (unlikely (!array[i].sanitize (c, base)))
        return_trace (false);
    return_trace (true);
  }
  /* Deep sanitize, forwarding user_data to each element. */
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, const void *base, T user_data) const
  {
    TRACE_SANITIZE (this);
    if (unlikely (!sanitize_shallow (c))) return_trace (false);
    unsigned int count = len;
    for (unsigned int i = 0; i < count; i++)
      if (unlikely (!array[i].sanitize (c, base, user_data)))
        return_trace (false);
    return_trace (true);
  }

  /* Linear search using Type::cmp(); returns element index or -1. */
  template <typename SearchType>
  inline int lsearch (const SearchType &x) const
  {
    unsigned int count = len;
    for (unsigned int i = 0; i < count; i++)
      if (!this->array[i].cmp (x))
        return i;
    return -1;
  }

  private:
  /* Check the length field, then the aggregate element range
   * (multiplication overflow-guarded by check_array). */
  inline bool sanitize_shallow (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (len.sanitize (c) && c->check_array (array, Type::static_size, len));
  }

  public:
  LenType len;     /* Number of elements. */
  Type array[VAR]; /* Variable-length element storage. */
  public:
  DEFINE_SIZE_ARRAY (sizeof (LenType), array);
};
 936 template <typename Type> struct LArrayOf : ArrayOf<Type, ULONG> {};
 937 
/* Array of Offset's: each element is an OffsetTo<Type>; the base the
 * offsets resolve against is supplied by the caller of sanitize(). */
template <typename Type, typename OffsetType=USHORT>
struct OffsetArrayOf : ArrayOf<OffsetTo<Type, OffsetType> > {};
 941 
 942 /* Array of offsets relative to the beginning of the array itself. */
 943 template <typename Type>
 944 struct OffsetListOf : OffsetArrayOf<Type>
 945 {
 946   inline const Type& operator [] (unsigned int i) const
 947   {
 948     if (unlikely (i >= this->len)) return Null(Type);
 949     return this+this->array[i];
 950   }
 951 
 952   inline bool sanitize (hb_sanitize_context_t *c) const
 953   {
 954     TRACE_SANITIZE (this);
 955     return_trace (OffsetArrayOf<Type>::sanitize (c, this));
 956   }
 957   template <typename T>
 958   inline bool sanitize (hb_sanitize_context_t *c, T user_data) const
 959   {
 960     TRACE_SANITIZE (this);
 961     return_trace (OffsetArrayOf<Type>::sanitize (c, this, user_data));
 962   }
 963 };
 964 
 965 
 966 /* An array starting at second element. */
 967 template <typename Type, typename LenType=USHORT>
 968 struct HeadlessArrayOf
 969 {
 970   inline const Type& operator [] (unsigned int i) const
 971   {
 972     if (unlikely (i >= len || !i)) return Null(Type);
 973     return array[i-1];
 974   }
 975   inline unsigned int get_size (void) const
 976   { return len.static_size + (len ? len - 1 : 0) * Type::static_size; }
 977 
 978   inline bool serialize (hb_serialize_context_t *c,
 979                          Supplier<Type> &items,
 980                          unsigned int items_len)
 981   {
 982     TRACE_SERIALIZE (this);
 983     if (unlikely (!c->extend_min (*this))) return_trace (false);
 984     len.set (items_len); /* TODO(serialize) Overflow? */
 985     if (unlikely (!items_len)) return_trace (true);
 986     if (unlikely (!c->extend (*this))) return_trace (false);
 987     for (unsigned int i = 0; i < items_len - 1; i++)
 988       array[i] = items[i];
 989     items.advance (items_len - 1);
 990     return_trace (true);
 991   }
 992 
 993   inline bool sanitize (hb_sanitize_context_t *c) const
 994   {
 995     TRACE_SANITIZE (this);
 996     if (unlikely (!sanitize_shallow (c))) return_trace (false);
 997 
 998     /* Note: for structs that do not reference other structs,
 999      * we do not need to call their sanitize() as we already did
1000      * a bound check on the aggregate array size.  We just include
1001      * a small unreachable expression to make sure the structs
1002      * pointed to do have a simple sanitize(), ie. they do not
1003      * reference other structs via offsets.
1004      */
1005     (void) (false && array[0].sanitize (c));
1006 
1007     return_trace (true);
1008   }
1009 
1010   private:
1011   inline bool sanitize_shallow (hb_sanitize_context_t *c) const
1012   {
1013     TRACE_SANITIZE (this);
1014     return_trace (len.sanitize (c) &&
1015                   (!len || c->check_array (array, Type::static_size, len - 1)));
1016   }
1017 
1018   public:
1019   LenType len;
1020   Type array[VAR];
1021   public:
1022   DEFINE_SIZE_ARRAY (sizeof (LenType), array);
1023 };
1024 
1025 
1026 /*
1027  * An array with sorted elements.  Supports binary searching.
1028  */
1029 template <typename Type, typename LenType=USHORT>
1030 struct SortedArrayOf : ArrayOf<Type, LenType>
1031 {
1032   template <typename SearchType>
1033   inline int bsearch (const SearchType &x) const
1034   {
1035     /* Hand-coded bsearch here since this is in the hot inner loop. */
1036     const Type *array = this->array;
1037     int min = 0, max = (int) this->len - 1;
1038     while (min <= max)
1039     {
1040       int mid = (min + max) / 2;
1041       int c = array[mid].cmp (x);
1042       if (c < 0)
1043         max = mid - 1;
1044       else if (c > 0)
1045         min = mid + 1;
1046       else
1047         return mid;
1048     }
1049     return -1;
1050   }
1051 };
1052 
1053 /*
1054  * Binary-search arrays
1055  */
1056 
1057 struct BinSearchHeader
1058 {
1059   inline operator uint32_t (void) const { return len; }
1060 
1061   inline bool sanitize (hb_sanitize_context_t *c) const
1062   {
1063     TRACE_SANITIZE (this);
1064     return_trace (c->check_struct (this));
1065   }
1066 
1067   protected:
1068   USHORT        len;
1069   USHORT        searchRangeZ;
1070   USHORT        entrySelectorZ;
1071   USHORT        rangeShiftZ;
1072 
1073   public:
1074   DEFINE_SIZE_STATIC (8);
1075 };
1076 
/* SortedArrayOf whose length field is a full BinSearchHeader. */
template <typename Type>
struct BinSearchArrayOf : SortedArrayOf<Type, BinSearchHeader> {};
1079 
1080 
1081 /* Lazy struct and blob loaders. */
1082 
1083 /* Logic is shared between hb_lazy_loader_t and hb_lazy_table_loader_t */
1084 template <typename T>
1085 struct hb_lazy_loader_t
1086 {
1087   inline void init (hb_face_t *face_)
1088   {
1089     face = face_;
1090     instance = nullptr;
1091   }
1092 
1093   inline void fini (void)
1094   {
1095     if (instance && instance != &OT::Null(T))
1096     {
1097       instance->fini();
1098       free (instance);
1099     }
1100   }
1101 
1102   inline const T* get (void) const
1103   {
1104   retry:
1105     T *p = (T *) hb_atomic_ptr_get (&instance);
1106     if (unlikely (!p))
1107     {
1108       p = (T *) calloc (1, sizeof (T));
1109       if (unlikely (!p))
1110         p = const_cast<T *> (&OT::Null(T));
1111       else
1112         p->init (face);
1113       if (unlikely (!hb_atomic_ptr_cmpexch (const_cast<T **>(&instance), nullptr, p)))
1114       {
1115         if (p != &OT::Null(T))
1116           p->fini ();
1117         goto retry;
1118       }
1119     }
1120     return p;
1121   }
1122 
1123   inline const T* operator-> (void) const
1124   {
1125     return get ();
1126   }
1127 
1128   private:
1129   hb_face_t *face;
1130   T *instance;
1131 };
1132 
/* Logic is shared between hb_lazy_loader_t and hb_lazy_table_loader_t */
/* Lazy loader for a sanitized font table T: on first get() it
 * references, sanitizes, and locks the face's table blob, then
 * publishes the locked instance with an atomic compare-exchange. */
template <typename T>
struct hb_lazy_table_loader_t
{
  inline void init (hb_face_t *face_)
  {
    face = face_;
    instance = nullptr;
    blob = nullptr;
  }

  inline void fini (void)
  {
    /* Releases the blob reference taken by the winning get().
     * NOTE(review): `instance` is not reset here and the blob is not
     * explicitly unlocked — presumably destroy implies unlock; confirm
     * against hb_blob semantics before reusing after fini(). */
    hb_blob_destroy (blob);
  }

  inline const T* get (void) const
  {
  retry:
    T *p = (T *) hb_atomic_ptr_get (&instance);
    if (unlikely (!p))
    {
      /* Reference + sanitize the table, then lock to obtain the struct. */
      hb_blob_t *blob_ = OT::Sanitizer<T>::sanitize (face->reference_table (T::tableTag));
      p = const_cast<T *>(OT::Sanitizer<T>::lock_instance (blob_));
      if (!hb_atomic_ptr_cmpexch (const_cast<T **>(&instance), nullptr, p))
      {
        /* Lost the publish race: drop our blob reference and reread. */
        hb_blob_destroy (blob_);
        goto retry;
      }
      /* Only the winning thread records its blob for fini() to release. */
      blob = blob_;
    }
    return p;
  }

  inline const T* operator-> (void) const
  {
    return get();
  }

  private:
  hb_face_t *face;           /* Not owned. */
  T *instance;               /* Atomically published; nullptr until first get(). */
  mutable hb_blob_t *blob;   /* Owned reference backing `instance`; mutable so
                              * const get() can record it. */
};
1177 
1178 
1179 } /* namespace OT */
1180 
1181 
1182 #endif /* HB_OPEN_TYPE_PRIVATE_HH */