/******************************************************************************
 *
 * Name: acmacros.h - C macros for the entire subsystem.
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2013, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#ifndef __ACMACROS_H__
#define __ACMACROS_H__

/*
 * Extract data using a pointer. Anything larger than a byte can run into
 * potential alignment issues -- see the ACPI_MOVE macros below.
 * Use with care.
 */
#define ACPI_CAST8(ptr)                 ACPI_CAST_PTR (UINT8, (ptr))
#define ACPI_CAST16(ptr)                ACPI_CAST_PTR (UINT16, (ptr))
#define ACPI_CAST32(ptr)                ACPI_CAST_PTR (UINT32, (ptr))
#define ACPI_CAST64(ptr)                ACPI_CAST_PTR (UINT64, (ptr))
#define ACPI_GET8(ptr)                  (*ACPI_CAST8 (ptr))
#define ACPI_GET16(ptr)                 (*ACPI_CAST16 (ptr))
#define ACPI_GET32(ptr)                 (*ACPI_CAST32 (ptr))
#define ACPI_GET64(ptr)                 (*ACPI_CAST64 (ptr))
#define ACPI_SET8(ptr, val)             (*ACPI_CAST8 (ptr) = (UINT8) (val))
#define ACPI_SET16(ptr, val)            (*ACPI_CAST16 (ptr) = (UINT16) (val))
#define ACPI_SET32(ptr, val)            (*ACPI_CAST32 (ptr) = (UINT32) (val))
#define ACPI_SET64(ptr, val)            (*ACPI_CAST64 (ptr) = (UINT64) (val))
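
/*
 * Usage sketch (illustrative only): reading and writing values through raw
 * byte pointers. The buffer below is hypothetical; these macros perform
 * direct dereferences, so the address must be suitably aligned.
 *
 *     UINT64  AlignedBuffer[2];
 *     UINT8   *Ptr = ACPI_CAST_PTR (UINT8, AlignedBuffer);
 *     UINT16  Length;
 *
 *     Length = ACPI_GET16 (Ptr);               // Read 16 bits at offset 0
 *     ACPI_SET32 (Ptr + 4, 0x12345678);        // Store 32 bits at offset 4
 */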

/*
 * printf() format helpers
 */

/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */

#define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i), ACPI_LODWORD(i)

#if ACPI_MACHINE_WIDTH == 64
#define ACPI_FORMAT_NATIVE_UINT(i)      ACPI_FORMAT_UINT64(i)
#else
#define ACPI_FORMAT_NATIVE_UINT(i)      0, (i)
#endif
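
/*
 * Usage sketch (illustrative only): printing a 64-bit value as two 32-bit
 * halves. Any printf-style output routine works the same way; AcpiOsPrintf
 * is used here as the typical example.
 *
 *     UINT64  Address = 0x00000000FEE00000;
 *
 *     AcpiOsPrintf ("Address: %8.8X%8.8X\n", ACPI_FORMAT_UINT64 (Address));
 */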


/*
 * Macros for moving data around to/from buffers that are possibly unaligned.
 * If the hardware supports the transfer of unaligned data, just do the store.
 * Otherwise, we have to move one byte at a time.
 */
#ifdef ACPI_BIG_ENDIAN
/*
 * Macros for big-endian machines
 */

/* These macros reverse the bytes during the move, converting little-endian to big-endian */

                                                     /* Big Endian      <==        Little Endian */
                                                     /*  Hi...Lo                     Lo...Hi     */
/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d, s)        {((  UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[1];\
                                         ((  UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[0];}

#define ACPI_MOVE_16_TO_32(d, s)        {(*(UINT32 *)(void *)(d))=0;\
                                           ((UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[1];\
                                           ((UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[0];}

#define ACPI_MOVE_16_TO_64(d, s)        {(*(UINT64 *)(void *)(d))=0;\
                                           ((UINT8 *)(void *)(d))[6] = ((UINT8 *)(void *)(s))[1];\
                                           ((UINT8 *)(void *)(d))[7] = ((UINT8 *)(void *)(s))[0];}

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */

#define ACPI_MOVE_32_TO_32(d, s)        {((  UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[3];\
                                         ((  UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[2];\
                                         ((  UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[1];\
                                         ((  UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[0];}

#define ACPI_MOVE_32_TO_64(d, s)        {(*(UINT64 *)(void *)(d))=0;\
                                           ((UINT8 *)(void *)(d))[4] = ((UINT8 *)(void *)(s))[3];\
                                           ((UINT8 *)(void *)(d))[5] = ((UINT8 *)(void *)(s))[2];\
                                           ((UINT8 *)(void *)(d))[6] = ((UINT8 *)(void *)(s))[1];\
                                           ((UINT8 *)(void *)(d))[7] = ((UINT8 *)(void *)(s))[0];}

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */

#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)    /* Truncate to 32 */

#define ACPI_MOVE_64_TO_64(d, s)        {((  UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[7];\
                                         ((  UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[6];\
                                         ((  UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[5];\
                                         ((  UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[4];\
                                         ((  UINT8 *)(void *)(d))[4] = ((UINT8 *)(void *)(s))[3];\
                                         ((  UINT8 *)(void *)(d))[5] = ((UINT8 *)(void *)(s))[2];\
                                         ((  UINT8 *)(void *)(d))[6] = ((UINT8 *)(void *)(s))[1];\
                                         ((  UINT8 *)(void *)(d))[7] = ((UINT8 *)(void *)(s))[0];}
#else
/*
 * Macros for little-endian machines
 */

#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED

/* The hardware supports unaligned transfers, just do the little-endian move */

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d, s)        *(UINT16 *)(void *)(d) = *(UINT16 *)(void *)(s)
#define ACPI_MOVE_16_TO_32(d, s)        *(UINT32 *)(void *)(d) = *(UINT16 *)(void *)(s)
#define ACPI_MOVE_16_TO_64(d, s)        *(UINT64 *)(void *)(d) = *(UINT16 *)(void *)(s)

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */
#define ACPI_MOVE_32_TO_32(d, s)        *(UINT32 *)(void *)(d) = *(UINT32 *)(void *)(s)
#define ACPI_MOVE_32_TO_64(d, s)        *(UINT64 *)(void *)(d) = *(UINT32 *)(void *)(s)

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)    /* Truncate to 32 */
#define ACPI_MOVE_64_TO_64(d, s)        *(UINT64 *)(void *)(d) = *(UINT64 *)(void *)(s)

#else
/*
 * The hardware does not support unaligned transfers. We must move the
 * data one byte at a time. These macros work whether the source or
 * the destination (or both) is/are unaligned. (Little-endian move)
 */

/* 16-bit source, 16/32/64 destination */

#define ACPI_MOVE_16_TO_16(d, s)        {((  UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[0];\
                                         ((  UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[1];}

#define ACPI_MOVE_16_TO_32(d, s)        {(*(UINT32 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d, s);}
#define ACPI_MOVE_16_TO_64(d, s)        {(*(UINT64 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d, s);}

/* 32-bit source, 16/32/64 destination */

#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */

#define ACPI_MOVE_32_TO_32(d, s)        {((  UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[0];\
                                         ((  UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[1];\
                                         ((  UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[2];\
                                         ((  UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[3];}

#define ACPI_MOVE_32_TO_64(d, s)        {(*(UINT64 *)(void *)(d)) = 0; ACPI_MOVE_32_TO_32(d, s);}

/* 64-bit source, 16/32/64 destination */

#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)    /* Truncate to 16 */
#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)    /* Truncate to 32 */
#define ACPI_MOVE_64_TO_64(d, s)        {((  UINT8 *)(void *)(d))[0] = ((UINT8 *)(void *)(s))[0];\
                                         ((  UINT8 *)(void *)(d))[1] = ((UINT8 *)(void *)(s))[1];\
                                         ((  UINT8 *)(void *)(d))[2] = ((UINT8 *)(void *)(s))[2];\
                                         ((  UINT8 *)(void *)(d))[3] = ((UINT8 *)(void *)(s))[3];\
                                         ((  UINT8 *)(void *)(d))[4] = ((UINT8 *)(void *)(s))[4];\
                                         ((  UINT8 *)(void *)(d))[5] = ((UINT8 *)(void *)(s))[5];\
                                         ((  UINT8 *)(void *)(d))[6] = ((UINT8 *)(void *)(s))[6];\
                                         ((  UINT8 *)(void *)(d))[7] = ((UINT8 *)(void *)(s))[7];}
#endif  /* ACPI_MISALIGNMENT_NOT_SUPPORTED */
#endif  /* ACPI_BIG_ENDIAN */
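
/*
 * Usage sketch (illustrative only): copying a 32-bit field out of a possibly
 * unaligned byte stream (the pointer and offset below are hypothetical).
 * Depending on the configuration macros above, the same source compiles to
 * either a direct 32-bit store or a byte-by-byte copy, so callers never need
 * to test alignment themselves.
 *
 *     UINT8   *AmlStart;      // Points into a byte stream
 *     UINT32  Value;
 *
 *     ACPI_MOVE_32_TO_32 (&Value, AmlStart + 3);   // Source may be unaligned
 */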


/*
 * Fast power-of-two math macros for non-optimized compilers
 */
#define _ACPI_DIV(value, PowerOf2)      ((UINT32) ((value) >> (PowerOf2)))
#define _ACPI_MUL(value, PowerOf2)      ((UINT32) ((value) << (PowerOf2)))
#define _ACPI_MOD(value, Divisor)       ((UINT32) ((value) & ((Divisor) -1)))

#define ACPI_DIV_2(a)                   _ACPI_DIV(a, 1)
#define ACPI_MUL_2(a)                   _ACPI_MUL(a, 1)
#define ACPI_MOD_2(a)                   _ACPI_MOD(a, 2)

#define ACPI_DIV_4(a)                   _ACPI_DIV(a, 2)
#define ACPI_MUL_4(a)                   _ACPI_MUL(a, 2)
#define ACPI_MOD_4(a)                   _ACPI_MOD(a, 4)

#define ACPI_DIV_8(a)                   _ACPI_DIV(a, 3)
#define ACPI_MUL_8(a)                   _ACPI_MUL(a, 3)
#define ACPI_MOD_8(a)                   _ACPI_MOD(a, 8)

#define ACPI_DIV_16(a)                  _ACPI_DIV(a, 4)
#define ACPI_MUL_16(a)                  _ACPI_MUL(a, 4)
#define ACPI_MOD_16(a)                  _ACPI_MOD(a, 16)

#define ACPI_DIV_32(a)                  _ACPI_DIV(a, 5)
#define ACPI_MUL_32(a)                  _ACPI_MUL(a, 5)
#define ACPI_MOD_32(a)                  _ACPI_MOD(a, 32)
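
/*
 * Usage sketch (illustrative only): the helpers above replace divide,
 * multiply, and modulo by powers of two with shifts and masks. For example,
 * converting a bit count into whole bytes plus a bit remainder:
 *
 *     UINT32  BitLength = 37;
 *     UINT32  ByteLength = ACPI_DIV_8 (BitLength);     // 37 / 8 == 4
 *     UINT32  BitRemainder = ACPI_MOD_8 (BitLength);   // 37 % 8 == 5
 */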

/*
 * Rounding macros (Power of two boundaries only)
 */
#define ACPI_ROUND_DOWN(value, boundary)    (((ACPI_SIZE)(value)) & \
                                                (~(((ACPI_SIZE) boundary)-1)))

#define ACPI_ROUND_UP(value, boundary)      ((((ACPI_SIZE)(value)) + \
                                                (((ACPI_SIZE) boundary)-1)) & \
                                                (~(((ACPI_SIZE) boundary)-1)))

/* Note: sizeof(ACPI_SIZE) evaluates to either 4 or 8 (32- vs 64-bit mode) */

#define ACPI_ROUND_DOWN_TO_32BIT(a)         ACPI_ROUND_DOWN(a, 4)
#define ACPI_ROUND_DOWN_TO_64BIT(a)         ACPI_ROUND_DOWN(a, 8)
#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a)   ACPI_ROUND_DOWN(a, sizeof(ACPI_SIZE))

#define ACPI_ROUND_UP_TO_32BIT(a)           ACPI_ROUND_UP(a, 4)
#define ACPI_ROUND_UP_TO_64BIT(a)           ACPI_ROUND_UP(a, 8)
#define ACPI_ROUND_UP_TO_NATIVE_WORD(a)     ACPI_ROUND_UP(a, sizeof(ACPI_SIZE))

#define ACPI_ROUND_BITS_UP_TO_BYTES(a)      ACPI_DIV_8((a) + 7)
#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a)    ACPI_DIV_8((a))

#define ACPI_ROUND_UP_TO_1K(a)              (((a) + 1023) >> 10)

/* Generic (non-power-of-two) rounding */

#define ACPI_ROUND_UP_TO(value, boundary)   (((value) + ((boundary)-1)) / (boundary))

#define ACPI_IS_MISALIGNED(value)           (((ACPI_SIZE) value) & (sizeof(ACPI_SIZE)-1))
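
/*
 * Usage sketch (illustrative only): typical rounding results. The native-word
 * value assumes a 64-bit build where sizeof(ACPI_SIZE) == 8.
 *
 *     ACPI_ROUND_UP (17, 8);                  // == 24
 *     ACPI_ROUND_DOWN (17, 8);                // == 16
 *     ACPI_ROUND_BITS_UP_TO_BYTES (37);       // == 5 (bytes)
 *     ACPI_ROUND_UP_TO_NATIVE_WORD (10);      // == 16 on a 64-bit build
 */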

/*
 * Bitmask creation
 * Bit positions start at zero.
 * MASK_BITS_ABOVE creates a mask starting AT the position and above
 * MASK_BITS_BELOW creates a mask starting one bit BELOW the position
 * Each mask is intended for masking off (clearing) the named bits.
 */
#define ACPI_MASK_BITS_ABOVE(position)      (~((ACPI_UINT64_MAX) << ((UINT32) (position))))
#define ACPI_MASK_BITS_BELOW(position)      ((ACPI_UINT64_MAX) << ((UINT32) (position)))
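
/*
 * Usage sketch (illustrative only): the two masks are complementary. For
 * position 4:
 *
 *     ACPI_MASK_BITS_ABOVE (4);   // == 0x000000000000000F (keeps bits 0-3)
 *     ACPI_MASK_BITS_BELOW (4);   // == 0xFFFFFFFFFFFFFFF0 (keeps bits 4-63)
 */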

/* Bitfields within ACPI registers */

#define ACPI_REGISTER_PREPARE_BITS(Val, Pos, Mask) \
    ((Val << Pos) & Mask)

#define ACPI_REGISTER_INSERT_VALUE(Reg, Pos, Mask, Val) \
    Reg = (Reg & (~(Mask))) | ACPI_REGISTER_PREPARE_BITS(Val, Pos, Mask)

#define ACPI_INSERT_BITS(Target, Mask, Source) \
    Target = ((Target & (~(Mask))) | (Source & Mask))
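
/*
 * Usage sketch (illustrative only): updating one field within a register
 * image. The position and mask below are hypothetical, not taken from an
 * actual ACPI register definition.
 *
 *     UINT32  Reg = 0x0000A5C3;
 *
 *     // Replace bits 4-7 (mask 0x000000F0) with the value 0x9
 *     ACPI_REGISTER_INSERT_VALUE (Reg, 4, 0x000000F0, 0x9);
 *     // Reg is now 0x0000A593
 */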

/* Generic bitfield macros and masks */

#define ACPI_GET_BITS(SourcePtr, Position, Mask) \
    ((*SourcePtr >> Position) & Mask)

#define ACPI_SET_BITS(TargetPtr, Position, Mask, Value) \
    (*TargetPtr |= ((Value & Mask) << Position))

#define ACPI_1BIT_MASK      0x00000001
#define ACPI_2BIT_MASK      0x00000003
#define ACPI_3BIT_MASK      0x00000007
#define ACPI_4BIT_MASK      0x0000000F
#define ACPI_5BIT_MASK      0x0000001F
#define ACPI_6BIT_MASK      0x0000003F
#define ACPI_7BIT_MASK      0x0000007F
#define ACPI_8BIT_MASK      0x000000FF
#define ACPI_16BIT_MASK     0x0000FFFF
#define ACPI_24BIT_MASK     0x00FFFFFF

/* Macros to extract flag bits from position zero */

#define ACPI_GET_1BIT_FLAG(Value)                   ((Value) & ACPI_1BIT_MASK)
#define ACPI_GET_2BIT_FLAG(Value)                   ((Value) & ACPI_2BIT_MASK)
#define ACPI_GET_3BIT_FLAG(Value)                   ((Value) & ACPI_3BIT_MASK)
#define ACPI_GET_4BIT_FLAG(Value)                   ((Value) & ACPI_4BIT_MASK)

/* Macros to extract flag bits from position one and above */

#define ACPI_EXTRACT_1BIT_FLAG(Field, Position)     (ACPI_GET_1BIT_FLAG ((Field) >> Position))
#define ACPI_EXTRACT_2BIT_FLAG(Field, Position)     (ACPI_GET_2BIT_FLAG ((Field) >> Position))
#define ACPI_EXTRACT_3BIT_FLAG(Field, Position)     (ACPI_GET_3BIT_FLAG ((Field) >> Position))
#define ACPI_EXTRACT_4BIT_FLAG(Field, Position)     (ACPI_GET_4BIT_FLAG ((Field) >> Position))
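
/*
 * Usage sketch (illustrative only): pulling small flag fields out of a
 * packed byte, as is done when decoding AML field flags. The bit layout
 * below is hypothetical.
 *
 *     UINT8   Flags = 0x4D;                      // Binary 0100 1101
 *
 *     ACPI_GET_4BIT_FLAG (Flags);                // == 0xD (bits 0-3)
 *     ACPI_EXTRACT_2BIT_FLAG (Flags, 4);         // == 0x0 (bits 4-5)
 *     ACPI_EXTRACT_1BIT_FLAG (Flags, 6);         // == 0x1 (bit 6)
 */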

/* ACPI Pathname helpers */

#define ACPI_IS_ROOT_PREFIX(c)      ((c) == (UINT8) 0x5C) /* Backslash */
#define ACPI_IS_PARENT_PREFIX(c)    ((c) == (UINT8) 0x5E) /* Caret */
#define ACPI_IS_PATH_SEPARATOR(c)   ((c) == (UINT8) 0x2E) /* Period (dot) */
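
/*
 * Usage sketch (illustrative only): classifying the leading character of an
 * ACPI pathname such as "\_SB_.PCI0" or "^^PCI0.SBRG".
 *
 *     char    *Path = "\\_SB_.PCI0";
 *
 *     if (ACPI_IS_ROOT_PREFIX (Path[0]))     // '\' - absolute path
 *     {
 *         Path++;                            // Skip to the first name segment
 *     }
 */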

/*
 * An object of type ACPI_NAMESPACE_NODE can appear in some contexts
 * where a pointer to an object of type ACPI_OPERAND_OBJECT can also
 * appear. These macros are used to distinguish them.
 *
 * The "DescriptorType" field is the second field in both structures.
 */
#define ACPI_GET_DESCRIPTOR_PTR(d)      (((ACPI_DESCRIPTOR *)(void *)(d))->Common.CommonPointer)
#define ACPI_SET_DESCRIPTOR_PTR(d, p)   (((ACPI_DESCRIPTOR *)(void *)(d))->Common.CommonPointer = (p))
#define ACPI_GET_DESCRIPTOR_TYPE(d)     (((ACPI_DESCRIPTOR *)(void *)(d))->Common.DescriptorType)
#define ACPI_SET_DESCRIPTOR_TYPE(d, t)  (((ACPI_DESCRIPTOR *)(void *)(d))->Common.DescriptorType = (t))
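
/*
 * Usage sketch (illustrative only): deciding whether an opaque pointer
 * refers to a namespace node or an operand object. The descriptor type
 * constants (such as ACPI_DESC_TYPE_NAMED) are defined alongside the
 * ACPI_DESCRIPTOR union in the other subsystem headers.
 *
 *     void    *Object;    // May be a node or an operand object
 *
 *     if (ACPI_GET_DESCRIPTOR_TYPE (Object) == ACPI_DESC_TYPE_NAMED)
 *     {
 *         // Treat Object as an ACPI_NAMESPACE_NODE
 *     }
 */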

/*
 * Macros for the master AML opcode table
 */
#if defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
#define ACPI_OP(Name, PArgs, IArgs, ObjType, Class, Type, Flags) \
    {Name, (UINT32)(PArgs), (UINT32)(IArgs), (UINT32)(Flags), ObjType, Class, Type}
#else
#define ACPI_OP(Name, PArgs, IArgs, ObjType, Class, Type, Flags) \
    {(UINT32)(PArgs), (UINT32)(IArgs), (UINT32)(Flags), ObjType, Class, Type}
#endif

#define ARG_TYPE_WIDTH                  5
#define ARG_1(x)                        ((UINT32)(x))
#define ARG_2(x)                        ((UINT32)(x) << (1 * ARG_TYPE_WIDTH))
#define ARG_3(x)                        ((UINT32)(x) << (2 * ARG_TYPE_WIDTH))
#define ARG_4(x)                        ((UINT32)(x) << (3 * ARG_TYPE_WIDTH))
#define ARG_5(x)                        ((UINT32)(x) << (4 * ARG_TYPE_WIDTH))
#define ARG_6(x)                        ((UINT32)(x) << (5 * ARG_TYPE_WIDTH))

#define ARGI_LIST1(a)                   (ARG_1(a))
#define ARGI_LIST2(a, b)                (ARG_1(b)|ARG_2(a))
#define ARGI_LIST3(a, b, c)             (ARG_1(c)|ARG_2(b)|ARG_3(a))
#define ARGI_LIST4(a, b, c, d)          (ARG_1(d)|ARG_2(c)|ARG_3(b)|ARG_4(a))
#define ARGI_LIST5(a, b, c, d, e)       (ARG_1(e)|ARG_2(d)|ARG_3(c)|ARG_4(b)|ARG_5(a))
#define ARGI_LIST6(a, b, c, d, e, f)    (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a))

#define ARGP_LIST1(a)                   (ARG_1(a))
#define ARGP_LIST2(a, b)                (ARG_1(a)|ARG_2(b))
#define ARGP_LIST3(a, b, c)             (ARG_1(a)|ARG_2(b)|ARG_3(c))
#define ARGP_LIST4(a, b, c, d)          (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d))
#define ARGP_LIST5(a, b, c, d, e)       (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e))
#define ARGP_LIST6(a, b, c, d, e, f)    (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f))

#define GET_CURRENT_ARG_TYPE(List)      (List & ((UINT32) 0x1F))
#define INCREMENT_ARG_LIST(List)        (List >>= ((UINT32) ARG_TYPE_WIDTH))
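
/*
 * Usage sketch (illustrative only): walking a packed argument list. The
 * ARGP_LIST/ARGI_LIST macros pack up to six argument types, five bits each,
 * into one UINT32; a parser loop then peels them off one at a time. The
 * ArgTypes source below is hypothetical; a zero type marks the end of the
 * list.
 *
 *     UINT32  ArgTypes;       // Packed list obtained from the opcode table
 *     UINT32  ArgType;
 *
 *     while ((ArgType = GET_CURRENT_ARG_TYPE (ArgTypes)) != 0)
 *     {
 *         // ... parse one argument of type ArgType ...
 *         INCREMENT_ARG_LIST (ArgTypes);
 *     }
 */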

/*
 * ASCII error messages can be configured out
 */
#ifndef ACPI_NO_ERROR_MESSAGES
/*
 * Error reporting. The caller's module and line number are inserted by AE_INFO,
 * the plist contains a set of parens to allow variable-length lists.
 * These macros are used for both the debug and non-debug versions of the code.
 */
#define ACPI_ERROR_NAMESPACE(s, e)          AcpiUtNamespaceError (AE_INFO, s, e);
#define ACPI_ERROR_METHOD(s, n, p, e)       AcpiUtMethodError (AE_INFO, s, n, p, e);
#define ACPI_WARN_PREDEFINED(plist)         AcpiUtPredefinedWarning plist
#define ACPI_INFO_PREDEFINED(plist)         AcpiUtPredefinedInfo plist
#define ACPI_BIOS_ERROR_PREDEFINED(plist)   AcpiUtPredefinedBiosError plist

#else

/* No error messages */

#define ACPI_ERROR_NAMESPACE(s, e)
#define ACPI_ERROR_METHOD(s, n, p, e)
#define ACPI_WARN_PREDEFINED(plist)
#define ACPI_INFO_PREDEFINED(plist)
#define ACPI_BIOS_ERROR_PREDEFINED(plist)

#endif /* ACPI_NO_ERROR_MESSAGES */

#if (!ACPI_REDUCED_HARDWARE)
#define ACPI_HW_OPTIONAL_FUNCTION(addr)     addr
#else
#define ACPI_HW_OPTIONAL_FUNCTION(addr)     NULL
#endif


/*
 * Some code only gets executed when the debugger is built in.
 * Note that this is entirely independent of whether the
 * DEBUG_PRINT stuff (set by ACPI_DEBUG_OUTPUT) is on, or not.
 */
#ifdef ACPI_DEBUGGER
#define ACPI_DEBUGGER_EXEC(a)           a
#else
#define ACPI_DEBUGGER_EXEC(a)
#endif


/*
 * Macros used for ACPICA utilities only
 */

/* Generate a UUID */

#define ACPI_INIT_UUID(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
    (a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \
    (b) & 0xFF, ((b) >> 8) & 0xFF, \
    (c) & 0xFF, ((c) >> 8) & 0xFF, \
    (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)
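
/*
 * Usage sketch (illustrative only): the macro expands to 16 comma-separated
 * bytes, with the first three fields stored little-endian, so it can
 * initialize a byte array directly. The UUID value below is made up.
 *
 *     static const UINT8 MyUuid[16] =
 *     {
 *         ACPI_INIT_UUID (0x12345678, 0x9ABC, 0xDEF0,
 *             0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF)
 *     };
 */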

#define ACPI_IS_OCTAL_DIGIT(d)              (((char)(d) >= '0') && ((char)(d) <= '7'))


#endif /* __ACMACROS_H__ */