#!/usr/bin/env python3
# Copyright (c) 2010, 2011, 2014 Arista Networks, Inc.  All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.

import array
import errno
import gzip
import os
import re
import socket
import struct
import sys

from collections import namedtuple
from datetime import datetime

try:
   # Hide the import from pkgdeps.  QuickTrace is not
   # allowed to depend on Tac, but this is ok since there
   # is fallback code.
   Tac = __import__( 'Tac' )
   pktTraceFormatter = Tac.Type( 'Arnet::PktTraceFormatter' )
   # If QTPKT_HEX is set, trace as hex even if we could use PktTraceFormatter
   havePktTraceFormatter = ( os.environ.get( 'QTPKT_HEX', None ) != 'True' )
except Exception : # pylint: disable=broad-except
   # Tac (or the PktTraceFormatter type) is unavailable in this
   # environment; callers fall back to plain hex packet dumps.
   havePktTraceFormatter = False

# Deepest MPLS label stack an Arnet::MplsLabelOperation can encode; used as
# a sanity bound when decoding (see readMplsLabelOperation).
MplsStackEntryIndex_max = 12

# This should be updated every time a new file version is added
mostRecentVersionSupported = 5

class QtcatCorruptionException( Exception ):
   """Raised when QuickTrace file contents appear to be corrupt."""
   pass

def pdb():
   """Drop into an interactive Python debugger at the caller's frame.

   If the debugging session is quit (bdb.BdbQuit), the process exits
   immediately without running cleanup handlers.
   """
   import pdb as pdb_ # pylint: disable=import-outside-toplevel
   import bdb # pylint: disable=import-outside-toplevel
   try:
      # pylint: disable-msg=W0212
      # _getframe( 1 ) so the debugger stops in the caller, not here
      pdb_.Pdb().set_trace( sys._getframe( 1 ) )
   except bdb.BdbQuit:
      # pylint: disable-msg=W0212
      # Quitting the debugger terminates the whole process
      os._exit(1)


def toU8( s1 ):
   """Decode a 1-byte buffer (host byte order) as an unsigned 8-bit int."""
   ( val, ) = struct.unpack( "B", s1 )
   return val
def toU32( s4 ):
   """Decode a 4-byte buffer (host byte order) as an unsigned 32-bit int."""
   ( val, ) = struct.unpack( "I", s4 )
   return val
def toU64( s8 ):
   """Decode an 8-byte buffer (host byte order) as an unsigned 64-bit int."""
   ( val, ) = struct.unpack( "Q", s8 )
   return val

#----------------------------------------------------------------
#----------------------------------------------------------------
# Deserializers
#----------------------------------------------------------------
#----------------------------------------------------------------
def readChar( data, cur ):
   """Render the byte at data[ cur ] as printable text.

   Control bytes 8-31 use Python's escape form (e.g. \\t), quote and
   backslash get an explicit backslash escape, printable ASCII passes
   through unchanged, and everything else becomes a \\xNN hex escape.
   Returns ( text, bytesConsumed ).
   """
   byteVal = data[ cur ]
   if 8 <= byteVal < 32:
      # repr() yields the canonical escape, quoted; strip the quotes
      return ( repr( chr( byteVal ) )[ 1:-1 ], 1 )
   if byteVal in ( 39, 92 ):
      # single quote / backslash
      return ( "\\" + chr( byteVal ), 1 )
   if 32 <= byteVal < 127:
      return ( chr( byteVal ), 1 )
   return ( '\\x%02x' % byteVal, 1 )
# Precompiled formats for the fixed-width unsigned readers (host byte order)
_u8Struct = struct.Struct( 'B' )
_u16Struct = struct.Struct( 'H' )
_u32Struct = struct.Struct( 'I' )
_u64Struct = struct.Struct( 'Q' )

def readU8( data, cur ):
   """Read an unsigned 8-bit value; returns ( value, bytesConsumed )."""
   return ( _u8Struct.unpack_from( data, cur )[ 0 ], 1 )
def readU16( data, cur ):
   """Read an unsigned 16-bit value; returns ( value, bytesConsumed )."""
   return ( _u16Struct.unpack_from( data, cur )[ 0 ], 2 )
def readU32( data, cur ):
   """Read an unsigned 32-bit value; returns ( value, bytesConsumed )."""
   return ( _u32Struct.unpack_from( data, cur )[ 0 ], 4 )
def readU64( data, cur ):
   """Read an unsigned 64-bit value; returns ( value, bytesConsumed )."""
   return ( _u64Struct.unpack_from( data, cur )[ 0 ], 8 )
def readString( data, cur ):
   """Read a pascal-style string (1-byte length, then payload) and decode
   it, escaping undecodable bytes.  Returns ( text, bytesConsumed )."""
   strLen = _u8Struct.unpack_from( data, cur )[ 0 ]
   raw = data[ cur + 1 : cur + 1 + strLen ]
   return ( raw.decode( errors='backslashreplace' ), 1 + strLen )

def readBytes( data, cur ):
   """Read a pascal-style byte string (1-byte length, then payload) and
   return the raw bytes.  Returns ( payload, bytesConsumed )."""
   payloadLen = struct.unpack_from( 'B', data, cur )[ 0 ]
   payload = data[ cur + 1 : cur + 1 + payloadLen ]
   return ( payload, 1 + payloadLen )
def readBool( data, cur ):
   """Read a 1-byte boolean; returns ( value, bytesConsumed )."""
   ( flag, ) = struct.unpack_from( '?', data, cur )
   return ( flag, 1 )
def readFloat( data, cur ):
   """Read a 4-byte float; returns ( value, bytesConsumed )."""
   ( val, ) = struct.unpack_from( 'f', data, cur )
   return ( val, 4 )
def readDouble( data, cur ):
   """Read an 8-byte double; returns ( value, bytesConsumed )."""
   ( val, ) = struct.unpack_from( 'd', data, cur )
   return ( val, 8 )
def readIp( data, cur ):
   """IPv4 address stored least-significant octet first (host-order U32);
   rendered in dotted-quad form.  Returns ( text, bytesConsumed )."""
   octets = struct.unpack_from( "BBBB", data, cur )
   return ( "%s.%s.%s.%s" % tuple( reversed( octets ) ), 4 )
def readEth( data, cur ):
   """6-byte MAC address (network byte order) as aaaa.bbbb.cccc."""
   halfWords = struct.unpack_from( "!HHH", data, cur )
   return ( "%04x.%04x.%04x" % halfWords, 6 )
def readIpPrefix( data, cur ):
   """IPv4 prefix: 4 reversed address octets plus a 1-byte length."""
   octets = struct.unpack_from( "BBBB", data, cur )
   plen = struct.unpack_from( 'B', data, cur + 4 )[ 0 ]
   addr = "%s.%s.%s.%s" % tuple( reversed( octets ) )
   return ( "%s/%s" % ( addr, plen ), 5 )
def readIp6Addr( data, cur ):
   """16-byte IPv6 address rendered via inet_ntop."""
   return ( socket.inet_ntop( socket.AF_INET6, data[ cur : cur + 16 ] ), 16 )
def readIp6Prefix( data, cur ):
   """IPv6 prefix: 16 address bytes plus a 1-byte prefix length."""
   ( addr, consumed ) = readIp6Addr( data, cur )
   plen = struct.unpack_from( 'B', data, cur + consumed )[ 0 ]
   return ( "%s/%s" % ( addr, plen ), consumed + 1 )
def readAddrFamily( data, cur ):
   """Read the 1-byte address-family tag (1 => IPv4, 2 => IPv6 in this
   format).  Returns ( family, bytesConsumed )."""
   return ( struct.unpack_from( 'B', data, cur )[ 0 ], 1 )
def readIpGenAddr( data, cur ):
   """Arnet::IpGenAddr: AF byte followed by BOTH a 4-byte IPv4 field and a
   16-byte IPv6 field; the AF selects which one is rendered.  Returns
   ( text, bytesConsumed ) -- always 21 bytes consumed."""
   ( af, off ) = readAddrFamily( data, cur )
   ( v4Str, consumed ) = readIp( data, cur + off )
   off += consumed
   ( v6Str, consumed ) = readIp6Addr( data, cur + off )
   off += consumed
   return ( { 1: v4Str, 2: v6Str }.get( af, "unknown" ), off )
def readIpGenAddrOpt( data, cur ):
   """Optimized IpGenAddr: AF byte followed only by the address matching
   that family; nothing further is stored for an unknown family.
   Returns ( text, bytesConsumed )."""
   ( af, off ) = readAddrFamily( data, cur )
   if af == 1:
      reader = readIp
   elif af == 2:
      reader = readIp6Addr
   else:
      return ( "unknown", off )
   ( ipStr, consumed ) = reader( data, cur + off )
   return ( ipStr, off + consumed )
def readIpGenPrefix( data, cur ):
   '''Arnet::IpGenPrefix storage format: a 1-byte address family, a fixed
   16-byte address area (IPv4 uses only the first 4 bytes), and a 1-byte
   prefix length -- always 18 bytes total.

       +-+---------+-----------------------+-+
       |A|  IPv4   |        <unused>       |L|
       +-+---------+-----------------------+-+
       +-+---------------------------------+-+
       |A|              IPv6               |L|
       +-+---------------------------------+-+
       A: Address Family
       L: Prefix Length
   '''
   famInfo = {
      1 : ( socket.AF_INET, 4 ),
      2 : ( socket.AF_INET6, 16 ),
   }
   af = struct.unpack_from( 'B', data, cur )[ 0 ]
   if af in famInfo:
      ( sockAf, addrLen ) = famInfo[ af ]
      ipStr = socket.inet_ntop( sockAf, data[ cur + 1 : cur + 1 + addrLen ] )
   else:
      ipStr = "unknown address family(%d)" % af
   # prefix length lives after the full 16-byte address area
   plen = struct.unpack_from( 'B', data, cur + 17 )[ 0 ]
   return ( "%s/%s" % ( ipStr, plen ), 18 )
def readIpGenPrefixOpt( data, cur ):
   """Optimized IpGenPrefix: AF byte, prefix-length byte, then only the
   ceil( plen / 8 ) significant address bytes.  A length byte of 0xff
   encodes the null prefix.  Returns ( text, bytesConsumed )."""
   ( af, plen ) = struct.unpack_from( 'BB', data, cur )
   famInfo = {
      1 : ( socket.AF_INET, 4 ),
      2 : ( socket.AF_INET6, 16 ),
   }
   unknownStr = "unknown address family(%d)" % af
   if plen == 0xff:
      # 0xff is the on-disk marker for the null prefix
      return ( { 1: '0.0.0.0/255', 2: '::/255' }.get( af, unknownStr + '/0' ),
               2 )
   if af not in famInfo:
      return ( unknownStr + '/0', 2 )
   nbytes = ( plen + 7 ) // 8
   ( sockAf, addrLen ) = famInfo[ af ]
   assert nbytes <= addrLen
   # zero-pad the truncated address back to full width for inet_ntop
   padded = data[ cur + 2 : cur + 2 + nbytes ].ljust( addrLen, b'\x00' )
   return ( "%s/%s" % ( socket.inet_ntop( sockAf, padded ), plen ),
            nbytes + 2 )
def readIpGenAddrWithFullMask( data, cur ):
   """An IpGenAddr followed by a full-mask IpGenAddr; rendered addr/mask."""
   ( addrStr, off ) = readIpGenAddr( data, cur )
   ( maskStr, consumed ) = readIpGenAddr( data, cur + off )
   return ( "%s/%s" % ( addrStr, maskStr ), off + consumed )

def readIpGenAddrOptWithFullMask( data, cur ):
   """Optimized-encoding variant of readIpGenAddrWithFullMask."""
   ( addrStr, off ) = readIpGenAddrOpt( data, cur )
   ( maskStr, consumed ) = readIpGenAddrOpt( data, cur + off )
   return ( "%s/%s" % ( addrStr, maskStr ), off + consumed )

def _readAfAddr( data, cur ):
   """Internal function to read a sockaddrUnInet structure."""
   length = struct.unpack_from( "B", data, cur )[0]

   # The length of the actual address can be less than 4 bytes for
   # masks. For example: A /24 mask would be stored in three bytes as
   # 0xffffff.
   nBytes = [ 0, 0, 0, 0 ]
   numBytes = length - 4
   cur = cur + 4
   for ix in range(numBytes):
      nBytes[ix] = struct.unpack_from( 'B', data, cur + ix )[0]

   return ( "%d.%d.%d.%d" % tuple(nBytes), length )

def _readAf6Addr( data, cur, length ):
   # advance through sockaddr_un.in6 to gin6_addr field
   cur = cur + 8

   # Similar to _readAfAddr(), the actual number of bytes for the IPv6 address
   # can be less than 16 for masks.  So here we compute the number of address
   # bytes given to us.
   addrLen = length - 8

   # extract the sockaddr_un.in6.gin6_addr field bytes, and left-justify to 16
   # bytes padded with '\0' if necessary, to make inet_ntop() happy.
   gin6_addr = data[ cur:cur+addrLen ]
   gin6_addr = gin6_addr.ljust( 16, b'\0' )

   return ( socket.inet_ntop( socket.AF_INET6, gin6_addr ), length )

def _readAf6LlAddr( data, cur, length ):
   # IPv6 link-local addresses are dumped to quicktrace as sockaddr_un structure
   # representing the address and ifname to which the address is bound. So the
   # total length would be greater than or equal to 24 bytes. Extract the address
   # and ifname and display it in <addr>%<ifname> format.
   addrStr, _ = _readAf6Addr( data, cur, 24 )

   # Anything beyond the fixed 24-byte sockaddr is the interface name.
   ifnameLen = length - 24

   if ifnameLen:
      cur = cur + 24
      ifname = data[ cur : cur + ifnameLen ].decode()
   else:
      # No ifname. Extract ifindex from the addr and append it.
      # NOTE(review): cur is NOT advanced by 24 here, so this reads bytes
      # 10-13 of the sockaddr (offset 2-5 of the gin6_addr field) as a
      # big-endian signed value and renders it in hex -- confirm this
      # matches the writer's embedded-ifindex layout.
      ifname = format( struct.unpack( '>i', data[ cur + 10 : cur + 14] )[ 0 ], 'x' )
   addrStr = addrStr + '%' + ifname
   return ( addrStr, length )

def _readIsoAddr( data, cur, length ):
   # Iso addresses here are interpreted in the context of IS-IS
   # Typically
   #    byte0 : length
   #    byte1 : address family
   #    byte2 to length-2 : actual address
   # Typical Iso address [as seen in IS-IS]
   # 2626.2626.2626.a4

   act_length = length - 2
   iso_addr = data[ cur+2:cur+length ]
   isoString = ''
   odd_length = False
   if act_length % 2 != 0:
      odd_length = True
   j = 0
   for i in range( 0, act_length//2 ):
      if i:
         isoString += '.'
      isoString += "%02x%02x" % ( iso_addr[ j ],
                                  iso_addr[ j+1 ] )
      j = j + 2
   if odd_length:
      isoString += "-%02x" % iso_addr[ act_length - 1 ]
   return ( isoString, length )

def _readIsoLspAddr( data, cur, length ):
   # Iso addresses here are interpreted in the context of IS-IS
   # Typically
   #    byte0 : length
   #    byte1 : address family
   #    byte2 : sub-type
   #    byte3 to length-2 : actual address
   # Typical Iso address [as seen in IS-IS]
   # 2626.2626.2626.a400

   act_length = length - 3
   iso_addr = data[ cur+3:cur+length ]
   isoString = ''
   odd_length = False
   if act_length % 2 != 0:
      odd_length = True
   j = 0
   for i in range( 0, act_length//2 ):
      if i:
         isoString += '.'
      isoString += "%02x%02x" % ( iso_addr[ j ],
                                  iso_addr[ j+1 ] )
      j = j + 2
   if odd_length:
      isoString += "-%02x" % iso_addr[ act_length - 1 ]
   return ( isoString, length )

def _readSrTeV4Addr( data, cur, length ):
   #    byte0 : length ( Does not include 2 bytes of padding in the struct )
   #    byte1 : address family
   #    byte2 to byte 3: padding
   #    byte4 to byte length-9 : endpoint addr
   #    byte length-8 to byte length-5 : color
   #    byte length-4 to byte length-1 : distinguisher
   # Sample SR TE Address
   # 1|5|10.20.30.40
   ep = ''
   for ix in range( 4, length - 8 ):
      ep += str( struct.unpack_from( 'B', data, cur + ix )[ 0 ] ) + "."
   ep = ep[ : -1 ]
   color = struct.unpack_from( "!I", data, cur + length - 8 )[ 0 ]
   distinguisher = struct.unpack_from( "!I", data, cur + length - 4 )[ 0 ]
   nlri = '%u|%u|' % ( distinguisher, color )
   nlri += ep
   return ( nlri, length )

def _readSrTeV6Addr( data, cur, length ):
   #    byte0 : length ( Does not include 2 bytes of padding in the struct )
   #    byte1 : address family
   #    byte2 to byte 3: padding
   #    byte4 to byte 4+addr_len-1 : endpoint addr
   #    byte 4+addr_len to byte 8+addr_len-1 : color
   #    byte 8+addr_len to length-1 : distinguisher
   # Sample SR TE Address
   # 1|5|2002::a14:1e28
   addr_len = 16
   gin6_addr = data[ cur + 4 : cur + 4 + addr_len ]
   color = struct.unpack_from( "!I", data, cur + 4 + addr_len )[ 0 ]
   distinguisher = struct.unpack_from( "!I", data, cur + 8 + addr_len )[ 0 ]
   nlri = '%u|%u|' % ( distinguisher, color )
   nlri += socket.inet_ntop( socket.AF_INET6, gin6_addr )
   return ( nlri, length )

def readAfAddr( data, cur ):
   """Decode a sockaddrUnInet: 1-byte length, 1-byte address family,
   then family-specific payload.  Unknown families are rendered as an
   '<Address in AF n>' placeholder.  Returns ( text, length )."""
   ( length, family ) = struct.unpack_from( "BB", data, cur )

   PF_INET = 2
   PF_INET6 = 10
   # PF_ISO = PF_MAX  - taken from /usr/include/bits/socket.h
   PF_ISO = 41
   PF_ISO_LSP = PF_ISO + 1
   # PF_SR_TE_INET and INET6 need to match AR_SR_TE_INET and INET6 defined in
   # /src/gated/gated-ctk/src/util/sockaddr.h
   PF_SR_TE_INET = 44
   PF_SR_TE_INET6 = 45
   # PF_INET6_LINKLOCAL needs to match AF_INET6_LINKLOCAL defined in
   # gated/gated-ctk/src/util/sockaddr.h
   PF_INET6_LINKLOCAL = 254

   if family == PF_INET:
      # IPv4 reader computes its own length from the header
      return _readAfAddr( data, cur )

   # All remaining family-specific readers share the ( data, cur, length )
   # signature, so dispatch through a table.
   familyReaders = {
      PF_INET6 : _readAf6Addr,
      PF_INET6_LINKLOCAL : _readAf6LlAddr,
      PF_ISO : _readIsoAddr,
      PF_ISO_LSP : _readIsoLspAddr,
      PF_SR_TE_INET : _readSrTeV4Addr,
      PF_SR_TE_INET6 : _readSrTeV6Addr,
   }
   reader = familyReaders.get( family )
   if reader is not None:
      return reader( data, cur, length )

   # Punt other address families for now.
   return ( "<Address in AF %d>" % family, length )

def readIpAndPort( data, cur ):
   """IPv4 address (reversed octets) followed by a host-order U16 port;
   rendered as a.b.c.d:port.  Returns ( text, bytesConsumed )."""
   octets = struct.unpack_from( "BBBB", data, cur )
   port = struct.unpack_from( 'H', data, cur + 4 )[ 0 ]
   addr = "%s.%s.%s.%s" % tuple( reversed( octets ) )
   return ( "%s:%s" % ( addr, port ), 6 )

def readConnTuple( data, cur ):
   """Connection tuple: source ip:port, destination ip:port, and a
   1-byte protocol, rendered 'src -> dst,proto'.  Always 13 bytes."""
   ( srcStr, srcLen ) = readIpAndPort( data, cur )
   ( dstStr, dstLen ) = readIpAndPort( data, cur + srcLen )
   proto = struct.unpack_from( 'B', data, cur + srcLen + dstLen )[ 0 ]
   return ( "%s -> %s,%s" % ( srcStr, dstStr, proto ), 13 )

def readNatStaticKey( data, cur ):
   """NAT static key: ACL name string followed by an ip:port."""
   ( aclName, aclBytes ) = readString( data, cur )
   ( addrStr, addrBytes ) = readIpAndPort( data, cur + aclBytes )
   return ( "acl: %s, addr: %s" % ( aclName, addrStr ), aclBytes + addrBytes )

def readNatDynamicKey( data, cur ):
   """NAT dynamic key: ACL name string followed by a U16 group id."""
   ( aclName, aclBytes ) = readString( data, cur )
   ( groupId, groupBytes ) = readU16( data, cur + aclBytes )
   return ( "acl: %s, group: %u" % ( aclName, groupId ),
            aclBytes + groupBytes )

def readNatDynamicConn( data, cur ):
   """NAT dynamic connection: interface name string then a conn tuple."""
   ( intfName, intfBytes ) = readString( data, cur )
   ( connStr, connBytes ) = readConnTuple( data, cur + intfBytes )
   return ( "intf: %s, conn: %s" % ( intfName, connStr ),
            intfBytes + connBytes )

# BGP/EVPN ethernet segment
def readEthSegment( data, cur ):
   """10-byte ethernet segment id (network byte order) rendered as five
   colon-separated hex words.  Returns ( text, 10 )."""
   halfWords = struct.unpack_from( "!HHHHH", data, cur )
   return ( ":".join( "%04x" % w for w in halfWords ), 10 )

# BGP/EVPN label
def readEvpnLabel( data, cur ):
   """EVPN label word: high byte is the label type, low 24 bits the
   value; MPLS labels keep the value in the top 20 of those 24 bits.
   Returns ( text, 4 )."""
   dword = struct.unpack_from( 'I', data, cur )[ 0 ]
   typeNum = dword >> 24
   if typeNum == 0x00:
      return ( 'NotPresent', 4 )
   label = dword & 0xffffff
   if typeNum == 0x02:
      # MPLS label stored in the top 20 bits of the 24-bit value
      label >>= 4
   typeName = { 0x01: 'VNI', 0x02: 'MplsLabel' }.get( typeNum,
                                                      'UnsupportedType' )
   return ( "%s:%d" % ( typeName, label ), 4 )

# BGP/EVPN L2Attrs (VPWS) (Routing::Bgp::EvpnPlugin::EvpnL2Attrs)
def _readEvpnL2Attrs( data, cur, fxcModes, vlanNormVals ):
   # Decode an EvpnL2Attrs blob: a U32 flags word, followed (only when the
   # valid bit is set) by a U32 L2 MTU.  Flags word layout, low to high:
   #   bit 0: valid, bit 1: C flag, bit 2: P flag, bit 3: B flag,
   #   bits 8-15: FXC mode enum, bits 16-23: VLAN-normalization enum.
   # fxcModes / vlanNormVals map those enum values to display names; the
   # two public wrappers below supply different (current vs. legacy) maps.
   # Returns ( text, bytesConsumed ); text is "" when not present/valid.
   start = cur
   word1, count = readU32( data, cur )
   cur += count
   out = ""
   if word1:
      valid = word1 & (1<<0)
      if valid:
         cFlag = word1 & (1<<1)
         pFlag = word1 & (1<<2)
         bFlag = word1 & (1<<3)
         # shift the flag bits away; next byte is the FXC mode
         word1 >>= 8

         fxcMode = word1 & 0xFF
         word1 >>= 8
         if fxcMode:
            fxcModeStr = ";" + fxcModes.get( fxcMode,
                                             f"FxcMode({fxcMode})" )
         else:
            fxcModeStr = ""

         vlanNorm = word1 & 0xFF
         word1 >>= 8
         if vlanNorm:
            vlanNormStr = ";" + vlanNormVals.get( vlanNorm,
                                                  f"VlanNormalization({vlanNorm})" )
         else:
            vlanNormStr = ""

         # MTU word only present when the attrs are valid
         l2Mtu, count = readU32( data, cur )
         # flag letters are uppercase when set, lowercase when clear
         cVal = "C" if cFlag else "c"
         pVal = "P" if pFlag else "p"
         bVal = "B" if bFlag else "b"

         cur += count
         out = ", evpnL2Attrs:{b}{p}{c}{fxcMode}{vlanNorm};mtu={mtu}".format(
               b=bVal, p=pVal, c=cVal, fxcMode=fxcModeStr, vlanNorm=vlanNormStr,
               mtu=l2Mtu )

      # else -> nothing, empty string
   # else -> nothing, empty string
   return out, cur - start

def readEvpnL2Attrs( data, cur ):
   """Decode EVPN L2Attrs using the current enum value names."""
   return _readEvpnL2Attrs(
      data, cur,
      fxcModes={
         0: "notFxc",
         1: "fxcVlanSignaled",
         2: "fxcDefault",
         3: "fxcUndefined",
      },
      vlanNormVals={
         0: "notNormalized",
         1: "normalizedSingleVlan",
         2: "normalizedDoubleVlan",
         3: "normalizedUndefined",
      } )

def readEvpnL2AttrsWrongEnums( data, cur ):
   """Decode EVPN L2Attrs using the legacy (incorrect) enum value names,
   for traces written by older software."""
   return _readEvpnL2Attrs(
      data, cur,
      fxcModes={
         0: "notFxc",
         1: "fxcVlanUnaware",
         2: "fxcVlanAware",
         3: "fxcUndefined",
      },
      vlanNormVals={
         0: "notNormalized",
         1: "normalizedDoubleVlan",
         2: "normalizedSingleVlan",
         3: "normalizedUndefined",
      } )

# Pim Specific ones for qtcat
def readMRouteKey( data, cur ):
   '''Read a multicast route key (two IpGenAddrs), shown as (S, G).'''
   ( srcAddr, srcLen ) = readIpGenAddr( data, cur )
   ( grpAddr, grpLen ) = readIpGenAddr( data, cur + srcLen )
   return ( "(%s, %s)" % ( srcAddr, grpAddr ), srcLen + grpLen )

def readMRouteKeyOptimized( data, cur ):
   '''Read a multicast route key (optimized encoding), shown as (S, G).'''
   ( srcAddr, srcLen ) = readIpGenAddrOpt( data, cur )
   ( grpAddr, grpLen ) = readIpGenAddrOpt( data, cur + srcLen )
   return ( "(%s, %s)" % ( srcAddr, grpAddr ), srcLen + grpLen )

def readMfibRouteKey( data, cur ):
   '''Read a multicast route key (two IpPrefixes), shown as (S, G).'''
   ( srcPfx, srcLen ) = readIpPrefix( data, cur )
   ( grpPfx, grpLen ) = readIpPrefix( data, cur + srcLen )
   return ( "(%s, %s)" % ( srcPfx, grpPfx ), srcLen + grpLen )

def readMfibGenRouteKey( data, cur ):
   '''Read a multicast IpGenPrefix based route key, shown as (S, G).'''
   ( srcPfx, srcLen ) = readIpGenPrefix( data, cur )
   ( grpPfx, grpLen ) = readIpGenPrefix( data, cur + srcLen )
   return ( "(%s, %s)" % ( srcPfx, grpPfx ), srcLen + grpLen )

def readMfibGenRouteKeyOptimized( data, cur ):
   '''Optimized-encoding variant of readMfibGenRouteKey, shown as (S, G).'''
   ( srcPfx, srcLen ) = readIpGenPrefixOpt( data, cur )
   ( grpPfx, grpLen ) = readIpGenPrefixOpt( data, cur + srcLen )
   return ( "(%s, %s)" % ( srcPfx, grpPfx ), srcLen + grpLen )

def readMRouteFlags( data, cur ):
   '''Method to read multicast route flags.

   The wire format is 13 consecutive 1-byte booleans, one per flag
   letter, in the order listed in flagDefs below.  A synthetic "E"
   (expanded SG) flag is appended unless W or S was set.
   Returns ( flagString, bytesConsumed ).
   '''
   # ( letter, clearsExpandedSg ) in wire order; replaces 13 copies of
   # the same read-test-append stanza.
   flagDefs = (
      ( "R", False ),
      ( "W", True ),
      ( "S", True ),
      ( "J", False ),
      ( "T", False ),
      ( "B", False ),
      ( "K", False ),
      ( "Z", False ),
      ( "N", False ),
      ( "M", False ),
      ( "A", False ),
      ( "C", False ),
      ( "O", False ),
   )
   prev = cur
   flags = ""
   expandedSg = True
   for letter, clearsExpandedSg in flagDefs:
      ( isSet, nBytes ) = readBool( data, cur )
      cur += nBytes
      if isSet:
         flags += letter
         if clearsExpandedSg:
            expandedSg = False
   if expandedSg:
      flags += "E"

   return ( flags, cur - prev )

def readMRouteIntfJoinState( data, cur ):
   '''Read a multicast route interface join state (U32 enum).'''
   state = struct.unpack_from( 'I', data, cur )[ 0 ]
   # any value beyond the named ones renders as "joined"
   names = { 0: "noInfo", 1: "prunePending" }
   return ( names.get( state, "joined" ), 4 )

def readMRouteIntfRptJoinState( data, cur ):
   '''Read a multicast route interface RPT join state (U32 enum).'''
   state = struct.unpack_from( 'I', data, cur )[ 0 ]
   # any value beyond the named ones renders as "rptPrunePending"
   names = { 0: "rptNoInfo", 1: "rptPruned" }
   return ( names.get( state, "rptPrunePending" ), 4 )

def mplsLabelDescription( val ):
   """Human-readable MPLS label value; 0x100000 and 3 are this format's
   Null / ImplicitNull encodings, everything else is decimal."""
   special = { 0x100000: 'Null', 3: 'ImplicitNull' }
   return special.get( val, '%d' % val )

def readMplsLabel( data, cur ):
   """Read a U32 MPLS label and render it as MplsLabel(value)."""
   ( rawVal, consumed ) = readU32( data, cur )
   return 'MplsLabel(%s)' % mplsLabelDescription( rawVal ), consumed

def readMacVtep( data, cur ):
   """Mac/Vtep record: mac, vtep IPv4 address, U32 move count and U8
   preference, rendered [mac,ip,moves,pref]."""
   ( mac, macLen ) = readEth( data, cur )
   ( vtepIp, ipLen ) = readIp( data, cur + macLen )
   ( moves, movesLen ) = readU32( data, cur + macLen + ipLen )
   ( pref, prefLen ) = readU8( data, cur + macLen + ipLen + movesLen )
   total = macLen + ipLen + movesLen + prefLen
   return ( "[%s,%s,%d,%d]" % ( mac, vtepIp, moves, pref ), total )

def readMacVlanPair( data, cur ):
   """Mac/vlan pair: mac followed by a U16 vlan id, rendered [mac,vlan]."""
   ( mac, macLen ) = readEth( data, cur )
   ( vlan, vlanLen ) = readU16( data, cur + macLen )
   return ( "[%s,%d]" % ( mac, vlan ), macLen + vlanLen )

def readMplsLabelOperation( data, cur ):
   '''Arnet::MplsLabelOperation storage format:
   | U8 operation | U8 stackSize | stackSize * U32 labels |
   e.g.
   push 3 labels, 0x1000, 0x2000, 0x3000
   | 0x00 | 0x03 | 0x00001000 | 0x00002000 | 0x00003000 |
   '''
   opNames = {
      0: 'push',
      1: 'pop',
      2: 'swap',
      0xff: 'unknown',
   }
   ( op, consumed ) = readU8( data, cur )
   off = consumed
   ( stackSize, consumed ) = readU8( data, cur + off )
   if stackSize > MplsStackEntryIndex_max + 1:
      # A stack deeper than the encodable maximum means the trace data is
      # corrupt; dump some context before bailing out.
      hexDump = data[ cur:cur+40 ].hex()
      print( 'Current QuickTrace data:', hexDump )
      print( 'Bad MPLS label operation stack size:', stackSize )
      print( 'Start of MPLS label operation:', cur )
      print( 'Bad stack size offset:', cur + off )
      raise ValueError( 'Bad MPLS label operation stack size %u' % stackSize )
   off += consumed
   labels = []
   for _ in range( stackSize ):
      ( label, consumed ) = readU32( data, cur + off )
      off += consumed
      labels.append( mplsLabelDescription( label ) )
   return '%s[%s]' % ( opNames[ op ], ', '.join( labels ) ), off

def readTunnelKey( data, cur ):
   '''Tunnel key: IPv4 src, IPv4 dst, U32 vrf id, U64 generation id.'''
   fields = []
   off = 0
   for reader in ( readIp, readIp, readU32, readU64 ):
      ( val, consumed ) = reader( data, cur + off )
      fields.append( val )
      off += consumed
   return ( 'src:%s, dst:%s, vrf:%d, genId:%d' % tuple( fields ), off )

def readTunnelKeyGen( data, cur ):
   '''Tunnel key with IpGenAddr endpoints, U32 vrf id and U64 gen id.'''
   fields = []
   off = 0
   for reader in ( readIpGenAddr, readIpGenAddr, readU32, readU64 ):
      ( val, consumed ) = reader( data, cur + off )
      fields.append( val )
      off += consumed
   return ( 'src:%s, dst:%s, vrf:%d, genId:%d' % tuple( fields ), off )

def readTunnelKeyGenOptimized( data, cur ):
   '''Optimized-encoding variant of readTunnelKeyGen.'''
   fields = []
   off = 0
   for reader in ( readIpGenAddrOpt, readIpGenAddrOpt, readU32, readU64 ):
      ( val, consumed ) = reader( data, cur + off )
      fields.append( val )
      off += consumed
   return ( 'src:%s, dst:%s, vrf:%d, genId:%d' % tuple( fields ), off )

def readPseudowireKey( data, cur ):
   '''Pseudowire key: U32 id, generation-id string, IpGenAddr neighbor;
   rendered id/genId/neighbor.'''
   fields = []
   off = 0
   for reader in ( readU32, readString, readIpGenAddr ):
      ( val, consumed ) = reader( data, cur + off )
      fields.append( val )
      off += consumed
   return ( '%u/%s/%s' % tuple( fields ), off )

def readPseudowireKeyOptimized( data, cur ):
   '''Optimized-encoding variant of readPseudowireKey.'''
   fields = []
   off = 0
   for reader in ( readU32, readString, readIpGenAddrOpt ):
      ( val, consumed ) = reader( data, cur + off )
      fields.append( val )
      off += consumed
   return ( '%u/%s/%s' % tuple( fields ), off )

def readUnsignedSequence( data, cur ):
   '''Read a flexible sequence of unsigned values
   format:
     count:U8 sep:U8 [ size:U8 | value:size ] ... [ size:U8 | value:size ]
     e.g.
       \x02 \x3a \x01 \x31 \x02 \x12 \x34
     count=2, sep=':', value1=0x31, value2=0x3412
     output: "49:13330"

   Raises ValueError on a size byte that is not 1, 2, 4 or 8.
   '''
   ( count, off ) = readU8( data, cur )
   ( sep, offInc ) = readU8( data, cur + off )
   off += offInc
   if count == 0:
      return ( '', off )
   # Size-byte -> reader dispatch; hoisted out of the loop (it was
   # previously rebuilt on every iteration).
   szMap = {
      1: readU8,
      2: readU16,
      4: readU32,
      8: readU64,
      }
   vals = []
   for _ in range( count ):
      ( sz, offInc ) = readU8( data, cur + off )
      off += offInc
      if sz not in szMap:
         raise ValueError( 'Bad value size in unsigned sequence: %u' % sz )
      ( val, offInc ) = szMap[ sz ]( data, cur + off )
      off += offInc
      vals.append( str( val ) )
   sepChar = chr( sep )
   return ( sepChar.join( vals ), off )

def readSizeSpec( data, cur ):
   '''Reads the SizeSpec ( an array of 10 integers ).

   NOTE(review): assumes the platform C int is 4 bytes (40 bytes total).
   '''
   # array.array( 'i', <bytes> ) initializes from the raw buffer, like
   # frombytes()
   return array.array( 'i', data[ cur : cur + 40 ] ), 40

def readGlobalFecId( data, cur ):
   """Global FEC id: U64 fecId followed by U16 tableId (host order)."""
   ( fecId, ) = struct.unpack_from( 'Q', data, cur )
   ( tableId, ) = struct.unpack_from( 'H', data, cur + 8 )
   return ( 'fecId: %u, tableId: %u' % ( fecId, tableId ), 10 )

# BGP AFI numeric values -> display names.
afiToStr = { 0 : 'None',
             1 : 'ipv4',
             2 : 'ipv6',
             25 : 'l2vpn',
             16388 : 'linkState',
             16389 : '48bitMac'
}
# BGP SAFI numeric values -> display names.  Values above 0xff are
# private extensions that never go on the wire (see comment below).
safiToStr = { 0 : 'None',
              1 : 'ucast',
              2 : 'mcast',
              4 : 'mplsLabels',
              5 : 'mvpn',
              65 : 'vpls',
              70 : 'evpn',
              71 : 'linkState',
              73 : 'srTe',
              79 : 'dps',
              128 : 'mplsVpn',
              132 : 'rtMembership',
              133 : 'flowspec',
              134 : 'flowspecVpn',
              # 251-254 reserved for private use by IANA
              241 : 'reservedPrivate1',
              242 : 'reservedPrivate2',
              243 : 'reservedPrivate3',
              244 : 'reservedPrivate4',
              245 : 'reservedPrivate5',
              246 : 'reservedPrivate6',
              247 : 'reservedPrivate7',
              248 : 'reservedPrivate8',
              249 : 'reservedPrivate9',
              250 : 'reservedPrivate10',
              251 : 'reservedPrivate11',
              252 : 'reservedPrivate12',
              253 : 'reservedPrivate13',
              254 : 'reservedPrivate14',
              # New private values outside the 1 byte space going on the wire
              0x101 : 'privEvpnMacSegment',
              0x102 : 'privEvpnMacAddress',
              0x103 : 'privEvpnMacFloodTarget',
              0x104 : 'privEvpnMacEthSegment',
              0x105 : 'privEvpnMacArp',
              0x106 : 'privEvpnMacMulticast',
              0x107 : 'privEvpnMacJoinSynch',
              0x108 : 'privEvpnMacLeaveSynch',
              0x10a : 'privEvpnMacSpmsiAd',
              0x10c : 'privVplsAd',
              0x10d : 'privVplsVe',
              0x110 : 'privMvpnType1',
              0x111 : 'privMvpnType2',
              0x112 : 'privMvpnType3',
              0x113 : 'privMvpnType4',
              0x114 : 'privMvpnType5',
              0x115 : 'privMvpnType6',
              0x116 : 'privMvpnType7',
}

# Adj-RIB-In source type numeric values -> display names.
adjRibinTypeToStr = { 0 : 'None',
              1 : 'peer',
              2 : 'redist',
              3 : 'import',
              4 : 'export',
              5 : 'network',
              6 : 'aggregate',
              # fixed typo: was 'defaultOrigiante'
              7 : 'defaultOriginate',
              8 : 'rtMembershipImport',
              9 : 'linkStateProducer',
}

# BgpNlriType numeric values -> display names.  The numeric keys are a
# stable on-disk format for .qt files; see the warning near the end of
# the dict before changing anything.
nlriTypeToStr = {
   0: 'invalid',
   1: 'v4u',
   2: 'v6u',
   3: 'evpnType1',
   4: 'evpnType2',
   5: 'evpnType3',
   6: 'evpnType4',
   7: 'evpnType5Ipv4',
   8: 'evpnType5Ipv6',
   9: 'macSegment',
   10: 'macAddress',
   11: 'macFloodTarget',
   12: 'macEthSegment',
   13: 'macArp',
   14: 'rtMembership',
   15: 'v4Lu',
   16: 'v6Lu',
   17: 'v4m',
   18: 'mplsVpnv4u',
   19: 'mplsVpnv6u',
   20: 'flowspecv4u',
   21: 'flowspecv6u',
   22: 'v4srTe',
   23: 'v6srTe',
   24: 'v6m',
   25: 'evpnType6',
   26: 'evpnType7',
   27: 'evpnType8',
   28: 'evpnType10',
   29: 'macMulticast',
   30: 'macJoinSynch',
   31: 'macLeaveSynch',
   32: 'macSpmsiAd',
   33: 'lsNode',
   34: 'lsLink',
   35: 'lsV4Prefix',
   36: 'lsV6Prefix',
   37: 'ipv4Dps',
   38: 'ipv4Ipsec',
   39: 'ipv6Dps',
   40: 'ipv6Ipsec',
   41: 'ipv4MvpnType1',
   42: 'ipv4MvpnType2',
   43: 'ipv4MvpnType3',
   44: 'ipv4MvpnType4',
   45: 'ipv4MvpnType5',
   46: 'ipv4MvpnType6',
   47: 'ipv4MvpnType7',
   48: 'ipv4MvpnVpnType1',
   49: 'ipv4MvpnVpnType2',
   50: 'ipv4MvpnVpnType3',
   51: 'ipv4MvpnVpnType4',
   52: 'ipv4MvpnVpnType5',
   53: 'ipv4MvpnVpnType6',
   54: 'ipv4MvpnVpnType7',
   55: 'vplsVpnAd',
   56: 'vplsVpnVe',
   57: 'vplsVpnCe',
   58: 'vplsAd',
   59: 'ipEvpnResolution',
   60: 'ipAdSegment',
   61: 'vplsVe',
   62: 'flowspecVpnv4u',
   63: 'flowspecVpnv6u',
   # Do not modify any of the numeric key values above, the NlriType
   # numeric values must remain fixed so qtcat can decode .qt files
   # from older releases. If you find you need to, you made a mistake
   # when you added your entry to BgpNlriType.tac, any new entry
   # should go immediately before oneBeyondLastValid

   # As you add new values, you need to change the numeric value of
   # oneBeyondLastValid, we have this entry, so we can detect if
   # people add new NlriTypes and forget to update qtcat.
   #
   # You need to add new entries to the nlriTypeToAfiSafiStr function
   # below in addition to this dict.
   64: 'oneBeyondLastValid',
}

def nlriTypeToAfiSafiStr( nlriType ):
   '''Map a numeric BgpNlriType value to its ( afiStr, safiStr ) pair.

   The returned strings are the keys used to look up the route key
   formatter in bgpRouteKeyFormat. Unknown nlriType values produce
   placeholder strings instead of raising, so qtcat can still render
   traces written by newer agents.'''
   if nlriType not in nlriTypeToStr:
      return ( f'unknownAfi-NlriType{nlriType}',
               f'unknownSafi-NlriType{nlriType}' )
   nlriTypeStr = nlriTypeToStr[ nlriType ]
   if nlriTypeStr == 'v4u':
      return 'ipv4', 'ucast'
   if nlriTypeStr == 'v6u':
      return 'ipv6', 'ucast'
   if nlriTypeStr == 'v4m':
      return 'ipv4', 'mcast'
   if nlriTypeStr == 'v6m':
      return 'ipv6', 'mcast'
   if nlriTypeStr.startswith( 'evpn' ):
      return 'l2vpn', 'evpn'
   if nlriTypeStr == 'macSegment':
      return '48bitMac', 'privEvpnMacSegment'
   if nlriTypeStr == 'macAddress':
      return '48bitMac', 'privEvpnMacAddress'
   if nlriTypeStr == 'macFloodTarget':
      return '48bitMac', 'privEvpnMacFloodTarget'
   if nlriTypeStr == 'macEthSegment':
      return '48bitMac', 'privEvpnMacEthSegment'
   if nlriTypeStr == 'macArp':
      return '48bitMac', 'privEvpnMacArp'
   if nlriTypeStr == 'macMulticast':
      return '48bitMac', 'privEvpnMacMulticast'
   if nlriTypeStr == 'macJoinSynch':
      return '48bitMac', 'privEvpnMacJoinSynch'
   if nlriTypeStr == 'macLeaveSynch':
      return '48bitMac', 'privEvpnMacLeaveSynch'
   if nlriTypeStr == 'macSpmsiAd':
      return '48bitMac', 'privEvpnMacSpmsiAd'
   if nlriTypeStr == 'rtMembership':
      return 'ipv4', 'rtMembership'
   if nlriTypeStr.startswith( 'ls' ):
      return 'linkState', 'linkState'
   if nlriTypeStr == 'flowspecv4u':
      return 'ipv4', 'flowspec'
   if nlriTypeStr == 'flowspecv6u':
      return 'ipv6', 'flowspec'
   if nlriTypeStr == 'v4srTe':
      return 'ipv4', 'srTe'
   if nlriTypeStr == 'v6srTe':
      return 'ipv6', 'srTe'
   if nlriTypeStr == 'v4Lu':
      return 'ipv4', 'mplsLabels'
   if nlriTypeStr == 'v6Lu':
      return 'ipv6', 'mplsLabels'
   if nlriTypeStr == 'mplsVpnv4u':
      return 'ipv4', 'mplsVpn'
   if nlriTypeStr == 'mplsVpnv6u':
      return 'ipv6', 'mplsVpn'
   if nlriTypeStr == 'ipv4Dps':
      return 'ipv4', 'dps'
   if nlriTypeStr == 'ipv6Dps':
      # BUGFIX: previously returned 'ipv4' for the AFI ( copy/paste error ),
      # which made v6 DPS route keys fall back to the IPv4 formatter.
      return 'ipv6', 'dps'
   if nlriTypeStr == 'ipv4Ipsec':
      return 'ipv4', 'dps'
   if nlriTypeStr == 'ipv6Ipsec':
      # BUGFIX: previously returned 'ipv4' for the AFI ( copy/paste error ).
      # The SAFI stays 'dps', consistent with ipv4Ipsec above.
      return 'ipv6', 'dps'
   if nlriTypeStr.startswith( 'ipv4MvpnType' ):
      return 'ipv4', 'mvpn'
   if nlriTypeStr == 'ipv4MvpnVpnType1':
      return 'ipv4', 'privMvpnType1'
   if nlriTypeStr == 'ipv4MvpnVpnType2':
      return 'ipv4', 'privMvpnType2'
   if nlriTypeStr == 'ipv4MvpnVpnType3':
      return 'ipv4', 'privMvpnType3'
   if nlriTypeStr == 'ipv4MvpnVpnType4':
      return 'ipv4', 'privMvpnType4'
   if nlriTypeStr == 'ipv4MvpnVpnType5':
      return 'ipv4', 'privMvpnType5'
   if nlriTypeStr == 'ipv4MvpnVpnType6':
      return 'ipv4', 'privMvpnType6'
   if nlriTypeStr == 'ipv4MvpnVpnType7':
      return 'ipv4', 'privMvpnType7'
   # The two test* branches below are only reachable if matching entries
   # are ever added to nlriTypeToStr.
   if nlriTypeStr == 'testIpv4Evpn':
      return 'ipv4', 'evpn'
   if nlriTypeStr == 'testIpv6Evpn':
      return 'ipv6', 'evpn'
   if nlriTypeStr in [ 'vplsVpnAd', 'vplsVpnVe', 'vplsVpnCe' ]:
      return 'l2vpn', 'vpls'
   if nlriTypeStr == 'vplsAd':
      return 'l2vpn', 'privVplsAd'
   if nlriTypeStr == 'ipEvpnResolution':
      return 'ipv4', 'privEvpnMacArp'
   if nlriTypeStr == 'ipAdSegment':
      return 'ipv4', 'privEvpnMacSegment'
   if nlriTypeStr == 'vplsVe':
      return 'l2vpn', 'privVplsVe'
   if nlriTypeStr == 'flowspecVpnv4u':
      return 'ipv4', 'flowspecVpn'
   if nlriTypeStr == 'flowspecVpnv6u':
      return 'ipv6', 'flowspecVpn'
   if nlriTypeStr == 'oneBeyondLastValid':
      return 'None', 'None'
   # Catch all
   return 'nlriType=' + nlriTypeStr, 'nlriType=' + nlriTypeStr

# Route key formatters for BGP. Each AFI/SAFI can use its own mechanism. The key is
# either the AF or an ( AF, SAF ) pair. The value is a tuple or a dict of tuples -
# if a dict then the parser will examine the first character of the input to
# determine which tuple to use for formatting.
#
# In the tuple, the first string is the output string, format will be called on this
# string with args formed by parsing the rest of the fields - these are identified
# by the subsequent strings in the tuple.
#
# For example, ( '{0}', 'P' ) will readPrefix and return '{0}'.format( prefix ).
# ( '{0}/{1}', 'I', 'E' ) will readIp, then readEth, and return
# '{0}/{1}'.format( ip, eth ).
bgpRouteKeyFormat = {
   'ipv4': ( '{0}', 'P' ),
   'ipv6': ( '{0}', 'P6' ),
   ( 'l2vpn', 'evpn' ): {
      # Type 1 Auto-Discovery
      'A': ( '<EVPN-Type1-Key: rd:{0}, esi:{1}, etid:{2}>', 'RD', 'ES', 'i' ),
      # Type 2 MAC/IP
      'M': ( '<EVPN-Type2-Key: rd:{0}, etid:{1}, etid2:{2}, mac:{3}, ip:{4}>',
             'RD', 'i', 'i', 'E', 'IG' ),
      # Type 2 MAC/IP with optimized IpGenAddr
      'm': ( '<EVPN-Type2-Key: rd:{0}, etid:{1}, etid2:{2}, mac:{3}, ip:{4}>',
             'RD', 'i', 'i', 'E', 'IGO' ),
      # Type 3 IMET
      'I': ( '<EVPN-Type3-Key: rd:{0}, etid:{1}, etid2:{2}, ip:{3}, ip2:{4}>',
             'RD', 'i', 'i', 'IG', 'IG' ),
      # Type 3 IMET with optimized IpGenAddr
      'i': ( '<EVPN-Type3-Key: rd:{0}, etid:{1}, etid2:{2}, ip:{3}, ip2:{4}>',
             'RD', 'i', 'i', 'IGO', 'IGO' ),
      # Type 4 Ethernet Segment
      'E': ( '<EVPN-Type4-Key: rd:{0}, esi:{1}, ip:{2}>', 'RD', 'ES', 'IG' ),
      # Type 4 Ethernet Segment with optimized IpGenAddr
      'e': ( '<EVPN-Type4-Key: rd:{0}, esi:{1}, ip:{2}>', 'RD', 'ES', 'IGO' ),
      # Type 5 IpPrefix (IPv4)
      '4': ( '<EVPN-Type5-Key: rd:{0}, etid:{1}, ip:{2}>', 'RD', 'i', 'P' ),
      # Type 5 IpPrefix (IPv6)
      '6': ( '<EVPN-Type5-Key: rd:{0}, etid:{1}, ip:{2}>', 'RD', 'i', 'P6' ),
      # Type 6 SMET
      'S': ( '<EVPN-Type6-Key: rd:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, ' + \
             'origAddr:{4}>', 'RD', 'i', 'IG', 'IG', 'IG' ),
      # Type 6 SMET with optimized IpGenAddr
      's': ( '<EVPN-Type6-Key: rd:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, ' + \
             'origAddr:{4}>', 'RD', 'i', 'IGO', 'IGO', 'IGO' ),
      # Type 7 JoinSync
      'J': ( '<EVPN-Type7-Key: rd:{0}, etid:{1}, esi:{2}, srcAddr:{3}, ' + \
             'grpAddr:{4}, origAddr:{5}>', 'RD', 'i', 'ES', 'IG', 'IG', 'IG' ),
      # Type 7 JoinSync Optimized
      'j': ( '<EVPN-Type7-Key: rd:{0}, etid:{1}, esi:{2}, srcAddr:{3}, ' + \
             'grpAddr:{4}, origAddr:{5}>', 'RD', 'i', 'ES', 'IGO', 'IGO', 'IGO' ),
      # Type 8 LeaveSync
      'V': ( '<EVPN-Type8-Key: rd:{0}, etid:{1}, esi:{2}, srcAddr:{3}, ' + \
             'grpAddr:{4}, origAddr:{5}>', 'RD', 'i', 'ES', 'IG', 'IG', 'IG' ),
      # Type 8 LeaveSync Optimized
      'v': ( '<EVPN-Type8-Key: rd:{0}, etid:{1}, esi:{2}, srcAddr:{3}, ' + \
             'grpAddr:{4}, origAddr:{5}>', 'RD', 'i', 'ES', 'IGO', 'IGO', 'IGO' ),
      # Type 10 SpmsiAD
      'P': ( '<EVPN-Type10-Key: rd:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, ' + \
             'origAddr:{4}>', 'RD', 'i', 'IG', 'IG', 'IG' ),
      # Type 10 SpmsiAD Optimized
      'p': ( '<EVPN-Type10-Key: rd:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, ' + \
             'origAddr:{4}>', 'RD', 'i', 'IGO', 'IGO', 'IGO' ),
   },
   # reservedPrivate1 is the backwards compatible definition for
   # privEvpnMacSegment
   ( '48bitMac', 'reservedPrivate1' ):   ( 'rd:{0}, esi:{1}, etid:{2}', 'RD', 'ES',
                                           'i' ),
   ( '48bitMac', 'privEvpnMacSegment' ): (
      'rd:{0}, esi:{1}, etid:{2}, remoteDomain:{3}, vpwsLocalRoute:{4}',
      'RD', 'ES', 'i', 'b', 'b' ),
   ( 'ipv4', 'privEvpnMacSegment' ): (
      'rd:{0}, esi:{1}, etid:{2}, remoteDomain:{3}',
      'RD', 'ES', 'i', 'b' ),
   # reservedPrivate2 is the backwards compatible definition for
   # privEvpnMacAddress
   ( '48bitMac', 'reservedPrivate2' ):   ( 'mac:{0}, etid:{1}', 'E', 'i' ),
   ( '48bitMac', 'privEvpnMacAddress' ):
      ( 'mac:{0}, etid:{1}, etid2:{2}', 'E', 'i', 'i' ),
   # reservedPrivate3 is the backwards compatible definition for
   # privEvpnMacFloodTarget
   #
   # NOTE: None of Evpn L2 RouteKeys have 1 byte distinguisher based
   # scheme like l2evpn, evpn above. As a result we have to maintain
   # backwards compatibility for them and log using the legacy
   # IpGenAddr format.
   ( '48bitMac', 'reservedPrivate3' ):       ( 'vtep:{0}, etid:{1}', 'IG', 'i' ),
   ( '48bitMac', 'privEvpnMacFloodTarget' ):
   ( 'ip:{0}, ip2:{1}, etid:{2}, etid2:{3}', 'IG', 'IG', 'i', 'i' ),
   # reservedPrivate4 is the backwards compatible definition for
   # privEvpnMacEthSegment
   ( '48bitMac', 'reservedPrivate4' ):      ( 'rd:{0}, vtep:{1}, esi:{2}', 'RD',
                                              'IG', 'ES' ),
   ( '48bitMac', 'privEvpnMacEthSegment' ):
      ( 'rd:{0}, vtep:{1}, esi:{2}, remoteDomain:{3}', 'RD', 'IG', 'ES', 'b' ),
   # reservedPrivate5 is the backwards compatible definition for
   # privEvpnMacArp
   ( '48bitMac', 'reservedPrivate5' ): ( 'mac:{0}, ip:{1}, etid:{2}',
                                         'E', 'IG', 'i' ),
   ( '48bitMac', 'privEvpnMacArp' ):
      ( 'mac:{0}, ip:{1}, etid:{2}, etid2:{3}',
        'E', 'IG', 'i', 'i' ),
   ( 'ipv4', 'privEvpnMacArp' ):
      ( 'mac:{0}, ip:{1}, etid:{2}, etid2:{3}',
        'E', 'IG', 'i', 'i' ),
   # reservedPrivate6 is the backwards compatible definition for
   # privEvpnMacMulticast
   ( '48bitMac', 'reservedPrivate6' ):
      ( 'etid:{0}, srcAddr:{1}, grpAddr:{2},'\
            ' origAddr:{3}, remoteDomain:{4}', 'i', 'IG', 'IG', 'IG', 'b' ),
   ( '48bitMac', 'privEvpnMacMulticast' ):
      ( 'etid:{0}, srcAddr:{1}, grpAddr:{2},'\
            ' origAddr:{3}, remoteDomain:{4}', 'i', 'IG', 'IG', 'IG', 'b' ),
   # reservedPrivate7 is the backwards compatible definition for
   # privEvpnMacJoinSynch
   ( '48bitMac', 'reservedPrivate7' ):
      ( 'esi:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, origAddr:{4}',
        'ES', 'i', 'IG', 'IG', 'IG' ),
   ( '48bitMac', 'privEvpnMacJoinSynch' ):
      ( 'esi:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, origAddr:{4}',
        'ES', 'i', 'IG', 'IG', 'IG' ),
   # reservedPrivate8 is the backwards compatible definition for
   # privEvpnMacLeaveSynch
   ( '48bitMac', 'reservedPrivate8' ):
      ( 'esi:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, origAddr:{4}',
        'ES', 'i', 'IG', 'IG', 'IG' ),
   ( '48bitMac', 'privEvpnMacLeaveSynch' ):
      ( 'esi:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, origAddr:{4}',
        'ES', 'i', 'IG', 'IG', 'IG' ),
   # reservedPrivate10 is the backwards compatible definition for
   # privEvpnMacSpmsiAd
   ( '48bitMac', 'reservedPrivate10' ):
      ( 'etid:{0}, srcAddr:{1}, grpAddr:{2}, origAddr:{3}, remoteDomain:{4}',
        'i', 'IG', 'IG', 'IG', 'b' ),
   ( '48bitMac', 'privEvpnMacSpmsiAd' ):
      ( 'etid:{0}, srcAddr:{1}, grpAddr:{2}, origAddr:{3}, remoteDomain:{4}',
        'i', 'IG', 'IG', 'IG', 'b' ),
   ( 'ipv4', 'mplsVpn' ): ( 'rd: {0}, ip:{1}', 'RD', 'P' ),
   ( 'ipv6', 'mplsVpn' ): ( 'rd: {0}, ip:{1}', 'RD', 'P6' ),
   ( 'ipv4', 'flowspec' ): ( 'ruleId:{0}', 'q' ),
   ( 'ipv6', 'flowspec' ): ( 'ruleId:{0}', 'q' ),
   ( 'ipv4', 'flowspecVpn' ): ( 'rd:{0}, ruleId:{1}', 'RD', 'q' ),
   ( 'ipv6', 'flowspecVpn' ): ( 'rd:{0}, ruleId:{1}', 'RD', 'q' ),
   ( 'ipv4', 'srTe' ) : ( '{0}|{1}|{2}', 'i', 'i', 'I' ),
   ( 'ipv6', 'srTe' ) : ( '{0}|{1}|{2}', 'i', 'i', 'I6' ),
   ( 'linkState', 'linkState' ) : {
      'N' : ( 'nodeRk: proto:{0}, identifier:{1}, nodeId:{2}', 'u', 'q', 'IGPN' ),
      'L' : ( 'linkRk: proto:{0}, identifier:{1}, lNodeId:{2}, rNodeId:{3}, ' +\
            'v4lAddr:{4}, v6lAddr:{5}', 'u', 'q', 'IGPN', 'IGPN', 'IG', 'IG' ),
      'l' : ( 'linkRk: proto:{0}, identifier:{1}, lNodeId:{2}, rNodeId:{3}, ' + \
              'v4lAddr:{4}, v6lAddr:{5}', 'u', 'q', 'IGPN', 'IGPN', 'IGO', 'IGO' ),
      'P' : ( 'prefixRk: proto:{0}, identifier:{1}, nodeId:{2}, prefix:{3}',
              'u', 'q', 'IGPN', 'PG' ),
      'p' : ( 'prefixRk: proto:{0}, identifier:{1}, nodeId:{2}, prefix:{3}',
              'u', 'q', 'IGPN', 'PGO' ),
      },
   ( 'ipv4', 'rtMembership' ) : ( '{0}', 'RTMRK' ),
   ( 'l2vpn', 'vpls' ): {
      'A': ( 'AD: rd:{0}, peAddr:{1}', 'RD', 'i' ),
      'V': ( 'VE: rd:{0}, veId:{1}, vbo:{2}', 'RD', 's', 's' ),
   },
   ( 'l2vpn', 'privVplsAd' ): ( 'peAddr:{0}', 'i' ),
   ( 'l2vpn', 'privVplsVe' ): ( 'veId:{0}, vbo:{1}', 's', 's' ),
}

# Nexthop formatters are similar to route key formatters, but keyed by the first
# byte, not by AFI/SAFI.
bgpNextHopFormat = {
   # Each value follows the same tuple convention as bgpRouteKeyFormat:
   # a format string followed by QtReader field-reader keys.
   # IPv4 address
   0x01: ( '{0}', 'I' ),
   # IPv6 address
   0x02: ( '{0}', 'I6' ),
   # IPv4 address + router MAC
   0x03: ( '<ip:{0}, routerMac:{1}>', 'I', 'E' ),
   # IPv6 address + router MAC
   0x04: ( '<ip:{0}, routerMac:{1}>', 'I6', 'E' ),
   # EvpnADNexthop version 1:
   #   IPv4 tunnel endpoint + label + singleActive flag + esiLabel
   0x05: ( '<ip:{0}, label:{1}, singleActive:{2}, esiLabel:{3}>',
           'I', 'EL', 'b', 'EL' ),
   # IPv4 tunnel endpoint + segment ID + tunnel key + L3 tunnel key
   0x06: ( '<ip:{0}, esi:{1}, label:{2}, label2:{3}>', 'I', 'ES', 'EL', 'EL' ),
   # IPv6 address + mpls label
   0x07: ( '<ipv6: {0}, label:{1}>', 'I6', 'ML' ),
   # Ipv4 address + Mpls label stored as a U32
   0x08: ( '<ip:{0}, label:{1}>', 'I', 'i' ),
   # Ipv6 address + Mpls label stored as a U32
   0x09: ( '<ipv6:{0}, label:{1}>', 'I6', 'i' ),
   # Ipv6 link-local address + interface stored as string
   0x0A: ( '{0}%{1}', 'I6', 'p' ),
   # EvpnMacIpNexthop:
   #   IPv4 address + segment ID + L2 label + L3 label + ndProxy flag
   0x0B: ( '<ip:{0}, esi:{1}, label:{2}, label2:{3}, ndProxyFlag:{4}>',
          'I', 'ES', 'EL', 'EL', 'b' ),
   # EvpnADNexthop version 2 (replaces 0x05 above)
   #   parameter {4} is an optional EvpnL2Attrs
   0x0C: ( '<ip:{0}, label:{1}, singleActive:{2}, esiLabel:{3}{4}>',
           'I', 'EL', 'b', 'EL', 'EL2old' ),
   # IPv4 address + mcastFlags (SMET and JoinSync EVPN routes)
   0x0D: ( '<ip:{0}, mcastFlags:{1}>', 'I', 'u' ),
   # IPv6 address + mcastFlags
   0x0E: ( '<ip:{0}, mcastFlags:{1}>', 'I6', 'u' ),
   # IPv4 address + mcastFlags + maxResponseTime (LeaveSync EVPN routes)
   0x0F: ( '<ip:{0}, mcastFlags:{1}, maxResponseTime:{2}>', 'I', 'u', 'u' ),
   # IPv6 address + mcastFlags + maxResponseTime
   0x10: ( '<ip:{0}, mcastFlags:{1}, maxResponseTime:{2}>', 'I6', 'u', 'u' ),
   # EvpnADNexthop version 3 (replaces 0x0C above)
   #   Using new enum values for fxcMode, vlanNormVals
   0x11: ( '<ip:{0}, label:{1}, singleActive:{2}, esiLabel:{3}{4}>',
           'I', 'EL', 'b', 'EL', 'EL2' ),
   # EvpnImetNexthop version 2 (replaces 0x01 above)
   #   IPv4 address + remoteDomain flag
   0x12: ( '<ip:{0}, remoteDomain:{1}>', 'I', 'b' ),
   # EvpnIpPrefixNexthopSelf
   #   L3 VXLAN label + routerMac + L3 MPLS label
   0x13: ( '<l3Label:{0}, routerMac:{1}, l3Label2:{2}>', 'EL', 'E', 'EL' ),
   # EvpnGatewayIpPrefixNexthop v4 address:
   #   IPv4 address + L3 label + routerMac +
   #   local L3 Label + local routerMac + local L3 Label2
   0x14: ( '<ip:{0}, l3Label:{1}, routerMac:{2}, localL3Label:{3}, ' +\
         'localRouterMac:{4}, localL3Label2:{5}>',
         'I', 'EL', 'E', 'EL', 'E', 'EL' ),
   # EvpnGatewayIpPrefixNexthop v6 address:
   #   IPv6 address + L3 label + routerMac +
   #   local L3 Label + local routerMac + local L3 Label2
   0x15: ( '<ip:{0}, l3Label:{1}, routerMac:{2}, localL3Label:{3}, ' +\
         'localRouterMac:{4}, localL3Label2:{5}>',
         'I6', 'EL', 'E', 'EL', 'E', 'EL' ),
   # RtMembershipGenNexthop and VplsVpnAdNexthop
   0x16: ( '{0}', 'IG' ),
   # EvpnMacIpNexthopSelf:
   #  segment ID + VXLAN L2 label + VXLAN L3 label + MPLS L2 label + MPLS L3 label +
   #  ndProxyFlag
   0x17: ( '<esi:{0}, label:{1}, l3Label:{2}, label2:{3}, l3Label2:{4}, ' +\
         'ndProxyFlag:{5}>', 'ES', 'EL', 'EL', 'EL', 'EL', 'b' ),
   # EvpnImetNexthopSelf:
   #  VXLAN label + MPLS label
   0x18: ( '<label:{0}, label2:{1}>', 'EL', 'EL' ),
   # EvpnIpPrefixNexthopSelf (with SRv6 SID)
   #   L3 VXLAN label + routerMac + L3 MPLS label + L3 SRv6 SID
   0x19: ( '<l3Label:{0}, routerMac:{1}, l3Label2:{2}, l3Sid:{3}>',
           'EL', 'E', 'EL', 'P6' ),
   # Ipv6 address + SRv6 SID
   0x1A: ( '<ipv6:{0}, l3Sid:{1}>', 'I6', 'P6' ),
   # VplsVpnVeNexthop: address + VBS + MPLS label
   0x1B: ( '<ip:{0}, vbs:{1}, labelBase:{2}>', 'IG', 's', 'ML' ),
   # RtMembershipGenNexthop and VplsVpnAdNexthop based on Optimized IpGenAddr
   0x1C: ( '{0}', 'IGO' ),
   # VplsVpnVeNexthop: address + VBS + MPLS label based on Optimized IpGenAddr
   0x1D: ( '<ip:{0}, vbs:{1}, labelBase:{2}>', 'IGO', 's', 'ML' ),
}

def readBgpVrfId( data, cur ):
   '''Decode a BGP VRF id: a U8 id-type tag followed by a U32 vrf id.
   Returns ( "type-vrfId:N" string, bytes consumed ).'''
   start = cur
   ( idType, n ) = readU8( data, cur )
   cur += n
   ( vrfId, n ) = readU32( data, cur )
   cur += n
   idTypeNames = { 0: 'L2',
                   1: 'L3',
                   2: 'SBD',
                   3: 'VPLS' }
   vrfStr = '%s-vrfId:%d' % ( idTypeNames.get( idType, 'unknown' ), vrfId )
   return ( vrfStr, cur - start )

def readBgpRouteKeyCommon( prev, data, cur, nlriType, afiStr, safiStr ):
   '''Decode the ( vrfId + afi/safi-specific ) tail shared by the old and
   new BGP route key encodings. prev is where the whole key started, so
   the returned byte count covers the leading bytes already consumed by
   the caller.'''
   ( vrfStr, n ) = readBgpVrfId( data, cur )
   cur += n

   # Prefer the ( afi, safi )-specific formatter, then the afi-wide one,
   # then a literal 'unknown' placeholder.
   fmt = bgpRouteKeyFormat.get( ( afiStr, safiStr ) )
   if not fmt:
      fmt = bgpRouteKeyFormat.get( afiStr ) or ( 'unknown', )

   # A dict of formats needs one more byte to pick the variant.
   if hasattr( fmt, 'keys' ):
      ( distinguisher, n ) = readChar( data, cur )
      cur += n
      fmt = fmt[ distinguisher ]

   fields = []
   for fieldKey in fmt[ 1: ]:
      rdr = QtReader.reader( fieldKey )
      if not rdr:
         print( "Failed to find a reader for key", fieldKey )
         fields.append( fieldKey )
      else:
         ( strep, n ) = rdr( data, cur )
         cur += n
         fields.append( strep )

   rkStr = fmt[ 0 ].format( *fields )
   return ( ' RouteKey<(%s/%s) (%s; %s nlriType-%d)> ' % (
      afiStr, safiStr, vrfStr, rkStr, nlriType ), cur - prev )

# Here is the format of the qt ring
# octets    |  1     |  1   |   4    |         var         |
# content   |nlriType|l2OrL3|rawVrfId|---afiSafiSpecific---|
def readBgpRouteKeyNew( data, cur ):
   '''Decode the new-style BGP route key: a leading nlriType byte from
   which afi/safi are derived, followed by the common payload.'''
   start = cur
   ( nlriType, n ) = readU8( data, cur )
   afiStr, safiStr = nlriTypeToAfiSafiStr( nlriType )
   return readBgpRouteKeyCommon( start, data, cur + n, nlriType,
                                 afiStr, safiStr )

# Here is the format of the qt ring
# octets    | 2 | 1  |  1     |  1   |   4    |         var         |
# content   |afi|safi|nlriType|l2OrL3|rawVrfId|---afiSafiSpecific---|
def readBgpRouteKeyOld( data, cur ):
   '''Decode the old-style BGP route key: explicit U16 afi and U8 safi,
   then the nlriType byte, then the common payload. Unknown afi/safi
   values are rendered as unknownAfi<N>/unknownSafi<N>.'''
   start = cur
   ( afi, n ) = readU16( data, cur )
   cur += n
   ( safi, n ) = readU8( data, cur )
   cur += n

   afiStr = afiToStr.get( afi, f'unknownAfi{afi}' )
   safiStr = safiToStr.get( safi, f'unknownSafi{safi}' )

   ( nlriType, n ) = readU8( data, cur )
   cur += n

   return readBgpRouteKeyCommon( start, data, cur, nlriType, afiStr, safiStr )

# Here is the format of Bmp RouteKey
# octets    | 2 | 1  |var    |   var   |
# content   |afi|safi|vrfName|---key---|
def readBmpRouteKey( data, cur ):
   '''Decode a BMP route key: U16 afi, U8 safi, vrf name string, then the
   afi/safi-specific key fields formatted via bgpRouteKeyFormat.

   Returns ( formatted string, bytes consumed ).'''
   prev = cur
   ( afi, nBytes ) = readU16( data, cur )
   cur += nBytes
   ( safi, nBytes ) = readU8( data, cur )
   cur += nBytes

   # Fall back to placeholder names for afi/safi values this qtcat does
   # not know about, instead of raising KeyError. This matches
   # readBgpRouteKeyOld and keeps decoding of newer .qt files alive.
   afiStr = afiToStr[ afi ] if afi in afiToStr else f'unknownAfi{afi}'
   safiStr = safiToStr[ safi ] if safi in safiToStr else \
             f'unknownSafi{safi}'

   ( vrfStr, nBytes ) = readString( data, cur )
   cur += nBytes
   # Get one of:
   # - bgpRouteKeyFormat[ ( afiStr, safiStr ) ]
   # - bgpRouteKeyFormat[ afiStr ]
   # - ( 'unknown', )
   rkFormat = ( bgpRouteKeyFormat.get( ( afiStr, safiStr ) ) or
                bgpRouteKeyFormat.get( afiStr ) or ( 'unknown', ) )

   # Dict: need to consume one more byte to distinguish.
   if hasattr( rkFormat, 'keys' ):
      ( distinguisher, nBytes ) = readChar( data, cur )
      cur += nBytes
      rkFormat = rkFormat[ distinguisher ]

   fields = []
   for fieldKey in rkFormat[ 1: ]:
      r = QtReader.reader( fieldKey )
      if not r:
         print( "Failed to find a reader for key", fieldKey )
         fields.append( fieldKey )
         continue
      strep, nBytes = r( data, cur )
      cur += nBytes
      fields.append( strep )

   rkStr = rkFormat[ 0 ].format( *fields )
   return ( ' RouteKey<(%s/%s) (%s; %s)> ' % (
      afiStr, safiStr, vrfStr, rkStr ), cur - prev )

def readAfi( data, cur ):
   '''Decode a U16 AFI value into "<AFI:name>".'''
   ( afi, n ) = readU16( data, cur )
   return ( "<AFI:%s>" % afiToStr.get( afi, "unknown" ), n )

def readSafiLegacy( data, cur ):
   '''Decode a U8 SAFI value into "<SAFI:name>".'''
   ( safi, n ) = readU8( data, cur )
   return ( "<SAFI:%s>" % safiToStr.get( safi, "unknown" ), n )

def formatAfiSafi( afi, safi ):
   '''Render numeric afi/safi values as "<AFI:x>/<SAFI:y>".'''
   return "<AFI:%s>/<SAFI:%s>" % ( afiToStr.get( afi, "unknown" ),
                                   safiToStr.get( safi, "unknown" ) )

def readAfiSafi( data, cur ):
   '''Decode a U32 that packs ( afi << 16 | safi ).'''
   ( packed, n ) = readU32( data, cur )
   return ( formatAfiSafi( packed >> 16, packed & 0xffff ), n )

def formatNlriType( nlriType ):
   '''Render a numeric BgpNlriType value as "<NLRI:name>".'''
   return "<NLRI:%s>" % nlriTypeToStr.get( nlriType, "unknown" )

def readNlriType( data, cur ):
   '''Decode a U8 NLRI type into "<NLRI:name>".'''
   ( nlriType, n ) = readU8( data, cur )
   return ( formatNlriType( nlriType ), n )

def readNlriType2AfiSafi( data, cur ):
   '''Decode a U8 NLRI type and render both its name and the afi/safi
   pair it maps to.'''
   ( nlriType, n ) = readU8( data, cur )
   afiStr, safiStr = nlriTypeToAfiSafiStr( nlriType )
   rendered = formatNlriType( nlriType ) + \
              "<AFI:%s>/<SAFI:%s>" % ( afiStr, safiStr )
   return ( rendered, n )

def readAdjRibinType( data, cur ):
   '''Decode a U8 adj-rib-in type into "<AdjRibin:name>".'''
   ( adjRibinType, n ) = readU8( data, cur )
   name = adjRibinTypeToStr.get( adjRibinType, "unknown" )
   return ( "<AdjRibin:%s>" % name, n )

def readIpAddrWithMask( data, cur ):
   '''Decode an IPv4 address followed by a one-byte prefix length,
   rendered "addr/len".'''
   ( ipAddr, n ) = readIp( data, cur )
   ( plen, ) = struct.unpack_from( "B", data, cur + n )
   return ( "%s/%d" % ( ipAddr, plen ), n + 1 )

def readIp6AddrWithMask( data, cur ):
   '''Decode an IPv6 address followed by a one-byte prefix length,
   rendered "addr/len".'''
   ( ip6Addr, n ) = readIp6Addr( data, cur )
   ( plen, ) = struct.unpack_from( 'B', data, cur + n )
   return ( "%s/%d" % ( ip6Addr, plen ), n + 1 )

def readBgpGenNextHop( data, cur ):
   '''Decode a BGP generalized nexthop: vrf id, then a U8 nexthop type
   selecting an entry in bgpNextHopFormat, then the type-specific
   fields.'''
   start = cur

   ( vrfStr, n ) = readBgpVrfId( data, cur )
   cur += n

   ( nhType, n ) = readU8( data, cur )
   cur += n

   fmt = bgpNextHopFormat.get( nhType, ( 'NULL', ) )

   fields = []
   for fieldKey in fmt[ 1: ]:
      rdr = QtReader.reader( fieldKey )
      if not rdr:
         print( "Failed to find a reader for key", fieldKey )
         fields.append( fieldKey )
      else:
         ( strep, n ) = rdr( data, cur )
         cur += n
         fields.append( strep )
   return ( fmt[ 0 ].format( *fields ) + " (" + vrfStr + ")", cur - start )

def readGenRange( data, cur ):
   '''Halo::GenRange
   Decode a U8 genRangeType tag followed by the type-specific value:
      0   : unknown -> rendered "n/a" ( consistent with stringValue )
      1   : ipGenPrefix
      2   : port ( L4PortRange )
      255 : ipGenAddrWithFullMask
   '''
   dispatch = {
      0 : lambda d, c: ( 'n/a', 0 ),
      1 : readIpGenPrefix,
      2 : readUnsignedSequence,
      255 : readIpGenAddrWithFullMask,
   }
   ( genRangeType, off ) = readU8( data, cur )
   ( valueStr, n ) = dispatch[ genRangeType ]( data, cur + off )
   return ( valueStr, off + n )

def readGenRangeOptimized( data, cur ):
   '''Halo::GenRange ( optimized IpGenAddr encoding )
   Decode a U8 genRangeType tag followed by the type-specific value:
      0   : unknown -> rendered "n/a" ( consistent with stringValue )
      1   : ipGenPrefix ( optimized )
      2   : port ( L4PortRange )
      255 : ipGenAddrWithFullMask ( optimized )
   '''
   dispatch = {
      0 : lambda d, c: ( 'n/a', 0 ),
      1 : readIpGenPrefixOpt,
      2 : readUnsignedSequence,
      255 : readIpGenAddrOptWithFullMask,
   }
   ( genRangeType, off ) = readU8( data, cur )
   ( valueStr, n ) = dispatch[ genRangeType ]( data, cur + off )
   return ( valueStr, off + n )

def readFapmask( data, cur ):
   '''Decode a 128-bit fapmask ( two U64s, low word first ) into a compact
   list of fapIds with consecutive runs collapsed, e.g. "(0,2-4,6)".'''
   ( valueLo, off ) = readU64( data, cur )
   ( valueHi, n ) = readU64( data, cur + off )
   off += n

   fapmask = valueLo | ( valueHi << 64 )
   fapIds = [ bit for bit in range( 128 ) if fapmask & ( 1 << bit ) ]

   # Render each maximal run of consecutive ids either as a single id
   # ( "6" ) or a "start-stop" range ( "2-4" ).
   pieces = []
   idx = 0
   total = len( fapIds )
   while idx < total:
      runStart = idx
      while idx + 1 < total and fapIds[ idx + 1 ] == fapIds[ idx ] + 1:
         idx += 1
      if idx == runStart:
         pieces.append( str( fapIds[ runStart ] ) )
      else:
         pieces.append( '%d-%d' % ( fapIds[ runStart ], fapIds[ idx ] ) )
      idx += 1

   return ( '({:s})'.format( ','.join( pieces ) ), off )

def readEthColonSeparated( data, cur ):
   '''Decode a 6-byte ethernet address as colon-separated lowercase hex.'''
   octets = struct.unpack_from( "!6B", data, cur )
   return ( "%02x:%02x:%02x:%02x:%02x:%02x" % octets, 6 )

def readIpGenUcastKeyRef( data, cur ):
   '''Decode a U16 vrf id followed by an IpGenPrefix.'''
   ( vrf, off ) = readU16( data, cur )
   ( ipAddr, n ) = readIpGenPrefix( data, cur + off )
   return ( 'vrf:%d, ipAddr:%s' % ( vrf, ipAddr ), off + n )
def readIpGenUcastKeyRefOptimized( data, cur ):
   '''Decode a U16 vrf id followed by an optimized-encoding IpGenPrefix.'''
   ( vrf, off ) = readU16( data, cur )
   ( ipAddr, n ) = readIpGenPrefixOpt( data, cur + off )
   return ( 'vrf:%d, ipAddr:%s' % ( vrf, ipAddr ), off + n )

def readBfdPeer( data, cur ):
   '''Decode a BFD peer key: destination ip, vrf name, source ip,
   interface name, tunnel id and session type.'''
   off = 0
   values = []
   # Fields in on-the-wire order, each reader returns ( value, size ).
   for rdr in ( readIpGenAddr, readString, readIpGenAddr, readString,
                readU64, readU32 ):
      ( val, n ) = rdr( data, cur + off )
      off += n
      values.append( val )
   return ( "peer(ip:%s, vrf:%s, srcip:%s, intf:%s, tunnelId:%d, type:%d)" %
            tuple( values ), off )

def readBfdPeerOptimized( data, cur ):
   '''Decode a BFD peer key whose addresses use the optimized IpGenAddr
   encoding: destination ip, vrf name, source ip, interface name,
   tunnel id and session type.'''
   off = 0
   values = []
   # Fields in on-the-wire order, each reader returns ( value, size ).
   for rdr in ( readIpGenAddrOpt, readString, readIpGenAddrOpt, readString,
                readU64, readU32 ):
      ( val, n ) = rdr( data, cur + off )
      off += n
      values.append( val )
   return ( "peer(ip:%s, vrf:%s, srcip:%s, intf:%s, tunnelId:%d, type:%d)" %
            tuple( values ), off )

def readDuidMacBindingKey( data, cur ):
   '''Decode a DUID/MAC binding key: duid type, hardware type, link-layer
   address, uuid, enterprise number and enterprise id.'''
   off = 0
   values = []
   # Fields in on-the-wire order, each reader returns ( value, size ).
   for rdr in ( readU16, readU16, readEth, readString, readU32, readString ):
      ( val, n ) = rdr( data, cur + off )
      off += n
      values.append( val )
   return ( "duid(type: {}, hwtype: {}, lladdr: {}, uuid: {}, entno: {},"
            " entId: {})".format( *values ),
            off )

def readDsfSystemId( data, cur ):
   '''Decode a DSF system id: a U32 id followed by a role string.'''
   ( sysid, n1 ) = readU32( data, cur )
   ( role, n2 ) = readString( data, cur + n1 )
   return ( "SystemId(%d, %s)" % ( sysid, role ), n1 + n2 )

def readEthMask( data, cur ):
   '''Decode an ethernet address followed by an ethernet mask, rendered
   "addr/mask".'''
   ( eth, n1 ) = readEth( data, cur )
   ( mask, n2 ) = readEth( data, cur + n1 )
   return ( f'{eth}/{mask}', n1 + n2 )

def readIsisSystemId( data, cur ):
   '''Decode a 6-byte IS-IS system id as "xxxx.xxxx.xxxx".'''
   octets = struct.unpack_from( "6B", data, cur )
   return ( "%02x%02x.%02x%02x.%02x%02x" % octets, 6 )

def readIgpNodeId( data, cur ):
   '''Read Routing::Bgp::LinkStatePlugin::IgpNodeId. The data should contain
      a byte identifying the type of the igp node id followed by the id value
      contained in an U64'''
   # read the igp node id type
   ( idType, _ ) = readU8( data, cur )
   # Unrecognized id types fall through and render as "unknown", but the
   # full 9 bytes are still consumed.
   nodeId = "unknown"
   if idType == 1:
      # isisSystemId type
      # Skip the first 2 bytes of the U64 containing the node id
      # ( cur + 3 = 1 type byte + 2 unused high bytes )
      ( a, b, c, d, e, f ) = struct.unpack_from( "BBBBBB", data, cur + 3 )
      # NOTE(review): the bytes are rendered in reverse order ( f..a ),
      # presumably because the id is packed into a little-endian U64 —
      # confirm against the writer side.
      nodeId = "%02x%02x.%02x%02x.%02x%02x" % ( f, e, d, c, b, a )
   elif idType == 2:
      # isisLanId type: 6-byte system id plus a pseudonode id byte
      # ( cur + 2 = 1 type byte + 1 unused high byte )
      ( lanId, a, b, c, d, e, f ) = struct.unpack_from( "BBBBBBB", data, cur + 2 )
      nodeId = "%02x%02x.%02x%02x.%02x%02x.%02x" % ( f, e, d, c, b, a, lanId )
   elif idType == 5:
      # BGP router ID: the low 4 bytes of the U64, printed as a dotted
      # quad ( again byte-reversed )
      ( a, b, c, d ) = struct.unpack_from( "BBBB", data, cur + 1 )
      nodeId = "%u.%u.%u.%u" % ( d, c, b, a )
   # Fixed size: 1 type byte + 8 value bytes
   return ( nodeId, 9 )

# Renderer routine used to read Cspf::Destination and Cspf::NodeId
# from the ring buffer
def readCspfVertex( data, cur ):
   '''Decode a Cspf::Destination / Cspf::NodeId: a U8 vertex-type tag
   followed by the type-specific value.'''
   dispatch = {
      0 : lambda d, c: ( 'n/a', 0 ),
      1 : readIpPrefix,
      2 : readIp6Prefix,
      3 : readIp,
      4 : readIp6Addr,
      5 : readIsisSystemId,
      6 : readIp,
   }
   ( vtxType, off ) = readU8( data, cur )
   ( vtx, n ) = dispatch[ vtxType ]( data, cur + off )
   return ( "%s" % vtx, off + n )

def readMplsFwdEqvClass( data, cur ):
   '''Decode an MPLS forwarding equivalence class: a U8 fecType tag
   followed by type-specific fields. Raises ValueError on an
   unrecognized fecType.'''
   ( fecType, off ) = readU8( data, cur )
   if fecType == 0:
      # Plain prefix FEC
      ( prefix, n ) = readIpGenPrefix( data, cur + off )
      return ( prefix, off + n )
   if fecType == 1:
      # RSVP: session and signalling-path CLI ids
      ( sessionCliId, n ) = readU32( data, cur + off )
      off += n
      ( spCliId, n ) = readU32( data, cur + off )
      off += n
      return ( 'RSVP[Session=%d, SP=%d]' % ( sessionCliId, spCliId ), off )
   if fecType == 2:
      # MLDP: root address and opaque id
      ( rootIp, n ) = readIpGenAddr( data, cur + off )
      off += n
      ( opaqueId, n ) = readU32( data, cur + off )
      off += n
      return ( 'MLDP[Root=%s, ID=%d]' % ( rootIp, opaqueId ), off )
   if fecType == 3:
      # Prefix + SR algorithm id
      ( prefix, n ) = readIpGenPrefix( data, cur + off )
      off += n
      ( algoId, n ) = readU32( data, cur + off )
      off += n
      return ( 'PFXALGO[%s, %d]' % ( prefix, algoId ), off )
   raise ValueError( 'Bad FwdEqvClass fecType' )

def readMplsFwdEqvClassOptimized( data, cur ):
   '''Decode an MPLS forwarding equivalence class whose addresses use the
   optimized IpGenAddr encoding. Raises ValueError on an unrecognized
   fecType.'''
   ( fecType, off ) = readU8( data, cur )
   if fecType == 0:
      # Plain prefix FEC
      ( prefix, n ) = readIpGenPrefixOpt( data, cur + off )
      return ( prefix, off + n )
   if fecType == 1:
      # RSVP: session and signalling-path CLI ids
      ( sessionCliId, n ) = readU32( data, cur + off )
      off += n
      ( spCliId, n ) = readU32( data, cur + off )
      off += n
      return ( 'RSVP[Session=%d, SP=%d]' % ( sessionCliId, spCliId ), off )
   if fecType == 2:
      # MLDP: root address and opaque id
      ( rootIp, n ) = readIpGenAddrOpt( data, cur + off )
      off += n
      ( opaqueId, n ) = readU32( data, cur + off )
      off += n
      return ( 'MLDP[Root=%s, ID=%d]' % ( rootIp, opaqueId ), off )
   if fecType == 3:
      # Prefix + SR algorithm id
      ( prefix, n ) = readIpGenPrefixOpt( data, cur + off )
      off += n
      ( algoId, n ) = readU32( data, cur + off )
      off += n
      return ( 'PFXALGO[%s, %d]' % ( prefix, algoId ), off )
   raise ValueError( 'Bad FwdEqvClass fecType' )

def readIdSet( data, cur ):
   """Decode a serialized IdSet into a compact '(a,b-d,e)' string.

   Returns ( string representation, bytes consumed ).
   """
   ( count, off ) = readU8( data, cur )
   if count == 0:
      return ( '()', off )
   # Each of the 'count' entries is a bucket number plus a 64-bit
   # membership mask; bit b of bucket k represents id k * 64 + b.
   ids = []
   for _ in range( count ):
      ( bucketId, n ) = readU8( data, cur + off )
      off += n
      ( mask, n ) = readU64( data, cur + off )
      off += n
      base = bucketId * 64
      for bit in range( 64 ):
         if ( mask >> bit ) & 0x1:
            ids.append( base + bit )

   # Collapse runs of consecutive ids so that each run renders as either
   # 'start' or 'start-stop'.
   # e.g. [ 0, 2, 3, 4, 6 ] -> [ '0', '2-4', '6' ]
   pieces = []
   idx = 0
   total = len( ids )
   while idx < total:
      runStart = ids[ idx ]
      runEnd = runStart
      idx += 1
      while idx < total and ids[ idx ] == runEnd + 1:
         runEnd = ids[ idx ]
         idx += 1
      if runEnd == runStart:
         pieces.append( str( runStart ) )
      else:
         pieces.append( '%d-%d' % ( runStart, runEnd ) )

   # e.g. '(0,2-4,6)'
   return ( '({:s})'.format( ','.join( pieces ) ), off )

# Format templates for serialized BGP peer keys, indexed by the leading
# keyType byte.  Each tuple is ( format string, reader key, ... ): the
# reader keys are resolved through QtReader.reader to decode the fields
# that follow the keyType byte, and the decoded values are substituted
# into the format string (see readBgpPeerKey).
bgpPeerKeyFormat = {
   0x01: ( '{0}%{1}', 'IG','p' ),
   0x02: ( '{0}', 'IG' ),
   0x03: ( '{0}', 'p' ),
   0x04: ( '{0} ({1})', 'IG', 'I' ),
   # New formats based on optimized IpGenAddr
   0x05: ( '{0}%{1}', 'IGO', 'p' ),
   0x06: ( '{0}', 'IGO' ),
   0x07: ( '{0} ({1})', 'IGO', 'I' ),
}

def readBgpPeerKey( data, cur ):
   # Decode a BGP peer key: a type byte selecting an entry in
   # bgpPeerKeyFormat, followed by that entry's fields.
   start = cur
   ( keyType, consumed ) = readU8( data, cur )
   cur += consumed
   fmtSpec = bgpPeerKeyFormat.get( keyType, ( 'NULL', ) )
   values = []
   for fieldKey in fmtSpec[ 1: ]:
      fieldReader = QtReader.reader( fieldKey )
      if not fieldReader:
         print( "Failed to find a reader for key", fieldKey )
         # Fall back to showing the raw key so output stays aligned.
         values.append( fieldKey )
         continue
      ( strep, consumed ) = fieldReader( data, cur )
      cur += consumed
      values.append( strep )
   return ( fmtSpec[ 0 ].format( *values ), cur - start )

def readVrfIdPair( data, cur ):
   # Decode a ( source vrf id, destination vrf id ) pair.
   ( srcVrfId, n1 ) = readU32( data, cur )
   ( dstVrfId, n2 ) = readU32( data, cur + n1 )
   return ( '(%d-%d)' % ( srcVrfId, dstVrfId ), n1 + n2 )

def readVlanIpPair( data, cur ):
   # Decode a vlan id followed by an IpGenAddr.
   ( vlan, n ) = readU16( data, cur )
   off = n
   ( ip, n ) = readIpGenAddr( data, cur + off )
   off += n
   return ( "[%d,%s]" % ( vlan, ip ), off )

def readVlanIpPairOptimized( data, cur ):
   # Decode a vlan id followed by an optimized-encoding IpGenAddr.
   ( vlan, n ) = readU16( data, cur )
   off = n
   ( ip, n ) = readIpGenAddrOpt( data, cur + off )
   off += n
   return ( "[%d,%s]" % ( vlan, ip ), off )

def readIpMacV2( data, cur ):
   # Decode an ( ip, mac, changeCount, preference ) record.
   off = 0
   ( ip, n ) = readIpGenAddr( data, cur + off )
   off += n
   ( mac, n ) = readEth( data, cur + off )
   off += n
   ( changeCount, n ) = readU32( data, cur + off )
   off += n
   ( preference, n ) = readU8( data, cur + off )
   off += n
   return ( "[%s,%s,%u,%d]" % ( ip, mac, changeCount, preference ), off )

def readIpMacV2Optimized( data, cur ):
   # Same record as readIpMacV2, but with the optimized IpGenAddr encoding.
   off = 0
   ( ip, n ) = readIpGenAddrOpt( data, cur + off )
   off += n
   ( mac, n ) = readEth( data, cur + off )
   off += n
   ( changeCount, n ) = readU32( data, cur + off )
   off += n
   ( preference, n ) = readU8( data, cur + off )
   off += n
   return ( "[%s,%s,%u,%d]" % ( ip, mac, changeCount, preference ), off )

def readVniSourcePair( data, cur ):
   # Decode a ( vni, vti, source ) triple: one U32 followed by two strings.
   off = 0
   ( vni, n ) = readU32( data, cur + off )
   off += n
   ( vti, n ) = readString( data, cur + off )
   off += n
   ( src, n ) = readString( data, cur + off )
   off += n
   return ( "[%d,%s,%s]" % ( vni, vti, src ), off )

def readUpnpPortMapKey( data, cur ):
   # Decode a UPnP port-map key: ip address, port and protocol number.
   off = 0
   ( ip, n ) = readIp( data, cur + off )
   off += n
   ( port, n ) = readU16( data, cur + off )
   off += n
   ( proto, n ) = readU8( data, cur + off )
   off += n
   return ( "(%s:%d:%d)" % ( ip, port, proto ), off )

# Names for the routing-protocol enum values.  Hoisted to module level so
# the table is not rebuilt on every call (this reader runs once per traced
# field).  Keep this in sync with the enum in IpRibLib.tac, adding any
# new protos at the end.
_routingProtocolName = {
   0 : 'reserved',
   1 : 'connected',
   2 : 'staticConfig',
   3 : 'bgp',
   4 : 'routeInput',
   5 : 'ospf',
   6 : 'ospf3',
   7 : 'isis',
   8 : 'dynamicPolicy',
   9 : 'vrfLeak',
   10 : 'rip',
   11 : 'staticRouteCacheConfig',
   12 : 'gribi',
   13 : 'cbf',
   14 : 'routeInputRouteCacheConfig',
}

def readRoutingProtocol( data, cur ):
   """Decode a routing-protocol enum byte.

   Returns ( protocol name, bytes consumed ); values missing from the
   table (protocols newer than this script) render as their decimal value.
   """
   ( d, off ) = readU8( data, cur )
   return ( _routingProtocolName.get( d, '%d' % d ), off )

def readRibRouteKey( data, cur ):
   # Decode a RIB route key: vrf id followed by a prefix.
   ( vrfId, n1 ) = readU32( data, cur )
   ( prefix, n2 ) = readIpGenPrefix( data, cur + n1 )
   return ( '(%s, %s)' % ( vrfId, prefix ), n1 + n2 )

def readRibRouteKeyOptimized( data, cur ):
   # Decode a RIB route key with the optimized prefix encoding.
   ( vrfId, n1 ) = readU32( data, cur )
   ( prefix, n2 ) = readIpGenPrefixOpt( data, cur + n1 )
   return ( '(%s, %s)' % ( vrfId, prefix ), n1 + n2 )

def readRibViaKey( data, cur ):
   # Decode a RIB via key: 64-bit via id, owning vrf id and protocol.
   off = 0
   ( viaId, n ) = readU64( data, cur + off )
   off += n
   ( vrfId, n ) = readU32( data, cur + off )
   off += n
   ( proto, n ) = readRoutingProtocol( data, cur + off )
   off += n
   return ( f'(id:{viaId}, vrfId:{vrfId}, proto:{proto})', off )

# Names for the Arnet::AddressFamily enum.  Hoisted to module level so the
# table is not rebuilt on every call.
_addressFamilyName = {
   0 : 'unknown',
   1 : 'ipv4',
   2 : 'ipv6',
}

# this one reads the Arnet::AddressFamily enum
def readAf( data, cur ):
   """Decode an Arnet::AddressFamily enum byte.

   Returns ( family name, bytes consumed ); unknown values render as
   their decimal value.
   """
   ( af, off ) = readU8( data, cur )
   return ( _addressFamilyName.get( af, '%d' % af ), off )

def readAfProto( data, cur ):
   # Decode an ( address family, routing protocol ) pair.
   ( af, n1 ) = readAf( data, cur )
   ( proto, n2 ) = readRoutingProtocol( data, cur + n1 )
   return ( '(%s,%s)' % ( af, proto ), n1 + n2 )

def readAfProtoVrfId( data, cur ):
   # Decode an ( address family, routing protocol, vrf id ) triple.
   off = 0
   ( af, n ) = readAf( data, cur + off )
   off += n
   ( proto, n ) = readRoutingProtocol( data, cur + off )
   off += n
   ( vrfId, n ) = readU32( data, cur + off )
   off += n
   return ( '(%s,%s,%s)' % ( af, proto, vrfId ), off )

def readLfibViaKey( data, cur ):
   # Decode an LFIB via key: source byte, 32-bit index and via type byte.
   off = 0
   ( src, n ) = readU8( data, cur + off )
   off += n
   ( idx, n ) = readU32( data, cur + off )
   off += n
   ( viaType, n ) = readU8( data, cur + off )
   off += n
   return ( '(%d,%d,%d)' % ( src, idx, viaType ), off )

def readMplsRouterId( data, cur ):
   # Decode an MPLS router id: a protocol byte followed by either an ISIS
   # system id (protocol 3) or an IPv4 address plus port.
   ( protocol, _ ) = readU8( data, cur )
   if protocol == 3:
      ( rtrId, consumed ) = readIsisSystemId( data, cur + 1 )
   else:
      # The address bytes were serialized already in big-endian (network)
      # order, so readIpAndPort (which assumes little endian) cannot be
      # used here.
      ( a, b, c, d ) = struct.unpack_from( "BBBB", data, cur + 1 )
      # The 2-byte port is likewise big-endian.
      port = struct.unpack_from( "!H", data, cur + 5 )[ 0 ]
      rtrId = "%d.%d.%d.%d:%d" % ( a, b, c, d, port )
      consumed = 6
   # One extra byte for the protocol field itself.
   return ( "%s" % rtrId, consumed + 1 )

def parseRouteTarget( value ):
   """Render a 64-bit extended-community route target as 'global:local'.

   The top byte of value selects the encoding: 0 is AS(16-bit):nn(32-bit),
   1 is IP(32-bit):nn(16-bit), 2 is AS(32-bit):nn(16-bit).  Any other type
   renders as INVALID with the raw value in hex.
   """
   extCommType = value >> 56
   if extCommType == 0:
      # AS(16-bit):nn(32-bit)
      return '%d:%d' % ( ( value >> 32 ) & 0xFFFF, value & 0xFFFFFFFF )
   if extCommType == 1:
      # IP(32-bit):nn(16-bit)
      ip = ( value >> 16 ) & 0xFFFFFFFF
      return '%d.%d.%d.%d:%d' % ( ip >> 24, ( ip >> 16 ) & 0xFF,
                                  ( ip >> 8 ) & 0xFF, ip & 0xFF,
                                  value & 0xFFFF )
   if extCommType == 2:
      # AS(32-bit):nn(16-bit).  For AS values that would also fit in
      # 2 bytes, append an 'L' to mark this as a 4-byte-AS route target.
      asn = ( value >> 16 ) & 0xFFFFFFFF
      suffix = 'L' if asn <= 0xFFFF else ''
      return '%d%s:%d' % ( asn, suffix, value & 0xFFFF )
   # Invalid type: just return the raw data.
   return 'INVALID:0x%X' % value

def readRouteTarget( data, cur ):
   # Decode a 64-bit route target and render it via parseRouteTarget.
   ( rtValue, consumed ) = readU64( data, cur )
   return ( parseRouteTarget( rtValue ), consumed )

def readRouteDistinguisher( data, cur ):
   """Decode a 64-bit BGP route distinguisher.

   The top 16 bits carry the RD type: type 0 is AS(16):number(32), type 1
   is IPv4:number(16), type 2 is AS(32):number(16).  Any other type is
   rendered as raw hex.  Returns ( string form, bytes consumed ).
   """
   ( rdValue, off ) = readU64( data, cur )
   rdType = rdValue >> 48

   def _dottedQuad( value ):
      # Render a 32-bit value as a big-endian dotted quad.  The previous
      # implementation round-tripped through native-endian struct.pack('I')
      # and reversed the bytes, which only produced the right order on
      # little-endian hosts; explicit shifts are endian-independent.
      return '%u.%u.%u.%u' % ( ( value >> 24 ) & 0xff, ( value >> 16 ) & 0xff,
                               ( value >> 8 ) & 0xff, value & 0xff )

   if not rdType:
      type0Administrator = ( rdValue >> 32 ) & 0xffff
      type0LocallyAssignedNumber = rdValue & 0xffffffff
      # Mirror guess in Arnet::RouteDistinguisher to_stringb: a large
      # assigned number is assumed to really be an IPv4 address.
      if type0LocallyAssignedNumber >= 0x01000000:
         return ( '%u:%s' % ( type0Administrator,
                              _dottedQuad( type0LocallyAssignedNumber ) ), off )
      return ( '%u:%u' % ( type0Administrator, type0LocallyAssignedNumber ), off )
   if rdType == 1:
      type1Administrator = ( rdValue >> 16 ) & 0xffffffff
      type1LocallyAssignedNumber = rdValue & 0xffff
      return ( '%s:%u' % ( _dottedQuad( type1Administrator ),
                           type1LocallyAssignedNumber ), off )
   if rdType == 2:
      type2Administrator = ( rdValue >> 16 ) & 0xffffffff
      type2LocallyAssignedNumber = rdValue & 0xffff
      return ( '%u:%u' % ( type2Administrator, type2LocallyAssignedNumber ), off )
   return ( '0x%016x' % ( rdValue ), off )

def readRtMembershipRouteKey( data, cur ):
   # Decode an RT membership route key: origin AS, RT value, prefix length.
   off = 0
   ( originAs, n ) = readU32( data, cur + off )
   off += n
   ( rtValue, n ) = readU64( data, cur + off )
   off += n
   ( length, n ) = readU8( data, cur + off )
   off += n
   rendered = 'origin: %s;%s/%s; value: 0x%016X' % (
         originAs, parseRouteTarget( rtValue ), length, rtValue )
   return ( rendered, off )

def readUdfDesc( data, cur ):
   # Decode a UDF (user-defined field) descriptor.  The fields appear in
   # this fixed order in the serialized form.
   fieldReaders = ( readString, readU16, readString, readU8, readU16,
                    readU8, readU8, readU32, readU8 )
   values = []
   off = 0
   for fieldReader in fieldReaders:
      ( value, n ) = fieldReader( data, cur + off )
      off += n
      values.append( value )
   ( aclType, ethType, base, ipProto, innerEtherType, offset, length,
     lagHashingMask, priority ) = values
   rendered = ( '%s ethtype:0x%x %s ipProto:%d innerEthType:0x%x offset:%d '
         'length:%d lagMask:0x%x priority:%d' ) % (
         aclType, ethType, base, ipProto, innerEtherType, offset, length,
         lagHashingMask, priority )
   return ( rendered, off )

def readIntfEncap( data, cur ):
   # Decode an interface encap: intf id plus outer and inner vlan ids.
   off = 0
   ( intfId, n ) = readString( data, cur + off )
   off += n
   ( outerVid, n ) = readU16( data, cur + off )
   off += n
   ( innerVid, n ) = readU16( data, cur + off )
   off += n
   return ( '%s(%d,%d)' % ( intfId, outerVid, innerVid ), off )

def readIntfEncapV2( data, cur ):
   # Decode a V2 interface encap: intf id followed by outer and inner
   # dot1q tags, each serialized as ( vid, tpid, priority ).
   ( intfId, off ) = readString( data, cur )

   def _readTag( off ):
      ( vid, n ) = readU16( data, cur + off )
      off += n
      ( tpid, n ) = readU16( data, cur + off )
      off += n
      ( priority, n ) = readU8( data, cur + off )
      off += n
      # A default tag (standard TPID, priority 0) renders as the bare vid.
      if tpid == 0x8100 and priority == 0:
         return ( str( vid ), off )
      return ( "0x%x:p%d:%d" % ( tpid, priority, vid ), off )

   ( outer, off ) = _readTag( off )
   ( inner, off ) = _readTag( off )
   return ( '%s(%s,%s)' % ( intfId, outer, inner ), off )

def readIpFlowKey( data, cur ):
   # Decode an IP flow key: vrf, vlan, addresses, protocol and ports.
   off = 0
   ( vrfId, n ) = readU32( data, cur + off )
   off += n
   ( vlanId, n ) = readU16( data, cur + off )
   off += n
   ( srcAddr, n ) = readIp( data, cur + off )
   off += n
   ( dstAddr, n ) = readIp( data, cur + off )
   off += n
   ( ipProtocolNumber, n ) = readU8( data, cur + off )
   off += n
   ( srcPort, n ) = readU16( data, cur + off )
   off += n
   ( dstPort, n ) = readU16( data, cur + off )
   off += n
   rendered = ( "vrfId:%d vlanId:%d srcAddr:%s dstAddr:%s ipProtocol:%d "
           "srcPort:%d dstPort:%d" ) % ( vrfId, vlanId, srcAddr, dstAddr,
           ipProtocolNumber, srcPort, dstPort )
   return ( rendered, off )

def readHoIpFlowKey( data, cur ):
   # Decode an IP flow key qualified by an interface name.
   ( ipFlowKey, off ) = readIpFlowKey( data, cur )
   ( intf, n ) = readString( data, cur + off )
   off += n
   return ( "%s intf:%s" % ( ipFlowKey, intf ), off )

# Names for the multicast tunnel type enum.  Hoisted to module level so the
# table is not rebuilt on every call.  Keep this in sync with the enum in
# McastVpnLib/IpTunnelTypes.tac, adding any new values at the end.
_multicastTunnelTypeName = {
   0: 'tunnelTypePimSm',
   1: 'tunnelTypePimSsm',
   2: 'tunnelTypePimBidir',
}

def readMulticastTunnelType( data, cur ):
   """Decode a multicast tunnel type enum byte.

   Returns ( type name, bytes consumed ); unknown values render as their
   decimal value.
   """
   ( tt, off ) = readU8( data, cur )
   return ( _multicastTunnelTypeName.get( tt, '%d' % tt ), off )

def readOutAcKey( data, cur ):
   # Decode an out-AC key.  The fields appear in this fixed order in the
   # serialized form: fapMask, three AC-format words, acIndex, pool,
   # appId, intfId, vlanId, innerOutlif, isL2SubIntf.
   fieldReaders = ( readFapmask, readU32, readU32, readU32, readU32,
                    readU8, readU8, readString, readU16, readU32, readBool )
   values = []
   off = 0
   for fieldReader in fieldReaders:
      ( value, n ) = fieldReader( data, cur + off )
      off += n
      values.append( value )
   rendered = ( "(fapMask: {}, acFormat: {}, {}, {}, acIndex: {}, pool: {}, "
                "appId: {}, intfId: {}, vlanId: {}, innerOutlif: {}, "
                "isL2SubIntf: {})" ).format( *values )
   return ( rendered, off )

def readOutAcTypeKey( data, cur ):
   # Decode an out-AC type key: fap id, AC index and AC type.
   off = 0
   ( fapId, n ) = readU16( data, cur + off )
   off += n
   ( acIndex, n ) = readU32( data, cur + off )
   off += n
   ( outAcType, n ) = readU8( data, cur + off )
   off += n
   return ( "(fapId: {}, acIndex: {}, outAcType: {})".format(
      fapId, acIndex, outAcType ), off )

def readSviEgressEveKey( data, cur ):
   # Decode an SVI egress EVE key: vlan intf, member intf, fap id and lag.
   off = 0
   ( vlanIntfId, n ) = readString( data, cur + off )
   off += n
   ( intfId, n ) = readString( data, cur + off )
   off += n
   ( fapId, n ) = readU16( data, cur + off )
   off += n
   ( lagIntfId, n ) = readString( data, cur + off )
   off += n
   return ( "(vlanIntfId: {}, intfId: {}, fapId: {}, lagIntfId: {})".format(
      vlanIntfId, intfId, fapId, lagIntfId ), off )

def readSviEgressEsemRequestKey( data, cur ):
   # Decode an SVI egress ESEM request key: an EVE key plus an AC type key.
   ( eveKey, off ) = readSviEgressEveKey( data, cur )
   ( outAcTypeKey, n ) = readOutAcTypeKey( data, cur + off )
   return ( "(eveKey: {}, outAcTypeKey: {})".format( eveKey, outAcTypeKey ),
            off + n )

def printPktUsingPktTraceFormatter( tpid, tci, flags, origLen, pktData ):
   # Render a packet via Arnet::PktTraceFormatter.  Only callable when the
   # Tac import at the top of the file succeeded (havePktTraceFormatter).
   # origLen is accepted for symmetry with the hex fallback path in
   # readArnetPkt but is not used here.
   pkt = Tac.newInstance( 'Arnet::Pkt' )
   if flags:
      # A nonzero flags byte means the serialized packet carried a vlan tag.
      pkt.tciPresent = True
      pkt.vlanTpid = tpid
      pkt.tciField = tci
   pkt.stringValue = pktData
   return pktTraceFormatter.traceStringVerbose( pkt ).strip()

def readArnetPkt( data, cur ):
   # Decode a serialized Arnet::Pkt: vlan tpid/tci, flags, original length
   # and the packet bytes.  Rendered via PktTraceFormatter when available,
   # otherwise as hex with a textual vlan prefix.
   off = 0
   ( tpid, n ) = readU16( data, cur + off )
   off += n
   ( tci, n ) = readU16( data, cur + off )
   off += n
   ( flags, n ) = readU8( data, cur + off )
   off += n
   ( origLen, n ) = readU16( data, cur + off )
   off += n
   ( pktData, n ) = readBytes( data, cur + off )
   off += n
   if havePktTraceFormatter:
      rendered = printPktUsingPktTraceFormatter( tpid, tci, flags, origLen,
                                                 pktData )
   else:
      rendered = pktData.hex()
      if flags:
         # See /src/Arnet/Pkt.h for these fields
         prio = ( tci >> 13 ) & 0x07
         cfi = bool( tci & 0x1000 )
         vlanId = tci & 0x0fff
         vlanInfo = "(tpid 0x%x vlan %d" % ( tpid, vlanId )
         if prio != 0:
            vlanInfo += " p %d" % ( prio, )
         if cfi:
            vlanInfo += " CFI"
         vlanInfo += ") "
         rendered = vlanInfo + rendered
   # Newlines would break the one-message-per-line trace output.
   return ( rendered.replace( '\n', '\\' ), off )

def readOpt( reader, data, cur ):
   # Decode an optional value: a presence byte, then (if set) the wrapped
   # value decoded by 'reader'.  Returns ( string form, bytes consumed ).
   present = struct.unpack_from( 'B', data, cur )[ 0 ]
   if not present:
      return ( "OptionalNone", 1 )
   ( strep, consumed ) = reader( data, cur + 1 )
   # One extra byte for the presence flag itself.
   return ( strep, consumed + 1 )

percentFormatMap = {
   # The sockaddrUnInet is changed to a string.  So %A and %M need to be
   # changed to %s.
   "A": "%s",
   "M": "%s",

   # Memory addresses are treated as numbers.  So %p needs to be changed to
   # a recognized format.
   "p": "%#x",

   # Preserve %%
   "%": "%%",
}
# DFA states: outside a specifier, after a '%', and after '%#'.
BEGIN = ""
PERCENT = "%"
PERCENT_HASH = "%#"

def replaceFormatSpecifiers( msg ):
   """
   Small state machine that rewrites certain format specifiers in msg.

   A "%" (not preceded by another "%") followed by one character is
   rewritten according to percentFormatMap; "%#A" is additionally
   rewritten to "%s" to maintain backward compatibility.  Everything else
   passes through unchanged (a trailing bare "%" is dropped).

   Examples:
     %%d (no change)
     %A -> %s
     %p -> %#x
     %%#A (no change)
     %#A -> %s
   """
   pieces = []
   state = BEGIN
   for c in msg:
      if state == BEGIN:
         if c == "%":
            state = PERCENT
         else:
            pieces.append( c )
      elif state == PERCENT:
         if c == "#":
            state = PERCENT_HASH
         else:
            state = BEGIN
            pieces.append( percentFormatMap.get( c, "%" + c ) )
      else:
         # state == PERCENT_HASH
         state = BEGIN
         pieces.append( "%s" if c == "A" else "%#" + c )
   return "".join( pieces )


# Size in bytes of the per-buffer trailer region.
TrailerSize = 256
# A message descriptor parsed from the file: creation tsc, source
# file/line, the message text and its comma-separated field-format string.
Msg = namedtuple( 'Msg','tsc filename line msg format' )
class QtReader:
   # Readers that wrap another reader, selected by format-string prefix:
   # e.g. key 'OPTi' decodes an optional U32 (the suffix after the prefix
   # names the inner reader; see reader() below).
   parameterizedReaders = {
         'OPT': readOpt
   }
   # Maps a field-format key (one comma-separated token of a message's
   # format string) to the deserializer that decodes that field from the
   # trace buffer.  Every reader takes ( data, cur ) and returns
   # ( string representation, bytes consumed ).
   readers = {
      'u': readU8,
      's': readU16,
      'i': readU32,
      'q': readU64,
      'c': readChar,
      'p': readString,
      'b': readBool,
      'f': readFloat,
      'd': readDouble,
      'A': readAfAddr,
      'AF': readAf,
      'AFI': readAfi,
      'AFS': readAfiSafi,
      'AM4': readIpAddrWithMask,
      'AM6': readIp6AddrWithMask,
      'BMPRK': readBmpRouteKey,
      'BNH': readBgpGenNextHop,
      'BP' : readBfdPeer,
      'BPO' : readBfdPeerOptimized,
      'BPK' : readBgpPeerKey,
      'BRK': readBgpRouteKeyOld,
      'BRY': readBgpRouteKeyNew,
      'C': readConnTuple,
      'CV': readCspfVertex,
      'DMBK' : readDuidMacBindingKey,
      'DSFID': readDsfSystemId,
      'E': readEth,
      'EC': readEthColonSeparated,
      'EL': readEvpnLabel,
      'EL2': readEvpnL2Attrs,
      'EL2old': readEvpnL2AttrsWrongEnums,
      'EM': readEthMask,
      'ES': readEthSegment,
      'F': readFapmask,
      'FM' : readMplsFwdEqvClass,
      'FMO' : readMplsFwdEqvClassOptimized,
      'GFID' : readGlobalFecId,
      'GR': readGenRange,
      'GRO': readGenRangeOptimized,
      'HIFK' : readHoIpFlowKey,
      'I': readIp,
      'I6': readIp6Addr,
      'IDS': readIdSet,
      'IE' : readIntfEncap,
      'IE2' : readIntfEncapV2,
      'IFK' : readIpFlowKey,
      'IG': readIpGenAddr,
      'IGO': readIpGenAddrOpt,
      'IGM': readIpGenAddrWithFullMask,
      'IGMO': readIpGenAddrOptWithFullMask,
      'IGPN': readIgpNodeId,
      'IP': readIpAndPort,
      'IPMACV2': readIpMacV2,
      'IPMACV2O': readIpMacV2Optimized,
      'IS': readIsisSystemId,
      'LO': readMplsLabelOperation,
      'LVK': readLfibViaKey,
      'MF': readMRouteFlags,
      'MFGK': readMfibGenRouteKey,
      'MFGKO': readMfibGenRouteKeyOptimized,
      'MFK': readMfibRouteKey,
      'MIJ': readMRouteIntfJoinState,
      'MIR': readMRouteIntfRptJoinState,
      'MK': readMRouteKey,
      'MKO': readMRouteKeyOptimized,
      'ML': readMplsLabel,
      'MRTR': readMplsRouterId,
      'MTT' : readMulticastTunnelType,
      'MV': readMacVtep,
      'MVP': readMacVlanPair,
      'NLRI': readNlriType,
      'NLRI2AFISAFI': readNlriType2AfiSafi,
      'NDC': readNatDynamicConn,
      'NDK': readNatDynamicKey,
      'NSK': readNatStaticKey,
      'P': readIpPrefix,
      'P6': readIp6Prefix,
      'PG': readIpGenPrefix,
      'PGO': readIpGenPrefixOpt,
      'PGKR' : readIpGenUcastKeyRef,
      'PGKRO' : readIpGenUcastKeyRefOptimized,
      'PKT' : readArnetPkt,
      'PW': readPseudowireKey,
      'PWO': readPseudowireKeyOptimized,
      'RD': readRouteDistinguisher,
      'RIBINT': readAdjRibinType,
      'RRRK': readRibRouteKey,
      'RRRKO': readRibRouteKeyOptimized,
      'RRRP': readRoutingProtocol,
      'RT': readRouteTarget,
      'RTMRK' : readRtMembershipRouteKey,
      'SAFI': readSafiLegacy, # Not used any more, see readAfiSafi
      'SVIESEM' : readSviEgressEsemRequestKey,
      'SVIEVE' : readSviEgressEveKey,
      'OUTAC' : readOutAcKey,
      'OUTACTYPE' : readOutAcTypeKey,
      'TK': readTunnelKey,
      'TKG': readTunnelKeyGen,
      'TKGO': readTunnelKeyGenOptimized,
      'UDF' : readUdfDesc,
      'US': readUnsignedSequence,
      'VIDP': readVrfIdPair,
      'VIP': readVlanIpPair,
      'VIPO': readVlanIpPairOptimized,
      'VLAFP': readAfProto,
      'VLAFPV': readAfProtoVrfId,
      'VSP': readVniSourcePair,
      'PMK' : readUpnpPortMapKey,
      'RVK' : readRibViaKey,
   }

   # Check that non-parameterized format strings do not overlap with the
   # parameterized ones.
   for innerFmt in readers:
      for paramFmt in parameterizedReaders:
         assert not innerFmt.startswith( paramFmt )

   def __init__( self, fp ):
      """Load a QuickTrace buffer from the open file object fp.

      Handles plain and gzip-compressed files; fp is fully read and then
      closed.  Parses the fixed-offset file header only; the message
      descriptors are parsed later by readMessages().
      """
      self.msgIdExceededTotalCounters = None
      if fp.name.endswith( '.gz' ):
         with gzip.open( fp.name ) as f:
            self.data = f.read()
      else:
         self.data = fp.read()

      self.filename = os.path.basename(fp.name)
      fp.close()
      # self.sz is the total size allocated to the circular buffer
      self.version = readU32( self.data, 0 )[0]

      # File version 4 added a per-message self-time counter, growing each
      # profiling record by 8 bytes.
      self.hasSelf = self.version >= 4
      self.msgCounterSize = 16 # count(4) + tscHi(4) + tscCount(8)
      if self.hasSelf:
         # count(4) + tscHi(4) + tscCount(8) + tscSelfCount(8)
         self.msgCounterSize = 24

      # Fixed-offset header fields.
      self.sz = readU32( self.data, 4 )[0]
      self.fileHeaderSize = readU32( self.data, 8 )[0]
      self.fileTrailerSize = readU32( self.data, 12 )[0]
      self.firstMsgOffset = readU32( self.data, 16 )[0]
      self.logCount = readU32( self.data, 20 )[0]
      # Two (tsc, monotime) calibration samples plus one utc sample, used
      # to convert tsc ticks to wall-clock time.
      self.tsc0 = readU64( self.data, 24 )[0]
      self.monotime0 = readDouble( self.data, 32 )[0]
      self.tsc1 = readU64( self.data, 40 )[0]
      self.monotime1 = readDouble( self.data, 48 )[0]
      self.utc1 = readDouble( self.data, 56 )[0]
      if self.version == 1:
         # Version 1 had a single log region; later versions have a size
         # spec describing multiple regions.
         self.logSize = readU32( self.data, 64 )[0]
      elif self.version >= 2:
         self.logSizes = readSizeSpec( self.data, 64 )[0]
      self.ticksPerSecond = self._ticksPerSecond()
      # The profiling counters sit between firstMsgOffset and the end of
      # the header, one record per message id.
      self.numMsgCounters = \
         ( self.fileHeaderSize - self.firstMsgOffset ) // self.msgCounterSize

      # Directory used to hold message descriptors indexed by MsgId
      self.msgs = {}
      # If this string is non-empty we have encountered corruption.
      # Raise an exception with this message after emitting all the traces.
      self.corruptionMessage = ""
      self.caughtExceptions = ""
      # Warn the user if they are trying to parse a file format not supported
      # by this version of qtcat
      if self.version > mostRecentVersionSupported:
         warning = ( "The file you are trying to read is version {version}."
                     " This version of qtcat only supports up to file version"
                     " {supported}, so the output may be incorrect. Please use"
                     " a newer version of qtcat to ensure correct output.\n\n" )
         sys.stderr.write( warning.format( version=self.version,
                                           supported=mostRecentVersionSupported ) )

   def printHeader( self ):
      """Print the parsed file-header fields to stdout."""
      print( "Version:", self.version )
      print( "Size:", self.sz )
      print( "Header Size:", self.fileHeaderSize )
      print( "Trailer Size:", self.fileTrailerSize )
      # Fixed typo: this previously printed "Fist Message Offset".
      print( "First Message Offset:", self.firstMsgOffset )
      print( "Profiling counters:", self.numMsgCounters )
      print( "Log Count:", self.logCount )
      print( "tsc0:", self.tsc0 )
      print( "monotime0:", self.monotime0 )
      print( "tsc1:", self.tsc1 )
      print( "monotime1:", self.monotime1 )
      print( "utc1: ", self.utc1 )
      if self.version == 1:
         # Version 1 files have a single log region.
         print( "Log Size:", self.logSize )
      elif self.version >= 2:
         print( "Log Sizes:", self.logSizes )
      else:
         print( "Log Size: (unhandled header version: %u)" % self.version )

   def getTimestampSamples( self ):
      # Return the time calibration samples as
      # ( tsc0, tsc1, monotime0, monotime1, utc1 ).
      samples = ( self.tsc0, self.tsc1,
                  self.monotime0, self.monotime1, self.utc1 )
      return samples

   def setTimestampSamples( self, tsc0, tsc1, monotime0, monotime1, utc1 ):
      # Install externally supplied time calibration samples.
      self.tsc0, self.tsc1 = tsc0, tsc1
      self.monotime0, self.monotime1 = monotime0, monotime1
      self.utc1 = utc1
      # Re-calculate ticksPerSecond with revised values
      self.ticksPerSecond = self._ticksPerSecond()

   def dumpProfiling( self, verbose, withTsc, brief, parsable, json, selfProfiling ):
      """Print (or collect as JSON) the per-message profiling counters.

      Walks the counter records between firstMsgOffset and the end of the
      file header, one per message id, and prints count / time / average
      columns for each message that fired.

      NOTE(review): the 'verbose' argument is currently unused here; the
      inner int2hr has its own local 'verbose' default instead.
      """
      # must match definition in qtparse
      def addJsonProfilingInfo(qtfile, filename, line, msg, count, avg, total):
         #pylint: disable-msg=E0602
         global JSON
         JSON += [ {
            "qt": qtfile,
            "file": filename,
            "line": line,
            "msg": msg,
            "count": count,
            "average": (avg[:-1].strip() if avg != '-' else 'None'),
            "total": (total[:-1].strip() if total != '-' else 'None')
         } ]
      # Render a large count as ( reduced value, magnitude suffix ),
      # e.g. 1234567 -> ( 1, 'Mx' ).
      def int2hr(num, verbose=False):
         if verbose:
            return (num, "")
         elif num < 10000:
            return (num, "x")
         elif num < 10000000:
            return (num // 1000, "Kx")
         elif num < 10000000000:
            return (num // 1000000, "Mx")
         elif num < 10000000000000:
            return (num // 1000000000, "Gx")
         elif num < 10000000000000000:
            return (num // 1000000000000, "Tx")
         else:
            return (num // 1000000000000000, "Px")
      data = self.data
      cur = self.firstMsgOffset
      i = 0
      tps = self.ticksPerSecond
      msgCount = (self.fileHeaderSize - self.firstMsgOffset) // self.msgCounterSize

      # Column layouts differ between parsable and human-readable output,
      # and optionally include tsc and self-profiling columns.
      tscFmt = "%21s " if withTsc else "%s"
      selfProfilingFmt = ( tscFmt + "%14s %14s " ) if selfProfiling else "%s%s%s"
      if parsable:
         headerFmt = "%9s %10s %26s " + tscFmt + "%14s %12s " + selfProfilingFmt + \
                     "%s:%s \"%s\""
         fmt = "%4s%s%4s %10s %10s %15s " + tscFmt + "%14s %12s " + \
               selfProfilingFmt + "%s:%s \"%s\""
      else:
         headerFmt = "%9s %8s %26s " + tscFmt + "%14s %13s " + \
                     selfProfilingFmt + "%s:%s \"%s\""
         fmt = "%4s%s%4s %8s %10s %15s " + tscFmt + "%14s %13s " + \
               selfProfilingFmt + "%s:%s \"%s\""

      avgSelfTime = ""
      totalSelfTime = ""
      tscSelf = ""
      if selfProfiling:
         avgSelfTime = "avgSelfTime"
         totalSelfTime = "totalSelfTime"
         tscSelf = "tscSelf" if withTsc else ""

      if not json:
         print( headerFmt %
                ( "msgCount", "count", "time", "tsc" if withTsc else "",
                  "avgTime", "totalTime", tscSelf, avgSelfTime, totalSelfTime,
                  "filename", "line", "msg" ) )

      # One counter record per message id, laid out per msgCounterSize.
      while cur <= self.fileHeaderSize - self.msgCounterSize:
         count = readU32( data, cur )[ 0 ]
         tscHiEn = readU32( data, cur + 4 )[ 0 ]
         # The top bit of the tscHi word marks the message as turned off;
         # the remaining 31 bits are the high part of the last tsc.
         off = bool( tscHiEn & 0x80000000 )
         tscHi = tscHiEn & 0x7fffffff
         dateStr, timeStr = self.ticksToTimeStr( tscHi << 28 )
         tscCount = readU64( data, cur + 8 )[ 0 ]
         tscSelfCount = readU64( data, cur + 16 )[ 0 ] if self.hasSelf else 0
         # Message ids alias counter slots modulo msgCount; a descriptor
         # at i + msgCount means this slot is shared by several messages.
         multiple = bool(self.msg(i+1*msgCount))
         cur += self.msgCounterSize
         if count and ( i > 0 or multiple ):
            avgTicks = tscCount/float(count)
            avgTimeStr = ("%10.9f" % (avgTicks/tps)) if tscCount else "-"
            totalTimeStr = ("%12.6f" % (tscCount/tps)) if tscCount else "-"
            if tscCount and not parsable:
               avgTimeStr += "s"
               totalTimeStr += "s"
            magt = int2hr( tscCount )

            if tscSelfCount and selfProfiling:
               avgSelfTicks = tscSelfCount / float( count )
               avgSelfTimeStr = ( "%10.9f" % ( avgSelfTicks / tps ) )
               totalSelfTimeStr = ( "%12.6f" % ( tscSelfCount / tps ) )
               if not parsable:
                  avgSelfTimeStr += "s"
                  totalSelfTimeStr += "s"
               magtSelf = int2hr( tscSelfCount )
            elif selfProfiling:
               avgSelfTimeStr = "-"
               totalSelfTimeStr = "-"
            else:
               avgSelfTimeStr = ""
               totalSelfTimeStr = ""

            tscString = ""
            tscSelfString = ""
            if withTsc:
               tscString = ( "%s%s/%1.2fc" %
                             ( magt[0], magt[1],
                               avgTicks ) ).strip() if tscCount else "-"
               if selfProfiling and tscSelfCount:
                  tscSelfString = ( "%s%s/%1.2fc" %
                                    ( magtSelf[ 0 ], magtSelf[ 1 ],
                                      avgSelfTicks ) ).strip()
               elif selfProfiling:
                  tscSelfString = "-"

            if parsable:
               countStr = ("%10u" % count)
            else:
               countStr = ("%s%s" % int2hr( count ))
            # Emit one line per message id sharing this counter slot
            # (ids i, i + msgCount, i + 2 * msgCount, ...).
            n = 0
            if i == 0 :
               n = 1
               multiple = bool(self.msg(i+2*msgCount))
            while True:
               m = self.msg( i + n * msgCount)
               n += 1
               if m:
                  if brief and not tscCount:
                     continue
                  if json:
                     addJsonProfilingInfo(
                        self.filename,
                        m.filename,
                        m.line,
                        m.msg,
                        count,
                        avgTimeStr,
                        totalTimeStr
                     )
                  else:
                     print( fmt %
                      (i+(n-1)*msgCount,
                       "*" if multiple else " ",
                       "off" if off else "",
                       countStr,
                       "%s"%(dateStr),
                       "%s"%(timeStr),
                       tscString,
                       avgTimeStr,
                       totalTimeStr,
                       tscSelfString,
                       avgSelfTimeStr,
                       totalSelfTimeStr,
                       m.filename,
                       m.line,
                       m.msg) )
               elif n:
                  break
         i += 1

      if not json and not selfProfiling:
         print( "\nUse the '-s' option to include self-profiling columns in "
                "the profiling information" )

   def msg( self, i ):
      # Look up the descriptor for MsgId i, or None if unknown.
      # MsgIds are 1-based, hence the assertion.
      assert i > 0
      return self.msgs.get( i )

   def readMessages( self ):
      """Parse message descriptors into self.msgs, keyed by MsgId.

      Descriptors start after the circular buffer plus the file trailer.
      Each is an ASCII prefix 'tsc filename line [msgId] ' followed by a
      length-prefixed NUL-terminated message string and a length-prefixed
      NUL-terminated field-format string.  Parsing stops at the first
      chunk that does not match the descriptor prefix.
      """
      cur = self.sz + self.fileTrailerSize
      msgCount = 0
      tscFileLineRe = b'(\\d+)\\s(\\S+)\\s(\\d+)\\s'
      if self.version >= 3:
         # Version 3 file format includes the MsgId in the descriptor
         tscFileLineRe += b'(\\d+)\\s'
      while True:
         # See AID3904 for differences between file versions 2 and 3
         d = self.data[cur:cur+256]
         m = re.match( tscFileLineRe, d )
         if not m:
            break
         msgCount += 1
         cur += len( m.group(0) )
         tsc = int( m.group(1) )
         filename = m.group( 2 ).decode()
         line = int( m.group(3) )
         if self.version >= 3:
            msgId = int( m.group(4) )
         else:
            # Pre-version-3 formats implicitly number descriptors in file
            # order.
            msgId = msgCount
         if msgId > self.numMsgCounters:
            # Remember the overflow so callers can report the missing
            # profiling counters.
            self.msgIdExceededTotalCounters = msgId
         msgLen = toU32( self.data[cur:cur+4] )
         cur += 4
         msg = self.data[cur:cur+msgLen-1].decode() # minus one to skip null
         cur += msgLen
         formatLen = toU32( self.data[cur:cur+4] )
         # formatLen minus one to skip null
         fmt = self.data[cur+4 : cur+4+formatLen-1].decode()
         cur += formatLen+4
         self.msgs[ msgId ] = Msg( tsc, filename, line, msg, fmt )

   @classmethod
   def reader( cls, key ):
      """Look up the deserializer for a field-format key.

      A key beginning with a registered parameterized prefix is resolved
      by stripping the prefix, looking up the remainder in cls.readers and
      wrapping it with the prefix's parameterized reader.  Otherwise the
      key is looked up directly.  Returns None if no reader exists."""
      for prefix, paramReader in cls.parameterizedReaders.items():
         if not key.startswith( prefix ):
            continue
         inner = cls.readers.get( key[ len( prefix ) : ] )
         if not inner:
            # Prefix matched but the suffix has no reader: give up here,
            # matching the historical behavior (no fallthrough).
            return inner
         return lambda data, cur: paramReader( inner, data, cur )

      return cls.readers.get( key )

   def _msgs( self, offset, startOfTrailer, level ):
      """Read messages from offset up to the trailer at startOfTrailer. The last
      message might extend beyond startOfTrailer (i.e. ending in the trailer).

      Returns ( msgs, cur ) where msgs is a list of ( tsc, Msg, fields )
      tuples and cur is the offset just past the last record consumed.  If
      decoding raises, the list is terminated with synthetic warning
      messages and the corruption details are accumulated on self so the
      caller can raise after processing the whole file."""
      msgs = []
      data = self.data
      cur = offset
      try:
         while cur < startOfTrailer:
            m = None
            # Record layout: U64 tsc, U32 msgid, per-field payload, then a
            # one-byte backpointer (see _findOldest).
            tsc = toU64( data[ cur : cur + 8] )
            if tsc == 0:
               # A zero tsc marks the end of valid records.
               break
            msgid = toU32( data[cur+8:cur+12] )
            cur += 12
            m = self.msg( msgid )
            # msgid can be 0 sometimes. xxx
            fields = []
            if m.format:
               fieldTypes = m.format.split(",")
               for fieldKey in fieldTypes:
                  r = self.reader( fieldKey )
                  if not r:
                     # Unknown field type: keep the key itself as a
                     # placeholder rather than aborting the whole level.
                     print( "Failed to find a reader for key", fieldKey )
                     # sys.exit( 1 )
                     fields.append( fieldKey )
                     continue
                  (strep, nBytes) = r( data, cur )
                  cur += nBytes
                  fields.append( strep )
            cur += 1               # skip backpointer
            msgs.append( (tsc, m, fields) )
      except Exception as e: # pylint: disable=broad-except
         # This appends a warning message to the trace output and returns the
         # messages which could be extracted.
         # qtcat will go on to process the rest of the log levels
         # We will raise an exception after processing the entire qt file with the
         # location of the QTRACE statement traced before the corruption

         failedTsc = 0
         prevTrace = ""

         if msgs :
            # can't trust the current tsc - use the one from the previous trace
            failedTsc = msgs[ -1 ][ 0 ]
            prevTrace = "Level:{} {}:{}, ".format( level,
                                                   msgs[ -1 ][ 1 ].filename,
                                                   msgs[ -1 ][ 1 ].line )
         else:
            prevTrace = f" Level:{level} No previous trace, "

         currentTrace = ""
         if m:
            # We were able to parse the msg object before the exception
            # Add it to the exception message
            currentTrace = "Trace that was being processed during corruption: "
            currentTrace += f"Level:{level} {m.filename}:{m.line}"

         warningMsg = 30 * "#" + " Warning: Qt file corruption detected in " + \
                      "level " + str( level ) + " " + 30 * "#"
         m = Msg( failedTsc, "", 0, warningMsg, "" )
         msgs.append( ( failedTsc, m, [] ) )

         warningMsg = "#### There may be lost traces between here and" + \
                      " wrap point, or end of trace level - whichever comes" + \
                      " first ( timestamp is approximated )"
         m = Msg( failedTsc, "", 0, warningMsg, "" )
         msgs.append( ( failedTsc, m, [] ) )

         if self.corruptionMessage == "":
            self.corruptionMessage = "Last traces before corruption: "
         self.corruptionMessage += prevTrace
         if currentTrace:
            self.corruptionMessage += "\n" + currentTrace
         self.caughtExceptions += "\n" + type( e ).__name__ + ": " + str( e )

      return (msgs, cur)

   def _findOldest( self, splitPoint, tailPtr ):
      """Walk record backpointers from tailPtr back toward splitPoint and
      return the offset of the oldest intact record in the wrapped half of
      the log.  A tailPtr of 0 means the log never wrapped, in which case
      tailPtr is returned unchanged."""
      assert splitPoint > 0
      if tailPtr <= 0:
         return tailPtr
      buf = self.data
      pos = tailPtr
      while True:
         # The byte just before each record holds the previous record's
         # length (indexing a bytes object yields the int directly).
         prevLen = buf[ pos - 1 ]
         candidate = pos - 1 - prevLen
         if candidate <= splitPoint + 8:
            # Stepping further would cross into the newer half of the log.
            return pos
         pos = candidate

   def logStart( self, i ):
      """Return the byte offset at which log level *i* begins."""
      assert i < self.logCount
      if self.version == 1:
         # Version 1 files use one fixed size for every level.
         return self.fileHeaderSize + i * self.logSize
      if self.version >= 2:
         # Version 2+ files record per-level sizes in KiB.
         return self.fileHeaderSize + 1024 * sum( self.logSizes[ : i ] )
      assert False, 'Assert false for unchecked condition at logStart'
      return None

   def logEnd( self, i ):
      """Return the byte offset just past the end of log level *i*."""
      assert i < self.logCount
      if self.version == 1:
         # Version 1 files use one fixed size for every level.
         return self.fileHeaderSize + ( i + 1 ) * self.logSize
      if self.version >= 2:
         # Version 2+ files record per-level sizes in KiB.
         return self.fileHeaderSize + 1024 * sum( self.logSizes[ : i + 1 ] )
      assert False, 'Assert false for unchecked condition at logEnd'
      return None

   def _ticksPerSecond( self ):
      # Figure out how fast the TSC is going in the tracefile by
      # comparing the snapshots of tsc and clock_monotonic taken each
      # time the log wraps.
      t0 = self.monotime0
      tsc0 = self.tsc0
      t1 = self.monotime1
      tsc1 = self.tsc1

      # Now figure out how fast time was going in the tracefile
      tscDelta = tsc1 - tsc0
      tDelta = t1 - t0
      return float(tscDelta) / tDelta

   def ticksToTimeStr( self, ticks ):
      """Convert a raw TSC value into ( dateStr, timeStr ) for display.

      A zero/absent tick count yields placeholder dashes instead of an
      invalid timestamp."""
      if not ticks:
         return "-", "-"
      # Project the tick delta back from the newest snapshot into UTC.
      secondsBack = ( self.tsc1 - ticks ) / self.ticksPerSecond
      stamp = datetime.fromtimestamp( self.utc1 - secondsBack )
      datePart, timePart = stamp.strftime( '%Y-%m-%d %H:%M:%S.%f' ).split()
      return datePart, timePart

   def messages( self, level=0, qtFileName=None, wallClock=False ):
      """Yield decoded traces for one log level, oldest to newest.

      Each yielded tuple is:
        ( localtime (datetime), level, seconds (float), tsc,
          qtFileName (basename or ''), formatted message string, Msg ).
      Synthetic marker tuples ("first msg" / "wrapped here") are emitted at
      the start of each half of the ring buffer.  With wallClock=True, only
      messages carrying a wall-clock timestamp (line == 0) are yielded.
      """
      # Message header is:
      #   tail pointer U32
      #   old monotonic time double
      #   old rdtsc U64(8)
      #   new monotonic time double
      data = self.data
      logStart = self.logStart( level )
      if logStart > len( data ):
         # Offset past the end of the file -- drop into the debugger.
         pdb()

      tailPtr = toU32( data[ logStart : logStart + 4] )

      # Now figure out how fast the TSC is going in the tracefile by
      # comparing the snapshots of tsc and clock_monotonic taken each
      # time the log wraps.
      t0 = self.monotime0
      tsc0 = self.tsc0
      t1 = self.monotime1
      tsc1 = self.tsc1
      utc1 = self.utc1

      # xxx I need to handle the no-wrap case.

      # Now figure out how fast time was going in the tracefile
      tscDelta = tsc1 - tsc0
      tDelta = t1 - t0
      if tDelta == 0:
         # Identical snapshots: no usable timebase, so nothing to yield.
         print( "nothing at level", level )
         return
      ticksPerSecond = tscDelta / tDelta
      utc0 = utc1 - tDelta

      # Now we need to read the newest half of the log first, and
      # figure out where it ends
      end2 = self.logEnd( level ) - TrailerSize # start of trailer
      ( m1, end1 ) = self._msgs( logStart + 4, end2, level ) # skip tailPtr

      if (end1 < end2) and tailPtr:
         # Now walk backwards from the end of the log until we hit
         # where we just stopped
         oldest = self._findOldest( end1, tailPtr + logStart )

         # this is the oldest part of the log.  Read messages from here
         ( m2, _ ) = self._msgs( oldest, end2, level )
      else:
         # Log never wrapped (or newest half filled the level completely).
         m2 = []

      # Now print out all the log messages, oldest-to-newest.  The log
      # stores the time in clock_monotonic, but print it in localtime
      # as well.
      firstWrap = False
      firstMessage = True
      dashes = "--------------------------------"
      if not qtFileName:
         qtFileName = ''
      else:
         qtFileName = os.path.basename( qtFileName )
      # m2 (wrapped half) is older, so it is emitted before m1.
      for m in (m2, m1):
         for (tsc, msg, fields) in m:
            if wallClock and msg.line != 0:
               # Message with wall-clock timestamp are using 0 as line number.
               # Skip printing other messages without wall-clock timestamp.
               continue
            fields = tuple(fields)
            t = tsc / ticksPerSecond
            timeSinceWrap = (tsc - tsc0) / ticksPerSecond
            when = utc0 + timeSinceWrap
            #deltaTicks = tsc - lastTsc
            #lastTsc = tsc
            #timeStr = time.strftime( "%F %T", time.localtime(when) )
            #print "%2s %s (%10.9f, %s): \"%s\"" % \
            #    (timeStr,level,t,("+%s" % deltaTicks) if lastTsc else "-",mstr)
            if firstMessage or firstWrap:
               # Emit a synthetic separator before the first message of
               # each half of the ring.
               if firstWrap:
                  mstr = "%s %d wrapped here %s" % (dashes, level, dashes)
               else:
                  mstr = "%s level %d first msg %s" % (dashes, level, dashes)
               yield (datetime.fromtimestamp(when), level, t, tsc, qtFileName, mstr,
                      msg )
               firstMessage = False
               firstWrap = False

            msg_tmp = replaceFormatSpecifiers(msg.msg)

            try:
               if msg.line == 0:
                  # If the line number is 0, the first two fields in RingBuf data
                  # contains wall-clock timestamp (tv_sec & tv_usec).
                  # If wallClock option is enabled, use those fields to replace
                  # message timestamp. Otherwise ignore those fields.
                  if wallClock:
                     when = fields[ 0 ] + 0.000001 * fields[ 1 ]
                  fields = fields[ 2: ]
               mstr = msg_tmp % fields
            except ( ValueError, TypeError ):
               # Formatting failed: fall back to showing the raw format
               # string plus the comma-joined field values.
               mstr = "%s %% (%s) " % (msg_tmp, ",".join( [str(i) for i in fields] ))
            yield (datetime.fromtimestamp(when), level, t, tsc, qtFileName, mstr,
                   msg)
         if m2:
            firstWrap = True


def info( type_, value, tb ):
   """sys.excepthook replacement: print the traceback and return.

   (To drop into a post-mortem debugger on error instead, import pdb here
   and call pdb.pm().)"""
   import traceback # pylint: disable=import-outside-toplevel
   traceback.print_exception( type_, value, tb )

def expandRange( levelStr ):
   """Turn a string of single digits, optionally joined by dashes, into the
   corresponding list of ints, preserving order and duplicates.  A dash
   expands the surrounding digits as an inclusive range, so '92-583' yields
   [9, 2, 3, 4, 5, 8, 3].  Characters other than digits and dashes are
   ignored.  Used to parse the 'level' argument."""
   result = []
   pending = None     # digit held back: either emitted later or used as a range start
   rangeOpen = False  # saw a dash since the last digit
   for ch in levelStr:
      if "0" <= ch <= "9":
         digit = int( ch )
         if rangeOpen:
            # Close the range: emit pending..digit inclusive.
            result.extend( range( pending, digit + 1 ) )
            pending = None
         else:
            if pending is not None:
               result.append( pending )
            pending = digit
         rangeOpen = False
      elif ch == "-":
         rangeOpen = True
      # Anything else is silently skipped.

   if pending is not None:
      result.append( pending )
   return result

#test = [ "0-9", "02468", "0-59", "", "9", "3-0", "9 8 7 0" ]
#for i in test:
#   print i, expandRange(i)
#sys.exit(0)

def printMsgs( smsgs, options ):
   """Print an iterable of decoded trace tuples to stdout.

   Each element of smsgs is ( localtime, level, t, tsc, qtFileName, mstr,
   msg ) as yielded by QtReader.messages().  Display options control the
   time column, filtering, and whether file:line locations are shown."""
   pattern = re.compile( options.regexp ) if options.regexp else None
   # tdisplay field is overloaded and contains one of the values from below
   #    - tsc (when '--tsc' option is used)
   #    - qt file name (when 'merge' option is used with multiple source files)
   #    - absolute time (obsolete)
   #    - delta time since first message
   if options.tsc:
      tdisplayFmt = '0x%016x'
   elif options.merge:
      tdisplayFmt = '%s'
   else:
      tdisplayFmt = '%015.9f'
   if options.files:
      # localtime, level, tdisplay, deltaTicks, filename, line, mstr
      fmt = "%s %s " + tdisplayFmt + ", +%s %s:%s \"%s\""
   else:
      # localtime, level, tdisplay, deltaTicks, mstr
      fmt = "%s %s " + tdisplayFmt + ", +%s \"%s\""
   prevTsc = 0
   firstT = 0
   for ( localtime, level, t, tsc, qtFileName, mstr, msg ) in smsgs:
      # Filter: keep rows matching either the message text or file:line.
      if pattern and not pattern.search( mstr ) \
                 and not pattern.search( msg.filename + ":" + str( msg.line ) ):
         continue
      if not firstT:
         firstT = t
      if options.tsc:
         tdisplay = tsc
      elif options.merge:
         tdisplay = qtFileName
      elif options.absolute:
         tdisplay = t
      else:
         tdisplay = t - firstT
      timeStr = localtime.strftime( '%Y-%m-%d %H:%M:%S.%f' )
      deltaTicks = tsc - prevTsc
      prevTsc = tsc

      if options.files:
         print( fmt % ( timeStr, level, tdisplay, deltaTicks,
                        msg.filename, msg.line, mstr ) )
      else:
         print( fmt % ( timeStr, level, tdisplay, deltaTicks, mstr ) )

def gencat( sources ):
   """Lazily concatenate an iterable of iterables into one flat stream."""
   for source in sources:
      for item in source:
         yield item

def maybeRaiseQtcatCorruptionException( files ):
   """Raise QtcatCorruptionException for the first file whose reader
   recorded corruption; no-op when every file decoded cleanly."""
   for qtFileName in files:
      qtr = QtReaders[ qtFileName ]
      if qtr.corruptionMessage == "":
         continue
      # QtcatCorruptionException is raised only for the first corrupted file.
      print( "\nException indicated corruption in file ", qtFileName )
      detail = ( qtr.corruptionMessage +
                 "\nExceptions which indicated corruption: " +
                 qtr.caughtExceptions )
      raise QtcatCorruptionException( detail )

def getFileMsgs( qtFileName, options ):
   """Return a lazy stream of decoded traces for every requested level of
   qtFileName, using the cached reader from QtReaders."""
   sys.excepthook = info
   qtReader = QtReaders[ qtFileName ]
   qtReader.readMessages()
   perLevel = ( qtReader.messages( lvl, qtFileName, options.wallClock )
                for lvl in expandRange( options.levels ) )
   return gencat( perLevel )

def printFileMsgs( qtFileName, infile, options ):
   """Decode one qt file and print its trace log and/or profiling data.

   Reads from 'infile' when given (e.g. stdin); otherwise uses the cached
   QtReader for qtFileName.  Exits early after '--header'.  Raises
   QtcatCorruptionException at the end if the reader detected corruption."""
   what = options.what
   sys.excepthook = info
   qtr = QtReader( infile ) if infile else QtReaders[ qtFileName ]
   qtr.readMessages()
   if qtr.msgIdExceededTotalCounters:
      print( "\n\nWARNING: Total msgId exceeded the available counters : %d\n\n"
             % qtr.msgIdExceededTotalCounters )
   if options.header:
      qtr.printHeader()
      sys.exit(0)
   levels = expandRange( options.levels )

   if not what or 'trace' in what:
      # Gather every requested level and interleave by tsc (tuple index 3).
      stream = gencat(
             qtr.messages( lvl, qtFileName, options.wallClock )
             for lvl in levels )
      printMsgs( sorted( stream, key=lambda rec: rec[3] ), options )

   if what and 'profile' in what:
      qtr.dumpProfiling( options.verbose, options.tsc, options.brief,
                         options.parsable, options.json, options.selfProfiling )

   if qtr.corruptionMessage != "":
      raise QtcatCorruptionException( qtr.corruptionMessage +
                                      "\nExceptions which indicated corruption: " +
                                      qtr.caughtExceptions )

# Accumulator for '--json' profiling output; dumped once at the end of
# printFiles().
JSON = []

# Provides mapping from qtFileName to QtReader.
QtReaders = {}

def getQtReaders( files ):
   """Construct a QtReader for each input file and cache it in QtReaders."""
   for qtFileName in files:
      # QtReader loads the full file into memory and closes the handle
      # itself, so no 'with' block is needed here.
      # pylint: disable-next=consider-using-with
      QtReaders[ qtFileName ] = QtReader( open( qtFileName, mode='rb' ) )

def selectTimestampSamples():
   """Pick the globally earliest (tsc0, mt0) and latest (tsc1, mt1, utc1)
   snapshots across all loaded readers, then push that shared timebase back
   into every reader so merged output uses one consistent clock."""
   minTsc0 = minMt0 = 0
   maxTsc1 = maxMt1 = maxUtc1 = 0
   for qtr in QtReaders.values():
      tsc0, tsc1, mt0, mt1, utc1 = qtr.getTimestampSamples()
      # A zero minTsc0 means "not yet initialized".
      if minTsc0 == 0 or tsc0 < minTsc0:
         minTsc0, minMt0 = tsc0, mt0
      if tsc1 > maxTsc1:
         maxTsc1, maxMt1, maxUtc1 = tsc1, mt1, utc1
   for qtr in QtReaders.values():
      qtr.setTimestampSamples( minTsc0, maxTsc1, minMt0, maxMt1, maxUtc1 )

def mergeFiles( options, files ):
   """Interleave traces from several qt files into one tsc-sorted stream
   and print it."""
   selectTimestampSamples()
   combined = []
   for qtFileName in files:
      combined.extend( getFileMsgs( qtFileName, options ) )
   # Sort the merged stream by tsc (tuple index 3).
   printMsgs( sorted( combined, key=lambda rec: rec[3] ), options )
   # After merging messages from all the qt files, check if any of them are
   # corrupted
   maybeRaiseQtcatCorruptionException( files )

def printFiles( options, args ):
   """Top-level dispatch: decode stdin, merge several files, or print each
   file in turn (emitting collected JSON at the end when requested)."""
   if not args or args[0] == '-':
      # No filename (or an explicit '-'): decode the stream on stdin.
      printFileMsgs( None, getattr( sys.stdin, 'buffer', sys.stdin ), options )
      return
   files = args
   files.sort( reverse=True )
   getQtReaders( files )
   if options.merge and ( not options.what or 'trace' in options.what ):
      mergeFiles( options, files )
      return
   multipleFiles = len( files ) > 1
   for qtFileName in files:
      if multipleFiles and not options.json:
         print( "file: ", qtFileName )
      printFileMsgs( qtFileName, None, options )
   if options.json:
      import json # pylint: disable=import-outside-toplevel
      print( json.dumps( JSON, indent=2 ) )

def main():
   """Command-line entry point: parse options and decode the trace files."""
   import argparse # pylint: disable=import-outside-toplevel

   argp = argparse.ArgumentParser( description="decode a quicktrace file" )
   argp.add_argument('-a', '--absolute', action='store_true',
                     help="(Obsolete) Print absolute MONOTONIC time", default=False)
   argp.add_argument('--brief',
                     dest='brief', action='store_true',
                     help='print profiling information for function names only')
   argp.add_argument('-f', '--files', action='store_true',
                     help='print file and line number for trace statements')
   argp.add_argument('-H', '--header', action='store_true',
                     help="print qt header information and quit", default=False)
   argp.add_argument('-j', '--json', action='store_true', dest='json',
                     help='json format (profiling data only)')
   argp.add_argument('-l', '--levels', action='store', type=str,
                     help='list of levels', default="0-9")
   argp.add_argument('-m', '--merge', action='store_true',
                     help='merge output of all the files provided')
   argp.add_argument('-p', '--profile',
                     dest='what', action='append_const', const='profile',
                     help='dump profiling information')
   argp.add_argument('--parsable',
                     dest='parsable', action='store_true',
                     help='print profiling information in parsable format')
   argp.add_argument('-r', '--regexp', action='store', type=str,
                     help='regexp to match on' )
   argp.add_argument('-s', '--selfProfiling', action='store_true',
                     help="include self-profiling columns in profiling information",
                     default=False)
   argp.add_argument('-t', '--trace',
                     dest='what', action='append_const', const='trace',
                     help='dump trace log')
   argp.add_argument('--tsc',
                     dest='tsc', action='store_true',
                     help='print timestamp counter values')
   argp.add_argument('-v', '--verbose', action='store_true',
                     help='print full packet counts')
   argp.add_argument('-w', '--wallClock', action='store_true',
                  help='print only those messages which have wall-clock timestamp' )
   argp.add_argument( 'tracefiles', default='-', nargs='*', metavar='tracefile' )
   args = argp.parse_args()

   # -j options forces the profiling output
   if args.json:
      args.what = 'profile'

   if args.absolute:
      # '-a' is obsolete: refuse to run and point the user at '--tsc'.
      print( "The timestamp value printed using '-a' or '--absolute' option is "
             "not accurate enough for comparing across different qt files. "
             "Instead of '-a', use '--tsc' option." )
      return

   printFiles(args, args.tracefiles)

if __name__ ==  "__main__":
   try:
      main()
   except ( OSError ) as error:
      if error.errno == errno.EPIPE:
         # Catching IOError here makes 'qtcat ... | head' not generate an
         # ugly backtrace.
         # pylint: disable-msg=W0212
         os._exit(0)
      raise
   except KeyboardInterrupt:
      # Exit quietly on Ctrl-C without printing a traceback.
      # pylint: disable-msg=W0212
      os._exit(0)
