#!/usr/bin/env python
# Copyright (c) 2010, 2011, 2014 Arista Networks, Inc.  All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
from __future__ import absolute_import, division, print_function
import struct, sys, re, os, socket, array, gzip
import errno
from collections import namedtuple
from datetime import datetime

# Highest valid MPLS stack entry index; readMplsLabelOperation() rejects any
# stack size greater than MplsStackEntryIndex_max + 1.
MplsStackEntryIndex_max = 6

# This should be updated every time a new file version is added
mostRecentVersionSupported = 4

class QtcatCorruptionException( Exception ):
   """Raised when quicktrace data fails validation during decoding.

   The previous explicit __init__ only forwarded a single message argument
   to Exception, which Exception's default constructor already does, so it
   has been removed; construction with one message argument is unchanged.
   """

def pdb():
   """Drop into an interactive debugger in the caller's frame.

   NOTE: intentionally shadows the stdlib module name so callers can just
   sprinkle pdb() into code under investigation.
   """
   import pdb as pdb_
   import bdb
   try:
      # pylint: disable-msg=W0212
      # _getframe( 1 ) makes the debugger stop in the caller, not in here
      pdb_.Pdb().set_trace( sys._getframe( 1 ) )
   except bdb.BdbQuit:
      # pylint: disable-msg=W0212
      # hard-exit on debugger quit instead of unwinding through the target
      os._exit(1)


def toU8( s1 ):
   """Decode s1, a 1-byte string in host byte order, as an unsigned byte."""
   ( val, ) = struct.unpack( "B", s1 )
   return val

def toU32( s4 ):
   """Decode s4, a 4-byte string in host byte order, as a U32."""
   ( val, ) = struct.unpack( "I", s4 )
   return val

def toU64( s8 ):
   """Decode s8, an 8-byte string in host byte order, as a U64."""
   ( val, ) = struct.unpack( "Q", s8 )
   return val

#----------------------------------------------------------------
#----------------------------------------------------------------
# Deserializers
#----------------------------------------------------------------
#----------------------------------------------------------------
def readChar( data, cur ):
   # Return a one-byte field as its escaped representation, e.g. '\\x01'.
   # NOTE: 'string_escape' is a Python 2-only codec; readAddrFamily() below
   # relies on this exact escaped form to recover the numeric value.
   return ( data[cur].encode( 'string_escape' ), 1 )
# Pre-compiled struct formats for the fixed-width unsigned readers.
_u8Fmt = struct.Struct( 'B' )
_u16Fmt = struct.Struct( 'H' )
_u32Fmt = struct.Struct( 'I' )
_u64Fmt = struct.Struct( 'Q' )

def readU8( data, cur ):
   # unsigned 8-bit value; returns ( value, bytes consumed )
   return ( _u8Fmt.unpack_from( data, cur )[ 0 ], 1 )
def readU16( data, cur ):
   # unsigned 16-bit value in host byte order
   return ( _u16Fmt.unpack_from( data, cur )[ 0 ], 2 )
def readU32( data, cur ):
   # unsigned 32-bit value in host byte order
   return ( _u32Fmt.unpack_from( data, cur )[ 0 ], 4 )
def readU64( data, cur ):
   # unsigned 64-bit value in host byte order
   return ( _u64Fmt.unpack_from( data, cur )[ 0 ], 8 )
def readString( data, cur ):
   # pascal-style string: a 1-byte length n followed by n payload bytes
   strLen = struct.unpack_from( 'B', data, cur )[ 0 ]
   payloadStart = cur + 1
   return ( data[ payloadStart : payloadStart + strLen ], strLen + 1 )
def readBool( data, cur ):
   # single boolean byte
   ( flag, ) = struct.unpack_from( '?', data, cur )
   return ( flag, 1 )
def readFloat( data, cur ):
   # 4-byte IEEE-754 single-precision value
   ( val, ) = struct.unpack_from( 'f', data, cur )
   return ( val, 4 )
def readDouble( data, cur ):
   # 8-byte IEEE-754 double-precision value
   ( val, ) = struct.unpack_from( 'd', data, cur )
   return ( val, 8 )
def readIp( data, cur ):
   # octets are stored least-significant first; render most-significant first
   octets = struct.unpack_from( 'BBBB', data, cur )
   return ( '.'.join( str( o ) for o in reversed( octets ) ), 4 )
def readEth( data, cur ):
   # three 16-bit words in network ( ! ) byte order
   words = struct.unpack_from( '!HHH', data, cur )
   return ( '%04x.%04x.%04x' % words, 6 )
def readIpPrefix( data, cur ):
   # IPv4 address ( reversed octets ) followed by a 1-byte prefix length
   octets = struct.unpack_from( 'BBBB', data, cur )
   prefixLen = struct.unpack_from( 'B', data, cur + 4 )[ 0 ]
   addrStr = '.'.join( str( o ) for o in reversed( octets ) )
   return ( '%s/%s' % ( addrStr, prefixLen ), 5 )
def readIp6Addr( data, cur ):
   # 16 raw address bytes rendered via inet_ntop
   rawAddr = data[ cur : cur + 16 ]
   return ( socket.inet_ntop( socket.AF_INET6, rawAddr ), 16 )
def readIp6Prefix( data, cur ):
   # IPv6 address followed by a 1-byte prefix length
   ( addrStr, consumed ) = readIp6Addr( data, cur )
   prefixLen = struct.unpack_from( 'B', data, cur + consumed )[ 0 ]
   return ( '%s/%s' % ( addrStr, prefixLen ), consumed + 1 )
def readAddrFamily( data, cur ):
   # The family byte comes back from readChar() in escaped form, e.g. '\\x01'.
   # Replacing the backslash with '0' yields '0x01', which int( ..., 16 )
   # parses as the numeric address family value ( 1 == IPv4, 2 == IPv6 ).
   (afStr, off) = readChar( data, cur )
   af = int( afStr.replace( "\\", "0" ), 16 )
   return( af, off )
def readIpGenAddr( data, cur ):
   """Read an IpGenAddr: a family byte followed by both the IPv4 and IPv6
   encodings; the family selects which rendering is returned."""
   ( af, off ) = readAddrFamily( data, cur )
   ( ipv4Str, nBytes ) = readIp( data, cur + off )
   off += nBytes
   ( ipv6Str, nBytes ) = readIp6Addr( data, cur + off )
   off += nBytes
   byFamily = { 1: ipv4Str, 2: ipv6Str }
   return ( byFamily.get( af, 'unknown' ), off )
def readIpGenPrefix( data, cur ):
   '''Arnet::IpGenPrefix storage format:
       0                   1
       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8
      +-+---------+-----------------------+-+
      |A|  IPv4   |        <unused>       |L|
      +-+---------+-----------------------+-+

       0                   1
       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8
      +-+---------+-----------------------+-+
      |A|              IPv6               |L|
      +-+---------------------------------+-+
      A: Address Family
      L: Prefix Length
   '''
   ( af, off ) = readAddrFamily( data, cur )
   familyInfo = {
      1 : ( socket.AF_INET, 4 ),
      2 : ( socket.AF_INET6, 16 ),
   }
   if af in familyInfo:
      sockFamily, addrLen = familyInfo[ af ]
      ipStr = socket.inet_ntop( sockFamily,
                                data[ cur + off : cur + off + addrLen ] )
   else:
      ipStr = "unknownAddrFamily(%d)" % af
   # the address field is always padded out to 16 bytes regardless of family
   plen = struct.unpack_from( 'B', data, cur + off + 16 )[ 0 ]
   return ( "%s/%s" % ( ipStr, plen ), off + 17 )

def readIpGenAddrWithFullMask( data, cur ):
   """Read an address followed by a full mask, rendered as 'addr/mask'."""
   ( addrStr, off ) = readIpGenAddr( data, cur )
   ( maskStr, nBytes ) = readIpGenAddr( data, cur + off )
   return ( "%s/%s" % ( addrStr, maskStr ), off + nBytes )

def _readAfAddr( data, cur ):
   """Internal function to read a sockaddrUnInet structure."""
   sockLen = struct.unpack_from( 'B', data, cur )[ 0 ]

   # The address may be stored truncated: a /24 mask, for example, occupies
   # only three bytes ( 0xffffff ).  Missing trailing octets read as zero.
   octets = [ 0, 0, 0, 0 ]
   for ix in range( sockLen - 4 ):
      octets[ ix ] = struct.unpack_from( 'B', data, cur + 4 + ix )[ 0 ]

   return ( "%d.%d.%d.%d" % tuple( octets ), sockLen )

def _readAf6Addr( data, cur, length ):
   """Read the IPv6 address out of a sockaddr_un.in6 structure.

   The 8-byte sockaddr header is skipped; the remaining length - 8 bytes are
   the ( possibly truncated, for masks ) address.
   """
   # advance through sockaddr_un.in6 to gin6_addr field
   cur = cur + 8

   # Similar to _readAfAddr(), the actual number of bytes for the IPv6 address
   # can be less than 16 for masks.  So here we compute the number of address
   # bytes given to us.
   addrLen = length - 8

   # Left-justify to 16 bytes so inet_ntop() always sees a full address.
   # The fill character must match the slice's type: on Python 3 slices of
   # bytes are bytes and bytes.ljust() rejects a str fill character.
   gin6_addr = data[ cur : cur + addrLen ]
   if isinstance( gin6_addr, bytes ) and not isinstance( gin6_addr, str ):
      gin6_addr = gin6_addr.ljust( 16, b'\0' )
   else:
      gin6_addr = gin6_addr.ljust( 16, '\0' )

   return ( socket.inet_ntop( socket.AF_INET6, gin6_addr ), length )

def _readAf6LlAddr( data, cur, length ):
   """Read an IPv6 link-local address from a sockaddr_un structure.

   The structure holds the address plus, optionally, the ifname to which the
   address is bound, so the total length is >= 24 bytes.  The result is
   rendered as <addr>%<ifname>; when no ifname is stored, the ifindex
   embedded in the address bytes is appended in hex instead.
   """
   addrStr, _ = _readAf6Addr( data, cur, 24 )

   ifnameLen = length - 24

   if ifnameLen:
      ifname = data[ cur + 24 : cur + 24 + ifnameLen ]
      if isinstance( ifname, bytes ) and not isinstance( ifname, str ):
         # Python 3: a bytes slice cannot be concatenated with str below
         ifname = ifname.decode( 'utf-8', 'replace' )
   else:
      # No ifname. Extract ifindex from the addr and append it.
      ifname = format( struct.unpack( '>i', data[ cur + 10 : cur + 14 ] )[ 0 ],
                       'x' )
   return ( addrStr + '%' + ifname, length )

def _readIsoAddr( data, cur, length ):
   """Read an ISO address ( interpreted in the context of IS-IS ).

   Layout:
      byte0 : length
      byte1 : address family
      byte2 to length-1 : actual address
   Rendered as dotted 16-bit hex groups, e.g. 2626.2626.2626-a4 ( an odd
   trailing byte is set off with a dash ).
   """
   act_length = length - 2
   # bytearray() indexing yields ints on both Python 2 and 3; the previous
   # ord() calls broke on Python 3, where indexing bytes already gives ints.
   iso_addr = bytearray( data[ cur + 2 : cur + length ] )
   pieces = []
   for j in range( 0, act_length - 1, 2 ):
      pieces.append( "%02x%02x" % ( iso_addr[ j ], iso_addr[ j + 1 ] ) )
   isoString = '.'.join( pieces )
   if act_length % 2 != 0:
      isoString += "-%02x" % iso_addr[ act_length - 1 ]
   return ( isoString, length )

def _readIsoLspAddr( data, cur, length ):
   """Read an ISO LSP address ( interpreted in the context of IS-IS ).

   Layout:
      byte0 : length
      byte1 : address family
      byte2 : sub-type
      byte3 to length-1 : actual address
   Rendered as dotted 16-bit hex groups; an odd trailing byte is set off
   with a dash.
   """
   act_length = length - 3
   # bytearray() indexing yields ints on both Python 2 and 3; the previous
   # ord() calls broke on Python 3, where indexing bytes already gives ints.
   iso_addr = bytearray( data[ cur + 3 : cur + length ] )
   pieces = []
   for j in range( 0, act_length - 1, 2 ):
      pieces.append( "%02x%02x" % ( iso_addr[ j ], iso_addr[ j + 1 ] ) )
   isoString = '.'.join( pieces )
   if act_length % 2 != 0:
      isoString += "-%02x" % iso_addr[ act_length - 1 ]
   return ( isoString, length )

def _readSrTeV4Addr( data, cur, length ):
   # Layout ( length does not include 2 bytes of struct padding ):
   #    byte0 : length, byte1 : address family, bytes 2-3 : padding,
   #    bytes 4 .. length-9 : endpoint address,
   #    bytes length-8 .. length-5 : color,
   #    bytes length-4 .. length-1 : distinguisher
   # Rendered as distinguisher|color|endpoint, e.g. 1|5|10.20.30.40
   epOctets = [ str( struct.unpack_from( 'B', data, cur + ix )[ 0 ] )
                for ix in range( 4, length - 8 ) ]
   color = struct.unpack_from( "!I", data, cur + length - 8 )[ 0 ]
   distinguisher = struct.unpack_from( "!I", data, cur + length - 4 )[ 0 ]
   return ( '%u|%u|%s' % ( distinguisher, color, '.'.join( epOctets ) ),
            length )

def _readSrTeV6Addr( data, cur, length ):
   # Layout: byte0 length, byte1 address family, bytes 2-3 padding, then a
   # 16-byte v6 endpoint, a network-order color and a distinguisher.
   # Rendered as distinguisher|color|endpoint, e.g. 1|5|2002::a14:1e28
   addrLen = 16
   endpoint = socket.inet_ntop( socket.AF_INET6,
                                data[ cur + 4 : cur + 4 + addrLen ] )
   color = struct.unpack_from( "!I", data, cur + 4 + addrLen )[ 0 ]
   distinguisher = struct.unpack_from( "!I", data, cur + 8 + addrLen )[ 0 ]
   return ( '%u|%u|%s' % ( distinguisher, color, endpoint ), length )

def readAfAddr( data, cur ):
   # The sockaddrUnInet structure has a 1 byte len and a 1 byte AF
   # identifer followed by the AF-specific contents.
   ( length, family ) = struct.unpack_from( "BB", data, cur )

   PF_INET = 2
   PF_INET6 = 10

   # PF_ISO = PF_MAX  - taken from /usr/include/bits/socket.h
   PF_ISO = 41
   PF_ISO_LSP = PF_ISO + 1
   # PF_SR_TE_INET and INET6 need to match AR_SR_TE_INET and INET6 defined in
   # /src/gated/gated-ctk/src/util/sockaddr.h
   PF_SR_TE_INET = 44
   PF_SR_TE_INET6 = 45
   # PF_INET6_LINKLOCAL needs to match AF_INET6_LINKLOCAL defined in
   # gated/gated-ctk/src/util/sockaddr.h
   PF_INET6_LINKLOCAL = 254

   if family == PF_INET:
      # the v4 reader derives its own length from the structure
      return _readAfAddr( data, cur )

   dispatch = {
      PF_INET6: _readAf6Addr,
      PF_INET6_LINKLOCAL: _readAf6LlAddr,
      PF_ISO: _readIsoAddr,
      PF_ISO_LSP: _readIsoLspAddr,
      PF_SR_TE_INET: _readSrTeV4Addr,
      PF_SR_TE_INET6: _readSrTeV6Addr,
   }
   if family in dispatch:
      return dispatch[ family ]( data, cur, length )

   # Punt other address families for now.
   return ( "<Address in AF %d>" % family, length )

def readIpAndPort( data, cur ):
   # IPv4 octets stored least-significant first, then a host-order U16 port
   octets = struct.unpack_from( "BBBB", data, cur )
   port = struct.unpack_from( 'H', data, cur + 4 )[ 0 ]
   addrStr = '.'.join( str( o ) for o in reversed( octets ) )
   return ( '%s:%s' % ( addrStr, port ), 6 )

def readConnTuple( data, cur ):
   # src ip:port -> dst ip:port plus a protocol byte; fixed 13-byte encoding
   ( srcStr, consumed ) = readIpAndPort( data, cur )
   cur += consumed
   ( dstStr, consumed ) = readIpAndPort( data, cur )
   cur += consumed
   proto = struct.unpack_from( 'B', data, cur )[ 0 ]
   return ( '%s -> %s,%s' % ( srcStr, dstStr, proto ), 13 )

# BGP/EVPN ethernet segment
def readEthSegment( data, cur ):
   # ten bytes, already in network byte order, as five 16-bit hex groups
   words = struct.unpack_from( "!HHHHH", data, cur )
   return ( "%04x:%04x:%04x:%04x:%04x" % words, 10 )

# BGP/EVPN label
def readEvpnLabel( data, cur ):
   # BGP/EVPN label word: high byte is the label type, low 24 bits the label
   dword = struct.unpack_from( 'I', data, cur )[ 0 ]
   typeNum = dword >> 24
   if typeNum == 0x00:
      return ( 'NotPresent', 4 )
   label = dword & 0xffffff
   if typeNum == 0x02:
      # MPLS label stored in the top 20 bits of the 24-bit value
      label >>= 4
   typeStr = { 0x01: 'VNI', 0x02: 'MplsLabel' }.get( typeNum,
                                                     'UnsupportedType' )
   return ( "%s:%d" % ( typeStr, label ), 4 )

# BGP/EVPN L2Attrs (VPWS) (Routing::Bgp::EvpnPlugin::EvpnL2Attrs)
def _readEvpnL2Attrs( data, cur, fxcModes, vlanNormVals ):
   """Decode an EvpnL2Attrs pair of U32 words into a display suffix.

   Word 1 packs, from the low bit: valid(0), C(1), P(2), B(3) flags in the
   low byte, then an fxcMode byte and a vlanNorm byte ( mapped to names via
   the caller-supplied dicts ).  A second U32 holds the L2 MTU.  Returns
   ( suffix string, bytes consumed ); the suffix is empty and only 4 bytes
   are consumed when word 1 is zero or the valid bit is clear.
   """
   start = cur
   word1, count = readU32( data, cur )
   cur += count
   out = ""
   if word1:
      valid = word1 & (1<<0)
      if valid:
         cFlag = word1 & (1<<1)
         pFlag = word1 & (1<<2)
         bFlag = word1 & (1<<3)
         word1 >>= 8

         fxcMode = word1 & 0xFF
         word1 >>= 8
         if fxcMode:
            fxcModeStr = ";" + fxcModes.get( fxcMode,
                  "FxcMode({})".format( fxcMode ) )
         else:
            fxcModeStr = ""

         vlanNorm = word1 & 0xFF
         word1 >>= 8
         if vlanNorm:
            vlanNormStr = ";" + vlanNormVals.get( vlanNorm,
                  "VlanNormalization({})".format( vlanNorm ) )
         else:
            vlanNormStr = ""

         l2Mtu, count = readU32( data, cur )
         # flags render uppercase when set, lowercase when clear
         cVal = "C" if cFlag else "c"
         pVal = "P" if pFlag else "p"
         bVal = "B" if bFlag else "b"

         cur += count
         out = ", evpnL2Attrs:{b}{p}{c}{fxcMode}{vlanNorm};mtu={mtu}".format(
               b=bVal, p=pVal, c=cVal, fxcMode=fxcModeStr, vlanNorm=vlanNormStr,
               mtu=l2Mtu )

      # else -> nothing, empty string
   # else -> nothing, empty string
   return out, cur - start

def readEvpnL2Attrs( data, cur ):
   # Decode EvpnL2Attrs using the current enum-value-to-name tables.
   fxcModes = dict( enumerate( (
      "notFxc", "fxcVlanSignaled", "fxcDefault", "fxcUndefined" ) ) )
   vlanNormVals = dict( enumerate( (
      "notNormalized", "normalizedSingleVlan", "normalizedDoubleVlan",
      "normalizedUndefined" ) ) )
   return _readEvpnL2Attrs( data, cur, fxcModes, vlanNormVals )

def readEvpnL2AttrsWrongEnums( data, cur ):
   # Decode EvpnL2Attrs with the alternate ( "wrong" ) enum tables;
   # presumably used for traces written with the older enum values.
   fxcModes = dict( enumerate( (
      "notFxc", "fxcVlanUnaware", "fxcVlanAware", "fxcUndefined" ) ) )
   vlanNormVals = dict( enumerate( (
      "notNormalized", "normalizedDoubleVlan", "normalizedSingleVlan",
      "normalizedUndefined" ) ) )
   return _readEvpnL2Attrs( data, cur, fxcModes, vlanNormVals )

# Pim Specific ones for qtcat
def readMRouteKey( data, cur ):
   '''Read a multicast route key ( two IpGenAddrs ), displayed as (S, G).'''
   ( src, nBytes ) = readIpGenAddr( data, cur )
   consumed = nBytes
   ( grp, nBytes ) = readIpGenAddr( data, cur + consumed )
   consumed += nBytes
   return ( "(%s, %s)" % ( src, grp ), consumed )

def readMfibRouteKey( data, cur ):
   '''Read a multicast route key ( two IpPrefixes ), displayed as (S, G).'''
   ( src, nBytes ) = readIpPrefix( data, cur )
   consumed = nBytes
   ( grp, nBytes ) = readIpPrefix( data, cur + consumed )
   consumed += nBytes
   return ( "(%s, %s)" % ( src, grp ), consumed )

def readMfibGenRouteKey( data, cur ):
   '''Read an IpGenPrefix-based multicast route key, displayed as (S, G).'''
   ( src, nBytes ) = readIpGenPrefix( data, cur )
   consumed = nBytes
   ( grp, nBytes ) = readIpGenPrefix( data, cur + consumed )
   consumed += nBytes
   return ( "(%s, %s)" % ( src, grp ), consumed )

def readMRouteFlags( data, cur ):
   '''Method to read multicast route flags.

   Reads 13 consecutive boolean bytes and accumulates one flag letter per
   set byte.  If neither the W nor the S flag is set the route is an
   expanded (S,G) and an implicit "E" is appended.  Returns the flag
   string and the number of bytes consumed.
   '''
   # ( letter, suppressesExpandedSg ): a set W or S flag means the route is
   # not an expanded (S,G), matching the original unrolled logic.
   flagDefs = (
      ( 'R', False ), ( 'W', True ), ( 'S', True ), ( 'J', False ),
      ( 'T', False ), ( 'B', False ), ( 'K', False ), ( 'Z', False ),
      ( 'N', False ), ( 'M', False ), ( 'A', False ), ( 'C', False ),
      ( 'O', False ),
   )

   flags = ""
   expandedSg = True
   for ix, ( letter, suppressesE ) in enumerate( flagDefs ):
      if struct.unpack_from( '?', data, cur + ix )[ 0 ]:
         flags += letter
         if suppressesE:
            expandedSg = False

   if expandedSg:
      flags += "E"

   return ( flags, len( flagDefs ) )

def readMRouteIntfJoinState( data, cur ):
   '''Read a U32 multicast route interface join state.'''
   state = struct.unpack_from( 'I', data, cur )[ 0 ]
   # any value other than 0 or 1 is reported as joined
   names = { 0: "noInfo", 1: "prunePending" }
   return ( names.get( state, "joined" ), 4 )

def readMRouteIntfRptJoinState( data, cur ):
   '''Read a U32 multicast route interface RPT join state.'''
   state = struct.unpack_from( 'I', data, cur )[ 0 ]
   # any value other than 0 or 1 is reported as rptPrunePending
   names = { 0: "rptNoInfo", 1: "rptPruned" }
   return ( names.get( state, "rptPrunePending" ), 4 )

def mplsLabelDescription( val ):
   # special label values get symbolic names; everything else is decimal
   specialLabels = { 0x100000: 'Null', 3: 'ImplicitNull' }
   return specialLabels.get( val, '%d' % val )

def readMplsLabel( data, cur ):
   # an MPLS label is stored as a host-order U32
   val = struct.unpack_from( 'I', data, cur )[ 0 ]
   return ( 'MplsLabel(%s)' % mplsLabelDescription( val ), 4 )

def readMplsLabelOperation( data, cur ):
   '''Arnet::MplsLabelOperation storage format:
   | U8 operation | U8 stackSize | stackSize * U32 labels |
   e.g.
   push 3 labels, 0x1000, 0x2000, 0x3000
   | 0x00 | 0x03 | 0x00001000 | 0x00002000 | 0x00003000 |
   '''
   off = 0
   ( op, tmp ) = readU8( data, cur )
   off += tmp
   ( stackSize, tmp ) = readU8( data, cur + off )
   if stackSize > MplsStackEntryIndex_max + 1:
      # Since the stack size can only be in the above range, we can check for
      # bad data here.  bytearray() yields ints on both Python 2 and 3; the
      # previous map( ord, ... ) broke on Python 3 bytes, where indexing
      # already gives ints.
      rawHex = ''.join( [ '%02x' % c for c in bytearray( data[ cur:cur+40 ] ) ] )
      print( 'Current QuickTrace data:', rawHex )
      print( 'Bad MPLS label operation stack size:', stackSize )
      print( 'Start of MPLS label operation:', cur )
      print( 'Bad stack size offset:', cur + off )
      raise ValueError( 'Bad MPLS label operation stack size %u' % stackSize )
   off += tmp
   labels = []
   for _ in range( stackSize ):
      ( label, tmp ) = readU32( data, cur + off )
      off += tmp
      labels.append( mplsLabelDescription( label ) )
   opStr = {
      0: 'push',
      1: 'pop',
      2: 'swap',
      0xff: 'unknown',
   }
   return opStr[ op ] + '[' + ', '.join( labels ) + ']', off

def readTunnelKey( data, cur ):
   '''Read a tunnel key: v4 src, v4 dst, U32 vrf, U64 generation id.'''
   ( src, off ) = readIp( data, cur )
   ( dst, nBytes ) = readIp( data, cur + off )
   off += nBytes
   ( vrf, nBytes ) = readU32( data, cur + off )
   off += nBytes
   ( genId, nBytes ) = readU64( data, cur + off )
   off += nBytes
   return ( 'src:%s, dst:%s, vrf:%d, genId:%d' % ( src, dst, vrf, genId ), off )

def readTunnelKeyGen( data, cur ):
   '''Read a tunnel key whose endpoints are generic ( v4 or v6 ) addresses.'''
   ( src, off ) = readIpGenAddr( data, cur )
   ( dst, nBytes ) = readIpGenAddr( data, cur + off )
   off += nBytes
   ( vrf, nBytes ) = readU32( data, cur + off )
   off += nBytes
   ( genId, nBytes ) = readU64( data, cur + off )
   off += nBytes
   return ( 'src:%s, dst:%s, vrf:%d, genId:%d' % ( src, dst, vrf, genId ), off )

def readPseudowireKey( data, cur ):
   '''Read a pseudowire key: U32 pwId plus a neighbor address.'''
   ( pwId, off ) = readU32( data, cur )
   ( neighbor, nBytes ) = readIpGenAddr( data, cur + off )
   return ( '%u/%s' % ( pwId, neighbor ), off + nBytes )

# Value-size byte -> struct format for readUnsignedSequence(); hoisted to
# module level so it is not rebuilt on every loop iteration.
_unsignedSeqSizeFmt = {
   1: 'B',
   2: 'H',
   4: 'I',
   8: 'Q',
}

def readUnsignedSequence( data, cur ):
   '''Read a flexible sequence of unsigned values
   format:
     count:U8 sep:U8 [ size:U8 | value:size ] ... [ size:U8 | value:size ]
     e.g.
       \x02 \x3a \x01 \x31 \x02 \x12 \x34
     count=2, sep=':', value1=0x31, value2=0x3412
     output: "49:13330"
   '''
   count = struct.unpack_from( 'B', data, cur )[ 0 ]
   sep = struct.unpack_from( 'B', data, cur + 1 )[ 0 ]
   off = 2
   if count == 0:
      return ( '', off )
   vals = []
   for _ in range( count ):
      sz = struct.unpack_from( 'B', data, cur + off )[ 0 ]
      off += 1
      fmt = _unsignedSeqSizeFmt.get( sz )
      if fmt is None:
         raise ValueError( 'Bad value size in unsigned sequence: %u' % sz )
      vals.append( str( struct.unpack_from( fmt, data, cur + off )[ 0 ] ) )
      off += sz
   return ( chr( sep ).join( vals ), off )

def readSizeSpec( data, cur ):
   '''Reads the SizeSpec ( an array of 10 integers )'''
   ss = array.array('i')
   # array.fromstring() was removed in Python 3.9; frombytes() is the
   # modern equivalent, but keep the old call for Python 2 compatibility.
   if hasattr( ss, 'frombytes' ):
      ss.frombytes( data[cur:cur+40] )
   else:
      ss.fromstring( data[cur:cur+40] )
   return ss, 40

def readGlobalFecId( data, cur ):
   # a U64 fecId followed by a U16 tableId, both host byte order
   fecId = struct.unpack_from( 'Q', data, cur )[ 0 ]
   tableId = struct.unpack_from( 'H', data, cur + 8 )[ 0 ]
   return ( 'fecId: %u, tableId: %u' % ( fecId, tableId ), 10 )

# BGP Address Family Identifier (AFI) values to display names.
afiToStr = { 0 : 'None',
             1 : 'ipv4',
             2 : 'ipv6',
             25 : 'l2vpn',
             16388 : 'linkState',
             16389 : '48bitMac'
}
# BGP Subsequent Address Family Identifier (SAFI) values to display names.
safiToStr = { 0 : 'None',
              1 : 'ucast',
              2 : 'mcast',
              4 : 'mplsLabels',
              70 : 'evpn',
              71 : 'linkState',
              73 : 'srTe',
              128 : 'mplsVpn',
              132 : 'rtMembership',
              133 : 'flowspec',
              241 : 'private1',
              242 : 'private2',
              243 : 'private3',
              244 : 'private4',
              245 : 'private5',
              246 : 'private6',
              247 : 'private7',
              248 : 'private8',
              250 : 'private10',
}

# Adj-RIB-in entry type codes to display names.
adjRibinTypeToStr = { 0 : 'None',
              1 : 'peer',
              2 : 'redist',
              3 : 'import',
              4 : 'export',
              5 : 'network',
              6 : 'aggregate',
              # fixed typo: was 'defaultOrigiante'
              7 : 'defaultOriginate',
              8 : 'rtMembershipImport',
              9 : 'linkStateProducer',
}

# NLRI type codes to display names; gaps ( e.g. 32, 37, 38 ) are codes with
# no mapping in this table and render as 'unknown' via readNlriType().
nlriTypeToStr = { 0 : 'None',
              1 : 'v4u',
              2 : 'v6u',
              3 : 'evpnType1',
              4 : 'evpnType2',
              5 : 'evpnType3',
              6 : 'evpnType4',
              7 : 'evpnType5Ipv4',
              8 : 'evpnType5Ipv6',
              9 : 'macSegment',
              10 : 'macAddress',
              11 : 'macFloodTarget',
              12 : 'macEthSegment',
              13 : 'macArp',
              14 : 'rtMembership',
              15 : 'v4Lu',
              16 : 'v6Lu',
              17 : 'v4m',
              18 : 'mplsVpnv4u',
              19 : 'mplsVpnv6u',
              20 : 'flowspecv4u',
              21 : 'flowspecv6u',
              22 : 'v4srTe',
              23 : 'v6srTe',
              24 : 'ipv6Multicast',
              25 : 'evpnType6',
              26 : 'evpnType7',
              27 : 'evpnType8',
              28 : 'macMulticast',
              29 : 'macJoinSynch',
              30 : 'macLeaveSynch',
              31 : 'evpnType10',
              33 : 'lsNode',
              34 : 'lsLink',
              35 : 'lsV4Prefix',
              36 : 'lsV6Prefix',
              39 : 'None',
              40 : 'testv4Evpn',
              41 : 'testv6Evpn',
}

# Route key formatters for BGP. Each AFI/SAFI can use its own mechanism. The key is
# either the AF or an ( AF, SAF ) pair. The value is a tuple or a dict of tuples -
# if a dict then the parser will examine the first character of the input to
# determine which tuple to use for formatting.
#
# In the tuple, the first string is the output string, format will be called on this
# string with args formed by parsing the rest of the fields - these are identified
# by the subsequent strings in the tuple.
#
# For example, ( '{0}', 'P' ) will readPrefix and return '{0}'.format( prefix ).
# ( '{0}/{1}', 'I', 'E' ) will readIp, then readEth, and return
# '{0}/{1}'.format( ip, eth ).
# NOTE(review): field keys like 'q', 'ES', 'IG' are resolved at runtime via
# QtReader.reader() in readBgpRouteKey(); QtReader is not defined in this
# chunk of the file.
bgpRouteKeyFormat = {
   'ipv4': ( '{0}', 'P' ),
   'ipv6': ( '{0}', 'P6' ),
   ( 'l2vpn', 'evpn' ): {
      # Type 1 Auto-Discovery
      'A': ( '<EVPN-Type1-Key: rd:{0}, esi:{1}, etid:{2}>', 'q', 'ES', 'i' ),
      # Type 2 MAC/IP
      'M': ( '<EVPN-Type2-Key: rd:{0}, etid:{1}, mac:{2}, ip:{3}>',
             'q', 'i', 'E', 'IG' ),
      # Type 3 IMET
      'I': ( '<EVPN-Type3-Key: rd:{0}, etid:{1}, ip:{2}>', 'q', 'i', 'IG' ),
      # Type 4 Ethernet Segment
      'E': ( '<EVPN-Type4-Key: rd:{0}, esi:{1}, ip:{2}>', 'q', 'ES', 'IG' ),
      # Type 5 IpPrefix (IPv4)
      '4': ( '<EVPN-Type5-Key: rd:{0}, etid:{1}, ip:{2}>', 'q', 'i', 'P' ),
      # Type 5 IpPrefix (IPv6)
      '6': ( '<EVPN-Type5-Key: rd:{0}, etid:{1}, ip:{2}>', 'q', 'i', 'P6' ),
      # Type 6 SMET
      'S': ( '<EVPN-Type6-Key: rd:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, ' + \
             'origAddr:{4}>', 'q', 'i', 'IG', 'IG', 'IG' ),
      # Type 7 JoinSync
      'J': ( '<EVPN-Type7-Key: rd:{0}, etid:{1}, esi:{2}, srcAddr:{3}, ' + \
             'grpAddr:{4}, origAddr:{5}>', 'q', 'i', 'ES', 'IG', 'IG', 'IG' ),
      # Type 8 LeaveSync
      'V': ( '<EVPN-Type8-Key: rd:{0}, etid:{1}, esi:{2}, srcAddr:{3}, ' + \
             'grpAddr:{4}, origAddr:{5}>', 'q', 'i', 'ES', 'IG', 'IG', 'IG' ),
      # Type 10 SpmsiAD
      'P': ( '<EVPN-Type10-Key: rd:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, ' + \
             'origAddr:{4}>', 'q', 'i', 'IG', 'IG', 'IG' ),
   },
   ( '48bitMac', 'private1' ): ( 'rd:{0}, esi:{1}, etid:{2}', 'q', 'ES', 'i' ),
   ( '48bitMac', 'private2' ): ( 'mac:{0}, etid:{1}', 'E', 'i' ),
   ( '48bitMac', 'private3' ): ( 'vtep:{0}, etid:{1}', 'IG', 'i' ),
   ( '48bitMac', 'private4' ): ( 'rd:{0}, vtep:{1}, esi:{2}', 'q', 'IG', 'ES' ),
   ( '48bitMac', 'private5' ): ( 'mac:{0}, ip:{1}, etid:{2}', 'E', 'IG', 'i' ),
   ( '48bitMac', 'private6' ): ( 'etid:{0}, srcAddr:{1}, grpAddr:{2}, origAddr:{3}',
                                 'i', 'IG', 'IG', 'IG' ),
   ( '48bitMac', 'private7' ): ( 'esi:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, ' +\
                                 'origAddr:{4}', 'ES', 'i', 'IG', 'IG', 'IG' ),
   ( '48bitMac', 'private8' ): ( 'esi:{0}, etid:{1}, srcAddr:{2}, grpAddr:{3}, ' +\
                                 'origAddr:{4}', 'ES', 'i', 'IG', 'IG', 'IG' ),
   ( '48bitMac', 'private10' ): ( 'etid:{0}, srcAddr:{1}, grpAddr:{2}, origAddr:{3}',
                                 'i', 'IG', 'IG', 'IG' ),
   ( 'ipv4', 'mplsVpn' ): ( 'rd: {0}, ip:{1}', 'q', 'P' ),
   ( 'ipv6', 'mplsVpn' ): ( 'rd: {0}, ip:{1}', 'q', 'P6' ),
   ( 'ipv4', 'flowspec' ): ( 'ruleId:{0}', 'q' ),
   ( 'ipv6', 'flowspec' ): ( 'ruleId:{0}', 'q' ),
   ( 'ipv4', 'srTe' ) : ( '{0}|{1}|{2}', 'i', 'i', 'I' ),
   ( 'ipv6', 'srTe' ) : ( '{0}|{1}|{2}', 'i', 'i', 'I6' ),
   ( 'linkState', 'linkState' ) : {
      'N' : ( 'nodeRk: proto:{0}, identifier:{1}, nodeId:{2}', 'u', 'q', 'IGPN' ),
      'L' : ( 'linkRk: proto:{0}, identifier:{1}, lNodeId:{2}, rNodeId:{3}, ' +\
            'v4lAddr:{4}, v6lAddr:{5}', 'u', 'q', 'IGPN', 'IGPN', 'IG', 'IG' ),
     'P' : ( 'prefixRk: proto:{0}, identifier:{1}, nodeId:{2}, prefix:{3}', 
           'u', 'q', 'IGPN', 'PG' ),
      },
   ( 'ipv4', 'rtMembership' ) : ( '{0}', 'RTMRK' ),
}

# Nexthop formatters are similar to route key formatters, but keyed by the first
# byte, not by AFI/SAFI.
# NOTE(review): field keys ( 'I', 'EL', ... ) are resolved via QtReader.reader()
# as in readBgpRouteKey(); QtReader is not defined in this chunk of the file.
bgpNextHopFormat = {
   # IPv4 address
   0x01: ( '{0}', 'I' ),
   # IPv6 address
   0x02: ( '{0}', 'I6' ),
   # IPv4 address + router MAC
   0x03: ( '<ip:{0}, routerMac:{1}>', 'I', 'E' ),
   # IPv6 address + router MAC
   0x04: ( '<ip:{0}, routerMac:{1}>', 'I6', 'E' ),
   # EvpnADNexthop version 1:
   #   IPv4 tunnel endpoint + label + singleActive flag + esiLabel
   0x05: ( '<ip:{0}, label:{1}, singleActive:{2}, esiLabel:{3}>',
           'I', 'EL', 'b', 'EL' ),
   # IPv4 tunnel endpoint + segment ID + tunnel key + L3 tunnel key
   0x06: ( '<ip:{0}, esi:{1}, label:{2}, label2:{3}>', 'I', 'ES', 'EL', 'EL' ),
   # IPv6 address + mpls label
   0x07: ( '<ipv6: {0}, label:{1}>', 'I6', 'ML' ),
   # Ipv4 address + Mpls label stored as a U32
   0x08: ( '<ip:{0}, label:{1}>', 'I', 'i' ),
   # Ipv6 address + Mpls label stored as a U32
   0x09: ( '<ipv6:{0}, label:{1}>', 'I6', 'i' ),
   # Ipv6 link-local address + interface stored as string
   0x0A: ( '{0}%{1}', 'I6', 'p' ),
   # EvpnMacIpNexthop:
   #   IPv4 address + segment ID + L2 label + L3 label + ndProxy flag
   0x0B: ( '<ip:{0}, esi:{1}, label:{2}, label2:{3}, ndProxyFlag:{4}>',
          'I', 'ES', 'EL', 'EL', 'b' ),
   # EvpnADNexthop version 2 (replaces 0x05 above)
   #   parameter {4} is an optional EvpnL2Attrs
   0x0C: ( '<ip:{0}, label:{1}, singleActive:{2}, esiLabel:{3}{4}>',
           'I', 'EL', 'b', 'EL', 'EL2old' ),
   # IPv4 address + mcastFlags (SMET and JoinSync EVPN routes)
   0x0D: ( '<ip:{0}, mcastFlags:{1}>', 'I', 'u' ),
   # IPv6 address + mcastFlags
   0x0E: ( '<ip:{0}, mcastFlags:{1}>', 'I6', 'u' ),
   # IPv4 address + mcastFlags + maxResponseTime (LeaveSync EVPN routes)
   0x0F: ( '<ip:{0}, mcastFlags:{1}, maxResponseTime:{2}>', 'I', 'u', 'u' ),
   # IPv6 address + mcastFlags + maxResponseTime
   0x10: ( '<ip:{0}, mcastFlags:{1}, maxResponseTime:{2}>', 'I6', 'u', 'u' ),
   # EvpnADNexthop version 3 (replaces 0x0C above)
   #   Using new enum values for fxcMode, vlanNormVals
   0x11: ( '<ip:{0}, label:{1}, singleActive:{2}, esiLabel:{3}{4}>',
           'I', 'EL', 'b', 'EL', 'EL2' ),
   # EvpnImetNexthop version 2 (replaces 0x01 above)
   #   IPv4 address + remoteDomain flag
   0x12: ( '<ip:{0}, remoteDomain:{1}>', 'I', 'b' ),
   # EvpnIpPrefixPreResolvedNexthop:
   #   L3 label + routerMac + L3 label2
   0x13: ( '<l3Label:{0}, routerMac:{1}, l3Label2:{2}>', 'EL', 'E', 'EL' ),
   # EvpnGatewayIpPrefixNexthop v4 address: 
   #   IPv4 address + L3 label + routerMac +
   #   local L3 Label + local routerMac + local L3 Label2
   0x14: ( '<ip:{0}, l3Label:{1}, routerMac:{2}, localL3Label:{3}, ' +\
         'localRouterMac:{4}, localL3Label2:{5}>', 
         'I', 'EL', 'E', 'EL', 'E', 'EL' ),
   # EvpnGatewayIpPrefixNexthop v6 address: 
   #   IPv6 address + L3 label + routerMac +
   #   local L3 Label + local routerMac + local L3 Label2
   0x15: ( '<ip:{0}, l3Label:{1}, routerMac:{2}, localL3Label:{3}, ' +\
         'localRouterMac:{4}, localL3Label2:{5}>', 
         'I6', 'EL', 'E', 'EL', 'E', 'EL' ),
   # RtMembershipGenNexthop
   0x16: ( '{0}', 'IG' ),
}

# Here is the format of the qt ring
# octets    | 2 | 1  |  1     |  1   |   4    |         var         |
# content   |afi|safi|nlriType|l2OrL3|rawVrfId|---afiSafiSpecific---|
def readBgpRouteKey( data, cur ):
   """Decode a BGP route key from the quicktrace ring.

   Layout ( see comment above ): afi:U16, safi:U8, nlriType:U8, l2OrL3:U8,
   rawVrfId:U32, then an AFI/SAFI-specific payload described by
   bgpRouteKeyFormat.  Returns ( rendered key string, bytes consumed ).
   Raises KeyError for AFI/SAFI codes missing from afiToStr/safiToStr.
   """
   prev = cur
   ( afi, nBytes ) = readU16( data, cur )
   cur += nBytes
   ( safi, nBytes ) = readU8( data, cur )
   cur += nBytes

   afiStr = afiToStr[ afi ]
   safiStr = safiToStr[ safi ]

   # Read nlriType
   ( nlriType, nBytes ) = readU8( data, cur )
   cur += nBytes

   # Read vrfType and vrfId
   ( vrfType, nBytes ) = readU8( data, cur )
   cur += nBytes
   ( vrfId, nBytes ) = readU32( data, cur )
   cur += nBytes
   vrfStr = 'vrf-unknown'
   if vrfType == 0:
      vrfStr = 'L2-vrfId:%d' % vrfId
   elif vrfType == 1:
      vrfStr = 'L3-vrfId:%d' % vrfId

   # Get one of:
   # - bgpRouteKeyFormat[ ( afiStr, safiStr ) ]
   # - bgpRouteKeyFormat[ afiStr ]
   # - ( 'unknown', )
   rkFormat = ( bgpRouteKeyFormat.get( ( afiStr, safiStr ) ) or
                bgpRouteKeyFormat.get( afiStr ) or ( 'unknown', ) )

   # Dict: need to consume one more byte to distinguish.
   if hasattr( rkFormat, 'keys' ):
      ( distinguisher, nBytes ) = readChar( data, cur )
      cur += nBytes
      rkFormat = rkFormat[ distinguisher ]

   fields = []
   for fieldKey in rkFormat[ 1: ]:
      # QtReader maps a field key ( e.g. 'P', 'IG' ) to one of the
      # deserializers; QtReader is not defined in this chunk of the file.
      r = QtReader.reader( fieldKey )
      if not r:
         print( "Failed to find a reader for key", fieldKey )
         fields.append( fieldKey )
         continue
      strep, nBytes = r( data, cur )
      cur += nBytes
      fields.append( strep )

   rkStr = rkFormat[ 0 ].format( *fields )
   return ( ' RouteKey<(%s/%s) (%s; %s nlriType-%d)> ' % (
      afiStr, safiStr, vrfStr, rkStr, nlriType ), cur - prev )

# Here is the format of Bmp RouteKey
# octets    | 2 | 1  |var    |   var   |
# content   |afi|safi|vrfName|---key---|
def readBmpRouteKey( data, cur ):
   """Decode a serialized Bmp RouteKey and render it as a string.

   Layout (see the comment above): afi(2), safi(1), a length-prefixed vrf
   name, then afi/safi-specific fields.  Returns ( string, bytesConsumed ).
   """
   start = cur

   ( afi, sz ) = readU16( data, cur )
   cur += sz
   ( safi, sz ) = readU8( data, cur )
   cur += sz
   afiStr = afiToStr[ afi ]
   safiStr = safiToStr[ safi ]

   # Unlike readBgpRouteKey, the vrf is carried by name rather than id.
   ( vrfStr, sz ) = readString( data, cur )
   cur += sz

   # Most specific format first: the ( afi, safi ) pair, then afi alone.
   rkFormat = ( bgpRouteKeyFormat.get( ( afiStr, safiStr ) ) or
                bgpRouteKeyFormat.get( afiStr ) or ( 'unknown', ) )

   # A dict-valued entry means one more byte is needed to pick the format.
   if hasattr( rkFormat, 'keys' ):
      ( discriminator, sz ) = readChar( data, cur )
      cur += sz
      rkFormat = rkFormat[ discriminator ]

   # Decode the variable afi/safi-specific fields named by the format entry.
   fields = []
   for fieldKey in rkFormat[ 1: ]:
      rd = QtReader.reader( fieldKey )
      if rd is None:
         print( "Failed to find a reader for key", fieldKey )
         fields.append( fieldKey )
         continue
      value, sz = rd( data, cur )
      cur += sz
      fields.append( value )

   rkStr = rkFormat[ 0 ].format( *fields )
   return ( ' RouteKey<(%s/%s) (%s; %s)> ' % (
      afiStr, safiStr, vrfStr, rkStr ), cur - start )

def readAfi( data, cur ):
   # Render a 2-byte AFI via the afiToStr table; unmapped values show as
   # "unknown".
   ( afi, consumed ) = readU16( data, cur )
   afiStr = afiToStr[ afi ] if afi in afiToStr else "unknown"
   return ( "<AFI:%s>" % afiStr, consumed )

def readSafi( data, cur ):
   # Render a 1-byte SAFI via the safiToStr table; unmapped values show as
   # "unknown".
   ( safi, consumed ) = readU8( data, cur )
   safiStr = safiToStr[ safi ] if safi in safiToStr else "unknown"
   return ( "<SAFI:%s>" % safiStr, consumed )

def readNlriType( data, cur ):
   # Render a 1-byte NLRI type via the nlriTypeToStr table; unmapped values
   # show as "unknown".
   ( nlriType, consumed ) = readU8( data, cur )
   nlriTypeStr = nlriTypeToStr[ nlriType ] if nlriType in nlriTypeToStr \
      else "unknown"
   return ( "<NLRI:%s>" % nlriTypeStr, consumed )

def readAdjRibinType( data, cur ):
   # Render a 1-byte adj-rib-in type via the adjRibinTypeToStr table;
   # unmapped values show as "unknown".
   ( ribinType, consumed ) = readU8( data, cur )
   ribinTypeStr = adjRibinTypeToStr[ ribinType ] if ribinType in \
      adjRibinTypeToStr else "unknown"
   return ( "<AdjRibin:%s>" % ribinTypeStr, consumed )

def readIpAddrWithMask( data, cur ):
   # An IPv4 address followed by a one-byte prefix length.
   addr, consumed = readIp( data, cur )
   ( plen, ) = struct.unpack_from( "B", data, cur + consumed )
   return ( "%s/%d" % ( addr, plen ), consumed + 1 )

def readIp6AddrWithMask( data, cur ):
   # An IPv6 address followed by a one-byte prefix length.
   addr, consumed = readIp6Addr( data, cur )
   ( plen, ) = struct.unpack_from( 'B', data, cur + consumed )
   return ( "%s/%d" % ( addr, plen ), consumed + 1 )

def readBgpGenNextHop( data, cur ):
   """Decode a Bgp generalized nexthop: vrfType(1), vrfId(4), nhType(1),
   then the fields listed by the matching bgpNextHopFormat entry."""
   start = cur

   ( vrfType, sz ) = readU8( data, cur )
   cur += sz
   ( vrfId, sz ) = readU32( data, cur )
   cur += sz
   # vrfType 0 is an L2 vrf, 1 is an L3 vrf; anything else is unknown.
   vrfNames = { 0: 'L2-vrfId:%d', 1: 'L3-vrfId:%d' }
   vrfStr = vrfNames[ vrfType ] % vrfId if vrfType in vrfNames else 'vrf-unknown'

   ( nhType, sz ) = readU8( data, cur )
   cur += sz
   nhFormat = bgpNextHopFormat.get( nhType, ( 'NULL', ) )

   # Decode the nexthop-type-specific fields named by the format entry.
   fields = []
   for fieldKey in nhFormat[ 1: ]:
      rd = QtReader.reader( fieldKey )
      if rd is None:
         print( "Failed to find a reader for key", fieldKey )
         fields.append( fieldKey )
         continue
      value, sz = rd( data, cur )
      cur += sz
      fields.append( value )
   return ( nhFormat[ 0 ].format( *fields ) + " (" + vrfStr + ")", cur - start )

def readGenRange( data, cur ):
   '''Decode a Halo::GenRange: a one-byte type tag followed by the value.

   genRangeType:
      0   : unknown - rendered as "n/a", consistent with stringValue
      1   : ipGenPrefix
      2   : port (an unsigned sequence, e.g. an L4 port range)
      255 : ipGenAddrWithFullMask
   '''
   # Reuse the existing per-type formatters; type 0 consumes nothing and
   # renders as "n/a".
   decoders = {
      0 : lambda _d, _c: ( 'n/a', 0 ),
      1 : readIpGenPrefix,
      2 : readUnsignedSequence,
      255 : readIpGenAddrWithFullMask,
   }
   ( rangeType, consumed ) = readU8( data, cur )
   ( valueStr, valueLen ) = decoders[ rangeType ]( data, cur + consumed )
   return ( valueStr, consumed + valueLen )

def readFapmask( data, cur ):
   """Decode a 128-bit fapmask (two U64s, low word first) into a compact
   string of fapIds with consecutive runs collapsed, e.g. '(0,2-4,6)'."""
   ( valueLo, off ) = readU64( data, cur )
   ( valueHi, more ) = readU64( data, cur + off )
   off += more

   # Extract the set bits of the 128-bit mask as fapIds.
   fapmask = valueLo | ( valueHi << 64 )
   fapIds = [ bit for bit in range( 128 ) if fapmask & ( 1 << bit ) ]

   # Collapse consecutive fapIds into ( first, last ) runs; isolated fapIds
   # become single-element tuples.
   # e.g. [ 0, 2, 3, 4, 6 ] becomes [ ( 0, ), ( 2, 4 ), ( 6, ) ]
   runs = []
   idx = 0
   while idx < len( fapIds ):
      first = fapIds[ idx ]
      while idx + 1 < len( fapIds ) and fapIds[ idx + 1 ] == fapIds[ idx ] + 1:
         idx += 1
      last = fapIds[ idx ]
      runs.append( ( first, ) if first == last else ( first, last ) )
      idx += 1

   # e.g. [ '0', '2-4', '6' ] -> '(0,2-4,6)'
   runStrs = [ '-'.join( str( fapId ) for fapId in run ) for run in runs ]
   return ( '({:s})'.format( ','.join( runStrs ) ), off )

def readEthColonSeparated( data, cur ):
   # Six raw bytes rendered as a colon-separated lowercase-hex MAC address.
   octets = struct.unpack_from( "!BBBBBB", data, cur )
   return ( '%02x:%02x:%02x:%02x:%02x:%02x' % octets, 6 )

def readIpGenUcastKeyRef( data, cur ):
   # A 2-byte vrf id followed by a generalized IP prefix.
   vrf, consumed = readU16( data, cur )
   prefix, prefixLen = readIpGenPrefix( data, cur + consumed )
   return ( 'vrf:%d, ipAddr:%s' % ( vrf, prefix ), consumed + prefixLen )

def readBfdPeer( data, cur ):
   """Decode a Bfd peer key: peer address, vrf name, source address,
   interface name, tunnel id and session type."""
   off = [ 0 ]      # running offset, boxed so the closure can update it
   def field( rd ):
      # Read one value at the running offset, then advance the offset.
      value, consumed = rd( data, cur + off[ 0 ] )
      off[ 0 ] += consumed
      return value
   ip = field( readIpGenAddr )
   vrf = field( readString )
   src = field( readIpGenAddr )
   intf = field( readString )
   tunnelId = field( readU64 )
   stype = field( readU32 )
   return ( "peer(ip:%s, vrf:%s, srcip:%s, intf:%s, tunnelId:%d, type:%d)" %
            ( ip, vrf, src, intf, tunnelId, stype ), off[ 0 ] )

def readDuidMacBindingKey( data, cur ):
   """Decode a Duid/Mac binding key: duid type and hardware type, link-layer
   address, uuid, enterprise number and enterprise id."""
   off = [ 0 ]      # running offset, boxed so the closure can update it
   def field( rd ):
      # Read one value at the running offset, then advance the offset.
      value, consumed = rd( data, cur + off[ 0 ] )
      off[ 0 ] += consumed
      return value
   duidType = field( readU16 )
   duidHwType = field( readU16 )
   lladdr = field( readEth )
   uuid = field( readString )
   entno = field( readU32 )
   entId = field( readString )
   return ( "duid(type: {}, hwtype: {}, lladdr: {}, uuid: {}, entno: {},"
            " entId: {})".format( duidType, duidHwType, lladdr, uuid, entno, entId ),
            off[ 0 ] )

def readEthMask( data, cur ):
   # An ethernet address and its mask, rendered as 'addr/mask'.
   addr, addrLen = readEth( data, cur )
   mask, maskLen = readEth( data, cur + addrLen )
   return ( '{}/{}'.format( addr, mask ), addrLen + maskLen )

def readIsisSystemId( data, cur ):
   # Six raw bytes rendered in IS-IS dotted style: xxxx.xxxx.xxxx
   octets = struct.unpack_from( "BBBBBB", data, cur )
   return ( "%02x%02x.%02x%02x.%02x%02x" % octets, 6 )

def readIgpNodeId( data, cur ):
   '''Decode a Routing::Bgp::LinkStatePlugin::IgpNodeId: a one-byte type tag
   followed by the id value packed in a U64.  Always consumes 9 bytes.'''
   ( idType, _ ) = readU8( data, cur )
   if idType == 1:
      # isisSystemId: skip the type byte plus the first 2 bytes of the U64;
      # the remaining 6 bytes are rendered in reverse order.
      octets = struct.unpack_from( "BBBBBB", data, cur + 3 )
      nodeId = "%02x%02x.%02x%02x.%02x%02x" % tuple( reversed( octets ) )
   elif idType == 2:
      # isisLanId: a one-byte lan id followed by the 6-byte system id,
      # again rendered in reverse order with the lan id appended.
      parts = struct.unpack_from( "BBBBBBB", data, cur + 2 )
      lanId = parts[ 0 ]
      nodeId = "%02x%02x.%02x%02x.%02x%02x.%02x" % (
         tuple( reversed( parts[ 1: ] ) ) + ( lanId, ) )
   else:
      nodeId = "unknown"
   return ( nodeId, 9 )

# Renderer routine used to read Cspf::Destination and Cspf::NodeId
# from the ring buffer
def readCspfVertex( data, cur ):
   # Decode a Cspf::Destination / Cspf::NodeId: a one-byte type tag selects
   # the encoding of the vertex value that follows.
   decoders = {
      0 : lambda _d, _c: ( 'n/a', 0 ),
      1 : readIpPrefix,
      2 : readIp6Prefix,
      3 : readIp,
      4 : readIp6Addr,
      5 : readIsisSystemId,
   }
   ( vtxType, consumed ) = readU8( data, cur )
   ( vtx, vtxLen ) = decoders[ vtxType ]( data, cur + consumed )
   return ( "%s" % vtx, consumed + vtxLen )

def readMplsFwdEqvClass( data, cur ):
   """Decode an Mpls forwarding equivalence class: a one-byte type tag
   followed by type-specific fields.  Raises ValueError on a bad tag."""
   ( fecType, off ) = readU8( data, cur )
   if fecType == 0:
      # Plain prefix FEC.
      prefix, consumed = readIpGenPrefix( data, cur + off )
      return ( prefix, off + consumed )
   if fecType == 1:
      # RSVP FEC: session and sender-path CLI ids.
      sessionId, consumed = readU32( data, cur + off )
      off += consumed
      spId, consumed = readU32( data, cur + off )
      off += consumed
      return ( 'RSVP[Session=%d, SP=%d]' % ( sessionId, spId ), off )
   if fecType == 2:
      # MLDP FEC: root address and opaque id.
      rootIp, consumed = readIpGenAddr( data, cur + off )
      off += consumed
      opaqueId, consumed = readU32( data, cur + off )
      off += consumed
      return ( 'MLDP[Root=%s, ID=%d]' % ( rootIp, opaqueId ), off )
   raise ValueError( 'Bad FwdEqvClass fecType' )

def readIdSet( data, cur ):
   """Decode an IdSet: a one-byte bucket count followed by ( bucketId,
   64-bit bitmap ) pairs.  Rendered with consecutive ids collapsed into
   ranges, e.g. '(0,2-4,6)'."""
   ( count, off ) = readU8( data, cur )
   if not count:
      return ( '()', off )

   # Expand each bucket's bitmap into absolute ids.
   ids = []
   for _ in range( count ):
      ( bucketId, consumed ) = readU8( data, cur + off )
      off += consumed
      ( bitmap, consumed ) = readU64( data, cur + off )
      off += consumed
      base = bucketId * 64
      ids.extend( base + bit for bit in range( 64 ) if bitmap & ( 1 << bit ) )

   # Collapse consecutive ids into ( first, last ) runs; isolated ids become
   # single-element tuples.
   # e.g. [ 0, 2, 3, 4, 6 ] becomes [ ( 0, ), ( 2, 4 ), ( 6, ) ]
   runs = []
   idx = 0
   while idx < len( ids ):
      first = ids[ idx ]
      while idx + 1 < len( ids ) and ids[ idx + 1 ] == ids[ idx ] + 1:
         idx += 1
      last = ids[ idx ]
      runs.append( ( first, ) if first == last else ( first, last ) )
      idx += 1

   # e.g. [ '0', '2-4', '6' ] -> '(0,2-4,6)'
   runStrs = [ '-'.join( str( _id ) for _id in run ) for run in runs ]
   return ( '({:s})'.format( ','.join( runStrs ) ), off )

# Formats for Bgp peer keys, keyed by the one-byte key type read by
# readBgpPeerKey().  Each entry is ( formatString, fieldKey... ) where the
# field keys select QtReader readers: 'IG' = IpGenAddr, 'p' = string.
bgpPeerKeyFormat = {
   0x01: ( '{0}%{1}', 'IG','p' ),
   0x02: ( '{0}', 'IG' ),
   0x03: ( '{0}', 'p' ),
}

def readBgpPeerKey( data, cur ):
   """Decode a Bgp peer key: a one-byte key type selecting an entry from
   bgpPeerKeyFormat, followed by the fields that entry lists."""
   start = cur
   ( keyType, sz ) = readU8( data, cur )
   cur += sz
   peerKeyFormat = bgpPeerKeyFormat.get( keyType, ( 'NULL', ) )
   fields = []
   for fieldKey in peerKeyFormat[ 1: ]:
      rd = QtReader.reader( fieldKey )
      if rd is None:
         print( "Failed to find a reader for key", fieldKey )
         fields.append( fieldKey )
         continue
      value, sz = rd( data, cur )
      cur += sz
      fields.append( value )
   return ( peerKeyFormat[ 0 ].format( *fields ), cur - start )

def readVrfIdPair( data, cur ):
   # A ( source vrfId, destination vrfId ) pair of U32s.
   srcVrfId, srcLen = readU32( data, cur )
   dstVrfId, dstLen = readU32( data, cur + srcLen )
   return ( '(%d-%d)' % ( srcVrfId, dstVrfId ), srcLen + dstLen )

def readRoutingProtocol( data, cur ):
   # Decode the routing protocol enum; unmapped values render numerically.
   # keep this in sync with the enum in IpRibLib.tac, adding any
   # new protos at the end
   names = {
      0 : 'reserved',
      1 : 'connected',
      2 : 'staticConfig',
      3 : 'bgp',
      4 : 'routeInput',
      5 : 'ospf',
      6 : 'ospf3',
      7 : 'isis',
      8 : 'dynamicPolicy',
      9 : 'vrfLeak',
      10 : 'rip',
      11 : 'staticRouteCacheConfig',
   }
   ( proto, off ) = readU8( data, cur )
   return ( names.get( proto, '%d' % proto ), off )

def readRibRouteKey( data, cur ):
   # A rib route key: vrfId followed by a generalized prefix.
   vrfId, vrfLen = readU32( data, cur )
   prefix, prefixLen = readIpGenPrefix( data, cur + vrfLen )
   return ( '(%s, %s)' % ( vrfId, prefix ), vrfLen + prefixLen )

# this one reads the Arnet::AddressFamily enum
def readAf( data, cur ):
   # Decode the address family enum; unmapped values render numerically.
   names = {
      0 : 'unknown',
      1 : 'ipv4',
      2 : 'ipv6',
   }
   ( af, off ) = readU8( data, cur )
   return ( names.get( af, '%d' % af ), off )

def readAfProto( data, cur ):
   # An ( address family, routing protocol ) pair.
   af, afLen = readAf( data, cur )
   proto, protoLen = readRoutingProtocol( data, cur + afLen )
   return ( '(%s,%s)' % ( af, proto ), afLen + protoLen )

def readAfProtoVrfId( data, cur ):
   # An ( address family, routing protocol, vrfId ) triple.
   af, afLen = readAf( data, cur )
   proto, protoLen = readRoutingProtocol( data, cur + afLen )
   vrfId, vrfLen = readU32( data, cur + afLen + protoLen )
   return ( '(%s,%s,%s)' % ( af, proto, vrfId ), afLen + protoLen + vrfLen )

def readLfibViaKey( data, cur ):
   # An Lfib via key: ( source, index, viaType ).
   src, srcLen = readU8( data, cur )
   idx, idxLen = readU32( data, cur + srcLen )
   viaType, viaLen = readU8( data, cur + srcLen + idxLen )
   return ( '(%d,%d,%d)' % ( src, idx, viaType ), srcLen + idxLen + viaLen )

def readMplsRouterId( data, cur ):
   """Decode an Mpls router id: a one-byte protocol tag, then either an
   IS-IS system id (protocol 3) or an ipv4-address:port pair."""
   ( protocol, _ ) = readU8( data, cur )
   if protocol == 3:
      ( rtrId, idLen ) = readIsisSystemId( data, cur + 1 )
   else:
      # The address bytes are already stored big-endian, so readIpAndPort
      # (which assumes little-endian) cannot be used here.
      octets = struct.unpack_from( "BBBB", data, cur + 1 )
      # The 2-byte port is big-endian as well.
      port = struct.unpack_from( "!H", data, cur + 5 )[ 0 ]
      rtrId = "%s.%s.%s.%s:%d" % ( octets + ( port, ) )
      idLen = 6
   # One extra byte for the protocol tag itself.
   return ( "%s" % rtrId, idLen + 1 )

def parseRouteTarget( value ):
   """Render a 64-bit route-target extended community as 'global:local'.

   The top byte of the value selects the encoding:
      0 - 2-byte AS : 4-byte local value
      1 - IPv4 address : 2-byte local value
      2 - 4-byte AS : 2-byte local value; an 'L' is appended to the AS when
          it would also fit in 2 bytes, marking the 4-byte encoding
   Any other type renders as INVALID with the raw value in hex.
   """
   extCommType = value >> 56
   if extCommType == 0:
      # AS(16-bit):nn(32-bit)
      return '%d:%d' % ( ( value >> 32 ) & 0xFFFF, value & 0xFFFFFFFF )
   if extCommType == 1:
      # IP(32-bit):nn(16-bit)
      ip = ( value >> 16 ) & 0xFFFFFFFF
      octets = ( ip >> 24, ( ip >> 16 ) & 0xFF, ( ip >> 8 ) & 0xFF, ip & 0xFF )
      return '%d.%d.%d.%d:%d' % ( octets + ( value & 0xFFFF, ) )
   if extCommType == 2:
      # AS(32-bit):nn(16-bit)
      asn = ( value >> 16 ) & 0xFFFFFFFF
      # for values which could fit into a 2 byte AS, append an 'L' to
      # indicate that this is a 4 byte AS RouteTarget
      suffix = 'L' if asn <= 0xFFFF else ''
      return '%d%s:%d' % ( asn, suffix, value & 0xFFFF )
   # invalid type, just return the data
   return 'INVALID:0x%X' % value

def readRouteTarget( data, cur ):
   # A route target is serialized as a single U64; the rendering is
   # delegated to parseRouteTarget.
   value, consumed = readU64( data, cur )
   return ( parseRouteTarget( value ), consumed )

def readRtMembershipRouteKey( data, cur ):
   # An rt-membership route key: origin AS, route-target value and length.
   originAs, off = readU32( data, cur )
   rtValue, consumed = readU64( data, cur + off )
   off += consumed
   length, consumed = readU8( data, cur + off )
   off += consumed
   rendered = 'origin: %s;%s/%s; value: 0x%016X' % (
         originAs, parseRouteTarget( rtValue ), length, rtValue )
   return ( rendered, off )

def readUdfDesc( data, cur ):
   """Decode a user-defined-field (UDF) descriptor."""
   off = [ 0 ]      # running offset, boxed so the closure can update it
   def field( rd ):
      # Read one value at the running offset, then advance the offset.
      value, consumed = rd( data, cur + off[ 0 ] )
      off[ 0 ] += consumed
      return value
   aclType = field( readString )
   ethType = field( readU16 )
   base = field( readString )
   ipProto = field( readU8 )
   innerEtherType = field( readU16 )
   offset = field( readU8 )
   length = field( readU8 )
   lagHashingMask = field( readU32 )
   priority = field( readU8 )
   rendered = ( '%s ethtype:0x%x %s ipProto:%d innerEthType:0x%x offset:%d length:%d '
         'lagMask:0x%x priority:%d' ) % (
         aclType, ethType, base, ipProto, innerEtherType, offset, length,
         lagHashingMask, priority )
   return ( rendered, off[ 0 ] )

def readIntfEncap( data, cur ):
   # An interface encap: intf id plus outer and inner vlan tags.
   intfId, off = readString( data, cur )
   outerVid, consumed = readU16( data, cur + off )
   off += consumed
   innerVid, consumed = readU16( data, cur + off )
   off += consumed
   return ( '%s(%d,%d)' % ( intfId, outerVid, innerVid ), off )

def readIpFlowKey( data, cur ):
   """Decode an IP flow key: vrf, vlan, addresses, protocol and ports."""
   off = [ 0 ]      # running offset, boxed so the closure can update it
   def field( rd ):
      # Read one value at the running offset, then advance the offset.
      value, consumed = rd( data, cur + off[ 0 ] )
      off[ 0 ] += consumed
      return value
   vrfId = field( readU32 )
   vlanId = field( readU16 )
   srcAddr = field( readIp )
   dstAddr = field( readIp )
   ipProtocolNumber = field( readU8 )
   srcPort = field( readU16 )
   dstPort = field( readU16 )
   rendered = ( "vrfId:%d vlanId:%d srcAddr:%s dstAddr:%s ipProtocol:%d srcPort:%d "
           "dstPort:%d" ) % ( vrfId, vlanId, srcAddr, dstAddr, ipProtocolNumber,
           srcPort, dstPort )
   return ( rendered, off[ 0 ] )

def readHoIpFlowKey( data, cur ):
   # An IP flow key qualified with the input interface.
   flowStr, off = readIpFlowKey( data, cur )
   intf, consumed = readString( data, cur + off )
   return ( "%s intf:%s" % ( flowStr, intf ), off + consumed )
         
# Replacements applied to printf-style specifiers by
# replaceFormatSpecifiers() below; specifiers not listed here pass through
# unchanged.
percentFormatMap = {
   # The sockaddrUnInet is changed to a string.  So %A and %M need to be
   # changed to %s.
   "A": "%s",
   "M": "%s",

   # Memory addresses are treated as numbers.  So %p needs to be changed to
   # a recognized format.
   "p": "%#x",

   # Preserve %%
   "%": "%%",
}
# States for the replaceFormatSpecifiers DFA: outside a specifier, just
# after a '%', and just after a '%#'.
BEGIN = ""
PERCENT = "%"
PERCENT_HASH = "%#"

def replaceFormatSpecifiers( msg ):
   """
   DFA that rewrites certain printf-style specifiers in the input.

   A "%" followed by one character is looked up in percentFormatMap
   ( "%%" is preserved; characters not in the map pass through unchanged ).
   "%#A" is special-cased to "%s" for backward compatibility; any other
   "%#<c>" passes through.  A lone trailing "%" is dropped.

   Examples:
     %%d (no change)
     %A -> %s
     %p -> %#x
     %%#A (no change)
     %#A -> %s
   """
   pieces = []
   state = BEGIN
   for ch in msg:
      if state is BEGIN:
         if ch == "%":
            state = PERCENT
         else:
            pieces.append( ch )
      elif state is PERCENT:
         if ch == "#":
            state = PERCENT_HASH
         else:
            state = BEGIN
            pieces.append( percentFormatMap.get( ch, "%" + ch ) )
      else:
         # state is PERCENT_HASH
         state = BEGIN
         pieces.append( "%s" if ch == "A" else "%#" + ch )
   return "".join( pieces )


# Size of the per-level trailer region; presumably matches the 256-byte
# descriptor slice used in readMessages - TODO confirm against qtparse.
TrailerSize = 256
# One trace-point descriptor as parsed by QtReader.readMessages: tsc,
# source location, message text and its comma-separated field-format string.
Msg = namedtuple( 'Msg','tsc filename line msg format' )
class QtReader(object):
   # Map from the field-format keys that appear in a trace point's format
   # string (Msg.format) to the deserializer decoding that field from the
   # ring buffer.  Every reader takes ( data, cur ) and returns a
   # ( renderedString, bytesConsumed ) pair.
   readers = {
      'u': readU8,
      's': readU16,
      'i': readU32,
      'q': readU64,
      'c': readChar,
      'p': readString,
      'b': readBool,
      'f': readFloat,
      'd': readDouble,
      'I': readIp,
      'E': readEth,
      'P': readIpPrefix,
      'I6': readIp6Addr,
      'P6': readIp6Prefix,
      'IG': readIpGenAddr,
      'PG': readIpGenPrefix,
      'IGM': readIpGenAddrWithFullMask,
      'A': readAfAddr,
      'IP': readIpAndPort,
      'C': readConnTuple,
      'ES': readEthSegment,
      'EL': readEvpnLabel,
      'EL2old': readEvpnL2AttrsWrongEnums,
      'EL2': readEvpnL2Attrs,
      'MK': readMRouteKey,
      'MFK': readMfibRouteKey,
      'MFGK': readMfibGenRouteKey,
      'MF': readMRouteFlags,
      'MIR': readMRouteIntfRptJoinState,
      'MIJ': readMRouteIntfJoinState,
      'ML': readMplsLabel,
      'LO': readMplsLabelOperation,
      'TK': readTunnelKey,
      'TKG': readTunnelKeyGen,
      'BRK': readBgpRouteKey,
      'BMPRK': readBmpRouteKey,
      'AFI': readAfi,
      'SAFI': readSafi,
      'NLRI': readNlriType,
      'RIBINT': readAdjRibinType,
      'BNH': readBgpGenNextHop,
      'AM4': readIpAddrWithMask,
      'AM6': readIp6AddrWithMask,
      'PW': readPseudowireKey,
      'US': readUnsignedSequence,
      'GR': readGenRange,
      'F': readFapmask,
      'EC': readEthColonSeparated,
      'PGKR' : readIpGenUcastKeyRef,
      'DMBK' : readDuidMacBindingKey,
      'BP' : readBfdPeer,
      'GFID' : readGlobalFecId,
      'EM': readEthMask,
      'CV': readCspfVertex,
      'FM' : readMplsFwdEqvClass,
      'IS': readIsisSystemId,
      'IDS': readIdSet,
      'BPK' : readBgpPeerKey,
      'VIDP': readVrfIdPair,
      'RRRP': readRoutingProtocol,
      'RRRK': readRibRouteKey,
      'AF': readAf,
      'VLAFP': readAfProto,
      'VLAFPV': readAfProtoVrfId,
      'LVK': readLfibViaKey,
      'MRTR': readMplsRouterId,
      'IGPN': readIgpNodeId,
      'RT': readRouteTarget,
      'RTMRK' : readRtMembershipRouteKey,
      'UDF' : readUdfDesc,
      'IE' : readIntfEncap,
      'HIFK' : readHoIpFlowKey,
      'IFK' : readIpFlowKey,
   }

   def __init__( self, fp ):
      """Load a qt file (optionally gzipped) and parse its fixed header.

      fp: an open file object; it is read in full and closed here.
      """
      self.msgIdExceededTotalCounters = None
      # Transparently handle gzip-compressed captures.
      if fp.name.endswith( '.gz' ):
         with gzip.open( fp.name ) as f:
            self.data = f.read()
      else:
         self.data = fp.read()

      self.filename = os.path.basename(fp.name)
      fp.close()
      # self.sz is the total size allocated to the circular buffer
      self.version = readU32( self.data, 0 )[0]

      # Version 4 added a per-counter self-time field (tscSelfCount).
      self.hasSelf = self.version >= 4
      self.msgCounterSize = 16 # count(4) + tscHi(4) + tscCount(8)
      if self.hasSelf:
         # count(4) + tscHi(4) + tscCount(8) + tscSelfCount(8)
         self.msgCounterSize = 24

      # Fixed-offset header fields; see printHeader for their meaning.
      self.sz = readU32( self.data, 4 )[0]
      self.fileHeaderSize = readU32( self.data, 8 )[0]
      self.fileTrailerSize = readU32( self.data, 12 )[0]
      self.firstMsgOffset = readU32( self.data, 16 )[0]
      self.logCount = readU32( self.data, 20 )[0]
      # Two ( tsc, monotime ) calibration points plus a utc timestamp;
      # consumed by _ticksPerSecond / ticksToTimeStr for time conversion.
      self.tsc0 = readU64( self.data, 24 )[0]
      self.monotime0 = readDouble( self.data, 32 )[0]
      self.tsc1 = readU64( self.data, 40 )[0]
      self.monotime1 = readDouble( self.data, 48 )[0]
      self.utc1 = readDouble( self.data, 56 )[0]
      if self.version == 1:
         self.logSize = readU32( self.data, 64 )[0]
      elif self.version >= 2:
         # Version 2+ stores a per-level size spec instead of one size.
         self.logSizes = readSizeSpec( self.data, 64 )[0]
      self.ticksPerSecond = self._ticksPerSecond()
      # The profiling counter records fill the rest of the file header.
      self.numMsgCounters = \
         ( self.fileHeaderSize - self.firstMsgOffset ) // self.msgCounterSize

      # Directory used to hold message descriptors indexed by MsgId
      self.msgs = {}
      # If this string is non-empty we have encountered corruption.
      # Raise an exception with this message after emitting all the traces.
      self.corruptionMessage = ""
      self.caughtExceptions = ""
      # Warn the user if they are trying to parse a file format not supported
      # by this version of qtcat
      if self.version > mostRecentVersionSupported:
         warning = ( "The file you are trying to read is version {version}."
                     " This version of qtcat only supports up to file version"
                     " {supported}, so the output may be incorrect. Please use"
                     " a newer version of qtcat to ensure correct output.\n\n" )
         sys.stderr.write( warning.format( version=self.version,
                                           supported=mostRecentVersionSupported ) )

   def printHeader( self ):
      """Print the qt file header fields parsed by __init__.

      Note: the "First Message Offset" label used to be misspelled
      "Fist Message Offset"; fixed here.
      """
      print( "Version:", self.version )
      print( "Size:", self.sz )
      print( "Header Size:", self.fileHeaderSize )
      print( "Trailer Size:", self.fileTrailerSize )
      print( "First Message Offset:", self.firstMsgOffset )
      print( "Profiling counters:", self.numMsgCounters )
      print( "Log Count:", self.logCount )
      print( "tsc0:", self.tsc0 )
      print( "monotime0:", self.monotime0 )
      print( "tsc1:", self.tsc1 )
      print( "monotime1:", self.monotime1 )
      print( "utc1: ", self.utc1 )
      if self.version == 1:
         print( "Log Size:", self.logSize )
      elif self.version >= 2:
         print( "Log Sizes:", self.logSizes )
      else:
         print( "Log Size: (unhandled header version: %u)" % self.version )

   def dumpProfiling( self, verbose, withTsc, brief, parsable, json, selfProfiling ):
      """Print one profiling row per trace point, or accumulate rows into
      the module-global JSON list when json is True.

      The file header holds numMsgCounters fixed-size counter records;
      several MsgIds may alias the same counter slot ( i, i + msgCount,
      i + 2*msgCount, ... ) - the inner while-loop walks those aliases and
      the '*' column marks slots with more than one.
      """
      # must match definition in qtparse
      def addJsonProfilingInfo(qtfile, filename, line, msg, count, avg, total):
         #pylint: disable-msg=E0602
         global JSON
         JSON += [ {
            "qt": qtfile,
            "file": filename,
            "line": line,
            "msg": msg,
            "count": count,
            # avg/total carry a trailing 's' unit suffix in human-readable
            # mode; strip it for the JSON output.
            "average": (avg[:-1].strip() if avg != '-' else 'None'),
            "total": (total[:-1].strip() if total != '-' else 'None')
         } ]
      # Render a count as a ( scaled value, magnitude suffix ) pair,
      # e.g. 2500000 -> ( 2, "Mx" ).
      def int2hr(num, verbose=False):
         if verbose:
            return (num, "")
         elif num < 10000:
            return (num, "x")
         elif num < 10000000:
            return (num // 1000, "Kx")
         elif num < 10000000000:
            return (num // 1000000, "Mx")
         elif num < 10000000000000:
            return (num // 1000000000, "Gx")
         elif num < 10000000000000000:
            return (num // 1000000000000, "Tx")
         else:
            return (num // 1000000000000000, "Px")
      data = self.data
      cur = self.firstMsgOffset
      i = 0
      tps = self.ticksPerSecond
      msgCount = (self.fileHeaderSize - self.firstMsgOffset) // self.msgCounterSize

      # Build the row format incrementally: optional tsc column, optional
      # self-profiling columns, then location and message.
      tscFmt = "%21s " if withTsc else "%s"
      selfProfilingFmt = ( tscFmt + "%14s %14s " ) if selfProfiling else "%s%s%s"
      if parsable:
         headerFmt = "%9s %10s %26s " + tscFmt + "%14s %12s " + selfProfilingFmt + \
                     "%s:%s \"%s\""
         fmt = "%4s%s%4s %10s %10s %15s " + tscFmt + "%14s %12s " + \
               selfProfilingFmt + "%s:%s \"%s\""
      else:
         headerFmt = "%9s %8s %26s " + tscFmt + "%14s %13s " + \
                     selfProfilingFmt + "%s:%s \"%s\""
         fmt = "%4s%s%4s %8s %10s %15s " + tscFmt + "%14s %13s " + \
               selfProfilingFmt + "%s:%s \"%s\""

      avgSelfTime = ""
      totalSelfTime = ""
      tscSelf = ""
      if selfProfiling:
         avgSelfTime = "avgSelfTime"
         totalSelfTime = "totalSelfTime"
         tscSelf = "tscSelf" if withTsc else ""

      if not json:
         print( headerFmt %
                ( "msgCount", "count", "time", "tsc" if withTsc else "",
                  "avgTime", "totalTime", tscSelf, avgSelfTime, totalSelfTime,
                  "filename", "line", "msg" ) )

      # Walk every counter record in the header region.
      while cur <= self.fileHeaderSize - self.msgCounterSize:
         count = readU32( data, cur )[ 0 ]
         # High bit of the second word flags a disabled ("off") trace
         # point; the low 31 bits are tsc high bits used for the time column.
         tscHiEn = readU32( data, cur + 4 )[ 0 ]
         off = bool( tscHiEn & 0x80000000 )
         tscHi = tscHiEn & 0x7fffffff
         dateStr, timeStr = self.ticksToTimeStr( tscHi << 28 )
         tscCount = readU64( data, cur + 8 )[ 0 ]
         tscSelfCount = readU64( data, cur + 16 )[ 0 ] if self.hasSelf else 0
         # NOTE(review): i+1*msgCount is the next MsgId alias of this slot;
         # written with the explicit 1* to parallel the n*msgCount walk below.
         multiple = bool(self.msg(i+1*msgCount))
         cur += self.msgCounterSize
         if count and ( i > 0 or multiple ):
            avgTicks = tscCount/float(count)
            avgTimeStr = ("%10.9f" % (avgTicks/tps)) if tscCount else "-"
            totalTimeStr = ("%12.6f" % (tscCount/tps)) if tscCount else "-"
            if tscCount and not parsable:
               avgTimeStr += "s"
               totalTimeStr += "s"
            magt = int2hr( tscCount )

            if tscSelfCount and selfProfiling:
               avgSelfTicks = tscSelfCount / float( count )
               avgSelfTimeStr = ( "%10.9f" % ( avgSelfTicks / tps ) )
               totalSelfTimeStr = ( "%12.6f" % ( tscSelfCount / tps ) )
               if not parsable:
                  avgSelfTimeStr += "s"
                  totalSelfTimeStr += "s"
               magtSelf = int2hr( tscSelfCount )
            elif selfProfiling:
               avgSelfTimeStr = "-"
               totalSelfTimeStr = "-"
            else:
               avgSelfTimeStr = ""
               totalSelfTimeStr = ""

            tscString = ""
            tscSelfString = ""
            if withTsc:
               tscString = ( "%s%s/%1.2fc" %
                             ( magt[0], magt[1],
                               avgTicks ) ).strip() if tscCount else "-"
               if selfProfiling and tscSelfCount:
                  tscSelfString = ( "%s%s/%1.2fc" %
                                    ( magtSelf[ 0 ], magtSelf[ 1 ],
                                      avgSelfTicks ) ).strip()
               elif selfProfiling:
                  tscSelfString = "-"

            if parsable:
               countStr = ("%10u" % count)
            else:
               countStr = ("%s%s" % int2hr( count ))
            n = 0
            if ( i == 0 ):
               # Slot 0 has no MsgId of its own (ids are 1-based, see msg());
               # start the alias walk at i + msgCount.
               n = 1
               multiple = bool(self.msg(i+2*msgCount))
            # Emit one row per MsgId aliasing this counter slot.
            while True:
               m = self.msg( i + n * msgCount)
               n += 1
               if m:
                  if brief and not tscCount:
                     continue
                  if json:
                     addJsonProfilingInfo(
                        self.filename,
                        m.filename,
                        m.line,
                        m.msg,
                        count,
                        avgTimeStr,
                        totalTimeStr
                     )
                  else:
                     print( fmt %
                      (i+(n-1)*msgCount,
                       "*" if multiple else " ",
                       "off" if off else "",
                       countStr,
                       "%s"%(dateStr),
                       "%s"%(timeStr),
                       tscString,
                       avgTimeStr,
                       totalTimeStr,
                       tscSelfString,
                       avgSelfTimeStr,
                       totalSelfTimeStr,
                       m.filename,
                       m.line,
                       m.msg) )
               elif n:
                  break
         i += 1

      if not selfProfiling:
         print( "\nUse the '-s' option to include self-profiling columns in "
                "the profiling information" )

   def msg( self, i ):
      # Look up the descriptor for MsgId i, or None; ids are 1-based.
      assert i > 0
      return self.msgs.get( i )

   def readMessages( self ):
      """Parse the message-descriptor table that follows the ring buffers.

      Each descriptor is "<tsc> <filename> <line> [<msgId>] " followed by a
      length-prefixed message string and a length-prefixed format string;
      the MsgId field only exists in file version 3+.  Populates self.msgs,
      keyed by MsgId, and records any MsgId exceeding the counter count.
      """
      cur = self.sz + self.fileTrailerSize
      msgCount = 0
      # Raw strings: '\d'/'\s' are regex escapes, not string escapes (the
      # non-raw originals are a SyntaxWarning on modern Python).
      tscFileLineRe = r'(\d+)\s(\S+)\s(\d+)\s'
      if self.version >= 3:
         # Version 3 file format includes the MsgId in the descriptor
         tscFileLineRe += r'(\d+)\s'
      while True:
         # See AID3904 for differences between file versions 2 and 3
         d = self.data[cur:cur+256]
         m = re.match( tscFileLineRe, d )
         if not m:
            break
         msgCount += 1
         cur += len( m.group(0) )
         tsc = int( m.group(1) )
         filename = m.group(2)
         line = int( m.group(3) )
         if self.version >= 3:
            msgId = int( m.group(4) )
         else:
            # Older formats rely on descriptor order for the MsgId.
            msgId = msgCount
         if msgId > self.numMsgCounters:
            # Remember the overflow so callers can warn about it.
            self.msgIdExceededTotalCounters = msgId
         msgLen = toU32( self.data[cur:cur+4] )
         cur += 4
         msg = self.data[cur:cur+msgLen-1] # minus one to skip null
         cur += msgLen
         formatLen = toU32( self.data[cur:cur+4] )
         fmt = self.data[cur+4 : cur+4+formatLen-1] # minus one to skip null
         cur += formatLen+4
         self.msgs[ msgId ] = Msg( tsc, filename, line, msg, fmt )

   @classmethod
   def reader( cls, key ):
      # Look up the deserializer registered for a field-format key, or None.
      return cls.readers.get( key )
         
   def _msgs( self, offset, level ):
      """Decode trace records starting at *offset* until a zero tsc or the
      end of the buffer; return ( [ (tsc, Msg, fields) ... ], cur ).

      Each record is a U64 tsc, a U32 msgid, the serialized fields named
      by the Msg's comma-separated format string, and a one-byte
      backpointer.  If decoding raises, the level is assumed corrupt:
      warning pseudo-messages are appended to the returned list and the
      details are accumulated in self.corruptionMessage and
      self.caughtExceptions so the remaining levels can still be
      processed (the caller raises at the very end).
      """
      msgs = []
      data = self.data
      sz = self.sz
      cur = offset
      try:
         while cur < sz:
            m = None
            tsc = toU64( data[ cur : cur + 8] )
            if tsc == 0:
               # A zero timestamp marks the end of the written region.
               break
            msgid = toU32( data[cur+8:cur+12] )
            cur += 12
            m = self.msg( msgid )
            # msgid can be 0 sometimes. xxx
            fields = []
            if m.format:
               fieldTypes = m.format.split(",")
               for fieldKey in fieldTypes:
                  r = self.reader( fieldKey )
                  if not r:
                     # Unknown field type: keep the key itself as a
                     # placeholder so the message still formats.
                     print( "Failed to find a reader for key", fieldKey )
                     # sys.exit( 1 )
                     fields.append( fieldKey )
                     continue
                  (strep, nBytes) = r( data, cur )
                  cur += nBytes
                  fields.append( strep )
            cur += 1               # skip backpointer
            msgs.append( (tsc, m, fields) )
      except Exception as e:
         # This appends a warning message to the trace output and returns the
         # messages which could be extracted.
         # qtcat will go on to process the rest of the log levels
         # We will raise an exception after processing the entire qt file with the
         # location of the QTRACE statement traced before the corruption

         failedTsc = 0
         prevTrace = ""

         if len( msgs ) > 0:
            # can't trust the current tsc - use the one from the previous trace
            failedTsc = msgs[ -1 ][ 0 ]
            prevTrace = "Level:{0} {1}:{2}, ".format( level,
                                                      msgs[ -1 ][ 1 ].filename,
                                                      msgs[ -1 ][ 1 ].line )
         else:
            prevTrace = " Level:{0} No previous trace, ".format( level )

         currentTrace = ""
         if m:
            # We were able to parse the msg object before the exception
            # Add it to the exception message
            currentTrace = "Trace that was being processed during corruption: "
            currentTrace += "Level:{0} {1}:{2}".format( level, m.filename, m.line )

         # Two synthetic Msg records flag the corruption inline in the
         # trace output, timestamped with the last trusted tsc.
         warningMsg = 30 * "#" + " Warning: Qt file corruption detected in " + \
                      "level " + str( level ) + " " + 30 * "#"
         m = Msg( failedTsc, "", 0, warningMsg, "" )
         msgs.append( ( failedTsc, m, [] ) )

         warningMsg = "#### There may be lost traces between here and" + \
                      " wrap point, or end of trace level - whichever comes" + \
                      " first ( timestamp is approximated )"
         m = Msg( failedTsc, "", 0, warningMsg, "" )
         msgs.append( ( failedTsc, m, [] ) )

         if self.corruptionMessage == "":
            self.corruptionMessage = "Last traces before corruption: "
         self.corruptionMessage += prevTrace
         if currentTrace:
            self.corruptionMessage += "\n" + currentTrace
         self.caughtExceptions += "\n" + type( e ).__name__ + ": " + str( e )

      return (msgs, cur)

   def _findOldest( self, splitPoint, tailPtr ):
      """Walk backwards through the record length bytes starting at
      *tailPtr* and return the offset of the oldest record that is still
      past splitPoint.  A tailPtr of 0 means the log never wrapped, so
      tailPtr is returned unchanged."""
      assert splitPoint > 0
      cur = tailPtr
      if tailPtr > 0:
         # if tailPtr is 0, then we never wrapped, so skip all this
         while True:
            # The byte just before a record holds the previous record's length.
            prevlen = toU8( self.data[ cur - 1 ] )
            candidate = cur - 1 - prevlen
            if candidate <= splitPoint + 8:
               break
            cur = candidate
      return cur

   def logStart( self, i ):
      """Byte offset within the file where trace level *i* begins."""
      assert i < self.logCount
      if self.version == 1:
         # v1 files: every level has the same fixed size.
         return self.fileHeaderSize + i * self.logSize
      if self.version >= 2:
         # v2+ files: per-level sizes, recorded in KiB.
         return self.fileHeaderSize + 1024 * sum( self.logSizes[ :i ] )

   def logEnd( self, i ):
      """Byte offset within the file just past the end of trace level *i*."""
      assert i < self.logCount
      if self.version == 1:
         # v1 files: every level has the same fixed size.
         return self.fileHeaderSize + ( i + 1 ) * self.logSize
      if self.version >= 2:
         # v2+ files: per-level sizes, recorded in KiB.
         return self.fileHeaderSize + 1024 * sum( self.logSizes[ : i + 1 ] )

   def _ticksPerSecond( self ):
      # Figure out how fast the TSC is going in the tracefile by
      # comparing the snapshots of tsc and clock_monotonic taken each
      # time the log wraps.  
      t0 = self.monotime0
      tsc0 = self.tsc0
      t1 = self.monotime1
      tsc1 = self.tsc1
      
      # Now figure out how fast time was going in the tracefile
      tscDelta = tsc1 - tsc0 
      tDelta = t1 - t0
      return float(tscDelta) / tDelta
      
   def ticksToTimeStr( self, ticks ):
      """Convert a raw TSC value to ( date, time ) strings in localtime.
      Returns ( "-", "-" ) when no timestamp was recorded."""
      if not ticks:
         # If no timestamp was recorded, do not display an invalid timestamp.
         return "-", "-"
      # Anchor the conversion on the newest ( tsc, utc ) snapshot.
      secondsAgo = ( self.tsc1 - ticks ) / self.ticksPerSecond
      timeUtc = self.utc1 - secondsAgo
      datePart, timePart = str( datetime.fromtimestamp( timeUtc ) ).split()
      return datePart, timePart

   def messages( self, level=0 ):
      """ Return a generator of tuples corresponding to traces, as follows:
      (localtime of trace, level, time in seconds, tsc, formatted message
      string, Msg descriptor)
      """
      # Message header is:
      #   tail pointer U32
      #   old monotonic time double
      #   old rdtsc U64(8)
      #   new monotonic time double
      data = self.data
      logStart = self.logStart( level )
      if logStart > len( data ):
         # Header claims a log past the end of the file; drop into the
         # debugger rather than guessing.
         pdb()

      tailPtr = toU32( data[ logStart : logStart + 4] )

      # Now figure out how fast the TSC is going in the tracefile by
      # comparing the snapshots of tsc and clock_monotonic taken each
      # time the log wraps.  
      t0 = self.monotime0
      tsc0 = self.tsc0
      t1 = self.monotime1
      tsc1 = self.tsc1
      utc1 = self.utc1
      
      # xxx I need to handle the no-wrap case.

      # Now figure out how fast time was going in the tracefile
      tscDelta = tsc1 - tsc0 
      tDelta = t1 - t0
      if tDelta == 0:
         # No elapsed time between snapshots: nothing recorded here.
         print( "nothing at level", level )
         return
      ticksPerSecond = tscDelta / tDelta
      utc0 = utc1 - tDelta

      # Now we need to read the newest half of the log first, and
      # figure out where it ends
      ( m1, end1 ) = self._msgs( logStart + 4, level ) # skip tailPtr
      end2 = self.logEnd( level )
      
      if (end1 < end2 - TrailerSize) and tailPtr:
         # Now walk backwards from the end of the log until we hit
         # where we just stopped
         oldest = self._findOldest( end1, tailPtr + logStart )

         # this is the oldest part of the log.  Read messages from here
         ( m2, end2 ) = self._msgs( oldest, level )
      else:
         # No wrapped half: the log fits entirely before the tail.
         m2 = []

      # Now print out all the log messages, oldest-to-newest.  The log
      # stores the time in clock_monotonic, but print it in localtime
      # as well.
      firstWrap = False
      firstMessage = True
      dashes = "--------------------------------"
      for m in (m2, m1):
         for (tsc, msg, fields) in m:
            fields = tuple(fields)
            t = tsc / ticksPerSecond
            timeSinceWrap = (tsc - tsc0) / ticksPerSecond
            when = utc0 + timeSinceWrap
            #deltaTicks = tsc - lastTsc
            #lastTsc = tsc
            #timeStr = time.strftime( "%F %T", time.localtime(when) )
            #print "%2s %s (%10.9f, %s): \"%s\"" % \
            #    (timeStr,level,t,("+%s" % deltaTicks) if lastTsc else "-",mstr)
            if firstMessage or firstWrap:
               # Emit a banner pseudo-message before the first trace of
               # the level and again at the wrap point.
               if firstWrap:
                  mstr = "%s %d wrapped here %s" % (dashes, level, dashes)
               else:
                  mstr = "%s level %s first msg %s" % (dashes, level, dashes)
               yield (datetime.fromtimestamp(when), level, t, tsc, mstr, msg )
               firstMessage = False
               firstWrap = False
            
            msg_tmp = replaceFormatSpecifiers(msg.msg)

            try:
               mstr = msg_tmp % fields
            except ( ValueError, TypeError ):
               # Format string and fields disagree; show both verbatim.
               mstr = "%s %% (%s) " % (msg_tmp, ",".join( [str(i) for i in fields] ))
            yield (datetime.fromtimestamp(when), level, t, tsc, mstr, msg)
         if m2:
            firstWrap = True


def info( type_, value, tb ):
   """sys.excepthook replacement: print the traceback for an uncaught
   exception."""
   import traceback
   traceback.print_exception( type_, value, tb )
   # Uncomment below lines to enter pdb on error
   #import pdb as pdb_
   #pdb_.pm()

def expandRange( levelStr ):
   """Expand a range string of single-digit numbers, with optional
   dashes, to a list of integers in the same order they were
   presented.  So '92-583' expands to [9,2,3,4,5,8,3].  Duplicates are
   not removed.  This is used to parse the 'level' argument."""
   levels = []
   pending = None      # digit waiting to be emitted (may open a range)
   rangeOpen = False   # a '-' was seen since the last digit
   for ch in levelStr:
      if ch in "0123456789":
         digit = int( ch )
         if rangeOpen:
            # Close the range: emit pending..digit inclusive.
            levels.extend( range( pending, digit + 1 ) )
            pending = None
         else:
            if pending is not None:
               levels.append( pending )
            pending = digit
         rangeOpen = False
      elif ch == "-":
         rangeOpen = True
      # Any other character is simply ignored

   if pending is not None:
      levels.append( pending )
   return levels

#test = [ "0-9", "02468", "0-59", "", "9", "3-0", "9 8 7 0" ]
#for i in test:
#   print i, expandRange(i)
#sys.exit(0)

def printFile(infile, options):
   """Decode one quicktrace file and print its traces and/or profiling.

   infile  -- open file object containing quicktrace data
   options -- parsed command-line options (see main())

   Raises QtcatCorruptionException after processing if any trace level
   showed signs of corruption.
   """
   what = options.what
   sys.excepthook = info
   qtr = QtReader( infile )
   qtr.readMessages()
   if qtr.msgIdExceededTotalCounters:
      print( "\n\nWARNING: Total msgId exceeded the available counters : %d\n\n"
             % qtr.msgIdExceededTotalCounters )
   if options.header:
      qtr.printHeader()
      # sys.exit, not the site-module exit() helper, which is only
      # guaranteed to exist in interactive sessions.
      sys.exit(0)
   levels = expandRange( options.levels )

   def gencat( sources ):
      # Chain several per-level generators into one message stream.
      for s in sources:
         for i in s:
            yield i

   regexp = options.regexp
   if regexp:
      regexp = re.compile( regexp )
   lastTsc = 0
   t0 = 0
   if options.tsc:
      timestr = '0x%016x'
   else:
      timestr = '%015.9f'
   if options.files:
      fmt = "%s %s " + timestr + ", +%s %s:%s \"%s\""
   else:
      fmt = "%s %s " + timestr + ", +%s \"%s\""
   if not what or 'trace' in what:
      # Merge every requested level, then sort by tsc so output is
      # chronological across levels.
      msgs = gencat(( qtr.messages(i) for i in levels ))
      smsgs = sorted( msgs, key=lambda x: x[3] )
      for (localtime, level, t, tsc, mstr, msg) in smsgs:
         # -r filter matches either the message text or "file:line".
         if regexp and not regexp.search( mstr )  \
                    and not regexp.search( msg.filename+":"+str(msg.line)):
            continue
         if not t0:
            # First displayed message anchors relative timestamps.
            t0 = t
         if options.tsc:
            tdisplay = tsc
         elif options.absolute:
            tdisplay = t
         else:
            tdisplay = t - t0
         timeStr = str(localtime)
         deltaTicks = tsc - lastTsc
         lastTsc = tsc

         args = ((timeStr, level, tdisplay, deltaTicks,
                      msg.filename, msg.line, mstr) if options.files
                     else (timeStr, level, tdisplay, deltaTicks, mstr))
         print( fmt % args )

   if what and 'profile' in what:
      qtr.dumpProfiling( options.verbose, options.tsc, options.brief,
                         options.parsable, options.json, options.selfProfiling )

   if qtr.corruptionMessage != "":
      raise QtcatCorruptionException( qtr.corruptionMessage + \
                                      "\nExceptions which indicated corruption: " + \
                                      qtr.caughtExceptions )

JSON = []

def printFiles(options, args):
   """Decode each named quicktrace file, or stdin when no file (or '-')
   is given.  Multiple files are processed in descending name order.
   With --json, the accumulated JSON output is dumped at the end.
   """
   if not args or args[0] == '-':
      printFile(sys.stdin, options)
   else:
      # sorted() copy: don't mutate the caller's argument list in place.
      files = sorted(args, reverse=True)
      numFiles = len(files)
      for fileName in files:
         # 'with' closes each input promptly instead of leaking handles.
         with open(fileName) as infile:
            if (numFiles > 1):
               if not options.json:
                  print( "file: ", fileName )
            printFile(infile, options)
   # Fix: the stdin path previously returned early and skipped this
   # dump; emit the collected json for both input paths.
   if options.json:
      import json
      print( json.dumps(JSON, indent=2) )

def main():
   """Parse command-line options and decode the given quicktrace files."""
   import optparse

   parser = optparse.OptionParser( description="decode a quicktrace file" )
   parser.add_option('-p', '--profile',
                     dest='what', action='append_const', const='profile',
                     help='dump profiling information')
   parser.add_option('-t', '--trace',
                     dest='what', action='append_const', const='trace',
                     help='dump trace log')
   parser.add_option('--tsc',
                     dest='tsc', action='store_true',
                     help='print timestamp counter values')
   parser.add_option('-v', '--verbose', action='store_true',
                     help='print full packet counts')
   parser.add_option('-f', '--files', action='store_true',
                     help='print file and line number for trace statements')
   parser.add_option('-l', '--levels', action='store', type=str,
                     help='list of levels', default="0-9")
   parser.add_option('-r', '--regexp', action='store', type=str,
                     help='regexp to match on' )
   parser.add_option('--brief',
                     dest='brief', action='store_true',
                     help='print profiling information for function names only')
   parser.add_option('--parsable',
                     dest='parsable', action='store_true',
                     help='print profiling information in parsable format')
   parser.add_option('-a', '--absolute', action='store_true',
                     help="(Obsolete) Print absolute MONOTONIC time", default=False)
   parser.add_option('-H', '--header', action='store_true',
                     help="print qt header information and quit", default=False)
   parser.add_option('-j', '--json', action='store_true', dest='json',
                     help='json format (profiling data only)')
   parser.add_option('-s', '--selfProfiling', action='store_true',
                     help="include self-profiling columns in profiling information",
                     default=False)
   (options, args) = parser.parse_args()

   # The -j/--json option forces the profiling output.  Use a list so
   # options.what has the same type append_const would produce.
   if options.json:
      options.what = [ 'profile' ]

   if options.absolute:
      print( "The timestamp value printed using '-a' or '--absolute' option is "
             "not accurate enough for comparing across different qt files. "
             "Instead of '-a', use '--tsc' option." )
      return

   printFiles(options, args)

if __name__ ==  "__main__":
   try:
      main()
   except (IOError) as error:
      if error.errno == errno.EPIPE:
         # Catching IOError here makes 'qtcat ... | head' not generate an
         # ugly backtrace.
         # pylint: disable-msg=W0212
         os._exit(0)
      raise
   except KeyboardInterrupt:
      # Exit silently on Ctrl-C instead of printing a traceback.
      # pylint: disable-msg=W0212
      os._exit(0)
