This commit is contained in:
insanity 2018-08-12 19:46:46 +09:00
commit f532e6353b
59 changed files with 4086 additions and 0 deletions

15
Gopkg.lock generated Normal file
View File

@ -0,0 +1,15 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/google/gopacket"
packages = ["."]
revision = "11c65f1ca9081dfea43b4f9643f5c155583b73ba"
version = "v1.1.14"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "355ee061c527e073afa8c024489befb54aeaa19e316b5805db73a9fe25c66bd4"
solver-name = "gps-cdcl"
solver-version = 1

34
Gopkg.toml Normal file
View File

@ -0,0 +1,34 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
name = "github.com/google/gopacket"
version = "1.1.14"
[prune]
go-tests = true
unused-packages = true

View File

@ -0,0 +1,16 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaCollectionItem is the meta entity for a collectable item.
// NOTE(review): the "Number" token inside the id tag is not a standard
// encoding/json option and is ignored by the encoder; numeric handling
// comes from the json.Number field type itself.
type MetaCollectionItem struct {
    ID           json.Number     `json:"id,Number,omitempty"`
    Key          string          `json:"key,omitempty"`
    Item         string          `json:"item,omitempty"`
    ItemClass    string          `json:"itemClass,omitempty"`
    MetaItemUnit *MetaItemUnit   `json:"metaItemUnit,omitempty"`
    CreateDate   *util.Timestamp `json:"createDate,omitempty"`
}

View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaCollectionItemMapping is a join entity referencing a
// MetaDisplayItemMapping and a MetaCollectionItem.
type MetaCollectionItemMapping struct {
    ID                     json.Number             `json:"id,Number,omitempty"`
    MetaDisplayItemMapping *MetaDisplayItemMapping `json:"metaDisplayItemMapping,omitempty"`
    MetaCollectionItem     *MetaCollectionItem     `json:"metaCollectionItem,omitempty"`
    CreateDate             *util.Timestamp         `json:"createDate,omitempty"`
}

14
meta/MetaCrawler.go Normal file
View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaCrawler identifies a crawler by numeric ID, display Name, and
// lookup Key.
type MetaCrawler struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaCrawlerContainer identifies a crawler container by numeric ID,
// display Name, and lookup Key.
type MetaCrawlerContainer struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaCrawlerInputItem is a crawler input item, typed by the referenced
// MetaInputType and identified by Key.
type MetaCrawlerInputItem struct {
    ID            json.Number     `json:"id,Number,omitempty"`
    MetaInputType *MetaInputType  `json:"metaInputType,omitempty"`
    Key           string          `json:"key,omitempty"`
    CreateDate    *util.Timestamp `json:"createDate,omitempty"`
}

View File

@ -0,0 +1,16 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaCrawlerMapping joins a MetaTargetType to a MetaCrawler, with a
// DefaultInterval and an IsDefault flag.
// NOTE(review): the ",Number" option in the metaTargetType tag is
// meaningless on a struct-pointer field (and is not a standard
// encoding/json option anyway); encoding/json ignores it.
type MetaCrawlerMapping struct {
    ID              json.Number     `json:"id,Number,omitempty"`
    MetaTargetType  *MetaTargetType `json:"metaTargetType,Number,omitempty"`
    MetaCrawler     *MetaCrawler    `json:"metaCrawler,omitempty"`
    DefaultInterval json.Number     `json:"defaultInterval,omitempty"`
    IsDefault       bool            `json:"isDefault,omitempty"`
    CreateDate      *util.Timestamp `json:"createDate,omitempty"`
}

98
meta/MetaCryptoType.go Normal file
View File

@ -0,0 +1,98 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaCryptoType is the meta entity for a crypto flavor; Key holds one
// of the canonical enum strings defined below.
type MetaCryptoType struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

// MetaCryptoTypeEnum enumerates the known crypto type keys.
// Values start at 1 so the zero value means "unset".
type MetaCryptoTypeEnum int

const (
    MetaCryptoTypeEnumNONE MetaCryptoTypeEnum = iota + 1
    MetaCryptoTypeEnumUNKNOWN
    MetaCryptoTypeEnumAES
    MetaCryptoTypeEnumCIPHER
    MetaCryptoTypeEnumDES
    MetaCryptoTypeEnumDSA
    MetaCryptoTypeEnumECDSA
    MetaCryptoTypeEnumELLIPTIC
    MetaCryptoTypeEnumHMAC
    MetaCryptoTypeEnumMD5
    MetaCryptoTypeEnumRAND
    MetaCryptoTypeEnumRC4
    MetaCryptoTypeEnumRSA
    MetaCryptoTypeEnumSHA1
    MetaCryptoTypeEnumSHA256
    MetaCryptoTypeEnumSUBTLE
    MetaCryptoTypeEnumTLS
    MetaCryptoTypeEnumX509
    MetaCryptoTypeEnumPKIX
)

var (
    // metaCryptoTypeEnumID maps enum values to their wire keys;
    // metaCryptoTypeEnumKey is the hand-maintained inverse. The two
    // tables must stay in sync entry-for-entry.
    metaCryptoTypeEnumID = map[MetaCryptoTypeEnum]string{
        MetaCryptoTypeEnumNONE:     "NONE",
        MetaCryptoTypeEnumUNKNOWN:  "UNKNOWN",
        MetaCryptoTypeEnumAES:      "AES",
        MetaCryptoTypeEnumCIPHER:   "CIPHER",
        MetaCryptoTypeEnumDES:      "DES",
        MetaCryptoTypeEnumDSA:      "DSA",
        MetaCryptoTypeEnumECDSA:    "ECDSA",
        MetaCryptoTypeEnumELLIPTIC: "ELLIPTIC",
        MetaCryptoTypeEnumHMAC:     "HMAC",
        MetaCryptoTypeEnumMD5:      "MD5",
        MetaCryptoTypeEnumRAND:     "RAND",
        MetaCryptoTypeEnumRC4:      "RC4",
        MetaCryptoTypeEnumRSA:      "RSA",
        MetaCryptoTypeEnumSHA1:     "SHA1",
        MetaCryptoTypeEnumSHA256:   "SHA256",
        MetaCryptoTypeEnumSUBTLE:   "SUBTLE",
        MetaCryptoTypeEnumTLS:      "TLS",
        MetaCryptoTypeEnumX509:     "X509",
        MetaCryptoTypeEnumPKIX:     "PKIX",
    }
    metaCryptoTypeEnumKey = map[string]MetaCryptoTypeEnum{
        "NONE":     MetaCryptoTypeEnumNONE,
        "UNKNOWN":  MetaCryptoTypeEnumUNKNOWN,
        "AES":      MetaCryptoTypeEnumAES,
        "CIPHER":   MetaCryptoTypeEnumCIPHER,
        "DES":      MetaCryptoTypeEnumDES,
        "DSA":      MetaCryptoTypeEnumDSA,
        "ECDSA":    MetaCryptoTypeEnumECDSA,
        "ELLIPTIC": MetaCryptoTypeEnumELLIPTIC,
        "HMAC":     MetaCryptoTypeEnumHMAC,
        "MD5":      MetaCryptoTypeEnumMD5,
        "RAND":     MetaCryptoTypeEnumRAND,
        "RC4":      MetaCryptoTypeEnumRC4,
        "RSA":      MetaCryptoTypeEnumRSA,
        "SHA1":     MetaCryptoTypeEnumSHA1,
        "SHA256":   MetaCryptoTypeEnumSHA256,
        "SUBTLE":   MetaCryptoTypeEnumSUBTLE,
        "TLS":      MetaCryptoTypeEnumTLS,
        "X509":     MetaCryptoTypeEnumX509,
        "PKIX":     MetaCryptoTypeEnumPKIX,
    }
)

// String returns the wire key for e, or "" for an unknown value.
func (e MetaCryptoTypeEnum) String() string {
    return metaCryptoTypeEnumID[e]
}

// ToMetaCryptoTypeEnum resolves v's Key to its enum value
// (0 when the key is unrecognized).
func ToMetaCryptoTypeEnum(v *MetaCryptoType) MetaCryptoTypeEnum {
    return metaCryptoTypeEnumKey[v.Key]
}

// ToMetaCryptoType builds a MetaCryptoType carrying only the wire Key
// for v; the other fields are left zero.
func ToMetaCryptoType(v MetaCryptoTypeEnum) *MetaCryptoType {
    return &MetaCryptoType{
        Key: metaCryptoTypeEnumID[v],
    }
}

14
meta/MetaDisplayItem.go Normal file
View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaDisplayItem identifies a display item by numeric ID, display
// Name, and lookup Key.
type MetaDisplayItem struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaDisplayItemCategory identifies a display-item category by numeric
// ID, display Name, and lookup Key.
type MetaDisplayItemCategory struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

View File

@ -0,0 +1,20 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaDisplayItemMapping joins a MetaDisplayItem to a
// MetaCrawlerMapping, together with its unit, category, display
// Formula, Priority, and IsDefault/IsRequired flags.
type MetaDisplayItemMapping struct {
    ID                      json.Number              `json:"id,Number,omitempty"`
    MetaDisplayItem         *MetaDisplayItem         `json:"metaDisplayItem,omitempty"`
    MetaCrawlerMapping      *MetaCrawlerMapping      `json:"metaCrawlerMapping,omitempty"`
    MetaItemUnit            *MetaItemUnit            `json:"metaItemUnit,omitempty"`
    MetaDisplayItemCategory *MetaDisplayItemCategory `json:"metaDisplayItemCategory,omitempty"`
    IsDefault               bool                     `json:"isDefault,omitempty"`
    Formula                 string                   `json:"formula,omitempty"`
    Priority                json.Number              `json:"priority,omitempty"`
    IsRequired              bool                     `json:"isRequired,omitempty"`
    CreateDate              *util.Timestamp          `json:"createDate,omitempty"`
}

14
meta/MetaHistoryType.go Normal file
View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaHistoryType identifies a history-entry type by numeric ID,
// display Name, and lookup Key.
type MetaHistoryType struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

47
meta/MetaIPType.go Normal file
View File

@ -0,0 +1,47 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaIPType is the meta entity for an IP address family; Key holds
// "V4" or "V6" per the enum below.
type MetaIPType struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

// MetaIPTypeEnum enumerates IP families; values start at 1 so the zero
// value means "unset".
type MetaIPTypeEnum int

const (
    MetaIPTypeEnumV4 MetaIPTypeEnum = iota + 1
    MetaIPTypeEnumV6
)

var (
    // Forward and inverse lookup tables; must stay in sync.
    metaIPTypeEnumID = map[MetaIPTypeEnum]string{
        MetaIPTypeEnumV4: "V4",
        MetaIPTypeEnumV6: "V6",
    }
    metaIPTypeEnumKey = map[string]MetaIPTypeEnum{
        "V4": MetaIPTypeEnumV4,
        "V6": MetaIPTypeEnumV6,
    }
)

// String returns the wire key for e, or "" for an unknown value.
func (e MetaIPTypeEnum) String() string {
    return metaIPTypeEnumID[e]
}

// ToMetaIPTypeEnum resolves v's Key to its enum value (0 if unknown).
func ToMetaIPTypeEnum(v *MetaIPType) MetaIPTypeEnum {
    return metaIPTypeEnumKey[v.Key]
}

// ToMetaIPType builds a MetaIPType carrying only the wire Key for v.
func ToMetaIPType(v MetaIPTypeEnum) *MetaIPType {
    return &MetaIPType{
        Key: metaIPTypeEnumID[v],
    }
}

50
meta/MetaInfraType.go Normal file
View File

@ -0,0 +1,50 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaInfraType is the meta entity for an infrastructure level; Key
// holds "ZONE", "HOST", or "SERVICE" per the enum below.
type MetaInfraType struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Key        string          `json:"key,omitempty"`
    Name       string          `json:"name,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

// MetaInfraTypeEnum enumerates infrastructure levels; values start at 1
// so the zero value means "unset".
type MetaInfraTypeEnum int

const (
    MetaInfraTypeEnumZONE MetaInfraTypeEnum = iota + 1
    MetaInfraTypeEnumHOST
    // NOTE(review): naming breaks the all-caps convention of its
    // siblings (should be ...EnumSERVICE); renaming would break callers,
    // so it is left as-is here.
    MetaInfraTypeEnumService
)

var (
    // Forward and inverse lookup tables; must stay in sync.
    metaInfraTypeEnumID = map[MetaInfraTypeEnum]string{
        MetaInfraTypeEnumZONE:    "ZONE",
        MetaInfraTypeEnumHOST:    "HOST",
        MetaInfraTypeEnumService: "SERVICE",
    }
    metaInfraTypeEnumKey = map[string]MetaInfraTypeEnum{
        "ZONE":    MetaInfraTypeEnumZONE,
        "HOST":    MetaInfraTypeEnumHOST,
        "SERVICE": MetaInfraTypeEnumService,
    }
)

// String returns the wire key for e, or "" for an unknown value.
func (e MetaInfraTypeEnum) String() string {
    return metaInfraTypeEnumID[e]
}

// ToMetaInfraTypeEnum resolves v's Key to its enum value (0 if unknown).
func ToMetaInfraTypeEnum(v *MetaInfraType) MetaInfraTypeEnum {
    return metaInfraTypeEnumKey[v.Key]
}

// ToMetaInfraType builds a MetaInfraType carrying only the wire Key
// for v.
func ToMetaInfraType(v MetaInfraTypeEnum) *MetaInfraType {
    return &MetaInfraType{
        Key: metaInfraTypeEnumID[v],
    }
}

14
meta/MetaInputType.go Normal file
View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaInputType identifies an input type by numeric ID, display Name,
// and lookup Key.
type MetaInputType struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

15
meta/MetaItemUnit.go Normal file
View File

@ -0,0 +1,15 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaItemUnit describes a measurement unit (Unit) and its display
// symbol (Mark), addressed by Key.
type MetaItemUnit struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Key        string          `json:"key,omitempty"`
    Unit       string          `json:"unit,omitempty"`
    Mark       string          `json:"mark,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

53
meta/MetaMemberStatus.go Normal file
View File

@ -0,0 +1,53 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaMemberStatus is the meta entity for a member account status; Key
// holds one of the enum strings below.
type MetaMemberStatus struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

// MetaMemberStatusEnum enumerates member statuses; values start at 1 so
// the zero value means "unset".
type MetaMemberStatusEnum int

const (
    MetaMemberStatusEnumNOAUTH MetaMemberStatusEnum = iota + 1
    MetaMemberStatusEnumNORMAL
    MetaMemberStatusEnumDORMANCY
    MetaMemberStatusEnumWITHDRAWAL
)

var (
    // Forward and inverse lookup tables; must stay in sync.
    metaMemberStatusEnumID = map[MetaMemberStatusEnum]string{
        MetaMemberStatusEnumNOAUTH:     "NOAUTH",
        MetaMemberStatusEnumNORMAL:     "NORMAL",
        MetaMemberStatusEnumDORMANCY:   "DORMANCY",
        MetaMemberStatusEnumWITHDRAWAL: "WITHDRAWAL",
    }
    metaMemberStatusEnumKey = map[string]MetaMemberStatusEnum{
        "NOAUTH":     MetaMemberStatusEnumNOAUTH,
        "NORMAL":     MetaMemberStatusEnumNORMAL,
        "DORMANCY":   MetaMemberStatusEnumDORMANCY,
        "WITHDRAWAL": MetaMemberStatusEnumWITHDRAWAL,
    }
)

// String returns the wire key for e, or "" for an unknown value.
func (e MetaMemberStatusEnum) String() string {
    return metaMemberStatusEnumID[e]
}

// ToMetaMemberStatusEnum resolves v's Key to its enum value
// (0 if unknown).
func ToMetaMemberStatusEnum(v *MetaMemberStatus) MetaMemberStatusEnum {
    return metaMemberStatusEnumKey[v.Key]
}

// ToMetaMemberStatus builds a MetaMemberStatus carrying only the wire
// Key for v.
func ToMetaMemberStatus(v MetaMemberStatusEnum) *MetaMemberStatus {
    return &MetaMemberStatus{
        Key: metaMemberStatusEnumID[v],
    }
}

View File

@ -0,0 +1,50 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaNoAuthProbeStatus is the meta entity for an unauthenticated
// probe's acceptance state; Key holds one of the enum strings below.
type MetaNoAuthProbeStatus struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Key        string          `json:"key,omitempty"`
    Name       string          `json:"name,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

// MetaNoAuthProbeStatusEnum enumerates the states; values start at 1 so
// the zero value means "unset".
type MetaNoAuthProbeStatusEnum int

const (
    MetaNoAuthProbeStatusEnumACCEPT MetaNoAuthProbeStatusEnum = iota + 1
    MetaNoAuthProbeStatusEnumDENY
    MetaNoAuthProbeStatusEnumPROCESSING
)

var (
    // Forward and inverse lookup tables; must stay in sync.
    metaNoAuthProbeStatusEnumID = map[MetaNoAuthProbeStatusEnum]string{
        MetaNoAuthProbeStatusEnumACCEPT:     "ACCEPT",
        MetaNoAuthProbeStatusEnumDENY:       "DENY",
        MetaNoAuthProbeStatusEnumPROCESSING: "PROCESSING",
    }
    metaNoAuthProbeStatusEnumKey = map[string]MetaNoAuthProbeStatusEnum{
        "ACCEPT":     MetaNoAuthProbeStatusEnumACCEPT,
        "DENY":       MetaNoAuthProbeStatusEnumDENY,
        "PROCESSING": MetaNoAuthProbeStatusEnumPROCESSING,
    }
)

// String returns the wire key for e, or "" for an unknown value.
func (e MetaNoAuthProbeStatusEnum) String() string {
    return metaNoAuthProbeStatusEnumID[e]
}

// ToMetaNoAuthProbeStatusEnum resolves v's Key to its enum value
// (0 if unknown).
func ToMetaNoAuthProbeStatusEnum(v *MetaNoAuthProbeStatus) MetaNoAuthProbeStatusEnum {
    return metaNoAuthProbeStatusEnumKey[v.Key]
}

// ToMetaNoAuthProbeStatus builds a MetaNoAuthProbeStatus carrying only
// the wire Key for v.
func ToMetaNoAuthProbeStatus(v MetaNoAuthProbeStatusEnum) *MetaNoAuthProbeStatus {
    return &MetaNoAuthProbeStatus{
        Key: metaNoAuthProbeStatusEnumID[v],
    }
}

47
meta/MetaPortType.go Normal file
View File

@ -0,0 +1,47 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaPortType is the meta entity for a transport protocol; Key holds
// "TCP" or "UDP" per the enum below.
type MetaPortType struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

// MetaPortTypeEnum enumerates transports; values start at 1 so the zero
// value means "unset".
type MetaPortTypeEnum int

const (
    MetaPortTypeEnumTCP MetaPortTypeEnum = iota + 1
    MetaPortTypeEnumUDP
)

var (
    // Forward and inverse lookup tables; must stay in sync.
    metaPortTypeEnumID = map[MetaPortTypeEnum]string{
        MetaPortTypeEnumTCP: "TCP",
        MetaPortTypeEnumUDP: "UDP",
    }
    metaPortTypeEnumKey = map[string]MetaPortTypeEnum{
        "TCP": MetaPortTypeEnumTCP,
        "UDP": MetaPortTypeEnumUDP,
    }
)

// String returns the wire key for e, or "" for an unknown value.
func (e MetaPortTypeEnum) String() string {
    return metaPortTypeEnumID[e]
}

// ToMetaPortTypeEnum resolves v's Key to its enum value (0 if unknown).
func ToMetaPortTypeEnum(v *MetaPortType) MetaPortTypeEnum {
    return metaPortTypeEnumKey[v.Key]
}

// ToMetaPortType builds a MetaPortType carrying only the wire Key for v.
func ToMetaPortType(v MetaPortTypeEnum) *MetaPortType {
    return &MetaPortType{
        Key: metaPortTypeEnumID[v],
    }
}

47
meta/MetaProbeStatus.go Normal file
View File

@ -0,0 +1,47 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaProbeStatus is the meta entity for a probe's lifecycle status;
// Key holds "INITIAL" or "NORMAL" per the enum below.
type MetaProbeStatus struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

// MetaProbeStatusEnum enumerates probe statuses; values start at 1 so
// the zero value means "unset".
type MetaProbeStatusEnum int

const (
    MetaProbeStatusEnumINITIAL MetaProbeStatusEnum = iota + 1
    MetaProbeStatusEnumNORMAL
)

var (
    // Forward and inverse lookup tables; must stay in sync.
    metaProbeStatusEnumID = map[MetaProbeStatusEnum]string{
        MetaProbeStatusEnumINITIAL: "INITIAL",
        MetaProbeStatusEnumNORMAL:  "NORMAL",
    }
    metaProbeStatusEnumKey = map[string]MetaProbeStatusEnum{
        "INITIAL": MetaProbeStatusEnumINITIAL,
        "NORMAL":  MetaProbeStatusEnumNORMAL,
    }
)

// String returns the wire key for e, or "" for an unknown value.
func (e MetaProbeStatusEnum) String() string {
    return metaProbeStatusEnumID[e]
}

// ToMetaProbeStatusEnum resolves v's Key to its enum value
// (0 if unknown).
func ToMetaProbeStatusEnum(v *MetaProbeStatus) MetaProbeStatusEnum {
    return metaProbeStatusEnumKey[v.Key]
}

// ToMetaProbeStatus builds a MetaProbeStatus carrying only the wire Key
// for v.
func ToMetaProbeStatus(v MetaProbeStatusEnum) *MetaProbeStatus {
    return &MetaProbeStatus{
        Key: metaProbeStatusEnumID[v],
    }
}

View File

@ -0,0 +1,18 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaSensorDisplayItem describes a sensor display item tied to a
// MetaCrawler, with its unit and item type.
type MetaSensorDisplayItem struct {
    ID                 json.Number         `json:"id,Number,omitempty"`
    Key                string              `json:"key,omitempty"`
    Name               string              `json:"name,omitempty"`
    IsDefault          bool                `json:"isDefault,omitempty"`
    MetaCrawler        *MetaCrawler        `json:"metaCrawler,omitempty"`
    MetaSensorItemUnit *MetaSensorItemUnit `json:"metaSensorItemUnit,omitempty"`
    MetaSensorItemType *MetaSensorItemType `json:"metaSensorItemType,omitempty"`
    CreateDate         *util.Timestamp     `json:"createDate,omitempty"`
}

14
meta/MetaSensorItem.go Normal file
View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaSensorItem identifies a sensor item by numeric ID, lookup Key,
// and display Name.
type MetaSensorItem struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Key        string          `json:"key,omitempty"`
    Name       string          `json:"name,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

19
meta/MetaSensorItemKey.go Normal file
View File

@ -0,0 +1,19 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaSensorItemKey describes one collectable key of a MetaSensorItem
// for a given MetaCrawler, with its unit and free-form Froms/Option
// strings (semantics not visible in this file — confirm with consumers).
type MetaSensorItemKey struct {
    ID                 json.Number         `json:"id,Number,omitempty"`
    Key                string              `json:"key,omitempty"`
    Name               string              `json:"name,omitempty"`
    Froms              string              `json:"froms,omitempty"`
    Option             string              `json:"option,omitempty"`
    MetaSensorItem     *MetaSensorItem     `json:"metaSensorItem,omitempty"`
    MetaCrawler        *MetaCrawler        `json:"metaCrawler,omitempty"`
    MetaSensorItemUnit *MetaSensorItemUnit `json:"metaSensorItemUnit,omitempty"`
    CreateDate         *util.Timestamp     `json:"createDate,omitempty"`
}

View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaSensorItemType identifies a sensor item type by numeric ID,
// display Name, and lookup Key.
type MetaSensorItemType struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Name       string          `json:"name,omitempty"`
    Key        string          `json:"key,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

View File

@ -0,0 +1,15 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaSensorItemUnit describes a sensor measurement unit (Unit) and its
// display symbol (Mark), addressed by Key. Mirrors MetaItemUnit.
type MetaSensorItemUnit struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Key        string          `json:"key,omitempty"`
    Unit       string          `json:"unit,omitempty"`
    Mark       string          `json:"mark,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

14
meta/MetaSensorStatus.go Normal file
View File

@ -0,0 +1,14 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaSensorStatus identifies a sensor status by numeric ID, lookup
// Key, and display Name.
type MetaSensorStatus struct {
    ID         json.Number     `json:"id,Number,omitempty"`
    Key        string          `json:"key,omitempty"`
    Name       string          `json:"name,omitempty"`
    CreateDate *util.Timestamp `json:"createDate,omitempty"`
}

View File

@ -0,0 +1,36 @@
package meta
// MetaTargetHostType is the host flavor of MetaTargetType.
type MetaTargetHostType struct {
    MetaTargetType
}

// MetaTargetHostTypeEnum enumerates host target kinds; values start at
// 1 so the zero value means "unset".
type MetaTargetHostTypeEnum int

const (
    MetaTargetHostTypeEnumUNKNOWN MetaTargetHostTypeEnum = iota + 1
)

var (
    // Forward and inverse lookup tables; must stay in sync.
    metaTargetHostTypeEnumID = map[MetaTargetHostTypeEnum]string{
        MetaTargetHostTypeEnumUNKNOWN: "UNKNOWN",
    }
    metaTargetHostTypeEnumKey = map[string]MetaTargetHostTypeEnum{
        "UNKNOWN": MetaTargetHostTypeEnumUNKNOWN,
    }
)

// String returns the wire key for e, or "" for an unknown value.
func (e MetaTargetHostTypeEnum) String() string {
    return metaTargetHostTypeEnumID[e]
}

// ToMetaTargetHostTypeEnum resolves v's embedded Key to its enum value
// (0 if unknown).
func ToMetaTargetHostTypeEnum(v *MetaTargetHostType) MetaTargetHostTypeEnum {
    return metaTargetHostTypeEnumKey[v.Key]
}

// ToMetaTargetHostType builds a MetaTargetHostType whose embedded Key
// carries the wire form of v; all other fields stay zero.
func ToMetaTargetHostType(v MetaTargetHostTypeEnum) *MetaTargetHostType {
    return &MetaTargetHostType{
        MetaTargetType: MetaTargetType{Key: metaTargetHostTypeEnumID[v]},
    }
}

View File

@ -0,0 +1,36 @@
package meta
// MetaTargetServiceType is the service flavor of MetaTargetType.
type MetaTargetServiceType struct {
    MetaTargetType
}

// MetaTargetServiceTypeEnum enumerates service target kinds; values
// start at 1 so the zero value means "unset".
type MetaTargetServiceTypeEnum int

const (
    MetaTargetServiceTypeEnumUNKNOWN MetaTargetServiceTypeEnum = iota + 1
)

var (
    // Forward and inverse lookup tables; must stay in sync.
    metaTargetServiceTypeEnumID = map[MetaTargetServiceTypeEnum]string{
        MetaTargetServiceTypeEnumUNKNOWN: "UNKNOWN",
    }
    metaTargetServiceTypeEnumKey = map[string]MetaTargetServiceTypeEnum{
        "UNKNOWN": MetaTargetServiceTypeEnumUNKNOWN,
    }
)

// String returns the wire key for e, or "" for an unknown value.
func (e MetaTargetServiceTypeEnum) String() string {
    return metaTargetServiceTypeEnumID[e]
}

// ToMetaTargetServiceTypeEnum resolves v's embedded Key to its enum
// value (0 if unknown).
func ToMetaTargetServiceTypeEnum(v *MetaTargetServiceType) MetaTargetServiceTypeEnum {
    return metaTargetServiceTypeEnumKey[v.Key]
}

// ToMetaTargetServiceType builds a MetaTargetServiceType whose embedded
// Key carries the wire form of v; all other fields stay zero.
func ToMetaTargetServiceType(v MetaTargetServiceTypeEnum) *MetaTargetServiceType {
    return &MetaTargetServiceType{
        MetaTargetType: MetaTargetType{Key: metaTargetServiceTypeEnumID[v]},
    }
}

16
meta/MetaTargetType.go Normal file
View File

@ -0,0 +1,16 @@
package meta
import (
"encoding/json"
"git.loafle.net/overflow/model/util"
)
// MetaTargetType describes a target kind within a MetaInfraType level;
// IsSupported presumably flags whether monitoring of this type is
// available — confirm with consumers.
type MetaTargetType struct {
    ID            json.Number     `json:"id,Number,omitempty"`
    MetaInfraType *MetaInfraType  `json:"metaInfraType,omitempty"`
    Key           string          `json:"key,omitempty"`
    Name          string          `json:"name,omitempty"`
    IsSupported   bool            `json:"isSupported,omitempty"`
    CreateDate    *util.Timestamp `json:"createDate,omitempty"`
}

View File

@ -0,0 +1,39 @@
package meta
// MetaTargetZoneType is the zone flavor of MetaTargetType.
type MetaTargetZoneType struct {
    MetaTargetType
}

// MetaTargetZoneTypeEnum enumerates zone target kinds; values start at
// 1 so the zero value means "unset".
type MetaTargetZoneTypeEnum int

const (
    MetaTargetZoneTypeEnumUNKNOWN MetaTargetZoneTypeEnum = iota + 1
    MetaTargetZoneTypeEnumZONE
)

var (
    // Forward and inverse lookup tables; must stay in sync.
    metaTargetZoneTypeEnumID = map[MetaTargetZoneTypeEnum]string{
        MetaTargetZoneTypeEnumUNKNOWN: "UNKNOWN",
        MetaTargetZoneTypeEnumZONE:    "ZONE",
    }
    metaTargetZoneTypeEnumKey = map[string]MetaTargetZoneTypeEnum{
        "UNKNOWN": MetaTargetZoneTypeEnumUNKNOWN,
        "ZONE":    MetaTargetZoneTypeEnumZONE,
    }
)

// String returns the wire key for e, or "" for an unknown value.
func (e MetaTargetZoneTypeEnum) String() string {
    return metaTargetZoneTypeEnumID[e]
}

// ToMetaTargetZoneTypeEnum resolves v's embedded Key to its enum value
// (0 if unknown).
func ToMetaTargetZoneTypeEnum(v *MetaTargetZoneType) MetaTargetZoneTypeEnum {
    return metaTargetZoneTypeEnumKey[v.Key]
}

// ToMetaTargetZoneType builds a MetaTargetZoneType whose embedded Key
// carries the wire form of v; all other fields stay zero.
func ToMetaTargetZoneType(v MetaTargetZoneTypeEnum) *MetaTargetZoneType {
    return &MetaTargetZoneType{
        MetaTargetType: MetaTargetType{Key: metaTargetZoneTypeEnumID[v]},
    }
}

16
scan/DiscoverHost.go Normal file
View File

@ -0,0 +1,16 @@
package discovery
import (
"git.loafle.net/overflow/model/meta"
)
// DiscoverHost configures host discovery over the scan range
// [FirstScanRange, LastScanRange], with explicit exclude/include lists
// and an optional nested port-discovery config.
// NOTE(review): these files live under scan/ but declare package
// discovery — confirm the intended import path.
type DiscoverHost struct {
    MetaIPType     *meta.MetaIPType `json:"metaIPType,omitempty"`
    FirstScanRange string           `json:"firstScanRange,omitempty"`
    LastScanRange  string           `json:"lastScanRange,omitempty"`
    ExcludeHosts   []string         `json:"excludeHosts,omitempty"`
    IncludeHosts   []string         `json:"includeHosts,omitempty"`
    DiscoverPort   *DiscoverPort    `json:"discoverPort,omitempty"`
}

28
scan/DiscoverPort.go Normal file
View File

@ -0,0 +1,28 @@
package discovery
// DiscoverPort configures port discovery: an inclusive numeric range,
// an exclusion list, transport flags, and an optional nested
// service-discovery config.
// NOTE(review): with omitempty, an unset range is 0..0 — Contains then
// rejects every positive port; confirm callers always set the range.
type DiscoverPort struct {
    FirstScanRange  int              `json:"firstScanRange,omitempty"`
    LastScanRange   int              `json:"lastScanRange,omitempty"`
    ExcludePorts    []int            `json:"excludePorts,omitempty"`
    IncludeTCP      bool             `json:"includeTCP,omitempty"`
    IncludeUDP      bool             `json:"includeUDP,omitempty"`
    DiscoverService *DiscoverService `json:"discoverService,omitempty"`
}
// Contains reports whether port lies inside the inclusive scan range
// [FirstScanRange, LastScanRange] and is not listed in ExcludePorts.
func (dp *DiscoverPort) Contains(port int) bool {
    if port < dp.FirstScanRange || port > dp.LastScanRange {
        return false
    }
    for _, excluded := range dp.ExcludePorts {
        if excluded == port {
            return false
        }
    }
    return true
}

5
scan/DiscoverService.go Normal file
View File

@ -0,0 +1,5 @@
package discovery
// DiscoverService configures service discovery as a whitelist of
// service names to probe for.
type DiscoverService struct {
    IncludeServices []string `json:"includeServices,omitempty"`
}

7
scan/DiscoverZone.go Normal file
View File

@ -0,0 +1,7 @@
package discovery
// DiscoverZone configures zone discovery: exclusion patterns plus an
// optional nested host-discovery config.
type DiscoverZone struct {
    ExcludePatterns []string      `json:"excludePatterns,omitempty"`
    DiscoverHost    *DiscoverHost `json:"discoverHost,omitempty"`
}

17
scan/Host.go Normal file
View File

@ -0,0 +1,17 @@
package discovery
import (
"git.loafle.net/overflow/model/meta"
"git.loafle.net/overflow/model/util"
)
// Host is a discovered host: its address family, Address, Mac, owning
// Zone, discovered ports, and discovery timestamp.
type Host struct {
    MetaIPType     *meta.MetaIPType `json:"metaIPType,omitempty"`
    Address        string           `json:"address,omitempty"`
    Mac            string           `json:"mac,omitempty"`
    Zone           *Zone            `json:"zone,omitempty"`
    PortList       []*Port          `json:"portList,omitempty"`
    DiscoveredDate *util.Timestamp  `json:"discoveredDate,omitempty"`
}

20
scan/Port.go Normal file
View File

@ -0,0 +1,20 @@
package discovery
import (
"encoding/json"
"git.loafle.net/overflow/model/meta"
"git.loafle.net/overflow/model/util"
"github.com/google/gopacket"
)
// Port is a discovered port on a Host, with its transport type, the
// services found behind it, and the discovery timestamp.
type Port struct {
    MetaPortType   *meta.MetaPortType `json:"metaPortType,omitempty"`
    PortNumber     json.Number        `json:"portNumber,omitempty"`
    DiscoveredDate *util.Timestamp    `json:"discoveredDate,omitempty"`
    Host           *Host              `json:"host,omitempty"`
    ServiceList    []*Service         `json:"serviceList,omitempty"`
    // UDPLayer holds a captured gopacket layer; excluded from JSON
    // via the "-" tag.
    UDPLayer gopacket.Layer `json:"-"`
}

16
scan/Service.go Normal file
View File

@ -0,0 +1,16 @@
package discovery
import (
"git.loafle.net/overflow/model/meta"
"git.loafle.net/overflow/model/util"
)
// Service is a discovered service behind a Port, identified by Key and
// annotated with its crypto type and discovery timestamp.
type Service struct {
    MetaCryptoType *meta.MetaCryptoType `json:"metaCryptoType,omitempty"`
    Key            string               `json:"key,omitempty"`
    Description    string               `json:"description,omitempty"`
    Port           *Port                `json:"port,omitempty"`
    DiscoveredDate *util.Timestamp      `json:"discoveredDate,omitempty"`
}

20
scan/Zone.go Normal file
View File

@ -0,0 +1,20 @@
package discovery
import (
"sync"
"git.loafle.net/overflow/model/meta"
"git.loafle.net/overflow/model/util"
)
// Zone is a discovered network zone: the network/interface it was seen
// on plus the local Address and Mac.
// NOTE(review): Zone embeds a mutex and therefore must not be copied —
// always pass *Zone.
type Zone struct {
    Network        string           `json:"network,omitempty"`
    Iface          string           `json:"iface,omitempty"`
    MetaIPType     *meta.MetaIPType `json:"metaIPType,omitempty"`
    Address        string           `json:"address,omitempty"`
    Mac            string           `json:"mac,omitempty"`
    DiscoveredDate *util.Timestamp  `json:"discoveredDate,omitempty"`
    // mtx is an internal lock (what it guards is not visible in this
    // file). Being unexported it is never marshaled, so the json:"-"
    // tag is redundant but harmless.
    mtx sync.RWMutex `json:"-"`
}

42
util/timestamp.go Normal file
View File

@ -0,0 +1,42 @@
package util
import (
"fmt"
"strconv"
"time"
)
// Timestamp is a time.Time that travels over JSON as an integer count
// of milliseconds since the Unix epoch.
type Timestamp time.Time

// MarshalJSON encodes t as epoch milliseconds. Millisecond precision is
// preserved (the previous implementation truncated to whole seconds
// before scaling by 1000, silently dropping the sub-second part).
func (t Timestamp) MarshalJSON() ([]byte, error) {
    ms := time.Time(t).UnixNano() / int64(time.Millisecond)
    return []byte(fmt.Sprint(ms)), nil
}

// UnmarshalJSON decodes an epoch-milliseconds integer into t.
// It parses explicitly into int64: strconv.Atoi yields a platform-sized
// int, which overflows on 32-bit builds for any present-day value
// (current epoch millis already exceed 2^31). The sub-second remainder
// is preserved instead of being discarded.
func (t *Timestamp) UnmarshalJSON(b []byte) error {
    ms, err := strconv.ParseInt(string(b), 10, 64)
    if err != nil {
        return err
    }
    *t = Timestamp(time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond)))
    return nil
}

// String renders the timestamp using time.Time's default formatting.
func (t Timestamp) String() string {
    return time.Time(t).String()
}

// Now returns the current instant as a Timestamp.
func Now() Timestamp {
    return Timestamp(time.Now())
}

// NowPtr returns the current instant as a *Timestamp, convenient for
// the model structs whose date fields are pointers.
func NowPtr() *Timestamp {
    n := Now()
    return &n
}

// Date returns midnight UTC of the given calendar day as a Timestamp.
func Date(year int, month time.Month, day int) Timestamp {
    return Timestamp(time.Date(year, month, day, 0, 0, 0, 0, time.UTC))
}

38
vendor/github.com/google/gopacket/.gitignore generated vendored Normal file
View File

@ -0,0 +1,38 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
#*
*~
# examples binaries
examples/synscan/synscan
examples/pfdump/pfdump
examples/pcapdump/pcapdump
examples/httpassembly/httpassembly
examples/statsassembly/statsassembly
examples/arpscan/arpscan
examples/bidirectional/bidirectional
examples/bytediff/bytediff
examples/reassemblydump/reassemblydump
layers/gen
macs/gen
pcap/pcap_tester

7
vendor/github.com/google/gopacket/.travis.gofmt.sh generated vendored Normal file
View File

@ -0,0 +1,7 @@
#!/bin/bash
cd "$(dirname $0)"
if [ -n "$(go fmt ./...)" ]; then
echo "Go code is not formatted, run 'go fmt github.com/google/stenographer/...'" >&2
exit 1
fi

25
vendor/github.com/google/gopacket/.travis.golint.sh generated vendored Normal file
View File

@ -0,0 +1,25 @@
#!/bin/bash
cd "$(dirname $0)"
go get github.com/golang/lint/golint
DIRS=". tcpassembly tcpassembly/tcpreader ip4defrag reassembly macs pcapgo pcap afpacket pfring routing"
# Add subdirectories here as we clean up golint on each.
for subdir in $DIRS; do
pushd $subdir
if golint |
grep -v CannotSetRFMon | # pcap exported error name
grep -v DataLost | # tcpassembly/tcpreader exported error name
grep .; then
exit 1
fi
popd
done
pushd layers
for file in $(cat .linted); do
if golint $file | grep .; then
exit 1
fi
done
popd

10
vendor/github.com/google/gopacket/.travis.govet.sh generated vendored Normal file
View File

@ -0,0 +1,10 @@
#!/bin/bash
cd "$(dirname $0)"
DIRS=". layers pcap pcapgo pfring tcpassembly tcpassembly/tcpreader routing ip4defrag bytediff macs"
set -e
for subdir in $DIRS; do
pushd $subdir
go vet
popd
done

14
vendor/github.com/google/gopacket/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,14 @@
language: go
install:
- go get github.com/google/gopacket
- go get github.com/google/gopacket/layers
- go get github.com/google/gopacket/tcpassembly
- go get github.com/google/gopacket/reassembly
script:
- go test github.com/google/gopacket
- go test github.com/google/gopacket/layers
- go test github.com/google/gopacket/tcpassembly
- go test github.com/google/gopacket/reassembly
- ./.travis.gofmt.sh
- ./.travis.govet.sh
- ./.travis.golint.sh

46
vendor/github.com/google/gopacket/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,46 @@
AUTHORS AND MAINTAINERS:
MAIN DEVELOPERS:
Graeme Connell <gconnell@google.com, gsconnell@gmail.com>
AUTHORS:
Nigel Tao <nigeltao@google.com>
Cole Mickens <cole.mickens@gmail.com>
Ben Daglish <bdaglish@restorepoint.com>
Luis Martinez <martinezlc99@gmail.com>
Remco Verhoef <remco@dutchcoders.io>
Hiroaki Kawai <Hiroaki.Kawai@gmail.com>
Lukas Lueg <lukas.lueg@gmail.com>
Laurent Hausermann <laurent.hausermann@gmail.com>
Bill Green <bgreen@newrelic.com>
CONTRIBUTORS:
Attila Oláh <attila@attilaolah.eu>
Vittus Mikiassen <matt.miki.vimik@gmail.com>
Matthias Radestock <matthias.radestock@gmail.com>
Matthew Sackman <matthew@wellquite.org>
Loic Prylli <loicp@google.com>
Alexandre Fiori <fiorix@gmail.com>
Adrian Tam <adrian.c.m.tam@gmail.com>
Satoshi Matsumoto <kaorimatz@gmail.com>
David Stainton <dstainton415@gmail.com>
Jesse Ward <jesse@jesseward.com>
Kane Mathers <kane@kanemathers.name>
-----------------------------------------------
FORKED FROM github.com/akrennmair/gopcap
ALL THE FOLLOWING ARE FOR THAT PROJECT
MAIN DEVELOPERS:
Andreas Krennmair <ak@synflood.at>
CONTRIBUTORS:
Andrea Nall <anall@andreanall.com>
Daniel Arndt <danielarndt@gmail.com>
Dustin Sallings <dustin@spy.net>
Graeme Connell <gconnell@google.com, gsconnell@gmail.com>
Guillaume Savary <guillaume@savary.name>
Mark Smith <mark@qq.is>
Miek Gieben <miek@miek.nl>
Mike Bell <mike@mikebell.org>
Trevor Strohman <strohman@google.com>

215
vendor/github.com/google/gopacket/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,215 @@
Contributing To gopacket
========================
So you've got some code and you'd like it to be part of gopacket... wonderful!
We're happy to accept contributions, whether they're fixes to old protocols, new
protocols entirely, or anything else you think would improve the gopacket
library. This document is designed to help you to do just that.
The first section deals with the plumbing: how to actually get a change
submitted.
The second section deals with coding style... Go is great in that it
has a uniform style implemented by 'go fmt', but there's still some decisions
we've made that go above and beyond, and if you follow them, they won't come up
in your code review.
The third section deals with some of the implementation decisions we've made,
which may help you to understand the current code and which we may ask you to
conform to (or provide compelling reasons for ignoring).
Overall, we hope this document will help you to understand our system and write
great code which fits in, and help us to turn around on your code review quickly
so the code can make it into the master branch as quickly as possible.
How To Submit Code
------------------
We use github.com's Pull Request feature to receive code contributions from
external contributors. See
https://help.github.com/articles/creating-a-pull-request/ for details on
how to create a request.
Also, there's a local script `gc` in the base directory of GoPacket that
runs a local set of checks, which should give you relatively high confidence
that your pull won't fail github pull checks.
```sh
go get github.com/google/gopacket
cd $GOROOT/src/pkg/github.com/google/gopacket
git checkout -b <mynewfeature> # create a new branch to work from
... code code code ...
./gc # Run this to do local commits, it performs a number of checks
```
To sum up:
* DO
+ Pull down the latest version.
+ Make a feature-specific branch.
+ Code using the style and methods discussed in the rest of this document.
+ Use the ./gc command to do local commits or check correctness.
+ Push your new feature branch up to github.com, as a pull request.
+ Handle comments and requests from reviewers, pushing new commits up to
your feature branch as problems are addressed.
+ Put interesting comments and discussions into commit comments.
* DON'T
+ Push to someone else's branch without their permission.
Coding Style
------------
* Go code must be run through `go fmt`, `go vet`, and `golint`
* Follow http://golang.org/doc/effective_go.html as much as possible.
+ In particular, http://golang.org/doc/effective_go.html#mixed-caps. Enums
should be be CamelCase, with acronyms capitalized (TCPSourcePort, vs.
TcpSourcePort or TCP_SOURCE_PORT).
* Bonus points for giving enum types a String() field.
* Any exported types or functions should have commentary
(http://golang.org/doc/effective_go.html#commentary)
Coding Methods And Implementation Notes
---------------------------------------
### Error Handling
Many times, you'll be decoding a protocol and run across something bad, a packet
corruption or the like. How do you handle this? First off, ALWAYS report the
error. You can do this either by returning the error from the decode() function
(most common), or if you're up for it you can implement and add an ErrorLayer
through the packet builder (the first method is a simple shortcut that does
exactly this, then stops any future decoding).
Often, you'll already have decode some part of your protocol by the time you hit
your error. Use your own discretion to determine whether the stuff you've
already decoded should be returned to the caller or not:
```go
func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
prot := &MyProtocol{}
if len(data) < 10 {
// This error occurred before we did ANYTHING, so there's nothing in my
// protocol that the caller could possibly want. Just return the error.
return fmt.Errorf("Length %d less than 10", len(data))
}
prot.ImportantField1 = data[:5]
prot.ImportantField2 = data[5:10]
// At this point, we've already got enough information in 'prot' to
// warrant returning it to the caller, so we'll add it now.
p.AddLayer(prot)
if len(data) < 15 {
// We encountered an error later in the packet, but the caller already
// has the important info we've gleaned so far.
return fmt.Errorf("Length %d less than 15", len(data))
}
prot.ImportantField3 = data[10:15]
return nil // We've already added the layer, we can just return success.
}
```
In general, our code follows the approach of returning the first error it
encounters. In general, we don't trust any bytes after the first error we see.
### What Is A Layer?
The definition of a layer is up to the discretion of the coder. It should be
something important enough that it's actually useful to the caller (IE: every
TLV value should probably NOT be a layer). However, it can be more granular
than a single protocol... IPv6 and SCTP both implement many layers to handle the
various parts of the protocol. Use your best judgement, and prepare to defend
your decisions during code review. ;)
### Performance
We strive to make gopacket as fast as possible while still providing lots of
features. In general, this means:
* Focus performance tuning on common protocols (IP4/6, TCP, etc), and optimize
others on an as-needed basis (tons of MPLS on your network? Time to optimize
MPLS!)
* Use fast operations. See the toplevel benchmark_test for benchmarks of some
of Go's underlying features and types.
* Test your performance changes! You should use the ./gc script's --benchmark
flag to submit any performance-related changes. Use pcap/gopacket_benchmark
to test your change against a PCAP file based on your traffic patterns.
* Don't be TOO hacky. Sometimes, removing an unused field from a struct causes
a huge performance hit, due to the way that Go currently handles its segmented
stack... don't be afraid to clean it up anyway. We'll trust the Go compiler
to get good enough over time to handle this. Also, this type of
compiler-specific optimization is very fragile; someone adding a field to an
entirely different struct elsewhere in the codebase could reverse any gains
you might achieve by aligning your allocations.
* Try to minimize memory allocations. If possible, use []byte to reference
pieces of the input, instead of using string, which requires copying the bytes
into a new memory allocation.
* Think hard about what should be evaluated lazily vs. not. In general, a
layer's struct should almost exactly mirror the layer's frame. Anything
that's more interesting should be a function. This may not always be
possible, but it's a good rule of thumb.
* Don't fear micro-optimizations. With the above in mind, we welcome
micro-optimizations that we think will have positive/neutral impacts on the
majority of workloads. A prime example of this is pre-allocating certain
structs within a larger one:
```go
type MyProtocol struct {
// Most packets have 1-4 of VeryCommon, so we preallocate it here.
initialAllocation [4]uint32
VeryCommon []uint32
}
func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
prot := &MyProtocol{}
prot.VeryCommon = prot.initialAllocation[:0]
for len(data) > 4 {
field := binary.BigEndian.Uint32(data[:4])
data = data[4:]
// Since we're using the underlying initialAllocation, we won't need to
// allocate new memory for the following append unless we have more than 16
// bytes of data, which should be the uncommon case.
prot.VeryCommon = append(prot.VeryCommon, field)
}
p.AddLayer(prot)
if len(data) > 0 {
return fmt.Errorf("MyProtocol packet has %d bytes left after decoding", len(data))
}
return nil
}
```
### Slices And Data
If you're pulling a slice from the data you're decoding, don't copy it. Just
use the slice itself.
```go
type MyProtocol struct {
A, B net.IP
}
func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
p.AddLayer(&MyProtocol{
A: data[:4],
B: data[4:8],
})
return nil
}
```
The caller has already agreed, by using this library, that they won't modify the
set of bytes they pass in to the decoder, or the library has already copied the
set of bytes to a read-only location. See DecodeOptions.NoCopy for more
information.
### Enums/Types
If a protocol has an integer field (uint8, uint16, etc) with a couple of known
values that mean something special, make it a type. This allows us to do really
nice things like adding a String() function to them, so we can more easily
display those to users. Check out layers/enums.go for one example, as well as
layers/icmp.go for layer-specific enums.
When naming things, try for descriptiveness over succinctness. For example,
choose DNSResponseRecord over DNSRR.

28
vendor/github.com/google/gopacket/LICENSE generated vendored Normal file
View File

@ -0,0 +1,28 @@
Copyright (c) 2012 Google, Inc. All rights reserved.
Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Andreas Krennmair, Google, nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

10
vendor/github.com/google/gopacket/README.md generated vendored Normal file
View File

@ -0,0 +1,10 @@
# GoPacket
This library provides packet decoding capabilities for Go.
See [godoc](https://godoc.org/github.com/google/gopacket) for more details.
[![Build Status](https://travis-ci.org/google/gopacket.svg?branch=master)](https://travis-ci.org/google/gopacket)
[![GoDoc](https://godoc.org/github.com/google/gopacket?status.svg)](https://godoc.org/github.com/google/gopacket)
Originally forked from the gopcap project written by Andreas
Krennmair <ak@synflood.at> (http://github.com/akrennmair/gopcap).

178
vendor/github.com/google/gopacket/base.go generated vendored Normal file
View File

@ -0,0 +1,178 @@
// Copyright 2012 Google, Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package gopacket
import (
"fmt"
)
// Layer represents a single decoded packet layer (using either the
// OSI or TCP/IP definition of a layer). When decoding, a packet's data is
// broken up into a number of layers. The caller may call LayerType() to
// figure out which type of layer they've received from the packet. Optionally,
// they may then use a type assertion to get the actual layer type for deep
// inspection of the data.
//
// Implementations in this file include Payload, Fragment, and DecodeFailure;
// the layers subpackage provides implementations for concrete protocols.
type Layer interface {
	// LayerType is the gopacket type for this layer.
	LayerType() LayerType
	// LayerContents returns the set of bytes that make up this layer
	// (header bytes included).
	LayerContents() []byte
	// LayerPayload returns the set of bytes contained within this layer, not
	// including the layer itself (i.e. the bytes the next layer decodes).
	LayerPayload() []byte
}
// Payload is a Layer containing the payload of a packet. The definition of
// what constitutes the payload of a packet depends on previous layers; for
// TCP and UDP, we stop decoding above layer 4 and return the remaining
// bytes as a Payload. Payload is an ApplicationLayer.
type Payload []byte

// LayerType returns LayerTypePayload.
func (p Payload) LayerType() LayerType {
	return LayerTypePayload
}

// LayerContents returns the bytes making up this layer.
func (p Payload) LayerContents() []byte {
	return []byte(p)
}

// LayerPayload returns the payload within this layer (always nil; the
// payload IS this layer).
func (p Payload) LayerPayload() []byte {
	return nil
}

// Payload returns this layer as bytes, implementing ApplicationLayer.
func (p Payload) Payload() []byte {
	return []byte(p)
}

// String implements fmt.Stringer.
func (p Payload) String() string {
	return fmt.Sprintf("%d byte(s)", len(p))
}

// GoString implements fmt.GoStringer.
func (p Payload) GoString() string {
	return LongBytesGoString([]byte(p))
}

// CanDecode implements DecodingLayer.
func (p Payload) CanDecode() LayerClass {
	return LayerTypePayload
}

// NextLayerType implements DecodingLayer.
func (p Payload) NextLayerType() LayerType {
	return LayerTypeZero
}

// DecodeFromBytes implements DecodingLayer. It simply aliases the input
// bytes as the payload; it never fails.
func (p *Payload) DecodeFromBytes(data []byte, df DecodeFeedback) error {
	*p = Payload(data)
	return nil
}

// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (p Payload) SerializeTo(b SerializeBuffer, opts SerializeOptions) error {
	buf, err := b.PrependBytes(len(p))
	if err != nil {
		return err
	}
	copy(buf, p)
	return nil
}
// decodePayload decodes data by returning it all in a Payload layer,
// registering it as the packet's application layer.
func decodePayload(data []byte, p PacketBuilder) error {
	payload := &Payload{}
	if err := payload.DecodeFromBytes(data, p); err != nil {
		// Propagate the decode error; the original swallowed it by
		// returning nil, silently producing a packet with no payload
		// layer and no error.
		return err
	}
	p.AddLayer(payload)
	p.SetApplicationLayer(payload)
	return nil
}
// Fragment is a Layer containing a fragment of a larger frame, used by layers
// like IPv4 and IPv6 that allow for fragmentation of their payloads.
type Fragment []byte

// LayerType returns LayerTypeFragment.
func (p *Fragment) LayerType() LayerType {
	return LayerTypeFragment
}

// LayerContents implements Layer.
func (p *Fragment) LayerContents() []byte {
	return []byte(*p)
}

// LayerPayload implements Layer (always nil; a fragment has no sub-layer).
func (p *Fragment) LayerPayload() []byte {
	return nil
}

// Payload returns this layer as a byte slice.
func (p *Fragment) Payload() []byte {
	return []byte(*p)
}

// String implements fmt.Stringer.
func (p *Fragment) String() string {
	return fmt.Sprintf("%d byte(s)", len(*p))
}

// CanDecode implements DecodingLayer.
func (p *Fragment) CanDecode() LayerClass {
	return LayerTypeFragment
}

// NextLayerType implements DecodingLayer.
func (p *Fragment) NextLayerType() LayerType {
	return LayerTypeZero
}

// DecodeFromBytes implements DecodingLayer. It simply aliases the input
// bytes as the fragment; it never fails.
func (p *Fragment) DecodeFromBytes(data []byte, df DecodeFeedback) error {
	*p = Fragment(data)
	return nil
}

// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (p *Fragment) SerializeTo(b SerializeBuffer, opts SerializeOptions) error {
	buf, err := b.PrependBytes(len(*p))
	if err != nil {
		return err
	}
	copy(buf, *p)
	return nil
}
// decodeFragment decodes data by returning it all in a Fragment layer,
// registering it as the packet's application layer.
func decodeFragment(data []byte, p PacketBuilder) error {
	payload := &Fragment{}
	if err := payload.DecodeFromBytes(data, p); err != nil {
		// Propagate the decode error; the original swallowed it by
		// returning nil, silently producing a packet with no fragment
		// layer and no error. Mirrors the fix in decodePayload.
		return err
	}
	p.AddLayer(payload)
	p.SetApplicationLayer(payload)
	return nil
}
// These layers correspond to Internet Protocol Suite (TCP/IP) layers, and their
// corresponding OSI layers, as best as possible.

// LinkLayer is the packet layer corresponding to TCP/IP layer 1 (OSI layer 2).
type LinkLayer interface {
	Layer
	// LinkFlow returns the src/dst addressing of this layer as a Flow.
	LinkFlow() Flow
}

// NetworkLayer is the packet layer corresponding to TCP/IP layer 2 (OSI
// layer 3).
type NetworkLayer interface {
	Layer
	// NetworkFlow returns the src/dst addressing of this layer as a Flow.
	NetworkFlow() Flow
}

// TransportLayer is the packet layer corresponding to the TCP/IP layer 3 (OSI
// layer 4).
type TransportLayer interface {
	Layer
	// TransportFlow returns the src/dst addressing of this layer as a Flow.
	TransportFlow() Flow
}

// ApplicationLayer is the packet layer corresponding to the TCP/IP layer 4 (OSI
// layer 7), also known as the packet payload.
type ApplicationLayer interface {
	Layer
	// Payload returns the application-level bytes of the packet.
	Payload() []byte
}

// ErrorLayer is a packet layer created when decoding of the packet has failed.
// Its payload is all the bytes that we were unable to decode, and the returned
// error details why the decoding failed. DecodeFailure (decode.go) is the
// canonical implementation.
type ErrorLayer interface {
	Layer
	// Error returns the error encountered while decoding.
	Error() error
}

157
vendor/github.com/google/gopacket/decode.go generated vendored Normal file
View File

@ -0,0 +1,157 @@
// Copyright 2012 Google, Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package gopacket
import (
"errors"
)
// DecodeFeedback is used by DecodingLayer layers to provide decoding metadata
// back to the packet being built.
type DecodeFeedback interface {
	// SetTruncated should be called if during decoding you notice that a packet
	// is shorter than internal layer variables (HeaderLength, or the like) say it
	// should be. It sets packet.Metadata().Truncated.
	SetTruncated()
}

// nilDecodeFeedback is a no-op implementation of DecodeFeedback.
type nilDecodeFeedback struct{}

// SetTruncated implements DecodeFeedback by doing nothing.
func (nilDecodeFeedback) SetTruncated() {}

// NilDecodeFeedback implements DecodeFeedback by doing nothing, for use when
// no real feedback sink is available.
var NilDecodeFeedback DecodeFeedback = nilDecodeFeedback{}
// PacketBuilder is used by layer decoders to store the layers they've decoded,
// and to defer future decoding via NextDecoder.
// Typically, the pattern for use is:
//
//	func (m *myDecoder) Decode(data []byte, p PacketBuilder) error {
//	  if myLayer, err := myDecodingLogic(data); err != nil {
//	    return err
//	  } else {
//	    p.AddLayer(myLayer)
//	  }
//	  // maybe do this, if myLayer is a LinkLayer
//	  p.SetLinkLayer(myLayer)
//	  return p.NextDecoder(nextDecoder)
//	}
type PacketBuilder interface {
	DecodeFeedback
	// AddLayer should be called by a decoder immediately upon successful
	// decoding of a layer.
	AddLayer(l Layer)
	// The following functions set the various specific layers in the final
	// packet. Note that if many layers call SetX, the first call is kept and all
	// other calls are ignored.
	SetLinkLayer(LinkLayer)
	SetNetworkLayer(NetworkLayer)
	SetTransportLayer(TransportLayer)
	SetApplicationLayer(ApplicationLayer)
	SetErrorLayer(ErrorLayer)
	// NextDecoder should be called by a decoder when they're done decoding a
	// packet layer but not done with decoding the entire packet. The next
	// decoder will be called to decode the last AddLayer's LayerPayload.
	// Because of this, NextDecoder must only be called once all other
	// PacketBuilder calls have been made. Set*Layer and AddLayer calls after
	// NextDecoder calls will behave incorrectly.
	NextDecoder(next Decoder) error
	// DumpPacketData is used solely for decoding. If you come across an error
	// you need to diagnose while processing a packet, call this and your packet's
	// data will be dumped to stderr so you can create a test. This should never
	// be called from a production decoder.
	DumpPacketData()
	// DecodeOptions returns the decode options in effect for this packet.
	DecodeOptions() *DecodeOptions
}
// Decoder is an interface for logic to decode a packet layer. Users may
// implement a Decoder to handle their own strange packet types, or may use one
// of the many decoders available in the 'layers' subpackage to decode things
// for them.
type Decoder interface {
	// Decode decodes the bytes of a packet, sending decoded values and other
	// information to PacketBuilder, and returning an error if unsuccessful. See
	// the PacketBuilder documentation for more details.
	Decode([]byte, PacketBuilder) error
}

// DecodeFunc wraps a function to make it a Decoder, in the style of
// http.HandlerFunc.
type DecodeFunc func([]byte, PacketBuilder) error

// Decode implements Decoder by calling the wrapped function.
func (d DecodeFunc) Decode(data []byte, p PacketBuilder) error {
	// function, call thyself.
	return d(data, p)
}
// DecodePayload is a Decoder that returns a Payload layer containing all
// remaining bytes.
var DecodePayload Decoder = DecodeFunc(decodePayload)

// DecodeUnknown is a Decoder that returns an Unknown layer containing all
// remaining bytes, useful if you run up against a layer that you're unable to
// decode yet. This layer is considered an ErrorLayer.
var DecodeUnknown Decoder = DecodeFunc(decodeUnknown)

// DecodeFragment is a Decoder that returns a Fragment layer containing all
// remaining bytes.
var DecodeFragment Decoder = DecodeFunc(decodeFragment)

// The four registrations below reserve LayerType IDs 0-3 for gopacket's
// built-in pseudo-layers; user-defined layer types must use other IDs.

// LayerTypeZero is an invalid layer type, but can be used to determine whether
// layer type has actually been set correctly.
var LayerTypeZero = RegisterLayerType(0, LayerTypeMetadata{Name: "Unknown", Decoder: DecodeUnknown})

// LayerTypeDecodeFailure is the layer type for the default error layer.
var LayerTypeDecodeFailure = RegisterLayerType(1, LayerTypeMetadata{Name: "DecodeFailure", Decoder: DecodeUnknown})

// LayerTypePayload is the layer type for a payload that we don't try to decode
// but treat as a success, IE: an application-level payload.
var LayerTypePayload = RegisterLayerType(2, LayerTypeMetadata{Name: "Payload", Decoder: DecodePayload})

// LayerTypeFragment is the layer type for a fragment of a layer transported
// by an underlying layer that supports fragmentation.
var LayerTypeFragment = RegisterLayerType(3, LayerTypeMetadata{Name: "Fragment", Decoder: DecodeFragment})
// DecodeFailure is a packet layer created if decoding of the packet data failed
// for some reason. It implements ErrorLayer. LayerContents will be the entire
// set of bytes that failed to parse, and Error will return the reason parsing
// failed.
type DecodeFailure struct {
	data  []byte
	err   error
	stack []byte
}

// Error returns the error encountered during decoding.
func (d *DecodeFailure) Error() error {
	return d.err
}

// LayerContents implements Layer.
func (d *DecodeFailure) LayerContents() []byte {
	return d.data
}

// LayerPayload implements Layer (always nil; nothing below a failure).
func (d *DecodeFailure) LayerPayload() []byte {
	return nil
}

// String implements fmt.Stringer.
func (d *DecodeFailure) String() string {
	return "Packet decoding error: " + d.err.Error()
}

// Dump implements Dumper, returning the stack trace captured at decode
// time, or the empty string if none was captured.
func (d *DecodeFailure) Dump() string {
	if d.stack == nil {
		return ""
	}
	return string(d.stack)
}

// LayerType returns LayerTypeDecodeFailure.
func (d *DecodeFailure) LayerType() LayerType {
	return LayerTypeDecodeFailure
}
// decodeUnknown "decodes" unsupported data types by returning an error.
// This decoder will thus always return a DecodeFailure layer.
// NOTE(review): the error string is capitalized, contrary to Go convention;
// kept as-is since it is user-visible vendored upstream behavior.
func decodeUnknown(data []byte, p PacketBuilder) error {
	return errors.New("Layer type not currently supported")
}

365
vendor/github.com/google/gopacket/doc.go generated vendored Normal file
View File

@ -0,0 +1,365 @@
// Copyright 2012 Google, Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
/*
Package gopacket provides packet decoding for the Go language.
gopacket contains many sub-packages with additional functionality you may find
useful, including:
* layers: You'll probably use this every time. This contains of the logic
built into gopacket for decoding packet protocols. Note that all example
code below assumes that you have imported both gopacket and
gopacket/layers.
* pcap: C bindings to use libpcap to read packets off the wire.
* pfring: C bindings to use PF_RING to read packets off the wire.
* afpacket: C bindings for Linux's AF_PACKET to read packets off the wire.
* tcpassembly: TCP stream reassembly
Also, if you're looking to dive right into code, see the examples subdirectory
for numerous simple binaries built using gopacket libraries.
Basic Usage
gopacket takes in packet data as a []byte and decodes it into a packet with
a non-zero number of "layers". Each layer corresponds to a protocol
within the bytes. Once a packet has been decoded, the layers of the packet
can be requested from the packet.
// Decode a packet
packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
// Get the TCP layer from this packet
if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
fmt.Println("This is a TCP packet!")
// Get actual TCP data from this layer
tcp, _ := tcpLayer.(*layers.TCP)
fmt.Printf("From src port %d to dst port %d\n", tcp.SrcPort, tcp.DstPort)
}
// Iterate over all layers, printing out each layer type
for _, layer := range packet.Layers() {
fmt.Println("PACKET LAYER:", layer.LayerType())
}
Packets can be decoded from a number of starting points. Many of our base
types implement Decoder, which allow us to decode packets for which
we don't have full data.
// Decode an ethernet packet
ethP := gopacket.NewPacket(p1, layers.LayerTypeEthernet, gopacket.Default)
// Decode an IPv6 header and everything it contains
ipP := gopacket.NewPacket(p2, layers.LayerTypeIPv6, gopacket.Default)
// Decode a TCP header and its payload
tcpP := gopacket.NewPacket(p3, layers.LayerTypeTCP, gopacket.Default)
Reading Packets From A Source
Most of the time, you won't just have a []byte of packet data lying around.
Instead, you'll want to read packets in from somewhere (file, interface, etc)
and process them. To do that, you'll want to build a PacketSource.
First, you'll need to construct an object that implements the PacketDataSource
interface. There are implementations of this interface bundled with gopacket
in the gopacket/pcap and gopacket/pfring subpackages... see their documentation
for more information on their usage. Once you have a PacketDataSource, you can
pass it into NewPacketSource, along with a Decoder of your choice, to create
a PacketSource.
Once you have a PacketSource, you can read packets from it in multiple ways.
See the docs for PacketSource for more details. The easiest method is the
Packets function, which returns a channel, then asynchronously writes new
packets into that channel, closing the channel if the packetSource hits an
end-of-file.
packetSource := ... // construct using pcap or pfring
for packet := range packetSource.Packets() {
handlePacket(packet) // do something with each packet
}
You can change the decoding options of the packetSource by setting fields in
packetSource.DecodeOptions... see the following sections for more details.
Lazy Decoding
gopacket optionally decodes packet data lazily, meaning it
only decodes a packet layer when it needs to handle a function call.
// Create a packet, but don't actually decode anything yet
packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
// Now, decode the packet up to the first IPv4 layer found but no further.
// If no IPv4 layer was found, the whole packet will be decoded looking for
// it.
ip4 := packet.Layer(layers.LayerTypeIPv4)
// Decode all layers and return them. The layers up to the first IPv4 layer
// are already decoded, and will not require decoding a second time.
layers := packet.Layers()
Lazily-decoded packets are not concurrency-safe. Since layers have not all been
decoded, each call to Layer() or Layers() has the potential to mutate the packet
in order to decode the next layer. If a packet is used
in multiple goroutines concurrently, don't use gopacket.Lazy. Then gopacket
will decode the packet fully, and all future function calls won't mutate the
object.
NoCopy Decoding
By default, gopacket will copy the slice passed to NewPacket and store the
copy within the packet, so future mutations to the bytes underlying the slice
don't affect the packet and its layers. If you can guarantee that the
underlying slice bytes won't be changed, you can use NoCopy to tell
gopacket.NewPacket, and it'll use the passed-in slice itself.
// This channel returns new byte slices, each of which points to a new
// memory location that's guaranteed immutable for the duration of the
// packet.
for data := range myByteSliceChannel {
p := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.NoCopy)
doSomethingWithPacket(p)
}
The fastest method of decoding is to use both Lazy and NoCopy, but note from
the many caveats above that for some implementations either or both may be
dangerous.
Pointers To Known Layers
During decoding, certain layers are stored in the packet as well-known
layer types. For example, IPv4 and IPv6 are both considered NetworkLayer
layers, while TCP and UDP are both TransportLayer layers. We support 4
layers, corresponding to the 4 layers of the TCP/IP layering scheme (roughly
analogous to layers 2, 3, 4, and 7 of the OSI model). To access these,
you can use the packet.LinkLayer, packet.NetworkLayer,
packet.TransportLayer, and packet.ApplicationLayer functions. Each of
these functions returns a corresponding interface
(gopacket.{Link,Network,Transport,Application}Layer). The first three
provide methods for getting src/dst addresses for that particular layer,
while the final layer provides a Payload function to get payload data.
This is helpful, for example, to get payloads for all packets regardless
of their underlying data type:
// Get packets from some source
for packet := range someSource {
if app := packet.ApplicationLayer(); app != nil {
if strings.Contains(string(app.Payload()), "magic string") {
fmt.Println("Found magic string in a packet!")
}
}
}
A particularly useful layer is ErrorLayer, which is set whenever there's
an error parsing part of the packet.
packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
if err := packet.ErrorLayer(); err != nil {
fmt.Println("Error decoding some part of the packet:", err)
}
Note that we don't return an error from NewPacket because we may have decoded
a number of layers successfully before running into our erroneous layer. You
may still be able to get your Ethernet and IPv4 layers correctly, even if
your TCP layer is malformed.
Flow And Endpoint
gopacket has two useful objects, Flow and Endpoint, for communicating in a protocol
independent manner the fact that a packet is coming from A and going to B.
The general layer types LinkLayer, NetworkLayer, and TransportLayer all provide
methods for extracting their flow information, without worrying about the type
of the underlying Layer.
A Flow is a simple object made up of a set of two Endpoints, one source and one
destination. It details the sender and receiver of the Layer of the Packet.
An Endpoint is a hashable representation of a source or destination. For
example, for LayerTypeIPv4, an Endpoint contains the IP address bytes for a v4
IP packet. A Flow can be broken into Endpoints, and Endpoints can be combined
into Flows:
packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
netFlow := packet.NetworkLayer().NetworkFlow()
src, dst := netFlow.Endpoints()
reverseFlow := gopacket.NewFlow(dst, src)
Both Endpoint and Flow objects can be used as map keys, and the equality
operator can compare them, so you can easily group together all packets
based on endpoint criteria:
flows := map[gopacket.Endpoint]chan gopacket.Packet
packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
// Send all TCP packets to channels based on their destination port.
if tcp := packet.Layer(layers.LayerTypeTCP); tcp != nil {
flows[tcp.TransportFlow().Dst()] <- packet
}
// Look for all packets with the same source and destination network address
if net := packet.NetworkLayer(); net != nil {
src, dst := net.NetworkFlow().Endpoints()
if src == dst {
fmt.Printf("Fishy packet has same network source and dst: %s\n", src)
}
}
// Find all packets coming from UDP port 1000 to UDP port 500
interestingFlow := gopacket.NewFlow(layers.NewUDPPortEndpoint(1000), layers.NewUDPPortEndpoint(500))
if t := packet.TransportLayer(); t != nil && t.TransportFlow() == interestingFlow {
fmt.Println("Found that UDP flow I was looking for!")
}
For load-balancing purposes, both Flow and Endpoint have FastHash() functions,
which provide quick, non-cryptographic hashes of their contents. Of particular
importance is the fact that Flow FastHash() is symmetric: A->B will have the same
hash as B->A. An example usage could be:
channels := [8]chan gopacket.Packet
for i := 0; i < 8; i++ {
channels[i] = make(chan gopacket.Packet)
go packetHandler(channels[i])
}
for packet := range getPackets() {
if net := packet.NetworkLayer(); net != nil {
channels[int(net.NetworkFlow().FastHash()) & 0x7] <- packet
}
}
This allows us to split up a packet stream while still making sure that each
stream sees all packets for a flow (and its bidirectional opposite).
Implementing Your Own Decoder
If your network has some strange encapsulation, you can implement your own
decoder. In this example, we handle Ethernet packets which are encapsulated
in a 4-byte header.
// Create a layer type, should be unique and high, so it doesn't conflict,
// giving it a name and a decoder to use.
var MyLayerType = gopacket.RegisterLayerType(12345, gopacket.LayerTypeMetadata{Name: "MyLayerType", Decoder: gopacket.DecodeFunc(decodeMyLayer)})
// Implement my layer
type MyLayer struct {
StrangeHeader []byte
payload []byte
}
func (m MyLayer) LayerType() gopacket.LayerType { return MyLayerType }
func (m MyLayer) LayerContents() []byte { return m.StrangeHeader }
func (m MyLayer) LayerPayload() []byte { return m.payload }
// Now implement a decoder... this one strips off the first 4 bytes of the
// packet.
func decodeMyLayer(data []byte, p gopacket.PacketBuilder) error {
// Create my layer
p.AddLayer(&MyLayer{data[:4], data[4:]})
// Determine how to handle the rest of the packet
return p.NextDecoder(layers.LayerTypeEthernet)
}
// Finally, decode your packets:
p := gopacket.NewPacket(data, MyLayerType, gopacket.Lazy)
See the docs for Decoder and PacketBuilder for more details on how coding
decoders works, or look at RegisterLayerType and RegisterEndpointType to see how
to add layer/endpoint types to gopacket.
Fast Decoding With DecodingLayerParser
TLDR: DecodingLayerParser takes about 10% of the time as NewPacket to decode
packet data, but only for known packet stacks.
Basic decoding using gopacket.NewPacket or PacketSource.Packets is somewhat slow
due to its need to allocate a new packet and every respective layer. It's very
versatile and can handle all known layer types, but sometimes you really only
care about a specific set of layers regardless, so that versatility is wasted.
DecodingLayerParser avoids memory allocation altogether by decoding packet
layers directly into preallocated objects, which you can then reference to get
the packet's information. A quick example:
func main() {
var eth layers.Ethernet
var ip4 layers.IPv4
var ip6 layers.IPv6
var tcp layers.TCP
parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp)
decoded := []gopacket.LayerType{}
for packetData := range somehowGetPacketData() {
err := parser.DecodeLayers(packetData, &decoded)
for _, layerType := range decoded {
switch layerType {
case layers.LayerTypeIPv6:
fmt.Println(" IP6 ", ip6.SrcIP, ip6.DstIP)
case layers.LayerTypeIPv4:
fmt.Println(" IP4 ", ip4.SrcIP, ip4.DstIP)
}
}
}
}
The important thing to note here is that the parser is modifying the passed in
layers (eth, ip4, ip6, tcp) instead of allocating new ones, thus greatly
speeding up the decoding process. It's even branching based on layer type...
it'll handle an (eth, ip4, tcp) or (eth, ip6, tcp) stack. However, it won't
handle any other type... since no other decoders were passed in, an (eth, ip4,
udp) stack will stop decoding after ip4, and only pass back [LayerTypeEthernet,
LayerTypeIPv4] through the 'decoded' slice (along with an error saying it can't
decode a UDP packet).
Unfortunately, not all layers can be used by DecodingLayerParser... only those
implementing the DecodingLayer interface are usable. Also, it's possible to
create DecodingLayers that are not themselves Layers... see
layers.IPv6ExtensionSkipper for an example of this.
Creating Packet Data
As well as offering the ability to decode packet data, gopacket will allow you
to create packets from scratch, as well. A number of gopacket layers implement
the SerializableLayer interface; these layers can be serialized to a []byte in
the following manner:
ip := &layers.IPv4{
SrcIP: net.IP{1, 2, 3, 4},
DstIP: net.IP{5, 6, 7, 8},
// etc...
}
buf := gopacket.NewSerializeBuffer()
opts := gopacket.SerializeOptions{} // See SerializeOptions for more details.
  err := ip.SerializeTo(buf, opts)
if err != nil { panic(err) }
fmt.Println(buf.Bytes()) // prints out a byte slice containing the serialized IPv4 layer.
SerializeTo PREPENDS the given layer onto the SerializeBuffer, treating the
current buffer's Bytes() slice as the payload of the serializing layer.
Therefore, you can serialize an entire packet by serializing a set of layers in
reverse order (Payload, then TCP, then IP, then Ethernet, for example). The
SerializeBuffer's SerializeLayers function is a helper that does exactly that.
To generate a (empty and useless, because no fields are set)
Ethernet(IPv4(TCP(Payload))) packet, for example, you can run:
buf := gopacket.NewSerializeBuffer()
opts := gopacket.SerializeOptions{}
gopacket.SerializeLayers(buf, opts,
&layers.Ethernet{},
&layers.IPv4{},
&layers.TCP{},
gopacket.Payload([]byte{1, 2, 3, 4}))
packetData := buf.Bytes()
A Final Note
If you use gopacket, you'll almost definitely want to make sure gopacket/layers
is imported, since when imported it sets all the LayerType variables and fills
in a lot of interesting variables/maps (DecodersByLayerName, etc). Therefore,
it's recommended that even if you don't use any layers functions directly, you still import with:
import (
_ "github.com/google/gopacket/layers"
)
*/
package gopacket

236
vendor/github.com/google/gopacket/flows.go generated vendored Normal file
View File

@ -0,0 +1,236 @@
// Copyright 2012 Google, Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package gopacket
import (
"bytes"
"fmt"
"strconv"
)
// MaxEndpointSize determines the maximum size in bytes of an endpoint address.
//
// Endpoints/Flows have a problem: They need to be hashable. Therefore, they
// can't use a byte slice. The two obvious choices are to use a string or a
// byte array. Strings work great, but string creation requires memory
// allocation, which can be slow. Arrays work great, but have a fixed size. We
// originally used the former, now we've switched to the latter. Use of a fixed
// byte-array doubles the speed of constructing a flow (due to not needing to
// allocate). This is a huge increase... too much for us to pass up.
//
// The end result of this, though, is that an endpoint/flow can't be created
// using more than MaxEndpointSize bytes per address.
const MaxEndpointSize = 16

// Endpoint is the set of bytes used to address packets at various layers.
// See LinkLayer, NetworkLayer, and TransportLayer specifications.
// Endpoints are usable as map keys.
type Endpoint struct {
	typ EndpointType          // registered type describing how raw should be interpreted
	len int                   // number of meaningful bytes in raw
	raw [MaxEndpointSize]byte // address bytes stored inline so Endpoint stays comparable/hashable
}

// EndpointType returns the endpoint type associated with this endpoint.
func (a Endpoint) EndpointType() EndpointType { return a.typ }

// Raw returns the raw bytes of this endpoint. These aren't human-readable
// most of the time, but they are faster than calling String.
func (a Endpoint) Raw() []byte { return a.raw[:a.len] }
// LessThan imposes a stable, total ordering on endpoints: endpoints are
// compared first by EndpointType, and ties are broken by lexicographic
// comparison of their raw bytes.
//
// For some endpoint types the comparison has no semantic meaning, but it is
// deterministic, which makes it useful for sorting most Endpoint values.
func (a Endpoint) LessThan(b Endpoint) bool {
	if a.typ != b.typ {
		return a.typ < b.typ
	}
	return bytes.Compare(a.raw[:a.len], b.raw[:b.len]) < 0
}
// fnvHash implements the 64-bit FNV-1a hash (xor the byte, then multiply by
// the prime) created by Glenn Fowler, Landon Curt Noll, and Phong Vo. It is
// used by our FastHash functions.
// See http://isthe.com/chongo/tech/comp/fnv/.
func fnvHash(s []byte) uint64 {
	h := uint64(fnvBasis)
	for _, c := range s {
		h ^= uint64(c)
		h *= fnvPrime
	}
	return h
}

// 64-bit FNV offset basis and FNV prime, per the FNV reference page.
const (
	fnvBasis = 14695981039346656037
	fnvPrime = 1099511628211
)
// FastHash provides a quick hashing function for an endpoint, useful if you'd
// like to split up endpoints by modulos or other load-balancing techniques.
// It uses a variant of Fowler-Noll-Vo hashing.
//
// The output of FastHash is not guaranteed to remain the same through future
// code revisions, so should not be used to key values in persistent storage.
func (a Endpoint) FastHash() (h uint64) {
	h = fnvHash(a.raw[:a.len])
	// Mix in the endpoint type so endpoints with identical raw bytes but
	// different types hash differently.
	h ^= uint64(a.typ)
	h *= fnvPrime
	return
}

// NewEndpoint creates a new Endpoint object.
//
// The length of raw must be no more than MaxEndpointSize, otherwise this
// function will panic.
func NewEndpoint(typ EndpointType, raw []byte) (e Endpoint) {
	e.len = len(raw)
	if e.len > MaxEndpointSize {
		panic("raw byte length greater than MaxEndpointSize")
	}
	e.typ = typ
	copy(e.raw[:], raw)
	return
}
// EndpointTypeMetadata is used to register a new endpoint type.
type EndpointTypeMetadata struct {
	// Name is the string returned by an EndpointType's String function.
	Name string
	// Formatter is called from an Endpoint's String function to format the raw
	// bytes in an Endpoint into a human-readable string.
	Formatter func([]byte) string
}

// EndpointType is the type of a gopacket Endpoint. This type determines how
// the bytes stored in the endpoint should be interpreted.
type EndpointType int64

// endpointTypes maps each registered EndpointType to its metadata.
// NOTE(review): this map is written by RegisterEndpointType without locking;
// registration appears intended for package-init time only.
var endpointTypes = map[EndpointType]EndpointTypeMetadata{}

// RegisterEndpointType creates a new EndpointType and registers it globally.
// It MUST be passed a unique number, or it will panic. Numbers 0-999 are
// reserved for gopacket's use.
func RegisterEndpointType(num int, meta EndpointTypeMetadata) EndpointType {
	t := EndpointType(num)
	if _, ok := endpointTypes[t]; ok {
		panic("Endpoint type number already in use")
	}
	endpointTypes[t] = meta
	return t
}
// String returns the registered name of this endpoint type, or its numeric
// value if the type was never registered via RegisterEndpointType.
func (e EndpointType) String() string {
	t, ok := endpointTypes[e]
	if !ok {
		return strconv.Itoa(int(e))
	}
	return t.Name
}
// String returns a human-readable representation of this endpoint, using the
// Formatter registered for its type when one exists; otherwise it falls back
// to a "type:bytes" rendering.
func (a Endpoint) String() string {
	if t, ok := endpointTypes[a.typ]; ok && t.Formatter != nil {
		return t.Formatter(a.raw[:a.len])
	}
	// Format only the bytes actually in use; formatting the whole fixed-size
	// array (as before) included meaningless zero padding and was
	// inconsistent with the Formatter branch above.
	return fmt.Sprintf("%v:%v", a.typ, a.raw[:a.len])
}
// Flow represents the direction of traffic for a packet layer, as a source and destination Endpoint.
// Flows are usable as map keys.
type Flow struct {
	typ        EndpointType          // shared type of both endpoints
	slen, dlen int                   // meaningful byte counts of src and dst
	src, dst   [MaxEndpointSize]byte // endpoint addresses stored inline
}

// FlowFromEndpoints creates a new flow by pasting together two endpoints.
// The endpoints must have the same EndpointType, or this function will return
// an error.
func FlowFromEndpoints(src, dst Endpoint) (_ Flow, err error) {
	if src.typ != dst.typ {
		err = fmt.Errorf("Mismatched endpoint types: %v->%v", src.typ, dst.typ)
		return
	}
	return Flow{src.typ, src.len, dst.len, src.raw, dst.raw}, nil
}

// FastHash provides a quick hashing function for a flow, useful if you'd
// like to split up flows by modulos or other load-balancing techniques.
// It uses a variant of Fowler-Noll-Vo hashing, and is guaranteed to collide
// with its reverse flow. IE: the flow A->B will have the same hash as the flow
// B->A.
//
// The output of FastHash is not guaranteed to remain the same through future
// code revisions, so should not be used to key values in persistent storage.
func (f Flow) FastHash() (h uint64) {
	// This combination must be commutative. We don't use ^, since that would
	// give the same hash for all A->A flows.
	h = fnvHash(f.src[:f.slen]) + fnvHash(f.dst[:f.dlen])
	h ^= uint64(f.typ)
	h *= fnvPrime
	return
}

// String returns a human-readable representation of this flow, in the form
// "Src->Dst".
func (f Flow) String() string {
	s, d := f.Endpoints()
	return fmt.Sprintf("%v->%v", s, d)
}

// EndpointType returns the EndpointType for this Flow.
func (f Flow) EndpointType() EndpointType {
	return f.typ
}

// Endpoints returns the two Endpoints for this flow.
func (f Flow) Endpoints() (src, dst Endpoint) {
	return Endpoint{f.typ, f.slen, f.src}, Endpoint{f.typ, f.dlen, f.dst}
}

// Src returns the source Endpoint for this flow.
func (f Flow) Src() (src Endpoint) {
	src, _ = f.Endpoints()
	return
}

// Dst returns the destination Endpoint for this flow.
func (f Flow) Dst() (dst Endpoint) {
	_, dst = f.Endpoints()
	return
}

// Reverse returns a new flow with endpoints reversed.
func (f Flow) Reverse() Flow {
	return Flow{f.typ, f.dlen, f.slen, f.dst, f.src}
}
// NewFlow creates a new flow from raw source and destination addresses of
// endpoint type t.
//
// Both src and dst must be at most MaxEndpointSize bytes long; NewFlow panics
// otherwise.
func NewFlow(t EndpointType, src, dst []byte) (f Flow) {
	if len(src) > MaxEndpointSize || len(dst) > MaxEndpointSize {
		panic("flow raw byte length greater than MaxEndpointSize")
	}
	f = Flow{typ: t, slen: len(src), dlen: len(dst)}
	copy(f.src[:], src)
	copy(f.dst[:], dst)
	return f
}
// EndpointInvalid is an endpoint type used for invalid endpoints, IE endpoints
// that are specified incorrectly during creation.
// Registered with reserved number 0; its formatter just prints the raw bytes.
var EndpointInvalid = RegisterEndpointType(0, EndpointTypeMetadata{Name: "invalid", Formatter: func(b []byte) string {
	return fmt.Sprintf("%v", b)
}})

// InvalidEndpoint is a singleton Endpoint of type EndpointInvalid.
var InvalidEndpoint = NewEndpoint(EndpointInvalid, nil)

// InvalidFlow is a singleton Flow of type EndpointInvalid.
var InvalidFlow = NewFlow(EndpointInvalid, nil, nil)

278
vendor/github.com/google/gopacket/gc generated vendored Normal file
View File

@ -0,0 +1,278 @@
#!/bin/bash
# Copyright 2012 Google, Inc. All rights reserved.
# This script provides a simple way to run benchmarks against previous code and
# keep a log of how benchmarks change over time. When used with the --benchmark
# flag, it runs benchmarks from the current code and from the last commit run
# with --benchmark, then stores the results in the git commit description. We
# rerun the old benchmarks along with the new ones, since there's no guarantee
# that git commits will happen on the same machine, so machine differences could
# cause wildly inaccurate results.
#
# If you're making changes to 'gopacket' which could cause performance changes,
# you may be requested to use this commit script to make sure your changes don't
# have large detrimental effects (or to show off how awesome your performance
# improvements are).
#
# If not run with the --benchmark flag, this script is still very useful... it
# makes sure all the correct go formatting, building, and testing work as
# expected.
# Usage prints the help text to stdout and exits with a failure status.
function Usage {
cat <<EOF
USAGE: $0 [--benchmark regexp] [--root] [--gen] <git commit flags...>
--benchmark: Run benchmark comparisons against last benchmark'd commit
--root: Run tests that require root priviledges
--gen: Generate code for MACs/ports by pulling down external data
Note, some 'git commit' flags are necessary, if all else fails, pass in -a
EOF
exit 1
}
# Option state; empty string means "off".
BENCH=""
GEN=""
ROOT=""
# Consume leading option flags. Everything after the first unrecognized
# argument is left in "$@" and passed through to 'git commit' later.
while [ ! -z "$1" ]; do
  case "$1" in
  "--benchmark")
    BENCH="$2"
    shift
    shift
    ;;
  "--gen")
    GEN="yes"
    shift
    ;;
  "--root")
    ROOT="yes"
    shift
    ;;
  "--help")
    Usage
    ;;
  "-h")
    Usage
    ;;
  "help")
    Usage
    ;;
  *)
    break
    ;;
  esac
done

# Root runs the given test executable under sudo, but only when --root was
# requested; otherwise it is a no-op.
function Root {
  if [ ! -z "$ROOT" ]; then
    local exec="$1"
    # Some folks (like me) keep source code in places inaccessible by root (like
    # NFS), so to make sure things run smoothly we copy them to a /tmp location.
    local tmpfile="$(mktemp -t gopacket_XXXXXXXX)"
    echo "Running root test executable $exec as $tmpfile"
    cp "$exec" "$tmpfile"
    chmod a+x "$tmpfile"
    shift
    sudo "$tmpfile" "$@"
  fi
}
# With no remaining arguments there is nothing to commit; show help.
if [ "$#" -eq "0" ]; then
  Usage
fi
# Run from the repository root (the directory containing this script).
cd $(dirname $0)

# Check for copyright notices.
for filename in $(find ./ -type f -name '*.go'); do
  if ! head -n 1 "$filename" | grep -q Copyright; then
    echo "File '$filename' may not have copyright notice"
    exit 1
  fi
done

# From here on, abort on any failure and echo commands as they run.
set -e
set -x

# Prime sudo credentials up front so root tests don't stall mid-run.
if [ ! -z "$ROOT" ]; then
  echo "Running SUDO to get root priviledges for root tests"
  sudo echo "have root"
fi

# Regenerate MAC-prefix and IANA-port tables from external data sources.
if [ ! -z "$GEN" ]; then
  pushd macs
  go run gen.go | gofmt > valid_mac_prefixes.go
  popd
  pushd layers
  go run gen.go | gofmt > iana_ports.go
  popd
fi

# Make sure everything is formatted, compiles, and tests pass.
go fmt ./...
go test -i ./... 2>/dev/null >/dev/null || true
go test
go build
pushd examples/bytediff
go build
popd
# pcap-dependent packages and examples only build where libpcap headers exist.
if [ -f /usr/include/pcap.h ]; then
  pushd pcap
  go test ./...
  go build ./...
  go build pcap_tester.go
  Root pcap_tester --mode=basic
  Root pcap_tester --mode=filtered
  Root pcap_tester --mode=timestamp || echo "You might not support timestamp sources"
  popd
  pushd examples/pcapdump
  go build
  popd
  pushd examples/arpscan
  go build
  popd
  pushd examples/bidirectional
  go build
  popd
  pushd examples/synscan
  go build
  popd
  pushd examples/httpassembly
  go build
  popd
  pushd examples/statsassembly
  go build
  popd
fi
pushd macs
go test ./...
gofmt -w gen.go
go build gen.go
popd
pushd tcpassembly
go test ./...
popd
pushd reassembly
go test ./...
popd
pushd layers
gofmt -w gen.go
go build gen.go
go test ./...
popd
pushd pcapgo
go test ./...
go build ./...
popd
# afpacket needs TPACKET_V3 support in the kernel headers.
if [ -f /usr/include/linux/if_packet.h ]; then
  if grep -q TPACKET_V3 /usr/include/linux/if_packet.h; then
    pushd afpacket
    go build ./...
    go test ./...
    popd
  fi
fi
# pfring support is optional as well.
if [ -f /usr/include/pfring.h ]; then
  pushd pfring
  go test ./...
  go build ./...
  popd
  pushd examples/pfdump
  go build
  popd
fi
for travis_script in `ls .travis.*.sh`; do
  ./$travis_script
done
# Run our initial commit
git commit "$@"

if [ -z "$BENCH" ]; then
  set +x
  echo "We're not benchmarking and we've committed... we're done!"
  exit
fi

### If we get here, we want to run benchmarks from current commit, and compare
### then to benchmarks from the last --benchmark commit.

# Get our current branch.
BRANCH="$(git branch | grep '^*' | awk '{print $2}')"

# File we're going to build our commit description in.
COMMIT_FILE="$(mktemp /tmp/tmp.XXXXXXXX)"

# Add the word "BENCH" to the start of the git commit.
echo -n "BENCH " > $COMMIT_FILE

# Get the current description... there must be an easier way.
git log -n 1 | grep '^ ' | sed 's/^ //' >> $COMMIT_FILE

# Get the commit sha for the last benchmark commit
PREV=$(git log -n 1 --grep='BENCHMARK_MARKER_DO_NOT_CHANGE' | head -n 1 | awk '{print $2}')

## Run current benchmarks
cat >> $COMMIT_FILE <<EOF
----------------------------------------------------------
BENCHMARK_MARKER_DO_NOT_CHANGE
----------------------------------------------------------
Go version $(go version)
TEST BENCHMARKS "$BENCH"
EOF
# go seems to have trouble with 'go test --bench=. ./...'
go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
pushd layers
go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
popd
cat >> $COMMIT_FILE <<EOF
PCAP BENCHMARK
EOF
# BUGFIX: '-eq' is bash's *integer* comparison; with the string ".*" it fails
# with "integer expression expected", so the pcap benchmark never ran. Use
# string equality ('=') instead.
if [ "$BENCH" = ".*" ]; then
  go run pcap/gopacket_benchmark/*.go 2>&1 | tee -a $COMMIT_FILE
fi

## Reset to last benchmark commit, run benchmarks
git checkout $PREV
cat >> $COMMIT_FILE <<EOF
----------------------------------------------------------
BENCHMARKING AGAINST COMMIT $PREV
----------------------------------------------------------
OLD TEST BENCHMARKS
EOF
# go seems to have trouble with 'go test --bench=. ./...'
go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
pushd layers
go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
popd
cat >> $COMMIT_FILE <<EOF
OLD PCAP BENCHMARK
EOF
# BUGFIX: string comparison, as above.
if [ "$BENCH" = ".*" ]; then
  go run pcap/gopacket_benchmark/*.go 2>&1 | tee -a $COMMIT_FILE
fi

## Reset back to the most recent commit, edit the commit message by appending
## benchmark results.
git checkout $BRANCH
git commit --amend -F $COMMIT_FILE

107
vendor/github.com/google/gopacket/layerclass.go generated vendored Normal file
View File

@ -0,0 +1,107 @@
// Copyright 2012 Google, Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package gopacket
// LayerClass is a set of LayerTypes, used for grabbing one of a number of
// different types from a packet.
type LayerClass interface {
	// Contains returns true if the given layer type should be considered part
	// of this layer class.
	Contains(LayerType) bool
	// LayerTypes returns the set of all layer types in this layer class.
	// Note that this may not be a fast operation on all LayerClass
	// implementations.
	LayerTypes() []LayerType
}

// Contains implements LayerClass. A single LayerType is a class containing
// exactly itself.
func (l LayerType) Contains(a LayerType) bool {
	return l == a
}

// LayerTypes implements LayerClass, returning the one-element set {l}.
func (l LayerType) LayerTypes() []LayerType {
	return []LayerType{l}
}
// LayerClassSlice implements a LayerClass with a slice: s[t] is true exactly
// when layer type t is a member. Fast, but sized by the largest member type.
type LayerClassSlice []bool

// Contains returns true if the given layer type should be considered part
// of this layer class.
func (s LayerClassSlice) Contains(t LayerType) bool {
	// Guard both bounds: previously a negative LayerType passed the
	// upper-bound check (int(t) < len(s)) and s[t] panicked with an
	// index-out-of-range. Negative types are simply not members.
	return int(t) >= 0 && int(t) < len(s) && s[t]
}

// LayerTypes returns all layer types in this LayerClassSlice.
// Because of LayerClassSlice's implementation, this could be quite slow.
func (s LayerClassSlice) LayerTypes() (all []LayerType) {
	for i := 0; i < len(s); i++ {
		if s[i] {
			all = append(all, LayerType(i))
		}
	}
	return
}
// NewLayerClassSlice builds a LayerClassSlice big enough to index the largest
// type in types, marking slice[t] true for each member. Note, if you
// implement your own LayerType and give it a high value, this WILL create a
// very large slice.
func NewLayerClassSlice(types []LayerType) LayerClassSlice {
	var highest LayerType
	for _, t := range types {
		if t > highest {
			highest = t
		}
	}
	s := make(LayerClassSlice, int(highest+1))
	for _, t := range types {
		s[t] = true
	}
	return s
}
// LayerClassMap implements a LayerClass with a map.
type LayerClassMap map[LayerType]bool

// Contains returns true if the given layer type should be considered part
// of this layer class. A missing key yields the zero value false, so no
// existence check is needed.
func (m LayerClassMap) Contains(t LayerType) bool {
	return m[t]
}

// LayerTypes returns all layer types in this LayerClassMap.
// NOTE: map iteration order is random, so the result order is
// nondeterministic.
func (m LayerClassMap) LayerTypes() (all []LayerType) {
	for t := range m {
		all = append(all, t)
	}
	return
}

// NewLayerClassMap creates a LayerClassMap and sets map[t] to true for each
// type in types.
func NewLayerClassMap(types []LayerType) LayerClassMap {
	m := LayerClassMap{}
	for _, typ := range types {
		m[typ] = true
	}
	return m
}
// NewLayerClass creates a LayerClass, attempting to be smart about which type
// it creates based on which types are passed in: a compact slice for small
// type values, a map when any type is large.
func NewLayerClass(types []LayerType) LayerClass {
	for _, typ := range types {
		if typ > maxLayerType {
			// NewLayerClassSlice could create a very large object, so instead create
			// a map.
			return NewLayerClassMap(types)
		}
	}
	return NewLayerClassSlice(types)
}

111
vendor/github.com/google/gopacket/layertype.go generated vendored Normal file
View File

@ -0,0 +1,111 @@
// Copyright 2012 Google, Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package gopacket
import (
"fmt"
"strconv"
)
// LayerType is a unique identifier for each type of layer. This enumeration
// does not match with any externally available numbering scheme... it's solely
// usable/useful within this library as a means for requesting layer types
// (see Packet.Layer) and determining which types of layers have been decoded.
//
// New LayerTypes may be created by calling gopacket.RegisterLayerType.
type LayerType int64

// LayerTypeMetadata contains metadata associated with each LayerType.
type LayerTypeMetadata struct {
	// Name is the string returned by each layer type's String method.
	Name string
	// Decoder is the decoder to use when the layer type is passed in as a
	// Decoder.
	Decoder Decoder
}

// layerTypeMetadata wraps LayerTypeMetadata with an inUse flag so that
// registration can detect duplicate layer type numbers.
type layerTypeMetadata struct {
	inUse bool
	LayerTypeMetadata
}

// DecodersByLayerName maps layer names to decoders for those layers.
// This allows users to specify decoders by name to a program and have that
// program pick the correct decoder accordingly.
var DecodersByLayerName = map[string]Decoder{}

// maxLayerType bounds the array-backed fast path: types in [0, maxLayerType)
// live in ltMeta; everything else falls back to the ltMetaMap map.
const maxLayerType = 2000

var ltMeta [maxLayerType]layerTypeMetadata
var ltMetaMap = map[LayerType]layerTypeMetadata{}
// RegisterLayerType creates a new layer type and registers it globally.
// The number passed in must be unique, or a runtime panic will occur. Numbers
// 0-999 are reserved for the gopacket library. Numbers 1000-1999 should be
// used for common application-specific types, and are very fast. Any other
// number (negative or >= 2000) may be used for uncommon application-specific
// types, and are somewhat slower (they require a map lookup over an array
// index).
func RegisterLayerType(num int, meta LayerTypeMetadata) LayerType {
	// Check the appropriate store (array fast path or fallback map) for a
	// previous registration of this number before delegating.
	var taken bool
	if 0 <= num && num < maxLayerType {
		taken = ltMeta[num].inUse
	} else {
		taken = ltMetaMap[LayerType(num)].inUse
	}
	if taken {
		panic("Layer type already exists")
	}
	return OverrideLayerType(num, meta)
}
// OverrideLayerType acts like RegisterLayerType, except that if the layer type
// has already been registered, it overrides the metadata with the passed-in
// metadata instead of panicking.
func OverrideLayerType(num int, meta LayerTypeMetadata) LayerType {
	// Small non-negative numbers use the fixed array; everything else goes
	// into the fallback map.
	if 0 <= num && num < maxLayerType {
		ltMeta[num] = layerTypeMetadata{
			inUse:             true,
			LayerTypeMetadata: meta,
		}
	} else {
		ltMetaMap[LayerType(num)] = layerTypeMetadata{
			inUse:             true,
			LayerTypeMetadata: meta,
		}
	}
	DecodersByLayerName[meta.Name] = meta.Decoder
	return LayerType(num)
}
// Decode decodes the given data using the decoder registered with the layer
// type, returning an error if no decoder is registered for t.
func (t LayerType) Decode(data []byte, c PacketBuilder) error {
	var d Decoder
	if 0 <= int(t) && int(t) < maxLayerType {
		d = ltMeta[int(t)].Decoder
	} else {
		d = ltMetaMap[t].Decoder
	}
	if d != nil {
		return d.Decode(data, c)
	}
	return fmt.Errorf("Layer type %v has no associated decoder", t)
}

// String returns the string associated with this layer type, falling back to
// the numeric value for unregistered (or unnamed) types.
func (t LayerType) String() (s string) {
	if 0 <= int(t) && int(t) < maxLayerType {
		s = ltMeta[int(t)].Name
	} else {
		s = ltMetaMap[t].Name
	}
	if s == "" {
		s = strconv.Itoa(int(t))
	}
	return
}

838
vendor/github.com/google/gopacket/packet.go generated vendored Normal file
View File

@ -0,0 +1,838 @@
// Copyright 2012 Google, Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package gopacket
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"reflect"
"runtime/debug"
"strings"
"time"
)
// CaptureInfo provides standardized information about a packet captured off
// the wire or read from a file.
type CaptureInfo struct {
	// Timestamp is the time the packet was captured, if that is known.
	Timestamp time.Time
	// CaptureLength is the total number of bytes read off of the wire.
	CaptureLength int
	// Length is the size of the original packet. Should always be >=
	// CaptureLength.
	Length int
	// InterfaceIndex identifies the capture interface.
	// NOTE(review): exact semantics depend on the capture source; confirm
	// with the producing code.
	InterfaceIndex int
}

// PacketMetadata contains metadata for a packet.
type PacketMetadata struct {
	CaptureInfo
	// Truncated is true if packet decoding logic detects that there are fewer
	// bytes in the packet than are detailed in various headers (for example, if
	// the number of bytes in the IPv4 contents/payload is less than IPv4.Length).
	// This is also set automatically for packets captured off the wire if
	// CaptureInfo.CaptureLength < CaptureInfo.Length.
	Truncated bool
}
// Packet is the primary object used by gopacket. Packets are created by a
// Decoder's Decode call. A packet is made up of a set of Data, which
// is broken into a number of Layers as it is decoded.
type Packet interface {
	//// Functions for outputting the packet as a human-readable string:
	//// ------------------------------------------------------------------
	// String returns a human-readable string representation of the packet.
	// It uses LayerString on each layer to output the layer.
	String() string
	// Dump returns a verbose human-readable string representation of the packet,
	// including a hex dump of all layers. It uses LayerDump on each layer to
	// output the layer.
	Dump() string
	//// Functions for accessing arbitrary packet layers:
	//// ------------------------------------------------------------------
	// Layers returns all layers in this packet, computing them as necessary.
	Layers() []Layer
	// Layer returns the first layer in this packet of the given type, or nil.
	Layer(LayerType) Layer
	// LayerClass returns the first layer in this packet of the given class,
	// or nil.
	LayerClass(LayerClass) Layer
	//// Functions for accessing specific types of packet layers. These functions
	//// return the first layer of each type found within the packet.
	//// ------------------------------------------------------------------
	// LinkLayer returns the first link layer in the packet.
	LinkLayer() LinkLayer
	// NetworkLayer returns the first network layer in the packet.
	NetworkLayer() NetworkLayer
	// TransportLayer returns the first transport layer in the packet.
	TransportLayer() TransportLayer
	// ApplicationLayer returns the first application layer in the packet.
	ApplicationLayer() ApplicationLayer
	// ErrorLayer is particularly useful, since it returns nil if the packet
	// was fully decoded successfully, and non-nil if an error was encountered
	// in decoding and the packet was only partially decoded. Thus, its output
	// can be used to determine if the entire packet was able to be decoded.
	ErrorLayer() ErrorLayer
	//// Functions for accessing data specific to the packet:
	//// ------------------------------------------------------------------
	// Data returns the set of bytes that make up this entire packet.
	Data() []byte
	// Metadata returns packet metadata associated with this packet.
	Metadata() *PacketMetadata
}
// packet contains all the information we need to fulfill the Packet interface,
// and its two "subclasses" (yes, no such thing in Go, bear with me),
// eagerPacket and lazyPacket, provide eager and lazy decoding logic around the
// various functions needed to access this information.
type packet struct {
	// data contains the entire packet data for a packet
	data []byte
	// initialLayers is space for an initial set of layers already created inside
	// the packet.
	initialLayers [6]Layer
	// layers contains each layer we've already decoded
	layers []Layer
	// last is the last layer added to the packet
	last Layer
	// metadata is the PacketMetadata for this packet
	metadata PacketMetadata
	// decodeOptions controls how decoding behaves (e.g. panic recovery).
	decodeOptions DecodeOptions
	// Pointers to the various important layers
	link        LinkLayer
	network     NetworkLayer
	transport   TransportLayer
	application ApplicationLayer
	failure     ErrorLayer
}

// SetTruncated marks the packet as having fewer bytes than its headers claim.
func (p *packet) SetTruncated() {
	p.metadata.Truncated = true
}

// SetLinkLayer records l as the packet's link layer. Only the first call has
// any effect, so the outermost decoded layer of this category wins.
func (p *packet) SetLinkLayer(l LinkLayer) {
	if p.link == nil {
		p.link = l
	}
}

// SetNetworkLayer records the first network layer decoded; later calls are
// no-ops.
func (p *packet) SetNetworkLayer(l NetworkLayer) {
	if p.network == nil {
		p.network = l
	}
}

// SetTransportLayer records the first transport layer decoded; later calls
// are no-ops.
func (p *packet) SetTransportLayer(l TransportLayer) {
	if p.transport == nil {
		p.transport = l
	}
}

// SetApplicationLayer records the first application layer decoded; later
// calls are no-ops.
func (p *packet) SetApplicationLayer(l ApplicationLayer) {
	if p.application == nil {
		p.application = l
	}
}

// SetErrorLayer records the first error layer encountered; later calls are
// no-ops.
func (p *packet) SetErrorLayer(l ErrorLayer) {
	if p.failure == nil {
		p.failure = l
	}
}

// AddLayer appends l to the decoded-layer list and tracks it as the most
// recently added layer.
func (p *packet) AddLayer(l Layer) {
	p.layers = append(p.layers, l)
	p.last = l
}

// DumpPacketData writes a full verbose dump of the packet to stderr.
func (p *packet) DumpPacketData() {
	fmt.Fprint(os.Stderr, p.packetDump())
	os.Stderr.Sync()
}

// Metadata returns the packet's metadata (mutable via the returned pointer).
func (p *packet) Metadata() *PacketMetadata {
	return &p.metadata
}

// Data returns the entire raw packet data.
func (p *packet) Data() []byte {
	return p.data
}

// DecodeOptions returns the options controlling this packet's decoding.
func (p *packet) DecodeOptions() *DecodeOptions {
	return &p.decodeOptions
}

// addFinalDecodeError records err as a trailing DecodeFailure layer covering
// whatever bytes remained undecoded when the error occurred.
func (p *packet) addFinalDecodeError(err error, stack []byte) {
	fail := &DecodeFailure{err: err, stack: stack}
	if p.last == nil {
		fail.data = p.data
	} else {
		fail.data = p.last.LayerPayload()
	}
	p.AddLayer(fail)
	p.SetErrorLayer(fail)
}

// recoverDecodeError converts a panic during decoding into a DecodeFailure
// layer, unless SkipDecodeRecovery was requested. Must be called via defer.
func (p *packet) recoverDecodeError() {
	if !p.decodeOptions.SkipDecodeRecovery {
		if r := recover(); r != nil {
			p.addFinalDecodeError(fmt.Errorf("%v", r), debug.Stack())
		}
	}
}
// LayerString outputs an individual layer as a string. The layer is output
// in a single line, with no trailing newline. This function is specifically
// designed to do the right thing for most layers... it follows the following
// rules:
//  * If the Layer has a String function, just output that.
//  * Otherwise, output all exported fields in the layer, recursing into
//    exported slices and structs.
// NOTE: This is NOT THE SAME AS fmt's "%#v". %#v will output both exported
// and unexported fields... many times packet layers contain unexported stuff
// that would just mess up the output of the layer, see for example the
// Payload layer and its internal 'data' field, which contains a large byte
// array that would really mess up formatting.
func LayerString(l Layer) string {
	return fmt.Sprintf("%v\t%s", l.LayerType(), layerString(reflect.ValueOf(l), false, false))
}

// Dumper dumps verbose information on a value. If a layer type implements
// Dumper, then its LayerDump() string will include the results in its output.
type Dumper interface {
	Dump() string
}

// LayerDump outputs a very verbose string representation of a layer. Its
// output is a concatenation of LayerString(l) and hex.Dump(l.LayerContents()).
// It contains newlines and ends with a newline.
func LayerDump(l Layer) string {
	var b bytes.Buffer
	b.WriteString(LayerString(l))
	b.WriteByte('\n')
	if d, ok := l.(Dumper); ok {
		dump := d.Dump()
		if dump != "" {
			b.WriteString(dump)
			// Make sure the optional Dump() section ends with a newline
			// before the hex dump follows it.
			if dump[len(dump)-1] != '\n' {
				b.WriteByte('\n')
			}
		}
	}
	b.WriteString(hex.Dump(l.LayerContents()))
	return b.String()
}
// layerString outputs, recursively, a layer in a "smart" way. See docs for
// LayerString for more details.
//
// Params:
//   v - value to write out
//   anonymous: if we're currently recursing an anonymous member of a struct
//   writeSpace: if we've already written a value in a struct, and need to
//     write a space before writing more. This happens when we write various
//     anonymous values, and need to keep writing more.
func layerString(v reflect.Value, anonymous bool, writeSpace bool) string {
	// Let String() functions take precedence.
	if v.CanInterface() {
		if s, ok := v.Interface().(fmt.Stringer); ok {
			return s.String()
		}
	}
	// Reflect, and spit out all the exported fields as key=value.
	switch v.Type().Kind() {
	case reflect.Interface, reflect.Ptr:
		if v.IsNil() {
			return "nil"
		}
		// Dereference and recurse on the pointed-to / boxed value.
		r := v.Elem()
		return layerString(r, anonymous, writeSpace)
	case reflect.Struct:
		var b bytes.Buffer
		typ := v.Type()
		// Anonymous (embedded) members are inlined into the parent's braces.
		if !anonymous {
			b.WriteByte('{')
		}
		for i := 0; i < v.NumField(); i++ {
			// Check if this is upper-case.
			ftype := typ.Field(i)
			f := v.Field(i)
			if ftype.Anonymous {
				anonStr := layerString(f, true, writeSpace)
				writeSpace = writeSpace || anonStr != ""
				b.WriteString(anonStr)
			} else if ftype.PkgPath == "" { // exported
				if writeSpace {
					b.WriteByte(' ')
				}
				writeSpace = true
				fmt.Fprintf(&b, "%s=%s", typ.Field(i).Name, layerString(f, false, writeSpace))
			}
		}
		if !anonymous {
			b.WriteByte('}')
		}
		return b.String()
	case reflect.Slice:
		var b bytes.Buffer
		b.WriteByte('[')
		// Elide long slices; only slices of up to 4 elements are spelled out.
		if v.Len() > 4 {
			fmt.Fprintf(&b, "..%d..", v.Len())
		} else {
			for j := 0; j < v.Len(); j++ {
				if j != 0 {
					b.WriteString(", ")
				}
				b.WriteString(layerString(v.Index(j), false, false))
			}
		}
		b.WriteByte(']')
		return b.String()
	}
	// All other kinds fall back to fmt's default formatting.
	return fmt.Sprintf("%v", v.Interface())
}
const (
	// longBytesLength is the size at which LongBytesGoString starts
	// truncating its output.
	longBytesLength = 128
)

// LongBytesGoString renders buf in Go syntax (as %#v would), but once buf
// reaches longBytesLength bytes it shortens the result to
// '<type>{<truncated slice> ... (<n> bytes)}' so very long byte strings
// don't flood the display.
func LongBytesGoString(buf []byte) string {
	if len(buf) < longBytesLength {
		return fmt.Sprintf("%#v", buf)
	}
	truncated := fmt.Sprintf("%#v", buf[:longBytesLength-1])
	truncated = strings.TrimSuffix(truncated, "}")
	return fmt.Sprintf("%s ... (%d bytes)}", truncated, len(buf))
}
// baseLayerString renders a struct whose first two fields are byte slices
// (assumed to be BaseLayer's Contents and Payload — confirm against the
// layers package) via reflection, shortening each slice with
// LongBytesGoString.
func baseLayerString(value reflect.Value) string {
	t := value.Type()
	content := value.Field(0)
	c := make([]byte, content.Len())
	for i := range c {
		c[i] = byte(content.Index(i).Uint())
	}
	payload := value.Field(1)
	p := make([]byte, payload.Len())
	for i := range p {
		p[i] = byte(payload.Index(i).Uint())
	}
	return fmt.Sprintf("%s{Contents:%s, Payload:%s}", t.String(),
		LongBytesGoString(c),
		LongBytesGoString(p))
}

// layerGoString writes a Go-syntax representation of i to b, recursing
// through pointers, interfaces, and structs, and special-casing fields named
// "BaseLayer" so their huge byte slices are truncated.
func layerGoString(i interface{}, b *bytes.Buffer) {
	// A type's own GoString takes precedence over reflection.
	if s, ok := i.(fmt.GoStringer); ok {
		b.WriteString(s.GoString())
		return
	}
	var v reflect.Value
	var ok bool
	if v, ok = i.(reflect.Value); !ok {
		v = reflect.ValueOf(i)
	}
	switch v.Kind() {
	case reflect.Ptr, reflect.Interface:
		if v.Kind() == reflect.Ptr {
			b.WriteByte('&')
		}
		layerGoString(v.Elem().Interface(), b)
	case reflect.Struct:
		t := v.Type()
		b.WriteString(t.String())
		b.WriteByte('{')
		for i := 0; i < v.NumField(); i++ {
			if i > 0 {
				b.WriteString(", ")
			}
			if t.Field(i).Name == "BaseLayer" {
				fmt.Fprintf(b, "BaseLayer:%s", baseLayerString(v.Field(i)))
			} else if v.Field(i).Kind() == reflect.Struct {
				fmt.Fprintf(b, "%s:", t.Field(i).Name)
				layerGoString(v.Field(i), b)
			} else if v.Field(i).Kind() == reflect.Ptr {
				b.WriteByte('&')
				layerGoString(v.Field(i), b)
			} else {
				fmt.Fprintf(b, "%s:%#v", t.Field(i).Name, v.Field(i))
			}
		}
		b.WriteByte('}')
	default:
		fmt.Fprintf(b, "%#v", i)
	}
}
// LayerGoString returns a representation of the layer in Go syntax,
// taking care to shorten "very long" BaseLayer byte slices
func LayerGoString(l Layer) string {
	var buf bytes.Buffer
	layerGoString(l, &buf)
	return buf.String()
}
// packetString renders a human-readable multi-line summary of the packet:
// a header line with sizes/flags/timestamp, then one line per decoded layer.
func (p *packet) packetString() string {
	var out bytes.Buffer
	fmt.Fprintf(&out, "PACKET: %d bytes", len(p.Data()))
	if p.metadata.Truncated {
		out.WriteString(", truncated")
	}
	if p.metadata.Length > 0 {
		fmt.Fprintf(&out, ", wire length %d cap length %d", p.metadata.Length, p.metadata.CaptureLength)
	}
	if !p.metadata.Timestamp.IsZero() {
		fmt.Fprintf(&out, " @ %v", p.metadata.Timestamp)
	}
	out.WriteByte('\n')
	for i, layer := range p.layers {
		fmt.Fprintf(&out, "- Layer %d (%02d bytes) = %s\n", i+1, len(layer.LayerContents()), LayerString(layer))
	}
	return out.String()
}
// packetDump renders a full hex dump of the raw packet data followed by a
// detailed dump of each decoded layer.
func (p *packet) packetDump() string {
	var out bytes.Buffer
	fmt.Fprintf(&out, "-- FULL PACKET DATA (%d bytes) ------------------------------------\n%s", len(p.data), hex.Dump(p.data))
	for i, layer := range p.layers {
		fmt.Fprintf(&out, "--- Layer %d ---\n%s", i+1, LayerDump(layer))
	}
	return out.String()
}
// eagerPacket is a packet implementation that does eager decoding. Upon
// initial construction, it decodes all the layers it can from packet data.
// eagerPacket implements Packet and PacketBuilder.
type eagerPacket struct {
	packet // embeds all shared packet state and helpers
}
// errNilDecoder is returned when NextDecoder is handed a nil decoder, which
// usually indicates an unsupported decode type.
var errNilDecoder = errors.New("NextDecoder passed nil decoder, probably an unsupported decode type")

// NextDecoder implements PacketBuilder. Because this packet decodes eagerly,
// it immediately runs the next decoder on the previous layer's payload.
func (p *eagerPacket) NextDecoder(next Decoder) error {
	if next == nil {
		return errNilDecoder
	}
	if p.last == nil {
		return errors.New("NextDecoder called, but no layers added yet")
	}
	d := p.last.LayerPayload()
	if len(d) == 0 {
		// Nothing left to decode.
		return nil
	}
	// Since we're eager, immediately call the next decoder.
	return next.Decode(d, p)
}
// initialDecode kicks off eager decoding of the packet's entire data with the
// given first-layer decoder. Panics during decoding are converted to a
// decode-failure layer by recoverDecodeError; plain errors go through
// addFinalDecodeError.
func (p *eagerPacket) initialDecode(dec Decoder) {
	defer p.recoverDecodeError()
	err := dec.Decode(p.data, p)
	if err != nil {
		p.addFinalDecodeError(err, nil)
	}
}
// LinkLayer implements Packet. Decoding already happened at construction,
// so this simply returns the stored layer (possibly nil).
func (p *eagerPacket) LinkLayer() LinkLayer {
	return p.link
}

// NetworkLayer implements Packet.
func (p *eagerPacket) NetworkLayer() NetworkLayer {
	return p.network
}

// TransportLayer implements Packet.
func (p *eagerPacket) TransportLayer() TransportLayer {
	return p.transport
}

// ApplicationLayer implements Packet.
func (p *eagerPacket) ApplicationLayer() ApplicationLayer {
	return p.application
}

// ErrorLayer implements Packet.
func (p *eagerPacket) ErrorLayer() ErrorLayer {
	return p.failure
}

// Layers implements Packet, returning every layer decoded at construction.
func (p *eagerPacket) Layers() []Layer {
	return p.layers
}

// Layer implements Packet, returning the first layer of the given type or
// nil if no such layer was decoded.
func (p *eagerPacket) Layer(t LayerType) Layer {
	for _, l := range p.layers {
		if l.LayerType() == t {
			return l
		}
	}
	return nil
}

// LayerClass implements Packet, returning the first layer whose type belongs
// to the given class, or nil.
func (p *eagerPacket) LayerClass(lc LayerClass) Layer {
	for _, l := range p.layers {
		if lc.Contains(l.LayerType()) {
			return l
		}
	}
	return nil
}

// String implements Packet.
func (p *eagerPacket) String() string { return p.packetString() }

// Dump implements Packet.
func (p *eagerPacket) Dump() string { return p.packetDump() }
// lazyPacket does lazy decoding on its packet data. On construction it does
// no initial decoding. For each function call, it decodes only as many layers
// as are necessary to compute the return value for that function.
// lazyPacket implements Packet and PacketBuilder.
type lazyPacket struct {
	packet
	next Decoder // decoder for the next undecoded layer; nil once decoding is exhausted
}
// NextDecoder implements PacketBuilder. Unlike the eager implementation, it
// only records the decoder; decodeNextLayer invokes it on demand.
func (p *lazyPacket) NextDecoder(next Decoder) error {
	if next == nil {
		return errNilDecoder
	}
	p.next = next
	return nil
}
// decodeNextLayer decodes at most one more layer, consuming (and clearing)
// p.next. The ordering is deliberate: p.next is nilled BEFORE Decode runs,
// because Decode may itself install a fresh p.next via NextDecoder.
func (p *lazyPacket) decodeNextLayer() {
	if p.next == nil {
		return
	}
	// The payload to decode is the whole packet for the first layer, and the
	// previous layer's payload afterwards.
	d := p.data
	if p.last != nil {
		d = p.last.LayerPayload()
	}
	next := p.next
	p.next = nil
	// We've just set p.next to nil, so if we see we have no data, this should be
	// the final call we get to decodeNextLayer if we return here.
	if len(d) == 0 {
		return
	}
	defer p.recoverDecodeError()
	err := next.Decode(d, p)
	if err != nil {
		p.addFinalDecodeError(err, nil)
	}
}
// LinkLayer implements Packet, decoding layers until a link layer is found
// or no decoders remain.
func (p *lazyPacket) LinkLayer() LinkLayer {
	for p.link == nil && p.next != nil {
		p.decodeNextLayer()
	}
	return p.link
}

// NetworkLayer implements Packet, decoding on demand as above.
func (p *lazyPacket) NetworkLayer() NetworkLayer {
	for p.network == nil && p.next != nil {
		p.decodeNextLayer()
	}
	return p.network
}

// TransportLayer implements Packet, decoding on demand as above.
func (p *lazyPacket) TransportLayer() TransportLayer {
	for p.transport == nil && p.next != nil {
		p.decodeNextLayer()
	}
	return p.transport
}

// ApplicationLayer implements Packet, decoding on demand as above.
func (p *lazyPacket) ApplicationLayer() ApplicationLayer {
	for p.application == nil && p.next != nil {
		p.decodeNextLayer()
	}
	return p.application
}

// ErrorLayer implements Packet, decoding on demand as above.
func (p *lazyPacket) ErrorLayer() ErrorLayer {
	for p.failure == nil && p.next != nil {
		p.decodeNextLayer()
	}
	return p.failure
}

// Layers implements Packet, forcing full decoding before returning all layers.
func (p *lazyPacket) Layers() []Layer {
	for p.next != nil {
		p.decodeNextLayer()
	}
	return p.layers
}
// Layer implements Packet. It first scans the already-decoded layers, then
// decodes further layers one step at a time — checking only the newly added
// layers after each step — until a match is found or decoding is exhausted.
func (p *lazyPacket) Layer(t LayerType) Layer {
	for _, decoded := range p.layers {
		if decoded.LayerType() == t {
			return decoded
		}
	}
	for scanned := len(p.layers); p.next != nil; scanned = len(p.layers) {
		p.decodeNextLayer()
		for _, decoded := range p.layers[scanned:] {
			if decoded.LayerType() == t {
				return decoded
			}
		}
	}
	return nil
}
// LayerClass implements Packet. Like Layer, it scans what is already decoded,
// then decodes step by step, inspecting only each step's newly added layers.
func (p *lazyPacket) LayerClass(lc LayerClass) Layer {
	for _, decoded := range p.layers {
		if lc.Contains(decoded.LayerType()) {
			return decoded
		}
	}
	for scanned := len(p.layers); p.next != nil; scanned = len(p.layers) {
		p.decodeNextLayer()
		for _, decoded := range p.layers[scanned:] {
			if lc.Contains(decoded.LayerType()) {
				return decoded
			}
		}
	}
	return nil
}
// String implements Packet, forcing full decoding (Layers) before rendering.
func (p *lazyPacket) String() string { p.Layers(); return p.packetString() }
// Dump implements Packet, forcing full decoding (Layers) before rendering.
func (p *lazyPacket) Dump() string { p.Layers(); return p.packetDump() }
// DecodeOptions tells gopacket how to decode a packet. The zero value is the
// safest configuration (eager decoding with a private copy of the data); see
// the Default/Lazy/NoCopy package variables for ready-made values.
type DecodeOptions struct {
	// Lazy decoding decodes the minimum number of layers needed to return data
	// for a packet at each function call. Be careful using this with concurrent
	// packet processors, as each call to packet.* could mutate the packet, and
	// two concurrent function calls could interact poorly.
	Lazy bool
	// NoCopy decoding doesn't copy its input buffer into storage that's owned by
	// the packet. If you can guarantee that the bytes underlying the slice
	// passed into NewPacket aren't going to be modified, this can be faster. If
	// there's any chance that those bytes WILL be changed, this will invalidate
	// your packets.
	NoCopy bool
	// SkipDecodeRecovery skips over panic recovery during packet decoding.
	// Normally, when packets decode, if a panic occurs, that panic is captured
	// by a recover(), and a DecodeFailure layer is added to the packet detailing
	// the issue. If this flag is set, panics are instead allowed to continue up
	// the stack.
	SkipDecodeRecovery bool
	// DecodeStreamsAsDatagrams enables routing of application-level layers in the TCP
	// decoder. If true, we should try to decode layers after TCP in single packets.
	// This is disabled by default because the reassembly package drives the decoding
	// of TCP payload data after reassembly.
	DecodeStreamsAsDatagrams bool
}
// Default decoding provides the safest (but slowest) method for decoding
// packets. It eagerly processes all layers (so it's concurrency-safe) and it
// copies its input buffer upon creation of the packet (so the packet remains
// valid if the underlying slice is modified). Both of these take time,
// though, so beware. If you can guarantee that the packet will only be used
// by one goroutine at a time, set Lazy decoding. If you can guarantee that
// the underlying slice won't change, set NoCopy decoding.
var Default = DecodeOptions{}

// Lazy is a DecodeOptions with just Lazy set.
var Lazy = DecodeOptions{Lazy: true}

// NoCopy is a DecodeOptions with just NoCopy set.
var NoCopy = DecodeOptions{NoCopy: true}

// DecodeStreamsAsDatagrams is a DecodeOptions with just DecodeStreamsAsDatagrams set.
var DecodeStreamsAsDatagrams = DecodeOptions{DecodeStreamsAsDatagrams: true}
// NewPacket creates a new Packet object from a set of bytes. The
// firstLayerDecoder tells it how to interpret the first layer from the bytes,
// future layers will be generated from that first layer automatically.
func NewPacket(data []byte, firstLayerDecoder Decoder, options DecodeOptions) Packet {
	if !options.NoCopy {
		// Take a private copy so later mutation of the caller's slice cannot
		// corrupt the packet.
		dataCopy := make([]byte, len(data))
		copy(dataCopy, data)
		data = dataCopy
	}
	if options.Lazy {
		p := &lazyPacket{
			packet: packet{data: data, decodeOptions: options},
			next:   firstLayerDecoder,
		}
		p.layers = p.initialLayers[:0]
		// Crazy craziness:
		// If the following return statement is REMOVED, and Lazy is FALSE, then
		// eager packet processing becomes 17% FASTER. No, there is no logical
		// explanation for this. However, it's such a hacky micro-optimization that
		// we really can't rely on it. It appears to have to do with the size the
		// compiler guesses for this function's stack space, since one symptom is
		// that with the return statement in place, we more than double calls to
		// runtime.morestack/runtime.lessstack. We'll hope the compiler gets better
		// over time and we get this optimization for free. Until then, we'll have
		// to live with slower packet processing.
		return p
	}
	p := &eagerPacket{
		packet: packet{data: data, decodeOptions: options},
	}
	p.layers = p.initialLayers[:0]
	p.initialDecode(firstLayerDecoder)
	return p
}
// PacketDataSource is an interface for some source of packet data. Users may
// create their own implementations, or use the existing implementations in
// gopacket/pcap (libpcap, allows reading from live interfaces or from
// pcap files) or gopacket/pfring (PF_RING, allows reading from live
// interfaces). Multiple finite sources can be chained together with
// ConcatFinitePacketDataSources.
type PacketDataSource interface {
	// ReadPacketData returns the next packet available from this data source.
	// It returns:
	//  data:  The bytes of an individual packet.
	//  ci:  Metadata about the capture
	//  err:  An error encountered while reading packet data.  If err != nil,
	//    then data/ci will be ignored.
	ReadPacketData() (data []byte, ci CaptureInfo, err error)
}
// ConcatFinitePacketDataSources returns a PacketDataSource that wraps a set
// of internal PacketDataSources, each of which will stop with io.EOF after
// reading a finite number of packets. The returned PacketDataSource will
// return all packets from the first finite source, followed by all packets from
// the second, etc. Once all finite sources have returned io.EOF, the returned
// source will as well.
func ConcatFinitePacketDataSources(pds ...PacketDataSource) PacketDataSource {
	c := concat(pds)
	return &c
}

// concat is a cursor over a list of finite sources; exhausted sources are
// dropped from the front as they return io.EOF.
type concat []PacketDataSource
// ReadPacketData implements PacketDataSource. It reads from the first
// remaining source, discarding each source from the front of the list as it
// reports io.EOF; once every source is exhausted it returns io.EOF itself.
func (c *concat) ReadPacketData() (data []byte, ci CaptureInfo, err error) {
	for {
		sources := *c
		if len(sources) == 0 {
			return nil, CaptureInfo{}, io.EOF
		}
		data, ci, err = sources[0].ReadPacketData()
		if err != io.EOF {
			return data, ci, err
		}
		*c = sources[1:]
	}
}
// ZeroCopyPacketDataSource is an interface to pull packet data from sources
// that allow data to be returned without copying to a user-controlled buffer.
// It's very similar to PacketDataSource, except that the caller must be more
// careful in how the returned buffer is handled: the buffer is owned by the
// source and may be reused by it.
type ZeroCopyPacketDataSource interface {
	// ZeroCopyReadPacketData returns the next packet available from this data source.
	// It returns:
	//  data:  The bytes of an individual packet.  Unlike with
	//    PacketDataSource's ReadPacketData, the slice returned here points
	//    to a buffer owned by the data source.  In particular, the bytes in
	//    this buffer may be changed by future calls to
	//    ZeroCopyReadPacketData.  Do not use the returned buffer after
	//    subsequent ZeroCopyReadPacketData calls.
	//  ci:  Metadata about the capture
	//  err:  An error encountered while reading packet data.  If err != nil,
	//    then data/ci will be ignored.
	ZeroCopyReadPacketData() (data []byte, ci CaptureInfo, err error)
}
// PacketSource reads in packets from a PacketDataSource, decodes them, and
// returns them.
//
// There are currently two different methods for reading packets in through
// a PacketSource:
//
// Reading With Packets Function
//
// This method is the most convenient and easiest to code, but lacks
// flexibility. Packets returns a 'chan Packet', then asynchronously writes
// packets into that channel. Packets uses a blocking channel, and closes
// it if an io.EOF is returned by the underlying PacketDataSource. All other
// PacketDataSource errors are ignored and discarded.
//  for packet := range packetSource.Packets() {
//    ...
//  }
//
// Reading With NextPacket Function
//
// This method is the most flexible, and exposes errors that may be
// encountered by the underlying PacketDataSource. It's also the fastest
// in a tight loop, since it doesn't have the overhead of a channel
// read/write. However, it requires the user to handle errors, most
// importantly the io.EOF error in cases where packets are being read from
// a file.
//  for {
//    packet, err := packetSource.NextPacket()
//    if err == io.EOF {
//      break
//    } else if err != nil {
//      log.Println("Error:", err)
//      continue
//    }
//    handlePacket(packet)  // Do something with each packet.
//  }
type PacketSource struct {
	source  PacketDataSource // underlying source of raw packet data
	decoder Decoder          // decoder applied to the first layer of each packet
	// DecodeOptions is the set of options to use for decoding each piece
	// of packet data. This can/should be changed by the user to reflect the
	// way packets should be decoded.
	DecodeOptions
	c chan Packet // lazily created by Packets; nil until first call
}
// NewPacketSource creates a packet data source. DecodeOptions starts at its
// zero value (eager decode, copying buffers); adjust the embedded field on
// the returned PacketSource to change that.
func NewPacketSource(source PacketDataSource, decoder Decoder) *PacketSource {
	return &PacketSource{
		source:  source,
		decoder: decoder,
	}
}
// NextPacket returns the next decoded packet from the PacketSource. On error,
// it returns a nil packet and a non-nil error. The capture metadata from the
// underlying source is attached to the packet, and the Truncated flag is set
// when the captured length is shorter than the wire length.
func (p *PacketSource) NextPacket() (Packet, error) {
	data, ci, err := p.source.ReadPacketData()
	if err != nil {
		return nil, err
	}
	pkt := NewPacket(data, p.decoder, p.DecodeOptions)
	meta := pkt.Metadata()
	meta.CaptureInfo = ci
	if ci.CaptureLength < ci.Length {
		meta.Truncated = true
	}
	return pkt, nil
}
// packetsToChannel reads in all packets from the packet source and sends them
// to the given channel. When it receives an error, it ignores it. When it
// receives an io.EOF, it closes the channel.
//
// NOTE(review): a persistently failing (non-EOF) source makes this loop spin
// without backoff — presumed acceptable for the supported sources; verify
// before reusing with a new PacketDataSource implementation.
func (p *PacketSource) packetsToChannel() {
	defer close(p.c)
	for {
		packet, err := p.NextPacket()
		if err == io.EOF {
			return
		} else if err == nil {
			p.c <- packet
		}
	}
}
// Packets returns a channel of packets, allowing easy iterating over
// packets. Packets will be asynchronously read in from the underlying
// PacketDataSource and written to the returned channel. If the underlying
// PacketDataSource returns an io.EOF error, the channel will be closed.
// If any other error is encountered, it is ignored.
//
//  for packet := range packetSource.Packets() {
//    handlePacket(packet)  // Do something with each packet.
//  }
//
// If called more than once, returns the same channel.
// NOTE(review): the nil-check/create below is not synchronized — presumably
// first use from a single goroutine is assumed; confirm before sharing a
// PacketSource across goroutines.
func (p *PacketSource) Packets() chan Packet {
	if p.c == nil {
		p.c = make(chan Packet, 1000) // buffered to decouple reader and consumer
		go p.packetsToChannel()
	}
	return p.c
}

198
vendor/github.com/google/gopacket/parser.go generated vendored Normal file
View File

@ -0,0 +1,198 @@
// Copyright 2012 Google, Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package gopacket
import (
"fmt"
)
// DecodingLayer is an interface for packet layers that can decode themselves.
//
// The important part of DecodingLayer is that they decode themselves in-place.
// Calling DecodeFromBytes on a DecodingLayer totally resets the entire layer to
// the new state defined by the data passed in. A returned error leaves the
// DecodingLayer in an unknown intermediate state, thus its fields should not be
// trusted.
//
// Because the DecodingLayer is resetting its own fields, a call to
// DecodeFromBytes should normally not require any memory allocation.
//
// Callers typically drive these methods through DecodingLayerParser rather
// than invoking them directly.
type DecodingLayer interface {
	// DecodeFromBytes resets the internal state of this layer to the state
	// defined by the passed-in bytes. Slices in the DecodingLayer may
	// reference the passed-in data, so care should be taken to copy it
	// first should later modification of data be required before the
	// DecodingLayer is discarded.
	DecodeFromBytes(data []byte, df DecodeFeedback) error
	// CanDecode returns the set of LayerTypes this DecodingLayer can
	// decode. For Layers that are also DecodingLayers, this will most
	// often be that Layer's LayerType().
	CanDecode() LayerClass
	// NextLayerType returns the LayerType which should be used to decode
	// the LayerPayload.
	NextLayerType() LayerType
	// LayerPayload is the set of bytes remaining to decode after a call to
	// DecodeFromBytes.
	LayerPayload() []byte
}
// DecodingLayerParser parses a given set of layer types. See DecodeLayers for
// more information on how DecodingLayerParser should be used.
type DecodingLayerParser struct {
	// DecodingLayerParserOptions is the set of options available to the
	// user to define the parser's behavior.
	DecodingLayerParserOptions
	first    LayerType                   // layer type assumed for the start of each input buffer
	decoders map[LayerType]DecodingLayer // registered decoders, keyed by the types they can decode
	df       DecodeFeedback              // feedback handle passed to every DecodeFromBytes call
	// Truncated is set when a decode layer detects that the packet has been
	// truncated.
	Truncated bool
}
// AddDecodingLayer adds a decoding layer to the parser. This adds support for
// the decoding layer's CanDecode layers to the parser... should they be
// encountered, they'll be parsed. A later registration for the same
// LayerType replaces the earlier one (plain map assignment).
func (l *DecodingLayerParser) AddDecodingLayer(d DecodingLayer) {
	for _, typ := range d.CanDecode().LayerTypes() {
		l.decoders[typ] = d
	}
}
// SetTruncated is used by DecodingLayers to set the Truncated boolean in the
// DecodingLayerParser. Users should simply read Truncated after calling
// DecodeLayers. (This makes the parser its own DecodeFeedback — see
// NewDecodingLayerParser.)
func (l *DecodingLayerParser) SetTruncated() {
	l.Truncated = true
}
// NewDecodingLayerParser creates a new DecodingLayerParser and adds in all
// of the given DecodingLayers with AddDecodingLayer.
//
// Each call to DecodeLayers will attempt to decode the given bytes first by
// treating them as a 'first'-type layer, then by using NextLayerType on
// subsequently decoded layers to find the next relevant decoder. Should a
// decoder not be available for the layer type returned by NextLayerType,
// decoding will stop.
func NewDecodingLayerParser(first LayerType, decoders ...DecodingLayer) *DecodingLayerParser {
	dlp := &DecodingLayerParser{
		decoders: make(map[LayerType]DecodingLayer),
		first:    first,
	}
	dlp.df = dlp // Cast this once to the interface
	for _, d := range decoders {
		dlp.AddDecodingLayer(d)
	}
	return dlp
}
// DecodeLayers decodes as many layers as possible from the given data. It
// initially treats the data as layer type 'typ', then uses NextLayerType on
// each subsequent decoded layer until it gets to a layer type it doesn't know
// how to parse.
//
// For each layer successfully decoded, DecodeLayers appends the layer type to
// the decoded slice. DecodeLayers truncates the 'decoded' slice initially, so
// there's no need to empty it yourself.
//
// This decoding method is about an order of magnitude faster than packet
// decoding, because it only decodes known layers that have already been
// allocated. This means it doesn't need to allocate each layer it returns...
// instead it overwrites the layers that already exist.
//
// Example usage:
// func main() {
// var eth layers.Ethernet
// var ip4 layers.IPv4
// var ip6 layers.IPv6
// var tcp layers.TCP
// var udp layers.UDP
// var payload gopacket.Payload
// parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp, &udp, &payload)
// var source gopacket.PacketDataSource = getMyDataSource()
// decodedLayers := make([]gopacket.LayerType, 0, 10)
// for {
// data, _, err := source.ReadPacketData()
//   if err != nil {
// fmt.Println("Error reading packet data: ", err)
// continue
// }
// fmt.Println("Decoding packet")
// err = parser.DecodeLayers(data, &decodedLayers)
// for _, typ := range decodedLayers {
// fmt.Println(" Successfully decoded layer type", typ)
// switch typ {
// case layers.LayerTypeEthernet:
// fmt.Println(" Eth ", eth.SrcMAC, eth.DstMAC)
// case layers.LayerTypeIPv4:
// fmt.Println(" IP4 ", ip4.SrcIP, ip4.DstIP)
// case layers.LayerTypeIPv6:
// fmt.Println(" IP6 ", ip6.SrcIP, ip6.DstIP)
// case layers.LayerTypeTCP:
// fmt.Println(" TCP ", tcp.SrcPort, tcp.DstPort)
// case layers.LayerTypeUDP:
// fmt.Println(" UDP ", udp.SrcPort, udp.DstPort)
// }
// }
//   if parser.Truncated {
// fmt.Println(" Packet has been truncated")
// }
// if err != nil {
// fmt.Println(" Error encountered:", err)
// }
// }
// }
//
// If DecodeLayers is unable to decode the next layer type, it will return the
// error UnsupportedLayerType.
// DecodeLayers decodes data starting at layer type l.first, appending each
// successfully decoded type to *decoded (which it truncates first), and
// stopping when no decoder is registered for the next type, a decoder fails,
// or all bytes are consumed. See the documentation comment above for a full
// usage example.
func (l *DecodingLayerParser) DecodeLayers(data []byte, decoded *[]LayerType) (err error) {
	l.Truncated = false
	if !l.IgnorePanic {
		// Convert any panic raised while decoding into a returned error.
		defer panicToError(&err)
	}
	typ := l.first
	*decoded = (*decoded)[:0] // Truncate the decoded-layers slice, keeping capacity.
	for len(data) > 0 {
		decoder, ok := l.decoders[typ]
		if !ok {
			return UnsupportedLayerType(typ)
		} else if err = decoder.DecodeFromBytes(data, l.df); err != nil {
			return err
		}
		*decoded = append(*decoded, typ)
		typ = decoder.NextLayerType()
		data = decoder.LayerPayload()
	}
	return nil
}
// UnsupportedLayerType is returned by DecodingLayerParser if DecodeLayers
// encounters a layer type that the DecodingLayerParser has no decoder for.
// It wraps the offending LayerType so callers can inspect it.
type UnsupportedLayerType LayerType

// Error implements the error interface, returning a string to say that the
// given layer type is unsupported.
func (e UnsupportedLayerType) Error() string {
	return fmt.Sprintf("No decoder for layer type %v", LayerType(e))
}
// panicToError converts a recovered panic value, if any, into an error stored
// at *e. It only works when invoked directly via defer, since recover must
// run inside the deferred function itself.
func panicToError(e *error) {
	recovered := recover()
	if recovered == nil {
		return
	}
	*e = fmt.Errorf("panic: %v", recovered)
}
// DecodingLayerParserOptions provides options to affect the behavior of a given
// DecodingLayerParser. It is embedded in DecodingLayerParser, so fields can be
// set directly on the parser.
type DecodingLayerParserOptions struct {
	// IgnorePanic determines whether a DecodingLayerParser should stop
	// panics on its own (by returning them as an error from DecodeLayers)
	// or should allow them to raise up the stack. Handling errors does add
	// latency to the process of decoding layers, but is much safer for
	// callers. IgnorePanic defaults to false, thus if the caller does
	// nothing decode panics will be returned as errors.
	IgnorePanic bool
}

213
vendor/github.com/google/gopacket/writer.go generated vendored Normal file
View File

@ -0,0 +1,213 @@
// Copyright 2012 Google, Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package gopacket
import (
"fmt"
)
// SerializableLayer allows its implementations to be written out as a set of bytes,
// so those bytes may be sent on the wire or otherwise used by the caller.
// SerializableLayer is implemented by certain Layer types, and can be encoded to
// bytes using the LayerWriter object. See SerializeLayers/SerializePacket for
// the helpers that drive SerializeTo across a stack of layers.
type SerializableLayer interface {
	// SerializeTo writes this layer to a slice, growing that slice if necessary
	// to make it fit the layer's data.
	// Args:
	//  b:  SerializeBuffer to write this layer on to.  When called, b.Bytes()
	//    is the payload this layer should wrap, if any.  Note that this
	//    layer can either prepend itself (common), append itself
	//    (uncommon), or both (sometimes padding or footers are required at
	//    the end of packet data). It's also possible (though probably very
	//    rarely needed) to overwrite any bytes in the current payload.
	//    After this call, b.Bytes() should return the byte encoding of
	//    this layer wrapping the original b.Bytes() payload.
	//  opts:  options to use while writing out data.
	// Returns:
	//  error if a problem was encountered during encoding.  If an error is
	//  returned, the bytes in data should be considered invalidated, and
	//  not used.
	//
	// SerializeTo calls SHOULD entirely ignore LayerContents and
	// LayerPayload.  It just serializes based on struct fields, neither
	// modifying nor using contents/payload.
	SerializeTo(b SerializeBuffer, opts SerializeOptions) error
}
// SerializeOptions provides options for behaviors that SerializableLayers may want to
// implement. The zero value requests neither length fixing nor checksum
// computation.
type SerializeOptions struct {
	// FixLengths determines whether, during serialization, layers should fix
	// the values for any length field that depends on the payload.
	FixLengths bool
	// ComputeChecksums determines whether, during serialization, layers
	// should recompute checksums based on their payloads.
	ComputeChecksums bool
}
// SerializeBuffer is a helper used by gopacket for writing out packet layers.
// SerializeBuffer starts off as an empty []byte. Subsequent calls to PrependBytes
// return byte slices before the current Bytes(), AppendBytes returns byte
// slices after.
//
// Byte slices returned by PrependBytes/AppendBytes are NOT zero'd out, so if
// you want to make sure they're all zeros, set them as such.
//
// SerializeBuffer is specifically designed to handle packet writing, where unlike
// with normal writes it's easier to start writing at the inner-most layer and
// work out, meaning that we often need to prepend bytes. This runs counter to
// typical writes to byte slices using append(), where we only write at the end
// of the buffer.
//
// It can be reused via Clear. Note, however, that a Clear call will invalidate the
// byte slices returned by any previous Bytes() call (the same buffer is
// reused).
//
//  1) Reusing a write buffer is generally much faster than creating a new one,
//     and with the default implementation it avoids additional memory allocations.
//  2) If a byte slice from a previous Bytes() call will continue to be used,
//     it's better to create a new SerializeBuffer.
//
// The Clear method is specifically designed to minimize memory allocations for
// similar later workloads on the SerializeBuffer. IE: if you make a set of
// Prepend/Append calls, then clear, then make the same calls with the same
// sizes, the second round (and all future similar rounds) shouldn't allocate
// any new memory.
type SerializeBuffer interface {
	// Bytes returns the contiguous set of bytes collected so far by Prepend/Append
	// calls. The slice returned by Bytes will be modified by future Clear calls,
	// so if you're planning on clearing this SerializeBuffer, you may want to copy
	// Bytes somewhere safe first.
	Bytes() []byte
	// PrependBytes returns a set of bytes which prepends the current bytes in this
	// buffer. These bytes start in an indeterminate state, so they should be
	// overwritten by the caller. The caller must only call PrependBytes if they
	// know they're going to immediately overwrite all bytes returned.
	PrependBytes(num int) ([]byte, error)
	// AppendBytes returns a set of bytes which appends the current bytes in this
	// buffer. These bytes start in an indeterminate state, so they should be
	// overwritten by the caller. The caller must only call AppendBytes if they
	// know they're going to immediately overwrite all bytes returned.
	AppendBytes(num int) ([]byte, error)
	// Clear resets the SerializeBuffer to a new, empty buffer. After a call to clear,
	// the byte slice returned by any previous call to Bytes() for this buffer
	// should be considered invalidated.
	Clear() error
}
// serializeBuffer is the default SerializeBuffer implementation. Layout:
// data[:start] is free headroom reserved for future prepends, and
// data[start:] holds the bytes returned by Bytes().
type serializeBuffer struct {
	data                []byte
	start               int
	prepended, appended int // high-water marks used to size reallocations
}

// NewSerializeBuffer creates a new instance of the default implementation of
// the SerializeBuffer interface.
func NewSerializeBuffer() SerializeBuffer {
	return &serializeBuffer{}
}
// NewSerializeBufferExpectedSize creates a new buffer for serialization, optimized for an
// expected number of bytes prepended/appended. This tends to decrease the
// number of memory allocations made by the buffer during writes.
func NewSerializeBufferExpectedSize(expectedPrependLength, expectedAppendLength int) SerializeBuffer {
	return &serializeBuffer{
		// Reserve expectedPrependLength bytes of headroom up front, and
		// capacity for the expected appends behind it.
		data:      make([]byte, expectedPrependLength, expectedPrependLength+expectedAppendLength),
		start:     expectedPrependLength,
		prepended: expectedPrependLength,
		appended:  expectedAppendLength,
	}
}
// Bytes implements SerializeBuffer, returning the written region of the
// backing slice (everything past the prepend headroom).
func (w *serializeBuffer) Bytes() []byte {
	return w.data[w.start:]
}
// PrependBytes implements SerializeBuffer. It hands back num uninitialized
// bytes immediately before the current start of the buffer, growing (and
// copying) the backing array when there is not enough headroom.
func (w *serializeBuffer) PrependBytes(num int) ([]byte, error) {
	if num < 0 {
		panic("num < 0")
	}
	if w.start < num {
		// Not enough headroom. Grow by at least num, but also by at least the
		// total prepended so far, so a repeated similar workload stops
		// reallocating (see the SerializeBuffer doc on Clear).
		toPrepend := w.prepended
		if toPrepend < num {
			toPrepend = num
		}
		w.prepended += toPrepend
		length := cap(w.data) + toPrepend
		newData := make([]byte, length)
		newStart := w.start + toPrepend
		// Shift the existing content right to open headroom at the front.
		copy(newData[newStart:], w.data[w.start:])
		w.start = newStart
		w.data = newData[:toPrepend+len(w.data)]
	}
	w.start -= num
	return w.data[w.start : w.start+num], nil
}
// AppendBytes implements SerializeBuffer. It hands back num uninitialized
// bytes at the end of the buffer, growing the backing array if the remaining
// capacity is insufficient.
func (w *serializeBuffer) AppendBytes(num int) ([]byte, error) {
	if num < 0 {
		panic("num < 0")
	}
	initialLength := len(w.data)
	if cap(w.data)-initialLength < num {
		// Grow by at least num, but also by at least the total appended so
		// far, so a repeated similar workload stops reallocating.
		toAppend := w.appended
		if toAppend < num {
			toAppend = num
		}
		w.appended += toAppend
		newData := make([]byte, cap(w.data)+toAppend)
		copy(newData[w.start:], w.data[w.start:])
		w.data = newData[:initialLength]
	}
	// Grow the buffer.  We know it'll be under capacity given above.
	w.data = w.data[:initialLength+num]
	return w.data[initialLength:], nil
}
// Clear implements SerializeBuffer. It resets start to the high-water prepend
// mark (so a similar future workload needs no reallocation) and always
// returns nil.
func (w *serializeBuffer) Clear() error {
	w.start = w.prepended
	w.data = w.data[:w.start]
	return nil
}
// SerializeLayers clears the given write buffer, then writes all layers into it so
// they correctly wrap each other. Note that by clearing the buffer, it
// invalidates all slices previously returned by w.Bytes()
//
// Example:
//   buf := gopacket.NewSerializeBuffer()
//   opts := gopacket.SerializeOptions{}
//   gopacket.SerializeLayers(buf, opts, a, b, c)
//   firstPayload := buf.Bytes()  // contains byte representation of a(b(c))
//   gopacket.SerializeLayers(buf, opts, d, e, f)
//   secondPayload := buf.Bytes()  // contains byte representation of d(e(f)). firstPayload is now invalidated, since the SerializeLayers call Clears buf.
func SerializeLayers(w SerializeBuffer, opts SerializeOptions, layers ...SerializableLayer) error {
	// Clear's error is deliberately ignored, matching historical behavior;
	// the default buffer implementation never fails to clear.
	w.Clear()
	// Serialize the inner-most layer first so each outer layer wraps the
	// bytes already present in the buffer.
	for i := len(layers) - 1; i >= 0; i-- {
		if err := layers[i].SerializeTo(w, opts); err != nil {
			return err
		}
	}
	return nil
}
// SerializePacket is a convenience function that calls SerializeLayers
// on packet's Layers().
// It returns an error if one of the packet layers is not a SerializableLayer.
func SerializePacket(buf SerializeBuffer, opts SerializeOptions, packet Packet) error {
	layers := packet.Layers()
	// Preallocate: we know exactly how many layers there will be.
	sls := make([]SerializableLayer, 0, len(layers))
	for _, layer := range layers {
		sl, ok := layer.(SerializableLayer)
		if !ok {
			return fmt.Errorf("layer %s is not serializable", layer.LayerType().String())
		}
		sls = append(sls, sl)
	}
	return SerializeLayers(buf, opts, sls...)
}