From 4bb562a43a1dc63d622b29b8d603aa0596865f22 Mon Sep 17 00:00:00 2001 From: William Stearns Date: Mon, 26 Oct 2020 23:28:46 -0400 Subject: [PATCH 01/10] Initial merge of passer-ng --- passer.py | 3399 ++++++++++++++++------------------------------------- 1 file changed, 1040 insertions(+), 2359 deletions(-) diff --git a/passer.py b/passer.py index efa210b..014a12d 100755 --- a/passer.py +++ b/passer.py @@ -1,59 +1,54 @@ -#!/usr/bin/python -"""Passer learns, by watching network traffic, about the servers and clients on your network.""" -#Copyright 2008-2018, William Stearns -#Passer is a PASsive SERvice sniffer. -#Home site http://www.stearns.org/passer/ -#Github repository https://github.com/organizations/activecm/passer/ -#Dedicated to Mae Anne Laroche. - -#Released under the GPL version 3: -#This program is free software: you can redistribute it and/or modify -#it under the terms of the GNU General Public License as published by -#the Free Software Foundation, either version 3 of the License, or -#(at your option) any later version. -#This program is distributed in the hope that it will be useful, -#but WITHOUT ANY WARRANTY; without even the implied warranty of -#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -#GNU General Public License for more details. -#You should have received a copy of the GNU General Public License -#along with this program. If not, see . - - -#======== Imports ======== +#!/usr/bin/env python +"""Multiprocessing version of passer.""" +#Copyright 2018 William Stearns +#Released under GPL v3 + +#FIXME - on KeyboardInterrupt exception, drain input queue. 
+ +from __future__ import print_function +from ipaddress import summarize_address_range, IPv4Address, IPv6Address +from multiprocessing import Process, current_process, Manager +import multiprocessing import os -import ipaddress import sys -import re +import csv +import fileinput +import time import json -import binascii #converting hex text to raw bytes -import signal #For catching Ctrl-C -import string #Needed for python 2.5.2? -import warnings #Needed for p0f? -import unicodedata #Needed for removing control characters -import pytz -import __main__ #Needed to access variables in __main__ from functions without implicit/explicit globals +import socket #For dns lookups +import codecs #For python2 utf-8 writing +#from scapy.all import sniff, Scapy_Exception, PcapWriter +from scapy.all import * #Please make sure you have an up-to-date version of scapy, at least 2.4.0 + +sys.path.insert(0, '.') #Allows us to load from the current directory (There was one claim that we need to create an empty file __init__.py , but this does not appear to be required.) 
+ip2asn_loaded = False try: - #from scapy.all import p0f - #from scapy.all import ARP, CookedLinux, DHCP, DNS, DNSQR, DNSRR, Dot11, Dot11AssoReq, Dot11AssoResp, Dot11Auth, Dot11Beacon, Dot11Deauth, Dot11Elt, Dot11ProbeReq, Dot11ProbeResp, Dot11WEP, Dot3, ESP, Ether, GRE, ICMP, ICMPerror, ICMPv6DestUnreach, ICMPv6EchoRequest, ICMPv6EchoReply, ICMPv6MLDone, ICMPv6MLQuery, ICMPv6MLReport, ICMPv6ND_NA, ICMPv6ND_NS, ICMPv6ND_RA, ICMPv6ND_RS, ICMPv6ND_Redirect, ICMPv6NDOptDstLLAddr, ICMPv6NDOptPrefixInfo, ICMPv6NDOptRDNSS, ICMPv6NDOptSrcLLAddr, ICMPv6PacketTooBig, ICMPv6TimeExceeded, IP, IPerror, IPerror6, IPv6, IPv6ExtHdrHopByHop, ISAKMP, LLC, LLMNRQuery, NBNSQueryRequest, NBNSQueryResponse, NBTDatagram, NTPControl, NTPPrivate, PcapWriter, RadioTap, Radius, Raw, SNMP, SNMPget, SNMPbulk, SNMPvarbind, SNMPresponse, TCP, TCPerror, TFTP, UDP, UDPerror, conf, ls, sniff - #When running pylint, comment out the following line and uncomment the above, revert when done with pylint - from scapy.all import * #Required for Scapy 2.0 and above - use_scapy_all = True -except: - from scapy import * #Scapy 1.0 - use_scapy_all = False - -if use_scapy_all: - try: - from scapy.all import NTPHeader - has_advanced_ntp_headers = True #V2.2.0 and below don't have NTPHeader - except ImportError: - has_advanced_ntp_headers = False + from ip2asn import load_asn_table, ip_asn_lookup, formatted_asn_output +except ImportError: + sys.stderr.write('Unable to load ip2asn , skipping ASN lookups of IP addresses.\n') else: - has_advanced_ntp_headers = False + ip2asn_loaded = True + +scapy_traceroute_loaded = False +try: + from scapy_traceroute import traceroute_hop_list +except ImportError: + sys.stderr.write('Unable to load scapy_traceroute , skipping traceroute path lookups.\n') +else: + scapy_traceroute_loaded = True + +try: + from passer_lib import * #Support functions for this script +except ImportError: + sys.stderr.write('Unable to load passer_lib , exiting.\n') + quit() + +if sys.version_info > 
(3, 0): #Python 3 + from queue import Empty, Full +else: #Python 2 + from Queue import Empty, Full -sys.path.insert(0, '.') #Allows us to load from the current directory (There was one claim that we need to create an empty file __init__.py , but this does not appear to be required.) -from passer_lib import * #Support functions for this script try: if not passer_lib_version: sys.stderr.write('Unable to load passer_lib , exiting.\n') @@ -62,2282 +57,958 @@ sys.stderr.write('Unable to load passer_lib , exiting.\n') quit() +#Note; this particular module hasn't been updated in a while and doesn't support python3. +##sudo port install GeoLiteCity py27-geoip py34-geoip py35-geoip py36-geoip +##sudo yum install geolite2-city #NOT python-pygeoip +##sudo pip install python-geoip python-geoip-geolite2 -#Note, to get p0f working, one must: -#sudo hack /usr/lib/python2.6/site-packages/scapy/modules/p0f.py -#and add: -#from scapy.all import * -#And: -#def p0f_correl(x,y): -# d = 0 -# # wwww can be "*" or "%nn" -# #d += (x[0] == y[0] or y[0] == "*" or (y[0][0] == "%" and x[0].isdigit() and (int(x[0]) % int(y[0][1:])) == 0)) -#Change above line to: -# d += (x[0] == y[0] or y[0] == "*" or (y[0][0] == "%" and str(x[0]).isdigit() and (int(x[0]) % int(y[0][1:])) == 0)) +#Instead, use this: +#sudo pip3 install maxminddb-geolite2 +geolite_loaded = False +try: + from geolite2 import geolite2 +except ImportError: + sys.stderr.write('Unable to load geolite2 , skipping geolocation lookups.\n') +else: + geolite_loaded = True -if os.path.isfile("/etc/p0f/p0f.fp") or os.path.exists("/opt/local/share/p0f/p0f.fp") or os.path.exists("/usr/share/p0f/p0f.fp"): +if os.path.exists("/etc/p0f/p0f.fp") or os.path.exists("/opt/local/share/p0f/p0f.fp") or os.path.exists("/usr/share/p0f/p0f.fp"): load_module("p0f") else: sys.stderr.write("/etc/p0f/p0f.fp not found; please install p0f version 2 to enable OS fingerprinting.\n") - sys.stderr.flush + #FIXME - remember whether it's loaded or not and test this 
before trying to use p0f +p_test_version = '0.39' +Verbose = False +ShowProgress = False #In most handlers, spit out a letter when each handler finishes processing a packet +out_format = 'csv' #Either 'json' or 'csv' +max_processed_acks = 5 #For TCP packets, we only want to look at the early ACK packets. After we've looked at this many in a given direction we stop handing the rest to TCP_extract -#======== Global arrays ======== -#These two are used to discover servers. If we've seen a SYN go to a port, and a SYN/ACK back from it, -#that's a pretty good sign it's a server. Not truly stateful, but a generally good guess. - - - -botnet_warning_list = {} #Dictionary of "IP,proto_port": ['warning1', 'warning2'] entries that say if you see that trio, that IP should get this/these warnings. - #If we see syn/ack coming back from tcp C&C's, tag the host as 'bot_candc' and the dest IP of the syn/ack as 'bot' - #For UDP, just use any data heading _to_ the CandC to tag both ends (source is 'bot', dest os 'bot_candc') - #FIXME - implement - - -must_stop = False #Set to true if exit requested by signal - - - -#======== Port lists ======== - -#From 122.224.158.195, payload is "8'\x82\xd7\x8fZ\xdbc\xfe\x00\x00\x00\x00\x00" -fenull_scan_names = {"21": "udp-21", "22": "udp-22", "23": "udp-23", "25": "udp-25", "49": "udp-49", "80": "udp-80", "102": "udp-102", "110": "udp-110", "143": "udp-143", "636": "udp-636", "992": "udp-992", "993": "udp-993", "995": "udp-995"} -empty_payload_ports = ('1', '17', '19', '18895', '50174', '50597', '50902', '52498', '52576', '52620', '52775', '52956', '55180', '56089', '57347', '57563', '57694', '58034', '58153', '58861', '59024', '59413', '60463', '60799', '61016', '61651', '62473', '62915', '63137', '63556', '63571', '63878', '64727', '65154', '65251') -halflife_altport = ("1265", "2303", "20100", "21025", "21550", "27000", "27017", "27018", "27019", "27022", "27030", "27035", "27050", "27078", "27080", "28015", "28100", "45081") - -#For all of 
the following, see if the payload contains snmp. -### IPv4/UDPv4/21 22 23 25 tacacs=49 http=80 iso-tsap=102 110 143 igmpv3lite=465 ldaps=636 omirr=808 992 993 995 client -snmp_altport = ("21", "22", "23", "25", "49", "80", "102", "110", "143", "465", "636", "808", "992", "993", "995") - -meet_ports = ('19302', '19303', '19304', '19305', '19306', '19307', '19308', '19309') #https://support.google.com/a/answer/7582935?hl=en -qualys_udp_scan_port_names = {"7": "echo", "13": "daytime", "17": "qotd", "19": "chargen", "37": "time", "111": "sunrpc", "123": "ntp", "177": "xdmcp", "407": "timbuktu", "443": "udp443", "464": "kpasswd", "517": "talk", "518": "ntalk", "520": "rip", "623": "asf-rmcp", "1194": "openvpn", "1434": "mssql", "1645": "sightline", "1701": "l2f", "1812": "radius", "1978": "unisql", "2002": "globe", "2049": "nfs", "4000": "terabase"} -skype_ports = ('21105', '21546', '22795', '23353', '24484', '26079', '27252', '27944') -zmap_host_www_ports = ("80", "563", "655", "830", "898", "989", "990", "991", "992", "995", "1293", "1707", "1900", "2484", "3269", "3544", "4843", "5000", "5031", "6379", "6619", "9899", "11214", "11215", "18091", "18092", "37215") -www163com_ports = ("21", "22", "23", "25", "49", "80", "102", "110", "143", "636", "992", "993", "995") - -#======== IP address lists ======== -SteamFriendsServers = ("69.28.148.250", "69.28.156.250", "72.165.61.161", "72.165.61.185", "72.165.61.186", "72.165.61.188", "68.142.64.164", "68.142.64.165", "68.142.64.166") -meet_hosts = ( - '2607:f8b0:4002:c08::7f', '2607:f8b0:400c:c00::7f', '2a00:1450:4013:c03::7f', '2a00:1450:400c:c08::7f', '2800:3f0:4003:c00::7f', '2a00:1450:400c:c08::7f', '2607:f8b0:4002:c07::7f', '2a00:1450:4010:c01::7f', '2607:f8b0:400d:c0d::7f', "2a00:1450:400c:c06::7f", '2404:6800:4003:c00::7f', '2607:f8b0:400d:c09::7f', '2a00:1450:400c:c06::7f', '2a00:1450:4010:c08::7f', - '2607:f8b0:4002:0c08:0000:0000:0000:007f', '2607:f8b0:400c:0c00:0000:0000:0000:007f', 
'2a00:1450:4013:0c03:0000:0000:0000:007f', '2a00:1450:400c:0c08:0000:0000:0000:007f', '2800:3f0:4003:0c00:0000:0000:0000:007f', '2a00:1450:400c:0c08:0000:0000:0000:007f', '2607:f8b0:4002:0c07:0000:0000:0000:007f', '2a00:1450:4010:0c01:0000:0000:0000:007f', '2607:f8b0:400d:0c0d:0000:0000:0000:007f', "2a00:1450:400c:0c06:0000:0000:0000:007f", '2404:6800:4003:c00:0000:0000:0000:7f', '2607:f8b0:400d:0c09:0000:0000:0000:007f', '2a00:1450:400c:0c06:0000:0000:0000:007f', '2a00:1450:4010:0c08:0000:0000:0000:007f', - '64.233.165.127', '64.233.177.127', '64.233.186.127', '66.102.1.127', '74.125.134.127', '74.125.140.127', '74.125.143.127', '74.125.196.127', '74.125.200.127', '173.194.207.127', '209.85.232.127' - ) #Second line is the same as the first with ipv6 expanded. -skype_hosts = ('52.179.141.141', '100.112.42.45') -shodan_hosts = ('66.240.192.138', '66.240.236.119', '71.6.146.185', '80.82.77.33', '94.102.49.190') #census8.shodan.io, census6.shodan.io, pirate.census.shodan.io, sky.census.shodan.io, flower.census.shodan.io -qualys_scan_ips = ('64.39.99.152', '64.39.111.38') -qualys_subnet_starts = ('64.39.96.', '64.39.99.', '64.39.102.', '64.39.103.', '64.39.105.', '64.39.106.', '64.39.111.') -vonage_ntp = ("216.115.23.75", "216.115.23.76", "69.59.240.75") -vonage_sip_servers = ("216.115.30.28", "69.59.227.77", "69.59.232.33", "69.59.240.84") -aol_dns_servers = ("205.188.146.72", "205.188.157.241", "205.188.157.242", "205.188.157.243", "205.188.157.244", "64.12.51.145", "64.12.51.148", "149.174.54.131") -nessus_scan_ips = ('167.88.145.12') -known_scan_ips = ('137.226.113.7') -broadcast_udp_ports = ("2223", "8082", "8600", "8097", "9034", "9035", "9036", "9500", "9999", "21327", "21328") - -#======== Decodes ======== -nullbyte = binascii.unhexlify('00') -twobyte = binascii.unhexlify('02') -twozero = binascii.unhexlify('0200') -fournulls = binascii.unhexlify('00000000') -fenulls = binascii.unhexlify('fe0000000000') -stream_ihs_discovery_header = 
binascii.unhexlify('FFFFFFFF214C5FA0') -www163com_payload = binascii.unhexlify('03') + b"www" + binascii.unhexlify('03') + b"163" + binascii.unhexlify('03') + b"com" #\x03www\x03163\x03com -a0_string = b'A' + nullbyte -zeroone = binascii.unhexlify('0001') -zerotwo = binascii.unhexlify('0002') -eight_fs = binascii.unhexlify('FFFFFFFF') -crestron_prelude = binascii.unhexlify('14000000010400030000') -ip_start_bytes = binascii.unhexlify('4500') -two_prelude_ip_start = (binascii.unhexlify('020000004500'), binascii.unhexlify('020000004502'), binascii.unhexlify('020000004510')) -quake3_disconnect = binascii.unhexlify('FFFFFFFF') + b'disconnect' -torrent_connection_id = binascii.unhexlify('0000041727101980') -ethernetip_list_identity = binascii.unhexlify('6300') -ntp_get_monlist = binascii.unhexlify('1700032a') -cacti_payload = binascii.unhexlify('000100') + b'cacti-monitoring-system' + binascii.unhexlify('00') -ubiquiti_discover = binascii.unhexlify('01000000') - -#======== Regexes ======== -StoraHostnameMatch = re.compile('Hostname:<([a-zA-Z0-9_\.-]+)>') -SSDPLocationMatch = re.compile('LOCATION:([a-zA-Z0-9:,/_\. -]+)\r') -SSDPServerMatch = re.compile('[Ss][Ee][Rr][Vv][Ee][Rr]:([a-zA-Z0-9:,/_\. 
-]+)\r') -BrotherAnnounceMatch = re.compile('IP=([0-9][0-9\.]*):5492[56];IPv6=\[([0-9a-fA-F:][0-9a-fA-F:]*)\]:5492[56],\[([0-9a-fA-F:][0-9a-fA-F:]*)\]:5492[56];NODENAME="([0-9a-zA-Z][0-9a-zA-Z]*)"') -SyslogMatch = re.compile('^<[0-9][0-9]*>[A-Z][a-z][a-z] [ 0-9][0-9] [0-2][0-9]:[0-9][0-9]:[0-9][0-9] ([^ ][^ ]*) ([^: [][^: []*)[: []') #Match 1 is short hostname, match 2 is process name that generated the message - - -#======== Misc ======== -#See "Reference ID (refid)" in https://www.ietf.org/rfc/rfc5905.txt -known_ntp_refs = ('1PPS', 'ACTS', 'ATOM', 'BCS', 'CDMA', 'CHU', 'CTD', 'DCF', 'DCFP', 'DCFa', 'DCFp', 'DCFs', 'GAL', 'GCC', 'GNSS', 'GOES', 'GPS', 'GPS1', 'GPSD', 'GPSm', 'GPSs', 'GOOG', 'HBG', 'INIT', 'IRIG', 'JJY', 'kPPS', 'LOCL', 'LORC', 'MRS', 'MSF', 'MSL', 'NICT', 'NIST', 'NMC1', 'NMEA', 'NTS', 'OCXO', 'ONBR', 'PPS', 'PPS0', 'PPS1', 'PTB', 'PTP', 'PZF', 'RATE', 'ROA', 'SHM', 'SLK', 'SOCK', 'STEP', 'TAC', 'TDF', 'TRUE', 'UPPS', 'USIQ', 'USNO', 'UTC', 'WWV', 'WWVB', 'WWVH', 'XMIS', 'i', 'shm0', '', None) - -botnet_domains = ('ddos.cat.') -botnet_hosts = ('magnesium.ddos.cat.') - -#For my internal use to look for new service strings -#This payload logging is disabled when prefs['devel'] == False -#Quite likely a security risk, I don't recommend enabling it. 
-ServerPayloadDir = '/var/tmp/passer-server/' -ClientPayloadDir = '/var/tmp/passer-client/' - -debug_known_layer_lists = False - - -known_layer_lists = [ - ['802.3', 'LLC', 'Raw'], - ['802.3', 'LLC', 'SNAP', 'Raw'], - ['802.3', 'LLC', 'SNAP', 'Spanning Tree Protocol', 'Raw'], - ['802.3', 'LLC', 'Spanning Tree Protocol', 'Padding'], - ['802.3', 'Padding'], - - ['cooked linux', 'IP', 'ESP'], - ['cooked linux', 'IP', 'ICMP'], - ['cooked linux', 'IP', 'ICMP', 'IP in ICMP', 'ICMP in ICMP'], - ['cooked linux', 'IP', 'ICMP', 'IP in ICMP', 'ICMP in ICMP', 'Raw'], - ['cooked linux', 'IP', 'ICMP', 'IP in ICMP', 'ICMP in ICMP', 'Raw', 'Padding'], - ['cooked linux', 'IP', 'ICMP', 'IP in ICMP', 'TCP in ICMP'], - ['cooked linux', 'IP', 'ICMP', 'IP in ICMP', 'TCP in ICMP', 'Raw'], - ['cooked linux', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP'], - ['cooked linux', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'DNS'], - ['cooked linux', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'DNS', 'Padding'], - ['cooked linux', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'Raw'], - ['cooked linux', 'IP', 'ICMP', 'Raw'], - ['cooked linux', 'IP', 'Raw'], -# p[CookedLinux].pkttype == 'unicast' will be useful - ['cooked linux', 'IP', 'TCP'], - ['cooked linux', 'IP', 'TCP', 'Raw'], - ['cooked linux', 'IP', 'UDP', 'DNS'], - ['cooked linux', 'IP', 'UDP', 'DNS', 'Raw'], -# Pull current timestamp out of this (.ref, .orig, .recv, or .sent fields of p[NTPHeader] ; see https://tools.ietf.org/html/rfc958) - ['cooked linux', 'IP', 'UDP', 'NTPHeader'], - ['cooked linux', 'IP', 'UDP', 'Private (mode 7)', 'Raw'], - ['cooked linux', 'IP', 'UDP', 'Raw'], - - ['Ethernet', '802.1Q', 'ARP', 'Padding'], - - ['Ethernet', '802.1Q', 'IP', 'ESP'], - - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'TCP'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'TCP', 'Raw'], - - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'BOOTP', 'DHCP 
options'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'DNS'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'DNS', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'ISAKMP', 'ISAKMP SA'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'NBNS query request'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'NTPHeader'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'Private (mode 7)'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'Private (mode 7)', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'RIP header', 'RIP entry'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'SNMP'], - ['Ethernet', '802.1Q', 'IP', 'GRE', 'IP', 'UDP', 'TFTP opcode', 'TFTP Read Request'], - - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'IP in ICMP', 'ICMP in ICMP'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'IP in ICMP', 'ICMP in ICMP', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'IP in ICMP', 'ICMP in ICMP', 'Raw', 'Padding'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'IP in ICMP', 'TCP in ICMP'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'IP in ICMP', 'TCP in ICMP', 'Padding'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'IP in ICMP', 'TCP in ICMP', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'DNS'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'Padding'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'ICMP', 'Raw', 'Padding'], - - ['Ethernet', '802.1Q', 'IP', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'Raw', 'Padding'], - - ['Ethernet', '802.1Q', 'IP', 'TCP'], - ['Ethernet', '802.1Q', 'IP', 'TCP', 'Padding'], - ['Ethernet', '802.1Q', 'IP', 'TCP', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'TCP', 'Raw', 'Padding'], -# Warning; Skinny layer appears to be a mis-identification - ['Ethernet', '802.1Q', 
'IP', 'TCP', 'Skinny', 'Raw'], - - ['Ethernet', '802.1Q', 'IP', 'UDP', 'DNS'], - ['Ethernet', '802.1Q', 'IP', 'UDP', 'DNS', 'Padding'], - ['Ethernet', '802.1Q', 'IP', 'UDP', 'DNS', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'UDP', 'DNS', 'Raw', 'Padding'], - ['Ethernet', '802.1Q', 'IP', 'UDP', 'Raw'], - ['Ethernet', '802.1Q', 'IP', 'UDP', 'Raw', 'Padding'], - ['Ethernet', '802.1Q', 'IP', 'UDP', 'SNMP'], - - ['Ethernet', '802.1Q', 'IP', 'VRRP', 'Padding'], - - ['Ethernet', '802.1Q', 'IPv6', 'ICMPv6 Destination Unreachable', 'IPv6 in ICMPv6', 'TCP in ICMP'], - ['Ethernet', '802.1Q', 'IPv6', 'ICMPv6 Destination Unreachable', 'IPv6 in ICMPv6', 'UDP in ICMP', 'DNS'], - ['Ethernet', '802.1Q', 'IPv6', 'ICMPv6 Destination Unreachable', 'IPv6 in ICMPv6', 'UDP in ICMP', 'Raw'], - ['Ethernet', '802.1Q', 'IPv6', 'ICMPv6 Echo Reply'], - ['Ethernet', '802.1Q', 'IPv6', 'ICMPv6 Echo Request'], - ['Ethernet', '802.1Q', 'IPv6', 'ICMPv6 Neighbor Discovery - Neighbor Advertisement'], - ['Ethernet', '802.1Q', 'IPv6', 'ICMPv6 Neighbor Discovery - Neighbor Advertisement', 'ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address'], -# Grab source mac from last option - ['Ethernet', '802.1Q', 'IPv6', 'ICMPv6 Neighbor Discovery - Neighbor Solicitation', 'ICMPv6 Neighbor Discovery Option - Source Link-Layer Address'], - ['Ethernet', '802.1Q', 'IPv6', 'ICMPv6 Time Exceeded', 'IPv6 in ICMPv6', 'UDP in ICMP', 'DNS'], -#(raw contains E\x00\x00 8 bytes in) - ['Ethernet', '802.1Q', 'IPv6', 'IP', 'GRE', 'Raw'], - ['Ethernet', '802.1Q', 'IPv6', 'Padding'], - ['Ethernet', '802.1Q', 'IPv6', 'Raw'], - ['Ethernet', '802.1Q', 'IPv6', 'TCP'], - ['Ethernet', '802.1Q', 'IPv6', 'TCP', 'Raw'], - ['Ethernet', '802.1Q', 'IPv6', 'UDP', 'DNS'], - ['Ethernet', '802.1Q', 'IPv6', 'UDP', 'Raw'], - - ['Ethernet', '802.1Q', 'LLC', 'SNAP', 'Spanning Tree Protocol', 'Raw'], - - ['Ethernet', '802.1Q', 'Raw'], - - ['Ethernet', 'ARP'], - ['Ethernet', 'ARP', 'Padding'], - - ['Ethernet', 'EAPOL', 'Raw'], - - 
['Ethernet', 'IP', 'AH'], - - ['Ethernet', 'IP', 'ICMP'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'ICMP in ICMP'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'ICMP in ICMP', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'ICMP in ICMP', 'Raw', 'Padding'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'TCP in ICMP'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'TCP in ICMP', 'Padding'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'TCP in ICMP', 'Raw'], - - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'Control message'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'DNS'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'DNS', 'Padding'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'DNS', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'ESP'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'ISAKMP', 'ISAKMP SA'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'ISAKMP', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'NBNS query request'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'NBNS query request', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'NBNS query response', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'NBT Datagram Packet', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'NTPHeader'], -#(happened to be malicious, and headers were misparsed)) - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'NTPHeader', 'NTPv4 extensions'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'NTPHeader', 'Padding'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'NTPHeader', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'Padding'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'Private 
(mode 7)'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'Private (mode 7)', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'RIP header'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'RIP header', 'RIP entry'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'RIP header', 'RIP entry', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'Radius'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'Raw', 'Padding'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'SNMP'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'SNMP', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'TFTP opcode', 'TFTP Read Request'], - ['Ethernet', 'IP', 'ICMP', 'IP in ICMP', 'UDP in ICMP', 'TFTP opcode', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'Padding'], - ['Ethernet', 'IP', 'ICMP', 'Raw'], - ['Ethernet', 'IP', 'ICMP', 'Raw', 'Padding'], - - ['Ethernet', 'IP', 'Raw'], - ['Ethernet', 'IP', 'Raw', 'Padding'], - - ['Ethernet', 'IP', 'TCP'], - ['Ethernet', 'IP', 'TCP', 'NBT Session Packet', 'SMBNegociate Protocol Request Header'], - ['Ethernet', 'IP', 'TCP', 'NBT Session Packet', 'SMBNegociate Protocol Request Header', 'SMB Negociate Protocol Request Tail'], - ['Ethernet', 'IP', 'TCP', 'NBT Session Packet', 'SMBNegociate Protocol Request Header', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail'], - ['Ethernet', 'IP', 'TCP', 'NBT Session Packet', 'SMBNegociate Protocol Request Header', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail'], - ['Ethernet', 'IP', 'TCP', 'NBT Session Packet', 'SMBNegociate Protocol Request Header', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB 
Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail'], - ['Ethernet', 'IP', 'TCP', 'NBT Session Packet', 'SMBNegociate Protocol Request Header', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 
'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail'], - ['Ethernet', 'IP', 'TCP', 'NBT Session Packet', 'SMBNegociate Protocol Request Header', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request 
Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail'], - ['Ethernet', 'IP', 'TCP', 'NBT Session Packet', 'SMBNegociate Protocol Request Header', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol 
Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request 
Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail'], - ['Ethernet', 'IP', 'TCP', 'NBT Session Packet', 'SMBNegociate Protocol Request Header', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol 
Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request 
Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail', 'SMB Negociate Protocol Request Tail'], - ['Ethernet', 'IP', 'TCP', 'Padding'], - ['Ethernet', 'IP', 'TCP', 'Raw'], - ['Ethernet', 'IP', 'TCP', 'Raw', 'Padding'], - ['Ethernet', 'IP', 'TCP', 'Skinny', 'Raw'], - - ['Ethernet', 'IP', 'UDP', 'BOOTP', 'DHCP options'], - ['Ethernet', 'IP', 'UDP', 'BOOTP', 'DHCP options', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'Control message', 'Padding'], -#DNSRR for question record, but not a formal layer, it appears - ['Ethernet', 'IP', 'UDP', 'DNS'], - ['Ethernet', 'IP', 'UDP', 'DNS', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'DNS', 'Raw'], - ['Ethernet', 'IP', 'UDP', 'DNS', 'Raw', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'ESP'], - ['Ethernet', 'IP', 'UDP', 'HSRP', 'HSRP MD5 Authentication', 'Raw'], - ['Ethernet', 'IP', 'UDP', 'HSRP', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'ISAKMP', 'ISAKMP SA'], - ['Ethernet', 'IP', 'UDP', 'ISAKMP', 'ISAKMP SA', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'ISAKMP', 'Raw'], - ['Ethernet', 'IP', 'UDP', 
'Link Local Multicast Node Resolution - Query'], - ['Ethernet', 'IP', 'UDP', 'NBNS query request'], - ['Ethernet', 'IP', 'UDP', 'NBNS query request', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'NBNS query request', 'Raw'], - ['Ethernet', 'IP', 'UDP', 'NBNS query response'], - ['Ethernet', 'IP', 'UDP', 'NBNS query response', 'Raw'], - ['Ethernet', 'IP', 'UDP', 'NBT Datagram Packet', 'Raw'], - ['Ethernet', 'IP', 'UDP', 'NTPHeader'], - ['Ethernet', 'IP', 'UDP', 'NTPHeader', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'NTPHeader', 'NTPv4 extensions'], - ['Ethernet', 'IP', 'UDP', 'NTPHeader', 'Authenticator'], - ['Ethernet', 'IP', 'UDP', 'NTPHeader', 'Raw'], - ['Ethernet', 'IP', 'UDP', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'Private (mode 7)', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'Private (mode 7)', 'Raw'], - ['Ethernet', 'IP', 'UDP', 'Private (mode 7)', 'Raw', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'Radius', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'RIP header', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'RIP header', 'RIP entry'], - ['Ethernet', 'IP', 'UDP', 'RIP header', 'RIP entry', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'RIP header', 'RIP entry', 'Raw'], - ['Ethernet', 'IP', 'UDP', 'Radius'], - ['Ethernet', 'IP', 'UDP', 'Raw'], - ['Ethernet', 'IP', 'UDP', 'Raw', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'SNMP'], - ['Ethernet', 'IP', 'UDP', 'SNMP', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'SNMP', 'Raw'], - ['Ethernet', 'IP', 'UDP', 'TFTP opcode', 'Raw', 'Padding'], - ['Ethernet', 'IP', 'UDP', 'TFTP opcode', 'TFTP Read Request', 'Padding'], - - ['Ethernet', 'IP', 'VRRP'], - ['Ethernet', 'IP', 'VRRP', 'Padding'], - - ['Ethernet', 'IPv6', 'ICMPv6 Destination Unreachable', 'IPv6 in ICMPv6', 'TCP in ICMP'], - ['Ethernet', 'IPv6', 'ICMPv6 Destination Unreachable', 'IPv6 in ICMPv6', 'UDP in ICMP', 'DNS'], - ['Ethernet', 'IPv6', 'ICMPv6 Destination Unreachable', 'IPv6 in ICMPv6', 'UDP in ICMP', 'Raw'], - - ['Ethernet', 'IPv6', 'ICMPv6 Echo Reply'], - ['Ethernet', 'IPv6', 'ICMPv6 
Echo Request'], - - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Neighbor Advertisement'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Neighbor Advertisement', 'ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Neighbor Solicitation'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Neighbor Solicitation', 'ICMPv6 Neighbor Discovery Option - Source Link-Layer Address'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Neighbor Solicitation', 'Raw'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Router Advertisement', 'ICMPv6 Neighbor Discovery Option - MTU', 'ICMPv6 Neighbor Discovery Option - Source Link-Layer Address'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Router Advertisement', 'ICMPv6 Neighbor Discovery Option - Prefix Information'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Router Advertisement', 'ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option', 'ICMPv6 Neighbor Discovery Option - Prefix Information', 'ICMPv6 Neighbor Discovery Option - Route Information Option', 'ICMPv6 Neighbor Discovery Option - Source Link-Layer Address'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Router Advertisement', 'ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option', 'ICMPv6 Neighbor Discovery Option - Prefix Information', 'ICMPv6 Neighbor Discovery Option - Source Link-Layer Address'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Router Advertisement', 'ICMPv6 Neighbor Discovery Option - Source Link-Layer Address'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Router Advertisement', 'ICMPv6 Neighbor Discovery Option - Source Link-Layer Address', 'ICMPv6 Neighbor Discovery Option - MTU', 'ICMPv6 Neighbor Discovery Option - Prefix Information'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Router Advertisement', 'ICMPv6 Neighbor Discovery Option - Source Link-Layer Address', 'ICMPv6 Neighbor Discovery 
Option - Prefix Information'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Router Solicitation'], - ['Ethernet', 'IPv6', 'ICMPv6 Neighbor Discovery - Router Solicitation', 'ICMPv6 Neighbor Discovery Option - Source Link-Layer Address'], - - ['Ethernet', 'IPv6', 'ICMPv6 Packet Too Big', 'IPv6 in ICMPv6', 'TCP in ICMP', 'Raw'], - - ['Ethernet', 'IPv6', 'ICMPv6 Time Exceeded', 'IPv6 in ICMPv6', 'UDP in ICMP', 'DNS'], - - ['Ethernet', 'IPv6', 'IPv6 Extension Header - Fragmentation header', 'TCP', 'Raw'], - ['Ethernet', 'IPv6', 'IPv6 Extension Header - Fragmentation header', 'UDP', 'Raw'], - ['Ethernet', 'IPv6', 'IPv6 Extension Header - Fragmentation header', 'UDP', 'Raw', 'Padding'], - ['Ethernet', 'IPv6', 'IPv6 Extension Header - Hop-by-Hop Options Header', 'ICMPv6 Neighbor Discovery - Neighbor Advertisement', 'ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address'], - ['Ethernet', 'IPv6', 'IPv6 Extension Header - Hop-by-Hop Options Header', 'ICMPv6 Neighbor Discovery - Neighbor Solicitation'], - ['Ethernet', 'IPv6', 'IPv6 Extension Header - Hop-by-Hop Options Header', 'MLD - Multicast Listener Done'], - ['Ethernet', 'IPv6', 'IPv6 Extension Header - Hop-by-Hop Options Header', 'MLD - Multicast Listener Query'], - ['Ethernet', 'IPv6', 'IPv6 Extension Header - Hop-by-Hop Options Header', 'MLD - Multicast Listener Report'], - ['Ethernet', 'IPv6', 'IPv6 Extension Header - Hop-by-Hop Options Header', 'Raw'], - - ['Ethernet', 'IPv6', 'Padding'], - ['Ethernet', 'IPv6', 'Raw'], - ['Ethernet', 'IPv6', 'TCP'], - ['Ethernet', 'IPv6', 'TCP', 'Raw'], - - ['Ethernet', 'IPv6', 'UDP', 'DHCPv6 Confirm Message', 'DHCP6 Client Identifier Option', 'DHCP6 Option Request Option', 'DHCP6 Elapsed Time Option', 'DHCP6 Identity Association for Non-temporary Addresses Option'], - ['Ethernet', 'IPv6', 'UDP', 'DHCPv6 Request Message', 'DHCP6 Client Identifier Option', 'DHCP6 Option Request Option', 'DHCP6 Elapsed Time Option', 'DHCP6 Server Identifier Option', 'DHCP6 Identity 
Association for Non-temporary Addresses Option'], -# p[DHCP6OptClientFQDN].fqdn is an fqdn - ['Ethernet', 'IPv6', 'UDP', 'DHCPv6 Solicit Message', 'DHCP6 Client Identifier Option', 'DHCP6 Option Request Option', 'DHCP6 Elapsed Time Option', 'DHCP6 Rapid Commit Option', 'DHCP6 Option - Client FQDN', 'DHCP6 Identity Association for Non-temporary Addresses Option'], - ['Ethernet', 'IPv6', 'UDP', 'DHCPv6 Solicit Message', 'DHCP6 Client Identifier Option', 'DHCP6 Option Request Option', 'DHCP6 Elapsed Time Option', 'DHCP6 Identity Association for Non-temporary Addresses Option'], - ['Ethernet', 'IPv6', 'UDP', 'DHCPv6 Solicit Message', 'DHCP6 Elapsed Time Option', 'DHCP6 Client Identifier Option', 'DHCP6 Identity Association for Non-temporary Addresses Option', 'DHCP6 Option - Client FQDN', 'DHCP6 Vendor Class Option', 'DHCP6 Option Request Option'], - ['Ethernet', 'IPv6', 'UDP', 'DHCPv6 Solicit Message', 'DHCP6 Elapsed Time Option', 'DHCP6 Client Identifier Option', 'DHCP6 Identity Association for Non-temporary Addresses Option', 'DHCP6 Option Request Option', 'DHCP6 Option - Client FQDN'], - ['Ethernet', 'IPv6', 'UDP', 'DNS'], - ['Ethernet', 'IPv6', 'UDP', 'DNS', 'Raw'], - ['Ethernet', 'IPv6', 'UDP', 'Link Local Multicast Node Resolution - Query'], - ['Ethernet', 'IPv6', 'UDP', 'NTPHeader'], - ['Ethernet', 'IPv6', 'UDP', 'Raw'], - - ['Ethernet', 'Raw'], - - ['IP', 'ICMP', 'Raw'], - ['IP', 'Raw'], - ['IP', 'TCP'], - ['IP', 'TCP', 'Raw'], - ['IP', 'UDP'], - ['IP', 'UDP', 'BOOTP', 'DHCP options'], - ['IP', 'UDP', 'DNS'], - ['IP', 'UDP', 'DNS', 'Raw'], - ['IP', 'UDP', 'ISAKMP', 'ISAKMP SA'], - ['IP', 'UDP', 'NBNS query request'], - ['IP', 'UDP', 'NTPHeader'], - ['IP', 'UDP', 'Private (mode 7)'], - ['IP', 'UDP', 'Private (mode 7)', 'Raw'], - ['IP', 'UDP', 'RIP header', 'RIP entry'], - ['IP', 'UDP', 'Raw'], - ['IP', 'UDP', 'SNMP'], - ['IP', 'UDP', 'TFTP opcode', 'TFTP Read Request'], - ['Raw'] - ] - -#Following converts the label (readable string returned by ReturnLayers) to 
key (the string needed to find the actual layer in a packet. -#For example layer_label_to_key['Private (mode 7)' is 'NTPPrivate' -layer_label_to_key = {'802.1Q': 'Dot1Q', '802.3': 'Dot3', - 'AH': 'AH', 'ARP': 'ARP', 'Authenticator': 'NTPAuthenticator', - 'BOOTP': 'BOOTP', - 'Control message': 'NTPControl', - 'DHCP options': 'DHCP', 'DHCP6 Client Identifier Option': 'DHCP6OptClientId', 'DHCP6 Elapsed Time Option': 'DHCP6OptElapsedTime', - 'DHCP6 Identity Association for Non-temporary Addresses Option': 'DHCP6OptIA_NA', 'DHCP6 Option - Client FQDN': 'DHCP6OptClientFQDN', - 'DHCP6 Option Request Option': 'DHCP6OptOptReq', 'DHCP6 Rapid Commit Option': 'DHCP6OptRapidCommit', - 'DHCP6 Server Identifier Option': 'DHCP6OptServerId', 'DHCPv6 Solicit Message': 'DHCP6_Solicit', 'DHCP6 Vendor Class Option': 'DHCP6OptVendorClass', - 'DHCPv6 Confirm Message': 'DHCP6_Confirm', 'DHCPv6 Request Message': 'DHCP6_Request', 'DNS': 'DNS', - 'EAPOL': 'EAPOL', 'ESP': 'ESP', 'Ethernet': 'Ethernet', - 'GRE': 'GRE', - 'HSRP': 'HSRP', 'HSRP MD5 Authentication': 'HSRPmd5', - 'ICMP': 'ICMP', 'ICMP in ICMP': 'ICMPerror', 'ICMPv6 Destination Unreachable': 'ICMPv6DestUnreach', 'ICMPv6 Echo Reply': 'ICMPv6EchoReply', 'ICMPv6 Echo Request': 'ICMPv6EchoRequest', - 'ICMPv6 Neighbor Discovery - Neighbor Advertisement': 'ICMPv6ND_NA', - 'ICMPv6 Neighbor Discovery - Neighbor Solicitation': 'ICMPv6ND_NS', - 'ICMPv6 Neighbor Discovery - Router Advertisement': 'ICMPv6ND_RA', - 'ICMPv6 Neighbor Discovery - Router Solicitation': 'ICMPv6ND_RS', - 'ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address': 'ICMPv6NDOptDstLLAddr', - 'ICMPv6 Neighbor Discovery Option - MTU': 'ICMPv6NDOptMTU', - 'ICMPv6 Neighbor Discovery Option - Prefix Information': 'ICMPv6NDOptPrefixInfo', - 'ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option': 'ICMPv6NDOptRDNSS', - 'ICMPv6 Neighbor Discovery Option - Route Information Option': 'ICMPv6NDOptRouteInfo', - 'ICMPv6 Neighbor Discovery Option - Source Link-Layer 
Address': 'ICMPv6NDOptSrcLLAddr', - 'ICMPv6 Packet Too Big': 'ICMPv6PacketTooBig', 'ICMPv6 Time Exceeded': 'ICMPv6TimeExceeded', - 'IP': 'IP', 'IP in ICMP': 'IPerror', 'IPv6': 'IPv6', 'IPv6 Extension Header - Fragmentation header': 'IPv6ExtHdrFragment', - 'IPv6 Extension Header - Hop-by-Hop Options Header': 'IPv6ExtHdrHopByHop', 'IPv6 in ICMPv6': 'IPerror6', - 'ISAKMP': 'ISAKMP', 'ISAKMP SA': 'ISAKMP_payload_SA', - 'LLC': 'LLC', 'Link Local Multicast Node Resolution - Query': 'LLMNRQuery', - 'MLD - Multicast Listener Done': 'ICMPv6MLDone', 'MLD - Multicast Listener Query': 'ICMPv6MLQuery', 'MLD - Multicast Listener Report': 'ICMPv6MLReport', - 'NBNS query request': 'NBNSQueryRequest', 'NBNS query response': 'NBNSQueryResponse', 'NBT Datagram Packet': 'NBTDatagram', - 'NBT Session Packet': 'NBTSession', 'NTPHeader': 'NTPHeader', 'NTPv4 extensions': 'NTPExtensions', 'Padding': 'Padding', - 'Private (mode 7)': 'NTPPrivate', 'Radius': 'Radius', 'RIP entry': 'RIPEntry', 'RIP header': 'RIP', 'Raw': 'Raw', - 'SMBNegociate Protocol Request Header': 'SMBNegociate_Protocol_Request_Header', 'SMB Negociate Protocol Request Tail': 'SMBNegociate_Protocol_Request_Tail', 'SNAP': 'SNAP', 'SNMP': 'SNMP', - 'Skinny': 'Skinny', 'Spanning Tree Protocol': 'STP', - 'TCP': 'TCP', 'TCP in ICMP': 'TCPError', 'TFTP opcode': 'TFTP', 'TFTP Read Request': 'TFTP_RRQ', - 'UDP': 'UDP', 'UDP in ICMP': 'UDPerror', - 'VRRP': 'VRRP', - 'cooked linux': 'CookedLinux'} -#=============================================================================================== - -phys_layers = set(['802.1Q', 'Ethernet', 'cooked linux']) -addr_layers = set(['IP', 'IPv6', 'IPv6 Extension Header - Fragmentation header', 'IPv6 Extension Header - Hop-by-Hop Options Header']) -task_layers = set(['BOOTP', 'Control message', 'DHCP options', 'DNS', 'GRE', 'HSRP', 'HSRP MD5 Authentication', 'ICMP', 'ICMPv6 Destination Unreachable', 'ICMPv6 Neighbor Discovery - Neighbor Solicitation', 'IP', 'IP in ICMP', 'ICMP in ICMP', 
'ICMPv6 Packet Too Big', 'IPv6 in ICMPv6', 'ISAKMP', 'ISAKMP SA', 'NBNS query request', 'NBNS query response', 'NBT Datagram Packet', 'NTPHeader', 'Private (mode 7)', 'Radius', 'RIP header', 'RIP entry', 'Skinny', 'TCP', 'TCP in ICMP', 'TFTP opcode', 'TFTP Read Request', 'UDP', 'UDP in ICMP', 'SNMP', 'VRRP']) -trailer_layers = set(['Raw', 'Padding']) -special_layers = set(['802.1Q', '802.3', 'ARP', 'EAPOL', 'Ethernet', 'LLC', 'Padding', 'Raw', 'SNAP', 'Spanning Tree Protocol']) - -meta = {} #Empty dictionary - not used in this version of passer, but will be used in the next. Fills the open space in the ShowPacket function call. - -passerVersion = "2.89" - - -#======== Functions ======== - - - -def layer_slice(layer_l): - """Break a list of layers into physical, address, task, trailer, special and unknown components. Either the first 4 will be lists - which, when concatenated will return the original list (and unknown will be []), special will contain a the original list (and the - rest will be []), or the first 5 will be [] and the original list will be in unknown.""" - - phys_l = [] - addr_l = [] - task_l = [] - trailer_l = [] - special_l = [] - unknown_l = [] - split_ok = True - - if set(layer_l).issubset(special_layers): - return [], [], [], [], layer_l, [] - - addr_i = 0 - while addr_i < len(layer_l) and layer_l[addr_i] not in addr_layers: - addr_i += 1 - - if addr_i == len(layer_l): - #No IP layer was found - split_ok = False - unknown_l = layer_l - else: - #IP layer was found at layer_l[addr_i] - phys_l = layer_l[0:addr_i] - addr_l = [layer_l[addr_i]] - task_l = layer_l[addr_i+1:] - - while task_l and task_l[0] in addr_layers: - #We have an additional address layer at the beginning of task - append it to addr_l - addr_l.append(task_l[0]) - task_l = task_l[1:] - - while task_l and task_l[-1] in trailer_layers: - #Move this junk layer to the beginning of trailer and strip from task_l. 
- trailer_l.insert(0, task_l[-1]) - task_l = task_l[0:-1] - split_ok = set(phys_l).issubset(phys_layers) and set(addr_l).issubset(addr_layers) and set(task_l).issubset(task_layers) and set(trailer_l).issubset(trailer_layers) - - if split_ok: - return (phys_l, addr_l, task_l, trailer_l, [], []) - else: - return ([], [], [], [], [], layer_l) - - -#for X in known_layer_lists: -# p, a, t, z, s, u = layer_slice(X) -# if u: -# print(str(u)) -# elif s: -# print("Special: " + str(s)) -#quit() - - -def signal_handler(sig, frame): - """_Should_ catch ctrl-C and allow graceful exit with a reporting feature on the way out. - Unfortunately, the handler is executed in the main python thread, and most of the script - is running inside sniff. May have to set a flag here and exit sniff if flag set?""" - #https://docs.python.org/3/library/signal.html - #https://www.cybrary.it/0p3n/sniffing-inside-thread-scapy-python/ ? - #For the moment we are _not_ stopping passer on ctrl-c. - - global must_stop - - if sig == signal.SIGINT: - #sys.stderr.write("Ctrl-C pressed, exiting in a moment.\n") - sys.stderr.write("Ctrl-C pressed, generating summary lines.\n") - generate_summary_lines() - must_stop = True - #sys.exit(1) - else: - sys.stderr.write("Unhandled signal type: " + str(sig) + "\n") - - -def exit_now(): - """Returns true if exit was requested. Checks global must_stop, which is set in signal_handler.""" +#On mac OS/X with python 2.7 at least, queue sizes are limited to 32767 +max_handler_qsz = 16384 +highpri_packet_qsz = 32767 +lowpri_packet_qsz = 4096 +output_qsz = 32767 +unhandled_qsz = 32767 +ip_lookup_qsz = 32767 +host_lookup_qsz = 4096 +nice_raise = 2 #Medium/lower priority processes raise their nice level by this amount/this amount+2. This is in addition to any adjustments by running the entire program under the "nice" executable. 
- sys.stderr.write("exit_now called") - return must_stop +#======== Support functions ======== +def whatami(base_name): + """Returns debug string with information about the current process.""" + ret_whatami = base_name -def exit_now_packet_param(one_packet_param): - """Returns true if exit was requested. Checks global must_stop, which is set in signal_handler. Packet handed to us is ignored.""" + ret_whatami += '/self_name=' + str(current_process().name) - sys.stderr.write("exit_now_packet_param called") + if hasattr(os, 'getppid'): + ret_whatami += '/ppid=' + str(os.getppid()) + ret_whatami += '/pid=' + str(os.getpid()) - return must_stop + return ret_whatami -def generate_summary_lines(): - """Print any remaining lines, generally ones that are stored but not a direct result of a packet.""" +def Progress(progress_string): + """If ShowProgress is True, put a progress indicator to stderr.""" - #Because this is called with no apparent way to hand down params other than the raw packet, we have to pull these two from main by hand. - prefs = cl_args - dests = destinations + if ShowProgress: + sys.stderr.write(str(progress_string)) + sys.stderr.flush() - #These come first because they may add 'scan' to the suspicious characteristics list for one or more IPs, which will be printed by the next loop. 
-#FIXME - if "ClosedUDPPortsReceived" in processpacket.__dict__: #Cross-function variable - for an_ip in sorted(processpacket.ClosedUDPPortsReceived): - if len(processpacket.ClosedUDPPortsReceived[an_ip]) >= min_closed_ports_for_scanner: - ReportId("IP", an_ip, "IP", "suspicious", 'Scanned ' + str(len(processpacket.ClosedUDPPortsReceived[an_ip])) + ' UDP closed ports.', (['scan', ]), prefs, dests) -#FIXME - #if "ClosedTCPPortsReceived" in processpacket.__dict__: #Cross-function variable - # for an_ip in sorted(processpacket.ClosedTCPPortsReceived): - # if len(processpacket.ClosedTCPPortsReceived[an_ip]) >= min_closed_ports_for_scanner: - # ReportId("IP", an_ip, "IP", "suspicious", 'Scanned ' + str(len(processpacket.ClosedTCPPortsReceived[an_ip])) + ' TCP closed ports.', (['scan', ]), prefs, dests) +#======== Specific layer handlers ======== +def output_handler(sh_da, prefs, dests): + """Process all CSV output supplied by the other processes.""" - for an_ip in sorted(ReportId.NewSuspiciousIPs): #Cross-function variable - ReportId("IP", an_ip, "IP", "suspicious", 'Warnings:' + ':'.join(ReportId.NewSuspiciousIPs[an_ip]), ([]), prefs, dests) #Cross-function variable + if "need_to_exit" not in output_handler.__dict__: + output_handler.need_to_exit = False - return + if 'tr_already_submitted' not in output_handler.__dict__: + output_handler.tr_already_submitted = set([]) + os.nice(nice_raise) #Lower priority to give higher priority to critical tasks + debug_out(whatami('output'), prefs, dests) -def remove_control_characters(s): - """Strip out any control characters in the string.""" + if "lines_sent" not in output_handler.__dict__: + output_handler.lines_sent = [] - return "".join(ch for ch in unicode(s) if unicodedata.category(ch)[0] != "C") + if "log_h" not in output_handler.__dict__: + output_handler.log_h = None + if prefs['log']: + try: + if sys.version_info > (3, 0): #Python 3 + output_handler.log_h = open(prefs['log'], 'a', errors='backslashreplace') + else: 
#Python 2 + output_handler.log_h = codecs.open(prefs['log'], 'a', errors='ignore') + except: + debug_out("Unable to append to " + prefs['log'] + ", no logging will be done.", prefs, dests) -def packet_timestamps(pt_p): - """This returns the timestamp in (floating point) seconds-since-the-epoch and (string) UTC human readable formats.""" - #Add , prefs, dests to params if any debug_out statements needed + while True: + try: + out_rec = dests['output'].get(block=True, timeout=None) + except KeyboardInterrupt: + output_handler.need_to_exit = True + break + else: + if out_rec is None: + output_handler.need_to_exit = True + break + if out_rec not in output_handler.lines_sent: + output_handler.lines_sent.append(out_rec) + if len(out_rec) != 6: + debug_out('outrecord length != 6', prefs, dests) + elif out_format == 'json': + out_string = '{"Type": "' + str(out_rec[Type_e]) + '", "IPAddr": "' + str(out_rec[IPAddr_e]) + '", "Proto": "' + str(out_rec[Proto_e]) + '", "State": "' + str(out_rec[State_e]) + '", "Description": "' + str(out_rec[Description_e]) + '", "Warnings": ' + str(list(out_rec[Warnings_e])) + '}' + try: + print(out_string) #.decode('utf-8') + except UnicodeDecodeError: + pass + except: + raise + if output_handler.log_h is not None: + try: + if sys.version_info > (3, 0): #Python 3 + output_handler.log_h.write(out_string + '\n') + else: #Python 2 + output_handler.log_h.write(out_string.encode('utf-8') + '\n') + output_handler.log_h.flush() + except UnicodeDecodeError: + pass + except: + raise + + elif out_format == 'csv': + out_csv = ','.join((out_rec[Type_e], out_rec[IPAddr_e], out_rec[Proto_e], out_rec[State_e], out_rec[Description_e] + ' ' + str(list(out_rec[Warnings_e])).replace(',', ' ').replace("'", '').strip('[]'))) + try: + print(out_csv) #.decode('utf-8') + except UnicodeDecodeError: + pass + except: + raise + + if output_handler.log_h is not None: + try: + if sys.version_info > (3, 0): #Python 3 + output_handler.log_h.write(out_csv + '\n') 
#.encode('utf-8') , believed to be wrong + else: #Python 2 + output_handler.log_h.write(out_csv.encode('utf-8') + '\n') + output_handler.log_h.flush() + except UnicodeDecodeError: + pass + except: + raise + + if prefs['active'] and not output_handler.need_to_exit: + if out_rec[IPAddr_e] not in ('', '0.0.0.0', '::', '0000:0000:0000:0000:0000:0000:0000:0000'): + try: + dests['ip_lookup_asn'].put(out_rec[IPAddr_e], block=False) + except Full: + pass + + try: + dests['ip_lookup_geoip'].put(out_rec[IPAddr_e], block=False) + except Full: + pass + + try: + dests['ip_lookup_hostname'].put(out_rec[IPAddr_e], block=False) + except Full: + pass + + if out_rec[IPAddr_e] not in output_handler.tr_already_submitted: + output_handler.tr_already_submitted.add(out_rec[IPAddr_e]) + try: + dests['ip_lookup_traceroute'].put(out_rec[IPAddr_e], block=False) + except Full: + pass + if out_rec[Type_e] == "DN" and out_rec[Proto_e] in ('A', 'AAAA', 'PTR', 'CNAME'): + try: + dests['host_lookup'].put(out_rec[State_e], block=False) + except Full: + pass + debug_out('Exiting output', prefs, dests) + + + +def unhandled_handler(sh_da, prefs, dests): + """Save all unhandled packets supplied by the other processes.""" + + os.nice(nice_raise) #Lower priority to give higher priority to critical tasks + debug_out(whatami('unhandled'), prefs, dests) + + if "packets_saved" not in unhandled_handler.__dict__: + unhandled_handler.packets_saved = [] + + if "unhandled_h" not in unhandled_handler.__dict__: + unhandled_handler.unhandled_h = None + if prefs['unhandled']: + try: + unhandled_handler.unhandled_h = PcapWriter(filename=prefs['unhandled'], append=True) + except: + debug_out("Unable to open " + prefs['unhandled'] + ", no unhandled packets will be saved.", prefs, dests) - p_timestamp = pt_p.time #packet.time can be read from an existing packet or written to a created packet. 
- p_seconds_since_epoch = float(time.mktime(datetime.fromtimestamp(p_timestamp).timetuple())) - #debug_out(str(p_seconds_since_epoch), prefs, dests) + if unhandled_handler.unhandled_h is not None: + while True: + try: + out_rec = dests['unhandled'].get(block=True, timeout=None) + except KeyboardInterrupt: + break + else: + if out_rec is None: + break + if out_rec not in unhandled_handler.packets_saved: + unhandled_handler.packets_saved.append(out_rec) + unhandled_handler.unhandled_h.write(out_rec) - p_human_readable_utc = datetime.fromtimestamp(p_seconds_since_epoch, tz=pytz.utc).strftime('%Y-%m-%d %H:%M:%S') #This shows UTC - #debug_out(p_human_readable, prefs, dests) + debug_out('Exiting unhandled', prefs, dests) - #Not used at the moment. - #p_human_readable_localtz = datetime.fromtimestamp(p_timestamp).strftime('%Y-%m-%d %H:%M:%S') - #debug_out(p_human_readable_localtz, prefs, dests) #This is the human readable timestamp in local time - return (p_seconds_since_epoch, p_human_readable_utc) +def suspicious_handler(sh_da, prefs, dests): + """Save all suspicious packets supplied by the other processes.""" -##FIXME - remove this function -#def LogNewPayload(PayloadDir, PayloadFile, Payload): -# """Saves the payload from an ack packet to a file named after the server or client port involved.""" -# -# #Better yet, wrpcap("/path/to/pcap", list_of_packets) -# -# if prefs['devel']: -# if os.path.isdir(PayloadDir): -# if not Payload == b'None': -# pfile = open(PayloadFile, 'a') -# pfile.write(Payload) -# pfile.close() + os.nice(nice_raise) #Lower priority to give higher priority to critical tasks + debug_out(whatami('suspicious'), prefs, dests) + if "packets_saved" not in suspicious_handler.__dict__: + suspicious_handler.packets_saved = [] -def write_object(filename, generic_object): - """Write out an object to a file.""" + if "suspicious_h" not in suspicious_handler.__dict__: + suspicious_handler.suspicious_h = None + if prefs['suspicious']: + try: + 
suspicious_handler.suspicious_h = PcapWriter(filename=prefs['suspicious'], append=True) + except: + debug_out("Unable to open " + prefs['suspicious'] + ", no suspicious packets will be saved.", prefs, dests) - try: - with open(filename, "wb") as write_h: - write_h.write(generic_object.encode('utf-8')) - except: - sys.stderr.write("Problem writing " + filename + ", skipping.") - raise + if suspicious_handler.suspicious_h is not None: + while True: + try: + out_rec = dests['suspicious'].get(block=True, timeout=None) + except KeyboardInterrupt: + break + else: + if out_rec is None: + break + if out_rec not in suspicious_handler.packets_saved: + suspicious_handler.packets_saved.append(out_rec) + suspicious_handler.suspicious_h.write(out_rec) - return + debug_out('Exiting suspicious', prefs, dests) -#def mac_of_ipaddr(ipv6addr): -# """For a supplied IPv6 address in EUI-64 format, return the mac address of the system that's behind it. For an address not in that format, return ''.""" +def ip_lookup_geoip_extract(ip_addr, prefs, dests): + """Lookup Geoip information about an IP address and return as a set of tuples.""" + state_set = set([]) + if "geo_reader" not in ip_lookup_geoip_extract.__dict__: + ip_lookup_geoip_extract.geo_reader = geolite2.reader() -#May be able to do this with just a dict. -#def bot_warnings(bw_ip, bw_proto, bw_port): -# """For the given IP, TCP/UDP, port trio, return any additional warnings if that machine may be part of a bot.""" -# -# -# bw_warnings = [] -# -# -# orig_text = '' -# -# return - - - - -def ReportId(Type, CompressedIPAddr, Proto, State, Description, Warnings, prefs, dests): - """Print and log a new piece of network information.""" - - #Can't use : for separator, IPv6, similarly '.' 
for ipv4 - #Can't use "/" because of filesystem - #Don't want to use space because of filesystem - # Type, IPAddr, Proto State Optional description (may be empty) - # 'IP', IPaddr, 'IP', dead or live, p0f OS description - # 'MA', IPaddr, 'Ethernet', MacAddr, ManufDescription - # 'TC', IPaddr, 'TCP_'Port, closed or open, client description - # 'TS', IPaddr, 'TCP_'Port, closed or listening, server description - # 'UC', IPaddr, 'UDP_'Port, open or closed, udp client port description - # 'US', IPaddr, 'UDP_'Port, open or closed, udp server port description - # 'DN', IPaddr, 'A' or 'PTR', hostname, possible extra info - # 'RO', IPaddr, 'TTLEx', router, possible extra info - # 'PC', IPaddr, 'PROTO_'PNum open, protocol name - # 'PS', IPaddr, 'PROTO_'PNum open, protocol name - - #Persistent data structures - these are loaded at first entry into the function and persist for the life of the process. - if "GenDesc" not in ReportId.__dict__: - #Dictionary of Dictionaries of sets, replaces the specific dictionaries. First key is 2 letter record type, second key is IP address, final value (a set) is what we have seen for that record type and IP. - ReportId.GenDesc = {'DN': {}, 'IP': {}, 'MA': {}, 'NA': {}, 'PC': {}, 'PS': {}, 'RO': {}, 'TC': {}, 'TS': {}, 'UC': {}, 'US': {}} - - #Dictionary of lists. Key is IP address, value is list which contains all this IP address' suspicious characteristics. 
- if "SuspiciousIPs" not in ReportId.__dict__: - ReportId.SuspiciousIPs = load_json_from_file(suspicious_ips_file) - if ReportId.SuspiciousIPs: - for one_trusted in __main__.TrustedIPs: - if one_trusted in ReportId.SuspiciousIPs: - del ReportId.SuspiciousIPs[one_trusted] - else: - debug_out("Problem reading/parsing " + suspicious_ips_file + ", skipping.", prefs, dests) - ReportId.SuspiciousIPs = {} + if not ip_addr.startswith(('10.', '169.254.', '172.16.', '172.17.', '172.18.', '172.19.', '172.20.', '172.21.', '172.22.', '172.23.', '172.24.', '172.25.', '172.26.', '172.27.', '172.28.', '172.29.', '172.30.', '172.31.', '192.168.', '127.', 'fe80:')): + geo_match = ip_lookup_geoip_extract.geo_reader.get(ip_addr) + if geo_match: + rec_type = "CC" - #Just like above, but _only_ the entries added during this session; used for printing with ctrl-c or at the end. - if "NewSuspiciousIPs" not in ReportId.__dict__: - ReportId.NewSuspiciousIPs = {} + Country = "" + city_state = "" + if "country" in geo_match and "names" in geo_match["country"] and "en" in geo_match["country"]["names"]: + Country = geo_match["country"]["names"]["en"] - if "MacAddr" not in ReportId.__dict__: - ReportId.MacAddr = {} #String dictionary: For a given IP (key), what is its mac (value)? + if "subdivisions" in geo_match: #This is a list. + city_state = "/" + for one_subdiv in geo_match['subdivisions']: + if "names" in one_subdiv and "en" in one_subdiv["names"]: + city_state = "/" + one_subdiv["names"]["en"] + rec_type = "CSC" + break #Stop looking through the geo_match['subdivisions'] list once we've found one. - if "EtherManuf" not in ReportId.__dict__: - ReportId.EtherManuf = {} #String dictionary: for a given key of the first three uppercase octets of a mac address ("00:01:0F"), who made this card? 
+ if "city" in geo_match and "names" in geo_match["city"] and "en" in geo_match["city"]["names"]: + city_state = city_state + "/" + geo_match["city"]["names"]["en"] + rec_type = "CSC" + else: + city_state = city_state + "/" - ReportId.EtherManuf = MacDataDict(['/usr/share/ettercap/etter.finger.mac', '/opt/local/share/ettercap/etter.finger.mac', '/usr/share/nmap/nmap-mac-prefixes', '/opt/local/share/nmap/nmap-mac-prefixes', '/usr/share/wireshark/manuf', '/opt/local/share/wireshark/manuf', '/usr/share/ethereal/manuf', '/usr/share/arp-scan/ieee-oui.txt', '/opt/local/share/arp-scan/ieee-oui.txt'], prefs, dests) + if "country" in geo_match and "iso_code" in geo_match["country"]: + if rec_type == 'CC': + state_set.add(("GE", ip_addr, rec_type, geo_match["country"]["iso_code"], Country, ())) + else: + state_set.add(("GE", ip_addr, rec_type, geo_match["country"]["iso_code"], Country + city_state, ())) - if len(ReportId.EtherManuf) == 0: - debug_out("None of the default mac address listings found. 
Please install ettercap, nmap, wireshark, and/or arp-scan.", cl_args, destinations) - else: - debug_out(str(len(ReportId.EtherManuf)) + " mac prefixes loaded.", cl_args, destinations) + return state_set - if "log_h" not in ReportId.__dict__: - ReportId.log_h = None - if prefs['log']: - try: - ReportId.log_h = open(prefs['log'], 'a') - except: - debug_out("Unable to append to " + prefs['log'] + ", no logging will be done.", cl_args, destinations) - IPAddr = explode_ip(CompressedIPAddr, prefs, dests) +def ip_lookup_geoip_handler(ip_lookup_geoip_q, sh_da, prefs, dests): + """Lookup Geoip information about IP addresses.""" - Location = IPAddr + "," + Proto - Description = Description.replace('\n', '').replace('\r', '').replace(',', ' ') + if prefs['active'] and prefs['geolite_loaded']: + os.nice(nice_raise+2) #Lower priority to give higher priority to critical tasks + debug_out(whatami('ip_lookup_geoip'), prefs, dests) - if Warnings: #Non-empty set of strings - if Description: - Description += ' ' - Description += 'Warnings:' + ':'.join(Warnings) + if "ips_researched" not in ip_lookup_geoip_handler.__dict__: + ip_lookup_geoip_handler.ips_researched = ['', '0.0.0.0', '::', '0000:0000:0000:0000:0000:0000:0000:0000'] #No point in looking these up - if IPAddr in __main__.TrustedIPs: - if Warnings == ['plaintext'] and Proto == 'UDP_514': - pass - elif Warnings == ['portpolicyviolation', ]: - debug_out("Attempt to add trusted IP " + IPAddr + " to SuspiciousIPs because of portpolicyviolation.", prefs, dests) + while True: + try: + out_rec = ip_lookup_geoip_q.get(block=True, timeout=None) + except KeyboardInterrupt: + break else: - debug_out("Attempt to add trusted IP " + IPAddr + " to SuspiciousIPs.", prefs, dests) - debug_out("Attempt to add trusted IP " + IPAddr + " to SuspiciousIPs." 
+ '|' + str(Type) + '|' + str(Proto) + '|' + str(State) + '|' + str(Description) + '|' + str(Warnings), prefs, dests) - #quit() - elif 'spoofed' not in Warnings: - #We have to add this warning to ReportId.SuspiciousIPs, the master list of _all_ warnings for all IPs.... - if IPAddr not in ReportId.SuspiciousIPs: - ReportId.SuspiciousIPs[IPAddr] = [] - for one_warning in Warnings: - if one_warning not in ReportId.SuspiciousIPs[IPAddr]: - ReportId.SuspiciousIPs[IPAddr].append(one_warning) - - #....and we have to add it to ReportId.NewSuspiciousIPs, which only holds the new things we've discovered this session. - if IPAddr not in ReportId.NewSuspiciousIPs: - ReportId.NewSuspiciousIPs[IPAddr] = [] - for one_warning in Warnings: - if one_warning not in ReportId.NewSuspiciousIPs[IPAddr]: - ReportId.NewSuspiciousIPs[IPAddr].append(one_warning) - - - ShouldPrint = True - - if Type not in ReportId.GenDesc: - ReportId.GenDesc[Type] = {} - - if Type in ("TS", "US"): - if Location not in ReportId.GenDesc[Type]: - ReportId.GenDesc[Type][Location] = set() - - if State + ',' + Description in ReportId.GenDesc[Type][Location]: - ShouldPrint = False #Don't print if we've already printed it with this state + description - else: - ReportId.GenDesc[Type][Location].add(State + ',' + Description) - elif Type in ("TC", "UC"): - if Location not in ReportId.GenDesc[Type]: - ReportId.GenDesc[Type][Location] = set() + if out_rec is None: + break + out_rec = explode_ip(out_rec, prefs, dests) - if State + ',' + Description in ReportId.GenDesc[Type][Location]: - ShouldPrint = False #Don't print if we've already printed it with this state + description - else: - ReportId.GenDesc[Type][Location].add(State + ',' + Description) - elif Type in ("IP", "NA", "PC", "PS"): - if Location not in ReportId.GenDesc[Type]: - ReportId.GenDesc[Type][Location] = set() + if out_rec not in ip_lookup_geoip_handler.ips_researched: + ip_lookup_geoip_handler.ips_researched.append(out_rec) - if State + ',' + Description 
in ReportId.GenDesc[Type][Location]: - ShouldPrint = False #Don't print if we've already printed it with this state + description - else: - ReportId.GenDesc[Type][Location].add(State + ',' + Description) - elif Type == "DN": - #Note that State will be the Hostname, and Proto is the Record type - if Location not in ReportId.GenDesc[Type]: - ReportId.GenDesc[Type][Location] = set() - - #FIXME - perhaps description could indicate low TTL? <300? <150? - if Proto in ('A', 'AAAA', 'CNAME', 'PTR') and State == '': - ShouldPrint = False - elif State == '' and IPAddr in ('::', '0000:0000:0000:0000:0000:0000:0000:0000'): #Not sure if this should be limited to hostnames with and Proto in ('A', 'AAAA', 'CNAME', 'PTR') - ShouldPrint = False - elif State + ',' + Description in ReportId.GenDesc[Type][Location]: - ShouldPrint = False - else: - ReportId.GenDesc[Type][Location].add(State + ',' + Description) #Add this Hostname to the list - elif Type == "RO": - if Description == '': - description_string = Proto #This holds the type of packet that causes us to believe it's a router, like "RouterAdv" - else: - description_string = Description + for statement in ip_lookup_geoip_extract(out_rec, prefs, dests): + dests['output'].put(statement) - if IPAddr not in ReportId.GenDesc[Type]: #If we ever need to test if an IP is a router, use IPAddr in ReportId.GenDesc['RO'] - ReportId.GenDesc[Type][IPAddr] = set() + Progress('g') - if description_string in ReportId.GenDesc[Type][IPAddr]: - ShouldPrint = False #Don't print if we've already printed it with this description - else: - ReportId.GenDesc[Type][IPAddr].add(description_string) - elif Type == "MA": - State = State.upper() - if IPAddr in ('', '::', '0000:0000:0000:0000:0000:0000:0000:0000'): - ShouldPrint = False #Not registering :: as a null IP address - elif (IPAddr in ReportId.MacAddr) and (ReportId.MacAddr[IPAddr] == State): - ShouldPrint = False #Already known, no need to reprint + debug_out('Exiting ip_lookup_geoip', prefs, dests) 
+ elif prefs['active'] and not prefs['geolite_loaded']: + debug_out('Unable to load geolite2 module, exiting ip_lookup_geoip', prefs, dests) + + + +def ip_lookup_asn_extract(ip_addr, prefs, dests): + """Lookup ASN information about an IP address and return as a set of tuples.""" + + state_set = set([]) + + if "as_nums" not in ip_lookup_asn_extract.__dict__: + ip_lookup_asn_extract.as_nums = {} + + if "asn_countries" not in ip_lookup_asn_extract.__dict__: + ip_lookup_asn_extract.asn_countries = {} + + if "asn_descriptions" not in ip_lookup_asn_extract.__dict__: + ip_lookup_asn_extract.asn_descriptions = {} + #script_dir = os.path.dirname(os.path.abspath(__file__)) + if os.path.exists(config_dir + '/ip2asn-combined.tsv') and os.access(config_dir + '/ip2asn-combined.tsv', os.R_OK): + ip_lookup_asn_extract.as_nums, ip_lookup_asn_extract.asn_countries, ip_lookup_asn_extract.asn_descriptions = load_asn_table(config_dir + '/ip2asn-combined.tsv') else: - ReportId.MacAddr[IPAddr] = State - if State[:8] in ReportId.EtherManuf: - Description = ReportId.EtherManuf[State[:8]].replace(',', ' ') + debug_out(config_dir + '/ip2asn-combined.tsv either does not exist or is not readable, please download from https://iptoasn.com/data/ip2asn-combined.tsv.gz and decompress with gunzip.', prefs, dests) + #elif os.path.exists(script_dir + '/ip2asn-combined.tsv'): + # ip_lookup_asn_extract.as_nums, ip_lookup_asn_extract.asn_countries, ip_lookup_asn_extract.asn_descriptions = load_asn_table(script_dir + '/ip2asn-combined.tsv') + #elif os.path.exists('./ip2asn-combined.tsv'): + # ip_lookup_asn_extract.as_nums, ip_lookup_asn_extract.asn_countries, ip_lookup_asn_extract.asn_descriptions = load_asn_table('./ip2asn-combined.tsv') - if ShouldPrint: - try: - OutString = Type + "," + IPAddr + "," + Proto + "," + State + "," + Description - if prefs['timestamp']: - OutString += ',' + str(processpacket.current_stamp) + ',' + processpacket.current_string - #else: - # OutString += ',,' #Future: When 
we're not showing the timestamps, still create the columns so logs line up - print(OutString) - if ReportId.log_h is not None: - ReportId.log_h.write(OutString + '\n') - ReportId.log_h.flush() - except UnicodeDecodeError: - pass + if not ip_addr.startswith(('10.', '169.254.', '172.16.', '172.17.', '172.18.', '172.19.', '172.20.', '172.21.', '172.22.', '172.23.', '172.24.', '172.25.', '172.26.', '172.27.', '172.28.', '172.29.', '172.30.', '172.31.', '192.168.', '127.', 'fe80:')): + for one_d in formatted_asn_output(ip_addr, 'json', ip_lookup_asn_extract.as_nums, ip_lookup_asn_extract.asn_countries, ip_lookup_asn_extract.asn_descriptions): + state_set.add((one_d['Type'], one_d['IPAddr'], one_d['Proto'], one_d['State'], one_d['Description'], ())) + + return state_set + + + +def ip_lookup_asn_handler(ip_lookup_asn_q, sh_da, prefs, dests): + """Lookup ASN information about IP addresses.""" + + if prefs['active'] and prefs['ip2asn_loaded']: + os.nice(nice_raise+2) #Lower priority to give higher priority to critical tasks + debug_out(whatami('ip_lookup_asn'), prefs, dests) + + if "ips_researched" not in ip_lookup_asn_handler.__dict__: + ip_lookup_asn_handler.ips_researched = ['', '0.0.0.0', '::', '0000:0000:0000:0000:0000:0000:0000:0000'] #No point in looking these up + while True: + try: + out_rec = ip_lookup_asn_q.get(block=True, timeout=None) + except KeyboardInterrupt: + break + else: + if out_rec is None: + break + out_rec = explode_ip(out_rec, prefs, dests) + + if out_rec not in ip_lookup_asn_handler.ips_researched: + ip_lookup_asn_handler.ips_researched.append(out_rec) -def ReportAll(output_tuple_set, prefs, dests): - """Wrapper function for original passer script used to accept a set of tuples generated by {LAYER}_extract functions and send them to ReportId. 
- Example call: ReportAll(ARP_extract(p, meta)) .""" + for statement in ip_lookup_asn_extract(out_rec, prefs, dests): + dests['output'].put(statement) - for a_tuple in output_tuple_set: - ReportId(a_tuple[Type_e], a_tuple[IPAddr_e], a_tuple[Proto_e], a_tuple[State_e], a_tuple[Description_e], a_tuple[Warnings_e], prefs, dests) + Progress('a') + debug_out('Exiting ip_lookup_asn', prefs, dests) + elif prefs['active'] and not prefs['ip2asn_loaded']: + debug_out('Unable to load ip2asn module, exiting ip_lookup_asn', prefs, dests) -def process_udp_ports(meta, p, prefs, dests): - """Process a UDP packet (ipv4 or ipv6).""" - #Persistent variables - #String dictionary: What server is on this "IP,Proto_Port"? Locally found strings. - if "UDPManualServerDescription" not in process_udp_ports.__dict__: - process_udp_ports.UDPManualServerDescription = {} - #Transition variables - sIP = meta['sIP'] - dIP = meta['dIP'] - sport = meta['sport'] - dport = meta['dport'] - SrcService = meta['SrcService'] - DstService = meta['DstService'] - SrcClient = meta['SrcClient'] - FromPort = sIP + ",UDP_" + sport +def ip_lookup_hostname_extract(ip_addr, prefs, dests): + """Lookup hostnames for an IP address and return as a set of tuples.""" - if p.getlayer(Raw): - Payload = p.getlayer(Raw).load + state_set = set([]) + + try: + r_name, _, _ = socket.gethostbyaddr(ip_addr) #Don't need params 2 and 3, r_alias, r_addresslist + except (socket.herror, socket.gaierror, KeyboardInterrupt, OSError): + pass else: - Payload = b"" - - #Persistent variables - if "SipPhoneMatch" not in process_udp_ports.__dict__: - process_udp_ports.SipPhoneMatch = re.compile('Contact: ([0-9-]+) /dev/null 2>&1\nchmod 777') > -1: - ReportId("UC", sIP, "UDP_" + dport, "open", "udp123/client sending shellcode", (['malicious', ]), prefs, dests) - elif (sport == "123") or (dport == "123"): - ntp_stratum = p[NTPHeader].stratum - #What comes back in the "id" field is either an IPv4 address of sIP's primary reference (good!) 
or - #the first 4 bytes of the MD5 hash of the IPv6 address of sIP's primary reference (bad.) Without actively - #checking, there's no way to distinguish the two cases. https://www.nwtime.org/ntps-refid/ - ntp_id = p[NTPHeader].id - ntp_ref_id = str(p[NTPHeader].ref_id).rstrip(' \t\r\n\0') - if ntp_id: - ReportId("US", sIP, "UDP_" + sport, "open", 'ntp/server stratum=' + str(ntp_stratum) + ' reference=' + str(ntp_id), ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = 'ntp/server stratum=' + str(ntp_stratum) + ' reference=' + str(ntp_id) - ReportId("US", ntp_id, "UDP_" + sport, "open", 'ntp/server inferred from being a reference but must be checked.', ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[ntp_id + ",UDP_" + sport] = 'ntp/server inferred from being a reference but must be checked.' - elif ntp_ref_id in known_ntp_refs: - ReportId("US", sIP, "UDP_" + sport, "open", 'ntp/server stratum=' + str(ntp_stratum), ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = 'ntp/server stratum=' + str(ntp_stratum) - else: - ReportId("US", sIP, "UDP_" + sport, "open", 'ntp/server stratum=' + str(ntp_stratum), ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = 'ntp/server stratum=' + str(ntp_stratum) - #ShowPacket(p, meta, "IP/UDP/ntp with null reference:_" + str(ntp_ref_id) + "_", HonorQuit, prefs, dests) #Even after adding 'i' to known_ntp_refs, this still kept tripping. 
- else: - ShowPacket(p, meta, "IP/UDP/unhandled packet with NTPHeader layer", HonorQuit, prefs, dests) - -#__ haslayer(NTPPrivate) - elif has_advanced_ntp_headers and p.haslayer(NTPPrivate): - if (dport == "123") and p[NTPPrivate].response == 0: #response == 0 is a request - if p[NTPPrivate].request_code == 42: #REQ_MON_GETLIST_1 - ReportId("UC", sIP, "UDP_123", "open", 'ntp/client REQ_MON_GETLIST_1: Likely spoofed and DDOSed source IP', (['amplification', 'spoofed']), prefs, dests) - elif p[NTPPrivate].request_code == 32: #REQ_REQUEST_KEY - ReportId("UC", sIP, "UDP_123", "open", 'ntp/client', ([]), prefs, dests) + return state_set + + + +def ip_lookup_hostname_handler(ip_lookup_hostname_q, sh_da, prefs, dests): + """Lookup hostname information about IP addresses.""" + + if prefs['active']: + os.nice(nice_raise+2) #Lower priority to give higher priority to critical tasks + debug_out(whatami('ip_lookup_hostname'), prefs, dests) + + if "ips_researched" not in ip_lookup_hostname_handler.__dict__: + ip_lookup_hostname_handler.ips_researched = ['', '0.0.0.0', '::', '0000:0000:0000:0000:0000:0000:0000:0000'] #No point in looking these up + + #Code left in in case we switch to dnspython/dns.resolver + #if "unhandled_h" not in unhandled_handler.__dict__: + # unhandled_handler.unhandled_h = None + # if prefs['unhandled']: + # try: + # unhandled_handler.unhandled_h = PcapWriter(filename=prefs['unhandled'], append=True) + # except: + # debug_out("Unable to open " + prefs['unhandled'] + ", no unhandled packets will be saved.", prefs, dests) + + #if unhandled_handler.unhandled_h is not None: + while True: + try: + out_rec = ip_lookup_hostname_q.get(block=True, timeout=None) + except KeyboardInterrupt: + break else: - ShowPacket(p, meta, "IPv4/UDPv4/ntp Mode 7 request but not REQ_MON_GETLIST_1", HonorQuit, prefs, dests) - elif (sport == "123") and p[NTPPrivate].response == 1: #response == 1 is a reply - if p[NTPPrivate].request_code == 42: #REQ_MON_GETLIST_1 - ReportId("US", 
sIP, "UDP_123", "open", 'ntp/server REQ_MON_GETLIST_1: Likely middleman in DDOS', (['amplification', 'dos']), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = 'ntp/server REQ_MON_GETLIST_1: Likely middleman in DDOS' + if out_rec is None: + break + out_rec = explode_ip(out_rec, prefs, dests) + + if out_rec not in ip_lookup_hostname_handler.ips_researched: + ip_lookup_hostname_handler.ips_researched.append(out_rec) + + for statement in ip_lookup_hostname_extract(out_rec, prefs, dests): + dests['output'].put(statement) + + Progress('i') + + debug_out('Exiting ip_lookup_hostname', prefs, dests) + + + +def ip_lookup_traceroute_extract(ip_addr, prefs, dests): + """Lookup any information about an IP address and return as a set of tuples.""" + + if "ips_researched" not in ip_lookup_traceroute_extract.__dict__: + ip_lookup_traceroute_extract.ips_researched = ['', '0.0.0.0', '::', '0000:0000:0000:0000:0000:0000:0000:0000', '255.255.255.255'] #No point in looking these up + + + state_set = set([]) + + if ip_addr and not ip_addr.startswith(('127.', '169.254.', 'fe80:', 'FE80:')) and (ip_addr not in ip_lookup_traceroute_extract.ips_researched) and ((':' not in ip_addr) or (prefs['trace_v6'])): + ip_lookup_traceroute_extract.ips_researched.append(ip_addr) + + try: + compressed_path_to_ip = traceroute_hop_list(ip_addr, prefs['forced_interface'], prefs['per_packet_timeout'], prefs['hop_limit'], "") + except: + raise + + path_to_ip = [] + for one_hop in compressed_path_to_ip: + if one_hop is None or ':' not in one_hop: + path_to_ip.append(one_hop) else: - ShowPacket(p, meta, "IPv4/UDPv4/ntp Mode 7 reply but not REQ_MON_GETLIST_1", HonorQuit, prefs, dests) - else: - ShowPacket(p, meta, "IP/UDP/unhandled packet with NTPPrivate layer", HonorQuit, prefs, dests) + path_to_ip.append(explode_ip(one_hop, prefs, dests)) -#__ haslayer(NTPControl) - elif has_advanced_ntp_headers and p.haslayer(NTPControl): - if dport == "123": - ReportId("UC", sIP, "UDP_123", "open", 
'ntp_control/client', ([]), prefs, dests) - else: - ShowPacket(p, meta, "IP/UDP/unhandled packet with NTPControl layer", HonorQuit, prefs, dests) - - elif (not has_advanced_ntp_headers) and ((sport == "123") or (dport == "123")): - UnhandledPacket(p, prefs, dests) #Unfortunately, this version of scapy is too old to handle the new NTP headers. -### IP/UDP/pwdgen=129 https://tools.ietf.org/html/rfc972 - elif (dport == "129") and (Payload == b'\n'): - ReportId("UC", sIP, "UDP_" + dport, "open", "pwdgen/client", ([]), prefs, dests) -### IP/UDP/135 - elif sIP.startswith('64.39.99.') and dport == "135" and Payload.endswith(b'QUALYSGUARD123'): - ReportId("UC", sIP, "UDP_" + dport, "open", "epmap/clientscanner", (['scan', ]), prefs, dests) - elif dport == "135" and Payload.find(b'NTLMSSP') > -1: - ReportId("UC", sIP, "UDP_" + dport, "open", "epmap/client", ([]), prefs, dests) - -#__ haslayer(NBNSQueryRequest) -### IP/UDP/netbios-ns=137 query - elif p.haslayer(NBNSQueryRequest): - if dport == "137": - if meta['dMAC'] == "ff:ff:ff:ff:ff:ff": #broadcast - ReportId("UC", sIP, "UDP_" + dport, "open", "netbios-ns/broadcastclient", ([]), prefs, dests) - elif Payload and (Payload.find(b'CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA') > -1): #wildcard - ReportId("UC", sIP, "UDP_" + dport, "open", "netbios-ns/wildcardclient", (['amplification', 'spoofed']), prefs, dests) + + for i in range(len(path_to_ip)-1): + #path_to_ip[i] path_to_ip[i+1] are consecutive pairs of hosts on the way to the target ip. Either or both may be None (means no response at this TTL). + if path_to_ip[i] and path_to_ip[i+1] and path_to_ip[i] != path_to_ip[i+1]: + #Both are actual IP addresses. + state_set.add(("PE", path_to_ip[i], "traceroute", "precedes", path_to_ip[i+1], ())) + state_set.add(("PE", path_to_ip[i+1], "traceroute", "is_beyond", path_to_ip[i], ())) + + for i in range(1, len(path_to_ip)-1): #Register all the middle IPs... + if path_to_ip[i]: #...that responded... 
+ state_set.add(("RO", path_to_ip[i], "TTLEx", "router", "", ())) #...as routers. + + if path_to_ip[-1]: #If we got a response as the last entry in the list... + state_set.add(("US", path_to_ip[-1], "UDP_33434", "closed", "udptraceroute/server", ())) #...tag it as a traceroute responder + + for one_hop in path_to_ip: + if one_hop and one_hop not in ip_lookup_traceroute_extract.ips_researched: + ip_lookup_traceroute_extract.ips_researched.append(one_hop) #We add all the IPs on the way to the target as "researched" as we just effectively tracerouted to the intermediate routers as well. + + return state_set + + + +def ip_lookup_traceroute_handler(ilth_name, ip_lookup_traceroute_q, sh_da, prefs, dests): + """Lookup any additional information about IP addresses.""" + #ilth_name is the unique name for this process, such as ip_lookup_traceroute_2 + + if prefs['active'] and prefs['scapy_traceroute_loaded']: + os.nice(nice_raise+2) #Lower priority to give higher priority to critical tasks + debug_out(whatami(ilth_name), prefs, dests) + + while True: + try: + out_rec = ip_lookup_traceroute_q.get(block=True, timeout=None) + except KeyboardInterrupt: + break else: - ReportId("UC", sIP, "UDP_" + dport, "open", "netbios-ns/unicastclient", ([]), prefs, dests) - UnhandledPacket(p, prefs, dests) - else: - ShowPacket(p, meta, "IP/UDP/unhandled packet with NBNSQueryRequest layer", HonorQuit, prefs, dests) - -#__ haslayer(NBNSQueryResponse) -### IP/UDP/netbios-ns=137 response - elif p.haslayer(NBNSQueryResponse): - if sport == "137": - netbios_hostname = p[NBNSQueryResponse].RR_NAME.rstrip().rstrip(nullbyte).decode('UTF-8') - netbios_address = p[NBNSQueryResponse].NB_ADDRESS.rstrip().decode('UTF-8') - ReportId("US", sIP, "UDP_" + sport, "open", "netbios-ns", ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "netbios-ns" - ReportId("NA", netbios_address, "PTR", netbios_hostname, "netbios-ns", ([]), prefs, dests) - else: - ShowPacket(p, meta, 
"IP/UDP/unhandled packet with NBNSQueryResponse layer", HonorQuit, prefs, dests) - -#__ haslayer(NBTDatagram) -### IP/UDP/netbios-dgm=138 query - elif p.haslayer(NBTDatagram): - netbios_hostname = p[NBTDatagram].SourceName.rstrip().decode('UTF-8') - ReportId("NA", sIP, "PTR", netbios_hostname, "netbios-dgm", ([]), prefs, dests) - if (sport == "138") and (dport == "138"): - ReportId("US", sIP, "UDP_" + sport, "open", "netbios-dgm", ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "netbios-dgm" - elif sport == "138": - ReportId("US", sIP, "UDP_" + sport, "open", "netbios-dgm", ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "netbios-dgm" - elif dport == "138": - ReportId("UC", sIP, "UDP_" + dport, "open", "netbios-dgm/" + meta['cast_type'] + "client", ([]), prefs, dests) - else: - ShowPacket(p, meta, "IP/UDP/unhandled packet with NBTDatagram layer", HonorQuit, prefs, dests) - -#__ haslayer(SNMP) -### IP/UDP/SNMP=161 - elif p.haslayer(SNMP): - #FIXME - extracting snmp community string? 
- #type(p[SNMP].community) - #p[SNMP].show() - #snmp_community_string = remove_control_characters(str(p[SNMP].community.decode('utf-8'))).strip(' \t\r\n\0') - #if dport == "161" and (p.haslayer(SNMPget) or p.haslayer(SNMPbulk) or p.haslayer(SNMPvarbind)): - # if ShowCredentials: - # ReportId("UC", sIP, "UDP_" + dport, "open", "snmp/client community string:" + snmp_community_string, (['plaintext', ]), prefs, dests) - # else: - # ReportId("UC", sIP, "UDP_" + dport, "open", 'snmp/client', (['plaintext']), prefs, dests) - #elif sport == "161" and p.haslayer(SNMPresponse): - # if ShowCredentials: - # ReportId("US", sIP, "UDP_" + sport, "open", "snmp/server community string:" + snmp_community_string, (['plaintext', ]), prefs, dests) - # process_udp_ports.UDPManualServerDescription[FromPort] = "snmp/server community string:" + snmp_community_string - # else: - # ReportId("US", sIP, "UDP_" + sport, "open", 'snmp/server', (['plaintext', ]), prefs, dests) - # process_udp_ports.UDPManualServerDescription[FromPort] = "snmp/server" + if out_rec is None: + break + out_rec = explode_ip(out_rec, prefs, dests) + + if not out_rec.startswith(('127.')): + for statement in ip_lookup_traceroute_extract(out_rec, prefs, dests): + dests['output'].put(statement) + + Progress('r') + + debug_out('Exiting ' + ilth_name, prefs, dests) + elif prefs['active'] and not prefs['scapy_traceroute_loaded']: + debug_out('Unable to load scapy_traceroute module, exiting ' + ilth_name, prefs, dests) + + + +def host_lookup_extract(host_name, prefs, dests): + """Lookup any information about a host and return as a set of tuples.""" + + state_set = set([]) + + if host_name: + if not host_name.endswith("."): + host_name += '.' 
+ + try: + for one_result in socket.getaddrinfo(host_name, None, socket.AF_INET): + af, _, _, _, sa = one_result #Don't need socktype, proto, canonname + + if af == socket.AF_INET: + state_set.add(("DN", explode_ip(sa[0], prefs, dests), "A", host_name, "", ())) + elif af == socket.AF_INET6: + state_set.add(("DN", explode_ip(sa[0], prefs, dests), "AAAA", host_name, "", ())) + else: + pass + except (socket.gaierror, KeyboardInterrupt, UnicodeError): + return state_set + + try: + for one_result in socket.getaddrinfo(host_name, None, socket.AF_INET6): + af, _, _, _, sa = one_result #Don't need socktype, proto, canonname + + if af == socket.AF_INET: + state_set.add(("DN", explode_ip(sa[0], prefs, dests), "A", host_name, "", ())) + elif af == socket.AF_INET6: + state_set.add(("DN", explode_ip(sa[0], prefs, dests), "AAAA", host_name, "", ())) + else: + pass + except (socket.gaierror, KeyboardInterrupt, UnicodeError): + return state_set + + + #try: + # resolved_ip_addr = socket.gethostbyname(host_name) + #except: + # pass #else: - ShowPacket(p, meta, "IP/UDP/unhandled packet with SNMP layer", HonorQuit, prefs, dests) - - elif sport == "161" or dport == "161": - UnhandledPacket(p, prefs, dests) -### IP/UDP/svrloc=427 https://tools.ietf.org/html/rfc2608 - elif dport == "427" and Payload and (Payload.find(b'service:') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "svrloc/client", ([]), prefs, dests) -### IP/UDP/isakmp=500 - elif (sport == "500") and (dport == "500") and isinstance(p[ISAKMP], ISAKMP) and (p[ISAKMP].init_cookie != ''): - ReportId("US", sIP, "UDP_" + sport, "open", "isakmp/generic", ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "isakmp/generic" -### IP/UDP/biff=512 - elif dport == "512" and Payload and (Payload.find(b'@') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "biff/client", ([]), prefs, dests) -### IP/UDP/syslog=514 https://www.ietf.org/rfc/rfc3164.txt - elif dport == "514" and Payload and 
Payload.startswith(b'<') and (Payload[2] == b'>' or Payload[3] == b'>' or Payload[4] == b'>'): - ReportId("UC", sIP, "UDP_" + dport, "open", "syslog/client", (['plaintext', ]), prefs, dests) - ReportId("US", dIP, "UDP_" + dport, "open", "syslog/server not confirmed", (['plaintext', ]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "syslog/server not confirmed" - - hostname_and_process = SyslogMatch.search(Payload) - if (hostname_and_process is not None) and (len(hostname_and_process.groups()) >= 2): - syslog_hostname = hostname_and_process.group(1) - ReportId("NA", sIP, "PTR", syslog_hostname, "syslog", (['plaintext', ]), prefs, dests) - process_name = hostname_and_process.group(2) - ReportId("IP", sIP, "IP", "live", 'running process: ' + process_name, (['plaintext', ]), prefs, dests) - else: - #ShowPacket(p, meta, "Syslog that does not match regex", HonorQuit, prefs, dests) - UnhandledPacket(p, prefs, dests) -### IP/UDP/snmp on alternate ports - elif (dport in snmp_altport) and Payload and (Payload.find(b'public') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "snmp-altport/client", (['nonstandardport', ]), prefs, dests) -### IP/UDP/ibm-db2=523 client - elif (dport == "523") and Payload and (Payload.find(b'DB2GETADDR') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "ibm-db2/clientscanner", (['scan', ]), prefs, dests) -### IP/UDP/DHCPv6=547 request - elif meta['ip_class'] == '6' and (sport == "546") and (dport == "547") and dIP in ("ff02::1:2", "ff02:0000:0000:0000:0000:0000:0001:0002"): - ReportId("UC", sIP, "UDP_" + dport, "open", "UDP DHCPv6", ([]), prefs, dests) - elif meta['ip_class'] == '6' and (sport == "546") and (dport == "547"): #dhcp request - ShowPacket(p, meta, "IPv6/UDPv6/546-547-ff02::1:2 DHCP Request", HonorQuit, prefs, dests) -### IP/UDP/DHCPv6=547 reply - elif meta['ip_class'] == '6' and (sport == "547") and (dport == "546"): - pass -### IP/UDP/626 serialnumberd https://svn.nmap.org/nmap/nmap-payloads - elif 
(dport == "626") and (Payload == b'SNQUERY: 127.0.0.1:AAAAAA:xsvr'): #nmap serialnumberd scan - ReportId("UC", sIP, "UDP_" + dport, "open", "serialnumberd/clientscanner likely nmap scan", (['scan', ]), prefs, dests) -### IP/UDP/636,992,993 make sure this follows snmp_altport line Payload contains \x03www\x03163\x03com - elif dport in www163com_ports and Payload and (Payload.find(www163com_payload) > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "scan_www163com/client", (['scan', ]), prefs, dests) -### IP/UDP/udp-ldaps=636 - elif dport in fenull_scan_names and Payload.startswith(b"8") and Payload.endswith(fenulls): - ReportId("UC", sIP, "UDP_" + dport, "open", fenull_scan_names[dport] + "/client", (['scan', ]), prefs, dests) -### IP/UDP/loadav=750 - elif dport == '750' and Payload and Payload.find(nullbyte + 'NESSUS.ORG' + nullbyte) > -1: - if sIP in nessus_scan_ips: - ReportId("UC", sIP, "UDP_" + dport, "open", "loadav/clientscanner nessus scanner", (['scan', ]), prefs, dests) - else: - ReportId("UC", sIP, "UDP_" + dport, "open", "loadav/clientscanner nessus unregistered scanner IP address", (['scan', ]), prefs, dests) -### IP/UDP/winpopup winpopup spam client - elif dport in ("1026", "1027", "1028") and Payload and ((Payload.find(b'Download Registry Update from:') > -1) or (Payload.find(b'CRITICAL ERROR MESSAGE! 
- REGISTRY DAMAGED AND CORRUPTED.') > -1) or (Payload.find(b'Your system registry is corrupted and needs to be cleaned immediately.') > -1) or (Payload.find(b'CRITICAL SYSTEM ERRORS') > -1)): - ReportId("UC", sIP, "UDP_" + dport, "open", "winpopup/spamclient", (['malicious', ]), prefs, dests) -### IP/UDP/sharemouse=1046 rc_iamhere sharemouse https://www.hybrid-analysis.com/sample/ca51df55d9c938bf0dc2ecbc10b148ec5ab8d259f3ea97f719a1a498e128ee05?environmentId=100 - elif sport == "1046" and dport == "1046" and (meta['dMAC'] == "ff:ff:ff:ff:ff:ff") and Payload and Payload.startswith(b'rc_iamhere:6555:0:0:'): - ReportId("UC", sIP, "UDP_" + dport, "open", "sharemouse/broadcastclient rc_iamhere sharemouse trojan", (['malicious', ]), prefs, dests) - ReportId("NA", sIP, "NA", Payload[20:], "sharemouse trojan", (['malicious', ]), prefs, dests) -### IP/UDP/udp1124=1124 used by printers - elif (dport == "1124") and (meta['dMAC'] == "ff:ff:ff:ff:ff:ff") and Payload and (Payload.find(b'std-scan-discovery-all') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "udp1124/broadcast", ([]), prefs, dests) -### IP/UDP/search-agent=1234 used by stora NAS - elif (dport == "1234") and (meta['dMAC'] == "ff:ff:ff:ff:ff:ff") and Payload and (Payload.find(b'Hello there. I am at ') > -1): - HostnameMatch = StoraHostnameMatch.search(Payload) - if (HostnameMatch is not None) and (len(HostnameMatch.groups()) >= 1): - ReportId("UC", sIP, "UDP_" + dport, "open", "stora_nas_scan/broadcast hostname: " + HostnameMatch.group(1), ([]), prefs, dests) - else: - ReportId("UC", sIP, "UDP_" + dport, "open", "stora_nas_scan/broadcast", ([]), prefs, dests) -### IP/UDP/mssql=1434 Probable mssql attack - elif dport == "1434" and Payload and (Payload.find(b'Qh.dll') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "mssql/clientattack", (['malicious', ]), prefs, dests) - elif dport == "1434" and Payload and Payload in (twobyte, twozero): #https://portunus.net/2015/01/21/mc-sqlr-amplification/ . 
Text refers to a one-byte \x02, but I've seen \x02\x00 as well. - ReportId("UC", sIP, "UDP_" + dport, "open", "mssql/client nmap ping scan", (['amplification', 'ddos', 'scan']), prefs, dests) -### IP/UDP/kdeconnect=1716 - elif sport == "1716" and dport == "1716" and (meta['dMAC'] == "ff:ff:ff:ff:ff:ff") and Payload and (Payload.find(b'kdeconnect.') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "kdeconnect/broadcast", ([]), prefs, dests) - elif sport == "1716" and dport == "1716" and Payload and (Payload.find(b'kdeconnect.') > -1): - ReportId("US", sIP, "UDP_" + sport, "open", 'kdeconnect/server', ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "kdeconnect/server" - -#__ haslayer(Radius) -### IP/UDP/radius=1812 - elif p.haslayer(Radius): - if sport == "1812": - ReportId("US", sIP, "UDP_" + sport, "open", 'radius/server', ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "radius/server" - elif dport == "1812": - ReportId("UC", sIP, "UDP_" + dport, "open", 'radius/client', ([]), prefs, dests) - else: - ShowPacket(p, meta, "IP/UDP/unhandled packet with Radius layer", HonorQuit, prefs, dests) + #if resolved_ip_addr: + # state_set.add(("DN", explode_ip(resolved_ip_addr, prefs, dests), "A", host_name, "", ())) - elif (sport == "1813") and (dport == "1900"): #Scapy misparses this as Radius accounting, when it's SSDP. Ignore. 
- pass -### IP/UDP/ssdp=1900 https://embeddedinn.wordpress.com/tutorials/upnp-device-architecture/ - elif dport in ("1900", "1990", "32412", "32414") and dIP in ("255.255.255.255", "239.255.255.250", "ff02:0000:0000:0000:0000:0000:0000:000c", "ff05:0000:0000:0000:0000:0000:0000:000c", "ff08:0000:0000:0000:0000:0000:0000:000c", "ff0e:0000:0000:0000:0000:0000:0000:000c") and Payload and (Payload.startswith((b'M-SEARCH', b'B-SEARCH'))): #ssdp discover - if dport == "1900": - ssdp_warns = [] - else: - ssdp_warns = ['nonstandardport'] - #FIXME - pull in *cast type from meta - ReportId("UC", sIP, "UDP_" + dport, "open", "ssdp-discovery/broadmulticastclient", (ssdp_warns), prefs, dests) - elif (dport == "1900") and Payload and (Payload.startswith((b'M-SEARCH', b'B-SEARCH'))): #ssdp discover - ReportId("UC", sIP, "UDP_" + dport, "open", "ssdp-discovery/client", ([]), prefs, dests) - elif (dport == "1900") and dIP in ("255.255.255.255", "239.255.255.250", "ff02:0000:0000:0000:0000:0000:0000:000c", "ff05:0000:0000:0000:0000:0000:0000:000c", "ff08:0000:0000:0000:0000:0000:0000:000c", "ff0e:0000:0000:0000:0000:0000:0000:000c") and Payload and (Payload.startswith(b'NOTIFY')): #ssdp announcement - additional_info = '' - LocationMatch = SSDPLocationMatch.search(Payload) - if (LocationMatch is not None) and (len(LocationMatch.groups()) >= 1): - additional_info = additional_info + ' SSDP Location: ' + str(LocationMatch.group(1)).strip() - ServerMatch = SSDPServerMatch.search(Payload) - if (ServerMatch is not None) and (len(ServerMatch.groups()) >= 1): - additional_info = additional_info + ' SSDP Server: ' + str(ServerMatch.group(1)).replace(',', ' ').strip() - ReportId("UC", sIP, "UDP_" + dport, "open", "ssdp-announce/client" + additional_info, ([]), prefs, dests) - elif dport in ("1900", "11211") and Payload and (Payload == b'GET / HTTP/1.1\r\n\r\n'): #bogus GET packet - ReportId("UC", sIP, "UDP_" + dport, "open", "ssdp-bogus-get/clientscanner", (['scan', ]), prefs, dests) - elif 
(dport == "1900") and dIP in ("239.255.255.250", "ff02:0000:0000:0000:0000:0000:0000:000c", "ff05:0000:0000:0000:0000:0000:0000:000c", "ff08:0000:0000:0000:0000:0000:0000:000c", "ff0e:0000:0000:0000:0000:0000:0000:000c"): #ssdp - ShowPacket(p, meta, "IP/UDP/1900-multicast SSDP unknown method", HonorQuit, prefs, dests) -### IP/UDP/hsrp=1985 https://en.wikipedia.org/wiki/Hot_Standby_Router_Protocol https://tools.ietf.org/html/rfc2281 - elif sport in ("1985", "2029") and dport in ("1985", "2029") and meta['ttl'] == 1 and dIP in ('224.0.0.2', '224.0.0.102', 'ff02::66', 'ff02:0000:0000:0000:0000:0000:0000:0066'): - ReportId("UC", sIP, "UDP_" + dport, "open", "hsrp/multicastclient", ([]), prefs, dests) - ReportId("RO", sIP, "HSRP", "router", "", ([]), prefs, dests) -### IP/UDP/ethernetip=2222 http://kazanets.narod.ru/files/Acro_ethernetIP_747a.pdf , see "CIP Encapsulation Message" - elif (dport == "2222") and Payload and Payload.startswith(ethernetip_list_identity): - ReportId("UC", sIP, "UDP_" + dport, "open", "ethernetip/clientscanner", (['scan', ]), prefs, dests) -### IP/UDP/msopid=2223 http://www.crufty.net/sjg/blog/osx-and-office-do-not-mix.htm - elif (dport == "2223") and (meta['cast_type'] == "broadcast") and Payload and Payload.startswith(b'MSOPID'): - ReportId("UC", sIP, "UDP_" + dport, "open", "msopid/clientscanner", (['scan', ]), prefs, dests) -### IP/UDP/digiman=2362 - elif (dport == "2362") and Payload and Payload.startswith(b'DIGI'): - ReportId("UC", sIP, "UDP_" + dport, "open", "digiman/client", ([]), prefs, dests) -### IP/UDP/sybase=2638 - elif (dport == "2638") and Payload and (Payload.find(b'CONNECTIONLESS_TDS') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "sybase/client", (['scan', ]), prefs, dests) -### IP/UDP/mdap-port=3235 - elif (dport == "3235") and Payload and Payload.startswith(b'ANT-SEARCH MDAP/1.1'): - ReportId("UC", sIP, "UDP_" + dport, "open", "mdap-port/client", ([]), prefs, dests) -### IP/UDP/enpc=3289 - elif (dport == "3289") and 
(meta['dMAC'] == "ff:ff:ff:ff:ff:ff"): - if Payload and (Payload.startswith(b'EPSON')): - ReportId("UC", sIP, "UDP_" + dport, "open", "enpc/broadcast", ([]), prefs, dests) - else: - UnhandledPacket(p, prefs, dests) -### IP/UDP/teredo=3544 https://tools.ietf.org/html/rfc4380 - elif (dport == "3544") and Payload: #and Payload.startswith(fournulls): #Signature needs improvement - ReportId("UC", sIP, "UDP_" + dport, "open", "teredo/client", ([]), prefs, dests) - UnhandledPacket(p, prefs, dests) -### IP/UDP/upnp-discovery=3702 - elif (dport == "3702") and Payload and (Payload.startswith(b' -1): - if dIP in ("239.255.255.250", "ff02::c", "ff02:0000:0000:0000:0000:0000:0000:000c"): - ReportId("UC", sIP, "UDP_" + dport, "open", "upnp-discovery/broadcastclient", ([]), prefs, dests) - else: - ReportId("UC", sIP, "UDP_" + dport, "open", "upnp-discovery/client", ([]), prefs, dests) -### IP/UDP/bfd-control=3784 https://tools.ietf.org/html/rfc5881 - elif (dport == "3784") and (meta['ttl'] == 255): - #FIXME - add check that sport must be between 49152 and 65535 - ReportId("UC", sIP, "UDP_" + dport, "open", "bfd-control/client", ([]), prefs, dests) -### IP/UDP/xpl=3865 - elif (dport == "3865") and (dIP == "255.255.255.255"): #XPL, http://wiki.xplproject.org.uk/index.php/Main_Page - ReportId("UC", sIP, "UDP_" + dport, "open", "xpl/client", ([]), prefs, dests) -### IP/UDP/vertx=4070 https://github.com/brad-anton/VertX/blob/master/VertX_Query.py - elif (dport == "4070") and (Payload == b'discover;013;'): - ReportId("UC", sIP, "UDP_" + dport, "open", "vertx/client", (['scan', ]), prefs, dests) - -#__ haslayer(ESP) -### IP/UDP/esp=4500 https://learningnetwork.cisco.com/thread/76175 - elif p.haslayer(ESP): - if dport == "4500": - if p[ESP].data == 'TP/1.1\r\nHost: www\r\n\r\n': - ReportId("UC", sIP, "UDP_" + dport, "open", "esp/client", (['scan', 'tunnel']), prefs, dests) - else: - ReportId("UC", sIP, "UDP_" + dport, "open", "esp/client", (['tunnel', ]), prefs, dests) - elif sport 
== "4500": - ReportId("US", sIP, "UDP_" + sport, "open", "esp/server", (['tunnel', ]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "esp/server" - else: - ShowPacket(p, meta, "IP/UDP/unhandled packet with ESP layer", HonorQuit, prefs, dests) - -### IP/UDP/drobo=5002 used by drobo NAS - elif (dport == "5002") and Payload and Payload.startswith(b'DRINETTM'): - ReportId("UC", sIP, "UDP_" + dport, "open", "drobo_nas_scan/" + meta['cast_type'] + "client", ([]), prefs, dests) -### IP/UDP/vonage - elif (sport == "5061") and (dport == "5061") and (dIP in vonage_sip_servers): #Vonage SIP client - if Payload and (Payload.find(b'.vonage.net:5061 SIP/2.0') > -1): - SipMatch = process_udp_ports.SipPhoneMatch.search(Payload) - if (SipMatch is not None) and (len(SipMatch.groups()) >= 1): - ReportId("UC", sIP, "UDP_" + dport, "open", "sip/vonage_client, phone number: " + SipMatch.group(1), ([]), prefs, dests) + return state_set + + + +def host_lookup_handler(host_lookup_q, sh_da, prefs, dests): + """Lookup any additional information about hostnames.""" + + if prefs['active']: + os.nice(nice_raise+2) #Lower priority to give higher priority to critical tasks + debug_out(whatami('host_lookup'), prefs, dests) + + if "ips_researched" not in host_lookup_handler.__dict__: + host_lookup_handler.ips_researched = [] + + #Code left in in case we switch to dnspython/dns.resolver + #if "unhandled_h" not in unhandled_handler.__dict__: + # unhandled_handler.unhandled_h = None + # if prefs['unhandled']: + # try: + # unhandled_handler.unhandled_h = PcapWriter(filename=prefs['unhandled'], append=True) + # except: + # debug_out("Unable to open " + prefs['unhandled'] + ", no unhandled packets will be saved.", prefs, dests) + + #if unhandled_handler.unhandled_h is not None: + while True: + try: + out_rec = host_lookup_q.get(block=True, timeout=None) + except KeyboardInterrupt: + break else: - ReportId("UC", sIP, "UDP_" + dport, "open", "sip/vonage_client", ([]), prefs, 
dests) - else: - UnhandledPacket(p, prefs, dests) - elif (sport == "5061") and (dport == "5061") and (sIP in vonage_sip_servers): #Vonage SIP server - if Payload and (Payload.find(b'.vonage.net:5061>') > -1): - ReportId("US", sIP, "UDP_" + sport, "open", "sip/vonage_server", ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "sip/vonage_server" - else: - UnhandledPacket(p, prefs, dests) -### IP/UDP/nat-pmp=5351 http://miniupnp.free.fr/nat-pmp.html , https://tools.ietf.org/html/rfc6886 - elif dport == "5351": - if Payload and Payload.startswith(nullbyte * 2): #\x00\x00 is Public address request - ReportId("UC", sIP, "UDP_" + dport, "open", "nat-pmp-public-address-discovery/client", (['scan', ]), prefs, dests) - elif Payload and Payload.startswith((zeroone, zerotwo)): #\x00\x0[12] is mapping request - ReportId("UC", sIP, "UDP_" + dport, "open", "nat-pmp-mapping-request/client", ([]), prefs, dests) - else: - ShowPacket(p, meta, "IPv4/UDPv4/5351 nat-pmp unknown payload", HonorQuit, prefs, dests) + if out_rec is None: + break + if out_rec not in host_lookup_handler.ips_researched: + host_lookup_handler.ips_researched.append(out_rec) -#__ haslayer(LLMNRQuery) -### IP/UDP/llmnr=5355 query - elif p.haslayer(LLMNRQuery): - if (dport == "5355") and dIP in ("224.0.0.252", "ff02::1:3", "ff02:0000:0000:0000:0000:0000:0001:0003") and (meta['ttl'] in (1, 255)) and (p[LLMNRQuery].qr == 0): #llmnr (link-local multicast node resolution) - UnhandledPacket(p, prefs, dests) - else: - ShowPacket(p, meta, "IP/UDP/unhandled packet with LLMNRQuery layer", HonorQuit, prefs, dests) - -### IP/UDP/llmnr=5355 response - elif (dport == "5355") and dIP in ("224.0.0.252", "ff02::1:3", "ff02:0000:0000:0000:0000:0000:0001:0003") and (meta['ttl'] in (1, 255)): #llmnr (link-local multicast node resolution) - ShowPacket(p, meta, "IP/UDP/5355-224.0.0.252,ff02::1:3 llmnr not query", HonorQuit, prefs, dests) - #Can we pass this off to PUDR? 
- elif dport == "5355": #unicast fe80->fe80 llmnr (link-local multicast node resolution) - ShowPacket(p, meta, "IP/UDP/5355 unicast llmnr not to 224.0.0.252,1:3", HonorQuit, prefs, dests) -### IP/UDP/corosync=5405 used by corosync - elif (dport == "5405") and (meta['dMAC'] == "ff:ff:ff:ff:ff:ff"): - ReportId("UC", sIP, "UDP_" + dport, "open", "corosync/broadcast", ([]), prefs, dests) -### IP/UDP/pcanywherestat=5632 client - elif (dport == "5632") and Payload and (Payload.find(b'NQ') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "pcanywherestat/clientscanner", (['scan', ]), prefs, dests) - elif (sport == "6515") and (dport == "6514") and (dIP == "255.255.255.255"): #mcafee ASaP broadcast, looking for a proxy out. http://www.myasap.de/intl/EN/content/virusscan_asap/faq_new.asp - if Payload and (Payload.find(b' -1)): # '@' confirmable, 'P' non-confirmable, '`' acknowledgement, or 'p' Reset (The acknowledgment and reset may have to go in sport == "5683" instead) - ReportId("UC", sIP, "UDP_" + dport, "open", "coap/client", ([]), prefs, dests) -### IP/UDP/bt-lpd=6771 https://security.stackexchange.com/questions/102766/wireshark-reveals-suspicious-udp-traffic-sending-to-a-bogon-ip-address - elif (dport == "6771") and (dIP == "239.192.152.143") and Payload and (Payload.startswith(b'BT-SEARCH * HTTP/1.1')): - ReportId("UC", sIP, "UDP_" + dport, "open", "bt-lpd/client", ([]), prefs, dests) -### IP/UDP/unreal_status=7778 https://arp242.net/weblog/online_unreal_tournament_server_browser_with_pcntl_fork() - elif (dport == "7778") and Payload and Payload.startswith(b'\\status\\'): - ReportId("UC", sIP, "UDP_" + dport, "open", "unreal_status/client", ([]), prefs, dests) -### IP/UDP/kissdvd=8000 https://www.tapatalk.com/groups/helplinedirect/getting-linksys-kiss-1600-to-work-with-ubuntu-t35.html - elif (dport == "8000") and Payload and Payload == b'ARE_YOU_KISS_PCLINK_SERVER?': - ReportId("UC", sIP, "UDP_" + dport, "open", "kissdvd/client", (['scan', ]), prefs, dests) -### 
IP/UDP/canon-bjnp2=8610 - elif (dport == "8610") and meta['cast_type'] and Payload and (Payload.startswith(b'MFNP')): - ReportId("UC", sIP, "UDP_" + dport, "open", "udp8610/" + meta['cast_type'], ([]), prefs, dests) -### IP/UDP/canon-bjnp2=8612 https://support.usa.canon.com/kb/index?page=content&id=ART109227 - elif dport in ("8612", "8613") and meta['cast_type'] and Payload and (Payload.startswith(b'BJNP')): - ReportId("UC", sIP, "UDP_" + dport, "open", "canon-bjnp2/" + meta['cast_type'], ([]), prefs, dests) - elif dport in ("8612", "8613") and dIP in ('ff02::1', 'ff02:0000:0000:0000:0000:0000:0000:0001') and Payload and (Payload.startswith(b'BJNP')): - ReportId("UC", sIP, "UDP_" + dport, "open", "canon-bjnp2/client", ([]), prefs, dests) -### IP/UDP/canon-bjnb-bnjb=8612 - elif (dport == "8612") and meta['cast_type'] and Payload and (Payload.startswith((b'BNJB', b'BJNB'))): - ReportId("UC", sIP, "UDP_" + dport, "open", "canon-bjnb-bnjb/" + meta['cast_type'], ([]), prefs, dests) -### IP/UDP/itunesdiscovery=8765 - elif dport == "8765": #XPL, http://wiki.xplproject.org.uk/index.php/Main_Page - ReportId("UC", sIP, "UDP_" + dport, "open", "itunesdiscovery/broadcast", ([]), prefs, dests) #'portonlysignature' -### IP/UDP/sunwebadmin=8800 - elif dport == "8800" and Payload and Payload.startswith(b'DHGET'): #http://sites.ieee.org/neworleans/files/2016/12/12052016-Presentation-IoT-security-website-copy.pdf - ReportId("UC", sIP, "UDP_" + dport, "open", "sunwebadmin/client possibly Mirai", (['dos', ]), prefs, dests) -### IP/UDP/aoldns - elif (sport in ("9052", "9053", "9054")) and (sIP in aol_dns_servers): #Possibly AOL dns response - if Payload and (Payload.find(b'dns-01') > -1): - ReportId("US", sIP, "UDP_" + sport, "open", "aoldns/server", ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "aoldns/server" - else: - UnhandledPacket(p, prefs, dests) -### IP/UDP/teamspeak3=9987,59596 client 
https://github.com/TeamSpeak-Systems/ts3init_linux_netfilter_module - elif dport in ("9987", "59596") and Payload and (Payload.startswith(b'TS3INIT1')): - ReportId("UC", sIP, "UDP_" + dport, "open", "teamspeak3/clientscanner", (['scan', 'dos', ]), prefs, dests) -### UP/UDP/ubnt-discover=10001 https://github.com/headlesszeke/ubiquiti-probing - elif dport == "10001" and Payload and (Payload == ubiquiti_discover): - ReportId("UC", sIP, "UDP_" + dport, "open", "ubnt-discover/clientscanner", (['scan', ]), prefs, dests) -### IP/UDP/memcached=11211 https://blog.cloudflare.com/memcrashed-major-amplification-attacks-from-port-11211/ https://github.com/memcached/memcached/blob/master/doc/protocol.txt - elif dport in ("1121", "11211") and Payload: - if ((Payload.find(b'gets ') > -1) or (Payload.find(b'stats') > -1)): - ReportId("UC", sIP, "UDP_" + dport, "open", 'memcached/client: Likely spoofed and DDOSed source IP', (['amplification', 'malicious', 'spoofed']), prefs, dests) - elif Payload.find(b'version') > -1: - ReportId("UC", sIP, "UDP_" + dport, "open", 'memcached/client', (['scan', ]), prefs, dests) - else: - ShowPacket(p, meta, "IP/UDP/memcached=1121 or 11211 request but non-gets/stats/version", HonorQuit, prefs, dests) - elif sport == "11211": - ReportId("US", sIP, "UDP_" + sport, "open", 'memcached/server', ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "memcached/server" -### IP/UDP/zmapscanner=1707,3269,3544,6619,1121[45] https://zmap.io/ , https://github.com/zmap/zmap - elif dport in zmap_host_www_ports and (Payload == b'GET / HTTP/1.1\r\nHost: www\r\n\r\n'): - ReportId("UC", sIP, "UDP_" + dport, "open", 'zmapscanner/client', (['scan', ]), prefs, dests) -### IP/UDP/makerbotdiscovery=12307 https://github.com/gryphius/mini-makerbot-hacking/blob/master/doc/makerbotmini.md - elif (sport == "12309") and (dport == "12307") and meta['cast_type']: - if Payload and (Payload.startswith(b'{"command": "broadcast"')): - 
ReportId("UC", sIP, "UDP_" + dport, "open", "makerbotdiscovery/" + meta['cast_type'], ([]), prefs, dests) -### IP/UDP/12314 - elif dport == "12314" and Payload and Payload.startswith(fournulls): #Actually,lots more nulls than 4. - ReportId("UC", sIP, "UDP_" + dport, "open", 'udp12314/client', (['scan', ]), prefs, dests) -### IP/UDP/dropbox=17500 http://whatportis.com/ports/17500_dropbox-lansync-protocol-db-lsp-used-to-synchronize-file-catalogs-between-dropbox-clients-on-your-local-network - elif (sport == "17500") and (dport == "17500"): - if Payload and (Payload.find(b'"host_int"') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "dropbox/client", ([]), prefs, dests) - else: - UnhandledPacket(p, prefs, dests) -### IP/UDP/googlemeet=19302-19309 - elif (dport in meet_ports) and (dIP in meet_hosts): - ReportId("UC", sIP, "UDP_" + dport, "open", "googlemeet/client", ([]), prefs, dests) - elif (sport in meet_ports) and (sIP in meet_hosts): - ReportId("US", sIP, "UDP_" + sport, "open", "googlemeet/server", ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "googlemeet/server" - elif dport in meet_ports: - ReportId("UC", sIP, "UDP_" + dport, "open", "googlemeet/client missing dIP:" + dIP, ([]), prefs, dests) #'portonlysignature' - elif sport in meet_ports: - ReportId("US", sIP, "UDP_" + sport, "open", "googlemeet/server missing sIP:" + sIP, ([]), prefs, dests) #'portonlysignature' - process_udp_ports.UDPManualServerDescription[FromPort] = "googlemeet/server missing sIP:" + sIP -### IP/UDP/develo=19375 https://flambda.de/2013/06/18/audioextender/ https://ubuntuforums.org/showthread.php?t=1942539 https://www2.devolo.com/products/dLAN-Powerline-1485-Mbps/dLAN-Wireless-extender/data/Data-sheet-dLAN-Wireless-extender-Starter-Kit-com.pdf - elif dport == "19375" and meta['cast_type'] and Payload.startswith(b'whoisthere'): - ReportId("UC", sIP, "UDP_" + dport, "open", "develo/" + meta['cast_type'] + "client", ([]), prefs, dests) #Note, payload is 
"whoisthere\x00' + str(ip.address) + '\x00' + str(subnet_mask) + '\x00\x001\x00' -### IP/UDP/skype=all over the place - elif (dport in skype_ports) and (dIP in skype_hosts): - ReportId("UC", sIP, "UDP_" + dport, "open", "skype/client", ([]), prefs, dests) - elif (sport in skype_ports) and (sIP in skype_hosts): - ReportId("US", sIP, "UDP_" + sport, "open", "skype/server", ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "skype/server" - elif dIP in skype_hosts: - ReportId("UC", sIP, "UDP_" + dport, "open", "skype/client, missing dport:" + dport, ([]), prefs, dests) - elif sIP in skype_hosts: - ReportId("US", sIP, "UDP_" + sport, "open", "skype/server, missing sport:" + sport, ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "skype/server, missing sport:" + sport - elif dport in skype_ports: - ReportId("UC", sIP, "UDP_" + dport, "open", "skype/client missing dIP:" + dIP, ([]), prefs, dests) #'portonlysignature' - elif sport in skype_ports: - ReportId("US", sIP, "UDP_" + sport, "open", "skype/server missing sIP:" + sIP, ([]), prefs, dests) #'portonlysignature' - process_udp_ports.UDPManualServerDescription[FromPort] = "skype/server missing sIP:" + sIP -### IP/UDP/pyzor=24441 - elif dport == "24441": #Pyzor - if Payload and (Payload.find(b'User:') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "pyzor/client", ([]), prefs, dests) - else: - UnhandledPacket(p, prefs, dests) -### IP/UDP/unknown26079 - elif (sport == "26079") or (dport == "26079") or sIP in ("52.179.141.141", "100.112.42.45") or dIP in ("52.179.141.141", "100.112.42.45"): - UnhandledPacket(p, prefs, dests) -### IP/UDP/halflife=27005 and others - elif (sport == "27005") and (dport in ('27015', '27016', '27017')): #Halflife client live game - ReportId("UC", sIP, "UDP_" + dport, "open", "halflife/client", ([]), prefs, dests) #'portonlysignature' - elif (dport == "27013") and (dIP == "207.173.177.12"): #variable payload, so can't Payload and 
(Payload.find(b'Steam.exe') > -1) #Halflife client - ReportId("UC", sIP, "UDP_" + dport, "open", "halflife/client", ([]), prefs, dests) - elif (sport == "27013") and (sIP == "207.173.177.12"): #halflife server - ReportId("US", sIP, "UDP_" + sport, "open", "halflife/server", ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "halflife/server" - elif (sport in '27015', '27016', '27017') and (dport == "27005"): #halflife server live game - ReportId("US", sIP, "UDP_" + sport, "open", "halflife/server", ([]), prefs, dests) #'portonlysignature' - process_udp_ports.UDPManualServerDescription[FromPort] = "halflife/server" - elif dport in ("27015", "27016", "27025", "27026"): #Variable payload, so can't: Payload and (Payload.find(b'basic') > -1) #Halflife client - ReportId("UC", sIP, "UDP_" + dport, "open", "halflife/client", ([]), prefs, dests) #'portonlysignature' - elif sport in ("27015", "27016", "27025", "27026"): #Variable payload, so can't: Payload and (Payload.find(b'basic') > -1) #Halflife client - ReportId("US", sIP, "UDP_" + sport, "open", "halflife/server", ([]), prefs, dests) #'portonlysignature' - process_udp_ports.UDPManualServerDescription[FromPort] = "halflife/server" - elif (dport == "27017") and (dIP in SteamFriendsServers): #Steamfriends client - if Payload and (Payload.find(b'VS01') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "steamfriends/client", ([]), prefs, dests) - else: - UnhandledPacket(p, prefs, dests) - elif (sport == "27017") and (sIP in SteamFriendsServers): #Steamfriends server - if Payload and (Payload.find(b'VS01') > -1): - ReportId("US", sIP, "UDP_" + sport, "open", "steamfriends/server", ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "steamfriends/server" + for statement in host_lookup_extract(out_rec, prefs, dests): + dests['output'].put(statement) + Progress('h') + + debug_out('Exiting host_lookup', prefs, dests) + + + +#def template_handler(task_q, sh_da, prefs, 
dests): +# """Extracts all needed information from the template layer.""" +# +# os.nice(nice_raise + 2) #Lower priority to give higher priority to critical tasks +# debug_out(whatami('template'), prefs, dests) +# +# while True: +# try: +# (p, meta) = task_q.get(block=True, timeout=None) +# except KeyboardInterrupt: +# break +# else: + +# if p is None: +# break +# #Do processing here +# #p.show() +# #dests['output'].put('template processed: ' + str(p) + '/' + str(meta)) +# +# for statement in template_extract(p, meta, prefs, dests): +# dests['output'].put(statement) +# +# Progress('template') +# +# debug_out('Exiting template', prefs, dests) + + +def ARP_handler(task_q, sh_da, prefs, dests): + """Extracts all needed information from the ARP layer.""" + + os.nice(nice_raise + 2) #Lower priority to give higher priority to critical tasks + debug_out(whatami('ARP'), prefs, dests) + + while True: + try: + (p, meta) = task_q.get(block=True, timeout=None) + except KeyboardInterrupt: + break else: - UnhandledPacket(p, prefs, dests) - elif sport in ("21020", "21250", "27016", "27017", "27018", "27030", "27035", "27040", "28015"): #halflife server - if Payload and (Payload.find(b'Team Fortress') > -1): - ReportId("US", sIP, "UDP_" + sport, "open", "halflife/server", ([]), prefs, dests) #'portonlysignature' - process_udp_ports.UDPManualServerDescription[FromPort] = "halflife/server" + if p is None: + break + for statement in ARP_extract(p, meta, prefs, dests): + dests['output'].put(statement) + + Progress('A') + + debug_out('Exiting ARP', prefs, dests) + + + +def IP_handler(task_q, sh_da, prefs, dests): + """Extracts all needed information from the IP layer.""" + + os.nice(nice_raise) #Lower priority to give higher priority to critical tasks + debug_out(whatami('IP'), prefs, dests) + + while True: + try: + (p, meta) = task_q.get(block=True, timeout=None) + except KeyboardInterrupt: + break else: - UnhandledPacket(p, prefs, dests) - elif sport == "27019": #halflife server - 
ReportId("US", sIP, "UDP_" + sport, "open", "halflife/server", ([]), prefs, dests) #'portonlysignature' - process_udp_ports.UDPManualServerDescription[FromPort] = "halflife/server" - -### IP/UDP/steam-ihs-discovery=27036 https://codingrange.com/blog/steam-in-home-streaming-discovery-protocol - elif (sport == "27036") and (dport == "27036") and (dIP == "255.255.255.255"): - if Payload and (Payload.startswith(stream_ihs_discovery_header)): - ReportId("UC", sIP, "UDP_" + dport, "open", "stream-ihs-discovery-broadcast/client", ([]), prefs, dests) + if p is None: + break + for statement in IP_extract(p, meta, prefs, dests): + dests['output'].put(statement) + + #Progress('I') + + debug_out('Exiting IP', prefs, dests) + + + +def TCP_handler(task_q, sh_da, prefs, dests): + """Extracts all needed information from the TCP layer.""" + + os.nice(nice_raise) #Lower priority to give higher priority to critical tasks + debug_out(whatami('TCP'), prefs, dests) + + #No longer tracked here - see single_packet_handler + #if 'ack_count' not in TCP_handler.__dict__: + # TCP_handler.ack_count = {} + + while True: + try: + (p, meta) = task_q.get(block=True, timeout=None) + except KeyboardInterrupt: + break else: - UnhandledPacket(p, prefs, dests) - elif (dport == "27036") and Payload and (Payload.startswith(stream_ihs_discovery_header)): - ReportId("UC", sIP, "UDP_" + dport, "open", "stream-ihs-discovery/client", ([]), prefs, dests) - elif dport in halflife_altport: #Halflife client - if Payload and (Payload.find(b'Source Engine Query') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "halflife/client", ([]), prefs, dests) #'portonlysignature' + if p is None: + break + + #port_tuple = (meta['sIP'], meta['sport'], meta['dIP'], meta['dport']) + #if port_tuple not in TCP_handler.ack_count: + # TCP_handler.ack_count[port_tuple] = 0 + + #if (meta['flags'] & 0x17) == 0x10: #ACK (RST, SYN, and FIN off) + # TCP_handler.ack_count[port_tuple] += 1 + # if TCP_handler.ack_count[port_tuple] <= 
max_processed_acks: + # for statement in TCP_extract(p, meta, prefs, dests): + # dests['output'].put(statement) + # Progress('T') + # else: + # Progress('t') + #else: + for statement in TCP_extract(p, meta, prefs, dests): + dests['output'].put(statement) + Progress('T') + + debug_out('Exiting TCP', prefs, dests) + + +def UDP_handler(task_q, sh_da, prefs, dests): + """Extracts all needed information from the UDP layer.""" + + os.nice(nice_raise + 2) #Lower priority to give higher priority to critical tasks + debug_out(whatami('UDP'), prefs, dests) + + while True: + try: + (p, meta) = task_q.get(block=True, timeout=None) + except KeyboardInterrupt: + break else: - UnhandledPacket(p, prefs, dests) -### IP/UDP/lima=25213 https://support.meetlima.com/hc/en-us/articles/115004950326-README-document - elif dport == "25213" and Payload and (Payload.startswith(b'ZVPN')): - ReportId("UC", sIP, "UDP_" + dport, "open", "limavpn/client", (['tunnel', ]), prefs, dests) -### IP/UDP/openarena=27960 https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=665656 , http://openarena.ws/board/index.php?topic=4391.0 , http://blog.alejandronolla.com/2013/06/24/amplification-ddos-attack-with-quake3-servers-an-analysis-1-slash-2/ - elif (dport == "27960") and Payload and Payload.startswith(eight_fs + b'getstatus'): - ReportId("UC", sIP, "UDP_" + dport, "open", 'openarena-quake3/client getstatus: Likely spoofed and DDOSed source IP', (['amplification', 'dos', 'spoofed']), prefs, dests) -### IP/UDP/hap=28784 https://hal.inria.fr/hal-01456891/document - elif (dport == "28784") and Payload and Payload.startswith(b'HAP'): - ReportId("UC", sIP, "UDP_" + dport, "open", 'hap/client', (['scan', ]), prefs, dests) -### IP/UDP/traceroute - elif ((dport >= "33434") and (dport <= "33524")): #udptraceroute client - ReportId("UC", sIP, "UDP_33434", "open", "udptraceroute/client", ([]), prefs, dests) #'portonlysignature' -### IP/UDP/lima=33612 
https://support.meetlima.com/hc/en-us/articles/115004950326-README-document - elif dport == "33612" and Payload and (Payload.startswith(b'LIMA')): - ReportId("UC", sIP, "UDP_" + dport, "open", "lima/client", ([]), prefs, dests) -### IP/UDP/tzsp=37008 https://korniychuk.org.ua/instruction/live-packet-captures-using-mikrotik-routeros-and-wireshark/ - elif dport == "37008": - ReportId("UC", sIP, "UDP_" + dport, "open", "tzsp/client", (['tunnel', ]), prefs, dests) - ShowPacket(p, meta, "IP/UDP/TZSP", HonorQuit, prefs, dests) -### IP/UDP/halflife=40348 - elif dport == "40348" and Payload and (Payload.find(b'HLS') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "halflife/client", ([]), prefs, dests) -### IP/UDP/crestron-cip=41794 https://media.defcon.org/DEF%20CON%2026/DEF%20CON%2026%20presentations/Ricky%20Lawshae/DEFCON-26-Lawshae-Who-Controls-the-Controllers-Hacking-Crestron.pdf - elif (sport == "41794") and (dport == "41794") and Payload and Payload.startswith(crestron_prelude + b'hostname'): - ReportId("UC", sIP, "UDP_" + dport, "open", 'crestron-cip/clientscanner', (['scan', ]), prefs, dests) -### IP/UDP/zengge-bulb=48899 client https://github.com/vikstrous/zengge-lightcontrol/blob/master/README.md - elif (dport == "48899") and Payload and (Payload.find(b'HF-A11ASSISTHREAD') > -1): - ReportId("UC", sIP, "UDP_" + dport, "open", "zengge-bulb/clientscanner", (['scan', ]), prefs, dests) -### IP/UDP/linkproof=49153 client https://eromang.zataz.com/2010/04/28/suc007-activities-on-49153udp-linkproof-proximity-advanced/ - elif (dport == "49153") and Payload and (Payload.startswith(b'linkproof.proximity.advanced')): - ReportId("UC", sIP, "UDP_" + dport, "open", "radware-linkproof/clientscanner", (['scan', ]), prefs, dests) -### IP/UDP/netis-backdoor-53413=53413 client, exploting Netis router backdoor: https://isc.sans.edu/forums/diary/Surge+in+Exploit+Attempts+for+Netis+Router+Backdoor+UDP53413/21337/ - elif dport == "53413": #To limit this signature to just 
shellcode, add the following tests to this line: and Payload and (Payload.find(b'; chmod 777 ') > -1) - ReportId("UC", sIP, "UDP_" + dport, "open", "netis-backdoor-53413/client", (['malicious', ]), prefs, dests) #'portonlysignature' -### IP/UDP/logitech-arx=54915 http://support.moonpoint.com/network/udp/port_54915/ - elif sport == "54915" and dport == "54915" and meta['cast_type']: - ReportId("UC", sIP, "UDP_" + dport, "open", "logitech-arx/" + meta['cast_type'] + "client", ([]), prefs, dests) #'portonlysignature' -### IP/UDP/brother-announce=54925 and 54926 used by brother printers http://ww2.chemistry.gatech.edu/software/Drivers/Brother/MFC-9840CDW/document/ug/usa/html/sug/index.html?page=chapter7.html - elif (dport in ("54925", "54926")) and meta['cast_type'] and Payload and (Payload.find(b'NODENAME=') > -1): - BrotherMatch = BrotherAnnounceMatch.search(Payload) - if (BrotherMatch is not None) and (len(BrotherMatch.groups()) >= 4): - #In the packets I've seen, groups 1, 2, and 3 are ip addresses (1 ipv4 and 2 ipv6). 
Group 4 is a nodename ("BRWF" + uppercase mac address, no colons) - ReportId("UC", sIP, "UDP_" + dport, "open", "brother-announce/" + meta['cast_type'] + " nodename: " + BrotherMatch.group(4), ([]), prefs, dests) - ReportId("UC", BrotherMatch.group(1), "UDP_" + dport, "open", "brother-announce/" + meta['cast_type'] + " nodename: " + BrotherMatch.group(4), ([]), prefs, dests) - ReportId("UC", BrotherMatch.group(2), "UDP_" + dport, "open", "brother-announce/" + meta['cast_type'] + " nodename: " + BrotherMatch.group(4), ([]), prefs, dests) - ReportId("UC", BrotherMatch.group(3), "UDP_" + dport, "open", "brother-announce/" + meta['cast_type'] + " nodename: " + BrotherMatch.group(4), ([]), prefs, dests) + + if p is None: + break + + for statement in UDP_extract(p, meta, prefs, dests): + dests['output'].put(statement) + + Progress('U') + + debug_out('Exiting UDP', prefs, dests) + + +def DNS_handler(task_q, sh_da, prefs, dests): + """Extracts all needed information from the DNS layer.""" + + os.nice(nice_raise + 2) #Lower priority to give higher priority to critical tasks + debug_out(whatami('DNS'), prefs, dests) + + while True: + try: + (p, meta) = task_q.get(block=True, timeout=None) + except KeyboardInterrupt: + break + except struct.error: #We're getting odd unpacking errors here. 
+ pass + #debug_out("DNS Unpacking error?", prefs, dests) + ## File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/scapy/layers/dns.py", line 143, in decodeRR + ##type,cls,ttl,rdlen = struct.unpack("!HHIH", ret) + ##error: unpack requires a string argument of length 10 + #raise + except ValueError: + pass + #File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/scapy/layers/dns.py", line 337, in m2i + # s = inet_ntop(family, s) + #File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/scapy/pton_ntop.py", line 132, in inet_ntop + # return socket.inet_ntop(af, addr) + #ValueError: invalid length of packed IP address string else: - ReportId("UC", sIP, "UDP_" + dport, "open", "brother-announce/" + meta['cast_type'], ([]), prefs, dests) -### IP/UDP/spotify-broadcast=57621 https://mrlithium.blogspot.com/2011/10/spotify-and-opting-out-of-spotify-peer.html - elif (dport == "57621") and Payload and (Payload.startswith(b'SpotUdp')): - ReportId("UC", sIP, "UDP_" + dport, "open", "spotify/" + meta['cast_type'] + "client", ([]), prefs, dests) -### IP/UDP/probes with empty payloads - elif dport in empty_payload_ports and Payload == b'': - ReportId("UC", sIP, "UDP_" + dport, "open", "empty-payload/client", ([]), prefs, dests) - elif Payload == b'': - ReportId("UC", sIP, "UDP_" + dport, "open", "empty-payload/client Port not registered", ([]), prefs, dests) - UnhandledPacket(p, prefs, dests) -### IP/UDP/quake3 disconnect amplification http://blog.alejandronolla.com/2013/08/05/amplification-ddos-attack-with-quake3-servers-an-analysis-2-slash-2/ - elif Payload == quake3_disconnect: - ReportId("UC", sIP, "UDP_" + dport, "open", 'quake3/client: Disconnect, likely spoofed and DDOSed source IP', (['amplification', 'malicious', 'spoofed']), prefs, dests) - UnhandledPacket(p, prefs, dests) -### IP/UDP/bt-dht http://www.bittorrent.org/beps/bep_0005.html , 
https://isc.sans.edu/forums/diary/Identifying+applications+using+UDP+payload/6031/ - elif Payload and Payload.find(b':id') > -1 and ((Payload.find(b':info_hash') > -1 and Payload.find(b':get_peers') > -1) or Payload.find(b':ping') > -1 or Payload.find(b'9:find_node') > -1): #Unfortunately, can run on any port - ReportId("UC", sIP, "UDP_" + dport, "open", 'bt-dht-scan/clientscanner', (['scan', ]), prefs, dests) - elif Payload and Payload.find(b':id') > -1 and Payload.find(b':token') > -1 and (Payload.find(b':nodes') > -1 or Payload.find(b':values')): - ReportId("US", sIP, "UDP_" + sport, "open", 'bt-dht/server', ([]), prefs, dests) - process_udp_ports.UDPManualServerDescription[FromPort] = "bt-dht/server" - elif Payload and Payload.find(b'; wget ') > -1 and Payload.find(b'; sh ') > -1 and Payload.find(b'; rm -rf ') > -1: - ReportId("UC", sIP, "UDP_" + dport, "open", 'shellcode/clientscanner', (['scan', 'malicious']), prefs, dests) - elif Payload and Payload.startswith(a0_string): #Payload starting with A\x00 - UnhandledPacket(p, prefs, dests) - elif dport in SecUDPPortNames: - warning_list = [] - if dport in udp_port_warnings: - warning_list = [udp_port_warnings[dport]] - UnhandledPacket(p, prefs, dests) - ReportId("UC", sIP, "UDP_" + dport, "open", str(SecUDPPortNames[dport]) + "/client", (warning_list), prefs, dests) #'portonlysignature' - elif sport in SecUDPPortNames: - warning_list = [] - if sport in udp_port_warnings: - warning_list = [udp_port_warnings[sport]] - UnhandledPacket(p, prefs, dests) - ReportId("US", sIP, "UDP_" + sport, "open", str(SecUDPPortNames[sport]) + "/server", (warning_list), prefs, dests) #'portonlysignature' - process_udp_ports.UDPManualServerDescription[FromPort] = str(SecUDPPortNames[sport]) + "/server" - elif meta['ip_class'] == '4' and p[IP].frag > 0: - UnhandledPacket(p, prefs, dests) - elif (sport == "53") and not p.haslayer(DNS): #source port 53, but no parsed DNS layer. 
Seen this in large packets with Raw immediately following UDP. - UnhandledPacket(p, prefs, dests) - elif sport == "53": #source port 53. I've seen some coming back from port 53 with qr=0, request. Hmmm. - UnhandledPacket(p, prefs, dests) - elif sIP in shodan_hosts and Payload == fournulls + b'abcdefgh': - ReportId("UC", sIP, "UDP_" + dport, "open", "shodan_host/clientscanner abcdefgh", (['scan', ]), prefs, dests) - elif sIP in shodan_hosts: - ReportId("UC", sIP, "UDP_" + dport, "open", "shodan_host/clientscanner", (['scan', ]), prefs, dests) - elif Payload == fournulls + b'abcdefgh': - ReportId("UC", sIP, "UDP_" + dport, "open", "shodan_host/clientscanner abcdefgh Unlisted host", (['scan', ]), prefs, dests) - elif sIP in known_scan_ips: - ReportId("UC", sIP, "UDP_" + dport, "open", "udp/clientscanner known scanner", (['scan', ]), prefs, dests) - - elif meta['dMAC'] == "ff:ff:ff:ff:ff:ff" and dport in broadcast_udp_ports: - ReportId("UC", sIP, "UDP_" + dport, "open", "udp" + dport + "/broadcastclient", ([]), prefs, dests) #'portonlysignature' - elif sport in broadcast_udp_ports: - ReportId("US", sIP, "UDP_" + sport, "open", 'udp' + sport + '/server', ([]), prefs, dests) #'portonlysignature' - process_udp_ports.UDPManualServerDescription[FromPort] = 'udp' + sport + '/server' - #elif meta['dMAC'] == "ff:ff:ff:ff:ff:ff": - # ShowPacket(p, meta, "IP/UDP/unhandled broadcast", HonorQuit, prefs, dests) - #else: - # ShowPacket(p, meta, "IP/UDP/unhandled port", HonorQuit, prefs, dests) - - -def processpacket(p): - """Extract information from a single packet off the wire.""" - - #Because this is called from scapy.sniff with no apparent way to hand down params other than the raw packet, we have to pull these two from main by hand. 
- prefs = cl_args - dests = destinations - - #Persistent variables - #These 4 hold the seconds_since_the_epoch and human readable UTC versions of the earliest and latest packets - if "start_stamp" not in processpacket.__dict__: - processpacket.start_stamp = None - if "start_string" not in processpacket.__dict__: - processpacket.start_string = '' - if "end_stamp" not in processpacket.__dict__: - processpacket.end_stamp = None - if "end_string" not in processpacket.__dict__: - processpacket.end_string = '' - if "current_stamp" not in processpacket.__dict__: - processpacket.current_stamp = None - if "current_string" not in processpacket.__dict__: - processpacket.current_string = '' - - if "ClosedUDPPortsReceived" not in processpacket.__dict__: - processpacket.ClosedUDPPortsReceived = {} #Dictionary of sets. Key is expanded IP address, value is a set of "IP,Proto_Port" strings that sent back "closed". High counts of these are systems that are scanning for ports. - - if debug_known_layer_lists: - p_layers = list(ReturnLayers(p)) - if p_layers not in known_layer_lists: - debug_out('>>>>>>>> ' + str(p_layers), prefs, dests) - ShowPacket(p, meta, "Unknown layer list", HonorQuit, prefs, dests) - quit() - for one_layer in p_layers: - if one_layer not in layer_label_to_key: - debug_out('>>>>>>>> ' + str(one_layer) + ' not in layer_label_to_key', prefs, dests) - ShowPacket(p, meta, "Unknown layer list", HonorQuit, prefs, dests) - quit() - - processpacket.current_stamp, processpacket.current_string = packet_timestamps(p) - if not processpacket.start_stamp or processpacket.current_stamp < processpacket.start_stamp: - processpacket.start_stamp = processpacket.current_stamp - processpacket.start_string = processpacket.current_string - if not processpacket.end_stamp or processpacket.current_stamp > processpacket.end_stamp: - processpacket.end_stamp = processpacket.current_stamp - processpacket.end_string = processpacket.current_string - - meta = generate_meta_from_packet(p, prefs, 
dests) - #Convert: - #sMac -> meta['sMAC'] - #dMac -> meta['dMAC'] - #meta['cast_type'] - #pp_ttl -> meta['ttl'] - - #Transitional variables - sIP = meta['sIP'] - dIP = meta['dIP'] - sport = meta['sport'] - dport = meta['dport'] - - if p.getlayer(Raw): - Payload = p.getlayer(Raw).load - else: - Payload = b"" - - -### Spanning Tree Protocol - if isinstance(p, Dot3) and p.haslayer(LLC) and isinstance(p[LLC], LLC): - pass #Nothing really to learn from it. -### 802.3 without LLC - elif isinstance(p, Dot3): - pass #Nothing really to learn from it. -### Need more details on how to handle. - elif p.haslayer(Ether) and p[Ether] is None: - ShowPacket(p, meta, "non-ethernet packet: " + str(type(p)), HonorQuit, prefs, dests) -### ARP - elif (p.haslayer(Ether) and p[Ether].type == 0x0806) and p.haslayer(ARP) and isinstance(p[ARP], ARP): #ARP - #pull arp data from here instead of tcp/udp packets, as these are all local - if p[ARP].op == 1: #1 is request ("who-has") + if p is None: + break + + for statement in DNS_extract(p, meta, prefs, dests): + dests['output'].put(statement) + + Progress('D') + + debug_out('Exiting DNS', prefs, dests) + + +def single_packet_handler(highpri_task_q, lowpri_task_q, sh_da, layer_qs, prefs, dests): #pylint: disable=unused-argument + """This gets a single packet and doles it out to the available layers. 
We totally drain the low priority queue before handling a single high priority packet, then go back to check the high priority queue.""" + + if 'ack_count' not in single_packet_handler.__dict__: + single_packet_handler.ack_count = {} + + exit_once_queues_drained = False + + debug_out(whatami('single_packet'), prefs, dests) + #Layers that we won't send off for processing (though they may be used as part of processing their parent or other ancestor) + nosubmit_layers = ('DHCP options', 'DHCP6 Client Identifier Option', 'DHCP6 Elapsed Time Option', 'DHCP6 Identity Association for Non-temporary Addresses Option', 'DHCP6 Option Request Option', 'Ethernet', 'ICMPv6 Neighbor Discovery Option - Prefix Information', 'ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option', 'ICMPv6 Neighbor Discovery Option - Route Information Option', 'ICMPv6 Neighbor Discovery Option - Source Link-Layer Address', 'Padding', 'Raw') + + while True: + #FIXME - the logic for when to exit is not correct. + if exit_once_queues_drained and highpri_task_q.empty() and lowpri_task_q.empty(): + break + + while highpri_task_q.empty() and lowpri_task_q.empty(): #We only do this if there a _no packets at all_ waiting to be processed, so we should rarely hit this if ever. + Progress('.') + try: + time.sleep(0.05) + except KeyboardInterrupt: + exit_once_queues_drained = True + + pkt = '' + try: + pkt = highpri_task_q.get(block=False, timeout=None) + Progress('+') + except Empty: + try: + pkt = lowpri_task_q.get(block=False, timeout=None) + Progress('_') + except Empty: + pass + + if pkt is None or pkt == (None, None): + exit_once_queues_drained = True + elif pkt == '': #Neither attempt to retrieve a packet passed, skip and try again. 
pass - elif p[ARP].op == 2: #2 is reply ("is-at") - if (p[ARP].psrc is not None) and (p[ARP].hwsrc is not None): - IPAddr = p[ARP].psrc - MyMac = p[ARP].hwsrc.upper() - ReportId("MA", IPAddr, 'Ethernet', MyMac, '', ([]), prefs, dests) - else: - UnhandledPacket(p, prefs, dests) + #We don't process ack packets past the first few . Flags & 0x17 = 0x10 is ACK (RST, SYN, and FIN off): + #We perform the check here to avoid any queue processing or generate_meta work + elif pkt.haslayer('IP') and pkt.haslayer('TCP') and ((pkt['TCP'].flags & 0x17) == 0x10) and single_packet_handler.ack_count.get((pkt['IP'].src, pkt['TCP'].sport, pkt['IP'].dst, pkt['TCP'].dport), 0) > max_processed_acks: + Progress('t') + elif pkt.haslayer('IPv6') and pkt.haslayer('TCP') and ((pkt['TCP'].flags & 0x17) == 0x10) and single_packet_handler.ack_count.get((pkt['IPv6'].src, pkt['TCP'].sport, pkt['IPv6'].dst, pkt['TCP'].dport), 0) > max_processed_acks: + Progress('t') else: - UnhandledPacket(p, prefs, dests) -### ARP, truncated - elif p.haslayer(Ether) and p[Ether].type == 0x0806: #2054: ARP, apparently truncated - UnhandledPacket(p, prefs, dests) -### IPv4 ethertype but not ipv4 in the ip header - elif ((p.haslayer(CookedLinux) and p[CookedLinux].proto == 0x800) or (p.haslayer(Ether) and ((p[Ether].type == 0x0800) or (p[Ether].type == 0x8100))) or not p.haslayer(Ether)) and p.haslayer(IP) and isinstance(p[IP], IP) and p[IP].version != 4: - #ShowPacket(p, meta, "IPV4 packet with version != 4", HonorQuit, prefs, dests) - UnhandledPacket(p, prefs, dests) -### IPv4 - elif ((p.haslayer(CookedLinux) and p[CookedLinux].proto == 0x800) or (p.haslayer(Ether) and ((p[Ether].type == 0x0800) or (p[Ether].type == 0x8100))) or not p.haslayer(Ether)) and p.haslayer(IP) and isinstance(p[IP], IP): - - if meta['sMAC'] == 'ff:ff:ff:ff:ff:ff': - ReportId("IP", sIP, "Broadcast_source_mac", "open", "Source mac address is broadcast", (['noncompliant', ]), prefs, dests) - - #Best to get these from arps instead; if we get 
them from here, we get router macs for foreign addresses. - #ReportId("MA", sIP, "Ethernet", meta['sMAC'], '', ([]), prefs, dests) - #ReportId("MA", dIP, "Ethernet", dMAC, '', ([]), prefs, dests) - -### IPv4/IP - if p[IP].proto == 0: - ShowPacket(p, meta, "IPv4/Protocol 0", HonorQuit, prefs, dests) -### IPv4/ICMPv4 - elif (p[IP].proto == 1) and p.haslayer(ICMP) and isinstance(p[ICMP], ICMP): - Type = p[ICMP].type - Code = p[ICMP].code - -### IPv4/ICMPv4/Echo Reply=0 - if Type == 0: - ReportId("IP", sIP, "IP", "live", 'icmp echo reply', ([]), prefs, dests) -### IPv4/ICMPv4/Unreachable=3 - elif (Type == 3) and p.haslayer(IPerror) and isinstance(p[IPerror], IPerror): #Unreachable, check that we have an actual embedded packet - if type(p[IPerror]) != IPerror: - ShowPacket(p, meta, "IPv4/ICMPv4/Unreachable=type3/Not IPError: " + str(type(p[IPerror])), HonorQuit, prefs, dests) - - if Code == 0: #Net unreachable - ReportId("IP", meta['OrigdIP'], "IP", "dead", 'net unreachable', ([]), prefs, dests) - ReportId("RO", sIP, "NetUn", "router", "client_ip=" + dIP, ([]), prefs, dests) - elif Code == 1: #Host unreachable - ReportId("IP", meta['OrigdIP'], "IP", "dead", 'host unreachable', ([]), prefs, dests) - ReportId("RO", sIP, "HostUn", "router", "client_ip=" + dIP, ([]), prefs, dests) - elif Code == 2: #Protocol unreachable - ReportId("RO", sIP, "ProtoUn", "router", "client_ip=" + dIP, ([]), prefs, dests) - #Following codes are Port unreachable, Network/Host Administratively Prohibited, Network/Host unreachable for TOS, Communication Administratively prohibited - elif Code in (3, 9, 10, 11, 12, 13) and (p[IPerror].proto == 17) and p.haslayer(UDPerror) and isinstance(p[UDPerror], UDPerror): #Port unreachable and embedded protocol = 17, UDP, as it should be - DNSServerLoc = meta['OrigsIP'] + ",UDP_53" - if (p[UDPerror].sport == 53) and (DNSServerLoc in process_udp_ports.UDPManualServerDescription) and (process_udp_ports.UDPManualServerDescription[DNSServerLoc] == "dns/server"): 
#Cross-function variable - #If orig packet coming from 53 and coming from a dns server, don't do anything (closed port on client is a common effect) - #Don't waste time on port unreachables going back to a dns server; too common, and ephemeral anyways. + packet_meta = generate_meta_from_packet(pkt, prefs, dests) + + if pkt.haslayer('TCP') and (packet_meta['flags'] & 0x17) == 0x10: #ACK (RST, SYN, and FIN off) + port_tuple = (packet_meta['sIP'], packet_meta['sport'], packet_meta['dIP'], packet_meta['dport']) + if port_tuple not in single_packet_handler.ack_count: + single_packet_handler.ack_count[port_tuple] = 0 + + single_packet_handler.ack_count[port_tuple] += 1 + + for packet_layer in packet_meta['pkt_layers']: + if packet_layer in layer_qs: + if layer_qs[packet_layer].full(): + debug_out(str(packet_layer) + ' layer is full.', prefs, dests) + try: + #FIXME if coming from a file, Block. If coming from an interface, don't block. ? + layer_qs[packet_layer].put((pkt[packet_layer], packet_meta), block=False) #COMMENT NOT CURRENTLY CORRECT: Default is block=True, timeout=None , so if the queue is full we'll get held up here until space is available. + except Full: pass + elif packet_layer not in nosubmit_layers: + if Verbose: + debug_out('\nMissing layer: ' + packet_layer, prefs, dests) else: - #If orig packet coming from something other than 53, or coming from 53 and NOT coming from a dns server, log as closed - OrigDPort = str(p[UDPerror].dport) - ReportId("US", meta['OrigdIP'], "UDP_" + OrigDPort, "closed", "port unreachable", ([]), prefs, dests) - - if include_udp_errors_in_closed_ports: - #Prober is dIP. 
Probed port is: meta['OrigdIP'] + ",UDP_" + OrigDPort - if dIP not in processpacket.ClosedUDPPortsReceived: - processpacket.ClosedUDPPortsReceived[dIP] = set() - processpacket.ClosedUDPPortsReceived[dIP].add(meta['OrigdIP'] + ",UDP_" + OrigDPort) - if len(processpacket.ClosedUDPPortsReceived[dIP]) >= min_closed_ports_for_scanner: - ReportId("IP", dIP, "IP", "suspicious", 'Scanned UDP closed ports.', (['scan', ]), prefs, dests) - elif Code in (3, 9, 10, 11, 12, 13) and (p[IPerror].proto == 6) and isinstance(p[TCPerror], TCPerror): #Port unreachable and embedded protocol = 6, TCP, which it shouldn't. May be the same firewall providing the TCP FR's - pass + debug_out('Missing layer: ' + packet_layer, prefs, dests) - #Following code disabled as it needs cross-process dictionaries, and isn't valid in the first place. - ##Now we _could_ claim the machine sending the error is a linux firewall. - #OrigDPort = str(p[TCPerror].dport) - #Service = meta['OrigdIP'] + ",TCP_" + OrigDPort - #if Service in processpacket.SynSentToTCPService and ((Service not in processpacket.LiveTCPService) or processpacket.LiveTCPService[Service]): - # processpacket.LiveTCPService[Service] = False - # ReportId("TS", meta['OrigdIP'], "TCP_" + OrigDPort, "closed", '', ([]), prefs, dests) - - #if Service in processpacket.SynSentToTCPService: - # #Prober is dIP. Probed port is Service (= meta['OrigdIP'] + ",TCP_" + OrigDPort) - # if dIP not in processpacket.ClosedTCPPortsReceived: - # processpacket.ClosedTCPPortsReceived[dIP] = set() - # processpacket.ClosedTCPPortsReceived[dIP].add(Service) - # if len(processpacket.ClosedTCPPortsReceived[dIP]) >= min_closed_ports_for_scanner: - # ReportId("IP", dIP, "IP", "suspicious", 'Scanned closed ports.', (['scan', ]), prefs, dests) - elif (Code == 3) and (p[IPerror].proto == 1) and isinstance(p[ICMPerror], ICMPerror): #Port unreachable and embedded protocol = 1, ICMP; not sure if this is legit or not. 
- #Now we _could_ claim the machine sending the error is a linux firewall. - pass - elif Code == 3: #Port unreachable, but we do not have (complete) underlying layers below IPerror or IPerror6 - pass - elif Code == 4: #Fragmentation needed - pass - elif Code == 6: #Net unknown - ReportId("IP", meta['OrigdIP'], "IP", "dead", 'net unknown', ([]), prefs, dests) - elif Code == 7: #Host unknown - ReportId("IP", meta['OrigdIP'], "IP", "dead", 'host unknown', ([]), prefs, dests) - elif Code == 9: #Network Administratively Prohibited - pass #Can't tell much from this type of traffic. Possibly list as firewall? - elif Code == 10: #Host Administratively Prohibited - pass - elif Code == 11: #Network unreachable for TOS - pass - elif Code == 12: #Host unreachable for TOS - pass - elif Code == 13: #Communication Administratively prohibited - pass - else: - ShowPacket(p, meta, "IPv4/ICMPv4/Type=3/unhandled code: " + str(Code), HonorQuit, prefs, dests) -### IPv4/ICMPv3/Source Quench=4 https://tools.ietf.org/html/rfc6633 - ipv4 source quench deprecated since 2012, does not exist in ipv6 - elif Type == 4: - UnhandledPacket(p, prefs, dests) -### IPv4/ICMPv4/Redirect=5 - elif (Type == 5) and isinstance(p[IPerror], IPerror): #Unreachable, check that we have an actual embedded packet - if type(p[IPerror]) != IPerror: - ShowPacket(p, meta, "IPv4/ICMPv4/Redirect=type5/Not IPError: " + str(type(p[IPerror])), HonorQuit, prefs, dests) - elif Code in (0, 1, 2, 3): #Network, Host, TOS+Network, TOS+Host - ReportId("RO", sIP, "Redirect", "router", "attempted_router client_ip=" + dIP, ([]), prefs, dests) - better_router = p[ICMP].gw - ReportId("RO", better_router, "Redirect", "router", "recommended_router client_ip=" + dIP, ([]), prefs, dests) - else: - UnhandledPacket(p, prefs, dests) -### IPv4/ICMPv4/Echo Request=8 - elif Type == 8: - #FIXME - check payload for ping sender type, perhaps - if Payload.find(b'liboping -- ICMP ping library') > -1: - ReportId("IP", sIP, "IP", "live", 'oping icmp 
echo request scanner', (['scan', ]), prefs, dests) - else: - ReportId("IP", sIP, "IP", "live", 'icmp echo request scanner', (['scan', ]), prefs, dests) -### IPv4/ICMPv4/Router Advertisement=9 https://tools.ietf.org/html/rfc1256 - elif Type == 9: - ReportId("RO", sIP, "RouterAdv", "router", '', ([]), prefs, dests) -### IPv4/ICMPv4/Time exceeded=11 - elif Type == 11: - if Code == 0: #TTL exceeded - #FIXME - put original target IP as column 5? - ReportId("RO", sIP, "TTLEx", "router", "client_ip=" + dIP, ([]), prefs, dests) - else: - UnhandledPacket(p, prefs, dests) - elif Type in (6, 15, 16, 17, 18, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39): #https://tools.ietf.org/html/rfc6918 - ReportId("IP", sIP, "ICMP_type_" + str(Type), "open", "Deprecated ICMP type scanner", (['noncompliant', 'scan']), prefs, dests) - elif Type >= 44 and Type <= 252: #https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml - ReportId("IP", sIP, "ICMP_type_" + str(Type), "open", "Reserved ICMP type scanner", (['noncompliant', 'scan']), prefs, dests) - else: - UnhandledPacket(p, prefs, dests) - ShowPacket(p, meta, 'Unhandled ipv4 ICMP packet', HonorQuit, prefs, dests) - elif p[IP].proto == 1: - UnhandledPacket(p, prefs, dests) -### IPv4/IGMPv4 - elif p[IP].proto == 2: #IGMP - UnhandledPacket(p, prefs, dests) -### IPv4/TCPv4 - elif p[IP].proto == 6 and p.haslayer(TCP) and isinstance(p[TCP], TCP): #TCP - ReportAll(TCP_extract(p, meta, prefs, dests), prefs, dests) - -### IPv4/TCPv4, probably truncated/fragmented - elif p[IP].proto == 6: #TCP, but haslayer fails. Quite possibly a fragment; either way we can't do anything with it. - UnhandledPacket(p, prefs, dests) - #ShowPacket(p, meta, "IPv4/TCPv4/no TCP layer", HonorQuit, prefs, dests) -### IPv4/UDPv4 - elif p[IP].proto == 17 and p.haslayer(UDP): #old form: (type(p[UDP]) == UDP): - #UDP. We have to check the object type as well as we do get (corrupted? truncated?) 
packets with type 17 that aren't udp: AttributeError: 'NoneType' object has no attribute 'sport' - #Change over to p.getlayer(ICMPv6DestUnreach) ? We're getting crashes on elif p[IP].proto == 17 and (type(p[UDP]) == UDP): - #FIXME - possibly run udp packets through processpacket.ServiceFPs as well? - process_udp_ports(meta, p, prefs, dests) - -### IPv4/UDPv4, probably truncated/fragmented - elif p[IP].proto == 17: #This is the case where the protocol is listed as 17, but there's no complete UDP header. Quite likely a 2nd or future fragment. - UnhandledPacket(p, prefs, dests) -### IPv4/UDPv4/ipencap - elif p[IP].proto == 4: #ipencap, IP encapsulated in IP. - outer_ip = p.getlayer(IP, nb=1) - inner_layer = outer_ip.payload - if isinstance(inner_layer, IP): - #FIXME - submit the inner packet for processing? - if inner_layer.getlayer(Raw).load == "this is not an attack": - ReportId("IP", sIP, "ipencap", "open", 'ipencap/client', (['tunnel', 'scan']), prefs, dests) - else: - ReportId("IP", sIP, "ipencap", "open", 'ipencap/client', (['tunnel', ]), prefs, dests) - else: - ShowPacket(p, meta, "ipencap with non-IP inner layer", HonorQuit, prefs, dests) - -### IPv4/IPSecv4/GRE #GRE - elif p[IP].proto == 47 and p.haslayer(GRE): - ReportId("PC", sIP, "PROTO_" + str(p[IP].proto), "open", "gre/client", (['tunnel', ]), prefs, dests) - if p[GRE].proto == 2048: #0x800==2048==IPv4 - if p[GRE].payload: - encap_packet = p[GRE].payload - processpacket(encap_packet) - else: - UnhandledPacket(p, prefs, dests) - elif p[GRE].proto == 25944: #0x6558==25944==Trans Ether Bridging - if p.haslayer(Raw): - encap_packet_raw = p[Raw].load - encap_packet = Ether(encap_packet_raw) - processpacket(encap_packet) - else: - UnhandledPacket(p, prefs, dests) - elif p[GRE].proto == 34827: #0x880B==34827==PPP - if p.haslayer(Raw): - #Sample payload: \x00\x08:v\xff\x03\x00!E\x00\x00c\x00\x00 Hack; strip off first 8 bytes and interpret the rest as IP. Similar packet has 4 byte intro, so we test that too. 
- encap_packet_raw = None - if p[GRE].load[4:6] == ip_start_bytes: - encap_packet_raw = p[GRE].load[4:] - elif p[GRE].load[8:10] == ip_start_bytes: - encap_packet_raw = p[GRE].load[8:] - if encap_packet_raw: - encap_packet = IP(encap_packet_raw) - processpacket(encap_packet) - else: - ShowPacket(p, meta, "GRE raw does not appear to have E\x00\x00", HonorQuit, prefs, dests) - else: - UnhandledPacket(p, prefs, dests) + debug_out('Exiting single_packet', prefs, dests) + + + +def packet_stream_processor(name_param, pcap_interface, pcap_source_file, highpri_single_packet_q, lowpri_single_packet_q, sh_da, prefs, dests): #pylint: disable=unused-argument + """This starts a scapy.sniff() process on either an input file or interface (only request one, the other should be None). Packets are handed off to single_packet_handler. If both None, sniff from all interfaces.""" + + #Note - we do not lower priority if sniffing from an interface, only if reading from a pcap file; see below for "nice" statement + debug_out(whatami(name_param), prefs, dests) + + if pcap_interface and pcap_source_file: + debug_out('Both pcap_interface: ' + str(pcap_interface) + ' and pcap_source_file: ' + str(pcap_source_file) + ' requested at the same time, exiting.', prefs, dests) + elif pcap_interface: + try: + if prefs['bpf']: + sniff(store=0, filter=prefs['bpf'], iface=pcap_interface, prn=highpri_single_packet_q.put) #Default is block=True, timeout=None , so if the queue is full we'll get held up here until space is available. 
else: - ShowPacket(p, meta, "GRE unhandled proto", HonorQuit, prefs, dests) - -### IPv4/IPSecv4/ESP #ESP (IPSEC) - elif p[IP].proto == 50: - ReportId("PC", sIP, "PROTO_" + str(p[IP].proto), "open", "ipsec-esp/client", (['tunnel', ]), prefs, dests) - ReportId("PS", dIP, "PROTO_" + str(p[IP].proto), "open", "ipsec-esp/server unconfirmed", (['tunnel', ]), prefs, dests) - UnhandledPacket(p, prefs, dests) -### IPv4/IPSecv4/AH #AH (IPSEC) - elif p[IP].proto == 51: - ReportId("PC", sIP, "PROTO_" + str(p[IP].proto), "open", "ipsec-ah/client", (['tunnel', ]), prefs, dests) - ReportId("PS", dIP, "PROTO_" + str(p[IP].proto), "open", "ipsec-ah/server unconfirmed", (['tunnel', ]), prefs, dests) - UnhandledPacket(p, prefs, dests) -### IPv4/EIGRPv4 EIGRP = Enhanced Interior Gateway Routing Protocol - elif (p[IP].proto == 88) and dIP in ("224.0.0.10", "FF02:0:0:0:0:0:0:A"): - #224.0.0.10 for IPv4 EIGRP Routers, FF02:0:0:0:0:0:0:A for IPv6 EIGRP Routers - ReportId("RO", sIP, "EIGRP", "router", "", ([]), prefs, dests) - elif p[IP].proto == 88: #Different target address format, perhaps? 
- ShowPacket(p, meta, "IPv4/EIGRP unknown target IP", HonorQuit, prefs, dests) -### IPv4/OSPFv4 - elif (p[IP].proto == 89) and (dIP == "224.0.0.5"): #OSPF = Open Shortest Path First - UnhandledPacket(p, prefs, dests) -### IPv4/PIMv4 - elif (p[IP].proto == 103) and (dIP == "224.0.0.13"): #PIM = Protocol Independent Multicast - UnhandledPacket(p, prefs, dests) -### IPv4/VRRPv4 - elif (p[IP].proto == 112) and (dIP == "224.0.0.18"): #VRRP = virtual router redundancy protocol - UnhandledPacket(p, prefs, dests) -### IPv4/SSCOPMCE - elif p[IP].proto == 128: - UnhandledPacket(p, prefs, dests) - else: #http://www.iana.org/assignments/protocol-numbers - #Look up protocol in /etc/protocols - ShowPacket(p, meta, "Other IP protocol (" + meta['sIP'] + "->" + meta['dIP'] + "): " + str(p[IP].proto), HonorQuit, prefs, dests) - #Look up other ethernet types in: - # http://en.wikipedia.org/wiki/EtherType - # /etc/ethertypes - # http://www.iana.org/assignments/ethernet-numbers - # http://standards.ieee.org/develop/regauth/ethertype/eth.txt - # http://www.cavebear.com/archive/cavebear/Ethernet/type.html - if "SuspiciousIPs" in ReportId.__dict__ and ReportId.SuspiciousIPs and (sIP in ReportId.SuspiciousIPs or dIP in ReportId.SuspiciousIPs): #Cross-function variable - SuspiciousPacket(p, prefs, dests) - elif ((p.haslayer(CookedLinux) and p[CookedLinux].proto == 0x800) or (p.haslayer(Ether) and ((p[Ether].type == 0x0800) or (p[Ether].type == 0x8100)))): - #Like above, but has no IP layer. Probably truncated packet at the end of a still-running capture. 
- UnhandledPacket(p, prefs, dests) -### 2114: Wake-on-lan - elif p.haslayer(Ether) and p[Ether].type == 0x0842: - UnhandledPacket(p, prefs, dests) -### 9728: Unknown - elif p.haslayer(Ether) and p[Ether].type == 0x2600: - UnhandledPacket(p, prefs, dests) - #FIXME - add checks for CookedLinux and Ipv6 as well as Ether+IPv6 -### IPv6 ethertype but not ipv6 in the ip header - elif (p.haslayer(Ether) and p[Ether].type == 0x86DD) and p.haslayer(IPv6) and isinstance(p[IPv6], IPv6) and p[IPv6].version != 6: - #ShowPacket(p, meta, "IPV6 packet with version != 6", HonorQuit, prefs, dests) - UnhandledPacket(p, prefs, dests) -### IPv6 - elif (p.haslayer(Ether) and p[Ether].type == 0x86DD) and p.haslayer(IPv6) and isinstance(p[IPv6], IPv6): - if meta['sMAC'] == 'ff:ff:ff:ff:ff:ff': - ReportId("IP", sIP, "Broadcast_source_mac", "open", "Source mac address is broadcast", (['noncompliant', ]), prefs, dests) - -### IPv6/IPv6ExtHdrHopByHop=0 Hop-by-hop option header - if p[IPv6].nh == 0 and meta['ttl'] == 1 and p.getlayer(IPv6ExtHdrHopByHop) and p[IPv6ExtHdrHopByHop].nh == 58 and (p.haslayer(ICMPv6MLQuery) or p.haslayer(ICMPv6MLReport) or p.haslayer(ICMPv6MLDone)): #0 is Hop-by-hop options - UnhandledPacket(p, prefs, dests) - #FIXME - try to extract Multicast info later. - #if p[ICMPv6MLQuery].type == 130: #MLD Query - # if p[ICMPv6MLQuery].mladdr == '::' #General query - # pass - # else: #Multicast-address-specific query - # pass - #elif p[ICMPv6MLQuery].type == 131: #Multicast Listener Report - # pass - #elif p[ICMPv6MLQuery].type == 132: #Multicast Listener Done - # pass - #else: - # pass - elif p[IPv6].nh == 0 and p.getlayer(IPv6ExtHdrHopByHop) and p[IPv6ExtHdrHopByHop].nh == 58 and (isinstance(p[IPv6ExtHdrHopByHop].payload, Raw) or p[IPv6ExtHdrHopByHop].payload.type == 135): - #The packet claims to have an ICMPv6 layer, but the following layer is Raw. Ignore. Any chance that scapy is not interpreting the next layer down when it encounters a hop-by-hop? 
- #Or, the inner packet is a neighbor solicitation. - UnhandledPacket(p, prefs, dests) - elif p[IPv6].nh == 0: - ShowPacket(p, meta, "IPv6/IPv6ExtHdrHopByHop = 0; FIXME, intermediate header on its way to the real header", HonorQuit, prefs, dests) - #https://tools.ietf.org/html/rfc2711 (router alert option) - #Specifically "router contains a MLD message": https://tools.ietf.org/html/rfc2710 -### IPv6/TCPv6=6 - elif p[IPv6].nh == 6 and p.haslayer(TCP): - ReportAll(TCP_extract(p, meta, prefs, dests), prefs, dests) - elif p[IPv6].nh == 6: - ShowPacket(p, meta, "IPv6/nh==6 but no TCP layer", HonorQuit, prefs, dests) -### IPv6/UDPv6=17 - elif (p[IPv6].nh == 17) and p.haslayer(UDP): - process_udp_ports(meta, p, prefs, dests) - -### IPv6/Fragmentation=44 - elif p[IPv6].nh == 44: #Fragment header. Not worth trying to extract info from following headers. - #https://tools.ietf.org/html/rfc5798 - UnhandledPacket(p, prefs, dests) -### IPv6/ICMPv6=58 - elif p[IPv6].nh == 58: - #Layer names; see layers/inet6.py ( /opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/scapy/layers/inet6.py ), hash named icmp6typescls -### IPv6/ICMPv6=58/DestUnreach=1 - if p.getlayer(ICMPv6DestUnreach) and p.getlayer(IPerror6) and isinstance(p[IPerror6], IPerror6): #https://tools.ietf.org/html/rfc4443#section-3.1 - Code = p[ICMPv6DestUnreach].code -### IPv6/ICMPv6=58/DestUnreach=1/No route to dest=0 No route to destination; appears equivalent to IPv4 net unreachable - if Code == 0: - ReportId("IP", meta['OrigdIP'], "IP", "dead", 'net unreachable', ([]), prefs, dests) - ReportId("RO", sIP, "NetUn", "router", "client_ip=" + dIP, ([]), prefs, dests) -### IPv6/ICMPv6=58/DestUnreach=1/AdminProhib=1 Communication with destination administratively prohibited (blocked by firewall) - elif Code == 1: - pass -### IPv6/ICMPv6=58/DestUnreach=1/BeyondScope=2 Beyond scope of source address https://tools.ietf.org/html/rfc4443 - elif Code == 2: - pass -### 
IPv6/ICMPv6=58/DestUnreach=1/AddressUnreach=3 Address unreachable (general, used when there is no more specific reason); appears equivalent to host unreachable - elif Code == 3: - ReportId("IP", meta['OrigdIP'], "IP", "dead", 'host unreachable', ([]), prefs, dests) - ReportId("RO", sIP, "HostUn", "router", "client_ip=" + dIP, ([]), prefs, dests) -### IPv6/ICMPv6=58/DestUnreach=1/PortUnreach=4 Port unreachable and embedded protocol = 17, UDP, as it should be. Appears equivalent to port unreachable - elif (Code == 4) and (p[IPerror6].nh == 17) and p.haslayer(UDPerror) and isinstance(p[UDPerror], UDPerror): - DNSServerLoc = meta['OrigsIP'] + ",UDP_53" - if (p[UDPerror].sport == 53) and (DNSServerLoc in process_udp_ports.UDPManualServerDescription) and (process_udp_ports.UDPManualServerDescription[DNSServerLoc] == "dns/server"): #Cross-function variable - #If orig packet coming from 53 and coming from a dns server, don't do anything (closed port on client is a common effect) - #Don't waste time on port unreachables going back to a dns server; too common, and ephemeral anyways. - pass - else: - #If orig packet coming from something other than 53, or coming from 53 and NOT coming from a dns server, log as closed - OrigDPort = str(p[UDPerror].dport) - OrigDstService = meta['OrigdIP'] + ",UDP_" + OrigDPort - ReportId("US", meta['OrigdIP'], "UDP_" + OrigDPort, "closed", "port unreachable", ([]), prefs, dests) - - if include_udp_errors_in_closed_ports: - #Prober is dIP. 
Probed port is: meta['OrigdIP'] + ",UDP_" + OrigDPort - if dIP not in processpacket.ClosedUDPPortsReceived: - processpacket.ClosedUDPPortsReceived[dIP] = set() - processpacket.ClosedUDPPortsReceived[dIP].add(OrigDstService) - if len(processpacket.ClosedUDPPortsReceived[dIP]) >= min_closed_ports_for_scanner: - ReportId("IP", dIP, "IP", "suspicious", 'Scanned UDP closed ports.', (['scan', ]), prefs, dests) - elif (Code == 4) and (p[IPerror6].nh == 6) and p.haslayer(TCPerror) and isinstance(p[TCPerror], TCPerror): #Port unreachable and embedded protocol = 6, TCP, which it shouldn't. - pass + sniff(store=0, iface=pcap_interface, prn=highpri_single_packet_q.put) + debug_out('Finished processing packets from ' + str(pcap_interface), prefs, dests) + except Scapy_Exception: + debug_out('Attempt to listen on an interface failed: are you running this as root or under sudo?', prefs, dests) + elif pcap_source_file: + os.nice(nice_raise + 2) #Lower priority to give higher priority to critical tasks + work_filename = None + delete_temp = False - #Following code disabled because it depends on cross-process dictionaries and it's not legal in the first place. - #OrigDPort = str(p[TCPerror].dport) - #Service = meta['OrigdIP'] + ",TCP_" + OrigDPort - #if Service in processpacket.SynSentToTCPService and ((Service not in processpacket.LiveTCPService) or processpacket.LiveTCPService[Service]): - # processpacket.LiveTCPService[Service] = False - # ReportId("TS", str(p[IPerror6].dst), "TCP_" + str(p[TCPerror].dport), "closed", '', ([]), prefs, dests) - - #if Service in processpacket.SynSentToTCPService: - # #Prober is dIP. 
Probed port is meta['OrigdIP'] + ",TCP_" + OrigDPort - # if dIP not in processpacket.ClosedTCPPortsReceived: - # processpacket.ClosedTCPPortsReceived[dIP] = set() - # processpacket.ClosedTCPPortsReceived[dIP].add(Service) - # if len(processpacket.ClosedTCPPortsReceived[dIP]) >= min_closed_ports_for_scanner: - # ReportId("IP", dIP, "IP", "suspicious", 'Scanned TCP closed ports.', (['scan', ]), prefs, dests) - elif (Code == 4) and (p[IPerror6].nh == 58): #Port unreachable and embedded protocol = 58, ICMP. Seen in response to pings - pass -### IPv6/ICMPv6=58/DestUnreach=1/FailedPolicy=5 Source address failed ingress/egress policy (subset of code 1) https://tools.ietf.org/html/rfc4443 - elif Code == 5: - pass -### IPv6/ICMPv6=58/DestUnreach=1/RejectRoute=6 Reject route to destination (subset of code 1) https://tools.ietf.org/html/rfc4443 - elif Code == 6: - pass -### IPv6/ICMPv6=58/DestUnreach=1/HeaderError=7 Error in source routing header https://tools.ietf.org/html/rfc6550 https://tools.ietf.org/html/rfc6554 - elif Code == 7: - pass -### IPv6/ICMPv6=58/DestUnreach=1/Unknown - else: - ShowPacket(p, meta, "IPV6/ICMPv6/Dest Unreach=1/Unknown code", HonorQuit, prefs, dests) -### IPv6/ICMPv6=58/PacktTooBig=2 - elif p.getlayer(ICMPv6PacketTooBig): - ReportId("RO", sIP, "TooBig", "router", "client_ip=" + dIP, ([]), prefs, dests) -### IPv6/ICMPv6=58/TimeExceeded=3 - elif p.getlayer(ICMPv6TimeExceeded): - Code = p[ICMPv6TimeExceeded].code - if Code == 0: #hop limit exceeded in transit - ReportId("RO", sIP, "TTLEx", "router", "client_ip=" + dIP, ([]), prefs, dests) - else: - ShowPacket(p, meta, "IPv6/ICMPv6/ICMPv6TimeExceeded = type 3/Code = " + str(Code), HonorQuit, prefs, dests) -### IPv6/ICMPv6=58/EchoRequest=128 - elif p.getlayer(ICMPv6EchoRequest): - pass -### IPv6/ICMPv6=58/EchoReply=129 - elif p.getlayer(ICMPv6EchoReply): - ReportId("IP", sIP, "IP", "live", 'icmp echo reply', ([]), prefs, dests) -### IPv6/ICMPv6=58/ND_RouterSolicitation=133 - elif 
p.getlayer(ICMPv6ND_RS) and (dIP == "ff02:0000:0000:0000:0000:0000:0000:0002"): - pass -### IPv6/ICMPv6=58/ND_RouterAdvertisement=134 - elif p.getlayer(ICMPv6ND_RA) and (dIP == "ff02:0000:0000:0000:0000:0000:0000:0001"): - AdditionalInfo = 'hop_limit=' + str(p[ICMPv6ND_RA].chlim) - if p.getlayer(ICMPv6NDOptPrefixInfo): - AdditionalInfo = AdditionalInfo + ' net=' + str(p[ICMPv6NDOptPrefixInfo].prefix) + '/' + str(p[ICMPv6NDOptPrefixInfo].prefixlen) - if p.getlayer(ICMPv6NDOptRDNSS): - for one_dns in p[ICMPv6NDOptRDNSS].dns: - AdditionalInfo = AdditionalInfo + ' dns=' + str(one_dns) - ReportId("RO", sIP, "RouterAdv", "router", AdditionalInfo, ([]), prefs, dests) - - if p.getlayer(ICMPv6NDOptSrcLLAddr): - router_mac_addr = str(p[ICMPv6NDOptSrcLLAddr].lladdr) - ReportId("MA", sIP, 'Ethernet', router_mac_addr, '', ([]), prefs, dests) -### IPv6/ICMPv6=58/ND_NeighborSolicitation=135 https://tools.ietf.org/html/rfc4861 - elif p.getlayer(ICMPv6ND_NS) and meta['ttl'] == 255 and p[ICMPv6ND_NS].code == 0: - host_mac_addr = '' - if p.getlayer(ICMPv6NDOptSrcLLAddr): - host_mac_addr = str(p[ICMPv6NDOptSrcLLAddr].lladdr) - elif p.getlayer(Ether): - host_mac_addr = meta['sMAC'] - #else: - # pass #No source for ethernet mac addr, ignore - if host_mac_addr: - ReportId("MA", sIP, 'Ethernet', host_mac_addr, '', ([]), prefs, dests) -### IPv6/ICMPv6=58/ND_NeighborAdvertisement=136 https://tools.ietf.org/html/rfc4861 - elif p.getlayer(ICMPv6ND_NA) and p.getlayer(Ether) and meta['ttl'] == 255 and p[ICMPv6ND_NA].code == 0: - if p[ICMPv6ND_NA].R == 1: - ReportId("RO", sIP, "NeighborAdvRouterFlag", "router", '', ([]), prefs, dests) - host_mac_addr = meta['sMAC'] - ReportId("MA", sIP, 'Ethernet', host_mac_addr, '', ([]), prefs, dests) -### IPv6/ICMPv6=58/ND_Redirect=137 http://www.tcpipguide.com/free/t_ICMPv6RedirectMessages-2.htm - elif p.getlayer(ICMPv6ND_Redirect) and p.getlayer(Ether) and meta['ttl'] == 255 and p[ICMPv6ND_Redirect].code == 0: - ReportId("RO", sIP, "ND_Redirect_source", 
"router", "client_ip=" + dIP, ([]), prefs, dests) #the original complaining router - ReportId("RO", p[ICMPv6ND_Redirect].tgt, "ND_Redirect_target", "router", "client_ip=" + dIP, ([]), prefs, dests) #the better router to use - if p.getlayer(ICMPv6NDOptDstLLAddr): - ReportId("MA", p[ICMPv6ND_Redirect].tgt, 'Ethernet', p[ICMPv6NDOptDstLLAddr].lladdr, '', ([]), prefs, dests) #packet probably includes the mac address of the better router too. + if not os.path.exists(pcap_source_file): + debug_out(pcap_source_file + ' does not exist, skipping.', prefs, dests) + elif not os.access(pcap_source_file, os.R_OK): + debug_out(pcap_source_file + ' is not readable, skipping.', prefs, dests) + elif pcap_source_file.endswith('.bz2'): + os.nice(4) #Lower priority a little more for processing a compressed file + work_filename = open_bzip2_file_to_tmp_file(pcap_source_file) + delete_temp = True + elif pcap_source_file.endswith('.gz'): + os.nice(4) + work_filename = open_gzip_file_to_tmp_file(pcap_source_file) + delete_temp = True + else: #File exists and is neither a bzip2 file nor a gzip file. Process as is. + work_filename = pcap_source_file + + try: + if prefs['bpf']: + sniff(store=0, filter=prefs['bpf'], offline=work_filename, prn=lowpri_single_packet_q.put) #Default is block=True, timeout=None , so if the queue is full we'll get held up here until space is available. else: - ShowPacket(p, meta, "IPv6/ICMPv6/unhandled type", HonorQuit, prefs, dests) -### IPv6/SATNET-EXPAK=64 - elif p[IPv6].nh == 64: - UnhandledPacket(p, prefs, dests) -### IPv6/EIGRPv4 EIGRP = Enhanced Interior Gateway Routing Protocol - elif (p[IPv6].nh == 88) and dIP in ("224.0.0.10", "FF02:0:0:0:0:0:0:A"): - #224.0.0.10 for IPv4 EIGRP Routers, FF02:0:0:0:0:0:0:A for IPv6 EIGRP Routers - ReportId("RO", sIP, "EIGRP", "router", "", ([]), prefs, dests) - elif p[IPv6].nh == 88: #Different target address format, perhaps? 
- ShowPacket(p, meta, "IPv6/EIGRP unknown target IP", HonorQuit, prefs, dests) -### IPv6/OSPF=89 - elif (p[IPv6].nh == 89) and (dIP == "ff02:0000:0000:0000:0000:0000:0000:0005"): #OSPF - #https://tools.ietf.org/html/rfc5340 - UnhandledPacket(p, prefs, dests) -### IPv6/VRRP=112 - elif (p[IPv6].nh == 112) and (dIP == "ff02:0000:0000:0000:0000:0000:0000:0012"): #VRRPv6 VRRP = virtual router redundancy protocol - #https://tools.ietf.org/html/rfc5798 - UnhandledPacket(p, prefs, dests) -### IPv6/other - else: - ShowPacket(p, meta, "IPV6 unknown protocol; Next header:" + str(p[IPv6].nh), HonorQuit, prefs, dests) - - if "SuspiciousIPs" in ReportId.__dict__ and ReportId.SuspiciousIPs and (sIP in ReportId.SuspiciousIPs or dIP in ReportId.SuspiciousIPs): #Cross-function variable - SuspiciousPacket(p, prefs, dests) -### No ethernet layer - elif not p.haslayer(Ether): -### 802.11 wireless - if p.haslayer(RadioTap): - if p.haslayer(Dot11) and p.haslayer(Dot11Deauth) and p[Dot11Deauth].reason == 7: #"class3-from-nonass" - if p[Dot11].addr1 == p[Dot11].addr3: #These should be the AP mac address - ReportId("WI", "0.0.0.0", "802.11_Deauth", "Deauthentication: client=" + p[Dot11].addr2 + " AP=" + p[Dot11].addr1, "", ([]), prefs, dests) - elif p[Dot11].addr2 == p[Dot11].addr3: #These should be the AP mac address - ReportId("WI", "0.0.0.0", "802.11_Deauth", "Deauthentication: client=" + p[Dot11].addr1 + " AP=" + p[Dot11].addr2, "", ([]), prefs, dests) - else: - ShowPacket(p, meta, "802.11 Deauth", HonorQuit, prefs, dests) - elif p.haslayer(Dot11) and p.haslayer(Dot11Elt): - current_element = None - if p.haslayer(Dot11Beacon): - current_element = p.getlayer(Dot11Beacon).payload - elif p.haslayer(Dot11ProbeReq): - current_element = p.getlayer(Dot11ProbeReq).payload - elif p.haslayer(Dot11ProbeResp): - current_element = p.getlayer(Dot11ProbeResp).payload - elif p.haslayer(Dot11AssoReq): - current_element = p.getlayer(Dot11AssoReq).payload - elif p.haslayer(Dot11AssoResp): - 
current_element = p.getlayer(Dot11AssoResp).payload - elif p.haslayer(Dot11Auth): - if p[Dot11Auth].status == 0: #success - ReportId("WI", "0.0.0.0", "802.11_Auth", "success", "", ([]), prefs, dests) - else: - ShowPacket(p, meta, "802.11 Elt with unknown intermediate header", HonorQuit, prefs, dests) - if current_element: - while isinstance(current_element, Dot11Elt): #Somewhat equivalent: while not isinstance(current_element, NoPayload): - if current_element.ID == 0 and current_element.info.strip(): #ESSID - ReportId("WI", "0.0.0.0", "802.11 ESSID", current_element.info.strip().replace('\n', '').replace('\r', '').replace(',', ' '), "", ([]), prefs, dests) - current_element = current_element.payload - elif p.haslayer(Dot11) and p[Dot11].type == 0: #0 == Management - UnhandledPacket(p, prefs, dests) - elif p.haslayer(Dot11) and p[Dot11].type == 1: #1 == Control - UnhandledPacket(p, prefs, dests) - elif p.haslayer(Dot11) and p[Dot11].type == 2 and p.haslayer(LLC): #2 == Data - UnhandledPacket(p, prefs, dests) - elif p.haslayer(Dot11) and p[Dot11].type == 2 and p.haslayer(Dot11WEP): #2 == Data - ReportId("WI", "0.0.0.0", "802.11 WEP", "", "", ([]), prefs, dests) - elif p.haslayer(Dot11): - ShowPacket(p, meta, "802.11", HonorQuit, prefs, dests) - UnhandledPacket(p, prefs, dests) + sniff(store=0, offline=work_filename, prn=lowpri_single_packet_q.put) + except Scapy_Exception: + if delete_temp: + debug_out('Error opening ' + pcap_source_file + ' (temp decompressed file: ' + work_filename + ' )', prefs, dests) else: - UnhandledPacket(p, prefs, dests) - elif p.haslayer(Raw): - #Sample payload from Mac lo0 packet: \x02\x00\x00\x00E\x00\x00 Hack; strip off first 4 bytes and interpret the rest as IP. 
- encap_packet_raw = None - if p[Raw].load[0:6] in two_prelude_ip_start: - encap_packet_raw = p[Raw].load[4:] - if encap_packet_raw: - encap_packet = IP(encap_packet_raw) - processpacket(encap_packet) + debug_out('Error opening ' + pcap_source_file, prefs, dests) + + if delete_temp and work_filename != pcap_source_file and os.path.exists(work_filename): + os.remove(work_filename) + + debug_out('Finished processing packets from ' + str(pcap_source_file), prefs, dests) + else: #Neither specified, so this means sniff from all interfaces + try: + if prefs['bpf']: + sniff(store=0, filter=prefs['bpf'], prn=highpri_single_packet_q.put) else: - ShowPacket(p, meta, "Non-ethernet raw does not appear to have E\x00\x00", HonorQuit, prefs, dests) - else: - UnhandledPacket(p, prefs, dests) - #ShowPacket(p, meta, "packet has no ethernet layer", HonorQuit, prefs, dests) - elif p[Ether].type == 0x4860: #18528: ? - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x6002: #24578: MOP Remote Console - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x8001: #32769: ? - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x8035: #32821: Reverse ARP https://en.wikipedia.org/wiki/Reverse_Address_Resolution_Protocol - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x8100: #33024 = IEEE 802.1Q VLAN-tagged frames (initially Wellfleet) - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x872D: #34605 ? 
- UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x8809: #34825 LACP (builds multiple links into a trunk) - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x888E: #34958 EAPOL, EAP over LAN (IEEE 802.1X) - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x8899: #34969 Unknown - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x88A2: #34978 ATA over ethernet - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x88A7: #34983 Unknown - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x88CC: #35020 LLDP Link Layer Discovery Protocol - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x88E1: #35041 HomePlug AV MME - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x8912: #35090 unknown - UnhandledPacket(p, prefs, dests) - elif p[Ether].type == 0x9000: #36864 = Ethernet loopback protocol. http://wiki.wireshark.org/Loop - UnhandledPacket(p, prefs, dests) - else: - ShowPacket(p, meta, "Unregistered ethernet type:" + str(p[Ether].type), HonorQuit, prefs, dests) - #For a good reference on new ethernet types, see: - #http://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml - #http://www.iana.org/assignments/ethernet-numbers - #http://en.wikipedia.org/wiki/EtherType + sniff(store=0, prn=highpri_single_packet_q.put) + except Scapy_Exception: + debug_out('Attempt to listen on all interfaces failed: are you running this as root or under sudo?', prefs, dests) + raise + debug_out('Finished processing packets from ANY', prefs, dests) + -#======== Start of main code. 
======== -if __name__ == "__main__": +if __name__ == '__main__': import argparse - #List of IP addresses that should never be tagged as suspicious - if "TrustedIPs" not in __main__.__dict__: - __main__.TrustedIPs = load_json_from_file(trusted_ips_file) - if not __main__.TrustedIPs: - sys.stderr.write("Problem reading/parsing " + trusted_ips_file + ", setting to default list.\n") - sys.stderr.flush - __main__.TrustedIPs = default_trusted_ips - #write_object(trusted_ips_file, json.dumps(__main__.TrustedIPs)) - - signal.signal(signal.SIGINT, signal_handler) - - parser = argparse.ArgumentParser(description='Passer version ' + str(passerVersion)) - input_options = parser.add_mutually_exclusive_group() - input_options.add_argument('-i', '--interface', help='Interface from which to read packets (default is all interfaces)', required=False, default=None) - #input_options.add_argument('-r', '--read', help='Pcap file(s) from which to read packets (use - for stdin)', required=False, default=[], nargs='*') #Not supporting stdin at the moment - input_options.add_argument('-r', '--read', help='Pcap file(s) from which to read packets', required=False, default=[], nargs='*') + parser = argparse.ArgumentParser(description='Passer version ' + str(p_test_version)) + parser.add_argument('-i', '--interface', help='Interface(s) from which to read packets (default is all interfaces)', required=False, default=[], nargs='*') + #parser.add_argument('-r', '--read', help='Pcap file(s) from which to read packets (use - for stdin)', required=False, default=[], nargs='*') #Not supporting stdin at the moment + parser.add_argument('-r', '--read', help='Pcap file(s) from which to read packets', required=False, default=[], nargs='*') parser.add_argument('-l', '--log', help='File to which to write output csv lines', required=False, default=None) parser.add_argument('-s', '--suspicious', help='File to which to write packets to/from suspicious IPs', required=False, default=None) 
parser.add_argument('-u', '--unhandled', help='File to which to write unhandled packets', required=False, default=None) - parser.add_argument('--acks', help=argparse.SUPPRESS, required=False, default=False, action='store_true') #Left in to allows calling scripts to continue to work, not used. Old help: 'Save unhandled ack packets as well' + #parser.add_argument('--acks', help='Save unhandled ack packets as well', required=False, default=False, action='store_true') parser.add_argument('-d', '--devel', help='Enable development/debug statements', required=False, default=False, action='store_true') parser.add_argument('-q', '--quit', help='With -d, force passer to quit when debug packets are shown', required=False, default=False, action='store_true') parser.add_argument('--nxdomain', help='Show NXDomain DNS answers', required=False, default=False, action='store_true') - parser.add_argument('--creds', help='Show credentials as well', required=False, default=False, action='store_true') + #parser.add_argument('--creds', help='Show credentials as well', required=False, default=False, action='store_true') parser.add_argument('-b', '--bpf', help='BPF to restrict which packets are processed', required=False, default='') - parser.add_argument('--timestamp', help='Show timestamp and time string in 6th and 7th fields', required=False, default=False, action='store_true') - parser.add_argument('--debuglayers', required=False, default=False, action='store_true', help=argparse.SUPPRESS) #Debug scapy layers, hidden option + #parser.add_argument('--debuglayers', required=False, default=False, action='store_true', help=argparse.SUPPRESS) #Debug scapy layers, hidden option + parser.add_argument('-a', '--active', help='Perform active scanning to look up additional info', required=False, default=False, action='store_true') + parser.add_argument('--forced_interface', help='Interface to which to write active scan packets (not needed on Linux)', required=False, default=None) (parsed, 
unparsed) = parser.parse_known_args() cl_args = vars(parsed) - - debug_known_layer_lists = cl_args['debuglayers'] + cl_args['geolite_loaded'] = geolite_loaded + cl_args['scapy_traceroute_loaded'] = scapy_traceroute_loaded + cl_args['ip2asn_loaded'] = ip2asn_loaded if cl_args['bpf']: if len(unparsed) > 0: @@ -2352,151 +1023,161 @@ def processpacket(p): sys.stderr.write('Too many arguments that do not match a parameter. Any chance you did not put the bpf expression in quotes? Exiting.\n') quit() - InterfaceName = cl_args['interface'] - #Not currently used - all blocks that would use this have been commented out. - #SaveUnhandledAcks = cl_args['acks'] - ShowCredentials = cl_args['creds'] #If True , we'll include passwords in the output lines. At the time of this writing, only the snmp community string is logged when True - - destinations = {} #Do we need destinations at all, or could we just use cl_args? - destinations['unhandled'] = cl_args['unhandled'] - destinations['suspicious'] = cl_args['suspicious'] - - debug_out("Passer version " + str(passerVersion), cl_args, destinations) - - if not os.path.exists(config_dir): - os.makedirs(config_dir) - - if not has_advanced_ntp_headers: - debug_out('The version of scapy on your system does not appear to be new enough to include advanced NTP processing. If possible, please upgrade scapy.', cl_args, destinations) - - debug_out("BPFilter is " + cl_args['bpf'], cl_args, destinations) - #Hmmm, setting bpf appears not to work. It loads correctly into the variable, but the sniff command appears to ignore it. 
- - - - #To set scapy options: - #conf.verb = 0 - #conf.iface = 'eth1' #Default: every interface - #conf.nmap_base = '/usr/share/nmap/nmap-os-fingerprints' - #conf.p0f_base = '/etc/p0f.fp' - #conf.promisc = 1 - - try: - conf.sniff_promisc = 1 - except: - config.sniff_promisc = 1 + #Not currently offered as actual user-supplied parameters, but could easily be done so + cl_args['per_packet_timeout'] = 1 #_Could_ shorten this to speed up traceroutes, but if too low we may miss responses. Better to have more parallel traceroutes, below. + cl_args['hop_limit'] = 30 + cl_args['trace_v6'] = True + cl_args['max_traceroutes'] = 4 + + mkdir_p(config_dir) + mkdir_p(cache_dir) + mkdir_p(cache_dir + '/ipv4/') + mkdir_p(cache_dir + '/ipv6/') + mkdir_p(cache_dir + '/dom/') + + + + mgr = Manager() #This section sets up a shared data dictionary; all items in it must be Manager()-based shared data structures + shared_data = {} + shared_data['suspects'] = mgr.dict() + + destinations = {} #These allow us to pass down queues to lower level functions + destinations['output'] = multiprocessing.Queue(maxsize=output_qsz) + destinations['unhandled'] = multiprocessing.Queue(maxsize=unhandled_qsz) + destinations['suspicious'] = multiprocessing.Queue(maxsize=unhandled_qsz) + if cl_args['active']: + destinations['ip_lookup_asn'] = multiprocessing.Queue(maxsize=ip_lookup_qsz) + destinations['ip_lookup_geoip'] = multiprocessing.Queue(maxsize=ip_lookup_qsz) + destinations['ip_lookup_hostname'] = multiprocessing.Queue(maxsize=ip_lookup_qsz) + destinations['ip_lookup_traceroute'] = multiprocessing.Queue(maxsize=ip_lookup_qsz) + destinations['host_lookup'] = multiprocessing.Queue(maxsize=host_lookup_qsz) + #FIXME - removeme + #else: + # destinations['ip_lookup_asn'] = multiprocessing.Queue(maxsize=2) + # destinations['ip_lookup_geoip'] = multiprocessing.Queue(maxsize=2) + # destinations['ip_lookup_hostname'] = multiprocessing.Queue(maxsize=2) + # destinations['ip_lookup_traceroute'] = 
multiprocessing.Queue(maxsize=2) + # destinations['host_lookup'] = multiprocessing.Queue(maxsize=2) + + output_p = Process(name='output_p', target=output_handler, args=(shared_data, cl_args, destinations)) + output_p.start() + + debug_out(whatami('main'), cl_args, destinations) + + unhandled_p = Process(name='unhandled_p', target=unhandled_handler, args=(shared_data, cl_args, destinations)) + unhandled_p.start() + + suspicious_p = Process(name='suspicious_p', target=suspicious_handler, args=(shared_data, cl_args, destinations)) + suspicious_p.start() + + if cl_args['active']: + ip_lookup_asn_p = Process(name='ip_lookup_asn_p', target=ip_lookup_asn_handler, args=(destinations['ip_lookup_asn'], shared_data, cl_args, destinations)) + ip_lookup_asn_p.start() + + ip_lookup_geoip_p = Process(name='ip_lookup_geoip_p', target=ip_lookup_geoip_handler, args=(destinations['ip_lookup_geoip'], shared_data, cl_args, destinations)) + ip_lookup_geoip_p.start() + + ip_lookup_hostname_p = Process(name='ip_lookup_hostname_p', target=ip_lookup_hostname_handler, args=(destinations['ip_lookup_hostname'], shared_data, cl_args, destinations)) + ip_lookup_hostname_p.start() + + ip_lookup_traceroute_p = [None] * cl_args['max_traceroutes'] + for tr_index in list(range(0, cl_args['max_traceroutes'])): + ip_lookup_traceroute_p[tr_index] = Process(name='ip_lookup_traceroute_p' + str(tr_index), target=ip_lookup_traceroute_handler, args=('ip_lookup_traceroute_' + str(tr_index), destinations['ip_lookup_traceroute'], shared_data, cl_args, destinations)) + ip_lookup_traceroute_p[tr_index].start() + + host_lookup_p = Process(name='host_lookup_p', target=host_lookup_handler, args=(destinations['host_lookup'], shared_data, cl_args, destinations)) + host_lookup_p.start() + + layer_queues = {} + + #layer_queues['template'] = multiprocessing.Queue(maxsize=max_handler_qsz) + #template_p = Process(name='template_p', target=template_handler, args=(layer_queues['template'], shared_data, cl_args, 
destinations)) + #template_p.start() + + layer_queues['ARP'] = multiprocessing.Queue(maxsize=max_handler_qsz) + ARP_p = Process(name='ARP_p', target=ARP_handler, args=(layer_queues['ARP'], shared_data, cl_args, destinations)) + ARP_p.start() + + layer_queues['IP'] = multiprocessing.Queue(maxsize=max_handler_qsz) + IP_p = Process(name='IP_p', target=IP_handler, args=(layer_queues['IP'], shared_data, cl_args, destinations)) + IP_p.start() + + layer_queues['TCP'] = multiprocessing.Queue(maxsize=max_handler_qsz) + TCP_p = Process(name='TCP_p', target=TCP_handler, args=(layer_queues['TCP'], shared_data, cl_args, destinations)) + TCP_p.start() + + layer_queues['UDP'] = multiprocessing.Queue(maxsize=max_handler_qsz) + UDP_p = Process(name='UDP_p', target=UDP_handler, args=(layer_queues['UDP'], shared_data, cl_args, destinations)) + UDP_p.start() + + layer_queues['DNS'] = multiprocessing.Queue(maxsize=max_handler_qsz) + DNS_p = Process(name='DNS_p', target=DNS_handler, args=(layer_queues['DNS'], shared_data, cl_args, destinations)) + DNS_p.start() + + #Note that single_packet_handler not only reads from highpri_single_packet_queue and lowpri_single_packet_queue but also writes to one or the other when nested packets are found. Consider deadlocks. + highpri_single_packet_queue = multiprocessing.Queue(maxsize=highpri_packet_qsz) + lowpri_single_packet_queue = multiprocessing.Queue(maxsize=lowpri_packet_qsz) + layer_queues['single_packet_high'] = highpri_single_packet_queue #At the moment we only use this for shutdown, so we don't have to add both queues + single_packet_p = Process(name='single_packet_p', target=single_packet_handler, args=(highpri_single_packet_queue, lowpri_single_packet_queue, shared_data, layer_queues, cl_args, destinations)) + single_packet_p.start() + + #All _handler processes should be started above before any packet_stream_processor(s) are started below. 
+ all_packet_stream_processors = [] + for one_interface in cl_args['interface']: + new_psp = Process(name='packet_stream_processor_p', target=packet_stream_processor, args=('packet_stream_processor_interface_' + str(one_interface), one_interface, None, highpri_single_packet_queue, lowpri_single_packet_queue, shared_data, cl_args, destinations)) + new_psp.start() + all_packet_stream_processors.append(new_psp) + + if cl_args['interface'] == [] and cl_args['read'] == []: + #If the user didn't specify any files or interfaces to read from, read from all interfaces. + new_psp = Process(name='packet_stream_processor_p', target=packet_stream_processor, args=('packet_stream_processor_interface_ANY', None, None, highpri_single_packet_queue, lowpri_single_packet_queue, shared_data, cl_args, destinations)) + new_psp.start() + all_packet_stream_processors.append(new_psp) + + for one_file in cl_args['read']: + if os.path.exists(one_file): + if os.access(one_file, os.R_OK): + new_psp = Process(name='packet_stream_processor_p', target=packet_stream_processor, args=('packet_stream_processor_file_' + os.path.split(one_file)[1], None, one_file, highpri_single_packet_queue, lowpri_single_packet_queue, shared_data, cl_args, destinations)) + new_psp.start() + all_packet_stream_processors.append(new_psp) + else: + debug_out(str(one_file) + ' unreadable.', cl_args, destinations) + else: + debug_out('Cannot find ' + str(one_file), cl_args, destinations) - #Neither this nor adding "filter=cl_args['bpf']" to each sniff line seems to actually apply the bpf. Hmmm. try: - conf.filter = cl_args['bpf'] - except: - config.filter = cl_args['bpf'] - - #if exit_now: - # quit(1) - - - #read_from_stdin = False #If stdin requested, it needs to be processed last, so we remember it here. We also handle the case where the user enters '-' more than once by simply remembering it. 
- - #if cl_args['interface'] is None and cl_args['read'] == []: - #debug_out("No source specified with -i or -r, exiting.", cl_args, destinations) - #quit(1) - #debug_out('No source specified, reading from stdin.', cl_args, destinations) - #read_from_stdin = True - - - #Process normal files first - for PcapFilename in cl_args['read']: - work_filename = None - delete_temp = False - - if not PcapFilename: - debug_out("Skipping empty filename.", cl_args, destinations) - elif PcapFilename == '-': - #read_from_stdin = True - debug_out("Unable to read from stdin, exiting.", cl_args, destinations) - quit(1) - elif not os.path.exists(PcapFilename): - debug_out("No file named " + str(PcapFilename) + ", skipping.", cl_args, destinations) - elif not os.access(PcapFilename, os.R_OK): - debug_out(str(PcapFilename) + " is unreadable, skipping.", cl_args, destinations) - #By this point we have an existing, readable, non-empty, non-stdin file. Now check to see if we need to decompress it, and finally process the pcap file. - elif PcapFilename.endswith('.bz2'): - work_filename = open_bzip2_file_to_tmp_file(PcapFilename) - delete_temp = True - elif PcapFilename.endswith('.gz'): - work_filename = open_gzip_file_to_tmp_file(PcapFilename) - delete_temp = True - else: #File exists and is neither a bzip2 file nor a gzip file. Process as is. - work_filename = PcapFilename - - - if work_filename: - if False: #New scapy "stopper" feature to exit if needed; doesn't work yet, disabled. - #https://github.com/secdev/scapy/wiki/Contrib:-Code:-PatchSelectStopperTimeout - sniff(store=0, offline=work_filename, filter=cl_args['bpf'], stopperTimeout=5, stopper=exit_now, prn=lambda x: processpacket(x)) - elif False: #Old scapy "stop_filter" feature to exit if needed; doesn't work yet, disabled. - sniff(store=0, offline=work_filename, filter=cl_args['bpf'], stop_filter=exit_now_packet_param, prn=lambda x: processpacket(x)) - else: #No attempt to exit sniff loop for the moment. 
- sniff(store=0, offline=work_filename, filter=cl_args['bpf'], prn=lambda x: processpacket(x)) - - if delete_temp and work_filename != PcapFilename and os.path.exists(work_filename): - os.remove(work_filename) - + #Wait until all packet sources finish: + for one_p in all_packet_stream_processors: + one_p.join() + except KeyboardInterrupt: + pass - #Now that we've done all files, sniff from a specific interface. - if InterfaceName: - if False: #New scapy "stopper" feature to exit if needed; doesn't work yet, disabled. - #https://github.com/secdev/scapy/wiki/Contrib:-Code:-PatchSelectStopperTimeout - sniff(store=0, iface=InterfaceName, filter=cl_args['bpf'], stopperTimeout=5, stopper=exit_now, prn=lambda x: processpacket(x)) - elif False: #Old scapy "stop_filter" feature to exit if needed; doesn't work yet, disabled. - sniff(store=0, iface=InterfaceName, filter=cl_args['bpf'], stop_filter=exit_now_packet_param, prn=lambda x: processpacket(x)) - else: #No attempt to exit sniff loop for the moment. - sniff(store=0, iface=InterfaceName, filter=cl_args['bpf'], prn=lambda x: processpacket(x)) - - - #If the user didn't specify any files or interfaces to read from, read from all interfaces. - if not InterfaceName and cl_args['read'] == []: - if False: #New scapy "stopper" feature to exit if needed; doesn't work yet, disabled. - #https://github.com/secdev/scapy/wiki/Contrib:-Code:-PatchSelectStopperTimeout - sniff(store=0, filter=cl_args['bpf'], stopperTimeout=5, stopper=exit_now, prn=lambda x: processpacket(x)) - elif False: #Old scapy "stop_filter" feature to exit if needed; doesn't work yet, disabled. - sniff(store=0, filter=cl_args['bpf'], stop_filter=exit_now_packet_param, prn=lambda x: processpacket(x)) - else: #No attempt to exit sniff loop for the moment. 
- sniff(store=0, filter=cl_args['bpf'], prn=lambda x: processpacket(x)) - - #To limit to the first 500 packets, add ", count=500" at the end of the "sniff" command inside the last paren - - - generate_summary_lines() - - #Only write out if changes have been made (if no changes have been made, no point in writing the dictionary out). To test this, see if there are any entries in ReportId.NewSuspiciousIPs. - if "NewSuspiciousIPs" in ReportId.__dict__ and ReportId.NewSuspiciousIPs: #Cross-function variable - #If NewSuspiciousIPs has been initialized, so has SuspiciousIPs; no need to test for it. - - #We may be in a situation where two copies of this program running at the same time may both have changes to write. Just before writing this out, we re-read the on-disk version to pull in any changes made by other copies that finished before us. - SuspiciousIPs_at_end = load_json_from_file(suspicious_ips_file) - if SuspiciousIPs_at_end: - for one_trusted in __main__.TrustedIPs: - if one_trusted in SuspiciousIPs_at_end: - del SuspiciousIPs_at_end[one_trusted] - - #Now we copy all entries from the on-disk version (which may contain more than we originally read) into ReportId.SuspiciousIPs just before writing it back out. - for one_ip in SuspiciousIPs_at_end: - if one_ip not in ReportId.SuspiciousIPs: #Cross-function variable - ReportId.SuspiciousIPs[one_ip] = [] #Cross-function variable - for one_warning in SuspiciousIPs_at_end[one_ip]: - if one_warning not in ReportId.SuspiciousIPs[one_ip]: #Cross-function variable - ReportId.SuspiciousIPs[one_ip].append(one_warning) #Cross-function variable - - #Yes, this is shaky and still has race conditions. It's worse than using a database, and better than doing nothing at all. Worst case we lose some entries from one of the copies. 
- write_object(suspicious_ips_file, json.dumps(ReportId.SuspiciousIPs)) #Cross-function variable - - if "start_stamp" in processpacket.__dict__ and "start_string" in processpacket.__dict__ and "end_stamp" in processpacket.__dict__ and "end_string" in processpacket.__dict__: #Cross-function variable - #FIXME - move to just after sniffing done for a given source and add up the deltas into a cumulative time for all captures. - if processpacket.start_stamp and processpacket.end_stamp: #Cross-function variable - pcap_delta = processpacket.end_stamp - processpacket.start_stamp #Cross-function variable - #FIXME - switch to "verbose", and look for others like versions - debug_out("The packets processed ran from " + processpacket.start_string + " to " + processpacket.end_string + " for " + str(pcap_delta) + " seconds.", cl_args, destinations) #Cross-function variable - else: - debug_out("It does not appear the start and end stamps were set - were any packets processed?", cl_args, destinations) + #Shutdown other processes by submitting None to their respective input queues. Ideally this should be done starting with the processes that feed queues, then moving down the handler list. 
+ for shutdown_layer in layer_queues: + layer_queues[shutdown_layer].put((None, None)) + for one_q in ('ip_lookup_asn', 'ip_lookup_geoip', 'ip_lookup_hostname', 'host_lookup'): + if one_q in destinations: + destinations[one_q].put(None) + + if 'ip_lookup_traceroute' in destinations: #Because we start multiple traceroutes, we have to queue a "None" shutdown signal for each + for tr_index in list(range(0, cl_args['max_traceroutes'])): + destinations['ip_lookup_traceroute'].put(None) + + + #Wait until all other processes finish: + #template_p.join() + ARP_p.join() + IP_p.join() + TCP_p.join() + UDP_p.join() + DNS_p.join() + single_packet_p.join() + + time.sleep(1) + destinations['output'].put(None) + if 'unhandled' in destinations: + destinations['unhandled'].put(None) + if 'suspicious' in destinations: + destinations['suspicious'].put(None) + + sys.stderr.write('\nDone.\n') From 0276387bf4d31b281e46e9983b4e1986c1d9042f Mon Sep 17 00:00:00 2001 From: William Stearns Date: Mon, 26 Oct 2020 23:50:46 -0400 Subject: [PATCH 02/10] Add required utility scripts --- ip2asn.py | 172 +++++++++++++++++++++++ normalize_ip.py | 91 +++++++++++++ scapy_traceroute.py | 324 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 587 insertions(+) create mode 100755 ip2asn.py create mode 100755 normalize_ip.py create mode 100755 scapy_traceroute.py diff --git a/ip2asn.py b/ip2asn.py new file mode 100755 index 0000000..da812f6 --- /dev/null +++ b/ip2asn.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python3 +"""Takes ip addresses provided on stdin (one per line) and outputs info about the ASNs that contain them.""" +#Appears to work just fine under both python2 and python3. + +#Download https://iptoasn.com/data/ip2asn-combined.tsv.gz to current directory, gunzip it. 
+ +import os +import sys +import csv +import fileinput +from ipaddress import summarize_address_range, IPv4Address, IPv6Address #Not needed: ip_address, AddressValueError + + +#Note: netaddr removed as standard python library does not include it, but appears to include ipaddress. See v0.1 for comparison between the two - both provide equal results for all ip ranges. +ip2asn_version = '0.6.1' + + +def load_asn_table(source_file): + """Loads the subnets from ip2asn-combined.tsv.""" + #real 0m46.232s to load + + Paranoid = False #Setting this to True enables additional checks on the ip2asn-combined raw data (that descriptions and countries stay consistent) + + as_info_struct = {} #Dictionary of Lists of dictionaries. + #Top level dictionary has keys = 4_firstoctet for ipv4, 6_firsthexbyte for ipv6. Values are the lists on the next line + #next level lists have keys 0-128; their values are dictionaries. For most specific subnet, search from 128 back to 1 (32 back to 1 for ipv4) + #second level dictionaries; key = IP object, value is as_num + #Adding the first_octet level makes a significant and visual performance increase in lookup time. + + asn_country = {} #Key= as_num, value=2 letter country code + asn_description = {} #Key= as_num, value=asn description + + + if os.path.exists(source_file): + with open(source_file, 'r') as aih: + reader = csv.reader(aih, delimiter='\t') + #Format: range_start range_end AS_number country_code AS_description + for first_ip, last_ip, as_num, country, as_description in reader: + if sys.version_info < (3, 0): + first_ip = unicode(first_ip) + last_ip = unicode(last_ip) + country = unicode(country) + try: + as_description = as_description.decode('utf-8') + except UnicodeDecodeError: + sys.stderr.write("Unable to convert: " + as_description + "\n") + + #Load country and description values into dictionaries for later use. 
+ if as_num in asn_country: + if Paranoid and asn_country[as_num] != country: + sys.stderr.write("country mismatch: for asnum: " + str(as_num) + ", " + asn_country[as_num] + " != " + country + "\n") + else: + asn_country[as_num] = country + + if as_num in asn_description: + if Paranoid and asn_description[as_num] != as_description: + sys.stderr.write("description mismatch: for asnum: " + str(as_num) + ", " + asn_description[as_num] + " != " + as_description + "\n") + else: + asn_description[as_num] = as_description + + #print(first_ip + ',' + last_ip + ',' + as_num + ',' + country + ',' + as_description) + if as_num == '0' and as_description == 'Not routed': + pass + #elif as_num == '0' and as_description != 'Not routed': + # sys.stderr.write('as == 0, desc != not routed\n') + #elif as_num != '0' and as_description == 'Not routed': + # sys.stderr.write('as != 0, desc == not routed\n') + else: + if first_ip.find(':') > -1: + first_addr = IPv6Address(first_ip) + last_addr = IPv6Address(last_ip) + else: + first_addr = IPv4Address(first_ip) + last_addr = IPv4Address(last_ip) + #except: # (AddressValueError, ipaddress.AddressValueError): + + sar_cidrs = list(summarize_address_range(first_addr, last_addr)) + for one_cidr in sar_cidrs: + if one_cidr.version == 4: + first_octet = '4_' + one_cidr.exploded.split('.')[0] + elif one_cidr.version == 6: + first_octet = '6_' + one_cidr.exploded[0:2] + + if first_octet not in as_info_struct: + as_info_struct[first_octet] = [] + for cidr_len in range(0, 129): #Numbers 0 to 128 + as_info_struct[first_octet].append({}) + + one_netmask = one_cidr.prefixlen + #print("Prefixlen: " + str(one_netmask)) + if one_cidr in as_info_struct[first_octet][one_netmask]: + if Paranoid and as_info_struct[one_netmask][one_cidr] != as_num: + sys.stderr.write("For subnet " + str(one_cidr) + ", " + as_info_struct[one_netmask][one_cidr] + " != " + str(as_num) + "\n") + else: + as_info_struct[first_octet][one_netmask][one_cidr] = as_num + else: + 
sys.stderr.write("ASN Source file " + source_file + " does not exist, unable to lookup ASNs.\n") + + return as_info_struct, asn_country, asn_description + + + +def ip_asn_lookup(ip_string, as_num_d): + """Find the ASN for the given IP address or None if no match found. This returns the most specific subnet in case there are multiple matching cidr blocks.""" + #Approx 0.7 secs/lookup + + if sys.version_info < (3, 0): + ip_string = unicode(ip_string) + + if ip_string.find(':') > -1: + try: + lookup_obj = IPv6Address(ip_string) + first_octet = '6_' + lookup_obj.exploded[0:2] + except: + return None + max_index = 128 + else: + try: + lookup_obj = IPv4Address(ip_string) + first_octet = '4_' + lookup_obj.exploded.split('.')[0] + except: + return None + max_index = 32 + + + if first_octet in as_num_d: + for search_netmask in range(max_index, -1, -1): + for one_net in as_num_d[first_octet][search_netmask]: + if lookup_obj in one_net: + return as_num_d[first_octet][search_netmask][one_net] + #else: + # return None + + return None + + + +def formatted_asn_output(orig_ip_string, out_format, as_num_d, as_country_d, as_descriptions_d): + """Take supplied ip string, look up its ASN, and return a formatted output string.""" + + formatted_output = [] #List of output strings/dictionaries + + clean_ip_string = orig_ip_string.rstrip() + found_as_num = ip_asn_lookup(clean_ip_string, as_num_d) + if out_format == 'passer': + if found_as_num: + formatted_output.append('AS,' + clean_ip_string + ',AS,' + str(found_as_num) + ',' + as_descriptions_d[found_as_num].replace(',', ' ')) + if as_country_d[found_as_num] not in ('', 'Unknown'): + formatted_output.append('GE,' + clean_ip_string + ',CC,' + as_country_d[found_as_num] + ',') + elif out_format == 'json': + if found_as_num: + formatted_output.append({'Type': 'AS', 'IPAddr': clean_ip_string, 'Proto': 'AS', 'State': str(found_as_num), 'Description': as_descriptions_d[found_as_num].replace(',', ' ')}) + if as_country_d[found_as_num] not in 
('', 'Unknown'): + formatted_output.append({'Type': 'GE', 'IPAddr': clean_ip_string, 'Proto': 'CC', 'State': as_country_d[found_as_num], 'Description': ''}) + else: + if found_as_num: + formatted_output.append('IP: ' + clean_ip_string + ' ASN: ' + str(found_as_num) + ' Country: ' + as_country_d[found_as_num] + ' Description: ' + as_descriptions_d[found_as_num]) + else: + formatted_output.append('IP: ' + clean_ip_string + ' is not in any asn') + + return formatted_output + + +if __name__ == "__main__": + asn_info_file = './ip2asn-combined.tsv' + requested_format = 'passer' + + as_nums, asn_countries, asn_descriptions = load_asn_table(asn_info_file) + + for line in fileinput.input(): + for one_out in formatted_asn_output(line, requested_format, as_nums, asn_countries, asn_descriptions): + print(one_out) diff --git a/normalize_ip.py b/normalize_ip.py new file mode 100755 index 0000000..a19507f --- /dev/null +++ b/normalize_ip.py @@ -0,0 +1,91 @@ +#!/usr/bin/python +"""Converts ip addresses (ipv4 or ipv6) on stdin to fully exploded ip addresses.""" + +import ipaddress +import sys + + +Devel = False + + +def Debug(DebugStr): + """Prints a note to stderr""" + if Devel != False: + sys.stderr.write(DebugStr + '\n') + + +def ip_addr_obj(raw_addr): + """Returns an ip obj for the input string. 
The raw_addr string should already have leading and trailing whitespace removed before being handed to this function.""" + + try: + if sys.version_info > (3, 0): + raw_addr_string = str(raw_addr) + else: + raw_addr_string = unicode(raw_addr) + except UnicodeDecodeError: + raw_addr_string = '' + + #if Devel: + # Debug('Cannot convert:' + # Debug(raw_addr) + # raise + #else: + # pass + + ip_obj = None + + if raw_addr_string != '' and not raw_addr_string.endswith(('.256', '.257', '.258', '.259', '.260')): #raw_addr_string.find('.256') == -1 + try: + ip_obj = ipaddress.ip_address(raw_addr_string) + except ValueError: + #See if it's in 2.6.0.0.9.0.0.0.5.3.0.1.B.7.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1 or 260090005301B7000000000000000001 format + hex_string = raw_addr_string.replace('.', '') + colon_hex_string = hex_string[0:4] + ':' + hex_string[4:8] + ':' + hex_string[8:12] + ':' + hex_string[12:16] + ':' + hex_string[16:20] + ':' + hex_string[20:24] + ':' + hex_string[24:28] + ':' + hex_string[28:32] + try: + ip_obj = ipaddress.ip_address(colon_hex_string) + except ValueError: + if Devel: + Debug(raw_addr_string) + raise + else: + pass + + return ip_obj + + +def explode_ip(ip_obj): + """Converts the input IP object to its exploded form (type "unicode" in python2) ready for printing. 
If the IP/IP object is invalid, returns an empty string.""" + + if ip_obj is None: + return '' + else: + return ip_obj.exploded + + + +if __name__ == "__main__": + AllSucceeded = True + + for InLine in sys.stdin: + InLine = InLine.replace('\n', '').replace('\r', '') + #Debug('======== ' + InLine) + user_ip_obj = ip_addr_obj(InLine) + + if user_ip_obj is None: + AllSucceeded = False + if Devel: + print('Invalid: ' + InLine) + else: + print('') + else: + print(explode_ip(user_ip_obj)) + + #If not interested in detailed error checking, can also do: + #print(explode_ip(ip_addr_obj(InLine))) + + + if AllSucceeded: + quit(0) + else: + Debug('One or more input lines were not recognized as cidr networks or hosts') + quit(1) diff --git a/scapy_traceroute.py b/scapy_traceroute.py new file mode 100755 index 0000000..2c54628 --- /dev/null +++ b/scapy_traceroute.py @@ -0,0 +1,324 @@ +#!/usr/bin/env python3 +"""Traceroute to a remote host and return the list of IPs transited (specific entries in that list may be None if no reply from that hop). +In the case of an error such as an unresolvable target hostname, a list of (30, by default) Nones will come back.""" +#Program works fine under python2 and python3. +#Many thanks to https://jvns.ca/blog/2013/10/31/day-20-scapy-and-traceroute/ for the initial idea. + + +import os +import sys +import socket +import random +import json +import ipaddress +import errno +#from scapy.all import * +from scapy.all import ICMP, ICMPv6TimeExceeded, IP, IPv6, Raw, Scapy_Exception, UDP, sr1 # pylint: disable=no-name-in-module + + +def ip_addr_obj(raw_addr): + """Returns an ip obj for the input string. 
The raw_addr string should already have leading and trailing whitespace removed before being handed to this function.""" + + try: + if sys.version_info > (3, 0): + raw_addr_string = str(raw_addr) + else: + raw_addr_string = unicode(raw_addr) + except UnicodeDecodeError: + raw_addr_string = '' + + #if Devel: + # Debug('Cannot convert:' + # Debug(raw_addr) + # raise + #else: + # pass + + ip_obj = None + + if raw_addr_string != '' and not raw_addr_string.endswith(('.256', '.257', '.258', '.259', '.260')): #raw_addr_string.find('.256') == -1 + try: + ip_obj = ipaddress.ip_address(raw_addr_string) + except ValueError: + #See if it's in 2.6.0.0.9.0.0.0.5.3.0.1.B.7.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1 or 260090005301B7000000000000000001 format + hex_string = raw_addr_string.replace('.', '') + colon_hex_string = hex_string[0:4] + ':' + hex_string[4:8] + ':' + hex_string[8:12] + ':' + hex_string[12:16] + ':' + hex_string[16:20] + ':' + hex_string[20:24] + ':' + hex_string[24:28] + ':' + hex_string[28:32] + try: + ip_obj = ipaddress.ip_address(colon_hex_string) + except ValueError: + #if Devel: + # Debug(raw_addr_string) + # raise + #else: + pass + + return ip_obj + + +def explode_ip(ip_obj): + """Converts the input IP object to its exploded form (type "unicode" in python2) ready for printing. 
If the IP/IP object is invalid, returns an empty string.""" + + if ip_obj is None: # pylint: disable=no-else-return + return '' + else: + return ip_obj.exploded + + +def is_valid_ipv4_address(address): + """Returns True or False based on whether the address is a valid IPv4 address.""" + + try: + socket.inet_pton(socket.AF_INET, address) + except AttributeError: + try: + socket.inet_aton(address) + except socket.error: + return False + return address.count('.') == 3 + except socket.error: # not a valid address + return False + + return True + + +def is_valid_ipv6_address(address): + """Returns True or False based on whether the address is a valid IPv6 address.""" + + try: + socket.inet_pton(socket.AF_INET6, address) + except socket.error: # not a valid address + return False + return True + + +def load_json_from_file(json_filename): + """Bring in json content from a file and return it as a python data structure (or None if not successful for any reason).""" + + ljff_return = None + + if os.path.exists(json_filename) and os.access(json_filename, os.R_OK): + try: + with open(json_filename) as json_h: + ljff_return = json.loads(json_h.read()) + except: + pass + + return ljff_return + + +def write_object(filename, generic_object): + """Write out an object to a file.""" + + try: + with open(filename, "wb") as write_h: + write_h.write(generic_object.encode('utf-8')) + except: + sys.stderr.write("Problem writing " + filename + ", skipping.") + raise + + #return + + +def mkdir_p(path): + """Create an entire directory branch. Will not complain if the directory already exists.""" + + if not os.path.isdir(path): + try: + os.makedirs(path) + except FileExistsError: + pass + except OSError as exc: + if exc.errno == errno.EEXIST and os.path.isdir(path): + pass + else: + raise + + +def cache_file(parent_cache_dir, ip_addr): + """Returns the correct filename that would hold the path to that IP. 
Does not care if the file exists or not, but does create the directory that would hold it.""" + + if ':' in ip_addr: #ipv6 address + cache_obj_path = parent_cache_dir + '/ipv6/' + '/'.join(ip_addr.split(':')) + '/' + else: #ipv4 address + cache_obj_path = parent_cache_dir + '/ipv4/' + '/'.join(ip_addr.split('.')) + '/' + + mkdir_p(cache_obj_path) + + return cache_obj_path + ip_addr + '.traceroute.json' + + +def ips_of(one_target): + """Finds a list of IP addresses of the given target, which could be a hostname, an IPv4 address, or an IPv6 address.""" + + ip_list = set([]) + + if is_valid_ipv4_address(one_target): + ip_list.add(one_target) + elif is_valid_ipv6_address(one_target): + ip_list.add(explode_ip(ip_addr_obj(one_target))) + else: + if not one_target.endswith("."): + one_target += '.' + + try: + for one_result in socket.getaddrinfo(one_target, None, socket.AF_INET): + af, _, _, _, sa = one_result #Don't need socktype, proto, canonname + + if af == socket.AF_INET: + ip_list.add(sa[0]) + elif af == socket.AF_INET6: + ip_list.add(explode_ip(ip_addr_obj(sa[0]))) + else: + sys.stderr.write(str(af) + '\n') + #pass + except (socket.gaierror, KeyboardInterrupt, UnicodeError): + return ip_list + + try: + for one_result in socket.getaddrinfo(one_target, None, socket.AF_INET6): + af, _, _, _, sa = one_result #Don't need socktype, proto, canonname + + if af == socket.AF_INET: + ip_list.add(sa[0]) + elif af == socket.AF_INET6: + ip_list.add(explode_ip(ip_addr_obj(sa[0]))) + else: + sys.stderr.write(str(af) + '\n') + #pass + except (socket.gaierror, KeyboardInterrupt, UnicodeError): + return ip_list + + return ip_list + + +def traceroute_hop_list(compressed_target, required_interface, max_packet_wait, max_hops, tr_cache_dir): # pylint: disable=too-many-branches,too-many-statements + """Traceroute to the target IP address (NOT hostname) and return a list of all hops with their IPs (or None if no response).""" + #If you have a hostname, use "for one_ip in 
ips_of(target_host):" around this function. + #If tr_cache_dir is None, do not cache. If tr_cache_dir is "", use traceroute_cache_dir_default . + + target = explode_ip(ip_addr_obj(compressed_target)) + + hop_list = [None for j in range(max_hops)] + loaded_cached_list = False + + if tr_cache_dir == "": + tr_cache_dir = traceroute_cache_dir_default + if tr_cache_dir: + mkdir_p(tr_cache_dir) + + if os.path.exists(cache_file(tr_cache_dir, target)): + try: + hop_list = load_json_from_file(cache_file(tr_cache_dir, target)) + loaded_cached_list = True + except: + raise + + if not loaded_cached_list: + flowlabel_value = random.randrange(1, 2**20) + + for i in range(0, max_hops): + #sys.stderr.write('.') + + #payload_string = r"abcdefghijklmnopqrstuvwabcdefghi" #Windows ICMP traceroute + payload_string = r"@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_" #Linux UDP traceroute + + pkt = None + try: + pkt = IP(dst=target, ttl=i)/UDP(sport=random.randrange(32768, 65534), dport=33434+i)/Raw(load=payload_string) + address_layer = IP + del pkt[IP].chksum + except socket.gaierror: #We weren't able to find an IPv4 address for this host, retry with IPv6 + try: + pkt = IPv6(dst=target, hlim=i, fl=flowlabel_value)/UDP(sport=random.randrange(32768, 65534), dport=33434+i)/Raw(load=payload_string) + address_layer = IPv6 + del pkt[IPv6].chksum + except socket.gaierror: #Couldn't find IPv6 either, so assume this is a nonexistant hostname. + sys.stderr.write("No IP found for " + str(target) + ", exiting.\n") + break + del pkt[UDP].chksum + + reply = None + if required_interface: + try: + reply = sr1(pkt, verbose=0, timeout=max_packet_wait, iface=required_interface) + except Scapy_Exception: + sys.stderr.write("Unable to write to " + str(required_interface) + ". Are you running as root? 
Exiting.\n") + break + except KeyError: + pass + except IndexError: #Working around a bug in scapy's sendrecv.py/supersocket.py which gives an IndexError: pop from empty list + pass + else: + try: + reply = sr1(pkt, verbose=0, timeout=max_packet_wait) + except KeyError: + pass + except IndexError: #Working around a bug in scapy's sendrecv.py/supersocket.py which gives an IndexError: pop from empty list + pass + + #sys.stderr.write("intermediate reply: " + str(reply) + '\n') + #sys.stderr.flush() + + if reply is None: #No response received + pass #No need to fill in, we already have None's there. + elif reply.haslayer(ICMPv6TimeExceeded) or (reply.haslayer(ICMP) and reply[ICMP].type == 11): #Intermediate host (Type is time-exceeded) + hop_list[i] = explode_ip(ip_addr_obj(reply[address_layer].src)) + elif reply.haslayer('ICMPv6 Destination Unreachable') or (reply.haslayer(ICMP) and reply[ICMP].type == 3): #Reached target (Type is dest-unreach) + hop_list[i] = explode_ip(ip_addr_obj(reply[address_layer].src)) + del hop_list[i+1:] #Truncate any following nulls + break + else: #Unknown + sys.stderr.write("Unknown reply type:\n") + reply.show() + break + + #sys.stderr.write('\n') + + try: + write_object(cache_file(tr_cache_dir, target), json.dumps(hop_list)) + except: + pass + + #Loop that truncates the list by one element, saves each sublist under the intermediate IP address (and mkdir_p that address too) + truncated_path_to_ip = list(hop_list) #Make a shallow copy of the list so we don't affect the original + del truncated_path_to_ip[-1] #Loop, dropping the rightmost entry each time. Working back through the list of routers, save any that are actually routers for which we don't already have a path to that router. 
+ while truncated_path_to_ip: + if truncated_path_to_ip[-1]: #If not null, i.e., we have an actual router IP address: + router_ip = truncated_path_to_ip[-1] + if not os.path.exists(cache_file(tr_cache_dir, router_ip)): + #sys.stderr.write("____ writing router path:" + router_ip + ":" + str(truncated_path_to_ip) + #sys.stderr.flush + try: + write_object(cache_file(tr_cache_dir, router_ip), json.dumps(truncated_path_to_ip)) + except: + pass + + del truncated_path_to_ip[-1] + + return hop_list + + + +scapy_traceroute_version = '0.2.5' +per_packet_timeout_default = 1 +forced_interface_default = None +ttl_default = 30 +traceroute_cache_dir_default = os.environ["HOME"] + '/.cache/scapy_traceroute/' + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser(description='scapy_traceroute version ' + str(scapy_traceroute_version)) + parser.add_argument('-p', '--per_packet_timeout', help='Time to wait for a reply for a single packet, can be fractional (default: ' + str(per_packet_timeout_default) + ' ).', required=False, default=per_packet_timeout_default) + parser.add_argument('-f', '--forced_interface', help='Force packets through this interface (needed on macos, default: ' + str(forced_interface_default) + ' ).', required=False, default=forced_interface_default) + parser.add_argument('-t', '--ttl', help='Maximum number of hops to try (default: ' + str(ttl_default) + ')', required=False, default=ttl_default) + parser.add_argument('-c', '--cache_dir', help='Directory tree to hold cached traceroutes (default: ' + str(traceroute_cache_dir_default) + ' ). 
Use None to not cache results.', required=False, default=traceroute_cache_dir_default) + #parser.add_argument('--debug', help='Show additional debugging information on stderr', required=False, default=False, action='store_true') + (parsed, unparsed) = parser.parse_known_args() + cl_args = vars(parsed) + + for target_host in unparsed: + for one_ip in ips_of(target_host): + sys.stderr.write("==== Traceroute to: " + one_ip + '\n') + sys.stderr.flush() + print(traceroute_hop_list(one_ip, cl_args['forced_interface'], cl_args['per_packet_timeout'], int(cl_args['ttl']), cl_args['cache_dir'])) From 6e123b6e93f9f915558434aa5782d7c3837cf740 Mon Sep 17 00:00:00 2001 From: Ethan Robish Date: Tue, 27 Oct 2020 15:35:32 -0500 Subject: [PATCH 03/10] Updates for passer-ng --- Dockerfile | 6 +++--- README.md | 10 +++------- passer | 3 ++- requirements.txt | 3 ++- 4 files changed, 10 insertions(+), 12 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7d8d94e..394f2e8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:2.7-alpine +FROM python:3.9-alpine # Shorten common strings ARG GH=https://raw.githubusercontent.com @@ -9,6 +9,7 @@ ADD $GH/nmap/nmap/master/nmap-mac-prefixes $USR/nmap/nmap-mac-prefi ADD $GH/wireshark/wireshark/master/manuf $USR/wireshark/manuf ADD $GH/royhills/arp-scan/master/ieee-oui.txt $USR/arp-scan/ieee-oui.txt ADD $GH/nmap/nmap/master/nmap-service-probes $USR/nmap/nmap-service-probes +ADD $GH/p0f/p0f/v2.0.8/p0f.fp /etc/p0f/p0f.fp # tcpdump is needed by scapy to replay pcaps RUN apk update && apk add --no-cache tcpdump @@ -22,8 +23,7 @@ echo 'noenum = [ Resolve(), TCP_SERVICES, UDP_SERVICES ]' >> $HOME/.scapy_startu mkdir $HOME/.passer/ VOLUME $HOME/.passer/ -COPY passer.py /passer.py -COPY passer_lib.py /passer_lib.py +COPY *.py / ENTRYPOINT ["python", "/passer.py"] diff --git a/README.md b/README.md index 72008a6..7f1368f 100644 --- a/README.md +++ b/README.md @@ -18,9 +18,10 @@ glad to update the script. 
## Installation ### Requirements -- Python >=2.4 and <3.0 +- Python >=2.4 - Python libraries (see [requirements.txt](/requirements.txt)) - ipaddress + - maxminddb-geolite2 - pytz - scapy>=2.4.0 @@ -81,12 +82,7 @@ You can then use this script just as you would in any of the examples below. For docker run --rm --name=passer -i --init --net=host --cap-add=net_raw activecm/passer -i eth0 ``` -In order to stop passer run: - -```bash -docker stop passer -``` - +In order to stop passer, press `Ctrl-C`. ## Examples diff --git a/passer b/passer index 34cae93..a62cff7 100755 --- a/passer +++ b/passer @@ -12,6 +12,7 @@ function cpasser() { docker_cmd+=("--name" "passer") # allow easy controlling of the container docker_cmd+=("--rm") # remove the container after passer exits docker_cmd+=("--interactive") # allow sending keystrokes to passer (e.g. to shut down) and piping in a pcap + docker_cmd+=("--tty") # allow passer to write to the screen as it runs docker_cmd+=("--init") # needs tini init to properly shut down passer docker_cmd+=("--net" "host") # allow capturing on host network interfaces docker_cmd+=("--cap-add" "net_raw") # allow listening in promiscuous mode @@ -60,7 +61,7 @@ function cpasser() { esac done - docker_cmd+=("activecm/passer") + docker_cmd+=("activecm/passer:ng") # Print out the final arguments and exit for debugging #echo "docker_cmd: ${docker_cmd[@]}"; echo "passer_args: ${passer_args[@]}"; exit # debug diff --git a/requirements.txt b/requirements.txt index 20e6bc0..010ed8b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ ipaddress +maxminddb-geolite2 pytz -scapy>=2.4.0 +scapy>=2.4.0 \ No newline at end of file From 358f7f4d25b2d22618433a66d2a27ea1a06461dd Mon Sep 17 00:00:00 2001 From: Hannah Date: Sat, 21 Nov 2020 10:31:29 -0600 Subject: [PATCH 04/10] initial commit --- analysis/README.md | 27 +++++++++ analysis/analyzer.py | 133 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 160 insertions(+) create mode 100644 
analysis/README.md
 create mode 100644 analysis/analyzer.py

diff --git a/analysis/README.md b/analysis/README.md
new file mode 100644
index 0000000..0ff53a1
--- /dev/null
+++ b/analysis/README.md
@@ -0,0 +1,27 @@
+# Analyzing Passer Results
+### Running the program
+If you're working with the source code just cd to the directory of the analyzer file and run
+```Python3 analyzer.py -i ```
+The file extension does not matter, but the file must be comma separated
+### Filters
+The filter options are listed below. After running the program type ```filter``` followed by any combination of the following
+- type={TC, TS, UC, US, RO, DN, MA}
+- ip={127.0.0.1, or whatever}
+  - set 'ippref=true' to do searches such as 10.0.*
+- ipv={4, 6, 0 for all}
+- state={open, suspicious, etc...}
+
+example: If you wanted to show all ipv4 addresses that were flagged as suspicious, you would type the following
+```filter ipv=4 state=suspicious```
+or to see all TCP clients starting with address 10.0.0.*
+```filter type=TC ip=10.0.0 ippref=true```
+to reset the filters type ```reset``` at the command prompt
+
+### Commands
+- reset --resets the filters
+- show --shows the results (shrinks to fit on screen)
+- show-all --shows all results (use with caution)
+- quit --gracefully exits the program
+
+### Bugz
+feel free to report bugs or suggestions to hcartier@activecountermeasures.com
\ No newline at end of file
diff --git a/analysis/analyzer.py b/analysis/analyzer.py
new file mode 100644
index 0000000..5ca64e5
--- /dev/null
+++ b/analysis/analyzer.py
@@ -0,0 +1,133 @@
+import pandas as pd
+from numpy import sum
+import sys
+
+
+class Options:
+    def __init__(self):
+        self.type = ''
+        self.ip = ''
+        self.state = ''
+        self.port = ''
+        self.ippref = False
+        self.protocol = ''
+        self.ip_version = 0  # 0 == any
+        self.des = ''
+
+    def reset(self):
+        self.type = ''
+        self.ip = ''
+        self.state = ''
+        self.port = ''
+        self.ippref = False
+        self.protocol = ''
+        self.ip_version = 0
+        self.des = ''
+
+
+# TODO: implement buffering for large files? +def load(filename): + df = pd.read_csv(filename, names=['Type', 'IPAddress', 'Port/info', 'State', 'description'], + header=None, error_bad_lines=False) + op = (df.State.values == 'open').sum() + warnings = (df['description'].str.startswith('Warning')).sum() + suspicious = (df.State.values == 'suspicious').sum() + n = len(pd.unique(df['IPAddress'])) + print(len(df), "records,", n, "distinct addresses,", op, "open ports", suspicious, "suspicious entries,", warnings, + "warnings") + return df + + +# shows every entry in the dataframe as a string. Output can be a lot... +def show_all(dframe): + pd.reset_option('max_columns') + sys.stdout.flush() + if len(dframe) == 0: # faster than the builtin .empty function + print("Nothing to see here :)") + return + df_string = dframe.to_string(index=False) + print(df_string) + + +def show(dframe): + if len(dframe) == 0: # faster than the builtin .empty function + print("Nothing to see here :)") + return + + warnings = (dframe['description'].str.startswith('Warning')).sum() + suspicious = (dframe.State.values == 'suspicious').sum() + n = len(pd.unique(dframe['IPAddress'])) + print(len(dframe), "records,", n, "distinct addresses,", suspicious, "suspicious entries,", warnings, "warnings") + + print(dframe) + + +def wraper_function(dframe, options): + if options.state != '': + dframe = dframe.loc[dframe['State'] == options.state] + + if options.port != '': + dframe = dframe[dframe['Port/info'].str.contains(options.port, na=False)] + + if options.ip_version == 6: + dframe = dframe[dframe['IPAddress'].str.contains(':', na=False)] + elif options.ip_version == 4: + dframe = dframe[~dframe['IPAddress'].str.contains(':', na=False)] + + if options.type != '': + dframe = dframe[dframe['Type'] == (options.type.upper())] + + if options.ippref: + dframe = dframe[dframe['IPAddress'].str.startswith(options.ip, na=False)] + elif options.ip != '': + dframe = dframe[dframe['IPAddress'] == options.ip] + 
+ if options.des != '': + dframe = dframe[dframe['description'].str.contains(options.des, na=False)] + + return dframe + +# TODO: add sorting and exporting +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser(description='Passer analytics tool.') + parser.add_argument('-i', '--logfile', help='file to ingest', required=True, default='', nargs=1) + (parsed, unparsed) = parser.parse_known_args() + cl_args = vars(parsed) + df = load(cl_args['logfile'][0]) + opts = Options() + while True: + command = (input('>')).lower() + if command[:6] == 'filter': + rol = (command[6:]).split() + for item in rol: + if item[:5] == 'type=': + opts.type = (item[5:]).upper() + if item[:5] == 'port=': + opts.port = (item[5:]).upper() + if item[:6] == 'state=': + opts.state = item[6:] + if item[:4] == 'ipv=': + opts.ip_version = int(item[4:]) + if item[:3] == 'ip=': + opts.ip = item[3:] + if item[:7] == 'ippref=': + if item[7] == 't': + opts.ippref = True + else: + opts.ippref = False + if item[:12] == 'description=': + opts.des = item[12:] + elif command[:8] == 'show-all': + ndf = wraper_function(df, opts) + show_all(ndf) + elif command[:4] == 'show': + ndf = wraper_function(df, opts) + show(ndf) + elif command == 'reset': + opts.reset() + elif command == 'quit': + exit(0) + else: + print("Unrecognised command") From a99463bdfcc295ce20ab8cf38973b27f6730b53d Mon Sep 17 00:00:00 2001 From: David Quartarolo Date: Tue, 18 Jan 2022 16:39:41 -0500 Subject: [PATCH 05/10] Create Database and Insert Methods --- passive/__init__.py | 0 passive/passive_data.py | 123 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 123 insertions(+) create mode 100644 passive/__init__.py create mode 100644 passive/passive_data.py diff --git a/passive/__init__.py b/passive/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/passive/passive_data.py b/passive/passive_data.py new file mode 100644 index 0000000..f0f5be3 --- /dev/null +++ b/passive/passive_data.py @@ 
-0,0 +1,123 @@ +from os.path import exists +import sqlite3 + + + +class passive_data: + + @staticmethod + def create_con(): + '''Create Database Connection''' + return sqlite3.connect('signature.db') + + + @staticmethod + def setup_db(): + '''Create Sqlite3 DB with all required tables''' + if exists('signature.db'): + pass + else: + with open('signature.db', 'x') as fp: + pass + conn = sqlite3.connect('signature.db') + # Create Signature Table + conn.execute('''CREATE TABLE "author" ( + "id" INTEGER NOT NULL UNIQUE, + "name" TEXT NOT NULL, + "email" TEXT, + "github" TEXT, + PRIMARY KEY("id" AUTOINCREMENT) + )''') + #Create Device Table + conn.execute('''CREATE TABLE "device" ( + "id" INTEGER NOT NULL UNIQUE, + "type" TEXT NOT NULL, + "vendor" TEXT, + "url" TEXT, + PRIMARY KEY("id" AUTOINCREMENT) + )''') + #Create OS Table + conn.execute('''CREATE TABLE "os" ( + "id" INTEGER NOT NULL, + "name" TEXT, + "version" TEXT, + "class" TEXT, + "vendor" TEXT, + "url" TEXT, + PRIMARY KEY("id" AUTOINCREMENT) + )''') + # Create Signatures Table + conn.execute('''CREATE TABLE "signatures" ( + "id" INTEGER NOT NULL UNIQUE, + "acid" INTEGER UNIQUE, + "tcp_flag" TEXT, + "ver" TEXT NOT NULL, + "ittl" INTEGER, + "olen" INTEGER, + "mss" TEXT, + "wsize" TEXT, + "scale" TEXT, + "olayout" TEXT, + "quirks" TEXT, + "pclass" TEXT, + "comments" TEXT, + "os_id" INTEGER, + "device_id" INTEGER, + "author_id" INTEGER, + FOREIGN KEY("os_id") REFERENCES "os"("id"), + FOREIGN KEY("author_id") REFERENCES "author"("id"), + FOREIGN KEY("device_id") REFERENCES "device"("id"), + PRIMARY KEY("id" AUTOINCREMENT) + );''') + conn.close() + return True + + @staticmethod + def author_insert(conn, name, email, github): + '''Insert Statement for the Author Table''' + entry = conn.execute('SELECT id FROM author WHERE (name=? 
AND email=?)', (name, email)) + entry = entry.fetchone() + if entry is None: + author_id = conn.execute("insert into author (name, email, github) values (?, ?, ?)", (name, email, github)) + conn.commit() + author_id = author_id.lastrowid + else: + author_id = entry[0] + return author_id + + @staticmethod + def os_insert(conn, name, version, os_class, vendor, url): + '''Insert Statement for the OS Table''' + entry = conn.execute('SELECT id FROM os WHERE (name=? AND version=? AND class=? AND vendor=?', (name, version, os_class, vendor)) + entry = entry.fetchone() + if entry is None: + os_id = conn.execute("insert into os (name, version, class, vendor, url) values (?, ?, ?, ?, ?)", (name, version, os_class, vendor, url)) + conn.commit() + os_id = os_id.lastrowid + else: + os_id = entry[0] + return os_id + + @staticmethod + def device_insert(conn, device_type, vendor, url): + '''Insert Statement for the Device Table''' + entry = conn.execute('SELECT id FROM device WHERE (type=? AND vendor=? 
AND url=?', (device_type, vendor, url)) + entry = entry.fetchone() + if entry is None: + device_id = conn.execute("insert into device (type, vendor, url) values (?, ?, ?)",(device_type, vendor, url)) + conn.commit() + device_id = device_id.lastrowid + else: + device_id = entry[0] + return device_id + + @staticmethod + def signature_insert(conn, acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id): + '''Insert Statement for the Signature Table''' + entry = conn.execute('SELECT id FROM signatures WHERE (acid=?)', (acid)) + entry = entry.fetchone() + if entry is None: + conn.execute("insert into signatures (acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + (acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id)) + conn.commit() + return True From 8a8e102f57235f2e7558b4ad5a0a02815e8fa565 Mon Sep 17 00:00:00 2001 From: David Quartarolo Date: Fri, 28 Jan 2022 20:21:16 -0500 Subject: [PATCH 06/10] Add VScode and SQlite files to gitignore --- .gitignore | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.gitignore b/.gitignore index 894a44c..b1cadc6 100644 --- a/.gitignore +++ b/.gitignore @@ -102,3 +102,9 @@ venv.bak/ # mypy .mypy_cache/ + +# Sqlite DB files +*.db + +# VScode Launch.json files +.vscode/ \ No newline at end of file From 99f3b03c3b10af2db648c93669fa24a78a624bab Mon Sep 17 00:00:00 2001 From: David Quartarolo Date: Sun, 30 Jan 2022 13:25:05 -0500 Subject: [PATCH 07/10] Create Database for passive fingerprinting. 
--- passer.py | 23 +++ passive_fingerprinting/__init__.py | 6 + passive_fingerprinting/passive_data.py | 193 +++++++++++++++++++ passive_fingerprinting/tests/__init__.py | 0 passive_fingerprinting/tests/test_passive.py | 45 +++++ tcp_extract.v0.2.5.py | 105 ++++++++++ 6 files changed, 372 insertions(+) create mode 100644 passive_fingerprinting/__init__.py create mode 100644 passive_fingerprinting/passive_data.py create mode 100644 passive_fingerprinting/tests/__init__.py create mode 100644 passive_fingerprinting/tests/test_passive.py create mode 100644 tcp_extract.v0.2.5.py diff --git a/passer.py b/passer.py index 014a12d..1b10a40 100755 --- a/passer.py +++ b/passer.py @@ -19,6 +19,11 @@ import codecs #For python2 utf-8 writing #from scapy.all import sniff, Scapy_Exception, PcapWriter from scapy.all import * #Please make sure you have an up-to-date version of scapy, at least 2.4.0 +# Imports for passive_fingerprinting feature. +from passive_fingerprinting.passive_data import passive_data +from passive_fingerprinting.passive_data import pull_data +from passive_fingerprinting.passive_data import tcp_sig + sys.path.insert(0, '.') #Allows us to load from the current directory (There was one claim that we need to create an empty file __init__.py , but this does not appear to be required.) 
@@ -1003,6 +1008,7 @@ def packet_stream_processor(name_param, pcap_interface, pcap_source_file, highpr #parser.add_argument('--debuglayers', required=False, default=False, action='store_true', help=argparse.SUPPRESS) #Debug scapy layers, hidden option parser.add_argument('-a', '--active', help='Perform active scanning to look up additional info', required=False, default=False, action='store_true') parser.add_argument('--forced_interface', help='Interface to which to write active scan packets (not needed on Linux)', required=False, default=None) + parser.add_argument('-p', '--passive-fingerprinting', help='Enable Passive Fingerprinting Capabilities.', required=False, default=False, action='store_true') (parsed, unparsed) = parser.parse_known_args() cl_args = vars(parsed) @@ -1035,6 +1041,23 @@ def packet_stream_processor(name_param, pcap_interface, pcap_source_file, highpr mkdir_p(cache_dir + '/ipv6/') mkdir_p(cache_dir + '/dom/') + # If Passive Finger Printing Capability is enabled. + if cl_args['passive_fingerprinting']: + passive_data.setup_db() + conn = passive_data.create_con() + if passive_data.test_github_con(): + tcp_sig_data = pull_data.import_data() + + # Iterate over JSON Objects + for i in tcp_sig_data['signature_list']: + try: + signature = tcp_sig(i) + author_id = passive_data.author_insert(conn, signature.author, signature.author_email, signature.author_github) + os_id = passive_data.os_insert(conn, signature.os_name, signature.os_version, signature.os_class, signature.os_vendor, signature.os_url) + device_id = passive_data.device_insert(conn, signature.device_type, signature.device_vendor, signature.device_url) + passive_data.signature_insert(conn, signature.sig_acid, signature.sig_tcp_flag, signature.signature['ver'], signature.signature['ittl'], signature.signature['olen'], signature.signature['mss'], signature.signature['wsize'], signature.signature['scale'], signature.signature['olayout'], signature.signature['quirks'], 
signature.signature['pclass'], signature.sig_comments, os_id, device_id, author_id) + except Exception as e: + print(e) mgr = Manager() #This section sets up a shared data dictionary; all items in it must be Manager()-based shared data structures diff --git a/passive_fingerprinting/__init__.py b/passive_fingerprinting/__init__.py new file mode 100644 index 0000000..2569fa0 --- /dev/null +++ b/passive_fingerprinting/__init__.py @@ -0,0 +1,6 @@ +from os.path import exists +import sqlite3 + +from .passive_data import passive_data +from .passive_data import pull_data +from .passive_data import tcp_sig \ No newline at end of file diff --git a/passive_fingerprinting/passive_data.py b/passive_fingerprinting/passive_data.py new file mode 100644 index 0000000..c9ba547 --- /dev/null +++ b/passive_fingerprinting/passive_data.py @@ -0,0 +1,193 @@ +from os.path import exists +import sqlite3 +import urllib.request + + + +class passive_data: + """ + A class filled with static methods that interacts with the sqlite database. 
+ """ + + @staticmethod + def test_github_con(): + '''Tests Internet Connection to Github.com''' + test_result = urllib.request.urlopen("https://www.github.com").getcode() + if test_result == 200: + return True + else: + return False + + @staticmethod + def create_con(): + '''Create Database Connection''' + return sqlite3.connect('signature.db') + + + @staticmethod + def setup_db(): + '''Create Sqlite3 DB with all required tables''' + if exists('signature.db'): + pass + else: + with open('signature.db', 'x') as fp: + pass + conn = sqlite3.connect('signature.db') + # Create Signature Table + conn.execute('''CREATE TABLE "author" ( + "id" INTEGER NOT NULL UNIQUE, + "name" TEXT NOT NULL, + "email" TEXT, + "github" TEXT, + PRIMARY KEY("id" AUTOINCREMENT) + )''') + #Create Device Table + conn.execute('''CREATE TABLE "device" ( + "id" INTEGER NOT NULL UNIQUE, + "type" TEXT NOT NULL, + "vendor" TEXT, + "url" TEXT, + PRIMARY KEY("id" AUTOINCREMENT) + )''') + #Create OS Table + conn.execute('''CREATE TABLE "os" ( + "id" INTEGER NOT NULL, + "name" TEXT, + "version" TEXT, + "class" TEXT, + "vendor" TEXT, + "url" TEXT, + PRIMARY KEY("id" AUTOINCREMENT) + )''') + # Create Signatures Table + conn.execute('''CREATE TABLE "signatures" ( + "id" INTEGER NOT NULL UNIQUE, + "acid" INTEGER UNIQUE, + "tcp_flag" TEXT, + "ver" TEXT NOT NULL, + "ittl" INTEGER, + "olen" INTEGER, + "mss" TEXT, + "wsize" TEXT, + "scale" TEXT, + "olayout" TEXT, + "quirks" TEXT, + "pclass" TEXT, + "comments" TEXT, + "os_id" INTEGER, + "device_id" INTEGER, + "author_id" INTEGER, + FOREIGN KEY("os_id") REFERENCES "os"("id"), + FOREIGN KEY("author_id") REFERENCES "author"("id"), + FOREIGN KEY("device_id") REFERENCES "device"("id"), + PRIMARY KEY("id" AUTOINCREMENT) + );''') + conn.close() + return True + + @staticmethod + def author_insert(conn, name, email, github): + '''Insert Statement for the Author Table''' + entry = conn.execute('SELECT id FROM author WHERE (name=? 
AND email=?)', (name, email)) + entry = entry.fetchone() + if entry is None: + author_id = conn.execute("insert into author (name, email, github) values (?, ?, ?)", (name, email, github)) + conn.commit() + author_id = author_id.lastrowid + else: + author_id = entry[0] + return author_id + + @staticmethod + def os_insert(conn, name, version, os_class, vendor, url): + '''Insert Statement for the OS Table''' + entry = conn.execute('SELECT id FROM os WHERE (name=? AND version=? AND class=? AND vendor=?)', (name, version, os_class, vendor)) + entry = entry.fetchone() + if entry is None: + os_id = conn.execute("insert into os (name, version, class, vendor, url) values (?, ?, ?, ?, ?)", (name, version, os_class, vendor, url)) + conn.commit() + os_id = os_id.lastrowid + else: + os_id = entry[0] + return os_id + + @staticmethod + def device_insert(conn, device_type, vendor, url): + '''Insert Statement for the Device Table''' + entry = conn.execute('SELECT id FROM device WHERE (type=? AND vendor=? 
AND url=?)', (device_type, vendor, url)) + entry = entry.fetchone() + if entry is None: + device_id = conn.execute("insert into device (type, vendor, url) values (?, ?, ?)",(device_type, vendor, url)) + conn.commit() + device_id = device_id.lastrowid + else: + device_id = entry[0] + return device_id + + @staticmethod + def signature_insert(conn, acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id): + '''Insert Statement for the Signature Table''' + entry = conn.execute('SELECT id FROM signatures WHERE (acid=?)', ([acid])) + entry = entry.fetchone() + if entry is None: + conn.execute("insert into signatures (acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id)) + conn.commit() + return True + + + +class pull_data: + """ + A class that contains a method that: + * Loads a json file from github into memory. + * Dumps the json into the sqlite database. + + The use of class methods is used so that class variables can be overrided for testing. + ... + + Class Variables + ---------- + url : str + URL of raw json file that contains TCP Signatures. + """ + + import json + import urllib.request + url = "https://raw.githubusercontent.com/activecm/tcp-sig-json/testing-data/tcp-sig.json" + + @classmethod + def import_data(cls): + """Imports TCP Signatures from raw JSON file hosted on Github.""" + with cls.urllib.request.urlopen(cls.url) as f: + data = cls.json.load(f) + return data + + + +class tcp_sig: + """ + Data mapping class that takes a TCP Signature object and inserts it into the sqlite database. 
+ """ + + def __init__(self, tcp_sig_obj): + self.version = tcp_sig_obj['version']['name'] + self.rev = tcp_sig_obj['version']['rev'] + self.date = tcp_sig_obj['version']['date'] + self.os_name = tcp_sig_obj['os']['name'] + self.os_version = tcp_sig_obj['os']['version'] + self.os_class = tcp_sig_obj['os']['class'] + self.os_vendor = tcp_sig_obj['os']['vendor'] + self.os_url = tcp_sig_obj['os']['url'] + self.device_type = tcp_sig_obj['device']['type'] + self.device_vendor = tcp_sig_obj['device']['vendor'] + self.device_url = tcp_sig_obj['device']['url'] + self.sig_acid = tcp_sig_obj['signatures']['acid'] + self.sig_tcp_flag = tcp_sig_obj['signatures']['tcp_flag'] + self.sig_tcp_sig = tcp_sig_obj['signatures']['tcp_sig'] + self.sig_comments = tcp_sig_obj['signatures']['comments'] + self.author = tcp_sig_obj['author']['name'] + self.author_email = tcp_sig_obj['author']['email'] + self.author_github = tcp_sig_obj['author']['github'] + self.signature = dict(zip(['ver', 'ittl', 'olen', 'mss', 'wsize', 'scale', 'olayout', 'quirks', 'pclass'], self.sig_tcp_sig.split(':'))) + + diff --git a/passive_fingerprinting/tests/__init__.py b/passive_fingerprinting/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/passive_fingerprinting/tests/test_passive.py b/passive_fingerprinting/tests/test_passive.py new file mode 100644 index 0000000..aa5c117 --- /dev/null +++ b/passive_fingerprinting/tests/test_passive.py @@ -0,0 +1,45 @@ +import unittest +import pathlib as pl +import sqlite3 + +from passive_fingerprinting import passive_data + + +class passive_data_testcase(unittest.TestCase): + def test_setup_db_1(self): + '''UT to ensure DB file gets created.''' + passive_data.setup_db() + path = pl.Path('signature.db') + self.assertTrue(path.is_file()) + + def test_setup_db_2(self): + '''UT to ensure author table was created.''' + con = sqlite3.connect('signature.db') + cur = con.cursor() + list_of_tables = cur.execute("""SELECT name FROM sqlite_master WHERE 
type='table'AND name='author'; """).fetchall() + self.assertTrue(('author',) in list_of_tables) + + def test_setup_db_3(self): + '''UT to ensure device table was created.''' + con = sqlite3.connect('signature.db') + cur = con.cursor() + list_of_tables = cur.execute("""SELECT name FROM sqlite_master WHERE type='table'AND name='device'; """).fetchall() + self.assertTrue(('device',) in list_of_tables) + + def test_setup_db_4(self): + '''UT to ensure os table was created.''' + con = sqlite3.connect('signature.db') + cur = con.cursor() + list_of_tables = cur.execute("""SELECT name FROM sqlite_master WHERE type='table'AND name='os'; """).fetchall() + self.assertTrue(('os',) in list_of_tables) + + def test_setup_db_5(self): + '''UT to ensure signatures table was created.''' + con = sqlite3.connect('signature.db') + cur = con.cursor() + list_of_tables = cur.execute("""SELECT name FROM sqlite_master WHERE type='table'AND name='signatures'; """).fetchall() + self.assertTrue(('signatures',) in list_of_tables) + + def test_test_github_con_1(self): + '''UT to ensure Github connection test is successful.''' + self.assertTrue(passive_data.test_github_con()) \ No newline at end of file diff --git a/tcp_extract.v0.2.5.py b/tcp_extract.v0.2.5.py new file mode 100644 index 0000000..bf4cbe7 --- /dev/null +++ b/tcp_extract.v0.2.5.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3 +"""Listen for packets on an interface and do something with the TCP header fields on SYN packets.""" +#Copyright 2018-2021 William Stearns + + +__version__ = '0.2.5' + +__author__ = 'William Stearns' +__copyright__ = 'Copyright 2018-2021, William Stearns' +__credits__ = ['William Stearns'] +__email__ = 'william.l.stearns@gmail.com' +__license__ = 'GPL 3.0' +__maintainer__ = 'William Stearns' +__status__ = 'Prototype' #Prototype, Development or Production + + +import os +import sys +from scapy.all import sniff, Raw, Scapy_Exception, IP, IPv6, TCP # pylint: disable=no-name-in-module + + + +def 
debug_out(output_string): + """Send debuging output to stderr.""" + + if cl_args['devel']: + sys.stderr.write(output_string + '\n') + sys.stderr.flush() + + +def processpacket(p): + """Process a single packet; for this tool that means extracting the TCP fields and TCP payload and doing something with them.""" + + + if ((p.haslayer(IP) and p[IP].proto == 6) or (p.haslayer(IPv6) and p[IPv6].nh == 6)) and p.haslayer(TCP) and isinstance(p[TCP], TCP): # pylint: disable=too-many-boolean-expressions + if (p[TCP].flags & 0x17) == 0x02: #SYN (ACK, RST, and FIN off) + tcp_attributes = {} + tcp_attributes['sport'] = p[TCP].sport + tcp_attributes['dport'] = p[TCP].dport + tcp_attributes['seq'] = p[TCP].seq + tcp_attributes['ack'] = p[TCP].ack + tcp_attributes['dataofs'] = p[TCP].dataofs + tcp_attributes['reserved'] = p[TCP].reserved + tcp_attributes['flags'] = p[TCP].flags + tcp_attributes['window'] = p[TCP].window + tcp_attributes['chksum'] = p[TCP].chksum + tcp_attributes['urgptr'] = p[TCP].urgptr + tcp_attributes['options'] = p[TCP].options + + if p.getlayer(Raw): + Payload = p.getlayer(Raw).load #Note, binary value. Use force_string to make a string + else: + Payload = b"" + + #At this point we have the fields from the TCP header in tcp_attributes and the Payload - if any - in Payload. (Payload will normally be empty on a SYN, but this is not a rule (BSD's actually stuff early payload into syn packets so as soon as the handshake is done the server end has data to work with.) + #Where would you like to send it? 
+ #p[TCP].show() + #p.show() + print(tcp_attributes) + if Payload: + print("Payload: " + str(Payload)) + #sys.exit(2) + + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser(description='tcp_extract version ' + str(__version__)) + parser.add_argument('-i', '--interface', help='Interface from which to read packets', required=False, default=None) + parser.add_argument('-r', '--read', help='Pcap file(s) from which to read packets', required=False, default=[], nargs='*') + parser.add_argument('-d', '--devel', help='Enable development/debug statements', required=False, default=False, action='store_true') + parser.add_argument('-b', '--bpf', help='BPF to restrict which packets are processed', required=False, default='') + parser.add_argument('-c', '--count', help='Number of packets to sniff (if not specified, sniff forever/until end of pcap file)', type=int, required=False, default=None) + (parsed, unparsed) = parser.parse_known_args() + cl_args = vars(parsed) + + debug_out("BPF we'll use is: " + cl_args['bpf']) + + if cl_args['interface']: + try: + if cl_args['count']: + sniff(store=0, iface=cl_args['interface'], filter=cl_args['bpf'], count=cl_args['count'], prn=lambda x: processpacket(x)) # pylint: disable=unnecessary-lambda + else: + sniff(store=0, iface=cl_args['interface'], filter=cl_args['bpf'], prn=lambda x: processpacket(x)) # pylint: disable=unnecessary-lambda + except Scapy_Exception: + debug_out('Attempt to listen on an interface failed: are you running this as root or under sudo?') + sys.stderr.write('\n') + sys.stderr.flush() + elif cl_args['read']: + for one_pcap in cl_args['read']: + if os.path.exists(one_pcap): + if os.access(one_pcap, os.R_OK): + if cl_args['count']: + sniff(store=0, offline=one_pcap, filter=cl_args['bpf'], count=cl_args['count'], prn=lambda x: processpacket(x)) # pylint: disable=unnecessary-lambda + else: + sniff(store=0, offline=one_pcap, filter=cl_args['bpf'], prn=lambda x: processpacket(x)) # pylint: 
disable=unnecessary-lambda + else: + debug_out(str(one_pcap) + ' unreadable, skipping.') + else: + debug_out("Unable to read " + one_pcap + ", skipping.") + sys.stderr.write('\n') + sys.stderr.flush() + else: + debug_out("No interface or pcap file specified, exiting.") From e2604dba9b6d6990a8af91dc19394f6308b2e441 Mon Sep 17 00:00:00 2001 From: David Quartarolo Date: Tue, 8 Feb 2022 16:32:45 -0500 Subject: [PATCH 08/10] Added Quirks --- passive/__init__.py | 0 passive/passive_data.py | 123 ----------------- passive_fingerprinting/passive_data.py | 2 - passive_fingerprinting/signature_matching.py | 132 +++++++++++++++++++ test.py | 28 ++++ 5 files changed, 160 insertions(+), 125 deletions(-) delete mode 100644 passive/__init__.py delete mode 100644 passive/passive_data.py create mode 100644 passive_fingerprinting/signature_matching.py create mode 100644 test.py diff --git a/passive/__init__.py b/passive/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/passive/passive_data.py b/passive/passive_data.py deleted file mode 100644 index f0f5be3..0000000 --- a/passive/passive_data.py +++ /dev/null @@ -1,123 +0,0 @@ -from os.path import exists -import sqlite3 - - - -class passive_data: - - @staticmethod - def create_con(): - '''Create Database Connection''' - return sqlite3.connect('signature.db') - - - @staticmethod - def setup_db(): - '''Create Sqlite3 DB with all required tables''' - if exists('signature.db'): - pass - else: - with open('signature.db', 'x') as fp: - pass - conn = sqlite3.connect('signature.db') - # Create Signature Table - conn.execute('''CREATE TABLE "author" ( - "id" INTEGER NOT NULL UNIQUE, - "name" TEXT NOT NULL, - "email" TEXT, - "github" TEXT, - PRIMARY KEY("id" AUTOINCREMENT) - )''') - #Create Device Table - conn.execute('''CREATE TABLE "device" ( - "id" INTEGER NOT NULL UNIQUE, - "type" TEXT NOT NULL, - "vendor" TEXT, - "url" TEXT, - PRIMARY KEY("id" AUTOINCREMENT) - )''') - #Create OS Table - conn.execute('''CREATE TABLE 
"os" ( - "id" INTEGER NOT NULL, - "name" TEXT, - "version" TEXT, - "class" TEXT, - "vendor" TEXT, - "url" TEXT, - PRIMARY KEY("id" AUTOINCREMENT) - )''') - # Create Signatures Table - conn.execute('''CREATE TABLE "signatures" ( - "id" INTEGER NOT NULL UNIQUE, - "acid" INTEGER UNIQUE, - "tcp_flag" TEXT, - "ver" TEXT NOT NULL, - "ittl" INTEGER, - "olen" INTEGER, - "mss" TEXT, - "wsize" TEXT, - "scale" TEXT, - "olayout" TEXT, - "quirks" TEXT, - "pclass" TEXT, - "comments" TEXT, - "os_id" INTEGER, - "device_id" INTEGER, - "author_id" INTEGER, - FOREIGN KEY("os_id") REFERENCES "os"("id"), - FOREIGN KEY("author_id") REFERENCES "author"("id"), - FOREIGN KEY("device_id") REFERENCES "device"("id"), - PRIMARY KEY("id" AUTOINCREMENT) - );''') - conn.close() - return True - - @staticmethod - def author_insert(conn, name, email, github): - '''Insert Statement for the Author Table''' - entry = conn.execute('SELECT id FROM author WHERE (name=? AND email=?)', (name, email)) - entry = entry.fetchone() - if entry is None: - author_id = conn.execute("insert into author (name, email, github) values (?, ?, ?)", (name, email, github)) - conn.commit() - author_id = author_id.lastrowid - else: - author_id = entry[0] - return author_id - - @staticmethod - def os_insert(conn, name, version, os_class, vendor, url): - '''Insert Statement for the OS Table''' - entry = conn.execute('SELECT id FROM os WHERE (name=? AND version=? AND class=? AND vendor=?', (name, version, os_class, vendor)) - entry = entry.fetchone() - if entry is None: - os_id = conn.execute("insert into os (name, version, class, vendor, url) values (?, ?, ?, ?, ?)", (name, version, os_class, vendor, url)) - conn.commit() - os_id = os_id.lastrowid - else: - os_id = entry[0] - return os_id - - @staticmethod - def device_insert(conn, device_type, vendor, url): - '''Insert Statement for the Device Table''' - entry = conn.execute('SELECT id FROM device WHERE (type=? AND vendor=? 
AND url=?', (device_type, vendor, url)) - entry = entry.fetchone() - if entry is None: - device_id = conn.execute("insert into device (type, vendor, url) values (?, ?, ?)",(device_type, vendor, url)) - conn.commit() - device_id = device_id.lastrowid - else: - device_id = entry[0] - return device_id - - @staticmethod - def signature_insert(conn, acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id): - '''Insert Statement for the Signature Table''' - entry = conn.execute('SELECT id FROM signatures WHERE (acid=?)', (acid)) - entry = entry.fetchone() - if entry is None: - conn.execute("insert into signatures (acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", - (acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id)) - conn.commit() - return True diff --git a/passive_fingerprinting/passive_data.py b/passive_fingerprinting/passive_data.py index c9ba547..5dba24b 100644 --- a/passive_fingerprinting/passive_data.py +++ b/passive_fingerprinting/passive_data.py @@ -189,5 +189,3 @@ def __init__(self, tcp_sig_obj): self.author_email = tcp_sig_obj['author']['email'] self.author_github = tcp_sig_obj['author']['github'] self.signature = dict(zip(['ver', 'ittl', 'olen', 'mss', 'wsize', 'scale', 'olayout', 'quirks', 'pclass'], self.sig_tcp_sig.split(':'))) - - diff --git a/passive_fingerprinting/signature_matching.py b/passive_fingerprinting/signature_matching.py new file mode 100644 index 0000000..fca45c7 --- /dev/null +++ b/passive_fingerprinting/signature_matching.py @@ -0,0 +1,132 @@ +class quirk: + """ + Creates quirks - comma-delimited properties and quirks observed in IP or TCP headers. 
+ If a signature scoped to both IPv4 and IPv6 contains quirks valid + for just one of these protocols, such quirks will be ignored for + packets using the other protocol. For example, any combination + of 'df', 'id+', and 'id-' is always matched by any IPv6 packet. + """ + + def __init__(self, p): + self.p = p + self.df = self.set_df() + self.id_plus = self.set_id_plus() + self.id_minus = self.set_id_minus() + self.ecn = self.set_ecn() + self.zero_plus = self.set_zero_plus() + self.flow = self.set_flow() + self.seq_minus = self.set_seq_minus() + self.ack_plus = self.set_ack_plus() + self.ack_minus = self.set_ack_minus() + self.urtr_plus = self.set_uptr_plus() + self.urgf_plus = self.set_urgf_plus() + self.ts1_minus = self.set_ts1_minus() + self.ts2_plus = self.set_ts2_plus() + self.opt_plus = self.set_opt_plus() + self.exws = self.set_exws() + self.bad = self.set_bad() + + def set_df(self): + '''Sets df attribute based on flag - "don't fragment" set (probably PMTUD); ignored for IPv6.''' + df = False + if 'DF' in self.p['IP'].flags: + df = True + return df + + def set_id_plus(self): + '''Sets id+ attribute based on flag and IPID - DF set but IPID non-zero; ignored for IPv6.''' + id_plus = False + if self.p['IP'].flags =='DF' and self.p['IP'].id != 0: + id_plus = True + return id_plus + + def set_id_minus(self): + '''Sets id- attribute based on flag and IPID - DF not set but IPID is zero; ignored for IPv6.''' + id_minus = False + if self.p['IP'].flags =='DF' and self.p['IP'].id == 0: + id_minus = True + return id_minus + + def set_ecn(self): + '''Sets ecn attribute - explicit congestion notification support.''' + ecn = False + if 'E' in self.p['TCP'].flag: + ecn = True + return ecn + + def set_zero_plus(self): + '''Sets 0+ Attribute - "must be zero" field not zero; ignored for IPv6.''' + zero_plus = False + if self.p.reserved != 0: + zero_plus = True + return False + + def set_flow(self): + '''Sets flow Attribute - non-zero IPv6 flow ID; ignored for IPv4.''' + 
#TODO IPv6 support + return False + + def set_seq_minus(self): + '''Sets seq- attribute - sequence number is zero.''' + seq_minus = False + if self.p['TCP'].seq == 0: + seq_minus = True + return seq_minus + + def set_ack_plus(self): + '''Sets ack+ - ACK number is non-zero, but ACK flag not set.''' + ack_plus = False + if self.p['TCP'].ack != 0: + ack_plus = True + return ack_plus + + def set_ack_minus(self): + '''Sets ack- - ACK number is zero, but ACK flag set.''' + ack_minus = False + if self.p['TCP'].ack == 0: + ack_minus = True + return ack_minus + + def set_uptr_plus(self): + '''Sets uptr+ attribute - URG pointer is non-zero, but URG flag not set.''' + uptr_plus = False + return uptr_plus + + def set_urgf_plus(self): + '''Sets urgf+ attribute - URG flag used.''' + urgf_plus = False + if 'URG' in self.p['IP'].flags: + urgf_plus = True + return urgf_plus + + def set_pushf_plus(self): + '''Sets pushf+ attribute - PUSH flag used.''' + if 'PUSH' in self.p['IP'].flags: + pushf_plus = True + return pushf_plus + + def set_ts1_minus(self): + '''Sets ts1- attribute - own timestamp specified as zero.''' + ts1_minus = False + return ts1_minus + + def set_ts2_plus(self): + '''Sets ts2+ attribute - non-zero peer timestamp on initial SYN.''' + ts2_plus = False + return ts2_plus + + def set_opt_plus(self): + '''Sets opt+ attribute - trailing non-zero data in options segment.''' + opt_plus = False + return opt_plus + + def set_exws(self): + '''Sets exws attribute - excessive window scaling factor (> 14).''' + exws = False + return exws + + def set_bad(self): + '''Sets bad attribute - malformed TCP options.''' + bad = False + return bad + diff --git a/test.py b/test.py new file mode 100644 index 0000000..d9a240d --- /dev/null +++ b/test.py @@ -0,0 +1,28 @@ +from passive_fingerprinting.passive_data import passive_data +from passive_fingerprinting.passive_data import pull_data +from passive_fingerprinting.passive_data import tcp_sig + +# Create Sqlite DB +passive_data.setup_db() 
+ +# Create DB Connection +conn = passive_data.create_con() + +# Pull data from Github Ram JSON if Github is resolvable. +if passive_data.test_github_con(): + tcp_sig_data = pull_data.import_data() + + # Iterate over JSON Objects + for i in tcp_sig_data['signature_list']: + try: + signature = tcp_sig(i) + author_id = passive_data.author_insert(conn, signature.author, signature.author_email, signature.author_github) + print(author_id) + os_id = passive_data.os_insert(conn, signature.os_name, signature.os_version, signature.os_class, signature.os_vendor, signature.os_url) + print(os_id) + device_id = passive_data.device_insert(conn, signature.device_type, signature.device_vendor, signature.device_url) + print(device_id) + passive_data.signature_insert(conn, signature.sig_acid, signature.sig_tcp_flag, signature.signature['ver'], signature.signature['ittl'], signature.signature['olen'], signature.signature['mss'], signature.signature['wsize'], signature.signature['scale'], signature.signature['olayout'], signature.signature['quirks'], signature.signature['pclass'], signature.sig_comments, os_id, device_id, author_id) + except Exception as e: + print(e) + From fcd3d253b137ff0102239cc440f6ea0af8fa46b8 Mon Sep 17 00:00:00 2001 From: David Quartarolo Date: Tue, 1 Mar 2022 19:53:24 -0500 Subject: [PATCH 09/10] Check IN --- passer.py | 6 +- passive_fingerprinting/passive_data.py | 191 -------- passive_fingerprinting/signature_matching.py | 132 ------ passive_fingerprinting/tests/test_passive.py | 45 -- .../__init__.py | 5 +- smudge/passive_data.py | 125 +++++ smudge/signature_matching.py | 440 ++++++++++++++++++ .../tests/__init__.py | 0 smudge/tests/test.pcap | Bin 0 -> 7896 bytes smudge/tests/test_passive.py | 24 + smudge/tests/test_signature_matching.py | 12 + tcp_extract.v0.2.5.py | 16 +- test.py | 53 ++- 13 files changed, 664 insertions(+), 385 deletions(-) delete mode 100644 passive_fingerprinting/passive_data.py delete mode 100644 
passive_fingerprinting/signature_matching.py delete mode 100644 passive_fingerprinting/tests/test_passive.py rename {passive_fingerprinting => smudge}/__init__.py (50%) create mode 100644 smudge/passive_data.py create mode 100644 smudge/signature_matching.py rename {passive_fingerprinting => smudge}/tests/__init__.py (100%) create mode 100644 smudge/tests/test.pcap create mode 100644 smudge/tests/test_passive.py create mode 100644 smudge/tests/test_signature_matching.py diff --git a/passer.py b/passer.py index 1b10a40..0dde05b 100755 --- a/passer.py +++ b/passer.py @@ -20,9 +20,9 @@ #from scapy.all import sniff, Scapy_Exception, PcapWriter from scapy.all import * #Please make sure you have an up-to-date version of scapy, at least 2.4.0 # Imports for passive_fingerprinting feature. -from passive_fingerprinting.passive_data import passive_data -from passive_fingerprinting.passive_data import pull_data -from passive_fingerprinting.passive_data import tcp_sig +from smudge.passive_data import passive_data +from smudge.passive_data import pull_data +from smudge.passive_data import tcp_sig sys.path.insert(0, '.') #Allows us to load from the current directory (There was one claim that we need to create an empty file __init__.py , but this does not appear to be required.) diff --git a/passive_fingerprinting/passive_data.py b/passive_fingerprinting/passive_data.py deleted file mode 100644 index 5dba24b..0000000 --- a/passive_fingerprinting/passive_data.py +++ /dev/null @@ -1,191 +0,0 @@ -from os.path import exists -import sqlite3 -import urllib.request - - - -class passive_data: - """ - A class filled with static methods that interacts with the sqlite database. 
- """ - - @staticmethod - def test_github_con(): - '''Tests Internet Connection to Github.com''' - test_result = urllib.request.urlopen("https://www.github.com").getcode() - if test_result == 200: - return True - else: - return False - - @staticmethod - def create_con(): - '''Create Database Connection''' - return sqlite3.connect('signature.db') - - - @staticmethod - def setup_db(): - '''Create Sqlite3 DB with all required tables''' - if exists('signature.db'): - pass - else: - with open('signature.db', 'x') as fp: - pass - conn = sqlite3.connect('signature.db') - # Create Signature Table - conn.execute('''CREATE TABLE "author" ( - "id" INTEGER NOT NULL UNIQUE, - "name" TEXT NOT NULL, - "email" TEXT, - "github" TEXT, - PRIMARY KEY("id" AUTOINCREMENT) - )''') - #Create Device Table - conn.execute('''CREATE TABLE "device" ( - "id" INTEGER NOT NULL UNIQUE, - "type" TEXT NOT NULL, - "vendor" TEXT, - "url" TEXT, - PRIMARY KEY("id" AUTOINCREMENT) - )''') - #Create OS Table - conn.execute('''CREATE TABLE "os" ( - "id" INTEGER NOT NULL, - "name" TEXT, - "version" TEXT, - "class" TEXT, - "vendor" TEXT, - "url" TEXT, - PRIMARY KEY("id" AUTOINCREMENT) - )''') - # Create Signatures Table - conn.execute('''CREATE TABLE "signatures" ( - "id" INTEGER NOT NULL UNIQUE, - "acid" INTEGER UNIQUE, - "tcp_flag" TEXT, - "ver" TEXT NOT NULL, - "ittl" INTEGER, - "olen" INTEGER, - "mss" TEXT, - "wsize" TEXT, - "scale" TEXT, - "olayout" TEXT, - "quirks" TEXT, - "pclass" TEXT, - "comments" TEXT, - "os_id" INTEGER, - "device_id" INTEGER, - "author_id" INTEGER, - FOREIGN KEY("os_id") REFERENCES "os"("id"), - FOREIGN KEY("author_id") REFERENCES "author"("id"), - FOREIGN KEY("device_id") REFERENCES "device"("id"), - PRIMARY KEY("id" AUTOINCREMENT) - );''') - conn.close() - return True - - @staticmethod - def author_insert(conn, name, email, github): - '''Insert Statement for the Author Table''' - entry = conn.execute('SELECT id FROM author WHERE (name=? 
AND email=?)', (name, email)) - entry = entry.fetchone() - if entry is None: - author_id = conn.execute("insert into author (name, email, github) values (?, ?, ?)", (name, email, github)) - conn.commit() - author_id = author_id.lastrowid - else: - author_id = entry[0] - return author_id - - @staticmethod - def os_insert(conn, name, version, os_class, vendor, url): - '''Insert Statement for the OS Table''' - entry = conn.execute('SELECT id FROM os WHERE (name=? AND version=? AND class=? AND vendor=?)', (name, version, os_class, vendor)) - entry = entry.fetchone() - if entry is None: - os_id = conn.execute("insert into os (name, version, class, vendor, url) values (?, ?, ?, ?, ?)", (name, version, os_class, vendor, url)) - conn.commit() - os_id = os_id.lastrowid - else: - os_id = entry[0] - return os_id - - @staticmethod - def device_insert(conn, device_type, vendor, url): - '''Insert Statement for the Device Table''' - entry = conn.execute('SELECT id FROM device WHERE (type=? AND vendor=? 
AND url=?)', (device_type, vendor, url)) - entry = entry.fetchone() - if entry is None: - device_id = conn.execute("insert into device (type, vendor, url) values (?, ?, ?)",(device_type, vendor, url)) - conn.commit() - device_id = device_id.lastrowid - else: - device_id = entry[0] - return device_id - - @staticmethod - def signature_insert(conn, acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id): - '''Insert Statement for the Signature Table''' - entry = conn.execute('SELECT id FROM signatures WHERE (acid=?)', ([acid])) - entry = entry.fetchone() - if entry is None: - conn.execute("insert into signatures (acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (acid, tcp_flag, ver, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments, os_id, device_id, author_id)) - conn.commit() - return True - - - -class pull_data: - """ - A class that contains a method that: - * Loads a json file from github into memory. - * Dumps the json into the sqlite database. - - The use of class methods is used so that class variables can be overrided for testing. - ... - - Class Variables - ---------- - url : str - URL of raw json file that contains TCP Signatures. - """ - - import json - import urllib.request - url = "https://raw.githubusercontent.com/activecm/tcp-sig-json/testing-data/tcp-sig.json" - - @classmethod - def import_data(cls): - """Imports TCP Signatures from raw JSON file hosted on Github.""" - with cls.urllib.request.urlopen(cls.url) as f: - data = cls.json.load(f) - return data - - - -class tcp_sig: - """ - Data mapping class that takes a TCP Signature object and inserts it into the sqlite database. 
- """ - - def __init__(self, tcp_sig_obj): - self.version = tcp_sig_obj['version']['name'] - self.rev = tcp_sig_obj['version']['rev'] - self.date = tcp_sig_obj['version']['date'] - self.os_name = tcp_sig_obj['os']['name'] - self.os_version = tcp_sig_obj['os']['version'] - self.os_class = tcp_sig_obj['os']['class'] - self.os_vendor = tcp_sig_obj['os']['vendor'] - self.os_url = tcp_sig_obj['os']['url'] - self.device_type = tcp_sig_obj['device']['type'] - self.device_vendor = tcp_sig_obj['device']['vendor'] - self.device_url = tcp_sig_obj['device']['url'] - self.sig_acid = tcp_sig_obj['signatures']['acid'] - self.sig_tcp_flag = tcp_sig_obj['signatures']['tcp_flag'] - self.sig_tcp_sig = tcp_sig_obj['signatures']['tcp_sig'] - self.sig_comments = tcp_sig_obj['signatures']['comments'] - self.author = tcp_sig_obj['author']['name'] - self.author_email = tcp_sig_obj['author']['email'] - self.author_github = tcp_sig_obj['author']['github'] - self.signature = dict(zip(['ver', 'ittl', 'olen', 'mss', 'wsize', 'scale', 'olayout', 'quirks', 'pclass'], self.sig_tcp_sig.split(':'))) diff --git a/passive_fingerprinting/signature_matching.py b/passive_fingerprinting/signature_matching.py deleted file mode 100644 index fca45c7..0000000 --- a/passive_fingerprinting/signature_matching.py +++ /dev/null @@ -1,132 +0,0 @@ -class quirk: - """ - Creates quirks - comma-delimited properties and quirks observed in IP or TCP headers. - If a signature scoped to both IPv4 and IPv6 contains quirks valid - for just one of these protocols, such quirks will be ignored for - on packets using the other protocol. For example, any combination - of 'df', 'id+', and 'id-' is always matched by any IPv6 packet. 
- """ - - def __init__(self, p): - self.p = p - self.df = self.set_df() - self.id_plus = self.set_id_plus() - self.id_minus = self.set_id_minus() - self.ecn = self.set_ecn() - self.zero_plus = self.set_zero_plus() - self.flow = self.set_flow() - self.seq_minus = self.set_seq_minus() - self.ack_plus = self.set_ack_plus() - self.ack_minus = self.set_ack_minus() - self.urtr_plus = self.set_uptr_plus() - self.urgf_plus = self.set_urgf_plus() - self.ts1_minus = self.set_ts1_minus() - self.ts2_plus = self.set_ts2_plus() - self.opt_plus = self.set_opt_plus() - self.exws = self.set_exws() - self.bad = self.set_bad() - - def set_df(self): - '''Sets df attribute based on flag - "don't fragment" set (probably PMTUD); ignored for IPv6.''' - df = False - if 'DF' in self.p['IP'].flags: - df = True - return df - - def set_id_plus(self): - '''Sets id+ attribute based on flag and IPID - DF set but IPID non-zero; ignored for IPv6.''' - id_plus = False - if self.p['IP'].flags =='DF' and self.p['IP'].id != 0: - id_plus = True - return id_plus - - def set_id_minus(self): - '''Sets id- attribute based on flag and IPID - DF not set but IPID is zero; ignored for IPv6.''' - id_minus = False - if self.p['IP'].flags =='DF' and self.p['IP'].id == 0: - id_minus = True - return id_minus - - def set_ecn(self): - '''Sets ecn attribute - explicit congestion notification support.''' - ecn = False - if 'E' in self.p['TCP'].flag: - ecn = True - return ecn - - def set_zero_plus(self): - '''Sets 0+ Attribute - "must be zero" field not zero; ignored for IPv6.''' - zero_plus = False - if self.p.reserved != 0: - zero_plus = True - return False - - def set_flow(self): - '''Sets flow Attribute - non-zero IPv6 flow ID; ignored for IPv4.''' - #TODO IPv6 support - return False - - def set_seq_minus(self): - '''Sets seq- attribute - sequence number is zero.''' - seq_minus = False - if self.p['TCP'].seq == 0: - seq_minus = True - return seq_minus - - def set_ack_plus(self): - '''Sets ack+ - ACK number is 
non-zero, but ACK flag not set.''' - ack_plus = False - if self.p['TCP'].ack != 0: - ack_plus = True - return ack_plus - - def set_ack_minus(self): - '''Sets ack- - ACK number is zero, but ACK flag set.''' - ack_minus = False - if self.p['TCP'].ack == 0: - ack_minus = True - return ack_minus - - def set_uptr_plus(self): - '''Sets uptr+ attribute - URG pointer is non-zero, but URG flag not set.''' - uptr_plus = False - return uptr_plus - - def set_urgf_plus(self): - '''Sets urgf+ attribute - URG flag used.''' - urgf_plus = False - if 'URG' in self.p['IP'].flags: - urgf_plus = True - return urgf_plus - - def set_pushf_plus(self): - '''Sets pushf+ attribute - PUSH flag used.''' - if 'PUSH' in self.p['IP'].flags: - pushf_plus = True - return pushf_plus - - def set_ts1_minus(self): - '''Sets ts1- attribute - own timestamp specified as zero.''' - ts1_minus = False - return ts1_minus - - def set_ts2_plus(self): - '''Sets ts2+ attribute - non-zero peer timestamp on initial SYN.''' - ts2_plus = False - return ts2_plus - - def set_opt_plus(self): - '''Sets opt+ attribute - trailing non-zero data in options segment.''' - opt_plus = False - return opt_plus - - def set_exws(self): - '''Sets exws attribute - excessive window scaling factor (> 14).''' - exws = False - return exws - - def set_bad(self): - '''Sets bad attribute - malformed TCP options.''' - bad = False - return bad - diff --git a/passive_fingerprinting/tests/test_passive.py b/passive_fingerprinting/tests/test_passive.py deleted file mode 100644 index aa5c117..0000000 --- a/passive_fingerprinting/tests/test_passive.py +++ /dev/null @@ -1,45 +0,0 @@ -import unittest -import pathlib as pl -import sqlite3 - -from passive_fingerprinting import passive_data - - -class passive_data_testcase(unittest.TestCase): - def test_setup_db_1(self): - '''UT to ensure DB file gets created.''' - passive_data.setup_db() - path = pl.Path('signature.db') - self.assertTrue(path.is_file()) - - def test_setup_db_2(self): - '''UT to ensure 
author table was created.''' - con = sqlite3.connect('signature.db') - cur = con.cursor() - list_of_tables = cur.execute("""SELECT name FROM sqlite_master WHERE type='table'AND name='author'; """).fetchall() - self.assertTrue(('author',) in list_of_tables) - - def test_setup_db_3(self): - '''UT to ensure device table was created.''' - con = sqlite3.connect('signature.db') - cur = con.cursor() - list_of_tables = cur.execute("""SELECT name FROM sqlite_master WHERE type='table'AND name='device'; """).fetchall() - self.assertTrue(('device',) in list_of_tables) - - def test_setup_db_4(self): - '''UT to ensure os table was created.''' - con = sqlite3.connect('signature.db') - cur = con.cursor() - list_of_tables = cur.execute("""SELECT name FROM sqlite_master WHERE type='table'AND name='os'; """).fetchall() - self.assertTrue(('os',) in list_of_tables) - - def test_setup_db_5(self): - '''UT to ensure signatures table was created.''' - con = sqlite3.connect('signature.db') - cur = con.cursor() - list_of_tables = cur.execute("""SELECT name FROM sqlite_master WHERE type='table'AND name='signatures'; """).fetchall() - self.assertTrue(('signatures',) in list_of_tables) - - def test_test_github_con_1(self): - '''UT to ensure Github connection test is successful.''' - self.assertTrue(passive_data.test_github_con()) \ No newline at end of file diff --git a/passive_fingerprinting/__init__.py b/smudge/__init__.py similarity index 50% rename from passive_fingerprinting/__init__.py rename to smudge/__init__.py index 2569fa0..c9c0656 100644 --- a/passive_fingerprinting/__init__.py +++ b/smudge/__init__.py @@ -3,4 +3,7 @@ from .passive_data import passive_data from .passive_data import pull_data -from .passive_data import tcp_sig \ No newline at end of file +from .passive_data import tcp_sig + +from .signature_matching import quirk +from .signature_matching import signature \ No newline at end of file diff --git a/smudge/passive_data.py b/smudge/passive_data.py new file mode 100644 
index 0000000..92cf2c9 --- /dev/null +++ b/smudge/passive_data.py @@ -0,0 +1,125 @@ +from os.path import exists +import sqlite3 +import urllib.request + + + +class passive_data: + """ + A class filled with static methods that interacts with the sqlite database. + """ + + @staticmethod + def test_github_con(): + '''Tests Internet Connection to Github.com''' + test_result = urllib.request.urlopen("https://www.github.com").getcode() + if test_result == 200: + return True + else: + return False + + @staticmethod + def create_con(): + '''Create Database Connection''' + return sqlite3.connect('signature.db') + + + @staticmethod + def setup_db(): + '''Create Sqlite3 DB with all required tables''' + if exists('signature.db'): + pass + else: + with open('signature.db', 'x') as fp: + pass + conn = sqlite3.connect('signature.db') + # Create Signatures Table + conn.execute('''CREATE TABLE "signatures" ( + "id" INTEGER NOT NULL UNIQUE, + "acid" INTEGER UNIQUE, + "platform" TEXT, + "tcp_flag" TEXT, + "version" TEXT NOT NULL, + "ittl" TEXT, + "olen" TEXT, + "mss" TEXT, + "wsize" TEXT, + "scale" TEXT, + "olayout" TEXT, + "quirks" TEXT, + "pclass" TEXT, + "comments" TEXT, + PRIMARY KEY("id" AUTOINCREMENT) + );''') + conn.close() + return True + + @staticmethod + def signature_insert(conn, sig_obj): + '''Insert Statement for the Signature Table.''' + entry = conn.execute('SELECT id FROM signatures WHERE (acid=?)', (sig_obj.sig_acid,)) + entry = entry.fetchone() + if entry is None: + conn.execute("insert into signatures (acid, platform, tcp_flag, version, ittl, olen, mss, wsize, scale, olayout, quirks, pclass, comments) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (sig_obj.sig_acid, sig_obj.platform, sig_obj.sig_tcp_flag, sig_obj.version, sig_obj.ittl, sig_obj.olen, sig_obj.mss, sig_obj.wsize, sig_obj.scale, sig_obj.olayout, sig_obj.quirks, sig_obj.pclass, sig_obj.sig_comments)) + conn.commit() + return True + + + +class pull_data: + """ + A class that contains a method that: + * 
Loads a json file from github into memory. + * Dumps the json into the sqlite database. + + The use of class methods is used so that class variables can be overrided for testing. + ... + + Class Variables + ---------- + url : str + URL of raw json file that contains TCP Signatures. + """ + + import json + import urllib.request + url = "https://raw.githubusercontent.com/activecm/tcp-sig-json/testing-data/tcp-sig.json" + + @classmethod + def import_data(cls): + """Imports TCP Signatures from raw JSON file hosted on Github.""" + with cls.urllib.request.urlopen(cls.url) as f: + data = cls.json.load(f) + return data + + + +class tcp_sig: + """ + Data mapping class that takes a TCP Signature object and inserts it into the sqlite database. + """ + + def __init__(self, tcp_sig_obj): + self.sig_acid = tcp_sig_obj['acid'] + self.platform = tcp_sig_obj['platform'] + self.sig_tcp_flag = tcp_sig_obj['tcp_flag'] + self.sig_comments = tcp_sig_obj['comments'] + self.signature = dict(zip(['version', 'ittl', 'olen', 'mss', 'wsize', 'scale', 'olayout', 'quirks', 'pclass'], tcp_sig_obj['tcp_sig'].split(':'))) + self.version = self.signature['version'] + self.ittl = self.signature['ittl'] + self.olen = self.signature['olen'] + self.mss = self.signature['mss'] + self.wsize = self.signature['wsize'] + self.scale = self.signature['scale'] + self.olayout = self.signature['olayout'] + self.quirks = self.signature['quirks'] + self.pclass = self.signature['pclass'] + + @property + def qstring(self): + qstring = "{ver}:{ittl}:{olen}:{mss}:{wsize}:{scale}:{olayout}:{quirk}:{pclass}".format(ver=self.version, ittl=self.ittl, olen=self.olen, mss=self.mss, wsize=self.wsize, scale=self.scale, olayout=self.olayout, quirk=self.quirks, pclass=self.pclass) + return qstring + + def __str__(self): + return self.qstring + diff --git a/smudge/signature_matching.py b/smudge/signature_matching.py new file mode 100644 index 0000000..7fa90aa --- /dev/null +++ b/smudge/signature_matching.py @@ -0,0 +1,440 @@ 
+import sqlite3 + +class quirk: + """ + Creates quirks - comma-delimited properties and quirks observed in IP or TCP headers. + If a signature scoped to both IPv4 and IPv6 contains quirks valid + for just one of these protocols, such quirks will be ignored for + on packets using the other protocol. For example, any combination + of 'df', 'id+', and 'id-' is always matched by any IPv6 packet. + """ + + def __init__(self, p): + self.p = p + + def __str__(self): + return self.qstring + + @property + def df(self): + '''Sets df attribute based on flag - "don't fragment" set (probably PMTUD); ignored for IPv6.''' + df = False + if 'DF' in self.p['IP'].flags: + df = 'df' + return df + + @property + def id_plus(self): + '''Sets id+ attribute based on flag and IPID - DF set but IPID non-zero; ignored for IPv6.''' + id_plus = False + if self.p['IP'].flags =='DF' and self.p['IP'].id != 0: + id_plus = 'id+' + return id_plus + + @property + def id_minus(self): + '''Sets id- attribute based on flag and IPID - DF not set but IPID is zero; ignored for IPv6.''' + id_minus = False + if self.p['IP'].flags =='DF' and self.p['IP'].id == 0: + id_minus = 'id-' + return id_minus + + @property + def ecn(self): + '''Sets ecn attribute - explicit congestion notification support.''' + ecn = False + if 'E' in self.p['TCP'].flags: + ecn = 'ecn' + return ecn + + @property + def zero_plus(self): + '''Sets 0+ Attribute - "must be zero" field not zero; ignored for IPv6.''' + zero_plus = False + if self.p.reserved != 0: + zero_plus = '0+' + return False + + @property + def flow(self): + '''Sets flow Attribute - non-zero IPv6 flow ID; ignored for IPv4.''' + #TODO IPv6 support + return False + + @property + def seq_minus(self): + '''Sets seq- attribute - sequence number is zero.''' + seq_minus = False + if self.p['TCP'].seq == 0: + seq_minus = 'seq-' + return seq_minus + + @property + def ack_plus(self): + '''Sets ack+ - ACK number is non-zero, but ACK flag not set.''' + ack_plus = False + if 
self.p['TCP'].ack != 0: + ack_plus = 'ack+' + return ack_plus + + @property + def ack_minus(self): + '''Sets ack- - ACK number is zero, but ACK flag set.''' + ack_minus = False + if self.p['TCP'].ack == 0: + ack_minus = 'ack-' + return ack_minus + + @property + def uptr_plus(self): + '''Sets uptr+ attribute - URG pointer is non-zero, but URG flag not set.''' + uptr_plus = 'uptr+' + return uptr_plus + + @property + def urgf_plus(self): + '''Sets urgf+ attribute - URG flag used.''' + urgf_plus = False + if 'URG' in self.p['IP'].flags: + urgf_plus = 'urgf+' + return urgf_plus + + @property + def pushf_plus(self): + '''Sets pushf+ attribute - PUSH flag used.''' + pushf_plus = False + if 'PUSH' in self.p['IP'].flags: + pushf_plus = 'pushf+' + return pushf_plus + + @property + def ts1_minus(self): + '''Sets ts1- attribute - own timestamp specified as zero.''' + ts1_minus = False + return ts1_minus + + @property + def ts2_plus(self): + '''Sets ts2+ attribute - non-zero peer timestamp on initial SYN.''' + ts2_plus = False + return ts2_plus + + @property + def opt_plus(self): + '''Sets opt+ attribute - trailing non-zero data in options segment.''' + opt_plus = False + return opt_plus + + @property + def exws(self): + '''Sets exws attribute - excessive window scaling factor (> 14).''' + exws = False + return exws + + @property + def bad(self): + '''Sets bad attribute - malformed TCP options.''' + bad = False + return bad + + @property + def qstring(self): + '''Looks at all attributes and makes quirks.''' + quirks = [] + if self.df: quirks.append(self.df) + if self.id_plus: quirks.append(self.id_plus) + if self.id_minus: quirks.append(self.id_minus) + if self.ecn: quirks.append(self.ecn) + if self.zero_plus: quirks.append(self.zero_plus) + if self.flow: quirks.append(self.flow) + if self.seq_minus: quirks.append(self.seq_minus) + if self.ack_plus: quirks.append(self.ack_plus) + if self.ack_minus: quirks.append(self.ack_minus) + if self.uptr_plus: quirks.append(self.uptr_plus) 
+ if self.urgf_plus: quirks.append(self.urgf_plus) + if self.pushf_plus: quirks.append(self.pushf_plus) + if self.ts1_minus: quirks.append(self.ts1_minus) + if self.ts2_plus: quirks.append(self.ts2_plus) + if self.opt_plus: quirks.append(self.opt_plus) + if self.exws: quirks.append(self.exws) + if self.bad: quirks.append(self.bad) + quirks = ",".join(quirks) + return quirks + + +class signature: + """ + Data mapping class that takes a TCP Signature object and inserts it into the sqlite database. + """ + def __init__(self, p): + self.p = p + + def process_options(option): + if option[0] == 'MSS' and (option[1] == 0 or option[1] == ''): + return 'M*' + elif option[0] == 'MSS' and option[1] > 1: + return 'M' + str(option[1]) + elif option[0] == 'NOP': + return 'N' + elif option[0] == 'WScale': + return 'W' + str(option[1]) + elif option[0] == 'SAckOK': + return 'S' + elif option[0] == 'EOL': + return 'E' + else: + return '?' + str(option[1]) + + @property + def version(self): + '''Signature for IPv4 ('4'), IPv6 ('6'), or both ('*').''' + version = self.p.version + return str(version) + + @property + def ittl(self): + ''' + Initial TTL used by the OS. Almost all operating systems use + 64, 128, or 255; ancient versions of Windows sometimes used + 32, and several obscure systems sometimes resort to odd values + such as 60. + + NEW SIGNATURES: P0f will usually suggest something, using the + format of 'observed_ttl+distance' (e.g. 54+10). Consider using + traceroute to check that the distance is accurate, then sum up + the values. If initial TTL can't be guessed, p0f will output + 'nnn+?', and you need to use traceroute to estimate the '?'. + + A handful of userspace tools will generate random TTLs. In these + cases, determine maximum initial TTL and then add a - suffix to + the value to avoid confusion. + ''' + ittl = self.p['IP'].ttl + return ittl + + @property + def olen(self): + ''' + Length of IPv4 options or IPv6 extension headers. 
Usually zero + for normal IPv4 traffic; always zero for IPv6 due to the + limitations of libpcap. + ''' + olen = len(self.p['IP'].options) + return str(olen) + + @property + def mss(self): + mss = dict(self.p['TCP'].options) + try: + return str(mss['MSS']) + except: + return '*' + + @property + def window_size(self): + ''' + Window size. Can be expressed as a fixed value, but many + operating systems set it to a multiple of MSS or MTU, or a + multiple of some random integer. P0f automatically detects these + cases, and allows notation such as 'mss*4', 'mtu*4', or '%8192' + to be used. Wilcard ('*') is possible too. + ''' + window_size = self.p['TCP'].window + if (self.p['TCP'].window / int(self.mss)).is_integer(): + window_size = "mss*" + str(int(self.p['TCP'].window / int(self.mss))) + return str(window_size) + + @property + def scale(self): + ''' + Window scaling factor, if specified in TCP options. Fixed value + or '*'. + NEW SIGNATURES: Copy literally, unless the value varies randomly. + Many systems alter between 2 or 3 scaling factors, in which case, + it's better to have several 'sig' lines, rather than a wildcard. + ''' + options = dict(self.p['TCP'].options) + try: + return options['WScale'] + except: + return '*' + + @property + def olayout(self): + if len(self.p['TCP'].options) == 0: + return '*' + else: + loo = [] + for i in self.p['TCP'].options: + loo.append(signature.process_options(i)) + return ','.join(map(str, loo)) + + @property + def quirk(self): + q = quirk(self.p) + return str(q) + + @property + def pclass(self): + ''' + Payload size classification: '0' for zero, '+' for non-zero, + '*' for any. The packets we fingerprint right now normally have + no payloads, but some corner cases exist. 
+ ''' + pclass = len(self.p['TCP'].payload) + if pclass != 0: + pclass = '+' + return str(pclass) + + @property + def qstring(self): + qstring = "{ver}:{ittl}:{olen}:{mss}:{wsize}:{scale}:{olayout}:{quirk}:{pclass}".format(ver=self.version, ittl=self.ittl, olen=self.olen, mss=self.mss, wsize=self.window_size, scale=self.scale, olayout=self.olayout, quirk=self.quirk, pclass=self.pclass) + return qstring + + def __str__(self): + return self.qstring + + + +class matching(): + + @staticmethod + def create_con(): + '''Create Database Connection''' + return sqlite3.connect('signature.db') + + # Select 100% + def sig_match_one(conn, so): + cur = conn.cursor() + cur.execute( + "SELECT * FROM signatures WHERE version=? AND ittl=? AND olen=? AND mss=? AND wsize=? AND scale=? AND olayout=? AND quirks=? AND pclass=?", + [so.version, so.ittl, so.olen, so.mss, so.window_size, so.scale, so.olayout, so.quirk, so.pclass] + ) + signature_matches = cur.fetchall() + if len(signature_matches) == 0: + return None + else: + return signature_matches + + # Select 100% + def sig_match_one(conn, so): + cur = conn.cursor() + cur.execute( + "SELECT * FROM signatures WHERE version=? AND ittl=? AND olen=? AND mss=? AND wsize=? AND scale=? AND olayout=? AND quirks=? AND pclass=?", + [so.version, so.ittl, so.olen, so.mss, so.window_size, so.scale, so.olayout, so.quirk, so.pclass] + ) + signature_matches = cur.fetchall() + if len(signature_matches) == 0: + return None + else: + return signature_matches + + # Select 80% + def sig_match_eighty(conn, so): + cur = conn.cursor() + cur.execute( + "SELECT * FROM signatures WHERE version=? AND ittl=? AND olen=? AND mss=? AND wsize=? AND scale=? AND olayout=? 
AND pclass=?", + [so.version, so.ittl, so.olen, so.mss, so.window_size, so.scale, so.olayout, so.pclass] + ) + signature_matches = cur.fetchall() + if len(signature_matches) == 0: + return None + else: + return signature_matches + + # Select 60% + def sig_match_sixty(conn, so): + cur = conn.cursor() + cur.execute( + "SELECT * FROM signatures WHERE version=? AND ittl=? AND olen=? AND wsize=? AND scale=? AND olayout=?", + [so.version, so.ittl, so.olen, so.window_size, so.scale, so.olayout] + ) + signature_matches = cur.fetchall() + if len(signature_matches) == 0: + return None + else: + return signature_matches + + # Select 40% + def sig_match_fourty(conn, so): + cur = conn.cursor() + cur.execute( + "SELECT * FROM signatures WHERE version=? AND ittl=? AND olen=? AND olayout=?", + [so.version, so.ittl, so.olen, so.olayout] + ) + signature_matches = cur.fetchall() + if len(signature_matches) == 0: + return None + else: + return signature_matches + + # Select 20% + def sig_match_twenty(conn, so): + cur = conn.cursor() + cur.execute( + "SELECT * FROM signatures WHERE version=? AND ittl=? 
AND olen=?", + [so.version, so.ittl, so.olen] + ) + signature_matches = cur.fetchall() + if len(signature_matches) == 0: + return None + else: + return signature_matches + + + def match(so): + conn = matching.create_con() + results = '' + one_hundred = matching.sig_match_one(conn, so) + if one_hundred: + results = ('100%', one_hundred) + if results == '': + eighty = matching.sig_match_eighty(conn, so) + if eighty: + results = ('80%', eighty) + if results == '': + sixty = matching.sig_match_sixty(conn, so) + if sixty: + results = ('60%', sixty) + if results == '': + fourty = matching.sig_match_fourty(conn, so) + if fourty: + results = ('40%', fourty) + if results == '': + twenty = matching.sig_match_twenty(conn, so) + if twenty: + results = ('20%', twenty) + if results == '': + results = ('0%', so) + conn.close() + return results + + + +class query_object(): + """ + Data mapping class that takes a TCP Signature object and inserts it into the sqlite database. + """ + + def __init__(self, acid, platform, tcp_flag, comments, version, ittl, olen, mss, wsize, scale, olayout, quirks, pclass): + self.sig_acid = acid + self.platform = platform + self.sig_tcp_flag = tcp_flag + self.sig_comments = comments + self.version = version + self.ittl = ittl + self.olen = olen + self.mss = mss + self.wsize = wsize + self.scale = scale + self.olayout = olayout + self.quirks = quirks + self.pclass = pclass + + @property + def qstring(self): + qstring = "{ver}:{ittl}:{olen}:{mss}:{wsize}:{scale}:{olayout}:{quirk}:{pclass}".format(ver=self.version, ittl=self.ittl, olen=self.olen, mss=self.mss, wsize=self.wsize, scale=self.scale, olayout=self.olayout, quirk=self.quirks, pclass=self.pclass) + return qstring + + def __str__(self): + return self.qstring \ No newline at end of file diff --git a/passive_fingerprinting/tests/__init__.py b/smudge/tests/__init__.py similarity index 100% rename from passive_fingerprinting/tests/__init__.py rename to smudge/tests/__init__.py diff --git 
a/smudge/tests/test.pcap b/smudge/tests/test.pcap new file mode 100644 index 0000000000000000000000000000000000000000..3c14df4df0d137864dc9cdf8132927945bd57db0 GIT binary patch literal 7896 zcmaLc3s}x)8vyY4ssHnKpd5-YTfAWjo*!h67f%J3v zv3P{$Ntjl7(HEuVcGXpuLW99hqX&q+y(1x-T{ehMdf6$&%~w&JdKg7hbm?NP6=7AG z^CN{=&(2oN1<{A|Y>QV|m=GI=%i>1(|8Z7cJTu9Od>sAtar7n3F6rr8 zLi}y0t#}l&erca-g~xn~qW1_C{c{`HX?}zS+NbRiVviJAyxpULBF4^hQT(AA#iZ&m zlh%c>ZTV-v5Mr}vTaiJGS0AZX__e7h`a3-rTWehjd;3(t93lRkA&W&Hsb(hl28zFV zaq)?FoYsx7+zzid65{RvTQMJEZ1bi{jE_O_LOT>q(NQxrf5Ob~gbfqo1=w-5PYM)` zkxDU^i*0qWJ7F7^hOHE0rK>D(PUQy`{p{9BYy+?~InxV{WUIL)k@ zGhsP-^Lh$#^?S1T-D9d)k0Ikwd}w(t#%ZpEy}T&0lMuZh*ot34jBDzt#5Mg;tlta8 zl*ocTT0_FpC;NROME{PmShn0j5hLA#VrgNG7*nVCItCCQ^sWR^bM8R_Z*7V zjh|<2)*2Jm|6vAHXj2m-MjNs$AH+X1?)`YO;d8^>Qs>!;j zo-CFXsKmhx#hY+dLDmh&yK5eVWm$8U2rB>Sotp#DDE?RF1 z@#G9yoSvY%U48k?R1HNj3Gc#|ggwYizb?eb`LcK_Q7JCN7^{Zp;wywLp1RFJh|v|Y zXxy$8W4Rcri|q-!YFRoMJO2IPpB|hgxsK76!N<6ESM_WpQPuN-W`bVGgf* zpxR5&UL&mAJG~bRaphE5{BEyGe7c5@;|KaU_94tAK2fiv#;=ye#uk-0VI3dGtzsN0 zC9LN!X+Me>BOc3QqOy|OUWB51DvDkwJf=}2VG&b@Ul8Jgv$o>buzKb{RMohP2cYP` zs%B=gt<=E~=CLj~RETGOm&I|9lwu$k<8{%|5SH+3$O0jbnI?J!LLfClrtVF~Za84Fi6sp9#-B9#fhN5ZC{#g|2p72tlSIym_V5?n` z;(DBYz^J%gv%FEv;P1lNGLJ&qfUrq@hRhc+dcs<-D<+s#6@c456pv-|7~lUhk3uE2 z^%5N~#O?o;#mFqx3u!Mwu_@ozE)||e3YF9$^JV?)NryYEc8q5s>)kt?6j@K6k7A$? 
z#^|-DLw^dDRJL<>y?*TUNEV;$REfvaQGD!*V(#+zFbb8Fr%&7zk#$_2EOt**iL;AQ z{52WH)6358pioKK)83j8*WH%I+36}Vcr+hJM-=-%8QXYR@_jt??a zB8^A!tv)E)UrD$_n-Ugd?ev!rO^I@hefFrt)pt;|@Y>8YXF~#o+N{#a%S(vc@5y4c zStXWDL~#Ir&&+BQ6+xR3cI&+_W(x6Ht}LF&REg~}Q5^j*%-VEs%p}?z-ZL$y_Y~r{ z@5|!py-IN}7uV?`)Q{Pb;rckC?i=~HDVw;~a#znyxqhp7`KC}&ChBy1%65_>qvRHmt zCHgs|SY$#mBsjMzZA)16gtC!B^o0FbduEE^d~_bH>MHDAjACED3!(0@Qs}yV3^z_)(NwB>X8ys=4VATk#yknBGVg_12ut%`ukY){@aC*Nj+ph4CMt2yXBgw1Ql52>kSf{%I&ptp9wTD>e1$BQ zG*yYW7ozxd5{fI&uee6x?b>4gFA+i<@v$ssdZ@%+ytj7}y2mgxr!F_6@OGVfJoJzd ztr@a7x0y-|>^ZYrZ(`{trU@5#F42$8lEmY#^mrxwMA4U7t*OF;x!jf-(-&csPE#(-~KUDS2G7C|RDL`@P zf&~c_y4RM&j(QKE63*VbS&!#=Tqojhvn}c zBE<3WvS_SdPbJ23ahEOzz+7K@9bTuJ^a4Da|%Ux z4Oh`1!q$ZMvllV;ogs^N8>wEJIZ+s+FYkOn))Q$kycG&6-w|S~0kZfplm-7h*9Z6+ zzB!-Ai1*A$=;Mw|Yb!*1xMypR;|0jNp|Vfm*BoOkazl~d*Q4M|?l)6l>aPc zV@K6@uCMvmz0jp74$MlKPDc}#;s0o^5G!A?70V#TzTc_l`uGVx*T194--TlcyEbOk LzlCV6bDI4hV!2YX literal 0 HcmV?d00001 diff --git a/smudge/tests/test_passive.py b/smudge/tests/test_passive.py new file mode 100644 index 0000000..8aa57db --- /dev/null +++ b/smudge/tests/test_passive.py @@ -0,0 +1,24 @@ +import unittest +import pathlib as pl +import sqlite3 + +from smudge import passive_data + + +class passive_data_testcase(unittest.TestCase): + def test_setup_db_1(self): + '''UT to ensure DB file gets created.''' + passive_data.setup_db() + path = pl.Path('signature.db') + self.assertTrue(path.is_file()) + + def test_setup_db_5(self): + '''UT to ensure signatures table was created.''' + con = sqlite3.connect('signature.db') + cur = con.cursor() + list_of_tables = cur.execute("""SELECT name FROM sqlite_master WHERE type='table'AND name='signatures'; """).fetchall() + self.assertTrue(('signatures',) in list_of_tables) + + def test_test_github_con_1(self): + '''UT to ensure Github connection test is successful.''' + self.assertTrue(passive_data.test_github_con()) \ No newline at end of file diff --git a/smudge/tests/test_signature_matching.py 
b/smudge/tests/test_signature_matching.py new file mode 100644 index 0000000..618b6b2 --- /dev/null +++ b/smudge/tests/test_signature_matching.py @@ -0,0 +1,12 @@ +import unittest +from scapy.all import sniff + +from smudge import signature + + +class signature_matching_testcase(unittest.TestCase): + def test_signature_1(self): + '''Signature.''' + packet_1 = sniff(offline="smudge/tests/test.pcap")[0] + signature_1 = signature(packet_1) + self.assertTrue(signature_1.version == str(4)) diff --git a/tcp_extract.v0.2.5.py b/tcp_extract.v0.2.5.py index bf4cbe7..61f83f2 100644 --- a/tcp_extract.v0.2.5.py +++ b/tcp_extract.v0.2.5.py @@ -18,11 +18,17 @@ import sys from scapy.all import sniff, Raw, Scapy_Exception, IP, IPv6, TCP # pylint: disable=no-name-in-module +def test_mss(MSS): + '''Look for MSS in TCP Option.''' + d = dict(MSS) + try: + return d['MSS'] + except: + return '*' def debug_out(output_string): """Send debuging output to stderr.""" - if cl_args['devel']: sys.stderr.write(output_string + '\n') sys.stderr.flush() @@ -35,6 +41,13 @@ def processpacket(p): if ((p.haslayer(IP) and p[IP].proto == 6) or (p.haslayer(IPv6) and p[IPv6].nh == 6)) and p.haslayer(TCP) and isinstance(p[TCP], TCP): # pylint: disable=too-many-boolean-expressions if (p[TCP].flags & 0x17) == 0x02: #SYN (ACK, RST, and FIN off) tcp_attributes = {} + tcp_attributes['ver'] = p[IP].version + tcp_attributes['ittl'] = p[IP].ttl + tcp_attributes['olen'] = len(p[IP].options) + tcp_attributes['mss'] = test_mss(p[TCP].options) + tcp_attributes['window'] = p[TCP].window + tcp_attributes['frag'] = p[IP].frag + tcp_attributes['len'] = p[IP].len tcp_attributes['sport'] = p[TCP].sport tcp_attributes['dport'] = p[TCP].dport tcp_attributes['seq'] = p[TCP].seq @@ -42,7 +55,6 @@ def processpacket(p): tcp_attributes['dataofs'] = p[TCP].dataofs tcp_attributes['reserved'] = p[TCP].reserved tcp_attributes['flags'] = p[TCP].flags - tcp_attributes['window'] = p[TCP].window tcp_attributes['chksum'] = p[TCP].chksum 
tcp_attributes['urgptr'] = p[TCP].urgptr tcp_attributes['options'] = p[TCP].options diff --git a/test.py b/test.py index d9a240d..1486170 100644 --- a/test.py +++ b/test.py @@ -1,6 +1,16 @@ -from passive_fingerprinting.passive_data import passive_data -from passive_fingerprinting.passive_data import pull_data -from passive_fingerprinting.passive_data import tcp_sig +#Imports +from scapy.all import sniff +from smudge.passive_data import passive_data +from smudge.passive_data import pull_data +from smudge.passive_data import tcp_sig +from smudge.signature_matching import signature +from smudge.signature_matching import matching +from smudge.signature_matching import query_object +import time + + +# Setup +############################################ # Create Sqlite DB passive_data.setup_db() @@ -15,14 +25,35 @@ # Iterate over JSON Objects for i in tcp_sig_data['signature_list']: try: - signature = tcp_sig(i) - author_id = passive_data.author_insert(conn, signature.author, signature.author_email, signature.author_github) - print(author_id) - os_id = passive_data.os_insert(conn, signature.os_name, signature.os_version, signature.os_class, signature.os_vendor, signature.os_url) - print(os_id) - device_id = passive_data.device_insert(conn, signature.device_type, signature.device_vendor, signature.device_url) - print(device_id) - passive_data.signature_insert(conn, signature.sig_acid, signature.sig_tcp_flag, signature.signature['ver'], signature.signature['ittl'], signature.signature['olen'], signature.signature['mss'], signature.signature['wsize'], signature.signature['scale'], signature.signature['olayout'], signature.signature['quirks'], signature.signature['pclass'], signature.sig_comments, os_id, device_id, author_id) + smud = tcp_sig(i) + passive_data.signature_insert(conn, smud) except Exception as e: print(e) +# SNIFFFFFFFFING +############################################ +# Takes the packet and onLY LOOKS AT sYNs +packets = sniff(offline="smudge/bap.pcap", 
filter="tcp[tcpflags] & tcp-syn != 0") + + +# Extracts the signature +for i in packets: + try: + packet_signature = signature(i) + print("\n\nSignature Identified for: {IP} --> {signature}".format(IP=i['IP'].src, signature=str(packet_signature))) + time.sleep(1.5) +# Matches + mo = matching.match(packet_signature) + a = mo[1][0] + b = query_object(acid=a[1], platform=a[2], tcp_flag=a[3], comments=a[13], version=a[4], ittl=a[5], olen=a[6], mss=a[7], wsize=a[8], scale=a[9], olayout=a[10], quirks=a[11], pclass=a[12]) + print("Match at: {percent} to signature {signature}".format(percent=mo[0], signature=b)) + print("Signature identified as {platform}".format(platform=b.platform)) + print("Comments: {comments}\n\n".format(comments=b.sig_comments)) + except: + pass + + + + + + From 6bc9c822f0b0d17ab43906a551702b6d9b8757df Mon Sep 17 00:00:00 2001 From: David Quartarolo Date: Mon, 18 Apr 2022 16:57:07 -0400 Subject: [PATCH 10/10] Working Code --- .gitignore | 8 +- passer.py | 58 +++++++++----- smudge/signature_matching.py | 150 +++++++++++++++++++++++++++++------ test.py | 59 -------------- 4 files changed, 171 insertions(+), 104 deletions(-) delete mode 100644 test.py diff --git a/.gitignore b/.gitignore index b1cadc6..92ca30c 100644 --- a/.gitignore +++ b/.gitignore @@ -107,4 +107,10 @@ venv.bak/ *.db # VScode Launch.json files -.vscode/ \ No newline at end of file +.vscode/ + +# Packet Capture Files +*.pcap + +# Python Non Integrated Test +trial.py \ No newline at end of file diff --git a/passer.py b/passer.py index 0dde05b..b99c2c5 100755 --- a/passer.py +++ b/passer.py @@ -23,6 +23,9 @@ from smudge.passive_data import passive_data from smudge.passive_data import pull_data from smudge.passive_data import tcp_sig +from smudge.signature_matching import signature +from smudge.signature_matching import matching +from smudge.signature_matching import query_object sys.path.insert(0, '.') #Allows us to load from the current directory (There was one claim that we need to 
create an empty file __init__.py , but this does not appear to be required.) @@ -735,6 +738,23 @@ def IP_handler(task_q, sh_da, prefs, dests): while True: try: (p, meta) = task_q.get(block=True, timeout=None) + #### Smudge has entered the chat. + if cl_args['passive_fingerprinting'] != False and p.haslayer("TCP"): + if 'S' in str(p['TCP'].flags): + try: + packet_signature = signature(p) + if cl_args['devel'] != False: + dev_out = "\n\nSignature Identified for: {IP} --> {signature}".format(IP=p['IP'].src, signature=str(packet_signature)) + print("\033[91m{}\033[00m".format(dev_out)) + mo = matching.match(packet_signature) + a = mo[1][0] + b = query_object(acid=a[1], platform=a[2], tcp_flag=a[3], comments=a[13], version=a[4], ittl=a[5], olen=a[6], mss=a[7], wsize=a[8], scale=a[9], olayout=a[10], quirks=a[11], pclass=a[12]) + m_out = "Match at: {percent} to signature {signature}".format(percent=mo[0], signature=b) + print("\033[96m{}\033[00m" .format(m_out)) + print("Signature identified as {platform}".format(platform=b.platform)) + print("Comments: {comments}\n\n".format(comments=b.sig_comments)) + except: + pass except KeyboardInterrupt: break else: @@ -1029,36 +1049,34 @@ def packet_stream_processor(name_param, pcap_interface, pcap_source_file, highpr sys.stderr.write('Too many arguments that do not match a parameter. Any chance you did not put the bpf expression in quotes? Exiting.\n') quit() - #Not currently offered as actual user-supplied parameters, but could easily be done so - cl_args['per_packet_timeout'] = 1 #_Could_ shorten this to speed up traceroutes, but if too low we may miss responses. Better to have more parallel traceroutes, below. - cl_args['hop_limit'] = 30 - cl_args['trace_v6'] = True - cl_args['max_traceroutes'] = 4 - - mkdir_p(config_dir) - mkdir_p(cache_dir) - mkdir_p(cache_dir + '/ipv4/') - mkdir_p(cache_dir + '/ipv6/') - mkdir_p(cache_dir + '/dom/') - # If Passive Finger Printing Capability is enabled. 
if cl_args['passive_fingerprinting']: + print("\033[95m {}\033[00m".format("Smudge Enabled")) + # Create Sqlite DB for Smudge Signatures passive_data.setup_db() + # Create DB Connection conn = passive_data.create_con() if passive_data.test_github_con(): tcp_sig_data = pull_data.import_data() - - # Iterate over JSON Objects + # Iterate over JSON Objects for i in tcp_sig_data['signature_list']: try: - signature = tcp_sig(i) - author_id = passive_data.author_insert(conn, signature.author, signature.author_email, signature.author_github) - os_id = passive_data.os_insert(conn, signature.os_name, signature.os_version, signature.os_class, signature.os_vendor, signature.os_url) - device_id = passive_data.device_insert(conn, signature.device_type, signature.device_vendor, signature.device_url) - passive_data.signature_insert(conn, signature.sig_acid, signature.sig_tcp_flag, signature.signature['ver'], signature.signature['ittl'], signature.signature['olen'], signature.signature['mss'], signature.signature['wsize'], signature.signature['scale'], signature.signature['olayout'], signature.signature['quirks'], signature.signature['pclass'], signature.sig_comments, os_id, device_id, author_id) + smud = tcp_sig(i) + passive_data.signature_insert(conn, smud) except Exception as e: print(e) + #Not currently offered as actual user-supplied parameters, but could easily be done so + cl_args['per_packet_timeout'] = 1 #_Could_ shorten this to speed up traceroutes, but if too low we may miss responses. Better to have more parallel traceroutes, below. 
+ cl_args['hop_limit'] = 30 + cl_args['trace_v6'] = True + cl_args['max_traceroutes'] = 4 + + mkdir_p(config_dir) + mkdir_p(cache_dir) + mkdir_p(cache_dir + '/ipv4/') + mkdir_p(cache_dir + '/ipv6/') + mkdir_p(cache_dir + '/dom/') mgr = Manager() #This section sets up a shared data dictionary; all items in it must be Manager()-based shared data structures shared_data = {} @@ -1204,3 +1222,5 @@ def packet_stream_processor(name_param, pcap_interface, pcap_source_file, highpr destinations['suspicious'].put(None) sys.stderr.write('\nDone.\n') + + diff --git a/smudge/signature_matching.py b/smudge/signature_matching.py index 7fa90aa..ca32d58 100644 --- a/smudge/signature_matching.py +++ b/smudge/signature_matching.py @@ -10,6 +10,7 @@ class quirk: """ def __init__(self, p): + '''Takes a packet as an argument.''' self.p = p def __str__(self): @@ -19,25 +20,37 @@ def __str__(self): def df(self): '''Sets df attribute based on flag - "don't fragment" set (probably PMTUD); ignored for IPv6.''' df = False - if 'DF' in self.p['IP'].flags: - df = 'df' - return df + version = self.p.version + if version == 6: + return False + else: + if 'DF' in self.p['IP'].flags.names: + df = 'df' + return df @property def id_plus(self): '''Sets id+ attribute based on flag and IPID - DF set but IPID non-zero; ignored for IPv6.''' - id_plus = False - if self.p['IP'].flags =='DF' and self.p['IP'].id != 0: - id_plus = 'id+' - return id_plus + version = self.p.version + if version == 6: + return False + else: + id_plus = False + if self.p['IP'].flags =='DF' and self.p['IP'].id != 0: + id_plus = 'id+' + return id_plus @property def id_minus(self): '''Sets id- attribute based on flag and IPID - DF not set but IPID is zero; ignored for IPv6.''' - id_minus = False - if self.p['IP'].flags =='DF' and self.p['IP'].id == 0: - id_minus = 'id-' - return id_minus + version = self.p.version + if version == 6: + return False + else: + id_minus = False + if self.p['IP'].flags =='DF' and self.p['IP'].id == 0: + 
id_minus = 'id-' + return id_minus @property def ecn(self): @@ -50,10 +63,14 @@ def ecn(self): @property def zero_plus(self): '''Sets 0+ Attribute - "must be zero" field not zero; ignored for IPv6.''' - zero_plus = False - if self.p.reserved != 0: - zero_plus = '0+' - return False + version = self.p.version + if version == 6: + return False + else: + zero_plus = False + if self.p.reserved != 0: + zero_plus = '0+' + return zero_plus @property def flow(self): @@ -111,14 +128,27 @@ def pushf_plus(self): def ts1_minus(self): '''Sets ts1- attribute - own timestamp specified as zero.''' ts1_minus = False + try: + ts1 = dict(self.p['TCP'].options) + if ts1['Timestamp'][0] == 0: + ts1_minus = 'T0' + except: + pass return ts1_minus @property def ts2_plus(self): '''Sets ts2+ attribute - non-zero peer timestamp on initial SYN.''' ts2_plus = False + try: + ts2 = dict(self.p['TCP'].options) + if ts2['Timestamp'][1] != 0: + ts2_plus = 'T' + except: + pass return ts2_plus + #TODO @property def opt_plus(self): '''Sets opt+ attribute - trailing non-zero data in options segment.''' @@ -128,14 +158,25 @@ def opt_plus(self): @property def exws(self): '''Sets exws attribute - excessive window scaling factor (> 14).''' - exws = False - return exws + try: + exws = dict(self.p['TCP'].options) + except: + exws = False + if exws != False: + try: + exws = exws['WScale'] >= 14 + return exws + except: + pass + else: + return False + #TODO @property def bad(self): '''Sets bad attribute - malformed TCP options.''' - bad = False - return bad + bad = isinstance(self.p['TCP'].options, list) + return False @property def qstring(self): @@ -183,7 +224,18 @@ def process_options(option): elif option[0] == 'EOL': return 'E' else: - return '?' + str(option[1]) + #TODO + # The p0f docs state: + # ?n - unknown option ID n + # What does that even mean? + # Then to make things even more vague + # some random documentation on cert.org states: + # ?n - unrecognized option number n. 
+ # Soooooo, unrecognized != unknown + # I came up with the following and the output does not look correct. \ + # We went with literally returning '?n' + # return '?' + str(option[1]) + return '?n' @property def version(self): @@ -209,7 +261,12 @@ def ittl(self): cases, determine maximum initial TTL and then add a - suffix to the value to avoid confusion. ''' - ittl = self.p['IP'].ttl + if self.version == '4': + ittl = self.p['IP'].ttl + elif self.version == '6': + ittl = self.p['IPv6'].ttl + else: + ittl = '' return ittl @property @@ -219,11 +276,28 @@ def olen(self): for normal IPv4 traffic; always zero for IPv6 due to the limitations of libpcap. ''' - olen = len(self.p['IP'].options) + if self.version == '4': + olen = len(self.p['IP'].options) + elif self.version == '6': + olen = len(self.p['IPv6'].options) + else: + olen = '' return str(olen) @property def mss(self): + ''' + maximum segment size, if specified in TCP options. Special value + of '*' can be used to denote that MSS varies depending on the + parameters of sender's network link, and should not be a part of + the signature. In this case, MSS will be used to guess the + type of network hookup according to the [mtu] rules. + + NEW SIGNATURES: Use '*' for any commodity OSes where MSS is + around 1300 - 1500, unless you know for sure that it's fixed. + If the value is outside that range, you can probably copy it + literally. + ''' mss = dict(self.p['TCP'].options) try: return str(mss['MSS']) @@ -240,8 +314,9 @@ def window_size(self): to be used. Wilcard ('*') is possible too. 
''' window_size = self.p['TCP'].window - if (self.p['TCP'].window / int(self.mss)).is_integer(): - window_size = "mss*" + str(int(self.p['TCP'].window / int(self.mss))) + if self.mss != '*': + if (self.p['TCP'].window / int(self.mss)).is_integer(): + window_size = "mss*" + str(int(self.p['TCP'].window / int(self.mss))) return str(window_size) @property @@ -261,6 +336,11 @@ def scale(self): @property def olayout(self): + ''' + comma-delimited layout and ordering of TCP options, if any. This + is one of the most valuable TCP fingerprinting signals. Supported + values. + ''' if len(self.p['TCP'].options) == 0: return '*' else: @@ -271,6 +351,10 @@ def olayout(self): @property def quirk(self): + ''' + Comma-delimited properties and quirks observed in IP or TCP + headers. + ''' q = quirk(self.p) return str(q) @@ -437,4 +521,20 @@ def qstring(self): return qstring def __str__(self): - return self.qstring \ No newline at end of file + return self.qstring + + + + + + + +''' +try: + ts2 = dict(self.p['TCP'].options) + if ts2['Timestamp'][1] != 0: + ts2_plus = 'T' + except: + pass + +''' \ No newline at end of file diff --git a/test.py b/test.py deleted file mode 100644 index 1486170..0000000 --- a/test.py +++ /dev/null @@ -1,59 +0,0 @@ -#Imports -from scapy.all import sniff -from smudge.passive_data import passive_data -from smudge.passive_data import pull_data -from smudge.passive_data import tcp_sig -from smudge.signature_matching import signature -from smudge.signature_matching import matching -from smudge.signature_matching import query_object -import time - - -# Setup -############################################ - -# Create Sqlite DB -passive_data.setup_db() - -# Create DB Connection -conn = passive_data.create_con() - -# Pull data from Github Ram JSON if Github is resolvable. 
-if passive_data.test_github_con(): - tcp_sig_data = pull_data.import_data() - - # Iterate over JSON Objects - for i in tcp_sig_data['signature_list']: - try: - smud = tcp_sig(i) - passive_data.signature_insert(conn, smud) - except Exception as e: - print(e) - -# SNIFFFFFFFFING -############################################ -# Takes the packet and onLY LOOKS AT sYNs -packets = sniff(offline="smudge/bap.pcap", filter="tcp[tcpflags] & tcp-syn != 0") - - -# Extracts the signature -for i in packets: - try: - packet_signature = signature(i) - print("\n\nSignature Identified for: {IP} --> {signature}".format(IP=i['IP'].src, signature=str(packet_signature))) - time.sleep(1.5) -# Matches - mo = matching.match(packet_signature) - a = mo[1][0] - b = query_object(acid=a[1], platform=a[2], tcp_flag=a[3], comments=a[13], version=a[4], ittl=a[5], olen=a[6], mss=a[7], wsize=a[8], scale=a[9], olayout=a[10], quirks=a[11], pclass=a[12]) - print("Match at: {percent} to signature {signature}".format(percent=mo[0], signature=b)) - print("Signature identified as {platform}".format(platform=b.platform)) - print("Comments: {comments}\n\n".format(comments=b.sig_comments)) - except: - pass - - - - - -