rm tox_wrapper/tests/*

emdee@macaw.me 2023-12-16 14:49:50 +00:00
parent 7338667e7e
commit c87f4a18e3
6 changed files with 0 additions and 4368 deletions


@@ -1,391 +0,0 @@
"""SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
Minor modifications made by Eugene Dementiev (http://www.dementiev.eu/)
"""
import socket
import struct
import sys
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None) -> None:
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module) -> None:
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
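# Usage sketch (illustrative, not part of the original module): route an
# already-imported module's sockets through a SOCKS5 proxy by monkey-patching
# its socket class. The proxy host/port below are assumptions; urllib.request
# qualifies because it imports socket directly into its namespace.
#
#   import urllib.request
#   setdefaultproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
#   wrapmodule(urllib.request)
#   data = urllib.request.urlopen("http://example.com/").read()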
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count-len(data))
if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.__proxy = (proxytype, addr, port, rdns, username, password)
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])).encode() + self.__proxy[4] + chr(len(self.__proxy[5])).encode() + self.__proxy[5])
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
if chosenauth[1:2] == chr(0xFF).encode():
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack('BBB', 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
if type(destaddr) != type(b''): # python3
destaddr_bytes = destaddr.encode(encoding='idna')
else:
destaddr_bytes = destaddr
req = req + chr(0x03).encode() + chr(len(destaddr_bytes)).encode() + destaddr_bytes
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2])<=8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self,destaddr,destport) -> None:
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr.encode() + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
self.close()
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if not rmtrslv:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport) -> None:
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
self.sendall(("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n").encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
recv = self.recv(1)
if not recv:
raise GeneralProxyError((1, _generalerrors[1]))
resp = resp + recv
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
def connect(self, destpair) -> None:
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = int(self.__proxy[2])
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
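# Direct-use sketch (illustrative): open a single proxied connection without
# monkey-patching. Assumes a SOCKS5 proxy such as a local Tor client on
# 127.0.0.1:9050; rdns=True asks the proxy to resolve the hostname remotely.
#
#   s = socksocket()
#   s.setproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 9050, rdns=True)
#   s.connect(("check.torproject.org", 80))
#   s.sendall(b"HEAD / HTTP/1.0\r\nHost: check.torproject.org\r\n\r\n")
#   print(s.recv(1024))
#   s.close()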


@@ -1,163 +0,0 @@
# -*- mode: python; indent-tabs-mode: nil; py-indent-offset: 4; coding: utf-8 -*-
import os
import sys
import logging
from io import BytesIO
import urllib
import traceback
global LOG
LOG = logging.getLogger('app.'+'ts')
try:
import pycurl
except ImportError:
pycurl = None
try:
import requests
except ImportError:
requests = None
lNO_PROXY = ['localhost', '127.0.0.1']
CONNECT_TIMEOUT = 20.0
def bAreWeConnected() -> bool:
# FixMe: Linux only
sFile = f"/proc/{os.getpid()}/net/route"
if not os.path.isfile(sFile): return None
i = 0
for elt in open(sFile, "r").readlines():
if elt.startswith('Iface'): continue
if elt.startswith('lo'): continue
i += 1
return i > 0
def pick_up_proxy_from_environ() -> dict:
retval = dict()
if os.environ.get('socks_proxy', ''):
# socks_proxy takes precedence over https/http
proxy = os.environ.get('socks_proxy', '')
i = proxy.find('//')
if i >= 0: proxy = proxy[i+2:]
retval['proxy_host'] = proxy.split(':')[0]
retval['proxy_port'] = proxy.split(':')[-1]
retval['proxy_type'] = 2
retval['udp_enabled'] = False
elif os.environ.get('https_proxy', ''):
# https takes precedence over http
proxy = os.environ.get('https_proxy', '')
i = proxy.find('//')
if i >= 0: proxy = proxy[i+2:]
retval['proxy_host'] = proxy.split(':')[0]
retval['proxy_port'] = proxy.split(':')[-1]
retval['proxy_type'] = 1
retval['udp_enabled'] = False
elif os.environ.get('http_proxy', ''):
proxy = os.environ.get('http_proxy', '')
i = proxy.find('//')
if i >= 0: proxy = proxy[i+2:]
retval['proxy_host'] = proxy.split(':')[0]
retval['proxy_port'] = proxy.split(':')[-1]
retval['proxy_type'] = 1
retval['udp_enabled'] = False
else:
retval['proxy_host'] = ''
retval['proxy_port'] = ''
retval['proxy_type'] = 0
retval['udp_enabled'] = True
return retval
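# Example of the precedence above (illustrative; the proxy values are
# assumptions): when both variables are set, socks_proxy wins over
# https_proxy/http_proxy, and the port comes back as a string.
#
#   os.environ['socks_proxy'] = 'socks5://127.0.0.1:9050'
#   os.environ['https_proxy'] = 'http://127.0.0.1:8118'
#   pick_up_proxy_from_environ()
#   # -> {'proxy_host': '127.0.0.1', 'proxy_port': '9050',
#   #     'proxy_type': 2, 'udp_enabled': False}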
def download_url(url:str, settings:dict = None) -> bytes:
if not bAreWeConnected(): return ''
if settings is None:
settings = pick_up_proxy_from_environ()
if pycurl:
LOG.debug('Downloading with pycurl: ' + str(url))
buffer = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, buffer)
# Follow redirect.
c.setopt(c.FOLLOWLOCATION, True)
# cookie jar
cjar = os.path.join(os.environ['HOME'], '.local', 'jar.cookie')
if os.path.isfile(cjar):
c.setopt(c.COOKIEFILE, cjar)
# LARGS+=( --cookie-jar --junk-session-cookies )
#? c.setopt(c.ALTSVC_CTRL, 16)
c.setopt(c.NOPROXY, ','.join(lNO_PROXY))
#? c.setopt(c.CAINFO, certifi.where())
if settings['proxy_type'] == 2 and settings['proxy_host']:
socks_proxy = 'socks5h://'+settings['proxy_host']+':'+str(settings['proxy_port'])
settings['udp_enabled'] = False
c.setopt(c.PROXY, socks_proxy)
c.setopt(c.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)
elif settings['proxy_type'] == 1 and settings['proxy_host']:
https_proxy = 'https://'+settings['proxy_host']+':'+str(settings['proxy_port'])
c.setopt(c.PROXY, https_proxy)
elif settings['proxy_type'] == 1 and settings['proxy_host']:
http_proxy = 'http://'+settings['proxy_host']+':'+str(settings['proxy_port'])
c.setopt(c.PROXY, http_proxy)
c.setopt(c.PROTOCOLS, c.PROTO_HTTPS)
try:
c.perform()
c.close()
#? assert c.getinfo(c.RESPONSE_CODE) < 300
result = buffer.getvalue()
# Body is a byte string.
LOG.info('nodes loaded with pycurl: ' + str(url))
return result
except Exception as ex:
LOG.error('TOX Downloading error with pycurl: ' + str(ex))
LOG.error('\n' + traceback.format_exc())
# drop through
if requests:
LOG.debug('Downloading with requests: ' + str(url))
try:
headers = dict()
headers['Content-Type'] = 'application/json'
proxies = dict()
if settings['proxy_type'] == 2 and settings['proxy_host']:
socks_proxy = 'socks5://'+settings['proxy_host']+':'+str(settings['proxy_port'])
settings['udp_enabled'] = False
proxies['https'] = socks_proxy
elif settings['proxy_type'] == 1 and settings['proxy_host']:
https_proxy = 'https://'+settings['proxy_host']+':'+str(settings['proxy_port'])
proxies['https'] = https_proxy
elif settings['proxy_type'] == 1 and settings['proxy_host']:
http_proxy = 'http://'+settings['proxy_host']+':'+str(settings['proxy_port'])
proxies['http'] = http_proxy
req = requests.get(url,
headers=headers,
proxies=proxies,
timeout=CONNECT_TIMEOUT)
# max_retries=3
assert req.status_code < 300
result = req.content
LOG.info('nodes loaded with requests: ' + str(url))
return result
except Exception as ex:
LOG.error('TOX Downloading error with requests: ' + str(ex))
# drop through
if not settings['proxy_type']: # no proxy
LOG.debug('Downloading with urllib no proxy: ' + str(url))
try:
req = urllib.request.Request(url)
req.add_header('Content-Type', 'application/json')
response = urllib.request.urlopen(req)
result = response.read()
LOG.info('nodes loaded with no proxy: ' + str(url))
return result
except Exception as ex:
LOG.error('TOX Downloading ' + str(ex))
return ''
return ''
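# Usage sketch (illustrative): fetch the Tox bootstrap-node list through
# whatever proxy the environment provides. The URL is the default used by the
# test-support argparser in this repo; the result is bytes on success or ''
# on failure. Parsing the JSON assumes `import json`.
#
#   raw = download_url('https://nodes.tox.chat/json')
#   if raw:
#       nodes = json.loads(raw)['nodes']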


@@ -1,573 +0,0 @@
# -*- mode: python; indent-tabs-mode: nil; py-indent-offset: 4; coding: utf-8 -*-
import getpass
import os
import re
import select
import shutil
import socket
import sys
import time
from typing import Union, Callable
if False:
import cepa as stem
from cepa.connection import MissingPassword
from cepa.control import Controller
from cepa.util.tor_tools import is_valid_fingerprint
else:
import stem
from stem.connection import MissingPassword
from stem.control import Controller
from stem.util.tor_tools import is_valid_fingerprint
global LOG
import logging
import warnings
warnings.filterwarnings('ignore')
LOG = logging.getLogger()
bHAVE_TORR = shutil.which('tor-resolve')
yKNOWN_ONIONS = """
- facebookwkhpilnemxj7asaniu7vnjjbiltxjqhye3mhbshg7kx5tfyd # facebook
- duckduckgogg42xjoc72x3sjasowoarfbgcmvfimaftt6twagswzczad # ddg
- zkaan2xfbuxia2wpf7ofnkbz6r5zdbbvxbunvp5g2iebopbfc4iqmbad # hks
"""
# grep -B 1 '<li><a href="' /tmp/tor.html |sed -e 's/<li><a href="http:../ - /' -e 's/.onion.*//' -e 's/<li id=./ # /' -e 's/".*//' -e '/^--/d' -e '/<li id/d'
# This will slow things down 1-2 min each
yKNOWN_ONIONS_TOR = """
# 2019.www.torproject.org
- jqyzxhjk6psc6ul5jnfwloamhtyh7si74b4743k2qgpskwwxrzhsxmad
# api.donate.torproject.org
- rbi3fpvpz4vlrx67scoqef2zxz7k4xyiludszg655favvkygjmhz6wyd
# archive.torproject.org
- uy3qxvwzwoeztnellvvhxh7ju7kfvlsauka7avilcjg7domzxptbq7qd
# aus1.torproject.org
- ot3ivcdxmalbsbponeeq5222hftpf3pqil24q3s5ejwo5t52l65qusid
# aus2.torproject.org
- b5t7emfr2rn3ixr4lvizpi3stnni4j4p6goxho7lldf4qg4hz5hvpqid
# blog.torproject.org
- pzhdfe7jraknpj2qgu5cz2u3i4deuyfwmonvzu5i3nyw4t4bmg7o5pad
# bridges.torproject.org
- yq5jjvr7drkjrelzhut7kgclfuro65jjlivyzfmxiq2kyv5lickrl4qd
# cloud.torproject.org
- ui3cpcohcoko6aydhuhlkwqqtvadhaflcc5zb7mwandqmcal7sbwzwqd
# collector.torproject.org
- pgmrispjerzzf2tdzbfp624cg5vpbvdw2q5a3hvtsbsx25vnni767yad
# collector2.torproject.org
- 3srlmjzbyyzz62jvdfqwn5ldqmh6mwnqxre2zamxveb75uz2qrqkrkyd
# community.torproject.org
- xmrhfasfg5suueegrnc4gsgyi2tyclcy5oz7f5drnrodmdtob6t2ioyd
# consensus-health.torproject.org
- tkskz5dkjel4xqyw5d5l3k52kgglotwn6vgb5wrl2oa5yi2szvywiyid
# crm.torproject.org
- 6ojylpznauimd2fga3m7g24vd7ebkzlemxdprxckevqpzs347ugmynqd
# deb.torproject.org
- apow7mjfryruh65chtdydfmqfpj5btws7nbocgtaovhvezgccyjazpqd
# dev.crm.torproject.org
- eewp4iydzyu2a5d6bvqadadkozxdbhsdtmsoczu2joexfrjjsheaecad
# dist.torproject.org
- scpalcwstkydpa3y7dbpkjs2dtr7zvtvdbyj3dqwkucfrwyixcl5ptqd
# donate-api.torproject.org
- lkfkuhcx62yfvzuz5o3ju4divuf4bsh2bybwd3oierq47kyp2ig2gvid
# donate.torproject.org
- yoaenchicimox2qdc47p36zm3cuclq7s7qxx6kvxqaxjodigfifljqqd
# exonerator.torproject.org
- pm46i5h2lfewyx6l7pnicbxhts2sxzacvsbmqiemqaspredf2gm3dpad
# extra.torproject.org
- kkr72iohlfix5ipjg776eyhplnl2oiv5tz4h2y2bkhjix3quafvjd5ad
# gettor.torproject.org
- ueghr2hzndecdntou33mhymbbxj7pir74nwzhqr6drhxpbz3j272p4id
# git.torproject.org
- xtlfhaspqtkeeqxk6umggfbr3gyfznvf4jhrge2fujz53433i2fcs3id
# gitlab.torproject.org
- eweiibe6tdjsdprb4px6rqrzzcsi22m4koia44kc5pcjr7nec2rlxyad
# gitweb.torproject.org
- gzgme7ov25seqjbphab4fkcph3jkobfwwpivt5kzbv3kqx2y2qttl4yd
# grafana1.torproject.org
- 7zjnw5lx2x27rwiocxkqdquo7fawj46mf2wiu2l7e6z6ng6nivmdxnad
# grafana2.torproject.org
- f3vd6fyiccuppybkxiblgigej3pfvvqzjnhd3wyv7h4ee5asawf2fhqd
# ircbouncer.torproject.org
- moz5kotsnjony4oxccxfo4lwk3pvoxmdoljibhgoonzgzjs5oemtjmqd
# metabase.metrics.torproject.org
- gr5pseamigereei4c6654hafzhid5z2c3oqzn6cfnx7yfyelt47znhad
# metrics.torproject.org
- hctxrvjzfpvmzh2jllqhgvvkoepxb4kfzdjm6h7egcwlumggtktiftid
# moat.torproject.org
- z7m7ogzdhu43nosvjtsuplfmuqa3ge5obahixydhmzdox6owwxfoxzid
# nagios.torproject.org
- w6vizvw4ckesva5fvlkrepynemxdq6pgo5sh4r76ec6msq5notkhqryd
# newsletter.torproject.org
- a4ygisnerpgtc5ayerl22pll6cls3oyj54qgpm7qrmb66xrxts6y3lyd
# nightlies.tbb.torproject.org
- umj4zbqdfcyevlkgqgpq6foxk3z75zzxsbgt5jqmfxofrbrjh3crbnad
# nyx.torproject.org
- 3ewfgrt4gzfccp6bnquhqb266r3zepiqpnsk3falwygkegtluwuyevid
- xao2lxsmia2edq2n5zxg6uahx6xox2t7bfjw6b5vdzsxi7ezmqob6qid
- dud2sxm6feahhuwj4y4lzktduy7v3qpaqsfkggtj2ojmzathttkegoid
# openpgpkey.torproject.org
- 2yldcptk56shc7lwieozoglw3t5ghty7m6mf2faysvfnzccqavbu2mad
# people.torproject.org
- 5ecey6oe4rocdsfoigr4idu42cecm2j7zfogc3xc7kfn4uriehwrs6qd
# prometheus1.torproject.org
- ydok5jiruh3ak6hcfdlm2g7iuraaxcomeckj2nucjsxif6qmrrda2byd
# prometheus2.torproject.org
- vyo6yrqhl3by7d6n5t6hjkflaqbarjpqjnvapr5u5rafk4imnfrmcjyd
# rbm.torproject.org
- nkuz2tpok7ctwd5ueer5bytj3bm42vp7lgjcsnznal3stotg6vyaakyd
# research.torproject.org
- xhqthou6scpfnwjyzc3ekdgcbvj76ccgyjyxp6cgypxjlcuhnxiktnqd
# review.torproject.net
- zhkhhhnppc5k6xju7n25rjba3wuip73jnodicxl65qdpchrwvvsilcyd
# rpm.torproject.org
- 4ayyzfoh5qdrokqaejis3rdredhvf22n3migyxfudpkpunngfc7g4lqd
# snowflake.torproject.org
- oljlphash3bpqtrvqpr5gwzrhroziw4mddidi5d2qa4qjejcbrmoypqd
# spec.torproject.org
- i3xi5qxvbrngh3g6o7czwjfxwjzigook7zxzjmgwg5b7xnjcn5hzciad
# staging-api.donate.torproject.org
- vorwws6g6mx23djlznmlqva4t5olulpnet6fxyiyytcu5dorp3fstdqd
# staging.crm.torproject.org
- pt34uujusar4arrvsqljndqlt7tck2d5cosaav5xni4nh7bmvshyp2yd
# staging.donate-api.torproject.org
- 7niqsyixinnhxvh33zh5dqnplxnc2yd6ktvats3zmtbbpzcphpbsa6qd
# status.torproject.org
- eixoaclv7qvnmu5rolbdwba65xpdiditdoyp6edsre3fitad777jr3ad
# stem.torproject.org
- mf34jlghauz5pxjcmdymdqbe5pva4v24logeys446tdrgd5lpsrocmqd
# styleguide.torproject.org
- 7khzpw47s35pwo3lvtctwf2szvnq3kgglvzc22elx7of2awdzpovqmqd
# submission.torproject.org
- givpjczyrb5jjseful3o5tn3tg7tidbu4gydl4sa5ekpcipivqaqnpad
# support.torproject.org
- rzuwtpc4wb3xdzrj3yeajsvm3fkq4vbeubm2tdxaqruzzzgs5dwemlad
# survey.torproject.org
- eh5esdnd6fkbkapfc6nuyvkjgbtnzq2is72lmpwbdbxepd2z7zbgzsqd
# svn-archive.torproject.org
- b63iq6es4biaawfilwftlfkw6a6putogxh4iakei2ioppb7dsfucekyd
# tb-manual.torproject.org
- dsbqrprgkqqifztta6h3w7i2htjhnq7d3qkh3c7gvc35e66rrcv66did
# test-api.donate.torproject.org
- wiofesr5qt2k7qrlljpk53isgedxi6ddw6z3o7iay2l7ne3ziyagxaid
# test-data.tbb.torproject.org
- umbk3kbgov4ekg264yulvbrpykfye7ohguqbds53qn547mdpt6o4qkad
# test.crm.torproject.org
- a4d52y2erv4eijii66cpnyqn7rsnnq3gmtrsdxzt2laoutvu4gz7fwid
# test.donate-api.torproject.org
- i4zhrn4md3ucd5dfgeo5lnqd3jy2z2kzp3lt4tdisvivzoqqtlrymkid
# www
- tttyx2vwp7ihml3vkhywwcizv6nbwrikpgeciy3qrow7l7muak2pnhad
# www.torproject.org
- 2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid
"""
# we check these each time but we got them by sorting bad relays
# in the wild we'll keep a copy here so we can avoid retesting
yKNOWN_NODNS = """
- 0x0.is
- a9.wtf
- apt96.com
- axims.net
- backup.spekadyon.org
- dfri.se
- dotsrc.org
- dtf.contact
- ezyn.de
- for-privacy.net
- galtland.network
- heraldonion.org
- interfesse.net
- kryptonit.org
- linkspartei.org
- mkg20001.io
- nicdex.com
- nx42.de
- pineapple.cx
- privacylayer.xyz
- privacysvcs.net
- prsv.ch
- sebastian-elisa-pfeifer.eu
- thingtohide.nl
- tor-exit-2.aa78i2efsewr0neeknk.xyz
- tor-exit-3.aa78i2efsewr0neeknk.xyz
- tor.dlecan.com
- tor.skankhunt42.pw
- transliberation.today
- tuxli.org
- unzane.com
- verification-for-nusenu.net
- www.defcon.org
"""
# - aklad5.com
# - artikel5ev.de
# - arvanode.net
# - dodo.pm
# - erjan.net
# - galtland.network
# - lonet.sh
# - moneneis.de
# - olonet.sh
# - or-exit-2.aa78i2efsewr0neeknk.xyz
# - or.wowplanet.de
# - ormycloud.org
# - plied-privacy.net
# - rivacysvcs.net
# - redacted.org
# - rofl.cat
# - sv.ch
# - tikel10.org
# - tor.wowplanet.de
# - torix-relays.org
# - tse.com
# - w.digidow.eu
# - w.cccs.de
def oMakeController(sSock:str = '', port:int = 9051):
import getpass
if sSock and os.path.exists(sSock):
controller = Controller.from_socket_file(path=sSock)
else:
controller = Controller.from_port(port=port)
sys.stdout.flush()
p = getpass.unix_getpass(prompt='Controller Password: ', stream=sys.stderr)
controller.authenticate(p)
return controller
oSTEM_CONTROLER = None
def oGetStemController(log_level:int = 10, sock_or_pair:str = '/run/tor/control'):
global oSTEM_CONTROLER
if oSTEM_CONTROLER: return oSTEM_CONTROLER
import stem.util.log
# stem.util.log.Runlevel = 'DEBUG' = 20 # log_level
if os.path.exists(sock_or_pair):
LOG.info(f"controller from socket {sock_or_pair}")
controller = Controller.from_socket_file(path=sock_or_pair)
else:
if type(sock_or_pair) == int:
port = sock_or_pair
elif ':' in sock_or_pair:
port = sock_or_pair.split(':')[1]
else:
port = sock_or_pair
try:
port = int(port)
except: port = 9051
LOG.info(f"controller from port {port}")
controller = Controller.from_port(port=port)
try:
controller.authenticate()
except (Exception, MissingPassword):
sys.stdout.flush()
p = getpass.unix_getpass(prompt='Controller Password: ', stream=sys.stderr)
controller.authenticate(p)
oSTEM_CONTROLER = controller
LOG.debug(f"{controller}")
return oSTEM_CONTROLER
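# Usage sketch (illustrative): get the cached Controller either from the
# default ControlSocket or from a control port, then query tor's version;
# get_version() is a standard stem Controller call. The socket path is the
# default assumed above.
#
#   controller = oGetStemController(sock_or_pair='/run/tor/control')
#   LOG.info(f"tor {controller.get_version()}")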
def bAreWeConnected() -> bool:
# FixMe: Linux only
sFile = f"/proc/{os.getpid()}/net/route"
if not os.path.isfile(sFile): return None
i = 0
for elt in open(sFile, "r").readlines():
if elt.startswith('Iface'): continue
if elt.startswith('lo'): continue
i += 1
return i > 0
def sMapaddressResolv(target:str, iPort:int = 9051, log_level:int = 10) -> str:
if not stem:
LOG.warn('please install the stem Python package')
return ''
try:
controller = oGetStemController(log_level=log_level)
map_dict = {"0.0.0.0": target}
map_ret = controller.map_address(map_dict)
return map_ret
except Exception as e:
LOG.exception(e)
return ''
def vwait_for_controller(controller, wait_boot:int = 10) -> None:
if bAreWeConnected() is False:
raise SystemExit("we are not connected")
percent = i = 0
# You can call this while bootstrapping
while percent < 100 and i < wait_boot:
bootstrap_status = controller.get_info("status/bootstrap-phase")
progress_percent = re.match('.* PROGRESS=([0-9]+).*', bootstrap_status)
percent = int(progress_percent.group(1))
LOG.info(f"Bootstrapping {percent}%")
time.sleep(5)
i += 5
def bin_to_hex(raw_id:bytes, length: Union[int, None] = None) -> str:
if length is None: length = len(raw_id)
res = ''.join('{:02x}'.format(raw_id[i]) for i in range(length))
return res.upper()
def lIntroductionPoints(controller=None, lOnions:list = [], itimeout:int = 120, log_level:int = 10):
"""now working !!! stem 1.8.x timeout must be huge >120
'Provides the descriptor for a hidden service. The **address** is the
'.onion' address of the hidden service '
What about Services?
"""
try:
from cryptography.utils import int_from_bytes
except ImportError:
import cryptography.utils
# guessing - not in the current cryptography but stem expects it
def int_from_bytes(*args, **kwargs): return int.from_bytes(*args, **kwargs)
cryptography.utils.int_from_bytes = int_from_bytes
# this will fail if the trick above didn't work
from stem.prereq import is_crypto_available
is_crypto_available(ed25519=True)
from queue import Empty
from stem import Timeout
from stem.client.datatype import LinkByFingerprint
from stem.descriptor.hidden_service import HiddenServiceDescriptorV3
if type(lOnions) not in [set, tuple, list]:
lOnions = list(lOnions)
if controller is None:
controller = oGetStemController(log_level=log_level)
l = []
for elt in lOnions:
LOG.info(f"controller.get_hidden_service_descriptor {elt}")
try:
desc = controller.get_hidden_service_descriptor(elt,
await_result=True,
timeout=itimeout)
# LOG.log(40, f"{dir(desc)} get_hidden_service_descriptor")
# timeouts 20 sec
# mistakenly a HSv2 descriptor
hs_address = HiddenServiceDescriptorV3.from_str(str(desc)) # reparse as HSv3
oInnerLayer = hs_address.decrypt(elt)
# LOG.log(40, f"{dir(oInnerLayer)}")
# IntroductionPointV3
n = oInnerLayer.introduction_points
if not n:
LOG.warn(f"NO introduction points for {elt}")
continue
LOG.info(f"{elt} {len(n)} introduction points")
lp = []
for introduction_point in n:
for linkspecifier in introduction_point.link_specifiers:
if isinstance(linkspecifier, LinkByFingerprint):
# LOG.log(40, f"Getting fingerprint for {linkspecifier}")
if hasattr(linkspecifier, 'fingerprint'):
assert len(linkspecifier.value) == 20
lp += [bin_to_hex(linkspecifier.value)]
LOG.info(f"{len(lp)} introduction points for {elt}")
l += lp
except (Empty, Timeout,) as e: # noqa
LOG.warn(f"Timed out getting introduction points for {elt}")
except stem.DescriptorUnavailable as e:
LOG.error(e)
except Exception as e:
LOG.exception(e)
return l
def zResolveDomain(domain:str) -> str:
try:
ip = sTorResolve(domain)
except Exception as e: # noqa
ip = ''
if ip == '':
try:
lpair = getaddrinfo(domain, 443)
except Exception as e:
LOG.warn(f"{e}")
lpair = None
if lpair is None:
LOG.warn(f"TorResolv and getaddrinfo failed for {domain}")
return ''
ip = lpair[0]
return ip
def sTorResolve(target:str,
verbose:bool = False,
sHost:str = '127.0.0.1',
iPort:int = 9050,
SOCK_TIMEOUT_SECONDS:float = 10.0,
SOCK_TIMEOUT_TRIES:int = 3,
) -> str:
MAX_INFO_RESPONSE_PACKET_LENGTH = 8
if '@' in target:
LOG.warn(f"sTorResolve failed invalid hostname {target}")
return ''
target = target.strip('/')
seb = b"\x04\xf0\x00\x00\x00\x00\x00\x01\x00"
seb += bytes(target, 'US-ASCII') + b"\x00"
assert len(seb) == 10 + len(target), str(len(seb)) + repr(seb)
# LOG.debug(f"0 Sending {len(seb)} to The TOR proxy {seb}")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((sHost, iPort))
sock.settimeout(SOCK_TIMEOUT_SECONDS)
oRet = sock.sendall(seb) # noqa
i = 0
data = ''
while i < SOCK_TIMEOUT_TRIES:
i += 1
time.sleep(3)
lReady = select.select([sock.fileno()], [], [],
SOCK_TIMEOUT_SECONDS)
if not lReady[0]: continue
try:
flags=socket.MSG_WAITALL
data = sock.recv(MAX_INFO_RESPONSE_PACKET_LENGTH, flags)
except socket.timeout:
LOG.warn(f"4 The TOR proxy {(sHost, iPort)}" \
+" didnt reply in " + str(SOCK_TIMEOUT_SECONDS) + " sec."
+" #" +str(i))
except Exception as e:
LOG.error("4 The TOR proxy " \
+repr((sHost, iPort)) \
+" errored with " + str(e)
+" #" +str(i))
sock.close()
return ''
else:
if len(data) > 0: break
if len(data) == 0:
if i > SOCK_TIMEOUT_TRIES:
sLabel = "5 No reply #"
else:
sLabel = "5 No data #"
LOG.warn(f"sTorResolve: {sLabel} {i} on {sHost}:{iPort}")
sock.close()
return ''
assert len(data) >= 8
packet_sf = data[1]
if packet_sf == 90:
# , "%d" % packet_sf
assert f"{packet_sf}" == "90", f"packet_sf = {packet_sf}"
return f"{data[4]}.{data[5]}.{data[6]}.{data[7]}"
else:
# 91
LOG.warn(f"tor-resolve failed for {target} on {sHost}:{iPort}")
os.system(f"tor-resolve -4 {target} > /tmp/e 2>/dev/null")
# os.system("strace tor-resolve -4 "+target+" 2>&1|grep '^sen\|^rec'")
return ''
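# Usage sketch (illustrative): resolve a hostname through tor's SOCKS port,
# assumed here to be the default 127.0.0.1:9050. Returns a dotted-quad
# string, or '' on failure.
#
#   ip = sTorResolve('torproject.org')
#   if ip: LOG.info(f"torproject.org -> {ip}")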
def getaddrinfo(sHost:str, sPort:str) -> list:
# do this the explicit way - I've seen the compact connect fail
# >>> sHost, sPort = 'l27.0.0.1', 33446
# >>> sock.connect((sHost, sPort))
# socket.gaierror: [Errno -2] Name or service not known
try:
lElts = socket.getaddrinfo(sHost, int(sPort), socket.AF_INET)
lElts = list(filter(lambda elt: elt[1] == socket.SOCK_DGRAM, lElts))
assert len(lElts) == 1, repr(lElts)
lPair = lElts[0][-1]
assert len(lPair) == 2, repr(lPair)
assert type(lPair[1]) == int, repr(lPair)
except (socket.gaierror, OSError, BaseException) as e:
LOG.error(e)
return None
return lPair
def icheck_torrc(sFile:str, oArgs) -> int:
l = open(sFile, 'rt').readlines()
a = {}
for elt in l:
elt = elt.strip()
if not elt or ' ' not in elt: continue
(k, v,) = elt.split(' ', 1)
a[k] = v
keys = a
if 'HashedControlPassword' not in keys:
LOG.info('Add HashedControlPassword for security')
print('run: tor --hash-password <TopSecretWord>')
if 'ExcludeExitNodes' in keys:
elt = 'BadNodes.ExcludeExitNodes.BadExit'
LOG.warn(f"Remove ExcludeNodes and move then to {oArgs.bad_nodes}")
print(f"move to the {elt} section as a list")
if 'GuardNodes' in keys:
elt = 'GoodNodes.GuardNodes'
LOG.warn(f"Remove GuardNodes and move then to {oArgs.good_nodes}")
print(f"move to the {elt} section as a list")
if 'ExcludeNodes' in keys:
elt = 'BadNodes.ExcludeNodes.BadExit'
LOG.warn(f"Remove ExcludeNodes and move then to {oArgs.bad_nodes}")
print(f"move to the {elt} section as a list")
if 'ControlSocket' not in keys and os.path.exists('/run/tor/control'):
LOG.info('Add ControlSocket /run/tor/control for us')
print('ControlSocket /run/tor/control GroupWritable RelaxDirModeCheck')
if 'UseMicrodescriptors' not in keys or keys['UseMicrodescriptors'] != '0':
LOG.info('Add UseMicrodescriptors 0 for us')
print('UseMicrodescriptors 0')
if 'AutomapHostsSuffixes' not in keys:
LOG.info('Add AutomapHostsSuffixes for onions')
print('AutomapHostsSuffixes .exit,.onion')
if 'AutoMapHostsOnResolve' not in keys:
LOG.info('Add AutoMapHostsOnResolve for onions')
print('AutoMapHostsOnResolve 1')
if 'VirtualAddrNetworkIPv4' not in keys:
LOG.info('Add VirtualAddrNetworkIPv4 for onions')
print('VirtualAddrNetworkIPv4 172.16.0.0/12')
return 0
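# Usage sketch (illustrative): lint a torrc and print suggested additions.
# oArgs only needs .bad_nodes and .good_nodes attributes for the messages;
# the path below is an assumption, not a default of this module.
#
#   icheck_torrc('/etc/tor/torrc', oArgs)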
def lExitExcluder(oArgs, iPort:int = 9051, log_level:int = 10) -> list:
"""
https://raw.githubusercontent.com/nusenu/noContactInfo_Exit_Excluder/main/exclude_noContactInfo_Exits.py
"""
if not stem:
LOG.warn('please install the stem Python package')
return ''
LOG.debug('lExcludeExitNodes')
try:
controller = oGetStemController(log_level=log_level)
# generator
relays = controller.get_server_descriptors()
except Exception as e:
LOG.error(f'Failed to get relay descriptors {e}')
return None
if controller.is_set('ExcludeExitNodes'):
LOG.info('ExcludeExitNodes is in use already.')
return None
exit_excludelist=[]
LOG.debug("Excluded exit relays:")
for relay in relays:
if relay.exit_policy.is_exiting_allowed() and not relay.contact:
if is_valid_fingerprint(relay.fingerprint):
exit_excludelist.append(relay.fingerprint)
LOG.debug("https://metrics.torproject.org/rs.html#details/%s" % relay.fingerprint)
else:
LOG.warn('Invalid Fingerprint: %s' % relay.fingerprint)
try:
controller.set_conf('ExcludeExitNodes', exit_excludelist)
LOG.info('Excluded a total of %s exit relays without ContactInfo from the exit position.' % len(exit_excludelist))
except Exception as e:
LOG.exception('ExcludeExitNodes ' +str(e))
return exit_excludelist
if __name__ == '__main__':
target = 'duckduckgogg42xjoc72x3sjasowoarfbgcmvfimaftt6twagswzczad'
controller = oGetStemController(log_level=10)
lIntroductionPoints(controller, [target], itimeout=120)


@@ -1,970 +0,0 @@
# -*- mode: python; indent-tabs-mode: nil; py-indent-offset: 4; coding: utf-8 -*-
import argparse
import contextlib
import inspect
import json
import logging
import os
import re
import select
import shutil
import socket
import sys
import time
import traceback
import unittest
from ctypes import *
from random import Random
import functools
from typing import Union, Callable
random = Random()
try:
import coloredlogs
if 'COLOREDLOGS_LEVEL_STYLES' not in os.environ:
os.environ['COLOREDLOGS_LEVEL_STYLES'] = 'spam=22;debug=28;verbose=34;notice=220;warning=202;success=118,bold;error=124;critical=background=red'
# https://pypi.org/project/coloredlogs/
except ImportError as e:
coloredlogs = False
try:
import stem
except ImportError as e:
stem = False
try:
import nmap
except ImportError as e:
nmap = False
import tox_wrapper
import tox_wrapper.toxcore_enums_and_consts as enums
from tox_wrapper.tests.support_http import bAreWeConnected
from tox_wrapper.tests.support_onions import (is_valid_fingerprint,
lIntroductionPoints,
oGetStemController,
sMapaddressResolv, sTorResolve)
try:
from user_data.settings import get_user_config_path
except ImportError:
get_user_config_path = None
# LOG=util.log
global LOG
LOG = logging.getLogger()
def LOG_ERROR(l:str) -> None: print('ERRORc: '+l)
def LOG_WARN(l:str) -> None: print('WARNc: ' +l)
def LOG_INFO(l:str) -> None: print('INFOc: ' +l)
def LOG_DEBUG(l:str) -> None: print('DEBUGc: '+l)
def LOG_TRACE(l:str) -> None: pass # print('TRACE+ '+l)
try:
from trepan.api import debug
from trepan.interfaces import server as Mserver
except:
# print('trepan3 TCP server NOT available.')
pass
else:
# print('trepan3 TCP server available.')
def trepan_handler(num=None, f=None):
connection_opts={'IO': 'TCP', 'PORT': 6666}
intf = Mserver.ServerInterface(connection_opts=connection_opts)
dbg_opts = {'interface': intf }
print(f'Starting TCP server listening on port 6666.')
debug(dbg_opts=dbg_opts)
return
# self._audio_thread.isAlive
iTHREAD_TIMEOUT = 1
iTHREAD_SLEEP = 1
iTHREAD_JOINS = 8
iNODES = 6
lToxSamplerates = [8000, 12000, 16000, 24000, 48000]
lToxSampleratesK = [8, 12, 16, 24, 48]
lBOOLEANS = [
'local_discovery_enabled',
'udp_enabled',
'ipv6_enabled',
'trace_enabled',
'compact_mode',
'allow_inline',
'notifications',
'sound_notifications',
'calls_sound',
'hole_punching_enabled',
'dht_announcements_enabled',
'save_history',
'download_nodes_list',
'core_logging',
]
sDIR = os.environ.get('TMPDIR', '/tmp')
sTOX_VERSION = "1000002018"
bHAVE_NMAP = shutil.which('nmap')
bHAVE_JQ = shutil.which('jq')
bHAVE_BASH = shutil.which('bash')
bHAVE_TORR = shutil.which('tor-resolve')
lDEAD_BS = [
# Failed to resolve "tox3.plastiras.org"
"tox3.plastiras.org",
'tox.kolka.tech',
# here and gone
'122-116-39-151.hinet-ip.hinet.net',
# IPs that do not reverse resolve
'49.12.229.145',
"46.101.197.175",
'114.35.245.150',
'172.93.52.70',
'195.123.208.139',
'205.185.115.131',
# IPs that do not reverse resolve
'yggnode.cf', '188.225.9.167',
'85-143-221-42.simplecloud.ru', '85.143.221.42',
# IPs that do not ping
'104.244.74.69', 'tox.plastiras.org',
'195.123.208.139',
'gt.sot-te.ch', '32.226.5.82',
# suspicious IPs
'tox.abilinski.com', '172.103.164.250', '172.103.164.250.tpia.cipherkey.com',
]
def assert_main_thread() -> None:
from PyQt5 import QtCore, QtWidgets
from qtpy.QtWidgets import QApplication
# this "instance" method is very useful!
app_thread = QtWidgets.QApplication.instance().thread()
curr_thread = QtCore.QThread.currentThread()
if app_thread != curr_thread:
raise RuntimeError('attempt to call MainWindow.append_message from non-app thread')
@contextlib.contextmanager
def ignoreStdout() -> None:
devnull = os.open(os.devnull, os.O_WRONLY)
old_stdout = os.dup(1)
sys.stdout.flush()
os.dup2(devnull, 1)
os.close(devnull)
try:
yield
finally:
os.dup2(old_stdout, 1)
os.close(old_stdout)
@contextlib.contextmanager
def ignoreStderr() -> None:
devnull = os.open(os.devnull, os.O_WRONLY)
old_stderr = os.dup(2)
sys.stderr.flush()
os.dup2(devnull, 2)
os.close(devnull)
try:
yield
finally:
os.dup2(old_stderr, 2)
os.close(old_stderr)
def clean_booleans(oArgs) -> None:
for key in lBOOLEANS:
if not hasattr(oArgs, key): continue
val = getattr(oArgs, key)
if type(val) == bool: continue
if val in ['False', 'false', '0']:
setattr(oArgs, key, False)
else:
setattr(oArgs, key, True)
def on_log(iTox, level, filename, line, func, message, *data) -> None:
# LOG.debug(repr((level, filename, line, func, message,)))
tox_log_cb(level, filename, line, func, message)
def tox_log_cb(level, filename, line, func, message, *args) -> None:
"""
* @param level The severity of the log message.
* @param filename The source file from which the message originated.
* @param line The source line from which the message originated.
* @param func The function from which the message originated.
* @param message The log message.
* @param user_data The user data pointer passed to tox_new in options.
"""
if type(func) == bytes:
func = str(func, 'utf-8')
message = str(message, 'UTF-8')
filename = str(filename, 'UTF-8')
if filename == 'network.c':
if line == 660: return
# root WARNING 3network.c#944:b'send_packet'attempted to send message with network family 10 (probably IPv6) on IPv4 socket
if line == 944: return
i = message.find('07 = GET_NODES')
if i > 0:
return
if filename == 'TCP_common.c': return
i = message.find(' | ')
if i > 0:
message = message[:i]
# message = filename +'#' +str(line) +':'+func +' '+message
name = 'core'
# old level is meaningless
level = 10 # LOG.level
# LOG._log(LOG.level, f"{level}: {message}", list())
i = message.find('(0: OK)')
if i > 0:
level = 10 # LOG.debug
else:
i = message.find('(1: ')
if i > 0:
level = 30 # LOG.warn
else:
level = 20 # LOG.info
o = LOG.makeRecord(filename, level, func, line, message, list(), None)
# LOG.handle(o)
LOG_TRACE(f"{level}: {func}{line} {message}")
return
elif level == 1:
LOG.critical(f"{level}: {message}")
elif level == 2:
LOG.error(f"{level}: {message}")
elif level == 3:
LOG.warn(f"{level}: {message}")
elif level == 4:
LOG.info(f"{level}: {message}")
elif level == 5:
LOG.debug(f"{level}: {message}")
else:
LOG_TRACE(f"{level}: {message}")
def vAddLoggerCallback(tox_options, callback=None) -> None:
if callback is None:
tox_wrapper.tox.Tox.libtoxcore.tox_options_set_log_callback(
tox_options._options_pointer,
POINTER(None)())
tox_options.self_logger_cb = None
return
c_callback = CFUNCTYPE(None, c_void_p, c_int, c_char_p, c_int, c_char_p, c_char_p, c_void_p)
tox_options.self_logger_cb = c_callback(callback)
tox_wrapper.tox.Tox.libtoxcore.tox_options_set_log_callback(
tox_options._options_pointer,
tox_options.self_logger_cb)
def get_video_indexes() -> list:
# Linux
return [str(l[5:]) for l in os.listdir('/dev/') if l.startswith('video')]
def get_audio():
with ignoreStderr():
import pyaudio
oPyA = pyaudio.PyAudio()
input_devices = output_devices = 0
for i in range(oPyA.get_device_count()):
device = oPyA.get_device_info_by_index(i)
if device["maxInputChannels"]:
input_devices += 1
if device["maxOutputChannels"]:
output_devices += 1
# {'index': 21, 'structVersion': 2, 'name': 'default', 'hostApi': 0, 'maxInputChannels': 64, 'maxOutputChannels': 64, 'defaultLowInputLatency': 0.008707482993197279, 'defaultLowOutputLatency': 0.008707482993197279, 'defaultHighInputLatency': 0.034829931972789115, 'defaultHighOutputLatency': 0.034829931972789115, 'defaultSampleRate': 44100.0}
audio = {'input': oPyA.get_default_input_device_info()['index'] if input_devices else -1,
'output': oPyA.get_default_output_device_info()['index'] if output_devices else -1,
'enabled': input_devices and output_devices}
return audio
def oToxygenToxOptions(oArgs):
data = None
tox_options = tox_wrapper.tox.Tox.options_new()
if oArgs.proxy_type:
tox_options.contents.proxy_type = int(oArgs.proxy_type)
tox_options.contents.proxy_host = bytes(oArgs.proxy_host, 'UTF-8')
tox_options.contents.proxy_port = int(oArgs.proxy_port)
tox_options.contents.udp_enabled = False
else:
tox_options.contents.udp_enabled = oArgs.udp_enabled
if not os.path.exists('/proc/sys/net/ipv6'):
oArgs.ipv6_enabled = False
else:
tox_options.contents.ipv6_enabled = oArgs.ipv6_enabled
tox_options.contents.tcp_port = int(oArgs.tcp_port)
tox_options.contents.dht_announcements_enabled = oArgs.dht_announcements_enabled
tox_options.contents.hole_punching_enabled = oArgs.hole_punching_enabled
# overrides
tox_options.contents.local_discovery_enabled = False
tox_options.contents.experimental_thread_safety = False
# REQUIRED!!
if oArgs.ipv6_enabled and not os.path.exists('/proc/sys/net/ipv6'):
LOG.warning('Disabling IPV6 because /proc/sys/net/ipv6 does not exist' + repr(oArgs.ipv6_enabled))
tox_options.contents.ipv6_enabled = False
else:
tox_options.contents.ipv6_enabled = bool(oArgs.ipv6_enabled)
if data: # load existing profile
tox_options.contents.savedata_type = enums.TOX_SAVEDATA_TYPE['TOX_SAVE']
tox_options.contents.savedata_data = c_char_p(data)
tox_options.contents.savedata_length = len(data)
else: # create new profile
tox_options.contents.savedata_type = enums.TOX_SAVEDATA_TYPE['NONE']
tox_options.contents.savedata_data = None
tox_options.contents.savedata_length = 0
#? tox_options.contents.log_callback = LOG
if tox_options._options_pointer:
# LOG.debug("Adding logging to tox_options._options_pointer ")
vAddLoggerCallback(tox_options, on_log)
else:
LOG.warning("No tox_options._options_pointer " +repr(tox_options._options_pointer))
return tox_options
def oMainArgparser(_=None, iMode=0):
# 'Mode: 0=chat 1=chat+audio 2=chat+audio+video default: 0'
if not os.path.exists('/proc/sys/net/ipv6'):
bIpV6 = 'False'
else:
bIpV6 = 'True'
lIpV6Choices=[bIpV6, 'False']
sNodesJson = os.path.join(os.environ['HOME'], '.config', 'tox', 'DHTnodes.json')
if not os.path.exists(sNodesJson): sNodesJson = ''
logfile = os.path.join(os.environ.get('TMPDIR', '/tmp'), 'toxygen.log')
if not os.path.exists(logfile): logfile = ''
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('--proxy_host', '--proxy-host', type=str,
# oddball - we want to use '' as a setting
default='0.0.0.0',
help='proxy host')
parser.add_argument('--proxy_port', '--proxy-port', default=0, type=int,
help='proxy port')
parser.add_argument('--proxy_type', '--proxy-type', default=0, type=int,
choices=[0,1,2],
help='proxy type 1=http, 2=socks')
parser.add_argument('--tcp_port', '--tcp-port', default=0, type=int,
help='tcp port')
parser.add_argument('--udp_enabled', type=str, default='True',
choices=['True', 'False'],
help='En/Disable udp')
parser.add_argument('--ipv6_enabled', type=str, default=bIpV6,
choices=lIpV6Choices,
help=f"En/Disable ipv6 - default {bIpV6}")
parser.add_argument('--trace_enabled',type=str,
default='True' if os.environ.get('DEBUG') else 'False',
choices=['True','False'],
help='Debugging from toxcore logger_trace or env DEBUG=1')
parser.add_argument('--download_nodes_list', type=str, default='False',
choices=['True', 'False'],
help='Download nodes list')
parser.add_argument('--nodes_json', type=str,
default=sNodesJson)
parser.add_argument('--network', type=str,
choices=['main', 'local'],
default='main')
parser.add_argument('--download_nodes_url', type=str,
default='https://nodes.tox.chat/json')
parser.add_argument('--logfile', default=logfile,
help='Filename for logging - start with + for stdout too')
parser.add_argument('--loglevel', default=logging.INFO, type=int,
# choices=[logging.info,logging.trace,logging.debug,logging.error]
help='Threshold for logging (lower is more) default: 20')
parser.add_argument('--mode', type=int, default=iMode,
choices=[0,1,2],
help='Mode: 0=chat 1=chat+audio 2=chat+audio+video default: 0')
parser.add_argument('--hole_punching_enabled',type=str,
default='False', choices=['True','False'],
help='En/Disable hole punching')
parser.add_argument('--dht_announcements_enabled',type=str,
default='True', choices=['True','False'],
help='En/Disable DHT announcements')
# argparse.ArgumentError: argument --save_history: conflicting option string: --save_history
# parser.add_argument('--save_history', type=str, default='True',
# choices=['True', 'False'],
# help='En/Disable saving history')
return parser
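# Usage sketch (illustrative): build the parser and point it at a Tor SOCKS
# proxy; the values are assumptions, not defaults of this module.
# clean_booleans() then turns the 'True'/'False' strings into real bools.
#
#   oArgs = oMainArgparser().parse_args(
#       ['--proxy_type', '2', '--proxy_host', '127.0.0.1',
#        '--proxy_port', '9050', '--udp_enabled', 'False'])
#   clean_booleans(oArgs)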
def vSetupLogging(oArgs) -> None:
global LOG
logging._defaultFormatter = logging.Formatter(datefmt='%m-%d %H:%M:%S')
logging._defaultFormatter.default_time_format = '%m-%d %H:%M:%S'
logging._defaultFormatter.default_msec_format = ''
add = None
kwargs = dict(level=oArgs.loglevel,
format='%(levelname)-8s %(message)s')
if oArgs.logfile:
add = oArgs.logfile.startswith('+')
sub = oArgs.logfile.startswith('-')
if add or sub:
oArgs.logfile = oArgs.logfile[1:]
kwargs['filename'] = oArgs.logfile
if coloredlogs:
# https://pypi.org/project/coloredlogs/
aKw = dict(level=oArgs.loglevel,
logger=LOG,
stream=sys.stdout,
fmt='%(name)s %(levelname)s %(message)s'
)
coloredlogs.install(**aKw)
if oArgs.logfile:
oHandler = logging.FileHandler(oArgs.logfile)
LOG.addHandler(oHandler)
else:
logging.basicConfig(**kwargs)
if add:
oHandler = logging.StreamHandler(sys.stdout)
LOG.addHandler(oHandler)
LOG.info(f"Setting loglevel to {oArgs.loglevel!s}")
def setup_logging(oArgs) -> None:
global LOG
if coloredlogs:
aKw = dict(level=oArgs.loglevel,
logger=LOG,
fmt='%(name)s %(levelname)s %(message)s')
if oArgs.logfile:
oFd = open(oArgs.logfile, 'wt')
setattr(oArgs, 'log_oFd', oFd)
aKw['stream'] = oFd
coloredlogs.install(**aKw)
if oArgs.logfile:
oHandler = logging.StreamHandler(stream=sys.stdout)
LOG.addHandler(oHandler)
else:
aKw = dict(level=oArgs.loglevel,
format='%(name)s %(levelname)-4s %(message)s')
if oArgs.logfile:
aKw['filename'] = oArgs.logfile
logging.basicConfig(**aKw)
logging._defaultFormatter = logging.Formatter(datefmt='%m-%d %H:%M:%S')
logging._defaultFormatter.default_time_format = '%m-%d %H:%M:%S'
logging._defaultFormatter.default_msec_format = ''
LOG.setLevel(oArgs.loglevel)
# LOG.trace = lambda l: LOG.log(0, repr(l))
LOG.info(f"Setting loglevel to {oArgs.loglevel!s}")
def signal_handler(num, f) -> None:
from trepan.api import debug
from trepan.interfaces import server as Mserver
connection_opts={'IO': 'TCP', 'PORT': 6666}
intf = Mserver.ServerInterface(connection_opts=connection_opts)
dbg_opts = {'interface': intf}
LOG.info('Starting TCP server listening on port 6666.')
debug(dbg_opts=dbg_opts)
return
def merge_args_into_settings(args:list, settings:dict) -> None:
if args:
if not hasattr(args, 'audio'):
LOG.warn('No audio ' +repr(args))
settings['audio'] = getattr(args, 'audio')
if not hasattr(args, 'video'):
LOG.warn('No video ' +repr(args))
settings['video'] = getattr(args, 'video')
for key in settings.keys():
# proxy_type proxy_port proxy_host
not_key = 'not_' +key
if hasattr(args, key):
val = getattr(args, key)
if type(val) == bytes:
# proxy_host - ascii?
# filenames - ascii?
val = str(val, 'UTF-8')
settings[key] = val
elif hasattr(args, not_key):
val = not getattr(args, not_key)
settings[key] = val
clean_settings(settings)
return
def clean_settings(self:dict) -> None:
# failsafe to ensure C tox is bytes and Py settings is str
# overrides
self['mirror_mode'] = False
self['save_history'] = True
# REQUIRED!!
if not os.path.exists('/proc/sys/net/ipv6'):
LOG.warn('Disabling IPV6 because /proc/sys/net/ipv6 does not exist')
self['ipv6_enabled'] = False
if 'proxy_type' in self and self['proxy_type'] == 0:
self['proxy_host'] = ''
self['proxy_port'] = 0
if 'proxy_type' in self and self['proxy_type'] != 0 and \
'proxy_host' in self and self['proxy_host'] != '' and \
'proxy_port' in self and self['proxy_port'] != 0:
if 'udp_enabled' in self and self['udp_enabled']:
# We don't currently support UDP over proxy.
LOG.info("UDP enabled and proxy set: disabling UDP")
self['udp_enabled'] = False
if 'local_discovery_enabled' in self and self['local_discovery_enabled']:
LOG.info("local_discovery_enabled enabled and proxy set: disabling local_discovery_enabled")
self['local_discovery_enabled'] = False
if 'dht_announcements_enabled' in self and self['dht_announcements_enabled']:
LOG.info("dht_announcements_enabled enabled and proxy set: disabling dht_announcements_enabled")
self['dht_announcements_enabled'] = False
if 'auto_accept_path' in self and \
type(self['auto_accept_path']) == bytes:
self['auto_accept_path'] = str(self['auto_accept_path'], 'UTF-8')
LOG.debug("Cleaned settings")
def lSdSamplerates(iDev:int) -> list:
try:
import sounddevice as sd
except ImportError:
return []
samplerates = (32000, 44100, 48000, 96000, )
device = iDev
supported_samplerates = []
for fs in samplerates:
try:
sd.check_output_settings(device=device, samplerate=fs)
except Exception as e:
# LOG.debug(f"Sample rate not supported {fs}" +' '+str(e))
pass
else:
supported_samplerates.append(fs)
return supported_samplerates
def _get_nodes_path(oArgs:str):
if oArgs and hasattr(oArgs, 'nodes_json') and \
oArgs.nodes_json and os.path.isfile(oArgs.nodes_json):
default = oArgs.nodes_json
elif get_user_config_path:
default = os.path.join(get_user_config_path(), 'toxygen_nodes.json')
else:
# Windwoes
default = os.path.join(os.getenv('HOME'), '.config', 'tox', 'toxygen_nodes.json')
LOG.debug("_get_nodes_path: " +default)
return default
DEFAULT_NODES_COUNT = 8
global aNODES
aNODES = {}
# @functools.lru_cache(maxsize=12) TypeError: unhashable type: 'Namespace'
def generate_nodes(oArgs=None,
nodes_count:int = DEFAULT_NODES_COUNT,
ipv:str = 'ipv4',
udp_not_tcp=True) -> dict:
global aNODES
sKey = ipv
sKey += ',0' if udp_not_tcp else ',1'
if sKey in aNODES and aNODES[sKey]:
return aNODES[sKey]
sFile = _get_nodes_path(oArgs)
assert os.path.exists(sFile), sFile
lNodes = generate_nodes_from_file(sFile,
nodes_count=nodes_count,
ipv=ipv,
udp_not_tcp=udp_not_tcp)
assert lNodes
aNODES[sKey] = lNodes
return aNODES[sKey]
aNODES_CACHE = {}
def generate_nodes_from_file(sFile:str,
nodes_count:int = DEFAULT_NODES_COUNT,
ipv:str = 'ipv4',
udp_not_tcp:bool = True,
) -> dict:
"""https://github.com/TokTok/c-toxcore/issues/469
I had a conversation with @irungentoo on IRC about whether we really need to call tox_bootstrap() when having UDP disabled and why. The answer is yes, because in addition to TCP relays (tox_add_tcp_relay()), toxcore also needs to know addresses of UDP onion nodes in order to work correctly. The DHT, however, is not used when UDP is disabled. tox_bootstrap() function resolves the address passed to it as argument and calls onion_add_bs_node_path() and DHT_bootstrap() functions. Although calling DHT_bootstrap() is not really necessary as DHT is not used, we still need to resolve the address of the DHT node in order to populate the onion routes with onion_add_bs_node_path() call.
"""
global aNODES_CACHE
key = ipv
key += ',0' if udp_not_tcp else ',1'
if key in aNODES_CACHE:
sorted_nodes = aNODES_CACHE[key]
else:
if not os.path.exists(sFile):
LOG.error("generate_nodes_from_file file not found " +sFile)
return []
try:
with open(sFile, 'rt') as fl:
json_nodes = json.loads(fl.read())['nodes']
except Exception as e:
LOG.error(f"generate_nodes_from_file error {sFile}\n{e}")
return []
else:
LOG.debug("generate_nodes_from_file " +sFile)
if udp_not_tcp:
nodes = [(node[ipv], node['port'], node['public_key'],) for
node in json_nodes if node[ipv] != 'NONE' \
and node["status_udp"] in [True, "true"]
]
else:
nodes = []
elts = [(node[ipv], node['tcp_ports'], node['public_key'],) \
for node in json_nodes if node[ipv] != 'NONE' \
and node["status_tcp"] in [True, "true"]
]
for (ipv, ports, public_key,) in elts:
for port in ports:
nodes += [(ipv, port, public_key)]
if not nodes:
LOG.warn(f'empty generate_nodes from {sFile} {json_nodes!r}')
return []
sorted_nodes = nodes
aNODES_CACHE[key] = sorted_nodes
random.shuffle(sorted_nodes)
if nodes_count is not None and len(sorted_nodes) > nodes_count:
sorted_nodes = sorted_nodes[-nodes_count:]
LOG.debug(f"generate_nodes_from_file {sFile} len={len(sorted_nodes)}")
return sorted_nodes
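# Usage sketch (illustrative): pick a handful of UDP bootstrap nodes from a
# downloaded nodes.tox.chat JSON file; the filename below is the default
# used by the argparser above and is an assumption here.
#
#   lNodes = generate_nodes_from_file('DHTnodes.json', nodes_count=4,
#                                     ipv='ipv4', udp_not_tcp=True)
#   # -> a shuffled list of at most 4 (ip, port, public_key) tuples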
def tox_bootstrapd_port() -> int:
port = 33446
sFile = '/etc/tox-bootstrapd.conf'
if os.path.exists(sFile):
with open(sFile, 'rt') as oFd:
for line in oFd.readlines():
if line.startswith('port = '):
port = int(line[7:])
return port
def bootstrap_local(elts:list, lToxes:list, oArgs=None):
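    """Check whether a local tox-bootstrapd appears to be running (pid file
    or a listening socket), warn if not, then bootstrap over elts via
    bootstrap_udp()."""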
if os.path.exists('/run/tox-bootstrapd/tox-bootstrapd.pid'):
LOG.debug('/run/tox-bootstrapd/tox-bootstrapd.pid')
        iRet = 0
else:
iRet = os.system("netstat -nle4|grep -q :33")
if iRet > 0:
        LOG.warn('bootstrap_local: no local DHT running')
    LOG.info('bootstrapping local')
return bootstrap_udp(elts, lToxes, oArgs)
def lDNSClean(l:list) -> list:
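    """Return l with any entries listed in lDEAD_BS removed."""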
global lDEAD_BS
# list(set(l).difference(set(lDEAD_BS)))
return [elt for elt in l if elt not in lDEAD_BS]
def lExitExcluder(oArgs, iPort:int =9051) -> list:
"""
https://raw.githubusercontent.com/nusenu/noContactInfo_Exit_Excluder/main/exclude_noContactInfo_Exits.py
"""
    if not stem:
        LOG.warn('please install the stem Python package')
        return []
    LOG.debug('lExitExcluder')
try:
controller = oGetStemController(log_level=10)
# generator
relays = controller.get_server_descriptors()
    except Exception as e:
        LOG.error(f'Failed to get relay descriptors {e}')
        return []
    if controller.is_set('ExcludeExitNodes'):
        LOG.info('ExcludeExitNodes is in use already.')
        return []
exit_excludelist=[]
LOG.debug("Excluded exit relays:")
for relay in relays:
if relay.exit_policy.is_exiting_allowed() and not relay.contact:
if is_valid_fingerprint(relay.fingerprint):
exit_excludelist.append(relay.fingerprint)
LOG.debug("https://metrics.torproject.org/rs.html#details/%s" % relay.fingerprint)
else:
LOG.warn('Invalid Fingerprint: %s' % relay.fingerprint)
try:
controller.set_conf('ExcludeExitNodes', exit_excludelist)
LOG.info('Excluded a total of %s exit relays without ContactInfo from the exit position.' % len(exit_excludelist))
except Exception as e:
LOG.exception('ExcludeExitNodes ' +str(e))
return exit_excludelist
aHOSTS = {}
@functools.lru_cache(maxsize=20)
def sDNSLookup(host:str) -> str:
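    """Resolve host to an IP address string, or '' on failure.

    Literal IPv4/IPv6 addresses are returned unchanged; .onion and .tox
    names are resolved through tor-resolve; everything else goes through
    gethostbyname() with a dig fallback. Results are cached in aHOSTS.
    """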
global aHOSTS
ipv = 0
if host in lDEAD_BS:
# LOG.warn(f"address skipped because in lDEAD_BS {host}")
return ''
if host in aHOSTS:
return aHOSTS[host]
    try:
        s = host.replace('.', '')
        int(s)
        ipv = 4
    except ValueError:
        try:
            s = host.replace(':', '')
            int(s)
            ipv = 6
        except ValueError:
            pass
if ipv > 0:
# LOG.debug(f"v={ipv} IP address {host}")
return host
LOG.debug(f"sDNSLookup {host}")
ip = ''
if host.endswith('.tox') or host.endswith('.onion'):
if False and stem:
ip = sMapaddressResolv(host)
if ip: return ip
ip = sTorResolve(host)
if ip: return ip
if not bHAVE_TORR:
LOG.warn(f"onion address skipped because no tor-resolve {host}")
return ''
        try:
            sout = f"/tmp/TR{os.getpid()}.log"
            i = os.system(f"tor-resolve -4 {host} > {sout}")
            if i:
                LOG.warn(f"onion address skipped because tor-resolve failed on {host}")
                return ''
            with open(sout, 'rt') as oFd:
                ip = oFd.read().strip()
            if ip.endswith('failed.'):
                LOG.warn(f"onion address skipped because tor-resolve failed on {host}")
                return ''
            LOG.debug(f"onion address tor-resolve {ip} on {host}")
            return ip
        except Exception:
            pass
else:
try:
ip = socket.gethostbyname(host)
LOG.debug(f"host={host} gethostbyname IP address {ip}")
if ip:
aHOSTS[host] = ip
return ip
# drop through
        except Exception:
# drop through
pass
    if ip == '':
        try:
            sout = f"/tmp/TR{os.getpid()}.log"
            i = os.system(f"dig {host} +timeout=15|grep ^{host}|sed -e 's/.* //'> {sout}")
            if i:
                LOG.warn(f"address skipped because dig failed on {host}")
                return ''
            with open(sout, 'rt') as oFd:
                ip = oFd.read().strip()
            LOG.debug(f"address dig {ip} on {host}")
            aHOSTS[host] = ip
            return ip
        except Exception:
            ip = host
LOG.debug(f'sDNSLookup {host} -> {ip}')
if ip and ip != host:
aHOSTS[host] = ip
return ip
def bootstrap_udp(lelts:list, lToxes:list, oArgs=None) -> None:
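    """Bootstrap each Tox instance in lToxes against the UDP nodes in lelts.

    Entries are (host, port, public_key) tuples; dead or unresolvable hosts
    are skipped, and an instance stops trying further nodes once it reports
    a connection.
    """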
lelts = lDNSClean(lelts)
socket.setdefaulttimeout(15.0)
for oTox in lToxes:
random.shuffle(lelts)
if hasattr(oTox, 'oArgs'):
oArgs = oTox.oArgs
if hasattr(oArgs, 'contents') and oArgs.contents.proxy_type != 0:
lelts = lelts[:1]
        # LOG.debug(f'bootstrap_udp DHT bootstrapping {oTox.name} {len(lelts)}')
for largs in lelts:
assert len(largs) == 3
host, port, key = largs
assert host; assert port; assert key
if host in lDEAD_BS: continue
ip = sDNSLookup(host)
if not ip:
LOG.warn(f'bootstrap_udp to host={host} port={port} did not resolve ip={ip}')
continue
            if isinstance(port, str):
                port = int(port)
try:
assert len(key) == 64, key
# NOT ip
oRet = oTox.bootstrap(host,
port,
key)
except Exception as e:
if oArgs is None or (
hasattr(oArgs, 'contents') and oArgs.contents.proxy_type == 0):
pass
# LOG.error(f'bootstrap_udp failed to host={host} port={port} {e}')
continue
if not oRet:
LOG.warn(f'bootstrap_udp failed to {host} : {oRet}')
elif oTox.self_get_connection_status() != enums.TOX_CONNECTION['NONE']:
LOG.info(f'bootstrap_udp to {host} connected')
break
else:
# LOG.debug(f'bootstrap_udp to {host} not connected')
pass
def bootstrap_tcp(lelts:list, lToxes:list, oArgs=None) -> None:
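    """Add the TCP relays in lelts to each Tox instance in lToxes.

    Entries are (host, port, public_key) tuples; .onion relays are first
    checked for introduction points via stem when it is available.
    """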
lelts = lDNSClean(lelts)
for oTox in lToxes:
if hasattr(oTox, 'oArgs'): oArgs = oTox.oArgs
random.shuffle(lelts)
        # LOG.debug(f'bootstrap_tcp bootstrapping {oTox.name} {len(lelts)}')
for (host, port, key,) in lelts:
assert host; assert port;assert key
if host in lDEAD_BS: continue
ip = sDNSLookup(host)
if not ip:
LOG.warn(f'bootstrap_tcp to {host} did not resolve ip={ip}')
# continue
ip = host
if host.endswith('.onion') and stem:
l = lIntroductionPoints(host)
if not l:
LOG.warn(f'bootstrap_tcp to {host} has no introduction points')
continue
            if isinstance(port, str):
                port = int(port)
try:
assert len(key) == 64, key
oRet = oTox.add_tcp_relay(ip,
port,
key)
except Exception as e:
# The address could not be resolved to an IP address, or the IP address passed was invalid.
LOG.warn(f'bootstrap_tcp to {host} : ' +str(e))
continue
if not oRet:
LOG.warn(f'bootstrap_tcp failed to {host} : {oRet}')
elif hasattr(oTox, 'mycon_time') and oTox.mycon_time == 1:
LOG.debug(f'bootstrap_tcp to {host} not yet connected')
elif hasattr(oTox, 'mycon_status') and oTox.mycon_status is False:
LOG.debug(f'bootstrap_tcp to {host} not True')
elif oTox.self_get_connection_status() != enums.TOX_CONNECTION['NONE']:
LOG.info(f'bootstrap_tcp to {host} connected')
break
else:
# LOG.debug(f'bootstrap_tcp to {host} but not connected'
# +f" last={int(oTox.mycon_time)}" )
pass
def iNmapInfoNmap(sProt:str, sHost:str, sPort:str, key=None, environ=None, cmd:str = '') -> int:
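    """Probe sHost:sPort with the python-nmap bindings and log the port state.

    Uses a TCP connect scan for socks/tcp4 protocols and a UDP scan otherwise.
    Always returns 0.
    """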
if sHost in ['-', 'NONE']: return 0
if not nmap: return 0
nmps = nmap.PortScanner
if sProt in ['socks', 'socks5', 'tcp4']:
prot = 'tcp'
cmd = f" -Pn -n -sT -p T:{sPort}"
else:
prot = 'udp'
cmd = f" -Pn -n -sU -p U:{sPort}"
LOG.debug(f"iNmapInfoNmap cmd={cmd}")
sys.stdout.flush()
o = nmps().scan(hosts=sHost, arguments=cmd)
aScan = o['scan']
ip = list(aScan.keys())[0]
    state = aScan[ip][prot][int(sPort)]['state']
LOG.info(f"iNmapInfoNmap: to {sHost} {state}")
return 0
def iNmapInfo(sProt:str, sHost:str, sPort:str, key=None, environ=None, cmd:str = 'nmap'):
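    """Probe sHost:sPort by shelling out to the nmap binary and log the result.

    Returns the non-zero nmap exit status on failure, 0 otherwise.
    """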
if sHost in ['-', 'NONE']: return 0
sFile = os.path.join("/tmp", f"{sHost}.{os.getpid()}.nmap")
if sProt in ['socks', 'socks5', 'tcp4']:
cmd += f" -Pn -n -sT -p T:{sPort} {sHost} | grep /tcp "
else:
cmd += f" -Pn -n -sU -p U:{sPort} {sHost} | grep /udp "
LOG.debug(f"iNmapInfo cmd={cmd}")
sys.stdout.flush()
iRet = os.system(cmd +f" >{sFile} 2>&1 ")
LOG.debug(f"iNmapInfo cmd={cmd} iRet={iRet}")
if iRet != 0:
return iRet
assert os.path.exists(sFile), sFile
with open(sFile, 'rt') as oFd:
l = oFd.readlines()
assert len(l)
l = [line for line in l if line and not line.startswith('WARNING:')]
s = '\n'.join([s.strip() for s in l])
LOG.info(f"iNmapInfo: to {sHost}\n{s}")
return 0
# bootstrap_iNmapInfo(lElts, self._args, sProt)
def bootstrap_iNmapInfo(lElts:list, oArgs, protocol:str = "tcp4", bIS_LOCAL:bool = False, iNODES:int = iNODES, cmd:str = 'nmap') -> bool:
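    """Port-scan up to iNODES bootstrap nodes from lElts and report reachability.

    Uses the python-nmap bindings when available, otherwise the nmap binary.
    Returns True if at least one probe succeeded (or immediately when offline
    and not scanning locally).
    """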
if not bIS_LOCAL and not bAreWeConnected():
        LOG.warn("bootstrap_iNmapInfo not local and NOT CONNECTED")
return True
    if os.environ.get('USER', '') != 'root':
        LOG.warn(f"bootstrap_iNmapInfo not ROOT USER={os.environ.get('USER')}")
        cmd = 'sudo ' + cmd
lRetval = []
LOG.info(f"bootstrap_iNmapInfo testing nmap={nmap} len={len(lElts[:iNODES])}")
for elts in lElts[:iNODES]:
host, port, key = elts
ip = sDNSLookup(host)
if not ip:
LOG.info(f"bootstrap_iNmapInfo to {host} did not resolve ip={ip}")
continue
        if isinstance(port, str):
            port = int(port)
iRet = -1
try:
if not nmap:
iRet = iNmapInfo(protocol, ip, port, key, cmd=cmd)
else:
iRet = iNmapInfoNmap(protocol, ip, port, key)
if iRet != 0:
LOG.warn('iNmapInfo to ' +repr(host) +' retval=' +str(iRet))
lRetval += [False]
else:
LOG.info('iNmapInfo to ' +repr(host) +' retval=' +str(iRet))
lRetval += [True]
except Exception as e:
            LOG.exception(f'iNmapInfo to {host} : ' + str(e))
lRetval += [False]
return any(lRetval)
def caseFactory(cases:list) -> list:
"""We want the tests run in order."""
if len(cases) > 1:
ordered_cases = sorted(cases, key=lambda f: inspect.findsource(f)[1])
else:
ordered_cases = cases
return ordered_cases
def suiteFactory(*testcases):
"""We want the tests run in order."""
linen = lambda f: getattr(tc, f).__code__.co_firstlineno
lncmp = lambda a, b: linen(a) - linen(b)
test_suite = unittest.TestSuite()
for tc in testcases:
test_suite.addTest(unittest.makeSuite(tc, sortUsing=lncmp))
return test_suite

File diff suppressed because it is too large