From dd8e2292525b168000101265bac6f37db1725f7b Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:31:35 -0500 Subject: [PATCH 01/17] Delete 000-default-conf --- 000-default-conf | 72 ------------------------------------------------ 1 file changed, 72 deletions(-) delete mode 100644 000-default-conf diff --git a/000-default-conf b/000-default-conf deleted file mode 100644 index b102f1b..0000000 --- a/000-default-conf +++ /dev/null @@ -1,72 +0,0 @@ - - # The ServerName directive sets the request scheme, hostname and port that - # the server uses to identify itself. This is used when creating - # redirection URLs. In the context of virtual hosts, the ServerName - # specifies what hostname must appear in the request's Host: header to - # match this virtual host. For the default virtual host (this file) this - # value is not decisive as it is used as a last resort host regardless. - # However, you must set it for any further virtual host explicitly. - #ServerName www.example.com - - ServerAdmin webmaster@localhost - DocumentRoot /var/www/html - - # Available loglevels: trace8, ..., trace1, debug, info, notice, warn, - # error, crit, alert, emerg. - # It is also possible to configure the loglevel for particular - # modules, e.g. - #LogLevel info ssl:warn - - ErrorLog ${APACHE_LOG_DIR}/error.log - CustomLog ${APACHE_LOG_DIR}/access.log combined - - # For most configuration files from conf-available/, which are - # enabled or disabled at a global level, it is possible to - # include a line for only one particular virtual host. For example the - # following line enables the CGI configuration for this host only - # after it has been globally disabled with "a2disconf". - #Include conf-available/serve-cgi-bin.conf - - -# Run the Django app as the clearinghouse user -WSGIDaemonProcess chdjango user=ch processes=5 threads=10 -WSGIProcessGroup chdjango - -# HTTP - - # Redirect requests for the server index page or that are - # clearinghouse-related to the HTTPS site. 
- RedirectMatch ^/$ https://10.0.2.15/ch/html/login - RedirectMatch ^/ch https://10.0.2.15/ch/html/login - - -# SSL - - ServerAdmin webmaster@localhost - - # Enable SSL - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/server.crt - SSLCertificateKeyFile /etc/apache2/ssl/server.key - # You can add intermediate certificates here. - - # Point Apache to the clearinghouse's static images/CSS/JavaScript - Alias /site_media /home/ch/deployment/clearinghouse/website/html/media - - Require all granted - - - # XXX We should configure the Django admin page static files too! - # XXX See https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/modwsgi/ - - # Point the URL https://mysite/ch to the Django app - WSGIScriptAlias /ch /home/ch/deployment/clearinghouse/wsgi/wsgi.py - - - - Require all granted - - - - -# vim: syntax=apache ts=4 sw=4 sts=4 sr noet From 1a75c8f00866148f0c39053d22ab17f5c7a37e56 Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:32:05 -0500 Subject: [PATCH 02/17] Delete apache.conf --- apache.conf | 41 ----------------------------------------- 1 file changed, 41 deletions(-) delete mode 100644 apache.conf diff --git a/apache.conf b/apache.conf deleted file mode 100644 index a741592..0000000 --- a/apache.conf +++ /dev/null @@ -1,41 +0,0 @@ -# Run the Django app as the clearinghouse user -WSGIDaemonProcess chdjango user=abhishek processes=5 threads=10 -WSGIProcessGroup chdjango - -# HTTP - - # Redirect requests for the server index page or that are - # clearinghouse-related to the HTTPS site. - RedirectMatch ^/$ https://abhi/abhishek/html/login - RedirectMatch ^/ch https://abhi/abhishek/html/login - - -# SSL - - ServerAdmin webmaster@localhost - - # Enable SSL - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/server.crt - SSLCertificateKeyFile /etc/apache2/ssl/server.key - # You can add intermediate certificates here. 
- - # Point Apache to the clearinghouse's static images/CSS/JavaScript - Alias /site_media /home/abhishek/deployment/clearinghouse/website/html/media - - Require all granted - - - # XXX We should configure the Django admin page static files too! - # XXX See https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/modwsgi/ - - # Point the URL https://abhi/abhishek to the Django app - WSGIScriptAlias /abhishek /home/abhishek/deployment/clearinghouse/wsgi/wsgi.py - - - - Require all granted - - - - From a1a75ec12f37217ceaca3c496b767ae7662151ed Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:32:16 -0500 Subject: [PATCH 03/17] Delete client_connect.r2py --- client_connect.r2py | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 client_connect.r2py diff --git a/client_connect.r2py b/client_connect.r2py deleted file mode 100644 index 07bce5d..0000000 --- a/client_connect.r2py +++ /dev/null @@ -1,5 +0,0 @@ -if callfunc == 'initialize': - myip = getmyip() - myport = 12346 - client_obj = openconnection('10.0.0.2',12345,myip,myport,10) - log(client_obj) From 67edec161f5c728160788ab0cf3fa193bdca6916 Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:32:27 -0500 Subject: [PATCH 04/17] Delete emulcomm.py --- emulcomm.py | 2246 --------------------------------------------------- 1 file changed, 2246 deletions(-) delete mode 100644 emulcomm.py diff --git a/emulcomm.py b/emulcomm.py deleted file mode 100644 index cf4a664..0000000 --- a/emulcomm.py +++ /dev/null @@ -1,2246 +0,0 @@ -""" - Author: Justin Cappos, Armon Dadgar - - Start Date: 27 June 2008 - - Description: - - This is a collection of communications routines that provide a programmer - with a reasonable environment. This is used by repy.py to provide a - highly restricted (but usable) environment. -""" - -import socket - -# Armon: Used to check if a socket is ready -import select - -# socket uses getattr and setattr. We need to make these available to it... 
-socket.getattr = getattr -socket.setattr = setattr - - -# needed to set threads for recvmess and waitforconn -import threading -# threading in python2.7 uses hasattr. It needs to be made available. -threading.hasattr = hasattr - - -# So I can exit all threads when an error occurs or do select -import harshexit - -# Needed for finding out info about sockets, available interfaces, etc -import nonportable - -# So I can print a clean traceback when an error happens -import tracebackrepy - -# accounting -# id(sock) will be used to register and unregister sockets with nanny -import nanny - -# give me uniqueIDs for the comminfo table -import idhelper - -# for sleep -import time - -# Armon: Used for decoding the error messages -import errno - -# Armon: Used for getting the constant IP values for resolving our external IP -import repy_constants - -# Get the exceptions -from exception_hierarchy import * - -###### Module Data - -# This is a library of all currently bound sockets. Since multiple -# UDP bindings on a single port is hairy, we store bound sockets -# here, and use them for both sending and receiving if they are -# available. This feels slightly byzantine, but it allows us to -# avoid modifying the repy API. -# -# Format of entries is as follows: -# Key - 3-tuple of ("UDP", IP, Port) -# Val - Bound socket object -_BOUND_SOCKETS = {} # Ticket = 1015 (Resolved) - -# If we have a preference for an IP/Interface this flag is set to True -user_ip_interface_preferences = False - -# Do we allow non-specified IPs -allow_nonspecified_ips = True - -# Armon: Specified the list of allowed IP and Interfaces in order of their preference -# The basic structure is list of tuples (IP, Value), IP is True if its an IP, False if its an interface -user_specified_ip_interface_list = [] - -# This list caches the allowed IP's -# It is updated at the launch of repy, or by calls to getmyip and update_ip_cache -# NOTE: The loopback address 127.0.0.1 is always permitted. 
update_ip_cache will always add this -# if it is not specified explicitly by the user -allowediplist = [] -cachelock = threading.Lock() # This allows only a single simultaneous cache update - - -##### Internal Functions - -# Determines if a specified IP address is allowed in the context of user settings -def _ip_is_allowed(ip): - """ - - Determines if a given IP is allowed, by checking against the cached allowed IP's. - - - ip: The IP address to search for. - - - True, if allowed. False, otherwise. - """ - global allowediplist - global user_ip_interface_preferences - global allow_nonspecified_ips - - # If there is no preference, anything goes - # same with allow_nonspecified_ips - if not user_ip_interface_preferences or allow_nonspecified_ips: - return True - - # Check the list of allowed IP's - return (ip in allowediplist) - - -# Only appends the elem to lst if the elem is unique -def _unique_append(lst, elem): - if elem not in lst: - lst.append(elem) - -# This function updates the allowed IP cache -# It iterates through all possible IP's and stores ones which are bindable as part of the allowediplist -def update_ip_cache(): - global allowediplist - global user_ip_interface_preferences - global user_specified_ip_interface_list - global allow_nonspecified_ips - - # If there is no preference, this is a no-op - if not user_ip_interface_preferences: - return - - # Acquire the lock to update the cache - cachelock.acquire() - - # If there is any exception release the cachelock - try: - # Stores the IP's - allowed_list = [] - - # Iterate through the allowed list, handle each element - for (is_ip_addr, value) in user_specified_ip_interface_list: - # Handle normal IP's - if is_ip_addr: - _unique_append(allowed_list, value) - - # Handle interfaces - else: - try: - # Get the IP's associated with the NIC - interface_ips = nonportable.os_api.get_interface_ip_addresses(value) - for interface_ip in interface_ips: - _unique_append(allowed_list, interface_ip) - except: - # Catch 
exceptions if the NIC does not exist - pass - - # This will store all the IP's that we are able to bind to - bindable_list = [] - - # Try binding to every ip - for ip in allowed_list: - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - try: - sock.bind((ip,0)) - except: - pass # Not a good ip, skip it - else: - bindable_list.append(ip) # This is a good ip, store it - finally: - sock.close() - - # Add loopback - _unique_append(bindable_list, "127.0.0.1") - - # Update the global cache - allowediplist = bindable_list - - finally: - # Release the lock - cachelock.release() - - -############## General Purpose socket functions ############## - -def _is_already_connected_exception(exceptionobj): - """ - - Determines if a given error number indicates that the socket - is already connected. - - - An exception object from a network call. - - - True if already connected, false otherwise - """ - # Get the type - exception_type = type(exceptionobj) - - # Only continue if the type is socket.error - if exception_type is not socket.error: - return False - - # Get the error number - errnum = exceptionobj[0] - - # Store a list of error messages meaning we are connected - connected_errors = ["EISCONN", "WSAEISCONN"] - - # Convert the errno to and error string name - try: - errname = errno.errorcode[errnum] - except Exception,e: - # The error is unknown for some reason... - errname = None - - # Return if the error name is in our white list - return (errname in connected_errors) - - -def _is_addr_in_use_exception(exceptionobj): - """ - - Determines if a given error number indicates that the provided - localip / localport are already bound and that the unique - tuple is already in use. - - - An exception object from a network call. 
- - - True if already in use, false otherwise - """ - # Get the type - exception_type = type(exceptionobj) - - # Only continue if the type is socket.error - if exception_type is not socket.error: - return False - - # Get the error number - errnum = exceptionobj[0] - - # Store a list of error messages meaning we are in use - in_use_errors = ["EADDRINUSE", "WSAEADDRINUSE"] - - # Convert the errno to and error string name - try: - errname = errno.errorcode[errnum] - except Exception,e: - # The error is unknown for some reason... - errname = None - - # Return if the error name is in our white list - return (errname in in_use_errors) - - -def _is_addr_unavailable_exception(exceptionobj): - """ - - Determines if a given error number indicates that the provided - localip is not available during a bind() call. - This indicates an AddressBindingError should be raised. - - - An exception object from a network call. - - - True if already in use, false otherwise - """ - # Get the type - exception_type = type(exceptionobj) - - # Only continue if the type is socket.error - if exception_type is not socket.error: - return False - - # Get the error number - errnum = exceptionobj[0] - - # Store a list of error messages meaning the address is not available - not_avail_errors = ["EADDRNOTAVAIL", "WSAEADDRNOTAVAIL"] - - # Convert the errno to and error string name - try: - errname = errno.errorcode[errnum] - except Exception,e: - # The error is unknown for some reason... - errname = None - - # Return if the error name is in our white list - return (errname in not_avail_errors) - - -def _is_conn_refused_exception(exceptionobj): - """ - - Determines if a given error number indicates that the remote - host has actively refused the connection. E.g. - ECONNREFUSED - - - An exception object from a network call. 
- - - True if the error indicates the connection was refused, false otherwise - """ - # Get the type - exception_type = type(exceptionobj) - - # Only continue if the type is socket.error - if exception_type is not socket.error: - return False - - # Get the error number - errnum = exceptionobj[0] - - # Store a list of error messages meaning the host refused - refused_errors = ["ECONNREFUSED", "WSAECONNREFUSED"] - - # Convert the errno to and error string name - try: - errname = errno.errorcode[errnum] - except Exception,e: - # The error is unknown for some reason... - errname = None - - # Return if the error name is in our white list - return (errname in refused_errors) - - -def _is_network_down_exception(exceptionobj): - """ - - Determines if a given error number indicates that the - network is down. - - - An exception object from a network call. - - - True if the network is down, false otherwise - """ - # Get the type - exception_type = type(exceptionobj) - - # Only continue if the type is socket.error - if exception_type is not socket.error: - return False - - # Get the error number - errnum = exceptionobj[0] - - # These error messages mean we are disconnected. - # (The "WSA" ones are the Windows Sockets API's renditions of - # the Linux/BSD errno.h preprocessor macros). - net_down_errors = ["ENETDOWN", "EHOSTUNREACH", "ENETUNREACH", - "WSAENETDOWN", "WSAEHOSTUNREACH", "WSAENETUNREACH"] - - # Convert the errno to and error string name - try: - errname = errno.errorcode[errnum] - except Exception,e: - # The error is unknown for some reason... - errname = None - - # Return if the error name is in our white list - return (errname in net_down_errors) - - -def _is_recoverable_network_exception(exceptionobj): - """ - - Determines if a given error number is recoverable or fatal. - - - An exception object from a network call. - - - True if potentially recoverable, False if fatal. 
- """ - # Get the type - exception_type = type(exceptionobj) - - # socket.timeout is recoverable always - if exception_type == socket.timeout: - return True - - # Only continue if the type is socket.error or select.error - elif exception_type != socket.error and exception_type != select.error: - return False - - # Get the error number - errnum = exceptionobj[0] - - # Store a list of recoverable error numbers - recoverable_errors = ["EINTR","EAGAIN","EBUSY","EWOULDBLOCK","ETIMEDOUT","ERESTART", - "WSAEINTR","WSAEWOULDBLOCK","WSAETIMEDOUT","EALREADY","WSAEALREADY", - "EINPROGRESS","WSAEINPROGRESS"] - - # Convert the errno to and error string name - try: - errname = errno.errorcode[errnum] - except Exception,e: - # The error is unknown for some reason... - errname = None - - # Return if the error name is in our white list - return (errname in recoverable_errors) - - -# Determines based on exception if the connection has been terminated -def _is_terminated_connection_exception(exceptionobj): - """ - - Determines if the exception is indicated the connection is terminated. - - - An exception object from a network call. - - - True if the connection is terminated, False otherwise. - False means we could not determine with certainty if the socket is closed. - """ - # Get the type - exception_type = type(exceptionobj) - - # We only want to continue if it is socket.error or select.error - if exception_type != socket.error and exception_type != select.error: - return False - - # Get the error number - errnum = exceptionobj[0] - - # Store a list of errors which indicate connection closed - connection_closed_errors = ["EPIPE","EBADF","EBADR","ENOLINK","EBADFD","ENETRESET", - "ECONNRESET","WSAEBADF","WSAENOTSOCK","WSAECONNRESET",] - - # Convert the errnum to an error string - try: - errname = errno.errorcode[errnum] - except: - # The error number is not defined... 
- errname = None - - # Return whether the errname is in our pre-defined list - return (errname in connection_closed_errors) - - - -# Armon: This is used for semantics, to determine if we have a valid IP. -def _is_valid_ip_address(ipaddr): - """ - - Determines if ipaddr is a valid IP address. - 0.X and 224-255.X addresses are not allowed. - Additionally, 192.168.0.0 is not allowed. - - - ipaddr: String to check for validity. (It will check that this is a string). - - - True if a valid IP, False otherwise. - """ - # Argument must be of the string type - if not type(ipaddr) == str: - return False - - if ipaddr == '192.168.0.0': - return False - - # A valid IP should have 4 segments, explode on the period - octets = ipaddr.split(".") - - # Check that we have 4 parts - if len(octets) != 4: - return False - - # Check that each segment is a number between 0 and 255 inclusively. - for octet in octets: - # Attempt to convert to an integer - try: - ipnumber = int(octet) - except ValueError: - # There was an error converting to an integer, not an IP - return False - - # IP addresses octets must be between 0 and 255 - if not (ipnumber >= 0 and ipnumber <= 255): - return False - - # should not have a ValueError (I already checked) - firstipnumber = int(octets[0]) - - # IP addresses with the first octet 0 refer to all local IPs. These are - # not allowed - if firstipnumber == 0: - return False - - # IP addresses with the first octet >=224 are either Multicast or reserved. - # These are not allowed - if firstipnumber >= 224: - return False - - # At this point, assume the IP is valid - return True - - -# Armon: This is used for semantics, to determine if the given port is valid -def _is_valid_network_port(port): - """ - - Determines if a given network port is valid. - - - port: A numeric type (this will be checked) port number. - - - True if valid, False otherwise. 
- """ - # Check the type is int or long - if not (type(port) == long or type(port) == int): - return False - - if port >= 1 and port <= 65535: - return True - else: - return False - - -# Used to decide if an IP is the loopback IP or not. This is needed for -# accounting -def _is_loopback_ipaddr(host): - if not host.startswith('127.'): - return False - if len(host.split('.')) != 4: - return False - - octets = host.split('.') - if len(octets) != 4: - return False - for octet in octets: - try: - if int(octet) > 255 or int(octet) < 0: - return False - except ValueError: - return False - - return True - - -# Checks if binding to the local port is allowed -# type should be "TCP" or "UDP". -def _is_allowed_localport(type, localport): - # Switch to the proper resource - if type == "TCP": - resource = "connport" - elif type == "UDP": - resource = "messport" - else: - raise InternalRepyError("Bad type specified for _is_allowed_localport()") - - # Check what is allowed by nanny - return nanny.is_item_allowed(resource, float(localport)) - - - - -######################### Simple Public Functions ########################## - - - -# Public interface -def gethostbyname(name): - """ - - Provides information about a hostname. Calls socket.gethostbyname(). - Translate a host name to IPv4 address format. The IPv4 address is - returned as a string, such as '100.50.200.5'. If the host name is an - IPv4 address itself it is returned unchanged. - - - name: - The host name to translate. - - - RepyArgumentError (descends from NetworkError) if the name is not a string - NetworkAddressError (descends from NetworkError) if the address cannot - be resolved. - - - None. - - - This operation consumes network bandwidth of 4K netrecv, 1K netsend. - (It's hard to tell how much was actually sent / received at this level.) - - - The IPv4 address as a string. - """ - - if type(name) is not str: - raise RepyArgumentError("gethostbyname() takes a string as argument.") - - # charge 4K for a look up... 
I don't know the right number, but we should - # charge something. We'll always charge to the netsend interface... - nanny.tattle_quantity('netsend', 1024) - nanny.tattle_quantity('netrecv', 4096) - - try: - return socket.gethostbyname(name) - except socket.gaierror: - raise NetworkAddressError("The hostname '"+name+"' could not be resolved.") - - - -# Public interface -def getmyip(): - """ - - Provides the IP of this computer on its public facing interface. - Does some clever trickery. - - - None - - - InternetConnectivityError is the host is not connected to the internet. - - - None. - - - This operations consumes 256 netsend and 128 netrecv. - - - The localhost's IP address - """ - # Charge for the resources - nanny.tattle_quantity("netsend", 256) - nanny.tattle_quantity("netrecv", 128) - - # I got some of this from: http://groups.google.com/group/comp.lang.python/browse_thread/thread/d931cdc326d7032b?hl=en - - # Update the cache and return the first allowed IP - # Only if a preference is set - if user_ip_interface_preferences: - update_ip_cache() - # Return the first allowed ip, there is always at least 1 element (loopback) - return allowediplist[0] - - # Initialize these to None, so we can detect a failure - myip = None - - # It's possible on some platforms (Windows Mobile) that the IP will be - # 0.0.0.0 even when I have a public IP and the external IP is up. However, if - # I get a real connection with SOCK_STREAM, then I should get the real - # answer. - - # Try each stable IP - for ip_addr in repy_constants.STABLE_PUBLIC_IPS: - try: - # Try to resolve using the current connection type and - # stable IP, using port 80 since some platforms panic - # when given 0 (FreeBSD) - myip = _get_localIP_to_remoteIP(socket.SOCK_DGRAM, ip_addr, 80) - except (socket.error, socket.timeout): - # We can ignore any networking related errors, since we want to try - # the other connection types and IP addresses. If we fail, - # we will eventually raise an exception anyways. 
- pass - else: - # Return immediately if the IP address is good - if _is_valid_ip_address(myip): - return myip - - - # Since we haven't returned yet, we must have failed. - # Raise an exception, we must not be connected to the internet - raise InternetConnectivityError("Cannot detect a connection to the Internet.") - - - -def _get_localIP_to_remoteIP(connection_type, external_ip, external_port=80): - """ - - Resolve the local ip used when connecting outbound to an external ip. - - - connection_type: - The type of connection to attempt. See socket.socket(). - - external_ip: - The external IP to attempt to connect to. - - external_port: - The port on the remote host to attempt to connect to. - - - As with socket.socket(), socketobj.connect(), etc. - - - The locally assigned IP for the connection. - """ - # Open a socket - sockobj = socket.socket(socket.AF_INET, connection_type) - - # Make sure that the socket obj doesn't hang forever in - # case connect() is blocking. Fix to #1003 - sockobj.settimeout(1.0) - - try: - sockobj.connect((external_ip, external_port)) - - # Get the local connection information for this socket - (myip, localport) = sockobj.getsockname() - - # Always close the socket - finally: - sockobj.close() - - return myip - - - - -###################### Shared message / connection items ################### - - - - -# Armon: How frequently should we check for the availability of the socket? -RETRY_INTERVAL = 0.2 # In seconds - - -def _cleanup_socket(self): - """ - - Internal cleanup method for open sockets. The socket - lock for the socket should be acquired prior to - calling. - - - None - - The insocket/outsocket handle will be released. - - - InternalRepyError is raised if the socket lock is not held - prior to calling the function. - - - None - """ - sock = self.socketobj - socket_lock = self.sock_lock - # Make sure the lock is already acquired - # BUG: We don't know which thread exactly acquired the lock. 
- if socket_lock.acquire(False): - socket_lock.release() - raise InternalRepyError("Socket lock should be acquired before calling _cleanup_socket!") - - if (sock == None): - # Already cleaned up - return - # Shutdown the socket for writing prior to close - # to unblock any threads that are writing - try: - sock.shutdown(socket.SHUT_WR) - except: - pass - - # Close the socket - try: - sock.close() - except: - pass - # socket id is used to unregister socket with nanny - sockid = id(sock) - # Re-store resources - nanny.tattle_remove_item('insockets', sockid) - nanny.tattle_remove_item('outsockets', sockid) - - -####################### Message sending ############################# - - - -# Public interface!!! -def sendmessage(destip, destport, message, localip, localport): - """ - - Send a message to a host / port - - - destip: - The host to send a message to - destport: - The port to send the message to - message: - The message to send - localhost: - The local IP to send the message from - localport: - The local port to send the message from - - - AddressBindingError (descends NetworkError) when the local IP isn't - a local IP. - - ResourceForbiddenError (descends ResourceException?) when the local - port isn't allowed - - RepyArgumentError when the local IP and port aren't valid types - or values - - AlreadyListeningError if there is an existing listening UDP socket - on the same local IP and port. - - DuplicateTupleError if there is another sendmessage on the same - local IP and port to the same remote host. - - - None. - - - This operation consumes 64 bytes + number of bytes of the message that - were transmitted. This requires that the localport is allowed. 
- - - The number of bytes sent on success - """ - # Check the input arguments (type) - if type(destip) is not str: - raise RepyArgumentError("Provided destip must be a string!") - if type(localip) is not str: - raise RepyArgumentError("Provided localip must be a string!") - - if type(destport) is not int: - raise RepyArgumentError("Provided destport must be an int!") - if type(localport) is not int: - raise RepyArgumentError("Provided localport must be an int!") - - if type(message) is not str: - raise RepyArgumentError("Provided message must be a string!") - - - # Check the input arguments (sanity) - if not _is_valid_ip_address(destip): - raise RepyArgumentError("Provided destip is not valid! IP: '"+destip+"'") - if not _is_valid_ip_address(localip): - raise RepyArgumentError("Provided localip is not valid! IP: '"+localip+"'") - - if not _is_valid_network_port(destport): - raise RepyArgumentError("Provided destport is not valid! Port: "+str(destport)) - if not _is_valid_network_port(localport): - raise RepyArgumentError("Provided localport is not valid! Port: "+str(localport)) - - - # Check that if localip == destip, then localport != destport - if localip == destip and localport == destport: - raise RepyArgumentError("Local socket name cannot match destination socket name! Local/Dest IP and Port match.") - - # Check the input arguments (permission) - update_ip_cache() - if not _ip_is_allowed(localip): - raise ResourceForbiddenError("Provided localip is not allowed! IP: "+localip) - - if not _is_allowed_localport("UDP", localport): - raise ResourceForbiddenError("Provided localport is not allowed! 
Port: "+str(localport)) - - # Wait for netsend - if _is_loopback_ipaddr(destip): - nanny.tattle_quantity('loopsend', 0) - else: - nanny.tattle_quantity('netsend', 0) - - try: - sock = None - - if ("UDP", localip, localport) in _BOUND_SOCKETS: - sock = _BOUND_SOCKETS[("UDP", localip, localport)] - else: - # Get the socket - sock = _get_udp_socket(localip, localport) - # Register this socket with nanny - nanny.tattle_add_item("outsockets", id(sock)) - # Send the message - bytessent = sock.sendto(message, (destip, destport)) - - # Account for the resources - if _is_loopback_ipaddr(destip): - nanny.tattle_quantity('loopsend', bytessent + 64) - else: - nanny.tattle_quantity('netsend', bytessent + 64) - - return bytessent - - except Exception, e: - - try: - # If we're borrowing the socket, closing is not appropriate. - if not ("UDP", localip, localport) in _BOUND_SOCKETS: - sock.close() - except: - pass - - # Check if address is already in use - if _is_addr_in_use_exception(e): - raise DuplicateTupleError("Provided Local IP and Local Port is already in use!") - - if _is_addr_unavailable_exception(e): - raise AddressBindingError("Cannot bind to the specified local ip, invalid!") - - # Unknown error... - else: - raise - - - - -# Public interface!!! -def listenformessage(localip, localport): - """ - - Sets up a UDPServerSocket to receive incoming UDP messages. - - - localip: - The local IP to register the handler on. - localport: - The port to listen on. - - - DuplicateTupleError (descends NetworkError) if the port cannot be - listened on because some other process on the system is listening on - it. - - AlreadyListeningError if there is already a UDPServerSocket with the same - IP and port. - - RepyArgumentError if the port number or ip is wrong type or obviously - invalid. - - AddressBindingError (descends NetworkError) if the IP address isn't a - local IP. - - ResourceForbiddenError if the port is not allowed. 
- - - Prevents other UDPServerSockets from using this port / IP - - - This operation consumes an insocket and requires that the provided messport is allowed. - - - The UDPServerSocket. - """ - # Check the input arguments (type) - if type(localip) is not str: - raise RepyArgumentError("Provided localip must be a string!") - - if type(localport) is not int: - raise RepyArgumentError("Provided localport must be a int!") - - - # Check the input arguments (sanity) - if not _is_valid_ip_address(localip): - raise RepyArgumentError("Provided localip is not valid! IP: '"+localip+"'") - - if not _is_valid_network_port(localport): - raise RepyArgumentError("Provided localport is not valid! Port: "+str(localport)) - - - # Check the input arguments (permission) - update_ip_cache() - if not _ip_is_allowed(localip): - raise ResourceForbiddenError("Provided localip is not allowed! IP: '"+localip+"'") - - if not _is_allowed_localport("UDP", localport): - raise ResourceForbiddenError("Provided localport is not allowed! Port: "+str(localport)) - # This identity tuple will be used to check for an existing connection with same identity - identity = ("UDP", localip, localport, None, None) - - try: - # Check if localip is on loopback - on_loopback = _is_loopback_ipaddr(localip) - - # Get the socket - sock = _get_udp_socket(localip,localport) - - # Register this socket as an insocket - nanny.tattle_add_item('insockets',id(sock)) - - # Add the socket to _BOUND_SOCKETS so that we can - # preserve send functionality on this port. 
- _BOUND_SOCKETS[("UDP", localip, localport)] = sock - - except Exception, e: - - # Check if this an already in use error - if _is_addr_in_use_exception(e): - # Call _conn_cleanup_check to determine if this is because - # the socket is being cleaned up or if it is actively being used or - # if there is an existing listening socket - # This will always raise DuplicateTupleError or - # CleanupInProgressError or AlreadyListeningError - _conn_cleanup_check(identity) - - # Check if this is a binding error - if _is_addr_unavailable_exception(e): - raise AddressBindingError("Cannot bind to the specified local ip, invalid!") - - # Unknown error... - else: - raise - - # Create a UDPServerSocket - server_sock = UDPServerSocket(sock, on_loopback) - - # Return the UDPServerSocket - return server_sock - - - -####################### Connection oriented ############################# -def _conn_alreadyexists_check(identity): - """ - - This private function checks if a socket that - got EADDRINUSE is because the socket is active, - or not - - - identity: A tuple to check for cleanup - - - Raises DuplicateTupleError if the socket is actively being used. 
- - Raises AddressBindingError if the binding is not allowed - - - None - """ - # Decompose the tuple - family, localip, localport, desthost, destport = identity - - # Check the sockets status - (exists, status) = nonportable.os_api.exists_outgoing_network_socket(localip,localport,desthost,destport) - - # Check if the socket is actively being used - # If the socket is these states: - # ESTABLISHED : Connection is active - # CLOSE_WAIT : Connection is closed, but waiting on local program to close - # SYN_SENT (SENT) : Connection is just being established - if exists and ("ESTABLISH" in status or "CLOSE_WAIT" in status or "SENT" in status): - raise DuplicateTupleError("There is a duplicate connection which conflicts with the request!") - - # Otherwise, the socket is being cleaned up - raise AddressBindingError("Cannot bind to the specified local ip, invalid!") - - -def _conn_cleanup_check(identity): - """ - - This private function checks if a socket that - got EADDRINUSE is because the socket is active, - or because the socket is listening or - because the socket is being cleaned up. - - - identity: A tuple to check for cleanup - - - Raises DuplicateTupleError if the socket is actively being used. - - Raises AlreadyListeningError if the socket is listening. - - Raises CleanupInProgressError if the socket is being cleaned up - or if the socket does not appear to exist. This is because there - may be a race between getting EADDRINUSE and the call to this - function. 
- - - None - """ - # Decompose the tuple - family, localip, localport, desthost, destport = identity - - # Check the sockets status - (exists, status) = nonportable.os_api.exists_outgoing_network_socket(localip,localport,desthost,destport) - - # Check if the socket is actively being used - # If the socket is these states: - # ESTABLISHED : Connection is active - # CLOSE_WAIT : Connection is closed, but waiting on local program to close - # SYN_SENT (SENT) : Connection is just being established - if exists and ("ESTABLISH" in status or "CLOSE_WAIT" in status or "SENT" in status): - raise DuplicateTupleError("There is a duplicate connection which conflicts with the request!") - else: - # Checking if a listening TCP or UDP socket exists with given local address - # The third argument is True if socket type is TCP,False if socket type is UDP - if (nonportable.os_api.exists_listening_network_socket(localip, localport, (family == "TCP"))): - raise AlreadyListeningError("There is a listening socket on the provided localip and localport!") - # Otherwise, the socket is being cleaned up - else: - raise CleanupInProgressError("The socket is being cleaned up by the operating system!") - - -def _timed_conn_initialize(localip,localport,destip,destport, timeout): - """ - - Tries to initialize an outgoing socket to match - the given address parameters. - - - localip,localport: The local address of the socket - destip,destport: The destination address to which socket has to be connected - timeout: Maximum time to try - - - Raises TimeoutError if we timed out trying to connect. - Raises ConnectionRefusedError if the connection was refused. - Raises InternetConnectivityError if the network is down. - - Raises any errors encountered calling _get_tcp_socket, - or any non-recoverable network exception. - - - A Python socket object connected to the dest, - from the specified local tuple. 
- """ - - # Store our start time - starttime = nonportable.getruntime() - - # Get a TCP socket bound to the local ip / port - sock = _get_tcp_socket(localip, localport) - sock.settimeout(timeout) - - try: - # Try to connect until we timeout - connected = False - while nonportable.getruntime() - starttime < timeout: - try: - sock.connect((destip, destport)) - connected = True - break - except Exception, e: - # Check if we are already connected - if _is_already_connected_exception(e): - connected = True - raise DuplicateTupleError("There is a duplicate connection which conflicts with the request!") - break - - # Check if the network is down - if _is_network_down_exception(e): - raise InternetConnectivityError("The network is down or cannot be reached from the local IP!") - - # Check if the connection was refused - if _is_conn_refused_exception(e): - raise ConnectionRefusedError("The connection was refused!") - - # Check if this is recoverable (try again, timeout, etc) - elif not _is_recoverable_network_exception(e): - raise - - # Sleep and retry, avoid busy waiting - time.sleep(RETRY_INTERVAL) - - # Check if we timed out - if not connected: - raise TimeoutError("Timed-out connecting to the remote host!") - - # Return the socket - return sock - - except: - # Close the socket, and raise - sock.close() - raise - - -# Public interface!!! -def openconnection(destip, destport,localip, localport, timeout): - """ - - Opens a connection, returning a socket-like object - - - - destip: The destination ip to open communications with - - destport: The destination port to use for communication - - localip: The local ip to use for the communication - - localport: The local port to use for communication - - timeout: The maximum amount of time to wait to connect. This may - be a floating point number or an integer - - - - - RepyArgumentError if the arguments are invalid. This includes both - the types and values of arguments. 
If the localip matches the destip, - and the localport matches the destport this will also be raised. - - AddressBindingError (descends NetworkError) if the localip isn't - associated with the local system or is not allowed. - - ResourceForbiddenError (descends ResourceError) if the localport isn't - allowed. - - DuplicateTupleError (descends NetworkError) if the (localip, localport, - destip, destport) tuple is already used. This will also occur if the - operating system prevents the local IP / port from being used. - - AlreadyListeningError if the (localip, localport) tuple is already used - for a listening TCP socket. - - CleanupInProgress if the (localip, localport, destip, destport) tuple is - still being cleaned up by the OS. - - ConnectionRefusedError (descends NetworkError) if the connection cannot - be established because the destination port isn't being listened on. - - TimeoutError (common to all API functions that timeout) if the - connection times out - - InternetConnectivityError if the network is down, or if the host - cannot be reached from the local IP that has been bound to. - - - - TODO - - - This operation consumes 64*2 bytes of netsend (SYN, ACK) and 64 bytes - of netrecv (SYN/ACK). This requires that the localport is allowed. Upon - success, this call consumes an outsocket. - - - A socket-like object that can be used for communication. Use send, - recv, and close just like you would an actual socket object in python. 
- """ - # Check the input arguments (type) - if type(destip) is not str: - raise RepyArgumentError("Provided destip must be a string!") - if type(localip) is not str: - raise RepyArgumentError("Provided localip must be a string!") - - if type(destport) is not int: - raise RepyArgumentError("Provided destport must be an int!") - if type(localport) is not int: - raise RepyArgumentError("Provided localport must be an int!") - - if type(timeout) not in [float, int]: - raise RepyArgumentError("Provided timeout must be an int or float!") - - - # Check the input arguments (sanity) - if not _is_valid_ip_address(destip): - raise RepyArgumentError("Provided destip is not valid! IP: '"+destip+"'") - if not _is_valid_ip_address(localip): - raise RepyArgumentError("Provided localip is not valid! IP: '"+localip+"'") - - if not _is_valid_network_port(destport): - raise RepyArgumentError("Provided destport is not valid! Port: "+str(destport)) - if not _is_valid_network_port(localport): - raise RepyArgumentError("Provided localport is not valid! Port: "+str(localport)) - - if timeout <= 0: - raise RepyArgumentError("Provided timeout is not valid, must be positive! Timeout: "+str(timeout)) - - # Check that if localip == destip, then localport != destport - if localip == destip and localport == destport: - raise RepyArgumentError("Local socket name cannot match destination socket name! Local/Dest IP and Port match.") - - # Check the input arguments (permission) - update_ip_cache() - if not _ip_is_allowed(localip): - raise ResourceForbiddenError("Provided localip is not allowed! IP: "+localip) - - if not _is_allowed_localport("TCP", localport): - raise ResourceForbiddenError("Provided localport is not allowed! 
Port: "+str(localport)) - - - - # use this tuple during connection clean up check - identity = ("TCP", localip, localport, destip, destport) - - # Wait for netsend / netrecv - if _is_loopback_ipaddr(destip): - nanny.tattle_quantity('loopsend', 0) - nanny.tattle_quantity('looprecv', 0) - else: - nanny.tattle_quantity('netsend', 0) - nanny.tattle_quantity('netrecv', 0) - - try: - # To Know if remote IP is on loopback or not - on_loopback = _is_loopback_ipaddr(destip) - - # Get the socket - sock = _timed_conn_initialize(localip,localport,destip,destport, timeout) - - # Register this socket as an outsocket - nanny.tattle_add_item('outsockets',id(sock)) - except Exception, e: - - # Check if this an already in use error - if _is_addr_in_use_exception(e): - # Call _conn_cleanup_check to determine if this is because - # the socket is being cleaned up or if it is actively being used - # This will always raise DuplicateTupleError or - # CleanupInProgressError or AlreadyListeningError - _conn_cleanup_check(identity) - - # Check if this is a binding error - if _is_addr_unavailable_exception(e): - # Call _conn_alreadyexists_check to determine if this is because - # the connection is active or not - _conn_alreadyexists_check(identity) - - - # Unknown error... - else: - raise - - emul_sock = EmulatedSocket(sock, on_loopback) - - # Tattle the resources used - if _is_loopback_ipaddr(destip): - nanny.tattle_quantity('loopsend', 128) - nanny.tattle_quantity('looprecv', 64) - else: - nanny.tattle_quantity('netsend', 128) - nanny.tattle_quantity('netrecv', 64) - - # Return the EmulatedSocket - return emul_sock - - -def listenforconnection(localip, localport): - """ - - Sets up a TCPServerSocket to recieve incoming TCP connections. - - - localip: - The local IP to listen on - localport: - The local port to listen on - - - Raises AlreadyListeningError if another TCPServerSocket or process has bound - to the provided localip and localport. 
- - Raises DuplicateTupleError if another process has bound to the - provided localip and localport. - - Raises RepyArgumentError if the localip or localport are invalid - Raises ResourceForbiddenError if the ip or port is not allowed. - Raises AddressBindingError if the IP address isn't a local ip. - - - The IP / Port combination cannot be used until the TCPServerSocket - is closed. - - - Uses an insocket for the TCPServerSocket. - - - A TCPServerSocket object. - """ - # Check the input arguments (type) - if type(localip) is not str: - raise RepyArgumentError("Provided localip must be a string!") - - if type(localport) is not int: - raise RepyArgumentError("Provided localport must be a int!") - - - # Check the input arguments (sanity) - if not _is_valid_ip_address(localip): - raise RepyArgumentError("Provided localip is not valid! IP: '"+localip+"'") - - if not _is_valid_network_port(localport): - raise RepyArgumentError("Provided localport is not valid! Port: "+str(localport)) - - - # Check the input arguments (permission) - update_ip_cache() - if not _ip_is_allowed(localip): - raise ResourceForbiddenError("Provided localip is not allowed! IP: '"+localip+"'") - - if not _is_allowed_localport("TCP", localport): - raise ResourceForbiddenError("Provided localport is not allowed! 
Port: "+str(localport)) - - # This is used to check if there is an existing connection with the same identity - identity = ("TCP", localip, localport, None, None) - - try: - # Check if localip is on loopback - on_loopback = _is_loopback_ipaddr(localip) - # Get the socket - sock = _get_tcp_socket(localip,localport) - nanny.tattle_add_item('insockets',id(sock)) - # Get the maximum number of outsockets - max_outsockets = nanny.get_resource_limit("outsockets") - # If we have restrictions, then we want to set the outsocket - # limit - if max_outsockets: - # Set the backlog to be the maximum number of outsockets - sock.listen(max_outsockets) - else: - sock.listen(5) - - except Exception, e: - - # Check if this an already in use error - if _is_addr_in_use_exception(e): - # Call _conn_cleanup_check to determine if this is because - # the socket is being cleaned up or if it is actively being used - # This will always raise DuplicateTupleError or - # CleanupInProgressError or AlreadyListeningError - _conn_cleanup_check(identity) - - # Check if this is a binding error - if _is_addr_unavailable_exception(e): - # Call _conn_alreadyexists_check to determine if this is because - # the connection is active or not - _conn_alreadyexists_check(identity) - # Unknown error... - else: - raise - - server_sock = TCPServerSocket(sock, on_loopback) - - # Return the TCPServerSocket - return server_sock - - -# Private method to create a TCP socket and bind -# to a localip and localport. -# -def _get_tcp_socket(localip, localport): - # Create the TCP socket - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - # Reuse the socket if it's "pseudo-availible" - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - - if localip and localport: - try: - s.bind((localip,localport)) - except: # Raise the exception un-tainted - # don't leak sockets - s.close() - raise - return s - - -# Private method to create a UDP socket and bind -# to a localip and localport. 
-# -def _get_udp_socket(localip, localport): - # Create the UDP socket - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - if localip and localport: - try: - s.bind((localip, localport)) - except: - # don't leak sockets - s.close() - raise - return s - - -# Checks if the given real socket would block -def _check_socket_state(realsock, waitfor="rw", timeout=0.0): - """ - - Checks if the given socket would block on a send() or recv(). - In the case of a listening socket, read_will_block equates to - accept_will_block. - - - realsock: - A real socket.socket() object to check for. - - waitfor: - An optional specifier of what to wait for. "r" for read only, "w" for write only, - and "rw" for read or write. E.g. if timeout is 10, and wait is "r", this will block - for up to 10 seconds until read_will_block is false. If you specify "r", then - write_will_block is always true, and if you specify "w" then read_will_block is - always true. - - timeout: - An optional timeout to wait for the socket to be read or write ready. - - - A tuple, (read_will_block, write_will_block). - - - As with select.select(). Probably best to wrap this with _is_recoverable_network_exception - and _is_terminated_connection_exception. Throws an exception if waitfor is not in ["r","w","rw"] - """ - # Check that waitfor is valid - if waitfor not in ["rw","r","w"]: - raise Exception, "Illegal waitfor argument!" 
- - # Array to hold the socket - sock_array = [realsock] - - # Generate the read/write arrays - read_array = [] - if "r" in waitfor: - read_array = sock_array - - write_array = [] - if "w" in waitfor: - write_array = sock_array - - # Call select() - (readable, writeable, exception) = select.select(read_array,write_array,sock_array,timeout) - - # If the socket is in the exception list, then assume its both read and writable - if (realsock in exception): - return (False, False) - - # Return normally then - return (realsock not in readable, realsock not in writeable) - - -##### Class Definitions - -# Public. We pass these to the users for communication purposes -class EmulatedSocket: - """ - This object is a wrapper around a tcp - TCP socket. It allows for sending and - recieving data, and closing the socket. - - It operates in a strictly non-blocking mode, - and uses Exceptions to indicate when an - operation would result in blocking behavior. - """ - # Fields: - # socket: This is a TCP Socket - # - # send_buffer_size: The size of the send buffer. We send less than - # this to avoid a bug. - # - # on_loopback: true if the remote ip is a loopback address. - # this is used for resource accounting. - # sock_lock: Threading Lock on socket object used for - # synchronization. - __slots__ = ["socketobj", "send_buffer_size", "on_loopback", "sock_lock"] - - - def __init__(self, sock, on_loopback): - """ - - Initializes a EmulatedSocket object. - - - sock: A TCP Socket - - on_loopback: True/False based on whether remote IP is - on loopback oe not - - - InteralRepyError is raised if there is no table entry for - the socket. - - - A EmulatedSocket object. 
- """ - # Store the parameters tuple - self.socketobj = sock - self.on_loopback = on_loopback - self.sock_lock = threading.Lock() - - # Store the socket send buffer size and set to non-blocking - self.send_buffer_size = sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) - # locking should be unnecessary because there isn't another external - # reference here yet - sock.setblocking(0) - - - def _close(self): - """ - - Private close method. Called when socket lock is held. - Does not perform any accounting / locking. Those should - be done by the public methods. - - - None - - - Closes the socket - - - None - """ - # Clean up the socket - _cleanup_socket(self) - - # Replace the socket - self.socketobj = None - - - def close(self): - """ - - Closes a socket. Pending remote recv() calls will return with the - remaining information. Local recv / send calls will fail after this. - - - None - - - None - - - Pending local recv calls will either return or have an exception. - - - If the connection is closed, no resources are consumed. This operation - uses 64 bytes of netrecv, and 128 bytes of netsend. - This call also stops consuming an outsocket. - - - True if this is the first close call to this socket, False otherwise. - """ - # Get the socket lock - socket_lock = self.sock_lock - - if (self.socketobj == None): - return False - # Wait for resources - if self.on_loopback: - nanny.tattle_quantity('looprecv', 0) - nanny.tattle_quantity('loopsend', 0) - else: - nanny.tattle_quantity('netrecv', 0) - nanny.tattle_quantity('netsend', 0) - - # Acquire the lock - socket_lock.acquire() - try: - # Internal close - self._close() - - # Tattle the resources - if self.on_loopback: - nanny.tattle_quantity('looprecv',64) - nanny.tattle_quantity('loopsend',128) - else: - nanny.tattle_quantity('netrecv',64) - nanny.tattle_quantity('netsend',128) - - # Done - return True - - finally: - socket_lock.release() - - - - def recv(self,bytes): - """ - - Receives data from a socket. 
It may receive fewer bytes than - requested. - - - bytes: - The maximum number of bytes to read. - - - SocketClosedLocal is raised if the socket was closed locally. - SocketClosedRemote is raised if the socket was closed remotely. - SocketWouldBlockError is raised if the socket operation would block. - - - None. - - - This operations consumes 64 + amount of data in bytes - worth of netrecv, and 64 bytes of netsend. - - - The data received from the socket (as a string). If '' is returned, - the other side has closed the socket and no more data will arrive. - """ - # Get the socket lock - socket_lock = self.sock_lock - # Wait if already oversubscribed - if self.on_loopback: - nanny.tattle_quantity('looprecv',0) - nanny.tattle_quantity('loopsend',0) - else: - nanny.tattle_quantity('netrecv',0) - nanny.tattle_quantity('netsend',0) - - - # Acquire the socket lock - socket_lock.acquire() - try: - # Get the socket - sock = self.socketobj - if sock is None: - raise KeyError # Socket is closed locally - - # Try to recieve the data - if ((bytes) <= 0): - return "" - else: - data_recieved = sock.recv(bytes) - - - - # Calculate the length of the data - data_length = len(data_recieved) - - # Raise an exception if there was no data - if data_length == 0: - raise SocketClosedRemote("The socket has been closed remotely!") - - if self.on_loopback: - nanny.tattle_quantity('looprecv',data_length+64) - nanny.tattle_quantity('loopsend',64) - else: - nanny.tattle_quantity('netrecv',data_length+64) - nanny.tattle_quantity('netsend',64) - - return data_recieved - - except KeyError: - raise SocketClosedLocal("The socket is closed!") - - except RepyException: - raise # Pass up from inner block - - except Exception, e: - # Check if this a recoverable error - if _is_recoverable_network_exception(e): - # Operation would block - raise SocketWouldBlockError("There is no data! 
recv() would block.") - - elif _is_terminated_connection_exception(e): - # Remote close - self._close() - raise SocketClosedRemote("The socket has been closed remotely!") - - else: - # Unknown error - self._close() - raise SocketClosedLocal("The socket has encountered an unexpected error! Error:"+str(e)) - - finally: - socket_lock.release() - - - - def send(self,message): - """ - - Sends data on a socket. It may send fewer bytes than requested. - - - message: - The string to send. - - - SocketClosedLocal is raised if the socket is closed locally. - SocketClosedRemote is raised if the socket is closed remotely. - SocketWouldBlockError is raised if the operation would block. - - - None. - - - This operations consumes 64 + size of sent data of netsend and - 64 bytes of netrecv. - - - The number of bytes sent. Be sure not to assume this is always the - complete amount! - """ - # Get the socket lock - socket_lock = self.sock_lock - # Wait if already oversubscribed - if self.on_loopback: - nanny.tattle_quantity('loopsend',0) - nanny.tattle_quantity('looprecv',0) - else: - nanny.tattle_quantity('netsend',0) - nanny.tattle_quantity('netrecv',0) - - # Trim the message size to be less than the send buffer size. - # This is a fix for http://support.microsoft.com/kb/823764 - message = message[:self.send_buffer_size-1] - - # Acquire the socket lock - socket_lock.acquire() - try: - # Get the socket - sock = self.socketobj - if sock is None: - raise KeyError # Socket is closed locally - - # Detect Socket Closed Remote - # Fixes ticket#974 - (readable, writable, exception) = select.select([sock],[],[],0) - # check if socket is readable. This is true if the remote end closed. - if readable: - - # if socket is readable but there was no data this means the remote end - # has closed the socket. We peek so that we don't consume a character. 
- data_peeked = sock.recv(1,socket.MSG_PEEK) - if len(data_peeked) == 0: - # remote socket is closed - raise SocketClosedRemote("The socket has been closed by the remote end!") - - # Try to send the data - bytes_sent = sock.send(message) - - if self.on_loopback: - nanny.tattle_quantity('looprecv', 64) - nanny.tattle_quantity('loopsend', 64 + bytes_sent) - else: - nanny.tattle_quantity('netrecv', 64) - nanny.tattle_quantity('netsend', 64 + bytes_sent) - - # Return the number of bytes sent - return bytes_sent - - - except KeyError: - raise SocketClosedLocal("The socket is closed!") - except RepyException: - raise # pass up from inner block - except Exception, e: - # Check if this a recoverable error - if _is_recoverable_network_exception(e): - # Operation would block - raise SocketWouldBlockError("send() would block.") - - elif _is_terminated_connection_exception(e): - # Remote close - self._close() - raise SocketClosedRemote("The socket has been closed remotely!") - - else: - # Unknown error - self._close() - raise SocketClosedLocal("The socket has encountered an unexpected error! Error:"+str(e)) - - finally: - socket_lock.release() - - - def __del__(self): - # Get the socket lock - try: - socket_lock = self.sock_lock - except KeyError: - # Closed, done - return - - # Acquire the lock and close - socket_lock.acquire() - try: - self._close() - finally: - socket_lock.release() - - -# End of EmulatedSocket class - - -# Public: Class the behaves represents a listening UDP socket. -class UDPServerSocket: - """ - This object is a wrapper around a listening - UDP socket. It allows for accepting incoming - messages, and closing the socket. - - It operates in a strictly non-blocking mode, - and uses Exceptions to indicate when an - operation would result in blocking behavior. - """ - # Fields: - # sock: This is a listening UDP socket - # on_loopback: True if the local IP is a loopback address. - # This is used for resource accounting. 
- # sock_lock: Threading Lock on socket object used for - # synchronization. - __slots__ = ["socketobj", "on_loopback", "sock_lock"] - - # UDP listening socket interface - def __init__(self, sock, on_loopback): - """ - - Initializes the UDPServerSocket. The socket - should already be established by listenformessage - prior to calling the initializer. - - - - socketobj : The listening socket - on_loopback : True/False based on whether the local IP - is a loopback address or not - - None - - - A UDPServerSocket - """ - # Store the parameters - self.socketobj = sock - self.on_loopback = on_loopback - self.sock_lock = threading.Lock() - - # Set the socket to non-blocking - # locking should be unnecessary because there isn't another external - # reference here yet - sock.setblocking(0) - - def getmessage(self): - """ - - Obtains an incoming message that was sent to an IP and port. - - - None. - - - SocketClosedLocal if UDPServerSocket.close() was called. - Raises SocketWouldBlockError if the operation would block. - - - None - - - This operation consumes 64 + size of message bytes of netrecv - - - A tuple consisting of the remote IP, remote port, and message. - - """ - # Get the socket lock - - socket_lock = self.sock_lock - # Wait for netrecv resources - if self.on_loopback: - nanny.tattle_quantity('looprecv',0) - else: - nanny.tattle_quantity('netrecv',0) - - # Acquire the lock - socket_lock.acquire() - try: - # Get the socket itself. This must be done after - # we acquire the lock because it is possible that the - # socket was closed/re-opened or that it was set to None, - # etc. - mysocketobj = self.socketobj - if mysocketobj is None: - raise KeyError # Indicates socket is closed - - # Try to get a message of any size. 
(64K is the max that fits in the - # UDP header) - message, addr = mysocketobj.recvfrom(65535) - remote_ip, remote_port = addr - - # Do some resource accounting - if self.on_loopback: - nanny.tattle_quantity('looprecv', 64 + len(message)) - else: - nanny.tattle_quantity('netrecv', 64 + len(message)) - - # Return everything - return (remote_ip, remote_port, message) - - except KeyError: - # Socket is closed - raise SocketClosedLocal("The socket has been closed!") - - except RepyException: - # Let these through from the inner block - raise - - except Exception, e: - # Check if this is a would-block error - if _is_recoverable_network_exception(e): - raise SocketWouldBlockError("No messages currently available!") - - else: - # Unexpected, close the socket, and then raise SocketClosedLocal - _cleanup_socket(self) - raise SocketClosedLocal("Unexpected error, socket closed!") - - finally: - # Release the lock - socket_lock.release() - - - - def close(self): - """ - - Closes a socket that is listening for messages. - - - None. - - - None. - - - The IP address and port can be reused by other UDPServerSockets after - this. - - - If applicable, this operation stops consuming the corresponding - insocket. - - - True if this is the first close call to this socket, False otherwise. - - """ - # Get the socket lock - socket_lock = self.sock_lock - # Acquire the lock - socket_lock.acquire() - try: - # Clean up the socket - _cleanup_socket(self) - # Replace the socket - self.socketobj = None - - return True - - finally: - socket_lock.release() - - - def __del__(self): - # Clean up global resources on garbage collection. - self.close() - - - - -class TCPServerSocket (object): - """ - This object is a wrapper around a listening - TCP socket. It allows for accepting incoming - connections, and closing the socket. - - It operates in a strictly non-blocking mode, - and uses Exceptions to indicate when an - operation would result in blocking behavior. 
- """ - # Fields: - # socket: This is a listening TCP socket - # sock_lock: Threading Lock on socket object used for - # synchronization. - # on_loopback: true if the remote ip is a loopback address. - # this is used for resource accounting. - # - - __slots__ = ["socketobj", "on_loopback", "sock_lock"] - def __init__(self, sock, on_loopback): - """ - - Initializes the TCPServerSocket. The socket - should already be established by listenforconnection - prior to calling the initializer. - - - socketobj: The TCP listening socket - - on_loopback: True/False based on whether local IP - is on loopback or not - - - None - - - A TCPServerSocket - """ - # Store the parameters - self.socketobj = sock - self.sock_lock = threading.Lock() - self.on_loopback = on_loopback - - # Set the socket to non-blocking - # locking should be unnecessary because there isn't another external - # reference here yet - sock.setblocking(0) - - - - def getconnection(self): - """ - - Accepts an incoming connection to a listening TCP socket. - - - None - - - Raises SocketClosedLocal if close() has been called. - Raises SocketWouldBlockError if the operation would block. - Raises ResourcesExhaustedError if there are no free outsockets. - - - If successful, consumes 128 bytes of netrecv (64 bytes for - a SYN and ACK packet) and 64 bytes of netsend (1 ACK packet). - Uses an outsocket. - - - A tuple containing: (remote ip, remote port, socket object) - """ - # Get the socket lock - socket_lock = self.sock_lock - - # Wait for netsend and netrecv resources - if self.on_loopback: - nanny.tattle_quantity('looprecv',0) - nanny.tattle_quantity('loopsend',0) - else: - nanny.tattle_quantity('netrecv',0) - nanny.tattle_quantity('netsend',0) - - # Acquire the lock - socket_lock.acquire() - try: - # Get the socket itself. This must be done after - # we acquire the lock because it is possible that the - # socket was closed/re-opened or that it was set to None, - # etc. 
- socket = self.socketobj - if socket is None: - raise KeyError # Indicates socket is closed - - # Try to accept - new_socket, remote_host_info = socket.accept() - remote_ip, remote_port = remote_host_info - - # Get new_socket id to register new_socket with nanny - new_sockid = id(new_socket) - # Check if remote_ip is on loopback - is_on_loopback = _is_loopback_ipaddr(remote_ip) - # Do some resource accounting - if self.on_loopback: - nanny.tattle_quantity('looprecv', 128) - nanny.tattle_quantity('loopsend', 64) - else: - nanny.tattle_quantity('netrecv', 128) - nanny.tattle_quantity('netsend', 64) - - try: - nanny.tattle_add_item('outsockets', new_sockid) - except ResourceExhaustedError: - # Close the socket, and raise - new_socket.close() - raise - - wrapped_socket = EmulatedSocket(new_socket, is_on_loopback) - - # Return everything - return (remote_ip, remote_port, wrapped_socket) - - except KeyError: - # Socket is closed - raise SocketClosedLocal("The socket has been closed!") - - except RepyException: - # Let these through from the inner block - raise - - except Exception, e: - # Check if this is a would-block error - if _is_recoverable_network_exception(e): - raise SocketWouldBlockError("No connections currently available!") - - else: - # Unexpected, close the socket, and then raise SocketClosedLocal - _cleanup_socket(self) - raise SocketClosedLocal("Unexpected error, socket closed!") - - finally: - # Release the lock - socket_lock.release() - - - def close(self): - """ - - Closes the listening TCP socket. - - - None - - - None - - - The IP and port can be re-used after closing. - - - Releases the insocket used. - - - True, if this is the first call to close. - False otherwise. 
- """ - # Get the socket lock - socket_lock = self.sock_lock - - # Acquire the lock - socket_lock.acquire() - try: - # Clean up the socket - _cleanup_socket(self) - # Replace the socket - self.socketobj = None - # Done - return True - - finally: - socket_lock.release() - - - def __del__(self): - # Close the socket - self.close() - From 6c6f52f837fb16de50e0f26f3d894811b2c51f3a Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:32:35 -0500 Subject: [PATCH 05/17] Delete error.log --- error.log | 21 --------------------- 1 file changed, 21 deletions(-) delete mode 100644 error.log diff --git a/error.log b/error.log deleted file mode 100644 index c6be7ac..0000000 --- a/error.log +++ /dev/null @@ -1,21 +0,0 @@ -root@abhishek-VirtualBox:/var/log/apache2# tail -20f error.log -[Fri Nov 14 06:12:08.023144 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] return self.format('D, j M Y H:i:s O') -[Fri Nov 14 06:12:08.023159 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] File "/usr/local/lib/python2.7/dist-packages/django/utils/dateformat.py", line 35, in format -[Fri Nov 14 06:12:08.023181 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] pieces.append(force_text(getattr(self, piece)())) -[Fri Nov 14 06:12:08.023197 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] File "/usr/local/lib/python2.7/dist-packages/django/utils/encoding.py", line 100, in force_text -[Fri Nov 14 06:12:08.023354 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] s = s.__unicode__() -[Fri Nov 14 06:12:08.023375 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] File "/usr/local/lib/python2.7/dist-packages/django/utils/functional.py", line 138, in __text_cast -[Fri Nov 14 06:12:08.023559 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] return func(*self.__args, **self.__kw) -[Fri Nov 14 06:12:08.023580 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] File 
"/usr/local/lib/python2.7/dist-packages/django/utils/translation/__init__.py", line 76, in ugettext -[Fri Nov 14 06:12:08.023699 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] return _trans.ugettext(message) -[Fri Nov 14 06:12:08.023719 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] File "/usr/local/lib/python2.7/dist-packages/django/utils/translation/trans_real.py", line 281, in ugettext -[Fri Nov 14 06:12:08.029585 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] return do_translate(message, 'ugettext') -[Fri Nov 14 06:12:08.029680 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] File "/usr/local/lib/python2.7/dist-packages/django/utils/translation/trans_real.py", line 263, in do_translate -[Fri Nov 14 06:12:08.029764 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] _default = translation(settings.LANGUAGE_CODE) -[Fri Nov 14 06:12:08.029802 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] File "/usr/local/lib/python2.7/dist-packages/django/utils/translation/trans_real.py", line 177, in translation -[Fri Nov 14 06:12:08.029848 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] default_translation = _fetch(settings.LANGUAGE_CODE) -[Fri Nov 14 06:12:08.029883 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] File "/usr/local/lib/python2.7/dist-packages/django/utils/translation/trans_real.py", line 159, in _fetch -[Fri Nov 14 06:12:08.029925 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] app = import_module(appname) -[Fri Nov 14 06:12:08.029958 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] File "/usr/local/lib/python2.7/dist-packages/django/utils/importlib.py", line 40, in import_module -[Fri Nov 14 06:12:08.030190 2014] [:error] [pid 12710:tid 3043838784] [remote 10.0.2.15:52662] __import__(name) -[Fri Nov 14 06:12:08.030250 2014] [:error] [pid 12710:tid 3043838784] [remote 
10.0.2.15:52662] ImportError: No module named django_pph From cfee756ff981c74b18d706eb86e5202ea8df1265 Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:32:45 -0500 Subject: [PATCH 06/17] Delete experimentlib.py --- experimentlib.py | 1888 ---------------------------------------------- 1 file changed, 1888 deletions(-) delete mode 100644 experimentlib.py diff --git a/experimentlib.py b/experimentlib.py deleted file mode 100644 index 8d76c03..0000000 --- a/experimentlib.py +++ /dev/null @@ -1,1888 +0,0 @@ -""" - - experimentlib.py - - - Justin Samuel - - - December 1, 2009 - - - A library for conducting experiments using Seattle vessels. The functions in - this library allow for communicating with vessels (e.g. to upload files and - run programs) as well as for communicating with SeattleGENI (e.g. to obtain - vessels to run experiments on). - - - Ensure that this file is in a directory containing the seattlelib files as - well as the seattleclearinghouse_xmlrpc.py module. In your own script, add: - - import experimentlib - - then call the methods desired. - - Note that if your script resides outside of the directory that contains the - seattlelib files, experimentlib.py, and seattlegeni_client.py, then you'll - need to set that directory/those directories in your python path. For example, - if you downloaded an installer (even if you haven't installed Seattle on the - machine this script resides on, the path will be to the seattle_repy directory - that was among the extracted installer files. To set the path directly in your - script rather than through environment variables, you can use something like: - - import sys - sys.path.append("/path/to/seattle_repy") - - You would need to do the above *before* your line that says: - - import experimentlib - - For examples of using this experimentlib, see the examples/ directory. 
- - Please also see the following wiki page for usage information and how to - obtain the latest version of this experiment library: - - https://seattle.cs.washington.edu/wiki/ExperimentLibrary - - - - Object Definitions: - - * identity: a dictionary that minimally contains a public key but may also - contain the related private key and the username of a corresponding - SeattleGENI account. When one wants to perform any operation that would - require a public key, private key, or username, an identity must be - provided. An identity can be created using the functions named - create_identity_from_*. - - * vesselhandle: a vesselhandle is a string that contains the information - required to uniquely identify a vessel, regardless of the current - location (IP address) of the node the vessel is on. This is in the format - of "nodeid:vesselname". - - * nodeid: a string that contains the information required to uniquely - identify a node, regardless of its current location. - - * vesselname: a string containing the name of a vessel. This name will - be unique on any given node, but the same name is likely is used for - vessels on other nodes. Thus, this does not uniquely identify a vessel, - in general. To uniquely identify a vessel, a vesselhandle is needed. - - * nodelocation: a string containing the location of a node. This will not - always be "ip:port". It could, for example, be "NATid:port" in the case - of a node that is accessible through a NAT forwarder. - - * vesseldict: a dictionary of details related to a given vessel. The keys - that will always be present are 'vesselhandle', 'nodelocation', - 'vesselname', and 'nodeid'. Additional keys will be present depending on - the function that returns the vesseldict. See the individual function - docstring for details. 
- - Exceptions: - - All exceptions raised by functions in this module will either be or extend: - * SeattleExperimentError - * SeattleClearinghouseError - - The SeattleClearinghouseError* exceptions will only be raised by the functions whose - names are seattlegeni_*. Any of the seattlegeni_* functions may raise the - following in addition to specific exceptions described in the function - docstrings (these are all subclasses of SeattleClearinghouseError): - * SeattleClearinghouseCommunicationError - * SeattleClearinghouseAuthenticationError - * SeattleClearinghouseInvalidRequestError - * SeattleClearinghouseInternalError - - In the case of invalid arguments to functions, the following may be - raised (these will not always be documented for each function): - * TypeError - * ValueError - * IOError (if the function involves reading/writing files and the - filename provided is missing/unreadable/unwritable) - - For the specific exceptions raised by a given function, see the function's - docstring. -""" - -import os -import random -import time -import tracebackrepy -import xmlrpclib - -import seattleclearinghouse_xmlrpc - -# We use a helper module to do repy module imports so that we don't import -# unexpected items into this module's namespace. 
This helps reduce errors -# because editors/pylint make it clear when an unknown identifier is used -# and it also makes other things easier for developers, such as using ipython's -# tab completion and not causing unexpected imports if someone using this -# module decides to use "from experimentlib import *" -#import repyimporter - -import fastnmclient -""" -repytime = repyimporter.import_repy_module("time") -rsa = repyimporter.import_repy_module("rsa") -parallelize = repyimporter.import_repy_module("parallelize") -advertise = repyimporter.import_repy_module("advertise") -""" -from repyportability import add_dy_support -add_dy_support(locals()) - -repytime = dy_import_module_symbols("time.r2py") -rsa = dy_import_module_symbols("rsa.r2py") -parallelize = dy_import_module_symbols("parallelize.r2py") -advertise = dy_import_module_symbols("advertise.r2py") - -# The maximum number of node locations to return from a call to lookup_node_locations. -max_lookup_results = 1024 * 1024 - -# The timeout to use for communication, both in advertisement lookups and for -# contacting nodes directly. -defaulttimeout = 10 - -# The number of worker threads to use for each parallelized operation. -num_worker_threads = 5 - -# Whether additional information and debugging messages should be printed -# to stderr by this library. -print_debug_messages = True - -# OpenDHT can be slow/hang, which isn't fun if the experimentlib is being used -# interactively. So, let's default to central advertise server lookups here -# until we're sure all issues with OpenDHT are resolved. -# A value of None indicates the default of ['opendht', 'central']. -#advertise_lookup_types = None -advertise_lookup_types = ['central'] - -# A few options to be passed along to the SeattleGENI xmlrpc client. -# None means the default. -SEATTLECLEARINGHOUSE_XMLRPC_URL = None -SEATTLECLEARINGHOUSE_ALLOW_SSL_INSECURE = None # Set to True to allow insecure SSL. 
-SEATTLECLEARINGHOUSE_CA_CERTS_FILES = None - -# These constants can be used as the type argument to seattlegeni_acquire_vessels. -SEATTLECLEARINGHOUSE_VESSEL_TYPE_WAN = "wan" -SEATTLECLEARINGHOUSE_VESSEL_TYPE_LAN = "lan" -SEATTLECLEARINGHOUSE_VESSEL_TYPE_NAT = "nat" -SEATTLECLEARINGHOUSE_VESSEL_TYPE_RAND = "rand" - -# Some of these vessel status explanations are from: -# https://seattle.cs.washington.edu/wiki/NodeManagerDesign - -# Fresh: has never been started. -VESSEL_STATUS_FRESH = "Fresh" - -# Started: has been started and is running when last checked. -VESSEL_STATUS_STARTED = "Started" - -# Stopped: was running but stopped by NM command -VESSEL_STATUS_STOPPED = "Stopped" - -# Stale: it last reported a start of "Started" but significant time has -# elapsed, likely due to a system crash (what does "system crash" mean?). -VESSEL_STATUS_STALE = "Stale" - -# Terminated (the vessel stopped of its own volition, possibly due to an error) -VESSEL_STATUS_TERMINATED = "Terminated" - -# The node is not advertising -VESSEL_STATUS_NO_SUCH_NODE = "NO_SUCH_NODE" - -# The node can be communicated with but the specified vessel doesn't exist -# on the node. This will also be used when the vessel exists on the node but -# the identity being used is not a user or the owner of the vessel. -VESSEL_STATUS_NO_SUCH_VESSEL = "NO_SUCH_VESSEL" - -# The node can't be communicated with or communication fails. -VESSEL_STATUS_NODE_UNREACHABLE = "NODE_UNREACHABLE" - -# For convenience we define two sets of vessel status constants that include -# all possible statuses grouped by whether the status indicates the vessel is -# usable/active or whether it is unusable/inactive. 
-VESSEL_STATUS_SET_ACTIVE = set([VESSEL_STATUS_FRESH, VESSEL_STATUS_STARTED, - VESSEL_STATUS_STOPPED, VESSEL_STATUS_STALE, - VESSEL_STATUS_TERMINATED]) -VESSEL_STATUS_SET_INACTIVE = set([VESSEL_STATUS_NO_SUCH_NODE, VESSEL_STATUS_NO_SUCH_VESSEL, - VESSEL_STATUS_NODE_UNREACHABLE]) - -# Whether _initialize_time() has been called. -_initialize_time_called = False - -# Keys are node locations (a string of "host:port"), values are nmhandles. -# Note that this method of caching nmhandles will cause problems if multiple -# identities/keys are being used to contact the name node. -_nmhandle_cache = {} - -# Keys are nodeids, values are nodelocations. -_node_location_cache = {} - - - - - -class SeattleExperimentError(Exception): - """Base class for other exceptions.""" - - - -class UnexpectedVesselStatusError(SeattleExperimentError): - """ - When a vessel status is reported by a node and that status is something - we don't understand. Mostly this is something we care about because we - want to definitely tell users what to expect in their code in terms of - status, so we should be very clear about the possibly values and never - have to raise this exception. - """ - - - -class NodeCommunicationError(SeattleExperimentError): - """Unable to perform a requested action on/communication with a node/vessel.""" - - - -class NodeLocationLookupError(SeattleExperimentError): - """ - Unable to determine the location of a node based on its nodeid or unable - to successfully perform an advertisement lookup. - """ - - - -class NodeLocationNotAdvertisedError(NodeLocationLookupError): - """ - A lookup was successful but no node locations are being advertised under a - nodeid. - """ - - - -class UnableToPerformLookupError(NodeLocationLookupError): - """ - Something is wrong with performing lookups. Either none of the lookup - services that were tried were successful or there's a bug in some underlying - code being used by this module. 
- """ - - - -class IdentityInformationMissingError(SeattleExperimentError): - """ - The information that is part of an identity object is incomplete. For - example, if only the public key is in the identity but the identity is - used in a way that requires a private key, this exception would be - raised. - """ - - -#This is the base class for all SeattleGENI errors. We make this available -#in the namespace of the experimentlib so that clients do not have to import -#seattleclearinghouse_xmlrpc to catch these. -SeattleClearinghouseError = seattleclearinghouse_xmlrpc.SeattleClearinghouseError - -# We make these available, as well, in case users find them useful. We prefix -# all of these error names with SeattleGENI. -SeattleClearinghouseCommunicationError = seattleclearinghouse_xmlrpc.CommunicationError -SeattleClearinghouseInternalError = seattleclearinghouse_xmlrpc.InternalError -SeattleClearinghouseAuthenticationError = seattleclearinghouse_xmlrpc.AuthenticationError -SeattleClearinghouseInvalidRequestError = seattleclearinghouse_xmlrpc.InvalidRequestError -SeattleClearinghouseNotEnoughCreditsError = seattleclearinghouse_xmlrpc.NotEnoughCreditsError -SeattleClearinghouseUnableToAcquireResourcesError = seattleclearinghouse_xmlrpc.UnableToAcquireResourcesError - - - - - -def _validate_vesselhandle(vesselhandle): - if not isinstance(vesselhandle, basestring): - raise TypeError("vesselhandle must be a string, not a " + str(type(vesselhandle))) - - parts = vesselhandle.split(':') - if len(parts) != 2: - raise ValueError("invalid vesselhandle '" + vesselhandle + "', should be nodeid:vesselname") - - - - - -def _validate_vesselhandle_list(vesselhandle_list): - if not isinstance(vesselhandle_list, list): - raise TypeError("vesselhandle list must be a list, not a " + str(type(vesselhandle_list))) - - for vesselhandle in vesselhandle_list: - _validate_vesselhandle(vesselhandle) - - - - - -def _validate_nodelocation(nodelocation): - if not isinstance(nodelocation, 
basestring): - raise TypeError("nodelocation must be a string, not a " + str(type(nodelocation))) - - parts = nodelocation.split(':') - if len(parts) != 2: - raise ValueError("nodelocation '" + nodelocation + "' invalid, should be host:port") - - - - - -def _validate_nodelocation_list(nodelocation_list): - if not isinstance(nodelocation_list, list): - raise TypeError("nodelocation list must be a list, not a " + str(type(nodelocation_list))) - - for nodelocation in nodelocation_list: - _validate_nodelocation(nodelocation) - - - - - -def _validate_identity(identity, require_private_key=False, require_username=False): - if not isinstance(identity, dict): - raise TypeError("identity must be a dict, not a " + str(type(identity))) - - if 'publickey_str' not in identity: - raise TypeError("identity dict doesn't have a 'publickey_str' key, so it's not an identity.") - - if require_private_key: - if 'privatekey_str' not in identity: - raise IdentityInformationMissingError("identity must have a private key for the requested operation.") - - if require_username: - if 'username' not in identity: - raise IdentityInformationMissingError("identity must have a username for the requested operation.") - - - - - -def _initialize_time(): - """ - Does its best to call time_updatetime() and raises a SeattleExperimentError - if it doesn't succeed after many tries. - """ - global _initialize_time_called - - if not _initialize_time_called: - - max_attempts = 10 - possible_ports = range(10000, 60001) - - # Ports to use for UDP listening when doing a time update. 
- portlist = random.sample(possible_ports, max_attempts) - - for localport in portlist: - try: - repytime.time_updatetime(localport) - _initialize_time_called = True - return - except repytime.TimeError: - error_message = tracebackrepy.format_exception() - - raise SeattleExperimentError("Failed to perform time_updatetime(): " + error_message) - - - - - -def _create_list_from_key_in_dictlist(dictlist, key): - """ - List comprehensions are verboten by our coding style guide (generally for - good reason). Otherwise, we wouldn't have this function and would just write - the following wherever needed: - [x[key] for x in dictlist] - """ - new_list = [] - for dictitem in dictlist: - new_list.append(dictitem[key]) - return new_list - - - - - -def _get_nmhandle(nodelocation, identity=None): - """ - Get an nmhandle for the nodelocation and identity, if provided. This will look - use a cache of nmhandles and only create a new one if the requested nmhandle - has not previously been requested. - """ - - # Call _initialize_time() here because time must be updated at least once before - # nmhandles are used. 
- _initialize_time() - - host, port = nodelocation.split(':') - port = int(port) - - if identity is None: - identitystring = "None" - else: - identitystring = identity['publickey_str'] - - if identitystring not in _nmhandle_cache: - _nmhandle_cache[identitystring] = {} - - if nodelocation not in _nmhandle_cache[identitystring]: - try: - if identity is None: - nmhandle = fastnmclient.nmclient_createhandle(host, port, timeout=defaulttimeout) - elif 'privatekey_dict' in identity: - nmhandle = fastnmclient.nmclient_createhandle(host, port, privatekey=identity['privatekey_dict'], - publickey=identity['publickey_dict'], timeout=defaulttimeout) - else: - nmhandle = fastnmclient.nmclient_createhandle(host, port, publickey=identity['publickey_dict'], - timeout=defaulttimeout) - except fastnmclient.NMClientException, e: - raise NodeCommunicationError(str(e)) - - _nmhandle_cache[identitystring][nodelocation] = nmhandle - - return _nmhandle_cache[identitystring][nodelocation] - - - - - -def run_parallelized(targetlist, func, *args): - """ - - Parallelize the calling of a given function using multiple threads. - - targetlist - a list what will be the first argument to func each time it is called. - func - the function to be called once for each item in targetlist. - *args - (optional) every additional argument will be passed to func after an - item from targetlist. That is, these will be the second, third, etc. - argument to func, if provided. These are not required a. - - SeattleExperimentError - Raised if there is a problem performing parallel processing. This will - not be raised just because func raises exceptions. If func raises - exceptions when it is called, that exception information will be - available through the run_parallelized's return value. - - Up to num_worker_threads (a global variable) threads will be spawned to - call func once for every item in targetlist. 
- - A tuple of: - (successlist, failurelist) - where successlist is a list of tuples of the format: - (target, return_value_from_func) - and failurelist is a list of tuples of the format: - (target, exception_string) - Note that exception_string will not contain a full traceback, but rather - only the string representation of the exception. - """ - - try: - phandle = parallelize.parallelize_initfunction(targetlist, func, num_worker_threads, *args) - - while not parallelize.parallelize_isfunctionfinished(phandle): - # TODO: Give up after a timeout? This seems risky as run_parallelized may - # be used with functions that take a long time to complete and very large - # lists of targets. It would be a shame to break a user's program because - # of an assumption here. Maybe it should be an optional argument to - # run_parallelized. - time.sleep(.1) - - results = parallelize.parallelize_getresults(phandle) - except parallelize.ParallelizeError: - raise SeattleExperimentError("Error occurred in run_parallelized: " + - tracebackrepy.format_exception()) - finally: - parallelize.parallelize_closefunction(phandle) - - # These are lists of tuples. The first is a list of (target, retval), the - # second is a list of (target, errormsg) - return results['returned'], results['exception'] - - - - - - -def create_identity_from_key_files(publickey_fn, privatekey_fn=None): - """ - - Create an identity from key files. - - publickey_fn - The full path, including filename, to the public key this identity - should represent. Note that the identity's username will be assumed - to be the part of the base filename before the first period (or the - entire base filename if there is no period). So, to indicate a username - of "joe", the filename should be, for example, "joe.publickey". - privatekey_fn - (optional) The full path, including filename, to the private key that - corresponds to publickey_fn. 
If this is not provided, then the identity - will not be able to be used for operations the require a private key. - - IOError - if the files do not exist or are not readable. - ValueError - if the files do not contain valid keys. - - An identity object to be used with other functions in this module. - """ - identity = {} - identity["username"] = os.path.basename(publickey_fn).split(".")[0] - identity["publickey_fn"] = publickey_fn - try: - identity["publickey_dict"] = rsa.rsa_file_to_publickey(publickey_fn) - identity["publickey_str"] = rsa.rsa_publickey_to_string(identity["publickey_dict"]) - - if privatekey_fn is not None: - identity["privatekey_fn"] = privatekey_fn - identity["privatekey_dict"] = rsa.rsa_file_to_privatekey(privatekey_fn) - identity["privatekey_str"] = rsa.rsa_privatekey_to_string(identity["privatekey_dict"]) - except IOError: - raise - except ValueError: - raise - - return identity - - - - - -def create_identity_from_key_strings(publickey_string, privatekey_string=None, username=None): - """ - - Create an identity from key strings. - - publickey_string - The string containing the public key this identity should represent. The - string must consists of the modulus, followed by a space, followed by - the public exponent. This will be the same as the contents of a public - key file. - privatekey_string - (optional) The full path, including filename, to the private key that - corresponds to publickey_fn. If this is not provided, then the identity - will not be able to be used for operations the require a private key. - username - (optional) A string containing the username to associate with this - identity. This is only necessary if using this identity with the - seattlegeni_* functions. - - ValueError - if the strings do not contain valid keys. - - An identity object to be used with other functions in this module. 
- """ - identity = {} - identity["username"] = username - try: - identity["publickey_dict"] = rsa.rsa_string_to_publickey(publickey_string) - identity["publickey_str"] = rsa.rsa_publickey_to_string(identity["publickey_dict"]) - - if privatekey_string is not None: - identity["privatekey_dict"] = rsa.rsa_string_to_privatekey(privatekey_string) - identity["privatekey_str"] = rsa.rsa_privatekey_to_string(identity["privatekey_dict"]) - except IOError: - # Raised if there is a problem reading the file. - raise - except ValueError: - # Raised by the repy rsa module when the key is invald. - raise - - return identity - - - - - -def _lookup_node_locations(keystring, lookuptype=None): - """Does the actual work of an advertise lookup.""" - - keydict = rsa.rsa_string_to_publickey(keystring) - try: - if lookuptype is not None: - nodelist = advertise.advertise_lookup(keydict, maxvals=max_lookup_results, timeout=defaulttimeout, lookuptype=lookuptype) - else: - nodelist = advertise.advertise_lookup(keydict, maxvals=max_lookup_results, timeout=defaulttimeout) - except advertise.AdvertiseError, e: - raise UnableToPerformLookupError("Failure when trying to perform advertise lookup: " + - tracebackrepy.format_exception()) - - # If there are no vessels for a user, the lookup may return ''. - for nodename in nodelist[:]: - if nodename == '': - nodelist.remove(nodename) - - return nodelist - - - - - -def lookup_node_locations_by_identity(identity): - """ - - Lookup the locations of nodes that are advertising their location under a - specific identity's public key. - - identity - The identity whose public key should be used to lookup nodelocations. - - UnableToPerformLookupError - If a failure occurs when trying lookup advertised node locations. - - A list of nodelocations. 
- """ - _validate_identity(identity) - keystring = str(identity['publickey_str']) - return _lookup_node_locations(keystring, lookuptype=advertise_lookup_types) - - - - - -def lookup_node_locations_by_nodeid(nodeid): - """ - - Lookup the locations that a specific node has advertised under. There may - be multiple locations advertised if the node has recently changed location. - - nodeid - The nodeid of the node whose advertised locations are to be looked up. - - UnableToPerformLookupError - If a failure occurs when trying lookup advertised node locations. - - A list of nodelocations. - """ - return _lookup_node_locations(nodeid, lookuptype=advertise_lookup_types) - - - - - -def find_vessels_on_nodes(identity, nodelocation_list): - """ - - Contact one or more nodes and determine which vessels on those nodes are - usable by a given identity. - - identity - The identity whose vessels we are interested in. This can be the identity - of either the vessel owner or a vessel user. - nodelocation_list - A list of nodelocations that should be contacted. This can be an empty - list (which will result in an empty list of vesselhandles returned). - - SeattleExperimentError - If an error occurs performing a parallelized operation. - - A list of vesselhandles. - """ - _validate_identity(identity) - _validate_nodelocation_list(nodelocation_list) - - successlist, failurelist = run_parallelized(nodelocation_list, browse_node, identity) - - vesseldicts = [] - - for (nodeid, vesseldicts_of_node) in successlist: - vesseldicts += vesseldicts_of_node - - return _create_list_from_key_in_dictlist(vesseldicts, "vesselhandle") - - - - - -def browse_node(nodelocation, identity=None): - """ - - Contact an individual node to gather detailed information about all of the - vessels on the node that are usable by a given identity. - - nodelocation - The nodelocation of the node that should be browsed. - identity - (optional) The identity whose vessels we are interested in. 
This can be - the identity of either the vessel owner or a vessel user. If None, - then the vesseldicts for all vessels on the node will be returned. - - NodeCommunicationError - If the communication with the node fails for any reason, including the - node not being reachable, timeout in communicating with the node, the - node rejecting the - - A list of vesseldicts. Each vesseldict contains the additional keys: - 'status' - The status string of the vessel. - 'ownerkey' - The vessel's owner key (in dict format). - 'userkeys' - A list of the vessel's user keys (each in dict format). - """ - try: - _validate_nodelocation(nodelocation) - if identity is not None: - _validate_identity(identity) - - nmhandle = _get_nmhandle(nodelocation, identity) - try: - nodeinfo = fastnmclient.nmclient_getvesseldict(nmhandle) - except fastnmclient.NMClientException, e: - raise NodeCommunicationError("Failed to communicate with node " + nodelocation + ": " + str(e)) - - # We do our own looking through the nodeinfo rather than use the function - # nmclient_listaccessiblevessels() as we don't want to contact the node a - # second time. - usablevessels = [] - for vesselname in nodeinfo['vessels']: - if identity is None: - usablevessels.append(vesselname) - elif identity['publickey_dict'] == nodeinfo['vessels'][vesselname]['ownerkey']: - usablevessels.append(vesselname) - elif 'userkeys' in nodeinfo['vessels'][vesselname] and \ - identity['publickey_dict'] in nodeinfo['vessels'][vesselname]['userkeys']: - usablevessels.append(vesselname) - - nodeid = rsa.rsa_publickey_to_string(nodeinfo['nodekey']) - # For efficiency, let's update the _node_location_cache with this info. - # This can prevent individual advertise lookups of each nodeid by other - # functions in the experimentlib that may be called later. 
- _node_location_cache[nodeid] = nodelocation - - vesseldict_list = [] - for vesselname in usablevessels: - vesseldict = {} - # Required keys in vesseldicts (see the module comments for more info). - vesseldict['vesselhandle'] = nodeid + ":" + vesselname - vesseldict['nodelocation'] = nodelocation - vesseldict['vesselname'] = vesselname - vesseldict['nodeid'] = nodeid - # Additional keys that browse_node provides. - vesseldict['status'] = nodeinfo['vessels'][vesselname]['status'] - vesseldict['ownerkey'] = nodeinfo['vessels'][vesselname]['ownerkey'] - vesseldict['userkeys'] = nodeinfo['vessels'][vesselname]['userkeys'] - vesseldict['version'] = nodeinfo['version'] - vesseldict_list.append(vesseldict) - - return vesseldict_list - - except Exception, e: - # Useful for debugging during development of the experimentlib. - #traceback.print_exc() - raise - - - - - -def get_vessel_status(vesselhandle, identity): - """ - - Determine the status of a vessel. - - vesselhandle - The vesselhandle of the vessel whose status is to be checked. - identity - The identity of the owner or a user of the vessel. - - UnexpectedVesselStatusError - If the status returned by the node for the vessel is not a status value - that this experimentlib expects. - - The node the vessel is on is communicated with. - - A string that is one of the VESSEL_STATUS_* constants. - """ - _validate_vesselhandle(vesselhandle) - _validate_identity(identity) - - # Determine the last known location of the node. - nodeid, vesselname = vesselhandle.split(":") - try: - # This will get a cached node location if one exists. - nodelocation = get_node_location(nodeid) - except NodeLocationNotAdvertisedError, e: - return VESSEL_STATUS_NO_SUCH_NODE - - try: - vesselinfolist = browse_node(nodelocation, identity) - except NodeCommunicationError: - # Do a non-cache lookup of the nodeid to see if the node moved. 
- try: - nodelocation = get_node_location(nodeid, ignorecache=True) - except NodeLocationNotAdvertisedError, e: - return VESSEL_STATUS_NO_SUCH_NODE - - # Try to communicate again. - try: - vesselinfolist = browse_node(nodelocation, identity) - except NodeCommunicationError, e: - return VESSEL_STATUS_NODE_UNREACHABLE - - for vesselinfo in vesselinfolist: - if vesselinfo['vesselhandle'] == vesselhandle: - # The node is up and the vessel must have the identity's key as the owner - # or a user, but the status returned isn't one of the statuses we - # expect. If this does occur, it may indicate a bug in the experiment - # library where it doesn't know about all possible status a nodemanager - # may return for a vessel. - if vesselinfo['status'] not in VESSEL_STATUS_SET_ACTIVE: - raise UnexpectedVesselStatusError(vesselinfo['status']) - else: - return vesselinfo['status'] - else: - # The node is up but this vessel doesn't exist. - return VESSEL_STATUS_NO_SUCH_VESSEL - - - - - -def _do_public_node_request(nodeid, requestname, *args): - nodelocation = get_node_location(nodeid) - nmhandle = _get_nmhandle(nodelocation) - - try: - return fastnmclient.nmclient_rawsay(nmhandle, requestname, *args) - except fastnmclient.NMClientException, e: - raise NodeCommunicationError(str(e)) - - - - - -def _do_signed_vessel_request(identity, vesselhandle, requestname, *args): - _validate_identity(identity, require_private_key=True) - - nodeid, vesselname = vesselhandle.split(':') - nodelocation = get_node_location(nodeid) - nmhandle = _get_nmhandle(nodelocation, identity) - - try: - return fastnmclient.nmclient_signedsay(nmhandle, requestname, vesselname, *args) - except fastnmclient.NMClientException, e: - raise NodeCommunicationError(str(e)) - - - - - -def get_node_offcut_resources(nodeid): - """ - - Obtain information about offcut resources on a node. - - nodeid - The nodeid of the node whose offcut resources are to be queried. 
- - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - None - - A string containing information about the node's offcut resources. - """ - # TODO: This function might be more useful if it processed the string - # returned by the nodemanager and return it from this function as some - # well-defined data structure. - return _do_public_node_request(nodeid, "GetOffcutResources") - - - - - -def get_vessel_resources(vesselhandle): - """ - - Obtain vessel resource/restrictions information. - - vesselhandle - The vesselhandle of the vessels whose restrictions/resources info are to - be returned. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - None - - A string containing the vessel resource/restrictions information. - """ - # TODO: This function might be more useful if it processed the string - # returned by the nodemanager and return it from this function as some - # well-defined data structure. - nodeid, vesselname = get_nodeid_and_vesselname(vesselhandle) - return _do_public_node_request(nodeid, "GetVesselResources", vesselhandle) - - - - - -def get_vessel_log(vesselhandle, identity): - """ - - Read the vessel log. - - vesselhandle - The vesselhandle of the vessel whose log is to be read. - identity - The identity of either the owner or a user of the vessel. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - None - - A string containing the data in the vessel log. 
- """ - _validate_vesselhandle(vesselhandle) - return _do_signed_vessel_request(identity, vesselhandle, "ReadVesselLog") - - - - - -def get_vessel_file_list(vesselhandle, identity): - """ - - Get a list of files that are on the vessel. - - vesselhandle - The vesselhandle of the vessel whose file list is to be obtained. - identity - The identity of either the owner or a user of the vessel. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - None - - A list of filenames (strings). - """ - _validate_vesselhandle(vesselhandle) - file_list_string = _do_signed_vessel_request(identity, vesselhandle, "ListFilesInVessel") - if not file_list_string: - return [] - else: - return file_list_string.split(' ') - - - - - -def upload_file_to_vessel(vesselhandle, identity, local_filename, remote_filename=None): - """ - - Upload a file to a vessel. - - vesselhandle - The vesselhandle of the vessel that the file is to be uploaded to. - identity - The identity of either the owner or a user of the vessel. - local_filename - The name of the local file to be uploaded. That can include a directory - path. - remote_filename - (optional) The filename to use when storing the file on the vessel. If - not provided, this will be the same as the basename of local_filename. - Note that the remote_filename is subject to filename restrictions imposed - on all vessels. - TODO: describe the filename restrictions. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - The file has been uploaded to the vessel. 
- - None - """ - _validate_vesselhandle(vesselhandle) - - if remote_filename is None: - remote_filename = os.path.basename(local_filename) - - fileobj = open(local_filename, "r") - filedata = fileobj.read() - fileobj.close() - - _do_signed_vessel_request(identity, vesselhandle, "AddFileToVessel", remote_filename, filedata) - - - - - -def download_file_from_vessel(vesselhandle, identity, remote_filename, local_filename=None, - add_location_suffix=False, return_file_contents=False): - """ - - Download a file from a vessel. - - vesselhandle - The vesselhandle of the vessel that the file is to be downloaded from. - identity - The identity of either the owner or a user of the vessel. - remote_filename - The file to be downloaded. - local_filename - (optional) The filename to use when saving the downloaded file locally. - This can include a directory path. - add_location_suffix - (optional) Whether the nodelocation and vesselname should be suffixed to - the end of the local filename when saving the file. - local_filename - (optional) If True, the downloaded file will not be saved locally and - instead will be returned as a string instead of the local filename. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - The file has been downloaded and, if return_file_contents is False, it has - been saved to the local filesystem. - - If return_file_contents is False: - The full filename where this file was ultimately saved to. This will be in - the current working directory unless local_filename_prefix included a path - to a different directory. - If return_file_contents is True: - The contents of the remote file as a string. 
- """ - _validate_vesselhandle(vesselhandle) - - if not return_file_contents: - if local_filename is None: - local_filename = remote_filename - if add_location_suffix: - nodeid, vesselname = vesselhandle.split(':') - nodelocation = get_node_location(nodeid) - suffix = "_".join(nodelocation.split(':') + [vesselname]) - local_filename += "_" + suffix - - retrieveddata = _do_signed_vessel_request(identity, vesselhandle, "RetrieveFileFromVessel", remote_filename) - - if return_file_contents: - return retrieveddata - else: - fileobj = open(local_filename, "w") - fileobj.write(retrieveddata) - fileobj.close() - return local_filename - - - - - -def delete_file_in_vessel(vesselhandle, identity, filename): - """ - - Delete a file from a vessel. - - vesselhandle - The vesselhandle of the vessel that the file is to be deleted from. - identity - The identity of either the owner or a user of the vessel. - filename - The name of the file to be deleted from the vessel. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - The file has been deleted from the vessel. - - None - """ - _validate_vesselhandle(vesselhandle) - _do_signed_vessel_request(identity, vesselhandle, "DeleteFileInVessel", filename) - - - - - -def reset_vessel(vesselhandle, identity): - """ - - Stop the vessel if it is running and reset it to a fresh state. This will - delete all files from the vessel. - - vesselhandle - The vesselhandle of the vessel that is to be reset. - identity - The identity of either the owner or a user of the vessel. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - The vessel has been reset. 
No program is running, no files exist on the - vessel, and the vessel status is VESSEL_STATUS_FRESH. - - None - """ - _validate_vesselhandle(vesselhandle) - _do_signed_vessel_request(identity, vesselhandle, "ResetVessel") - - - - - -def start_vessel(vesselhandle, identity, program_file, arg_list=None): - """ - - Start a program running on a vessel. - - vesselhandle - The vesselhandle of the vessel that is to be started. - identity - The identity of either the owner or a user of the vessel. - program_file - The name of the file that already exists on the vessel that is to be - run on the vessel. - arg_list - (optional) A list of arguments to be passed to the program when it is - started. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - The vessel has been started, running the specified program. - - None - """ - _validate_vesselhandle(vesselhandle) - arg_string = program_file - if arg_list is not None: - arg_string += " " + " ".join(arg_list) - _do_signed_vessel_request(identity, vesselhandle, "StartVessel", arg_string) - - - - - - -def stop_vessel(vesselhandle, identity): - """ - - Stop the currently running program on a vessel, if there is one. - - vesselhandle - The vesselhandle of the vessel that is to be stopped. - identity - The identity of either the owner or a user of the vessel. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - If a program was running on the vessel, it has been stopped. The vessel - state is either VESSEL_STATUS_STOPPED or VESSEL_STATUS_TERMINATED. - TODO: verify this is the case and describe when these will happen. 
- - None - """ - _validate_vesselhandle(vesselhandle) - _do_signed_vessel_request(identity, vesselhandle, "StopVessel") - - - - - -def split_vessel(vesselhandle, identity, resourcedata): - """ - - Split a vessel into two new vessels. - - THIS OPERATION IS ONLY AVAILABLE TO THE OWNER OF THE VESSEL. - If you have acquired the vessel through SeattleGENI, you are a user of the - vessel, not an owner. - - vesselhandle - The vesselhandle of the vessel that is to be split. - identity - The identity of the owner of the vessel. - resourcedata - The resourcedata that describes one of the vessels to be split from the - original. The other vessel will have the remainder of the resources - minus some overhead from the split. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - The original vessel no longer exists (meaning that the vesselhandle passed - in as an argument is no longer valid). The node instead has two new vessels. - - A tuple of the two new vesselhandles that resulted from the split. The - first element of the tuple is the vesselhandle of the vessel that has the - leftover resources from the split. The second element of the tuple is the - vesselhandle of the vessel that has the exact resources specified in the - resourcedata. - """ - _validate_vesselhandle(vesselhandle) - return _do_signed_vessel_request(identity, vesselhandle, "SplitVessel", resourcedata) - - - - - -def join_vessels(identity, vesselhandle1, vesselhandle2): - """ - - Join (combine) two vessels on the same node into one, larger vessel. - - THIS OPERATION IS ONLY AVAILABLE TO THE OWNER OF THE VESSEL. - If you have acquired the vessel through SeattleGENI, you are a user of the - vessel, not an owner. - - identity - The identity of the owner of the vessel. - vesselhandle1 - The vesselhandle of the one of the vessels to be comined. 
- vesselhandle2 - The vesselhandle of the the other vessel to be combined. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - Neither of the original two vessel exist (meaning that neither vesselhandle1 - nor vesselhandle2 are valid anymore). The node has one new vessel whose - resources are the combination of the resource of the original two vessels - plus some additional resources because of less overhead from less splits. - - The vesselhandle of the newly created vessel. - """ - _validate_vesselhandle(vesselhandle1) - _validate_vesselhandle(vesselhandle2) - vesselname2 = vesselhandle2.split(":")[1] - return _do_signed_vessel_request(identity, vesselhandle1, "JoinVessels", vesselname2) - - - - - -def set_vessel_owner(vesselhandle, identity, new_owner_identity): - """ - - Change the owner of a vessel. - - THIS OPERATION IS ONLY AVAILABLE TO THE OWNER OF THE VESSEL. - If you have acquired the vessel through SeattleGENI, you are a user of the - vessel, not an owner. - - vesselhandle - The vesselhandle of the vessel whose owner is to be changed. - identity - The identity of the current owner of the vessel. This identity must have - a private key. - new_owner_identity - The identity that the owner of the vessel is to be changed to. This - identity only needs to have a public key. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - The owner of the vessel has been changed. 
- - None - """ - _validate_vesselhandle(vesselhandle) - _do_signed_vessel_request(identity, vesselhandle, "ChangeOwner", new_owner_identity['publickey_str']) - - - - - -def set_vessel_advertise(vesselhandle, identity, advertise_enabled): - """ - - Set whether the vessel should be advertising or not. - - THIS OPERATION IS ONLY AVAILABLE TO THE OWNER OF THE VESSEL. - If you have acquired the vessel through SeattleGENI, you are a user of the - vessel, not an owner. - - vesselhandle - The vesselhandle of the vessel whose advertise status is to be set. - identity - The identity of the owner of the vessel. - advertise_enabled - True if the vessel should be advertising, False if it should not be. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - The vessel either will be advertising or will not be. - - None - """ - _validate_vesselhandle(vesselhandle) - - if not isinstance(advertise_enabled, bool): - raise TypeError("advertise_enabled must be a boolean.") - - _do_signed_vessel_request(identity, vesselhandle, "ChangeAdvertise", str(advertise_enabled)) - - - - - -def set_vessel_ownerinfo(vesselhandle, identity, ownerinfo): - """ - - Set the owner info of a vessel. - - THIS OPERATION IS ONLY AVAILABLE TO THE OWNER OF THE VESSEL. - If you have acquired the vessel through SeattleGENI, you are a user of the - vessel, not an owner. - - vesselhandle - The vesselhandle of the vessel whose advertise status is to be set. - identity - The identity of the owner of the vessel. - ownerinfo - The ownerinfo to be set on the vessel. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - The ownerinfo of the vessel has been set. 
- - None - """ - _validate_vesselhandle(vesselhandle) - _do_signed_vessel_request(identity, vesselhandle, "ChangeOwnerInformation", ownerinfo) - - - - - -def set_vessel_users(vesselhandle, identity, userkeystringlist): - """ - - Change the owner of a vessel. - - THIS OPERATION IS ONLY AVAILABLE TO THE OWNER OF THE VESSEL. - If you have acquired the vessel through SeattleGENI, you are a user of the - vessel, not an owner. - - vesselhandle - The vesselhandle of the vessel whose users are to be set. - identity - The identity of the owner of the vessel. - userkeystringlist - A list of key strings. The key strings must be in the format of the data - stored in key files. That is, each should be a string that consists of - the modulus, followed by a space, followed by the public exponent. - - NodeCommunicationError - If communication with the node failed, either because the node is down, - the communication timed out, the signature was invalid, or the identity - unauthorized for this action. - - The user keys in userkeystringlist are the only users of the vessel. - - None - """ - _validate_vesselhandle(vesselhandle) - # TODO: Arguably the argument should be a list of identities rather than a - # list of key strings. - formatteduserkeys = '|'.join(userkeystringlist) - _do_signed_vessel_request(identity, vesselhandle, "ChangeUsers", formatteduserkeys) - - - - - -def get_nodeid_and_vesselname(vesselhandle): - """ - - Given a vesselhandle, returns the nodeid and vesselname. - - vesselhandle - The vesselhandle of the vessel whose nodeid and vesselname are to be - returned. - - None - - None - - A tuple of (nodeid, vesselname) - """ - _validate_vesselhandle(vesselhandle) - return vesselhandle.split(":") - - - - - -def get_vesselhandle(nodeid, vesselname): - """ - - Given a nodeid and vesselname, returns a vesselhandle that represents the - vessel. - - nodeid - The nodeid of the node that the vessel is on. - vesselname - The name of the vessel. 
- - None - - None - - A vesselhandle. - """ - return nodeid + ":" + vesselname - - - - - -def get_host_and_port(nodelocation): - """ - - Given a nodelocation, returns the host and port of the node. - - nodelocation - The nodelocation of the node whose host and port are to be returned. - - None - - None - - A tuple of (host, port), where host is a string and port is an int. - The host may be an IP address or an identifier used by NAT forwarders. - """ - _validate_nodelocation(nodelocation) - host, portstr = nodelocation.split(":") - return host, int(portstr) - - - - - -def get_node_location(nodeid, ignorecache=False): - """ - - Determine a nodelocation given a nodeid. - - nodeid - The nodeid of the node whose location is to be determined. - ignorecache - (optional, default is False) Whether to ignore cached values for this - node's location, forcing an advertise lookup and possibly also - attempting to contact potential nodelocations. - - NodeLocationLookupError - If no node locations are being advertised under the nodeid or if a - NodeCommunicationError - If multiple node locations are being advertised under the nodeid but - successful communication cannot be performed with any of the locations. - - If the node location isn't already known (or if ignorecache is True), - then an advertise lookup of the nodeid is done. In that case, if - multiple nodelocations are advertised under the nodeid, then each location - will be contacted until one is determined to be a valid nodelocation - that can be communicated with. - - A nodelocation. This nodelocation may or may not have been communicated - with and is instead only the most likely location of a node at the time - this function was called. 
- """ - if ignorecache or nodeid not in _node_location_cache: - locationlist = lookup_node_locations_by_nodeid(nodeid) - if not locationlist: - raise NodeLocationLookupError("Nothing advertised under node's key.") - # If there is more than one advertised location, we need to figure out - # which one is valid. For example, if a node moves then there will be - # a period of time in which the old advertised location and the new - # one are both returned. We need to determine the correct one. - elif len(locationlist) > 1: - for possiblelocation in locationlist: - host, portstr = possiblelocation.split(':') - try: - # We create an nmhandle directly because we want to use it to test - # basic communication, which is done when an nmhandle is created. - nmhandle = fastnmclient.nmclient_createhandle(host, int(portstr)) - except fastnmclient.NMClientException, e: - continue - else: - fastnmclient.nmclient_destroyhandle(nmhandle) - _node_location_cache[nodeid] = possiblelocation - break - else: - raise NodeCommunicationError("Multiple node locations advertised but none " + - "can be communicated with: " + str(locationlist)) - else: - _node_location_cache[nodeid] = locationlist[0] - - return _node_location_cache[nodeid] - - - - - -def get_nodeid(nodelocation): - """ - - Determine a nodelocation given a nodeid. Note that if you have already - obtained a vesselhandle for a vessel on the node, you can get the nodeid - using get_nodeid_and_vesselname(vesselhandle), which would avoid having - to contact the node. - - nodelocation - The nodelocation of the node whose nodeid is to be determined. - - NodeCommunicationError - If a failure occurs in communicating with the node. - - None - - A nodeid. - """ - # We assume at least one vessel on the node. This is a safe assumption - # unless there's something very wrong with the node. 
- return browse_node(nodelocation)[0]['nodeid'] - - - - - -def _call_seattlegeni_func(func, *args, **kwargs): - """ - Helper function to limit the potential errors raised by seattlegeni_* - functions to SeattleClearinghouseError or classes that extend it. The seattleclearinghouse_xmlrpc - module doesn't catch ProtocolError or unexpected xmlrpc faults. At the level - of the experimentlib, though, we just consider these generic failures for the - purpose of simlifying error handling when using the experimentlib. - """ - try: - return func(*args, **kwargs) - except xmlrpclib.ProtocolError: - raise SeattleClearinghouseError("Failed to communicate with SeattleGENI. " + - "Are you using the correct xmlrpc url? " + tracebackrepy.format_exception()) - except xmlrpclib.Fault: - raise SeattleClearinghouseError("Unexpected XML-RPC fault when talking to SeattleGENI. " + - "Are you using a current version of experimentlib.py and " + - "seattleclearinghouse_xmlrpc.py? " + tracebackrepy.format_exception()) - - - - - -def _get_seattlegeni_client(identity): - - if "seattlegeniclient" not in identity: - _validate_identity(identity, require_private_key=True, require_username=True) - private_key_string = rsa.rsa_privatekey_to_string(identity["privatekey_dict"]) - # We use _call_seattlegeni_func because the SeattleClearinghouseClient constructor - # may attempt to communicate with SeattleClearinghouse. - client = _call_seattlegeni_func(seattleclearinghouse_xmlrpc.SeattleClearinghouseClient, - identity['username'], - private_key_string=private_key_string, - xmlrpc_url=SEATTLECLEARINGHOUSE_XMLRPC_URL, - allow_ssl_insecure=SEATTLECLEARINGHOUSE_ALLOW_SSL_INSECURE, - ca_certs_file=SEATTLECLEARINGHOUSE_CA_CERTS_FILES) - identity["seattlegeniclient"] = client - - return identity["seattlegeniclient"] - - - - - -def _seattlegeni_cache_node_locations(seattlegeni_vessel_list): - """ - This takes a list of vessel dicts that aren't the standard vesseldict this - module normally deals with. 
Instead, these are dicts with the keys that are - directly returned by the seattlegeni xmlrpc api. - """ - for seattlegeni_vessel in seattlegeni_vessel_list: - nodeid = seattlegeni_vessel['node_id'] - ip = seattlegeni_vessel['node_ip'] - portstr = str(seattlegeni_vessel['node_port']) - _node_location_cache[nodeid] = ip + ':' + portstr - - - - - -def seattlegeni_acquire_vessels(identity, vesseltype, number): - """ - - Acquire vessels of a certain type from SeattleGENI. This is an - all-or-nothing request. Either the number requested will be acquired or - no vessels will be acquired. - - identity - The identity to use for communicating with SeattleGENI. - vesseltype - The type of vessels to be acquired. This must be one of the constants - named SEATTLECLEARINGHOUSE_VESSEL_TYPE_* - number - The number of vessels to be acquired. - - The common SeattleGENI exceptions described in the module comments, as well as: - SeattleClearinghouseNotEnoughCreditsError - If the account does not have enough available vessel credits to fulfill - the request. - - Either the full number of vessels requested are acquired or none are. - - A list of vesselhandles of the acquired vessels. - """ - client = _get_seattlegeni_client(identity) - seattlegeni_vessel_list = _call_seattlegeni_func(client.acquire_resources, vesseltype, number) - - _seattlegeni_cache_node_locations(seattlegeni_vessel_list) - - return _create_list_from_key_in_dictlist(seattlegeni_vessel_list, "handle") - - - - - -def seattlegeni_acquire_specific_vessels(identity, vesselhandle_list): - """ - - Acquire specific vessels from SeattleGENI. This is not an all-or-nothing - request. - - identity - The identity to use for communicating with SeattleGENI. - vesselhandle_list - A list of vesselhandles. Even though the request may be only partially - fulfilled, the size of this list must not be greater than the number of - vessels the account has available to acquire. 
- - The common SeattleGENI exceptions described in the module comments, as well as: - SeattleClearinghouseNotEnoughCreditsError - If the account does not have enough available vessel credits to fulfill - the request. - - If successful, zero or more vessels from handlelist have been acquired. - - A list of vesselhandles of the acquired vessels. - """ - client = _get_seattlegeni_client(identity) - seattlegeni_vessel_list = _call_seattlegeni_func(client.acquire_specific_vessels, vesselhandle_list) - - _seattlegeni_cache_node_locations(seattlegeni_vessel_list) - - return _create_list_from_key_in_dictlist(seattlegeni_vessel_list, "handle") - - - - - -def seattlegeni_release_vessels(identity, vesselhandle_list): - """ - - Release vessels from SeattleGENI. - - identity - The identity to use for communicating with SeattleGENI. - vesselhandle_list - The vessels to be released. - - The common SeattleGENI exceptions described in the module comments. - - The vessels are released from the SeattleGENI account. - - None - """ - _validate_vesselhandle_list(vesselhandle_list) - - client = _get_seattlegeni_client(identity) - _call_seattlegeni_func(client.release_resources, vesselhandle_list) - - - - - -def seattlegeni_renew_vessels(identity, vesselhandle_list): - """ - - Renew vessels previously acquired from SeattleGENI. - - identity - The identity to use for communicating with SeattleGENI. - vesselhandle_list - The vessels to be renewed. - - The common SeattleGENI exceptions described in the module comments, as well as: - SeattleGENINotEnoughCredits - If the account is currently over its vessel credit limit, then vessels - cannot be renewed until the account is no longer over its credit limit. - - The expiration time of the vessels is is reset to the maximum. 
- - None - """ - _validate_vesselhandle_list(vesselhandle_list) - - client = _get_seattlegeni_client(identity) - _call_seattlegeni_func(client.renew_resources, vesselhandle_list) - - - - - -def seattlegeni_get_acquired_vessels(identity): - """ - - Obtain a list of vesselhandles corresponding to the vessels acquired through - SeattleGENI. - - In order to return a data format that is most useful with the other functions - in this module, this function drops some potentially useful info. Therefore, - there's a separate function: - seattlegeni_get_acquired_vessels_details() - for obtaining all of the vessel information returned by seattlegeni. - - identity - The identity to use for communicating with SeattleGENI. - - The common SeattleGENI exceptions described in the module comments. - - None - - A list of vesselhandles. - """ - vesseldict_list = seattlegeni_get_acquired_vessels_details(identity) - - # We look for the vesselhandle key rather than 'handle' because these - # are vesseldicts, by our definition of them, not the raw dictionaries - # that seattlegeni hands back. - return _create_list_from_key_in_dictlist(vesseldict_list, "vesselhandle") - - - - - -def seattlegeni_get_acquired_vessels_details(identity): - """ - - Obtain a list of vesseldicts corresponding to the the vessels acquired - through SeattleGENI. - - identity - The identity to use for communicating with SeattleGENI. - - The common SeattleGENI exceptions described in the module comments. - - None - - A list of vesseldicts that have the additional key 'expires_in_seconds'. - """ - client = _get_seattlegeni_client(identity) - seattlegeni_vessel_list = _call_seattlegeni_func(client.get_resource_info) - - _seattlegeni_cache_node_locations(seattlegeni_vessel_list) - - # Convert these dicts into dicts that have the required keys for us to - # consider them "vesseldicts", by the definition given in the module - # comments. 
- vesseldict_list = [] - for seattlegeni_vessel in seattlegeni_vessel_list: - vesseldict = {} - vesseldict_list.append(vesseldict) - - nodeid = seattlegeni_vessel['node_id'] - ip = seattlegeni_vessel['node_ip'] - portstr = str(seattlegeni_vessel['node_port']) - vesselname = seattlegeni_vessel['vessel_id'] - - # Required keys in vesseldicts (see the module comments for more info). - vesseldict['vesselhandle'] = nodeid + ":" + vesselname - vesseldict['nodelocation'] = ip + ':' + portstr - vesseldict['vesselname'] = vesselname - vesseldict['nodeid'] = nodeid - # Additional keys that browse_node provides. - vesseldict['expires_in_seconds'] = seattlegeni_vessel['expires_in_seconds'] - - return vesseldict_list - - - - - -def seattlegeni_max_vessels_allowed(identity): - """ - - Determine the maximum number of vessels that can be acquired by this - account through SeattleGENI, regardless of the number currently acquired. - That is, this is an absolute maximum, not the number that can still be - acquired based on the number already acquired. - - identity - The identity to use for communicating with SeattleGENI. - - The common SeattleGENI exceptions described in the module comments. - - None - - The maximum number of vessels the account can acquire (an integer). - """ - client = _get_seattlegeni_client(identity) - # We can't cache this value because it may change as the user's donations - # come online and go offline. - return _call_seattlegeni_func(client.get_account_info)['max_vessels'] - - - - - -def seattlegeni_user_port(identity): - """ - - Determine the port which SeattleGENI guarantees will be usable by the - account on all acquired vessels. - - identity - The identity to use for communicating with SeattleGENI. - - The common SeattleGENI exceptions described in the module comments. - - None - - The port number (an integer). 
- """ - client = _get_seattlegeni_client(identity) - # The user port won't change, so let's not make a new seattlegeni request - # each time just in case someone uses this a lot in their program. We'll go - # ahead and keep in this in the identity. It's not a documented part of - # the identity so nobody should be trying to access it directly. - if 'user_port' not in identity: - identity['user_port'] = _call_seattlegeni_func(client.get_account_info)['user_port'] - return identity['user_port'] From 39d606eea5adfdbe850205c67fbc5a6ade968678 Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:33:08 -0500 Subject: [PATCH 07/17] Delete metainfo --- metainfo | 279 ------------------------------------------------------- 1 file changed, 279 deletions(-) delete mode 100644 metainfo diff --git a/metainfo b/metainfo deleted file mode 100644 index 9258d9f..0000000 --- a/metainfo +++ /dev/null @@ -1,279 +0,0 @@ -Linux_resources.py 95a3e7e17a1ff2fce3d9fafb5f4d03b951cb2ab1 17825 -nmstatusmonitor.py 618ebc041617c2185e0806eb6cad88e665b32119 5176 -repyportability.py 84bd967a0e7beb7ce8d03c5ab9287bc57c9145fb 10807 -urllib.r2py 2914c93eeb8b47b45752e9bc205287e0e7138209 4401 -encasementlib.r2py 94cd1526233823e38da6918d95a7d58683f72e6d 6836 -sshkey.r2py 6cf36ded75df694949141986808c291203bc5b55 7214 -servicelogger.py aef4fb928ae93089716fa1798d7758d60f052b6a 10211 -nmrequesthandler.py de50258f43cac130e2cf07e1cd4c5e9203e68fdc 11111 -centralizedadvertise_v2.r2py 089d5d9fe21803dedf396497f138d9506968c538 5783 -nonportable.py 6b3e74d3c7f8d2c63048075f3ec68567e0938516 28080 -librepyrunloop.r2py b97b9700c948354ed765c9ea3b082a8b82966ebb 9949 -loggingaffix.r2py be4172d5f51c1c48e3f4f72ae4d45a00b11483cf 5656 -nmrestrictionsprocessor.py 84aa8fcf1037d820a11d5f5db52c25b121de2be1 5840 -semaphore.r2py 6c4b8c816657cb6339c41687003ea969f43f2e5c 5191 -seattleclearinghouse_xmlrpc.py d1dc98bbd62c8c0f01c0db904f5fdcca99d3d795 19920 -writemetainfo.py 0d0dea66e73b618c82ccba45d038eb00f2d1c42f 4543 
-darwin_api.py b909fc0a65880f8bdceadac27fbfeb4f177a1e6c 11190 -affix_repy_network_api_wrapper.r2py dbf65a834861afce3ddf3bb4abd8aa9e3dce5f6b 2927 -httpserver.r2py 3e517f4758579d830d874e367f35f920f5069bf1 28584 -serialize.r2py 98bddde889d407058e4ced9e3de2d2398f02a531 6573 -windows_api.py dc6b656b97450bd00f11cb3c423b00e4cef4522d 44407 -affix_stack.r2py 8cbc96595b620143335e9ec2930e124aa4b0ba8e 12383 -sha.r2py bba9db372fa51dba3c89e4056316d8e43ae43bc8 9982 -runonce.py 24bba4d84fe5c6ca371dea45e189bbb96585bc5a 8178 -daemon.py 7cd120327a44f5a17125072e4063249c48678828 1307 -wrapper.r2py 774103f5bcd516b6a2182d00a2c9b0a682d3696e 18806 -seash_dictionary.py 0f44ea7dcb3e625aae1eb5b60173377661d126c3 78870 -argparse.r2py 9ee82545246d712f9909b6db0db4192c1b272159 5823 -convert_print_to_log.sh 9e402d4e7fa235498535e3d1d51ed65cf9d6b1e9 1103 -softwareupdater.py~ bf23d2a30e97c68ed3b29f36ddfc8c5786f3103f 31171 -nmmain.py 0d781429780946a0d499f25e045fc5bcaaabf5dc 25667 -dylink_r2py.py e699bc65526d4c6bcfd06cb4825621fb9a76e31e 18516 -harshexit.py b14c080ea38ecba8caa385ed04717a2d41936f36 4435 -restrictions.default d9ac5b281e1d9ff01ccbe62e8887c72858dcb6d4 7177 -xmlrpc_client.r2py 96f88ec041d7a4f3d92015ae882dc738a10d6b11 3157 -nix_common_api.py 559425c89e97dbd55fd280aeec8f43e9cbbf3ee6 5428 -xmlrpc_common.r2py 12fd671718b3a0f426314b65cf6a5a37ad085dd6 14660 -secureclient.r2py 6e83e59297de02bc5b5fa3734d09ce24922b5962 2893 -exception_hierarchy.py 11fdcc0927285d8b5d7f7d11f264c30189f668ad 6492 -nmAPI.py abe232a9ff4acc0cafc854a733b38bb610e8aef5 29629 -cachedadvertise.r2py 97a2cb70e8b0111e61fc8b23999506ba11f5e71a 3361 -registerhttpcallback.r2py 616c12e27140603287b9f3372d6aca324fbb6402 29537 -makemehearaffix.r2py 962b5346c0d0c5e6b53218ac97733a89a5101205 9593 -Mac_BSD_resources.py bf8e570bb554607af6d9fec1e732a1b9b744f297 6811 -binascii.r2py 5d558d6da9f8320c883bf0868cd3d5c63dba81e9 5133 -resourcemanipulation.py f82688d2ea6c2a4249020bdb8beb004a747a2e62 12395 -seattleinstaller.py 
a20c2582e6d993a1cb70eb6e2b2e415445c36273 88448 -freebsd_kinfo.py 56fde89ec98aa389417bc7f2e305111da350a006 31261 -centralizedadvertise.r2py 48386a562f8ec57790a48d1bf1b9aa9d7954389d 5764 -command_callbacks.py bd938955ffc87a8fe55369c475d90fc70522ca35 95378 -safe_eval.r2py db53a8cd70ecbdb33ea1056218592eab2bc34913 1430 -nmconnectionmanager.py d104a5d4ef1168046b28b541a661a33a7b63a730 7002 -repy_constants.py 6fa03ecfc6a807901d7d9c4a3d23fe5923cc04ff 1924 -seash_global_variables.py d3c1113ac82b94aaab9ab43e0b221f06b807e9a0 1046 -nmthreadingerror.py ea77731639370a200f17d34c8e6a33b27c1918ec 3787 -tcp_time.r2py 0697608917ee98c5f53feea37cc520e76120c844 3953 -time_interface.r2py bf4ac855b3231d2e91e34044f1d69f0d850e91c7 4647 -tcp_relay_common_lib.r2py 390388654ca19618435bc451e0963775bde65d21 641 -tab_completer.py 6416cfec94c590e8b58cbf21205e93afcfdf3f67 9401 -tracebackrepy.py 473addaf902a13bb3f8cfcc4f4a9a62536a16a9e 9185 -emulmisc.py 3dcef49d02982863683cd7b44e2e3f5c6fa83a3f 5662 -measure_random.py ce81da23b560aeeb0bf5fd4e3cb5f8a36dffa509 3660 -tcprelayaffix.r2py 724d1875b27e4cac9b67a56c3977c97e598ff93e 20443 -baseaffix.r2py 2eafaa45a026fc08eefd46388538ee9e3b07f753 7738 -nminit.py 03b8eef5834794fc46174bf141204c6366bd3e70 14125 -secureadvertise_client.r2py 02bade04d5d56aa4be8d55ddfaf936dd9671311b 7012 -librepyrandom.r2py e5975dbd3ed2db6aee250a5fc67a6fdcbfed3ba6 2370 -measuredisk.py 5a6dcebc2e8831268cb2c94a29e246a1027f0fbe 8182 -persist.py 5842ad06d741d33aa18669e742594ce330dbd284 6641 -nmmain.py~ 41aabef816d29d8a742bf8e9fd07888022d70d76 25674 -ntp_time.r2py 525dc64b1ac2187c88ced9fc2fb68670551ccddf 4352 -seash_modules.py d831748fdfd1ff477cd8ecf6e230eac82e0a12a5 18553 -repy.py 4eef470737e3abd09458fe4884f446da886dc3f8 13735 -createnodekeys.py 86040e2e68c7e06115197d7b6732a822b0f058d4 1522 -fastnmclient.py 9dc05c89b6c4267654ae1b5a407121fc5a539999 15785 -pycryptorsa.r2py 0386ef25a1a3e47c599e342e351cb633ca7c4694 36561 -librepythread.r2py 94b02bed815f24115c1febaeb3969376a96f588f 21271 
-seash_exceptions.py c0f0aec96fc6c35a6e8b545c632a2d2a4b4dd90b 1058 -sockettimeout.r2py 7e563f5d6d0900de4e2ccb095c4ce19316e792fb 9308 -urlparse.r2py af3bd3ac682ffb5f5efe3676686181ed87468e57 5103 -nmresourcemath.py c9d5427b40ea8a82f3a83d8d0109cc525371ad97 2523 -safe_check.py 9f022501e65a38575e2bfcbf0bf0e7ddf0e6571f 757 -timeout_xmlrpclib.py f797757fea756ce7ec72172feb31a275a191700a 1051 -vessel.restrictions dc3a72229ab8fd8b78ab7572cb3efc4d27950875 1992 -nmadvertise.py fbbf576a1b3605d3f254de2fc10d2cdf277b2228 5933 -strace.py d2fb23abbabdf5aa1eeba1102a0df9f4954f7c09 7567 -emulfile.py 08f6c608d6210eb8b3e6107ef51650b5145249df 14829 -loggingrepy_core.py 22717381d3b5b2fdce3c3681a55c59ef30309f34 7208 -seash_helper.py dd72e6f9369dad29fd1c697a7cba81ca45857e1c 31705 -secureadvertise_server.r2py 6736b69820488a97fe5c962311f501940b21aeca 15079 -librepy.r2py 9a2660e75769a5403b6a13e5a55a747c5328245f 4403 -time.r2py cfccb52432216e3bbd4d55cfba21892d53466cc7 1196 -repyunit.r2py be0f4291a8b861e68ea314bbe45a5b4c2799b760 18807 -librepyfile.r2py d797fa41d7a229476e80e1545cba47b2ca32d2e4 20891 -textops.py 20ea2ab6eef2a4c9b2fa7fe6b979e794b00d7d1d 4881 -signeddata.r2py 910ac40b55b9320a077cb7ad388225305a5acdb2 15812 -httpretrieve.r2py 6084fa2aa1cfd30d87e6077a9ffa679c828a4797 18283 -tcp_relay.r2py 37ca81b0e0346b2273dc0887b0ef54d7464d6d34 25135 -linux_api.py a53c1b6a1a48c0aa00c70faa44848e3a5ca94084 8617 -Win_WinCE_resources.py 2c405bc4934d7a3b0632ba231a412ad08393b544 6936 -repyhelper.py e415995bcc1abd8b76c4c32589cc495d76d691bc 17998 -servicelookup.r2py 2f879b21e17b823db40a505c6ce9f5654c41c7cb 798 -nmstatusinterface.py ab11e19d5e32f596cc6a4db5482ab6fe59ff1a3f 5993 -win_cpu_nanny.py d2d0144a1022a9b57f28684a9de73ca98adda57f 1635 -benchmark_resources.py 2d27cff92f2b95ad99e1f8f0b8cd5437940f885a 23151 -listops.r2py 0e7bac3e5604acf7c16b0a5c61cbb77b3d5d7663 2677 -doradvertise.r2py 97c90c519c00e61905c73ca24872877ca1d66727 4936 -opendhtadvertise.r2py ee7a6cce82d2928a804803c6f45ee3597f40e36c 8688 
-pydes.r2py c7e6a0d3fff04fd9c4039df6a1368fb7c3b2441e 29527 -fastsigneddata.py 9c123071d179dc584a3264bb952a76d8714acbcf 16218 -namespace.py a2e0c11a3959c46ca0556e9777954b5394022ee7 39156 -xmlparse.r2py e93ee3e6df83e01f1f569c8c48a222e2cb76d69a 10968 -seash.py ab956d08016c547f6a9f1f0776894d98a509d9ef 11399 -cv.r2py ab0922c50e12f8d05edabc20fb677e7fa9df87df 6726 -nanny.py 9573b2ef4423e2c977f7c32ce679f355130cf1fd 14727 -getvesselresources.r2py 48dc056470a71f302ece1733e202d0b7dc5e7408 5313 -session.r2py deb9cb6f4d0b39b9eecf731d08d59ce7ed2cece5 3244 -librepysocket.r2py 656ada9708b75c7abba060e92d276c002d989816 35091 -geoip_client.r2py a6f5fd11b3f49a1d1ecca4f8148ef98fb84ea03f 3237 -emultimer.py a5535fba66a5fb297fefb86ed716673bc1c0a44e 3081 -md5py.r2py f0aa5d62e8cb393dd36f270a6728277c6f9d3a55 15429 -base64.r2py 9bec5a042571e78297f713166147d4f640297f77 7291 -statusstorage.py cc1ae962da79e474987443188165508884cbccd9 3014 -portable_popen.py a0c0c10afcafc6d5c5242588a7d26ac2eade8859 951 -sshkey_paramiko.r2py 24ee4c516bad9fa72eed291f8f3f2210de00e78b 15431 -rsa.r2py 149c80a12b826e983b147585aa2d75a8cb84c034 22327 -seattleuninstaller.py 1d9ba105680afac079119d0c5cc62eaa76bf42fc 21742 -readline.so.mac 2c053c4fe51d542d51ac1155f078e0309706abe0 664640 -emulcomm.py 4d4821cb725ca3f348da226aad1945da03847a64 65128 -checkpythonversion.py 60c33ee7aa2e175ca7a659f3c4250a79dc20e4d1 989 -idhelper.py 9ad51b4f7b1f57e5d6523a94d25f370c18422e7f 2239 -udpcentralizedadvertise.r2py 1c7e93d78c1692ddcba7db8f613dd83a70d84ac8 10275 -advertisepipe.r2py 433d7db0c66eabe5f910cfa3fb7bc775f6e0e931 8239 -noopaffix.r2py abb0e66d1a295962d183d50334a94e87e2201404 515 -namingandresolveraffix.r2py 305408d21410934edc55205163abaf49c491accf 10830 -deserialize.r2py aa443adcd1af9a28debb425e3e25717e729420c0 10813 -dylink.r2py e63a9a4226f214482f7fe0910aace7993a68139c 17911 -advertiseserver_v2.r2py e3cb6a8e7fc3067fadbaf810a8b988f5b553c2dc 15204 -xmlrpc_server.r2py ed2fa2913294a634402bdeca7938772cf5f086fc 8220 -loggingrepy.py 
84210f2a40443ba71390de25029a5e61c58d4ec5 3350 -resource_constants.py 5a434c415ca35b727f84c1502319f032228dafc6 1768 -repypp.py 77e7234447437e29845d89daa1082072dffd5c90 5804 -LICENSE 82936ad5453bcfa3137113ddfd64d867cf909dba 1082 -stop_all_seattle_processes.py c13e28dc664cf7cdf0801b77cd8f4660caf637e7 1259 -freebsd_api.py 28c2b339008dd738120e9c33193eb4db36187c83 9131 -affix_exceptions.r2py 1194298a76c5e28d559cb63903b62a3b88e0d5a9 1067 -parallelize.r2py c246c14feb28a6dbc4f3a2a06eff72f9a3a4acb4 9841 -coordinationaffix.r2py 8e062a97697b698caf8f11cdcb9f868ff4cbbf67 8781 -fakelinecache.py f200d1f04dfd847afe4b866a7ee45d8d2f193600 172 -domainnameinfo.r2py e0a44c370b3cc6da0128f1d751e39d9c51177a43 7175 -virtual_namespace.py dde62a37a2540fc084a7943f50ec9b95ea449e30 3691 -advertise.r2py 8341b8345792cb581f6105694b2766768d3f6df9 13868 -uniqueid.r2py 25940ca7516c76c29c257cc686e1e68904549f65 1205 -update_crontab_entry.py 539d081d8fa23fcee9b6993151d141273256a344 6194 -softwareupdater.py 6cacb509748046fcd14e819c6695c4282a5a46d4 30868 -create_installer_state.py f5a30f315828256e61195f3b11c68876e4a68758 12665 -safe.py 692da8a16154dcf8b4e787a19f7ab4eee91d944b 24761 -math.r2py bbcd7e751f3de9cbad73b9044fa8be12bf980113 1793 -nmclient.r2py 32727f6d429feedfcd40d9cb1f511740f864a9f4 16672 -random.r2py e5e5e0145387476055f7089d95e6cb14f231c60e 16344 -readline_windows.py 6c9d40fe3cb00f587df15a663fe397bb9a81c306 2633 -affix_wrapper_lib.r2py 2bfd630ab09871d5960aee5fb967c741653ac29c 5650 -canilisten.r2py 49009de0f62d5778d9f8fb9224e3decc9387292a 23334 -pyreadline/error.py 7febcedee8658d68c9327103f5cc4c24736982a0 496 -pyreadline/logger.py 807fcc985f97914e3d5c5b4579a08b52c14546c5 2117 -pyreadline/release.py ae25e82c3e5af4e437e769c65cc2aa71923eb8e4 3287 -pyreadline/get_doc.py 1cafee75ff5417054f6830231582e1f1c10679f2 496 -pyreadline/unicode_helper.py e262a5c36fba06c722cc18620c963542c215e3dd 1393 -pyreadline/__init__.py e5461fff668b699d00c310a6042e2b1ee63f186f 555 -pyreadline/logserver.py 
2e93a42b549b1f51811e40def16fe47f106d6f18 1516 -pyreadline/rlmain.py 2c6ceef2c2fba101741086fa14acce4caa280700 21677 -pyreadline/keysyms/keysyms.py ee2ab767e218d3bb570a79d152deea0f94327630 5291 -pyreadline/keysyms/ironpython_keysyms.py e07130f0991285f5603cf72a3ac62e6695f10944 7160 -pyreadline/keysyms/__init__.py 02a47de47d7408a8b9780e2331aee0edbe3a9047 430 -pyreadline/keysyms/common.py dcb143e3ffe59951b0db6344173f0bbd7e1cbf46 5191 -pyreadline/keysyms/winconstants.py 1818342c0f5d5652d69c489a9362cb61b0bccbe8 2544 -pyreadline/console/event.py 692562631b9d9d4aace6f41582ddb0fd99104610 1176 -pyreadline/console/console.py be95eaf3d7192781ad150c3ec7e8098e38b3d6cf 31974 -pyreadline/console/consolebase.py ee68cb0834be85fb4e279fa4ed9d5d817c29a7d6 1591 -pyreadline/console/__init__.py 9eb34082d09b008122f5802a449f9a97777888b5 461 -pyreadline/console/ironpython_console.py 18bde9cc357f00bd5cd0ea40efdaaa34ecfd39e6 14183 -pyreadline/console/ansi.py 64e284d7175d66c0c1e34ad9f632fbd45a74ee02 7539 -pyreadline/console/console_attributes.py db447f2d1df7a807690365ab79318ec2bc5328d3 542 -pyreadline/clipboard/ironpython_clipboard.py c8534915ca103cff7291b3c7068fd42315759b66 846 -pyreadline/clipboard/win32_clipboard.py b981ab9810d6a4152c19fffe3d46fe808d6de923 3746 -pyreadline/clipboard/__init__.py 3bc97551179b5bdcf97bbe4181f6567d86a3688e 2135 -pyreadline/clipboard/no_clipboard.py 12cf184e045e996dd4228ec4b64b64f61f367160 549 -pyreadline/lineeditor/history.py 6df2ad030d0a5b78a39fa64641f851231a70d7ea 10833 -pyreadline/lineeditor/wordmatcher.py 68b9ed1276da25492d99340454291b5279c5eca5 3432 -pyreadline/lineeditor/__init__.py da39a3ee5e6b4b0d3255bfef95601890afd80709 0 -pyreadline/lineeditor/lineobj.py f3f367f6631babe018732860fe7ddb1ca1c07203 25719 -pyreadline/modes/vi.py 6b57c8096252b274d126952d7b369bfd46208529 39664 -pyreadline/modes/basemode.py cd79df69caff98bb7b5f11358563fbb5a800f67d 21945 -pyreadline/modes/emacs.py b35bffb5293191d1fcd8417c406a471eb3d88085 31847 -pyreadline/modes/__init__.py 
80e3c50b7c223ad236d6f4e5d188c8fbf988560c 180 -pyreadline/modes/notemacs.py 69c04727bcddf1a90cb0716597d63cbeed074870 25690 -modules/geoip/__init__.py 589daf32994ca46328591a155d495770867775a8 5136 -modules/clearinghouse/command_callbacks.py 3cd7bf81139e8001d56e71a4fb2edc1d7be094b1 10394 -modules/clearinghouse/clearinghouse.py 3cdad578ee04c5e70e688bfa458f318af14c1e64 3269 -modules/clearinghouse/__init__.py 69c66945e410623262de002a0f8f136551bef35a 163 -modules/modules/command_callbacks.py 3e81f98835cb8e39c65ff830ce4ec3fc0937eca7 5289 -modules/modules/__init__.py 39177f89555a5ce093212a2026d9759c354f5810 4514 -modules/factoids/factoid.txt c5b8beecb1cc46a2efa6dd7c081c7d5f044edc7d 426 -modules/factoids/__init__.py d2d17fbc2acce201c9c88a5937fa1fd5ecf4ab72 5184 -modules/variables/__init__.py 8801fd73e3e747d4e108b8d7d7e2329c781f3150 7706 -repyV2/repyportability.py 84bd967a0e7beb7ce8d03c5ab9287bc57c9145fb 10807 -repyV2/servicelogger.py aef4fb928ae93089716fa1798d7758d60f052b6a 10211 -repyV2/nonportable.py 6b3e74d3c7f8d2c63048075f3ec68567e0938516 28080 -repyV2/darwin_api.py b909fc0a65880f8bdceadac27fbfeb4f177a1e6c 11190 -repyV2/windows_api.py dc6b656b97450bd00f11cb3c423b00e4cef4522d 44407 -repyV2/harshexit.py b14c080ea38ecba8caa385ed04717a2d41936f36 4435 -repyV2/nix_common_api.py 559425c89e97dbd55fd280aeec8f43e9cbbf3ee6 5428 -repyV2/exception_hierarchy.py 11fdcc0927285d8b5d7f7d11f264c30189f668ad 6492 -repyV2/resourcemanipulation.py f82688d2ea6c2a4249020bdb8beb004a747a2e62 12395 -repyV2/freebsd_kinfo.py 56fde89ec98aa389417bc7f2e305111da350a006 31261 -repyV2/repy_constants.py 6fa03ecfc6a807901d7d9c4a3d23fe5923cc04ff 1924 -repyV2/tracebackrepy.py 473addaf902a13bb3f8cfcc4f4a9a62536a16a9e 9185 -repyV2/emulmisc.py 3dcef49d02982863683cd7b44e2e3f5c6fa83a3f 5662 -repyV2/persist.py 5842ad06d741d33aa18669e742594ce330dbd284 6641 -repyV2/repy.py 4eef470737e3abd09458fe4884f446da886dc3f8 13735 -repyV2/safe_check.py 9f022501e65a38575e2bfcbf0bf0e7ddf0e6571f 757 -repyV2/emulfile.py 
08f6c608d6210eb8b3e6107ef51650b5145249df 14829 -repyV2/loggingrepy_core.py 22717381d3b5b2fdce3c3681a55c59ef30309f34 7208 -repyV2/textops.py 20ea2ab6eef2a4c9b2fa7fe6b979e794b00d7d1d 4881 -repyV2/linux_api.py a53c1b6a1a48c0aa00c70faa44848e3a5ca94084 8617 -repyV2/repyhelper.py e415995bcc1abd8b76c4c32589cc495d76d691bc 17998 -repyV2/nmstatusinterface.py ab11e19d5e32f596cc6a4db5482ab6fe59ff1a3f 5993 -repyV2/win_cpu_nanny.py d2d0144a1022a9b57f28684a9de73ca98adda57f 1635 -repyV2/namespace.py a2e0c11a3959c46ca0556e9777954b5394022ee7 39156 -repyV2/nanny.py 9573b2ef4423e2c977f7c32ce679f355130cf1fd 14727 -repyV2/emultimer.py a5535fba66a5fb297fefb86ed716673bc1c0a44e 3081 -repyV2/statusstorage.py cc1ae962da79e474987443188165508884cbccd9 3014 -repyV2/portable_popen.py a0c0c10afcafc6d5c5242588a7d26ac2eade8859 951 -repyV2/emulcomm.py 4d4821cb725ca3f348da226aad1945da03847a64 65128 -repyV2/checkpythonversion.py 60c33ee7aa2e175ca7a659f3c4250a79dc20e4d1 989 -repyV2/idhelper.py 9ad51b4f7b1f57e5d6523a94d25f370c18422e7f 2239 -repyV2/dylink.r2py e63a9a4226f214482f7fe0910aace7993a68139c 17911 -repyV2/loggingrepy.py 84210f2a40443ba71390de25029a5e61c58d4ec5 3350 -repyV2/resource_constants.py 5a434c415ca35b727f84c1502319f032228dafc6 1768 -repyV2/freebsd_api.py 28c2b339008dd738120e9c33193eb4db36187c83 9131 -repyV2/fakelinecache.py f200d1f04dfd847afe4b866a7ee45d8d2f193600 172 -repyV2/virtual_namespace.py dde62a37a2540fc084a7943f50ec9b95ea449e30 3691 -repyV2/safe.py 692da8a16154dcf8b4e787a19f7ab4eee91d944b 24761 -repyV1/nonportable.py 2d312092df633fb6a26e968cc95f789e9227bc12 30783 -repyV1/darwin_api.py b909fc0a65880f8bdceadac27fbfeb4f177a1e6c 11190 -repyV1/windows_api.py 4f52afb3c0fbc75aa58fabccc5c270aa74594fec 53904 -repyV1/restrictions.py 379d2fc8edadebf72f3c8158bd4ebe964dbaa410 11274 -repyV1/misc.py 5756c0d5349034902c7e22d9b66491d25f215d82 1244 -repyV1/harshexit.py 0980d311f2889c1c6c9bc9c9aaf326e96925be7a 4593 -repyV1/nix_common_api.py 559425c89e97dbd55fd280aeec8f43e9cbbf3ee6 5428 
-repyV1/safety_exceptions.py c32d0e2df8a62bc12ae63f0f1db97ac2df94f3f2 661 -repyV1/freebsd_kinfo.py 56fde89ec98aa389417bc7f2e305111da350a006 31261 -repyV1/repy_constants.py e634a41b5af07963eb7a9331010f3339e94049f1 1878 -repyV1/tracebackrepy.py 5dfdfc3dc00196fa4af21129fcc8c8eb26843c74 8473 -repyV1/emulmisc.py 8d705f3ee5f8ae93daf2f84366d26d1296bc4a29 4842 -repyV1/persist.py 5842ad06d741d33aa18669e742594ce330dbd284 6641 -repyV1/repy.py 3141cb113631bc58378d24765a64e346b309734c 14992 -repyV1/safe_check.py 1bacea731d95459397c6c879787fdfc0bcf10af6 1914 -repyV1/emulfile.py 12fb1f3a0331c894097e514b479931f8c5c91192 14689 -repyV1/loggingrepy_core.py 9945eb07c2afe82afdf54861b1a84a311a972de9 7327 -repyV1/linux_api.py 4463549ecca31389fc8342513e9e8066e6fcae34 8403 -repyV1/nmstatusinterface.py d2556a7683313c6bc896bcde264e19dc716dd61f 6019 -repyV1/win_cpu_nanny.py d2d0144a1022a9b57f28684a9de73ca98adda57f 1635 -repyV1/nanny_resource_limits.py 72b968439f90aaddbd19922f1a34c63deced4695 5415 -repyV1/namespace.py 2e40739fa7fe1bd046bc65741646662b7695f620 50123 -repyV1/nanny.py f988f895fb7d7d09d7f7e09e9e536e58eb5db4e4 10785 -repyV1/emultimer.py 03b6039667353cff506c11cda224eabb3d8e8f5e 5098 -repyV1/statusstorage.py cc1ae962da79e474987443188165508884cbccd9 3014 -repyV1/portable_popen.py a0c0c10afcafc6d5c5242588a7d26ac2eade8859 951 -repyV1/emulcomm.py 96ffb8080563ddd8db9cb991567a39a59dbff518 59174 -repyV1/checkpythonversion.py 9bb432d9dd55c3527ad0ee91039185a1d64159a7 902 -repyV1/idhelper.py 0baaf2acef16061294ffb6c3720db931f9322655 2239 -repyV1/loggingrepy.py e3fe786a37f9a52fd567cd332985c08b51d6e197 3628 -repyV1/freebsd_api.py 28c2b339008dd738120e9c33193eb4db36187c83 9131 -repyV1/fakelinecache.py f200d1f04dfd847afe4b866a7ee45d8d2f193600 172 -repyV1/virtual_namespace.py da3000fb47ca8c7fe8d15261eeaa4ec83539f8e6 3662 -repyV1/safe.py 52bf543a4c10eb091a37f64470c7067ebaa9f2fa 25534 - -!65537 
557866932990265564813346837945216009650100269457008520807230636336625167921577949894286526920002881734238855860419174932656951974362343370020026416834321269774635925661641011339598577331737636761274226028271688930424687919871923801742501794448068494072609103437737982749692756006161176726089192600874582215124692507832609481032007749826359648626787971922401128297007762129348613349762437143430914234287109254850729729466170035485550189216748509928504973803750736136819602562716267799810916404720079951409116148926895842991828581263819634148035983372354022006054256256120951653628457503761092337713093460241877514448114549969960441491861760925657047572994694940450502582564954611767300886766717790058572862787391819492925191241274407608342577137059329893465874737063037657793566485636266599424376908993610043117296210055549809254537974537268788874232304233385220617379230709378779826506518307375164456309198740112260552071438089911509238095878652936893835510922813827902181777606889182196672095080774920735878534028121730622921046009637098146361228514191005791641307498215357501668661762250906788141603045313161295059343903517121205607026681706591254437404232864291751019270693955955655945893161832200510482287924213320459659588234931!3626016533.18!3628608533.18!None!None! 
145811786321894192170789311542231659254679435026245340312070310185338197903383499422844742243172308763511766166125804749853927657502275872491581278264830471640577661687947091844595759952969564472525462951105540316128797449677257623135636513232891526298176637302784620355490120419574214122134461493784700529089245674394436335182927753752769462457350058490967283675391613023023818205447447603812860309318701093354599377827923179624331532223875578555635751715219453526732234061553254092004259952025928141820506671009601106684121001078578819069084032812915041010932091756638388245375032089583282262848930676181457084467565265549459539610208458846607060510665844018612230519789561311775040599044116694871371200521214447879709829249600903296535134612856659469407686728007323898720518187287078935277687626394603729569086241098697207130035228193287861306918444425322234363662743488633371972558092947777216833623020344884899252567790094175335732517159223296533030351136854617061345704459726618989974635397645192612606479516775319160167976085082001042690862606917551758711161597825424117297242093177341200092788029047820939299344863492901751995239018182039879351278133187397230195845674841567153547320379770189345430598380809386207210645578546 From 18ecf68b5feb77f83c7f98b7ce9df717d599eba7 Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:33:15 -0500 Subject: [PATCH 08/17] Delete metainfo_new --- metainfo_new | 279 --------------------------------------------------- 1 file changed, 279 deletions(-) delete mode 100644 metainfo_new diff --git a/metainfo_new b/metainfo_new deleted file mode 100644 index 77e2caf..0000000 --- a/metainfo_new +++ /dev/null @@ -1,279 +0,0 @@ -Linux_resources.py 95a3e7e17a1ff2fce3d9fafb5f4d03b951cb2ab1 17825 -nmstatusmonitor.py 618ebc041617c2185e0806eb6cad88e665b32119 5176 -repyportability.py 84bd967a0e7beb7ce8d03c5ab9287bc57c9145fb 10807 -urllib.r2py 2914c93eeb8b47b45752e9bc205287e0e7138209 4401 -encasementlib.r2py 94cd1526233823e38da6918d95a7d58683f72e6d 
6836 -sshkey.r2py 6cf36ded75df694949141986808c291203bc5b55 7214 -servicelogger.py aef4fb928ae93089716fa1798d7758d60f052b6a 10211 -nmrequesthandler.py de50258f43cac130e2cf07e1cd4c5e9203e68fdc 11111 -centralizedadvertise_v2.r2py 089d5d9fe21803dedf396497f138d9506968c538 5783 -nonportable.py 6b3e74d3c7f8d2c63048075f3ec68567e0938516 28080 -librepyrunloop.r2py b97b9700c948354ed765c9ea3b082a8b82966ebb 9949 -loggingaffix.r2py be4172d5f51c1c48e3f4f72ae4d45a00b11483cf 5656 -nmrestrictionsprocessor.py 84aa8fcf1037d820a11d5f5db52c25b121de2be1 5840 -semaphore.r2py 6c4b8c816657cb6339c41687003ea969f43f2e5c 5191 -seattleclearinghouse_xmlrpc.py d1dc98bbd62c8c0f01c0db904f5fdcca99d3d795 19920 -writemetainfo.py 0d0dea66e73b618c82ccba45d038eb00f2d1c42f 4543 -darwin_api.py b909fc0a65880f8bdceadac27fbfeb4f177a1e6c 11190 -affix_repy_network_api_wrapper.r2py dbf65a834861afce3ddf3bb4abd8aa9e3dce5f6b 2927 -httpserver.r2py 3e517f4758579d830d874e367f35f920f5069bf1 28584 -serialize.r2py 98bddde889d407058e4ced9e3de2d2398f02a531 6573 -windows_api.py dc6b656b97450bd00f11cb3c423b00e4cef4522d 44407 -affix_stack.r2py 8cbc96595b620143335e9ec2930e124aa4b0ba8e 12383 -sha.r2py bba9db372fa51dba3c89e4056316d8e43ae43bc8 9982 -runonce.py 24bba4d84fe5c6ca371dea45e189bbb96585bc5a 8178 -daemon.py 7cd120327a44f5a17125072e4063249c48678828 1307 -wrapper.r2py 774103f5bcd516b6a2182d00a2c9b0a682d3696e 18806 -seash_dictionary.py 0f44ea7dcb3e625aae1eb5b60173377661d126c3 78870 -argparse.r2py 9ee82545246d712f9909b6db0db4192c1b272159 5823 -convert_print_to_log.sh 9e402d4e7fa235498535e3d1d51ed65cf9d6b1e9 1103 -softwareupdater.py~ b5f8a105b2a6cbd4fc8a12f89af661200929afe1 30868 -nmmain.py 111d5e72b613dc56a5e19c3cc955ede0977db90e 25663 -dylink_r2py.py 1d928548279d551aadd64178856ba16ce85459b4 18516 -harshexit.py b14c080ea38ecba8caa385ed04717a2d41936f36 4435 -restrictions.default d9ac5b281e1d9ff01ccbe62e8887c72858dcb6d4 7177 -xmlrpc_client.r2py 96f88ec041d7a4f3d92015ae882dc738a10d6b11 3157 -nix_common_api.py 
559425c89e97dbd55fd280aeec8f43e9cbbf3ee6 5428 -xmlrpc_common.r2py 12fd671718b3a0f426314b65cf6a5a37ad085dd6 14660 -secureclient.r2py 6e83e59297de02bc5b5fa3734d09ce24922b5962 2893 -exception_hierarchy.py 11fdcc0927285d8b5d7f7d11f264c30189f668ad 6492 -nmAPI.py abe232a9ff4acc0cafc854a733b38bb610e8aef5 29629 -cachedadvertise.r2py 97a2cb70e8b0111e61fc8b23999506ba11f5e71a 3361 -registerhttpcallback.r2py 616c12e27140603287b9f3372d6aca324fbb6402 29537 -makemehearaffix.r2py 962b5346c0d0c5e6b53218ac97733a89a5101205 9593 -Mac_BSD_resources.py bf8e570bb554607af6d9fec1e732a1b9b744f297 6811 -binascii.r2py 5d558d6da9f8320c883bf0868cd3d5c63dba81e9 5133 -resourcemanipulation.py f82688d2ea6c2a4249020bdb8beb004a747a2e62 12395 -seattleinstaller.py a20c2582e6d993a1cb70eb6e2b2e415445c36273 88448 -freebsd_kinfo.py 56fde89ec98aa389417bc7f2e305111da350a006 31261 -centralizedadvertise.r2py 48386a562f8ec57790a48d1bf1b9aa9d7954389d 5764 -command_callbacks.py bd938955ffc87a8fe55369c475d90fc70522ca35 95378 -safe_eval.r2py db53a8cd70ecbdb33ea1056218592eab2bc34913 1430 -nmconnectionmanager.py d104a5d4ef1168046b28b541a661a33a7b63a730 7002 -repy_constants.py 6fa03ecfc6a807901d7d9c4a3d23fe5923cc04ff 1924 -seash_global_variables.py d3c1113ac82b94aaab9ab43e0b221f06b807e9a0 1046 -nmthreadingerror.py ea77731639370a200f17d34c8e6a33b27c1918ec 3787 -tcp_time.r2py 0697608917ee98c5f53feea37cc520e76120c844 3953 -time_interface.r2py bf4ac855b3231d2e91e34044f1d69f0d850e91c7 4647 -tcp_relay_common_lib.r2py 390388654ca19618435bc451e0963775bde65d21 641 -tab_completer.py 6416cfec94c590e8b58cbf21205e93afcfdf3f67 9401 -tracebackrepy.py 473addaf902a13bb3f8cfcc4f4a9a62536a16a9e 9185 -emulmisc.py 3dcef49d02982863683cd7b44e2e3f5c6fa83a3f 5662 -measure_random.py ce81da23b560aeeb0bf5fd4e3cb5f8a36dffa509 3660 -tcprelayaffix.r2py 724d1875b27e4cac9b67a56c3977c97e598ff93e 20443 -baseaffix.r2py 2eafaa45a026fc08eefd46388538ee9e3b07f753 7738 -nminit.py 03b8eef5834794fc46174bf141204c6366bd3e70 14125 -secureadvertise_client.r2py 
02bade04d5d56aa4be8d55ddfaf936dd9671311b 7012 -librepyrandom.r2py e5975dbd3ed2db6aee250a5fc67a6fdcbfed3ba6 2370 -measuredisk.py 5a6dcebc2e8831268cb2c94a29e246a1027f0fbe 8182 -persist.py 5842ad06d741d33aa18669e742594ce330dbd284 6641 -nmmain.py~ 41aabef816d29d8a742bf8e9fd07888022d70d76 25674 -ntp_time.r2py 525dc64b1ac2187c88ced9fc2fb68670551ccddf 4352 -seash_modules.py d831748fdfd1ff477cd8ecf6e230eac82e0a12a5 18553 -repy.py 4eef470737e3abd09458fe4884f446da886dc3f8 13735 -createnodekeys.py 86040e2e68c7e06115197d7b6732a822b0f058d4 1522 -fastnmclient.py 9dc05c89b6c4267654ae1b5a407121fc5a539999 15785 -pycryptorsa.r2py 0386ef25a1a3e47c599e342e351cb633ca7c4694 36561 -librepythread.r2py 94b02bed815f24115c1febaeb3969376a96f588f 21271 -seash_exceptions.py c0f0aec96fc6c35a6e8b545c632a2d2a4b4dd90b 1058 -sockettimeout.r2py 7e563f5d6d0900de4e2ccb095c4ce19316e792fb 9308 -urlparse.r2py af3bd3ac682ffb5f5efe3676686181ed87468e57 5103 -nmresourcemath.py c9d5427b40ea8a82f3a83d8d0109cc525371ad97 2523 -safe_check.py 9f022501e65a38575e2bfcbf0bf0e7ddf0e6571f 757 -timeout_xmlrpclib.py f797757fea756ce7ec72172feb31a275a191700a 1051 -vessel.restrictions dc3a72229ab8fd8b78ab7572cb3efc4d27950875 1992 -nmadvertise.py fbbf576a1b3605d3f254de2fc10d2cdf277b2228 5933 -strace.py d2fb23abbabdf5aa1eeba1102a0df9f4954f7c09 7567 -emulfile.py 08f6c608d6210eb8b3e6107ef51650b5145249df 14829 -loggingrepy_core.py 22717381d3b5b2fdce3c3681a55c59ef30309f34 7208 -seash_helper.py dd72e6f9369dad29fd1c697a7cba81ca45857e1c 31705 -secureadvertise_server.r2py 6736b69820488a97fe5c962311f501940b21aeca 15079 -librepy.r2py 9a2660e75769a5403b6a13e5a55a747c5328245f 4403 -time.r2py cfccb52432216e3bbd4d55cfba21892d53466cc7 1196 -repyunit.r2py be0f4291a8b861e68ea314bbe45a5b4c2799b760 18807 -librepyfile.r2py d797fa41d7a229476e80e1545cba47b2ca32d2e4 20891 -textops.py 20ea2ab6eef2a4c9b2fa7fe6b979e794b00d7d1d 4881 -signeddata.r2py 910ac40b55b9320a077cb7ad388225305a5acdb2 15812 -httpretrieve.r2py 6084fa2aa1cfd30d87e6077a9ffa679c828a4797 
18283 -tcp_relay.r2py 37ca81b0e0346b2273dc0887b0ef54d7464d6d34 25135 -linux_api.py a53c1b6a1a48c0aa00c70faa44848e3a5ca94084 8617 -Win_WinCE_resources.py 2c405bc4934d7a3b0632ba231a412ad08393b544 6936 -repyhelper.py e415995bcc1abd8b76c4c32589cc495d76d691bc 17998 -servicelookup.r2py 2f879b21e17b823db40a505c6ce9f5654c41c7cb 798 -nmstatusinterface.py ab11e19d5e32f596cc6a4db5482ab6fe59ff1a3f 5993 -win_cpu_nanny.py d2d0144a1022a9b57f28684a9de73ca98adda57f 1635 -benchmark_resources.py 2d27cff92f2b95ad99e1f8f0b8cd5437940f885a 23151 -listops.r2py 0e7bac3e5604acf7c16b0a5c61cbb77b3d5d7663 2677 -doradvertise.r2py 97c90c519c00e61905c73ca24872877ca1d66727 4936 -opendhtadvertise.r2py ee7a6cce82d2928a804803c6f45ee3597f40e36c 8688 -pydes.r2py c7e6a0d3fff04fd9c4039df6a1368fb7c3b2441e 29527 -fastsigneddata.py 9c123071d179dc584a3264bb952a76d8714acbcf 16218 -namespace.py a2e0c11a3959c46ca0556e9777954b5394022ee7 39156 -xmlparse.r2py e93ee3e6df83e01f1f569c8c48a222e2cb76d69a 10968 -seash.py ab956d08016c547f6a9f1f0776894d98a509d9ef 11399 -cv.r2py ab0922c50e12f8d05edabc20fb677e7fa9df87df 6726 -nanny.py 9573b2ef4423e2c977f7c32ce679f355130cf1fd 14727 -getvesselresources.r2py 48dc056470a71f302ece1733e202d0b7dc5e7408 5313 -session.r2py deb9cb6f4d0b39b9eecf731d08d59ce7ed2cece5 3244 -librepysocket.r2py 656ada9708b75c7abba060e92d276c002d989816 35091 -geoip_client.r2py a6f5fd11b3f49a1d1ecca4f8148ef98fb84ea03f 3237 -emultimer.py a5535fba66a5fb297fefb86ed716673bc1c0a44e 3081 -md5py.r2py f0aa5d62e8cb393dd36f270a6728277c6f9d3a55 15429 -base64.r2py 9bec5a042571e78297f713166147d4f640297f77 7291 -statusstorage.py cc1ae962da79e474987443188165508884cbccd9 3014 -portable_popen.py a0c0c10afcafc6d5c5242588a7d26ac2eade8859 951 -sshkey_paramiko.r2py 24ee4c516bad9fa72eed291f8f3f2210de00e78b 15431 -rsa.r2py 149c80a12b826e983b147585aa2d75a8cb84c034 22327 -seattleuninstaller.py 1d9ba105680afac079119d0c5cc62eaa76bf42fc 21742 -readline.so.mac 2c053c4fe51d542d51ac1155f078e0309706abe0 664640 -emulcomm.py 
4d4821cb725ca3f348da226aad1945da03847a64 65128 -checkpythonversion.py 60c33ee7aa2e175ca7a659f3c4250a79dc20e4d1 989 -idhelper.py 9ad51b4f7b1f57e5d6523a94d25f370c18422e7f 2239 -udpcentralizedadvertise.r2py 1c7e93d78c1692ddcba7db8f613dd83a70d84ac8 10275 -advertisepipe.r2py 433d7db0c66eabe5f910cfa3fb7bc775f6e0e931 8239 -noopaffix.r2py abb0e66d1a295962d183d50334a94e87e2201404 515 -namingandresolveraffix.r2py 305408d21410934edc55205163abaf49c491accf 10830 -deserialize.r2py aa443adcd1af9a28debb425e3e25717e729420c0 10813 -dylink.r2py e63a9a4226f214482f7fe0910aace7993a68139c 17911 -advertiseserver_v2.r2py e3cb6a8e7fc3067fadbaf810a8b988f5b553c2dc 15204 -xmlrpc_server.r2py ed2fa2913294a634402bdeca7938772cf5f086fc 8220 -loggingrepy.py 84210f2a40443ba71390de25029a5e61c58d4ec5 3350 -resource_constants.py 5a434c415ca35b727f84c1502319f032228dafc6 1768 -repypp.py 77e7234447437e29845d89daa1082072dffd5c90 5804 -LICENSE 82936ad5453bcfa3137113ddfd64d867cf909dba 1082 -stop_all_seattle_processes.py c13e28dc664cf7cdf0801b77cd8f4660caf637e7 1259 -freebsd_api.py 28c2b339008dd738120e9c33193eb4db36187c83 9131 -affix_exceptions.r2py 1194298a76c5e28d559cb63903b62a3b88e0d5a9 1067 -parallelize.r2py c246c14feb28a6dbc4f3a2a06eff72f9a3a4acb4 9841 -coordinationaffix.r2py 8e062a97697b698caf8f11cdcb9f868ff4cbbf67 8781 -fakelinecache.py f200d1f04dfd847afe4b866a7ee45d8d2f193600 172 -domainnameinfo.r2py e0a44c370b3cc6da0128f1d751e39d9c51177a43 7175 -virtual_namespace.py dde62a37a2540fc084a7943f50ec9b95ea449e30 3691 -advertise.r2py 8341b8345792cb581f6105694b2766768d3f6df9 13868 -uniqueid.r2py 25940ca7516c76c29c257cc686e1e68904549f65 1205 -update_crontab_entry.py 539d081d8fa23fcee9b6993151d141273256a344 6194 -softwareupdater.py 646bc3c2e03ecb5dc55e2282711f81f3c2c0cc86 30868 -create_installer_state.py f5a30f315828256e61195f3b11c68876e4a68758 12665 -safe.py 692da8a16154dcf8b4e787a19f7ab4eee91d944b 24761 -math.r2py bbcd7e751f3de9cbad73b9044fa8be12bf980113 1793 -nmclient.r2py 
32727f6d429feedfcd40d9cb1f511740f864a9f4 16672 -random.r2py e5e5e0145387476055f7089d95e6cb14f231c60e 16344 -readline_windows.py 6c9d40fe3cb00f587df15a663fe397bb9a81c306 2633 -affix_wrapper_lib.r2py 2bfd630ab09871d5960aee5fb967c741653ac29c 5650 -canilisten.r2py 49009de0f62d5778d9f8fb9224e3decc9387292a 23334 -pyreadline/error.py 7febcedee8658d68c9327103f5cc4c24736982a0 496 -pyreadline/logger.py 807fcc985f97914e3d5c5b4579a08b52c14546c5 2117 -pyreadline/release.py ae25e82c3e5af4e437e769c65cc2aa71923eb8e4 3287 -pyreadline/get_doc.py 1cafee75ff5417054f6830231582e1f1c10679f2 496 -pyreadline/unicode_helper.py e262a5c36fba06c722cc18620c963542c215e3dd 1393 -pyreadline/__init__.py e5461fff668b699d00c310a6042e2b1ee63f186f 555 -pyreadline/logserver.py 2e93a42b549b1f51811e40def16fe47f106d6f18 1516 -pyreadline/rlmain.py 2c6ceef2c2fba101741086fa14acce4caa280700 21677 -pyreadline/keysyms/keysyms.py ee2ab767e218d3bb570a79d152deea0f94327630 5291 -pyreadline/keysyms/ironpython_keysyms.py e07130f0991285f5603cf72a3ac62e6695f10944 7160 -pyreadline/keysyms/__init__.py 02a47de47d7408a8b9780e2331aee0edbe3a9047 430 -pyreadline/keysyms/common.py dcb143e3ffe59951b0db6344173f0bbd7e1cbf46 5191 -pyreadline/keysyms/winconstants.py 1818342c0f5d5652d69c489a9362cb61b0bccbe8 2544 -pyreadline/console/event.py 692562631b9d9d4aace6f41582ddb0fd99104610 1176 -pyreadline/console/console.py be95eaf3d7192781ad150c3ec7e8098e38b3d6cf 31974 -pyreadline/console/consolebase.py ee68cb0834be85fb4e279fa4ed9d5d817c29a7d6 1591 -pyreadline/console/__init__.py 9eb34082d09b008122f5802a449f9a97777888b5 461 -pyreadline/console/ironpython_console.py 18bde9cc357f00bd5cd0ea40efdaaa34ecfd39e6 14183 -pyreadline/console/ansi.py 64e284d7175d66c0c1e34ad9f632fbd45a74ee02 7539 -pyreadline/console/console_attributes.py db447f2d1df7a807690365ab79318ec2bc5328d3 542 -pyreadline/clipboard/ironpython_clipboard.py c8534915ca103cff7291b3c7068fd42315759b66 846 -pyreadline/clipboard/win32_clipboard.py b981ab9810d6a4152c19fffe3d46fe808d6de923 
3746 -pyreadline/clipboard/__init__.py 3bc97551179b5bdcf97bbe4181f6567d86a3688e 2135 -pyreadline/clipboard/no_clipboard.py 12cf184e045e996dd4228ec4b64b64f61f367160 549 -pyreadline/lineeditor/history.py 6df2ad030d0a5b78a39fa64641f851231a70d7ea 10833 -pyreadline/lineeditor/wordmatcher.py 68b9ed1276da25492d99340454291b5279c5eca5 3432 -pyreadline/lineeditor/__init__.py da39a3ee5e6b4b0d3255bfef95601890afd80709 0 -pyreadline/lineeditor/lineobj.py f3f367f6631babe018732860fe7ddb1ca1c07203 25719 -pyreadline/modes/vi.py 6b57c8096252b274d126952d7b369bfd46208529 39664 -pyreadline/modes/basemode.py cd79df69caff98bb7b5f11358563fbb5a800f67d 21945 -pyreadline/modes/emacs.py b35bffb5293191d1fcd8417c406a471eb3d88085 31847 -pyreadline/modes/__init__.py 80e3c50b7c223ad236d6f4e5d188c8fbf988560c 180 -pyreadline/modes/notemacs.py 69c04727bcddf1a90cb0716597d63cbeed074870 25690 -modules/geoip/__init__.py 589daf32994ca46328591a155d495770867775a8 5136 -modules/clearinghouse/command_callbacks.py 3cd7bf81139e8001d56e71a4fb2edc1d7be094b1 10394 -modules/clearinghouse/clearinghouse.py 3cdad578ee04c5e70e688bfa458f318af14c1e64 3269 -modules/clearinghouse/__init__.py 69c66945e410623262de002a0f8f136551bef35a 163 -modules/modules/command_callbacks.py 3e81f98835cb8e39c65ff830ce4ec3fc0937eca7 5289 -modules/modules/__init__.py 39177f89555a5ce093212a2026d9759c354f5810 4514 -modules/factoids/factoid.txt c5b8beecb1cc46a2efa6dd7c081c7d5f044edc7d 426 -modules/factoids/__init__.py d2d17fbc2acce201c9c88a5937fa1fd5ecf4ab72 5184 -modules/variables/__init__.py 8801fd73e3e747d4e108b8d7d7e2329c781f3150 7706 -repyV2/repyportability.py 84bd967a0e7beb7ce8d03c5ab9287bc57c9145fb 10807 -repyV2/servicelogger.py aef4fb928ae93089716fa1798d7758d60f052b6a 10211 -repyV2/nonportable.py 6b3e74d3c7f8d2c63048075f3ec68567e0938516 28080 -repyV2/darwin_api.py b909fc0a65880f8bdceadac27fbfeb4f177a1e6c 11190 -repyV2/windows_api.py dc6b656b97450bd00f11cb3c423b00e4cef4522d 44407 -repyV2/harshexit.py b14c080ea38ecba8caa385ed04717a2d41936f36 
4435 -repyV2/nix_common_api.py 559425c89e97dbd55fd280aeec8f43e9cbbf3ee6 5428 -repyV2/exception_hierarchy.py 11fdcc0927285d8b5d7f7d11f264c30189f668ad 6492 -repyV2/resourcemanipulation.py f82688d2ea6c2a4249020bdb8beb004a747a2e62 12395 -repyV2/freebsd_kinfo.py 56fde89ec98aa389417bc7f2e305111da350a006 31261 -repyV2/repy_constants.py 6fa03ecfc6a807901d7d9c4a3d23fe5923cc04ff 1924 -repyV2/tracebackrepy.py 473addaf902a13bb3f8cfcc4f4a9a62536a16a9e 9185 -repyV2/emulmisc.py 3dcef49d02982863683cd7b44e2e3f5c6fa83a3f 5662 -repyV2/persist.py 5842ad06d741d33aa18669e742594ce330dbd284 6641 -repyV2/repy.py 4eef470737e3abd09458fe4884f446da886dc3f8 13735 -repyV2/safe_check.py 9f022501e65a38575e2bfcbf0bf0e7ddf0e6571f 757 -repyV2/emulfile.py 08f6c608d6210eb8b3e6107ef51650b5145249df 14829 -repyV2/loggingrepy_core.py 22717381d3b5b2fdce3c3681a55c59ef30309f34 7208 -repyV2/textops.py 20ea2ab6eef2a4c9b2fa7fe6b979e794b00d7d1d 4881 -repyV2/linux_api.py a53c1b6a1a48c0aa00c70faa44848e3a5ca94084 8617 -repyV2/repyhelper.py e415995bcc1abd8b76c4c32589cc495d76d691bc 17998 -repyV2/nmstatusinterface.py ab11e19d5e32f596cc6a4db5482ab6fe59ff1a3f 5993 -repyV2/win_cpu_nanny.py d2d0144a1022a9b57f28684a9de73ca98adda57f 1635 -repyV2/namespace.py a2e0c11a3959c46ca0556e9777954b5394022ee7 39156 -repyV2/nanny.py 9573b2ef4423e2c977f7c32ce679f355130cf1fd 14727 -repyV2/emultimer.py a5535fba66a5fb297fefb86ed716673bc1c0a44e 3081 -repyV2/statusstorage.py cc1ae962da79e474987443188165508884cbccd9 3014 -repyV2/portable_popen.py a0c0c10afcafc6d5c5242588a7d26ac2eade8859 951 -repyV2/emulcomm.py 4d4821cb725ca3f348da226aad1945da03847a64 65128 -repyV2/checkpythonversion.py 60c33ee7aa2e175ca7a659f3c4250a79dc20e4d1 989 -repyV2/idhelper.py 9ad51b4f7b1f57e5d6523a94d25f370c18422e7f 2239 -repyV2/dylink.r2py e63a9a4226f214482f7fe0910aace7993a68139c 17911 -repyV2/loggingrepy.py 84210f2a40443ba71390de25029a5e61c58d4ec5 3350 -repyV2/resource_constants.py 5a434c415ca35b727f84c1502319f032228dafc6 1768 -repyV2/freebsd_api.py 
28c2b339008dd738120e9c33193eb4db36187c83 9131 -repyV2/fakelinecache.py f200d1f04dfd847afe4b866a7ee45d8d2f193600 172 -repyV2/virtual_namespace.py dde62a37a2540fc084a7943f50ec9b95ea449e30 3691 -repyV2/safe.py 692da8a16154dcf8b4e787a19f7ab4eee91d944b 24761 -repyV1/nonportable.py 2d312092df633fb6a26e968cc95f789e9227bc12 30783 -repyV1/darwin_api.py b909fc0a65880f8bdceadac27fbfeb4f177a1e6c 11190 -repyV1/windows_api.py 4f52afb3c0fbc75aa58fabccc5c270aa74594fec 53904 -repyV1/restrictions.py 379d2fc8edadebf72f3c8158bd4ebe964dbaa410 11274 -repyV1/misc.py 5756c0d5349034902c7e22d9b66491d25f215d82 1244 -repyV1/harshexit.py 0980d311f2889c1c6c9bc9c9aaf326e96925be7a 4593 -repyV1/nix_common_api.py 559425c89e97dbd55fd280aeec8f43e9cbbf3ee6 5428 -repyV1/safety_exceptions.py c32d0e2df8a62bc12ae63f0f1db97ac2df94f3f2 661 -repyV1/freebsd_kinfo.py 56fde89ec98aa389417bc7f2e305111da350a006 31261 -repyV1/repy_constants.py e634a41b5af07963eb7a9331010f3339e94049f1 1878 -repyV1/tracebackrepy.py 5dfdfc3dc00196fa4af21129fcc8c8eb26843c74 8473 -repyV1/emulmisc.py 8d705f3ee5f8ae93daf2f84366d26d1296bc4a29 4842 -repyV1/persist.py 5842ad06d741d33aa18669e742594ce330dbd284 6641 -repyV1/repy.py 3141cb113631bc58378d24765a64e346b309734c 14992 -repyV1/safe_check.py 1bacea731d95459397c6c879787fdfc0bcf10af6 1914 -repyV1/emulfile.py 12fb1f3a0331c894097e514b479931f8c5c91192 14689 -repyV1/loggingrepy_core.py 9945eb07c2afe82afdf54861b1a84a311a972de9 7327 -repyV1/linux_api.py 4463549ecca31389fc8342513e9e8066e6fcae34 8403 -repyV1/nmstatusinterface.py d2556a7683313c6bc896bcde264e19dc716dd61f 6019 -repyV1/win_cpu_nanny.py d2d0144a1022a9b57f28684a9de73ca98adda57f 1635 -repyV1/nanny_resource_limits.py 72b968439f90aaddbd19922f1a34c63deced4695 5415 -repyV1/namespace.py 2e40739fa7fe1bd046bc65741646662b7695f620 50123 -repyV1/nanny.py f988f895fb7d7d09d7f7e09e9e536e58eb5db4e4 10785 -repyV1/emultimer.py 03b6039667353cff506c11cda224eabb3d8e8f5e 5098 -repyV1/statusstorage.py cc1ae962da79e474987443188165508884cbccd9 3014 
-repyV1/portable_popen.py a0c0c10afcafc6d5c5242588a7d26ac2eade8859 951 -repyV1/emulcomm.py 96ffb8080563ddd8db9cb991567a39a59dbff518 59174 -repyV1/checkpythonversion.py 9bb432d9dd55c3527ad0ee91039185a1d64159a7 902 -repyV1/idhelper.py 0baaf2acef16061294ffb6c3720db931f9322655 2239 -repyV1/loggingrepy.py e3fe786a37f9a52fd567cd332985c08b51d6e197 3628 -repyV1/freebsd_api.py 28c2b339008dd738120e9c33193eb4db36187c83 9131 -repyV1/fakelinecache.py f200d1f04dfd847afe4b866a7ee45d8d2f193600 172 -repyV1/virtual_namespace.py da3000fb47ca8c7fe8d15261eeaa4ec83539f8e6 3662 -repyV1/safe.py 52bf543a4c10eb091a37f64470c7067ebaa9f2fa 25534 - -!65537 637025421205050873596952365261771589773951322631598475106072787020271993053175764188117250704423518194132255633483673262786885335768938773384277083799610885609007389659628664009437678679547920353536220242626289496564286851908334878817556058264391117801101600261049148163810713672381489146862405173179694757012124700418784698863900022574723314468742948087760377648874820849722447060951547948966273618238367948950338286051116019772088223843609162095459426393334680529722250866695065650620212198838326952459691203415045593749611334748080296287149032472439198726118625478070245277378757441525359087444638854258738354342671602496922366016004736236214347025516130747701580901920727684108661222403912764291731327147737837683772263557563688440473804536076248109992516391217595312055691798922657899501386669019455042617664245392970449575870338454728331871102094285124319960621330393021572186170740044818946739408359631036940686520915351289107751181604209476464627301953229727051590037149102889990938284590213971613156943445571639683977746161053986604365951650449726948385512772016178523653987213735745284871644785947680521097303992011381545470392037724783259014902814829265564292625674624573189198192669832632146320948698286979867829240183059!3626372162.08!3628964162.08!None!None! 
445219046961825869688433255448279615450086762356307207009026286998589719618876542189437982237242521206048008162350307113738583749270087796213360492196504670373639159052902921850351892088804829938180605913016141535510001525069072271347637436917362632561115055697829230278611021565612704627585640358825906751610752331688134514534768392542573374290388860340174589193240777783137520379673442684047379160063123600806453965222330257530032447325970734722735172240755163570331313283574545898103494211687991223081986901800221998836051097785678606368850360200911491699082379445845694513711642727918865114527087339991657137233888747012644714518290531036380443887286352144438007522247053797905539094148125133425024279968621363095728793956487702906133042214850773566330524796197752301285397919533500166198007497433055161591353065910968569853471458133775642901989677835015066560059873718132070545170686638747765886332428157386719572362712829554315906310761217214102092244011357135404583541065914535372791280921146057416137178192963422330287097244072379146413328799671473541978793506236843079244345869752617398469048511271445140352028728924119177485160222500043456896014194841162814576505342341419733069819805735175616980521093173083283310837004243 From 8747ec6c581bf908c8cd6b80856d1e94f7a67537 Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:33:24 -0500 Subject: [PATCH 09/17] Delete namespace.py --- namespace.py | 1221 -------------------------------------------------- 1 file changed, 1221 deletions(-) delete mode 100644 namespace.py diff --git a/namespace.py b/namespace.py deleted file mode 100644 index 7b3c49f..0000000 --- a/namespace.py +++ /dev/null @@ -1,1221 +0,0 @@ -""" - - namespace.py - - - September 2009 - - - Justin Samuel - - - This is the namespace layer that ensures separation of the namespaces of - untrusted code and our code. 
It provides a single public function to be - used to setup the context in which untrusted code is exec'd (that is, the - context that is seen as the __builtins__ by the untrusted code). - - The general idea is that any function or object that is available between - trusted and untrusted code gets wrapped in a function or object that does - validation when the function or object is used. In general, if user code - is not calling any functions improperly, neither the user code nor our - trusted code should ever notice that the objects and functions they are - dealing with have been wrapped by this namespace layer. - - All of our own api functions are wrapped in NamespaceAPIFunctionWrapper - objects whose wrapped_function() method is mapped in to the untrusted - code's context. When called, the wrapped_function() method performs - argument, return value, and exception validation as well as additional - wrapping and unwrapping, as needed, that is specific to the function - that was ultimately being called. If the return value or raised exceptions - are not considered acceptable, a NamespaceViolationError is raised. If the - arguments are not acceptable, a TypeError is raised. - - Note that callback functions that are passed from untrusted user code - to trusted code are also wrapped (these are arguments to wrapped API - functions, so we get to wrap them before calling the underlying function). - The reason we wrap these is so that we can intercept calls to the callback - functions and wrap arguments passed to them, making sure that handles - passed as arguments to the callbacks get wrapped before user code sees them. 
- - The function and object wrappers have been defined based on the API as - documented at https://seattle.cs.washington.edu/wiki/RepyLibrary - - Example of using this module (this is really the only way to use the module): - - import namespace - usercontext = {} - namespace.wrap_and_insert_api_functions(usercontext) - safe.safe_exec(usercode, usercontext) - - The above code will result in the dict usercontext being populated with keys - that are the names of the functions available to the untrusted code (such as - 'open') and the values are the wrapped versions of the actual functions to be - called (such as 'emulfile.emulated_open'). - - Note that some functions wrapped by this module lose some python argument - flexibility. Wrapped functions can generally only have keyword args in - situations where the arguments are optional. Using keyword arguments for - required args may not be supported, depending on the implementation of the - specific argument check/wrapping/unwrapping helper functions for that - particular wrapped function. If this becomes a problem, it can be dealt with - by complicating some of the argument checking/wrapping/unwrapping code in - this module to make the checking functions more flexible in how they take - their arguments. - - Implementation details: - - The majority of the code in this module is made up of helper functions to do - argument checking, etc. for specific wrapped functions. - - The most important parts to look at in this module for maintenance and - auditing are the following: - - USERCONTEXT_WRAPPER_INFO - - The USERCONTEXT_WRAPPER_INFO is a dictionary that defines the API - functions that are wrapped and inserted into the user context when - wrap_and_insert_api_functions() is called. 
- - FILE_OBJECT_WRAPPER_INFO - LOCK_OBJECT_WRAPPER_INFO - TCP_SOCKET_OBJECT_WRAPPER_INFO - TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO - UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO - VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO - - The above four dictionaries define the methods available on the wrapped - objects that are returned by wrapped functions. Additionally, timerhandle - and commhandle objects are wrapped but instances of these do not have any - public methods and so no *_WRAPPER_INFO dictionaries are defined for them. - - NamespaceObjectWrapper - NamespaceAPIFunctionWrapper - - The above two classes are the only two types of objects that will be - allowed in untrusted code. In fact, instances of NamespaceAPIFunctionWrapper - are never actually allowed in untrusted code. Rather, each function that - is wrapped has a single NamespaceAPIFunctionWrapper instance created - when wrap_and_insert_api_functions() is called and what is actually made - available to the untrusted code is the wrapped_function() method of each - of the corresponding NamespaceAPIFunctionWrapper instances. - - NamespaceInternalError - - If this error is raised anywhere (along with any other unexpected exceptions), - it should result in termination of the running program (see the except blocks - in NamespaceAPIFunctionWrapper.wrapped_function). -""" - -import types - -# To check if objects are thread.LockType objects. -import thread - -import emulcomm -import emulfile -import emulmisc -import emultimer -import nonportable -import safe # Used to get SafeDict -import tracebackrepy -import virtual_namespace - -from exception_hierarchy import * - -# Save a copy of a few functions not available at runtime. -_saved_getattr = getattr -_saved_callable = callable -_saved_hash = hash -_saved_id = id - - -############################################################################## -# Public functions of this module to be called from the outside. 
-############################################################################## - -def wrap_and_insert_api_functions(usercontext): - """ - This is the main public function in this module at the current time. It will - wrap each function in the usercontext dict in a wrapper with custom - restrictions for that specific function. These custom restrictions are - defined in the dictionary USERCONTEXT_WRAPPER_INFO. - """ - - _init_namespace() - - for function_name in USERCONTEXT_WRAPPER_INFO: - function_info = USERCONTEXT_WRAPPER_INFO[function_name] - wrapperobj = NamespaceAPIFunctionWrapper(function_info) - usercontext[function_name] = wrapperobj.wrapped_function - - - - - -############################################################################## -# Helper functions for the above public function. -############################################################################## - -# Whether _init_namespace() has already been called. -initialized = False - -def _init_namespace(): - """ - Performs one-time initialization of the namespace module. - """ - global initialized - if not initialized: - initialized = True - _prepare_wrapped_functions_for_object_wrappers() - - - - - -# These dictionaries will ultimately contain keys whose names are allowed -# methods that can be called on the objects and values which are the wrapped -# versions of the functions which are exposed to users. If a dictionary -# is empty, it means no methods can be called on a wrapped object of that type. -file_object_wrapped_functions_dict = {} -lock_object_wrapped_functions_dict = {} -tcp_socket_object_wrapped_functions_dict = {} -tcp_server_socket_object_wrapped_functions_dict = {} -udp_server_socket_object_wrapped_functions_dict = {} -virtual_namespace_object_wrapped_functions_dict = {} - -def _prepare_wrapped_functions_for_object_wrappers(): - """ - Wraps functions that will be used whenever a wrapped object is created. 
- After this has been called, the dictionaries such as - file_object_wrapped_functions_dict have been populated and therefore can be - used by functions such as wrap_socket_obj(). - """ - objects_tuples = [(FILE_OBJECT_WRAPPER_INFO, file_object_wrapped_functions_dict), - (LOCK_OBJECT_WRAPPER_INFO, lock_object_wrapped_functions_dict), - (TCP_SOCKET_OBJECT_WRAPPER_INFO, tcp_socket_object_wrapped_functions_dict), - (TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO, tcp_server_socket_object_wrapped_functions_dict), - (UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO, udp_server_socket_object_wrapped_functions_dict), - (VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO, virtual_namespace_object_wrapped_functions_dict)] - - for description_dict, wrapped_func_dict in objects_tuples: - for function_name in description_dict: - function_info = description_dict[function_name] - wrapperobj = NamespaceAPIFunctionWrapper(function_info, is_method=True) - wrapped_func_dict[function_name] = wrapperobj.wrapped_function - - - - - -############################################################################## -# Helper functions. -############################################################################## - -def _handle_internalerror(message, exitcode): - """ - Terminate the running program. This is used rather than - tracebackrepy.handle_internalerror directly in order to make testing easier.""" - tracebackrepy.handle_internalerror(message, exitcode) - - - - - -def _is_in(obj, sequence): - """ - A helper function to do identity ("is") checks instead of equality ("==") - when using X in [A, B, C] type constructs. So you would write: - if _is_in(type(foo), [int, long]): - instead of: - if type(foo) in [int, long]: - """ - for item in sequence: - if obj is item: - return True - return False - - - - - -############################################################################## -# Constants that define which functions should be wrapped and how. 
These are -# used by the functions wrap_and_insert_api_functions() and -# wrap_builtin_functions(). -############################################################################## - -class BaseProcessor(object): - """Base type for ValueProcess and ObjectProcessor.""" - - - - - -class ValueProcessor(BaseProcessor): - """ - This is for simple/builtin types and combinations of them. Basically, - anything that needs to be copied when used as an argument or return - value and doesn't need to be wrapped or unwrapped as it passes through - the namespace layer. - """ - - def check(self): - raise NotImplementedError - - def copy(self, val): - return _copy(val) - - - -class ObjectProcessor(BaseProcessor): - """ - This is for for anything that needs to be wrapped or unwrapped (not copied) - as it passes through the namespace layer. - """ - - def check(self): - raise NotImplementedError - - def wrap(self, val): - raise NotImplementedError - - def unwrap(self, val): - return val._wrapped__object - - - - - -class Str(ValueProcessor): - """Allows str or unicode.""" - - def __init__(self, maxlen=None, minlen=None): - self.maxlen = maxlen - self.minlen = minlen - - - - def check(self, val): - if not _is_in(type(val), [str, unicode]): - raise RepyArgumentError("Invalid type %s" % type(val)) - - if self.maxlen is not None: - if len(val) > self.maxlen: - raise RepyArgumentError("Max string length is %s" % self.maxlen) - - if self.minlen is not None: - if len(val) < self.minlen: - raise RepyArgumentError("Min string length is %s" % self.minlen) - - - - - -class Int(ValueProcessor): - """Allows int or long.""" - - def __init__(self, min=0): - self.min = min - - - - def check(self, val): - if not _is_in(type(val), [int, long]): - raise RepyArgumentError("Invalid type %s" % type(val)) - - if val < self.min: - raise RepyArgumentError("Min value is %s." % self.min) - - -class NoneOrInt(ValueProcessor): - """Allows a NoneType or an int. 
This doesn't enforce min limit on the - ints.""" - - def check(self, val): - if val is not None and not _is_in(type(val), [int, long]): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - - - -class StrOrInt(ValueProcessor): - """Allows a string or int. This doesn't enforce max/min/length limits on the - strings and ints.""" - - def check(self, val): - if not _is_in(type(val), [int, long, str, unicode]): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - - -class Float(ValueProcessor): - """Allows float, int, or long.""" - - def __init__(self, allow_neg=False): - self.allow_neg = allow_neg - - - - def check(self, val): - if not _is_in(type(val), [int, long, float]): - raise RepyArgumentError("Invalid type %s" % type(val)) - - if not self.allow_neg: - if val < 0: - raise RepyArgumentError("Must be non-negative.") - - - - - -class Bool(ValueProcessor): - """Allows bool.""" - - def check(self, val): - if type(val) is not bool: - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - - -class ListOfStr(ValueProcessor): - """Allows lists of strings. This doesn't enforce max/min/length limits on the - strings and ints.""" - - def check(self, val): - if not type(val) is list: - raise RepyArgumentError("Invalid type %s" % type(val)) - - for item in val: - Str().check(item) - - - - - -class List(ValueProcessor): - """Allows lists. The list may contain anything.""" - - def check(self, val): - if not type(val) is list: - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - - -class Dict(ValueProcessor): - """Allows dictionaries. The dictionaries may contain anything.""" - - def check(self, val): - if not type(val) is dict: - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - - -class DictOfStrOrInt(ValueProcessor): - """ - Allows a tuple that contains dictionaries that only contain string keys - and str or int values. This doesn't enforce max/min/length limits on the - strings and ints. 
- """ - - def check(self, val): - if not type(val) is dict: - raise RepyArgumentError("Invalid type %s" % type(val)) - - for key, value in val.items(): - Str().check(key) - StrOrInt().check(value) - - - - - -class Func(ValueProcessor): - """Allows a user-defined function object.""" - - def check(self, val): - if not _is_in(type(val), [types.FunctionType, types.LambdaType, types.MethodType]): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - - -class NonCopiedVarArgs(ValueProcessor): - """Allows any number of arguments. This must be the last arg listed. """ - - def check(self, val): - pass - - - - def copy(self, val): - return val - - - - - -class File(ObjectProcessor): - """Allows File objects.""" - - def check(self, val): - if not isinstance(val, emulfile.emulated_file): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - def wrap(self, val): - return NamespaceObjectWrapper("file", val, file_object_wrapped_functions_dict) - - - - - -class Lock(ObjectProcessor): - """Allows Lock objects.""" - - def check(self, val): - if not isinstance(val, emulmisc.emulated_lock): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - def wrap(self, val): - return NamespaceObjectWrapper("lock", val, lock_object_wrapped_functions_dict) - - - - - -class UDPServerSocket(ObjectProcessor): - """Allows UDPServerSocket objects.""" - - def check(self, val): - if not isinstance(val, emulcomm.UDPServerSocket): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - def wrap(self, val): - return NamespaceObjectWrapper("socket", val, udp_server_socket_object_wrapped_functions_dict) - - - - - -class TCPServerSocket(ObjectProcessor): - """Allows TCPServerSocket objects.""" - - def check(self, val): - if not isinstance(val, emulcomm.TCPServerSocket): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - def wrap(self, val): - return NamespaceObjectWrapper("socket", val, tcp_server_socket_object_wrapped_functions_dict) - - - - - -class 
TCPSocket(ObjectProcessor): - """Allows TCPSocket objects.""" - - def check(self, val): - if not isinstance(val, emulcomm.EmulatedSocket): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - def wrap(self, val): - return NamespaceObjectWrapper("socket", val, tcp_socket_object_wrapped_functions_dict) - - - - - -class VirtualNamespace(ObjectProcessor): - """Allows VirtualNamespace objects.""" - - def check(self, val): - if not isinstance(val, virtual_namespace.VirtualNamespace): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - def wrap(self, val): - return NamespaceObjectWrapper("VirtualNamespace", val, - virtual_namespace_object_wrapped_functions_dict) - - - - - -class SafeDict(ObjectProcessor): - """Allows SafeDict objects.""" - - # TODO: provide a copy function that won't actually copy so that - # references are maintained. - def check(self, val): - if not isinstance(val, safe.SafeDict): - raise RepyArgumentError("Invalid type %s" % type(val)) - - - - - -class DictOrSafeDict(ObjectProcessor): - """Allows SafeDict objects or regular dict objects.""" - - # TODO: provide a copy function that won't actually copy so that - # references are maintained. - def check(self, val): - if type(val) is not dict: - SafeDict().check(val) - - - - - -# These are the functions in the user's name space excluding the builtins we -# allow. Each function is a key in the dictionary. Each value is a dictionary -# that defines the functions to be used by the wrapper when a call is -# performed. It is the same dictionary that is passed as a constructor to -# the NamespaceAPIFunctionWrapper class to create the actual wrappers. -# The public function wrap_and_insert_api_functions() uses this dictionary as -# the basis for what is populated in the user context. Anything function -# defined here will be wrapped and made available to untrusted user code. 
-USERCONTEXT_WRAPPER_INFO = { - 'gethostbyname' : - {'func' : emulcomm.gethostbyname, - 'args' : [Str()], - 'return' : Str()}, - 'getmyip' : - {'func' : emulcomm.getmyip, - 'args' : [], - 'return' : Str()}, - 'sendmessage' : - {'func' : emulcomm.sendmessage, - 'args' : [Str(), Int(), Str(), Str(), Int()], - 'return' : Int()}, - 'listenformessage' : - {'func' : emulcomm.listenformessage, - 'args' : [Str(), Int()], - 'return' : UDPServerSocket()}, - 'openconnection' : - {'func' : emulcomm.openconnection, - 'args' : [Str(), Int(), Str(), Int(), Float()], -# 'raise' : [AddressBindingError, PortRestrictedError, PortInUseError, -# ConnectionRefusedError, TimeoutError, RepyArgumentError], - 'return' : TCPSocket()}, - 'listenforconnection' : - {'func' : emulcomm.listenforconnection, - 'args' : [Str(), Int()], - 'return' : TCPServerSocket()}, - 'openfile' : - {'func' : emulfile.emulated_open, - 'args' : [Str(maxlen=120), Bool()], - 'return' : File()}, - 'listfiles' : - {'func' : emulfile.listfiles, - 'args' : [], - 'return' : ListOfStr()}, - 'removefile' : - {'func' : emulfile.removefile, - 'args' : [Str(maxlen=120)], - 'return' : None}, - 'exitall' : - {'func' : emulmisc.exitall, - 'args' : [], - 'return' : None}, - 'createlock' : - {'func' : emulmisc.createlock, - 'args' : [], - 'return' : Lock()}, - 'getruntime' : - {'func' : emulmisc.getruntime, - 'args' : [], - 'return' : Float()}, - 'randombytes' : - {'func' : emulmisc.randombytes, - 'args' : [], - 'return' : Str(maxlen=1024, minlen=1024)}, - 'createthread' : - {'func' : emultimer.createthread, - 'args' : [Func()], - 'return' : None}, - 'sleep' : - {'func' : emultimer.sleep, - 'args' : [Float()], - 'return' : None}, - 'log' : - {'func' : emulmisc.log, - 'args' : [NonCopiedVarArgs()], - 'return' : None}, - 'getthreadname' : - {'func' : emulmisc.getthreadname, - 'args' : [], - 'return' : Str()}, - 'createvirtualnamespace' : - {'func' : virtual_namespace.createvirtualnamespace, - 'args' : [Str(), Str()], - 'return' : 
VirtualNamespace()}, - 'getresources' : - {'func' : nonportable.get_resources, - 'args' : [], - 'return' : (Dict(), Dict(), List())}, -} - -FILE_OBJECT_WRAPPER_INFO = { - 'close' : - {'func' : emulfile.emulated_file.close, - 'args' : [], - 'return' : None}, - 'readat' : - {'func' : emulfile.emulated_file.readat, - 'args' : [NoneOrInt(), Int(min=0)], - 'return' : Str()}, - 'writeat' : - {'func' : emulfile.emulated_file.writeat, - 'args' : [Str(), Int(min=0)], - 'return' : None}, -} - -TCP_SOCKET_OBJECT_WRAPPER_INFO = { - 'close' : - {'func' : emulcomm.EmulatedSocket.close, - 'args' : [], - 'return' : Bool()}, - 'recv' : - {'func' : emulcomm.EmulatedSocket.recv, - #'args' : [Int(min=1)], - 'args' : [Int(min=0)], - 'return' : Str("")}, - 'send' : - {'func' : emulcomm.EmulatedSocket.send, - 'args' : [Str()], - 'return' : Int(min=0)}, -} - -# TODO: Figure out which real object should be wrapped. It doesn't appear -# to be implemented yet as there is no "getconnection" in the repy_v2 source. -TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO = { - 'close' : - {'func' : emulcomm.TCPServerSocket.close, - 'args' : [], - 'return' : Bool()}, - 'getconnection' : - {'func' : emulcomm.TCPServerSocket.getconnection, - 'args' : [], - 'return' : (Str(), Int(), TCPSocket())}, -} - -UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO = { - 'close' : - {'func' : emulcomm.UDPServerSocket.close, - 'args' : [], - 'return' : Bool()}, - 'getmessage' : - {'func' : emulcomm.UDPServerSocket.getmessage, - 'args' : [], - 'return' : (Str(), Int(), Str())}, -} - -LOCK_OBJECT_WRAPPER_INFO = { - 'acquire' : - # A string for the target_func indicates a function by this name on the - # instance rather is what should be wrapped. - {'func' : 'acquire', - 'args' : [Bool()], - 'return' : Bool()}, - 'release' : - # A string for the target_func indicates a function by this name on the - # instance rather is what should be wrapped. 
- {'func' : 'release', - 'args' : [], - 'return' : None}, -} - -VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO = { - # Evaluate must take a dict or SafeDict, and can - # only return a SafeDict. We must _not_ copy the - # dict since that will screw up the references in the dict. - 'evaluate' : - {'func' : 'evaluate', - 'args' : [DictOrSafeDict()], - 'return' : SafeDict()}, -} - - -############################################################################## -# The classes we define from which actual wrappers are instantiated. -############################################################################## - -def _copy(obj, objectmap=None): - """ - - Create a deep copy of an object without using the python 'copy' module. - Using copy.deepcopy() doesn't work because builtins like id and hasattr - aren't available when this is called. - - obj - The object to make a deep copy of. - objectmap - A mapping between original objects and the corresponding copy. This is - used to handle circular references. - - TypeError - If an object is encountered that we don't know how to make a copy of. - NamespaceViolationError - If an unexpected error occurs while copying. This isn't the greatest - solution, but in general the idea is we just need to abort the wrapped - function call. - - A new reference is created to every non-simple type of object. That is, - everything except objects of type str, unicode, int, etc. - - The deep copy of obj with circular/recursive references preserved. - """ - try: - # If this is a top-level call to _copy, create a new objectmap for use - # by recursive calls to _copy. - if objectmap is None: - objectmap = {} - # If this is a circular reference, use the copy we already made. - elif _saved_id(obj) in objectmap: - return objectmap[_saved_id(obj)] - - # types.InstanceType is included because the user can provide an instance - # of a class of their own in the list of callback args to settimer. 
- if _is_in(type(obj), [str, unicode, int, long, float, complex, bool, frozenset, - types.NoneType, types.FunctionType, types.LambdaType, - types.MethodType, types.InstanceType]): - return obj - - elif type(obj) is list: - temp_list = [] - # Need to save this in the objectmap before recursing because lists - # might have circular references. - objectmap[_saved_id(obj)] = temp_list - - for item in obj: - temp_list.append(_copy(item, objectmap)) - - return temp_list - - elif type(obj) is tuple: - temp_list = [] - - for item in obj: - temp_list.append(_copy(item, objectmap)) - - # I'm not 100% confident on my reasoning here, so feel free to point - # out where I'm wrong: There's no way for a tuple to directly contain - # a circular reference to itself. Instead, it has to contain, for - # example, a dict which has the same tuple as a value. In that - # situation, we can avoid infinite recursion and properly maintain - # circular references in our copies by checking the objectmap right - # after we do the copy of each item in the tuple. The existence of the - # dictionary would keep the recursion from being infinite because those - # are properly handled. That just leaves making sure we end up with - # only one copy of the tuple. We do that here by checking to see if we - # just made a copy as a result of copying the items above. If so, we - # return the one that's already been made. - if _saved_id(obj) in objectmap: - return objectmap[_saved_id(obj)] - - retval = tuple(temp_list) - objectmap[_saved_id(obj)] = retval - return retval - - elif type(obj) is set: - temp_list = [] - # We can't just store this list object in the objectmap because it isn't - # a set yet. If it's possible to have a set contain a reference to - # itself, this could result in infinite recursion. However, sets can - # only contain hashable items so I believe this can't happen. 
- - for item in obj: - temp_list.append(_copy(item, objectmap)) - - retval = set(temp_list) - objectmap[_saved_id(obj)] = retval - return retval - - elif type(obj) is dict: - temp_dict = {} - # Need to save this in the objectmap before recursing because dicts - # might have circular references. - objectmap[_saved_id(obj)] = temp_dict - - for key, value in obj.items(): - temp_key = _copy(key, objectmap) - temp_dict[temp_key] = _copy(value, objectmap) - - return temp_dict - - # We don't copy certain objects. This is because copying an emulated file - # object, for example, will cause the destructor of the original one to - # be invoked, which will close the actual underlying file. As the object - # is wrapped and the client does not have access to it, it's safe to not - # wrap it. - elif isinstance(obj, (NamespaceObjectWrapper, emulfile.emulated_file, - emulcomm.EmulatedSocket, emulcomm.TCPServerSocket, - emulcomm.UDPServerSocket, thread.LockType, - virtual_namespace.VirtualNamespace)): - return obj - - else: - raise TypeError("_copy is not implemented for objects of type " + str(type(obj))) - - except Exception, e: - raise NamespaceInternalError("_copy failed on " + str(obj) + " with message " + str(e)) - - - - - -class NamespaceInternalError(Exception): - """Something went wrong and we should terminate.""" - - - - - -class NamespaceObjectWrapper(object): - """ - Instances of this class are used to wrap handles and objects returned by - api functions to the user code. - - The methods that can be called on these instances are mostly limited to - what is in the allowed_functions_dict passed to the constructor. The - exception is that a simple __repr__() is defined as well as an __iter__() - and next(). However, instances won't really be iterable unless a next() - method is defined in the allowed_functions_dict. 
- """ - - def __init__(self, wrapped_type_name, wrapped_object, allowed_functions_dict): - """ - - Constructor - - self - wrapped_type_name - The name (a string) of what type of wrapped object. For example, - this could be "timerhandle". - wrapped_object - The actual object to be wrapped. - allowed_functions_dict - A dictionary of the allowed methods that can be called on the object. - The keys should be the names of the methods, the values are the - wrapped functions that will be called. - """ - # Only one underscore at the front so python doesn't do its own mangling - # of the name. We're not trying to keep this private in the private class - # variable sense of python where nothing is really private, instead we just - # want a double-underscore in there as extra protection against untrusted - # code being able to access the values. - self._wrapped__type_name = wrapped_type_name - self._wrapped__object = wrapped_object - self._wrapped__allowed_functions_dict = allowed_functions_dict - - - - def __getattr__(self, name): - """ - When a method is called on an instance, we look for the method in the - allowed_functions_dict that was provided to the constructor. If there - is such a method in there, we return a function that will properly - invoke the method with the correct 'self' as the first argument. - """ - if name in self._wrapped__allowed_functions_dict: - wrapped_func = self._wrapped__allowed_functions_dict[name] - - def __do_func_call(*args, **kwargs): - return wrapped_func(self._wrapped__object, *args, **kwargs) - - return __do_func_call - - else: - # This is the standard way of handling "it doesn't exist as far as we - # are concerned" in __getattr__() methods. 
- raise AttributeError, name - - - - def __iter__(self): - """ - We provide __iter__() as part of the class rather than through __getattr__ - because python won't look for the attribute in the object to determine if - the object is iterable, instead it will look directly at the class the - object is an instance of. See the docstring for next() for more info. - """ - return self - - - - def next(self): - """ - We provide next() as part of the class rather than through __getattr__ - because python won't look for the attribute in the object to determine if - the object is iterable, instead it will look directly at the class the - object is an instance of. We don't want everything that is wrapped to - be considered iterable, though, so we return a TypeError if this gets - called but there isn't a wrapped next() method. - """ - if "next" in self._wrapped__allowed_functions_dict: - return self._wrapped__allowed_functions_dict["next"](self._wrapped__object) - - raise TypeError("You tried to iterate a non-iterator of type " + str(type(self._wrapped__object))) - - - - def __repr__(self): - return "" - - - - def __hash__(self): - return _saved_hash(self._wrapped__object) - - - - def __eq__(self, other): - """In addition to __hash__, this is necessary for use as dictionary keys.""" - # We could either assume "other" is a wrapped object and try to compare - # its wrapped object against this wrapped object, or we could just compare - # the hashes of each. If we try to unwrap the other object, it means you - # couldn't compare a wrapped object to an unwrapped one. - return _saved_hash(self) == _saved_hash(other) - - - - def __ne__(self, other): - """ - It's good for consistency to define __ne__ if one is defining __eq__, - though this is not needed for using objects as dictionary keys. - """ - return _saved_hash(self) != _saved_hash(other) - - - - -class NamespaceAPIFunctionWrapper(object): - """ - Instances of this class exist solely to provide function wrapping. 
This is - done by creating an instance of the class and then making available the - instance's wrapped_function() method to any code that should only be allowed - to call the wrapped version of the function. - """ - - def __init__(self, func_dict, is_method=False): - """ - - Constructor. - - self - func_dict - A dictionary whose with the following keys whose values are the - corresponding funcion: - func (required) -- a function or a string of the name - of the method on the underlying object. - args (required) - return (required) - is_method -- if this is an object's method being wrapped - rather than a regular function. - - None - - None - - None - """ - - # Required in func_dict. - self.__func = func_dict["func"] - self.__args = func_dict["args"] - self.__return = func_dict["return"] - self.__is_method = is_method - - # Make sure that the __target_func really is a function or a string - # indicating a function by that name on the underlying object should - # be called. - if not _saved_callable(self.__func) and type(self.__func) is not str: - raise TypeError("The func was neither callable nor a string when " + - "constructing a namespace-wrapped function. The object " + - "used for target_func was: " + repr(self.__func)) - - if type(self.__func) is str: - self.__func_name = self.__func - else: - self.__func_name = self.__func.__name__ - - - - def _process_args(self, args): - args_to_return = [] - - for index in range(len(args)): - # Armon: If there are more arguments than there are type specifications - # and we are using NonCopiedVarArgs, then check against that. - if index >= len(self.__args) and isinstance(self.__args[-1], NonCopiedVarArgs): - arg_type = self.__args[-1] - else: - arg_type = self.__args[index] - - # We only copy simple types, which means we only copy ValueProcessor not - # ObjectProcessor arguments. 
- if isinstance(arg_type, ValueProcessor): - temparg = arg_type.copy(args[index]) - elif isinstance(arg_type, ObjectProcessor): - temparg = arg_type.unwrap(args[index]) - else: - raise NamespaceInternalError("Unknown argument expectation.") - - arg_type.check(temparg) - - args_to_return.append(temparg) - - return args_to_return - - - - def _process_retval_helper(self, processor, retval): - try: - if isinstance(processor, ValueProcessor): - tempretval = processor.copy(retval) - processor.check(tempretval) - elif isinstance(processor, ObjectProcessor): - processor.check(retval) - tempretval = processor.wrap(retval) - elif processor is None: - if retval is not None: - raise InternalRepyError("Expected None but wasn't.") - tempretval = None - else: - raise InternalRepyError("Unknown retval expectation.") - return tempretval - - except RepyArgumentError, err: - raise InternalRepyError("Invalid retval type: %s" % err) - - - - def _process_retval(self, retval): - - try: - # Allow the return value to be a tuple of processors. - if type(retval) is tuple: - if len(retval) != len(self.__return): - raise InternalRepyError("Returned tuple of wrong size: %s" % str(retval)) - tempretval = [] - for index in range(len(retval)): - tempitem = self._process_retval_helper(self.__return[index], retval[index]) - tempretval.append(tempitem) - tempretval = tuple(tempretval) - else: - tempretval = self._process_retval_helper(self.__return, retval) - - except Exception, e: - raise InternalRepyError( - "Function '" + self.__func_name + "' returned with unallowed return type " + - str(type(retval)) + " : " + str(e)) - - - return tempretval - - - - def wrapped_function(self, *args, **kwargs): - """ - - Act as the function that is wrapped but perform all required sanitization - and checking of data that goes into and comes out of the underlying - function. - - self - *args - **kwargs - The arguments to the underlying function. 
- - NamespaceViolationError - If some aspect of the arguments or function call is not allowed. - Anything else that the underlying function may raise. - - Anything that the underyling function may do. - - Anything that the underlying function may return. - """ - try: - # We don't allow keyword args. - if kwargs: - raise RepyArgumentError("Keyword arguments not allowed when calling %s." % - self.__func_name) - - if self.__is_method: - # This is a method of an object instance rather than a standalone function. - # The "self" argument will be passed implicitly by python in some cases, so - # we remove it from the args we check. For the others, we'll add it back in - # after the check. - args_to_check = args[1:] - else: - args_to_check = args - - if len(args_to_check) != len(self.__args): - if not self.__args or not isinstance(self.__args[-1:][0], NonCopiedVarArgs): - raise RepyArgumentError("Function '" + self.__func_name + - "' takes " + str(len(self.__args)) + " arguments, not " + - str(len(args_to_check)) + " as you provided.") - - args_copy = self._process_args(args_to_check) - - args_to_use = None - - # If it's a string rather than a function, then this is our convention - # for indicating that we want to wrap the function of this particular - # object. We use this if the function to wrap isn't available without - # having the object around, such as with real lock objects. - if type(self.__func) is str: - func_to_call = _saved_getattr(args[0], self.__func) - args_to_use = args_copy - else: - func_to_call = self.__func - if self.__is_method: - # Sanity check the object we're adding back in as the "self" argument. 
- if not isinstance(args[0], (NamespaceObjectWrapper, emulfile.emulated_file, - emulcomm.EmulatedSocket, emulcomm.TCPServerSocket, - emulcomm.UDPServerSocket, thread.LockType, - virtual_namespace.VirtualNamespace)): - raise NamespaceInternalError("Wrong type for 'self' argument.") - # If it's a method but the function was not provided as a string, we - # actually do have to add the first argument back in. Yes, this whole - # area of code is ugly. - args_to_use = [args[0]] + args_copy - else: - args_to_use = args_copy - - retval = func_to_call(*args_to_use) - - return self._process_retval(retval) - - except RepyException: - # TODO: this should be changed to RepyError along with all references to - # RepyException in the rest of the repy code. - # We allow any RepyError to continue up to the client code. - raise - - except: - # Any other exception is unexpected and thus is a programming error on - # our side, so we terminate. - _handle_internalerror("Unexpected exception from within Repy API", 843) From c4b2d28c24c17a652769f8dbb68e768c8099d13a Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:33:32 -0500 Subject: [PATCH 10/17] Delete nminit.py --- nminit.py | 365 ------------------------------------------------------ 1 file changed, 365 deletions(-) delete mode 100644 nminit.py diff --git a/nminit.py b/nminit.py deleted file mode 100644 index 64e21c3..0000000 --- a/nminit.py +++ /dev/null @@ -1,365 +0,0 @@ -""" -Author: Justin Cappos - -Module: Node Manager initializer. It initializes the state needed to run the - node manager on the local node. This would most likely be run by the - installer. - -Start date: September 10rd, 2008 - -This initializes the node manager for Seattle. It sets up the starting -resources, creates a configuration file, etc. - -The design goals of this version are to be secure, simple, and reliable (in -that order). 
- -""" -from repyportability import * -_context = locals() -add_dy_support(_context) - -# need to generate a public key -dy_import_module_symbols('rsa.r2py') - -# need randomfloat... -import random -randomfloat = random.random - - -import os - -import persist - -import shutil - -import glob - -import optparse -# embedded here. Is this really the right thing to do? -justinpubkey = {'e':22599311712094481841033180665237806588790054310631222126405381271924089573908627143292516781530652411806621379822579071415593657088637116149593337977245852950266439908269276789889378874571884748852746045643368058107460021117918657542413076791486130091963112612854591789518690856746757312472362332259277422867, 'n':12178066700672820207562107598028055819349361776558374610887354870455226150556699526375464863913750313427968362621410763996856543211502978012978982095721782038963923296750730921093699612004441897097001474531375768746287550135361393961995082362503104883364653410631228896653666456463100850609343988203007196015297634940347643303507210312220744678194150286966282701307645064974676316167089003178325518359863344277814551559197474590483044733574329925947570794508677779986459413166439000241765225023677767754555282196241915500996842713511830954353475439209109249856644278745081047029879999022462230957427158692886317487753201883260626152112524674984510719269715422340038620826684431748131325669940064404757120601727362881317222699393408097596981355810257955915922792648825991943804005848347665699744316223963851263851853483335699321871483966176480839293125413057603561724598227617736944260269994111610286827287926594015501020767105358832476708899657514473423153377514660641699383445065369199724043380072146246537039577390659243640710339329506620575034175016766639538091937167987100329247642670588246573895990251211721839517713790413170646177246216366029853604031421932123167115444834908424556992662935981166395451031277981021820123445253} - -# Vessels need to have a public key in order to be accessed 
-print "Generating user keys..." -keylen = 2 ** 10 -publickeys = [] - -# Our unit tests need access to 4 guest users, from guest0...guest3 -num_guests = 4 - -for i in range(num_guests): - publickey, privatekey = rsa_gen_pubpriv_keys(keylen) - publickeys.append(publickey) - # The unit tests need access to these keys - publickey_file = 'guest' + str(i) + '.publickey' - privatekey_file = 'guest' + str(i) + '.privatekey' - - # We need to make sure the file that we're writing to doesn't yet - # exist. - for filename in (publickey_file, privatekey_file): - # We shouldn't rely on querying if the files exist before - # deleting, as someone can get to the file after we query - # and before we delete. - try: - removefile(filename) - except FileNotFoundError: - pass - - rsa_publickey_to_file(publickey, publickey_file) - rsa_privatekey_to_file(privatekey, privatekey_file) - -( guest0pubkey, - guest1pubkey, - guest2pubkey, - guest3pubkey) = publickeys - - -# This is the public key of the person who will control most of the resources. -#controllerpubkey = {'e': 1515278400394037168869631887206225761783197636247636149274740854708478416229147500580877416652289990968676310353790883501744269103521055894342395180721167L, 'n': 8811850224687278929671477591179591903829730117649785862652866020803862826558480006479605958786097112503418194852731900367494958963787480076175614578652735061071079458992502737148356289391380249696938882025028801032667062564713111819847043202173425187133883586347323838509679062142786013585264788548556099117804213139295498187634341184917970175566549405203725955179602584979965820196023950630399933075080549044334508921319264315718790337460536601263126663173385674250739895046814277313031265034275415434440823182691254039184953842629364697394327806074576199279943114384828602178957150547925812518281418481896604655037L} -controllerpubkey = {} - - -offcutresourcedata ="""# BUG: How do we come up with these values dynamically? 
-resource cpu .002 -resource memory 1000000 # 1 MiB -resource diskused 100000 # .1 MiB -resource events 2 -resource filewrite 1000 -resource fileread 1000 -resource filesopened 1 -resource insockets 0 -resource outsockets 0 -resource netsend 0 -resource netrecv 0 -resource loopsend 0 # would change with prompt functionality (?) -resource looprecv 0 -resource lograte 100 # the monitor might log something -resource random 0 # Shouldn't generate random numbers on our own -""" - -bigresourcedata = """resource cpu .08 -resource memory 100000000 # 100 MiB -resource diskused 80000000 # 80 MiB -resource events 50 -resource filewrite 100000 -resource fileread 100000 -resource filesopened 10 -resource insockets 10 -resource outsockets 10 -resource netsend 100000 -resource netrecv 100000 -resource loopsend 1000000 -resource looprecv 1000000 -resource lograte 30000 -resource random 100 -resource messport 11111 -resource messport 12222 -resource messport 13333 -resource messport 14444 -resource messport 15555 -resource messport 16666 -resource messport 17777 -resource messport 18888 -resource messport 19999 -resource connport 11111 -resource connport 12222 -resource connport 13333 -resource connport 14444 -resource connport 15555 -resource connport 16666 -resource connport 17777 -resource connport 18888 -resource connport 19999 - -call gethostbyname_ex allow -call sendmess allow -call recvmess allow -call openconn allow -call waitforconn allow -call stopcomm allow # it doesn't make sense to restrict -call socket.close allow # let's not restrict -call socket.send allow # let's not restrict -call socket.recv allow # let's not restrict - -# open and file.__init__ both have built in restrictions... -call open allow # can read / write -call file.__init__ allow # can read / write -call file.close allow # shouldn't restrict -call file.flush allow # they are free to use -call file.next allow # free to use as well... 
-call file.read allow # allow read -call file.readline allow # shouldn't restrict -call file.readlines allow # shouldn't restrict -call file.seek allow # seek doesn't restrict -call file.write allow # shouldn't restrict (open restricts) -call file.writelines allow # shouldn't restrict (open restricts) -call sleep allow # harmless -call settimer allow # we can't really do anything smart -call canceltimer allow # should be okay -call exitall allow # should be harmless - -call log.write allow -call log.writelines allow -call getmyip allow # They can get the external IP address -call listdir allow # They can list the files they created -call removefile allow # They can remove the files they create -call randomfloat allow # can get random numbers -call getruntime allow # can get the elapsed time -call getlock allow # can get a mutex -""" - -smallresourcedata = """resource cpu 0.02 -resource memory 30000000 # 30 MiB -resource diskused 20000000 # 20 MiB -resource events 15 -resource filewrite 100000.0 -resource fileread 100000.0 -resource filesopened 5 -resource insockets 5 -resource outsockets 5 -resource netsend 10000.0 -resource netrecv 10000.0 -resource loopsend 1000000.0 -resource looprecv 1000000.0 -resource lograte 30000.0 -resource random 100.0 -resource messport %s -resource messport %s -resource messport %s -resource messport %s -resource connport %s -resource connport %s -resource connport %s -resource connport %s - -call gethostbyname_ex allow -call sendmess allow -call recvmess allow -call openconn allow -call waitforconn allow -call stopcomm allow # it doesn't make sense to restrict -call socket.close allow # let's not restrict -call socket.send allow # let's not restrict -call socket.recv allow # let's not restrict - -# open and file.__init__ both have built in restrictions... 
-call open allow # can read / write -call file.__init__ allow # can read / write -call file.close allow # shouldn't restrict -call file.flush allow # they are free to use -call file.next allow # free to use as well... -call file.read allow # allow read -call file.readline allow # shouldn't restrict -call file.readlines allow # shouldn't restrict -call file.seek allow # seek doesn't restrict -call file.write allow # shouldn't restrict (open restricts) -call file.writelines allow # shouldn't restrict (open restricts) -call sleep allow # harmless -call settimer allow # we can't really do anything smart -call canceltimer allow # should be okay -call exitall allow # should be harmless - -call log.write allow -call log.writelines allow -call getmyip allow # They can get the external IP address -call listdir allow # They can list the files they created -call removefile allow # They can remove the files they create -call randomfloat allow # can get random numbers -call getruntime allow # can get the elapsed time -call getlock allow # can get a mutex -""" - - - - - -def make_vessel(vesselname, pubkey, resourcetemplate, resourceargs): - retdict = {'userkeys':[], 'ownerkey':pubkey, 'oldmetadata':None, 'stopfilename':vesselname+'.stop', 'logfilename':vesselname+'.log', 'statusfilename':vesselname+'.status', 'resourcefilename':'resource.'+vesselname, 'advertise':True, 'ownerinformation':'', 'status':'Fresh'} - - try: - WindowsError - - except NameError: # not on windows... - # make the vessel dirs... - try: - os.mkdir(vesselname) - except OSError,e: - if e[0] == 17: - # directory exists - pass - else: - raise - - else: # on Windows... - - # make the vessel dirs... - try: - os.mkdir(vesselname) - except (OSError,WindowsError),e: - if e[0] == 17 or e[0] == 183: - # directory exists - pass - else: - raise - - - #### write the vessel's resource file... 
- outfo = open(retdict['resourcefilename'],"w") - # write the args into the resource data template - outfo.write(resourcetemplate % resourceargs) - outfo.close() - - return retdict - - - -# lots of little things need to be initialized... -def initialize_state(): - - # first, let's clean up any existing directory data... - for vesseldirectoryname in glob.glob('v[0-9]*'): - if os.path.isdir(vesseldirectoryname): - print 'Removing:',vesseldirectoryname - shutil.rmtree(vesseldirectoryname) - - # initialize my configuration file. This involves a few variables: - # pollfrequency -- the amount of time to sleep after a check when "busy - # waiting". This trades CPU load for responsiveness. - # ports -- the ports the node manager could listen on. - # publickey -- the public key used to identify the node... - # privatekey -- the corresponding private key for the node... - configuration = {} - - configuration['pollfrequency'] = 1.0 - - # NOTE: I chose these randomly (they will be uniform across all NMs)... - # Was this wise? - configuration['ports'] = [2888, 9625, 10348, 39303, 48126, 52862, 57344, 64310] - - print "Generating key..." - keys = rsa_gen_pubpriv_keys(100) - configuration['publickey'] = keys[0] - configuration['privatekey'] = keys[1] - configuration['service_vessel'] = 'v2' - - print "Writing config file..." - # write the config file... - persist.commit_object(configuration,"nodeman.cfg") - - # write the offcut file... 
- outfo = open("resources.offcut","w") - outfo.write(offcutresourcedata) - outfo.close() - -# vessel1 = make_vessel('v1',controllerpubkey,bigresourcedata, []) - vessel1 = make_vessel('v1',controllerpubkey,smallresourcedata, ('12345','12346', '12347','12348','12345','12346','12347','12348')) - vessel2 = make_vessel('v2',justinpubkey,smallresourcedata, ('20000','20001', '20002','20003','20000','20001','20002','20003')) - vessel3 = make_vessel('v3',guest0pubkey,smallresourcedata, ('30000','30001', '30002','30003','30000','30001','30002','30003')) - vessel4 = make_vessel('v4',guest0pubkey,smallresourcedata, ('21000','21001', '21002','21003','21000','21001','21002','21003')) - vessel5 = make_vessel('v5',guest1pubkey,smallresourcedata, ('22000','22001', '22002','22003','22000','22001','22002','22003')) - vessel6 = make_vessel('v6',guest1pubkey,smallresourcedata, ('23000','23001', '23002','23003','23000','23001','23002','23003')) - vessel7 = make_vessel('v7',guest2pubkey,smallresourcedata, ('24000','24001', '24002','24003','24000','24001','24002','24003')) - vessel8 = make_vessel('v8',guest2pubkey,smallresourcedata, ('25000','25001', '25002','25003','25000','25001','25002','25003')) - vessel9 = make_vessel('v9',guest3pubkey,smallresourcedata, ('26000','26001', '26002','26003','26000','26001','26002','26003')) - vessel10 = make_vessel('v10',guest3pubkey,smallresourcedata, ('27000','27001', '27002','27003','27000','27001','27002','27003')) - - - vesseldict = {'v1':vessel1, 'v2':vessel2, 'v3':vessel3, 'v4':vessel4, 'v5':vessel5, 'v6':vessel6, 'v7':vessel7, 'v8':vessel8, 'v9':vessel9, 'v10':vessel10} - - print "Writing vessel dictionary..." - # write out the vessel dictionary... - persist.commit_object(vesseldict,"vesseldict") - -def main(): - # Parse the options provided. 
- helpstring = "python nminit.py [-s] " - parser = optparse.OptionParser(usage=helpstring) - - parser.add_option("-s", "--specifyfiles", action = "store_true", - dest="specify_files", default=False, - help="specifying the files from which public keys should be read") - - (options, args) = parser.parse_args() - if len(args) == 1: - controllerpubkeypath = os.path.realpath(args[0]) - else: - print "Please input user public key"+" "+helpstring - return 0 - - #Set variables according to the provided options. - specify = options.specify_files - - if specify: - controllerpubkey = rsa_file_to_publickey(controllerpubkeypath) - else: - print helpstring - return 0 - initialize_state() - - - - - - - -if __name__ == '__main__': - main() From 1cccf480275f9e6997539ba747c8ba1628a1d513 Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:33:40 -0500 Subject: [PATCH 11/17] Delete rebuild_base_installers.sh --- rebuild_base_installers.sh | 110 ------------------------------------- 1 file changed, 110 deletions(-) delete mode 100644 rebuild_base_installers.sh diff --git a/rebuild_base_installers.sh b/rebuild_base_installers.sh deleted file mode 100644 index 6a29928..0000000 --- a/rebuild_base_installers.sh +++ /dev/null @@ -1,110 +0,0 @@ -#!/bin/bash - -# Ths will update the base installers used by the Clearinghouse -# from what is currently available in the working copy of the -# required repos. See constants below. -# -# You will need sudo privileges to use this. Don't run this as sudo, -# it will invoke sudo when it needs it. 
-# -# Usage: ./rebuild_base_installers.sh VERSION_STRING - -VERSION=$1 - -USER=cib - -SOFTWARE_UPDATE_URL=http://blackbox.poly.edu/updatesite/ -PUBLIC_KEY_FILE=/home/cib/custominstallerbuilder/repy_runtime/cib.publickey -PRIVATE_KEY_FILE=/home/cib/custominstallerbuilder/repy_runtime/cib.privatekey - -e=`cat $PUBLIC_KEY_FILE | cut -d' ' -f 1` -n=`cat $PUBLIC_KEY_FILE | cut -d' ' -f 2` -SOFTWARE_UPDATE_KEY="{'e':$e, 'n':$n}" - -REPO_PARENT_DIR=/home/cib/custominstallerbuilder/DEPENDENCIES - -BASE_INSTALLER_DIRECTORY=/var/www/dist - -BASE_INSTALLER_ARCHIVE_DIR=/var/www/dist/old_base_installers - -if [ "$VERSION" == "" ]; then - echo "You must supply a version string." - echo "usage: $0 version" - exit 1 -fi - -if [ "$SOFTWARE_UPDATE_URL" == "" ]; then - echo "SOFTWARE_UPDATE_URL isn't set." - exit 1 -fi - -if [ ! -d "$BASE_INSTALLER_DIRECTORY" ]; then - echo "BASE_INSTALLER_DIRECTORY doesn't exist." - exit 1 -fi - -if [ ! -d "$BASE_INSTALLER_ARCHIVE_DIR" ]; then - echo "BASE_INSTALLER_ARCHIVE_DIR doesn't exist." - exit 1 -fi - -if [ ! -d "$REPO_PARENT_DIR" ]; then - echo "REPO_PARENT_DIR doesn't exist." 
- exit 1 -fi - -if [ "`grep -F "$VERSION" $REPO_PARENT_DIR/nodemanager/nmmain.py`" == "" ]; then - echo "You need to set the version string in $REPO_PARENT_DIR/nodemanager/nmmain.py" - exit 1 -fi - -UPDATE_URL_FOUND=$(grep -F "softwareurl = \"$SOFTWARE_UPDATE_URL\"" $REPO_PARENT_DIR/softwareupdater/softwareupdater.py) - -if [ "$UPDATE_URL_FOUND" == "" ]; then - echo "Did not find the correct update url in $REPO_PARENT_DIR/softwareupdater/softwareupdater.py" - exit 1 -fi - -UPDATE_KEY_FOUND=$(grep -F "$SOFTWARE_UPDATE_KEY" $REPO_PARENT_DIR/softwareupdater/softwareupdater.py) - -if [ "$UPDATE_KEY_FOUND" == "" ]; then - echo "Did not find the correct update key in $REPO_PARENT_DIR/softwareupdater/softwareupdater.py" - exit 1 -fi - -echo "Archiving old base installers to $BASE_INSTALLER_ARCHIVE_DIR" -echo "Warning: failure after this point may leave user $USER with no base installers!" -sudo mv -f $BASE_INSTALLER_DIRECTORY/seattle_* $BASE_INSTALLER_ARCHIVE_DIR - -echo "Building new base installers at $BASE_INSTALLER_DIRECTORY" -sudo python $REPO_PARENT_DIR/dist/make_base_installers.py \ - a \ - $REPO_PARENT_DIR \ - $PUBLIC_KEY_FILE \ - $PRIVATE_KEY_FILE \ - $BASE_INSTALLER_DIRECTORY \ - $VERSION - -if [ "$?" != "0" ]; then - echo "Building base installers failed." - exit 1 -fi - -echo "Changing base installer symlinks used by user $USER." - -pushd $BASE_INSTALLER_DIRECTORY - -if [ ! -f "seattle_${VERSION}_android.zip" ] || [ ! -f "seattle_${VERSION}_linux.tgz" ] || [ ! -f "seattle_${VERSION}_mac.tgz" ] || [ ! -f "seattle_${VERSION}_win.zip" ]; then - echo "The base installers don't appear to have been created." 
- exit 1 -fi - -sudo chown $USER seattle_* - -sudo -u $USER ln -s -f seattle_${VERSION}_linux.tgz seattle_linux.tgz -sudo -u $USER ln -s -f seattle_${VERSION}_mac.tgz seattle_mac.tgz -sudo -u $USER ln -s -f seattle_${VERSION}_win.zip seattle_win.zip -sudo -u $USER ln -s -f seattle_${VERSION}_android.zip seattle_android.zip -popd - -echo "New base installers created and installed for user $USER." From 023c16699163611d6a491ce3eb62bfbfbd18d7b3 Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:33:50 -0500 Subject: [PATCH 12/17] Delete seattleinstaller.py --- seattleinstaller.py | 2315 ------------------------------------------- 1 file changed, 2315 deletions(-) delete mode 100644 seattleinstaller.py diff --git a/seattleinstaller.py b/seattleinstaller.py deleted file mode 100644 index 10e7bab..0000000 --- a/seattleinstaller.py +++ /dev/null @@ -1,2315 +0,0 @@ -""" - - seattleinstaller.py - - - February 10, 2009 - Amended June 11, 2009 - Amended July 30, 2009 - Amended September 9, 2009 - - - Carter Butaud - Amended by Zachary Boka - - - Installs seattle on any supported system. This means setting up the computer - to run seattle at startup, generating node keys, customizing configuration - files, and starting seattle running (not necessarily in that order, except - that seattle must always be started running last). -""" - -# Let's make sure the version of python is supported. -import checkpythonversion -checkpythonversion.ensure_python_version_is_supported() - -import os -import shutil -import platform -import sys -import getopt -import tempfile -import time -import getpass - - -# Python should do this by default, but doesn't on Windows CE. 
-sys.path.append(os.getcwd()) -import servicelogger -import nonportable -import createnodekeys -import repy_constants -import persist # Armon: Need to modify the NM config file -import benchmark_resources -# Anthony - traceback is imported so that benchmarking can be logged -# before the vessel state has been created (servicelogger does not work -# without the v2 directory). -import traceback - - -SILENT_MODE = False -KEYBITSIZE = 1024 -DISABLE_STARTUP_SCRIPT = False -OS = nonportable.ostype -SUPPORTED_OSES = ["Windows", "WindowsCE", "Linux", "Darwin"] -# Supported Windows Versions: XP, Vista, 7 -# NOTE: -# To support newer versions of Windows (or when changing the Python version -# included with the Windows installer package), ammend the function -# get_filepath_of_win_startup_folder_with_link_to_seattle() below. - -RESOURCE_PERCENTAGE = 10 -# Armon: DISABLE_INSTALL: Special flag for testing purposes that can be -# accessed from the command-line argument "--onlynetwork". All -# pre-install actions are performed, but the actual install is disabled. -DISABLE_INSTALL = False -# Specify the directory containing all seattle files. -SEATTLE_FILES_DIR = os.path.realpath(".") - -# Import subprocess if not in WindowsCE -subprocess = None -if OS != "WindowsCE": - import subprocess - -# Import windows_api if in Windows or WindowsCE -windows_api = None -if OS == "WindowsCE": - import windows_api - -# Import _winreg if in Windows or WindowsCE -_winreg = None -if OS == "Windows" or OS == "WindowsCE": - import _winreg - - -IS_ANDROID = False - - - -class CronAccessibilityFilesPermissionDeniedError(Exception): - pass - -class CronAccessibilityFilesNotFoundError(Exception): - pass - -class CannotDetermineCronStatusError(Exception): - pass - -class DetectUserError(Exception): - pass - -class UnsupportedOSError(Exception): - pass - -class AlreadyInstalledError(Exception): - pass - - - - -def _output(text): - """ - For internal use. 
- If the program is not in silent mode, prints the input text. - """ - if not SILENT_MODE: - print text - - - - -def find_substring_in_a_file_line(search_absolute_filepath,substring): - """ - - Determine if the given substring exists in at least one line in the given - file by opening a file object for the file in search_absolute_filepath. - - - search_absolute_filepath: - The absolute file path to the file that will be searched for the given - substring. - - substring: - The substring that will be searched for in file named by - search_absolute_filepath. - - - IOError if the supplied file path does not exist. - - - None. - - - True if the substring is found in at least one line in the file specified - by search_absolute_filepath, - False otherwise. - """ - - file_obj = open(search_absolute_filepath,"r") - for line in file_obj: - if substring in line: - file_obj.close() - return True - - file_obj.close - return False - - - - -def preprocess_file(absolute_filepath, substitute_dict, comment="#"): - """ - - Looks through the given file and makes all substitutions indicated in lines - the do not begin with a comment. - - - absolute_filepath: - The absolute path to the file that should be preprocessed. - substitute_dict: - Map of words to be substituted to their replacements, e.g., - {"word1_in_file": "replacement1", "word2_in_file": "replacement2"} - comment: - A string which indicates commented lines; lines that start with this will - be ignored, but lines that contain this symbol somewhere else in the line - will be preprocessed up to the first instance of the symbol. Defaults to - "#". To preprocess all lines in a file, set as the empty string. - - - IOError on bad file names. - - - None. - - - None. - """ - edited_lines = [] - base_fileobj = open(absolute_filepath, "r") - - for fileline in base_fileobj: - commentedOutString = "" - - if comment == "" or not fileline.startswith(comment): - # Substitute the replacement string into the file line. 
- - # First, test whether there is an in-line comment. - if comment != "" and comment in fileline: - splitLine = fileline.split(comment,1) - fileline = splitLine[0] - commentedOutString = comment + splitLine[1] - - for substitute in substitute_dict: - fileline = fileline.replace(substitute, substitute_dict[substitute]) - - edited_lines.append(fileline + commentedOutString) - - base_fileobj.close() - - # Now, write those modified lines to the actual starter file location. - final_fileobj = open(absolute_filepath, "w") - final_fileobj.writelines(edited_lines) - final_fileobj.close() - - - - -def get_filepath_of_win_startup_folder_with_link_to_seattle(): - """ - - Gets what the full filepath would be to a link to the seattle starter script - in the Windows startup folder. Also tests whether or not that filepath - exists (i.e., whether or not there is currently a link in the startup folder - to run seattle at boot). - - - None. - - - UnsupportedOSException if the operating system is not Windows or WindowsCE. - IOError may be thrown if an error occurs while accessing a file. - - - None. - - - A tuple is returned with the first value being the filepath to the link in - the startup folder that will run seattle at boot. The second value is a - boolean value: True indicates the link currently exists in the startup - folder, and False if it does not. - """ - if OS == "WindowsCE": - startup_path = "\\Windows\\Startup" + os.sep \ - + get_starter_shortucut_file_name() - return (startup_path, os.path.exists(startup_path)) - - elif OS != "Windows": - raise UnsupportedOSError("The startup folder only exists on Windows.") - - - # The startup_path is the same for Vista and Windows 7. - # - # As discussed per ticket #1059, different Python versions return - # different names for Windows 7 (see also http://bugs.python.org/issue7863). 
- # Testing on Windows 7 Professional, 64 bits, German localization, - # platform.release() returns - # "Vista" for Python versions 2.5.2 and 2.5.4, - # "post2008Server" for versions 2.6.2 to 2.6.5, and - # "7" for versions 2.6.6 and 2.7.0 to 2.7.3. - # Please adapt this once new Python/Windows versions become available. - - release = platform.release() - if release == "Vista" or release == "post2008Server" or release == "7" or release == "8": - startup_path = os.environ.get("HOMEDRIVE") + os.environ.get("HOMEPATH") \ - + "\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs" \ - + "\\Startup" + os.sep + get_starter_shortcut_file_name() - return (startup_path, os.path.exists(startup_path)) - - elif release == "XP": - startup_path = os.environ.get("HOMEDRIVE") + os.environ.get("HOMEPATH") \ - + "\\Start Menu\\Programs\\Startup" + os.sep \ - + get_starter_shortcut_file_name() - return (startup_path, os.path.exists(startup_path)) - - - else: - raise UnsupportedOSError(""" -Sorry, we couldn't detect your Windows version. -Please contact the Seattle development team at - - seattle-devel@googlegroups.com - -to resolve this issue. Version details: -Python version: """ + str(platform.python_version()) + -"\nPlatform arch: " + str(platform.architecture()) + -"\nPlatform release: " + str(platform.release()) + -"\nPlatform version string: " + str(platform.version())) - - - -def get_starter_file_name(): - """ - - Returns the name of the starter file on the current operating system. - - - None. - - - UnsupportedOSError if the operating system requested is not supported. - - - None. - - - A string containing the name of the starter file. - """ - - if OS == "Windows": - return "start_seattle.bat" - elif OS == "WindowsCE": - return "start_seattle.py" - elif OS == "Linux" or OS == "Darwin": - return "start_seattle.sh" - else: - raise UnsupportedOSError("This operating system is not supported. 
" \ - + "Currently, only the following operating " \ - + "systems are supported: " + SUPPORTED_OSES) - - - - - -def get_starter_shortcut_file_name(): - """ - - Returns the name of the starter shortcut file on the current operating - system. - - - None. - - - UnsupportedOSError if the operating system requested is not supported. - - - None. - - - A string containing the name of the starter shortcut file. - """ - - if OS == "Windows": - return "start_seattle_shortcut.bat" - else: - raise UnsupportedOSError("Only the Windows installer contains a shortcut " \ - + "for the seattle starter batch file.") - - - - -def get_stopper_file_name(): - """ - - Returns the name of the stopper file on the current operating system. - - - None. - - - UnsupportedOSError if the operating system requested is not supported. - - - None. - - - A string containing the name of the stopper file. Returns an empty string - if the supported operating system does not contain a stopper file. - """ - - if OS == "Windows": - return "stop_seattle.bat" - elif OS == "WindowsCE": - return "" - elif OS == "Linux" or OS == "Darwin": - return "stop_seattle.sh" - else: - raise UnsupportedOSError("This operating system is not supported. " \ - + "Currently, only the following operating " \ - + "systems are supported: " + SUPPORTED_OSES) - - - - -def get_uninstaller_file_name(): - """ - - Returns the name of the uninstaller file on the current operating - system. - - - None. - - - UnsupportedOSError if the operating system requested is not supported. - - - None. - - - The name of the uninstaller file. - """ - if OS == "Windows": - return "uninstall.bat" - elif OS == "WindowsCE": - return "uninstall.py" - elif OS == "Linux" or OS == "Darwin": - return "uninstall.sh" - else: - raise UnsupportedOSError("This operating system is not supported. 
" \ - + "Currently, only the following operating " \ - + "systems are supported: " + SUPPORTED_OSES) - - - - -def search_value_in_win_registry_key(opened_key,seeking_value_name): - """ - - Searches a given key to see if a given value exists for that key. - - - opened_key: - An already opened key that will be searched for the given value. For a - key to be opened, it must have had either the _winreg.OpenKey(...) or - _winreg.CreateKey(...) function performed on it. - - seeking_value_name: - A string containing the name of the value to search for within the - opened_key. - - - UnsupportedOSError if the operating system is not Windows or WindowsCE. - WindowsError if opened_key has not yet been opened. - - - None. - - - True if seeking_value_name is found within opened_key. - False otherwise. - """ - if OS != "Windows" and OS != "WindowsCE": - raise UnsupportedOSError("This operating system must be Windows or " \ - + "WindowsCE in order to manipulate registry " \ - + "keys.") - - # Test to make sure that opened_key was actually opened by obtaining - # information about that key. - # Raises a WindowsError if opened_key has not been opened. - # subkeycount: the number of subkeys opened_key contains. (not used). - # valuescount: the number of values opened_key has. - # modification_info: long integer stating when the key was last modified. - # (not used) - subkeycount, valuescount, modification_info = _winreg.QueryInfoKey(opened_key) - if valuescount == 0: - return False - - - try: - value_data,value_type = _winreg.QueryValueEx(opened_key,seeking_value_name) - # No exception was raised, so seeking_value_name was found. - return True - except WindowsError: - return False - - - - -def remove_seattle_from_win_startup_folder(): - """ - - Removes the seattle startup script from the Windows startup folder if it - exists. - - - None. - - - UnsupportedOSError if the os is not supported (i.e., a Windows machine). 
- IOError may be raised if an error occurs during file and filepath - manipulation. - - - Removes the seattle startup script from the Windows startup folder if it - exists. - - - True if the function removed the link to the startup script, meaning it - previously existed. - False otherwise, meaning that a link to the startup script did not - previously exist. - """ - if OS != "Windows" and OS != "WindowsCE": - raise UnsupportedOSError("This must be a Windows operating system to " \ - + "access the startup folder.") - - # Getting the startup path in order to see if a link to seattle has been - # installed there. - full_startup_file_path,file_path_exists = \ - get_filepath_of_win_startup_folder_with_link_to_seattle() - if file_path_exists: - os.remove(full_startup_file_path) - return True - else: - return False - - - - -def add_seattle_to_win_startup_folder(): - """ - - Add the seattle startup script to the Windows startup folder. - - - None. - - - UnsupportedOSError if the os is not supported (i.e., a Windows machine). - IOError may be raised if an error occurs during file and filepath - manipulation. - - - Adds the seattle startup script to the Windows startup folder. - - - None. - """ - if OS != "Windows" and OS != "WindowsCE": - raise UnsupportedOSError("This must be a Windows operating system to " \ - + "access the startup folder.") - - # Getting the startup path in order to copy the startup file there which will - # make seattle start when the user logs in. 
- full_startup_file_path,file_path_exists = \ - get_filepath_of_win_startup_folder_with_link_to_seattle() - if file_path_exists: - raise AlreadyInstalledError("seattle was already installed in the " \ - + "startup folder.") - else: - shutil.copy(SEATTLE_FILES_DIR + os.sep + get_starter_shortcut_file_name(), - full_startup_file_path) - - - - -def add_to_win_registry_Local_Machine_key(): - """ - - Adds seattle to the Windows registry key Local_Machine which runs programs - at machine startup (regardless of which user logs in). - - - None. - - - UnsupportedOSError if the os is not supported (i.e., a Windows machine). - AlreadyInstalledError if seattle has already been installed on the system. - - - Adds a value named "seattle", which contains the absolute file path to the - seattle starter script, to the startup registry key. - - - True if succeeded in adding seattle to the registry, - False otherwise. - """ - - if OS != "Windows" and OS != "WindowsCE": - raise UnsupportedOSError("This machine must be running Windows in order " \ - + "to access the Windows registry.") - - # The following entire try: block attempts to add seattle to the Windows - # registry to run seattle at machine startup regardless of user login. - try: - # The startup key must first be opened before any operations, including - # searching its values, may be performed on it. - - # ARGUMENTS: - # _winreg.HKEY_LOCAL_MACHINE: specifies the key containing the subkey used - # to run programs at machine startup - # (independent of user login). - # "Software\\Microsoft\\Windows\\CurrentVersion\\Run": specifies the subkey - # that runs programs on - # machine startup. - # 0: a reserved integer that must be zero. - # _winreg.KEY_ALL_ACCESS: an integer that acts as an access map that - # describes desired security access for this key. - # In this case, we want all access to the key so it - # can be modified. 
(Default: _winreg.KEY_READ) - startup_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, - "Software\\Microsoft\\Windows\\CurrentVersion\\Run", - 0, _winreg.KEY_ALL_ACCESS) - except WindowsError: - return False - - else: - # The key was successfully opened. Now check to see if seattle was - # previously installed in this key. *Note that the key should be closed in - # this else: block when it is no longer needed. - if search_value_in_win_registry_key(startup_key, "seattle"): - # Close the key before raising AlreadyInstalledError. - _winreg.CloseKey(startup_key) - raise AlreadyInstalledError("seattle is already installed in the " \ - + "Windows registry startup key.") - - try: - # seattle has not been detected in the registry from a previous - # installation, so attempting to add the value now. - - # _winreg.SetValueEx(...) creates the value "seattle", if it does not - # already exist, and simultaneously adds the given - # data to the value. - # ARGUMENTS: - # startup_key: the opened subkey that runs programs on startup. - # "seattle": the name of the new value to be created under startup_key - # that will make seattle run at machine startup. - # 0: A reserved value that can be anything, though zero is always passed - # to the API according to python documentation for this function. - # _winreg.REG_SZ: Specifies the integer constant REG_SZ which indicates - # that the type of the data to be stored in the value is a - # null-terminated string. - # SEATTLE_FILES_DIR + os.sep + get_starter_file_name(): The data of the - # new value being created - # containing the full path - # to seattle's startup - # script. - _winreg.SetValueEx(startup_key, "seattle", 0, _winreg.REG_SZ, - SEATTLE_FILES_DIR + os.sep + get_starter_file_name()) - servicelogger.log(" seattle was successfully added to the Windows " \ - + "registry key to run at startup: " \ - + "HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows" \ - + "\\CurrentVersion\\Run") - # Close the key before returning. 
- _winreg.CloseKey(startup_key) - return True - - - except WindowsError: - # Close the key before falling through the try: block. - _winreg.CloseKey(startup_key) - return False - - - - - - -def add_to_win_registry_Current_User_key(): - """ - - Sets up seattle to run at user login on this Windows machine. - - - None. - - - UnsupportedOSError if the os is not supported (i.e., a Windows machine). - AlreadyInstalledError if seattle has already been installed on the system. - - - Adds a value named "seattle", which contains the absolute file path to the - seattle starter script, to the user login registry key. - - - True if succeeded in adding seattle to the registry, - False otherwise. - """ - if OS != "Windows" and OS != "WindowsCE": - raise UnsupportedOSError("This machine must be running Windows in order " \ - + "to access the Windows registry.") - - # The following entire try: block attempts to add seattle to the Windows - # registry to run seattle at user login. - try: - # The startup key must first be opened before any operations, including - # searching its values, may be performed on it. - - # ARGUMENTS: - # _winreg.HKEY_CURRENT_MACHINE: specifies the key containing the subkey used - # to run programs at user login. - # "Software\\Microsoft\\Windows\\CurrentVersion\\Run": specifies the subkey - # that runs programs on - # user login. - # 0: a reserved integer that must be zero. - # _winreg.KEY_ALL_ACCESS: an integer that acts as an access map that - # describes desired security access for this key. - # In this case, we want all access to the key so it - # can be modified. (Default: _winreg.KEY_READ) - startup_key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, - "Software\\Microsoft\\Windows\\CurrentVersion\\Run", - 0, _winreg.KEY_ALL_ACCESS) - except WindowsError: - return False - - else: - # The key was successfully opened. Now check to see if seattle was - # previously installed in this key. 
*Note that the key should be closed in - # this else: block when it is no longer needed. - if search_value_in_win_registry_key(startup_key, "seattle"): - # Close the key before raising AlreadyInstalledError. - _winreg.CloseKey(startup_key) - raise AlreadyInstalledError("seattle is already installed in the " \ - + "Windows registry startup key.") - - try: - # seattle has not been detected in the registry from a previous - # installation, so attempting to add the value now. - - # _winreg.SetValueEx(...) creates the value "seattle", if it does not - # already exist, and simultaneously adds the given - # data to the value. - # ARGUMENTS: - # startup_key: the opened subkey that runs programs on user login. - # "seattle": the name of the new value to be created under startup_key - # that will make seattle run at user login. - # 0: A reserved value that can be anything, though zero is always passed - # to the API according to python documentation for this function. - # _winreg.REG_SZ: Specifies the integer constant REG_SZ which indicates - # that the type of the data to be stored in the value is a - # null-terminated string. - # SEATTLE_FILES_DIR + os.sep + get_starter_file_name(): The data of the - # new value being created - # containing the full path - # to seattle's startup - # script. - _winreg.SetValueEx(startup_key, "seattle", 0, _winreg.REG_SZ, - SEATTLE_FILES_DIR + os.sep + get_starter_file_name()) - servicelogger.log(" seattle was successfully added to the Windows " \ - + "registry key to run at user login: " \ - + "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows" \ - + "\\CurrentVersion\\Run") - # Close the key before returning. - _winreg.CloseKey(startup_key) - return True - - except WindowsError: - # Close the key before falling through the try: block. - _winreg.CloseKey(startup_key) - return False - - - - - -def setup_win_startup(): - """ - - Sets up seattle to run at startup on this Windows machine. 
First, this means - adding a value, with absolute file path to the seattle starter script, to - the machine startup and user login registry keys (HKEY_LOCAL_MACHINE and - HKEY_CURRENT_USER) which will run seattle at startup regardless of which - user logs in and when the current user logs in (in the case where a machine - is not shut down between users logging in and out). Second, if that fails, - this method attempts to add a link to the Windows startup folder which will - only run seattle when this user logs in. - - - None. - - - UnsupportedOSError if the os is not supported (i.e., a Windows machine). - AlreadyInstalledError if seattle has already been installed on the system. - IOError may be raised if an error occurs during file and filepath - manipulation in one of the sub-functions called by this method. - - - Adds a value named "seattle", which contains the absolute file path to the - seattle starter script, to the startup registry key, or adds seattle to the - startup folder if adding to the registry key fails. - - If an entry is successfully made to the registry key and a pre-existing link - to seattle exists in the startup folder, the entry in the startup folder is - removed. - - - None. - """ - - # Check to make sure the OS is supported - if OS != "Windows" and OS != "WindowsCE": - raise UnsupportedOSError("This operating system must be Windows or " \ - + "WindowsCE in order to modify a registry " \ - + "or startup folder.") - - try: - added_to_CU_key = add_to_win_registry_Current_User_key() - added_to_LM_key = add_to_win_registry_Local_Machine_key() - except Exception,e: - # Fall through try: block to setup seattle in the startup folder. 
- _output("seattle could not be installed in the Windows registry for the " \ - + "following reason: " + str(e)) - servicelogger.log(" seattle was NOT setup in the Windows registry " \ - + "for the following reason: " + str(e)) - else: - if added_to_CU_key or added_to_CU_key: - # Succeeded in adding seattle to the registry key, so now remove seattle - # from the startup folder if there is currently a link there from a - # previous installation. - if remove_seattle_from_win_startup_folder(): - _output("seattle was detected in the startup folder.") - _output("Now that seattle has been successfully added to the " \ - + "Windows registry key, the link to run seattle has been " \ - + "deleted from the startup folder.") - servicelogger.log(" A link to the seattle starter file from a " \ - + "previous installation was removed from the " \ - + "startup folder during the current installation.") - # Since seattle was successfully installed in the registry, the job of - # this function is finished. - return - - else: - _output("This user does not have permission to access the user registry.") - - - - - - # Reaching this point means modifying the registry key failed, so add seattle - # to the startup folder. - _output("Attempting to add seattle to the startup folder as an " \ - + "alternative method for running seattle at startup.") - add_seattle_to_win_startup_folder() - servicelogger.log(" A link to the seattle starter script was installed in " \ - + "the Windows startup folder rather than in the " \ - + "registry.") - - - - -def test_cron_is_running(): - """ - - Try to find out if cron is installed and running on this system. This is not - a straight-forward process because many operating systems install cron in - different locations. Further, not all the current known locations of - where cron may be installed will allow for the status (whether or not cron - is actually running) to be checked. 
As a result, the most general method of - trying to find if cron is running is performed first (grep the list of - current processes looking for cron), then if that fails, the following list - of possible cron file locations where the cron status can be checked are - searched. If all else fails, a CannotDetermineCronStatusError is raised. - - Current list of possible cron locations where the status of cron can be - checked: - - DEBIAN AND UBUNTU: /etc/init.d/cron - DEBIAN AND UBUNTU: /etc/init.d/crond - FREEBSD and others?: /etc/rc.d/cron - unknown: /etc/rc.d/init.d/cron - - - - None. - - - UnsupportedOSError if this is not a Linux or Mac box. - CannotDetermineCronStatusError if cron is installed but it cannot be - determined whether or not it is running. - - - None. - - - True if cron is running on this machine, - False otherwise. - """ - - if not OS == "Linux" and not OS == "Darwin": - raise UnsupportedOSError("This must be a Linux or Macintosh machine to " \ - + "test if cron is running.") - - # First, try the most general way of seeing if cron is running. - - # Due to the pipes in the subprocess.Popen command, it makes more sense to - # send the command as one string rather than breaking up the cammand into - # three separate subprocess.Popen processes and piping the output of one into - # the input of the next. - grep_cron_stdout,grep_cron_stderr = \ - subprocess.Popen("ps -ef | grep cron | grep -v grep",shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - - if "cron" in grep_cron_stdout: - return (True,None) - else: - grep_crond_stdout,grep_crond_stderr = \ - subprocess.Popen("ps -ef | grep crond | grep -v grep",shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - if "crond" in grep_crond_stdout: - return (True,None) - - - - - # Reaching this point means cron was not detected using the most general - # method. Cron may still be running or installed, but may not be easily - # accessible. 
For example, FreeBSD seems to have some trouble running the - # command "ps -ef | grep cron | grep -v grep". - - # Try to get the status of cron if possible. - - # Depending on the system and distribution, the cron file that allows the cron - # status to be checked could appear in a variety of places. - cron_file_paths_list = ["/etc/init.d/cron","/etc/init.d/crond", - "/etc/rc.d/init.d/cron","/etc/rc.d/cron"] - - cron_status_path = None - for possible_cron_path in cron_file_paths_list: - # Test if possible_cron_path exists and is executable. - if os.access(possible_cron_path,os.X_OK): - cron_status_path = possible_cron_path - break - - if cron_status_path == None: - # Not able to detect cron on this machine. Because the cron_file_paths_list - # may be incomplete, this function cannot return false but must instead - # raise a CannotDetermineCronStatusError. - raise CannotDetermineCronStatusError("Cannot determine if cron is " \ - + "installed, and thus cannot " \ - + "test if cron is running.") - - else: - # Try to get the status of cron from the found cron_status_path. - try: - cron_status_stdout,cron_status_stderr = \ - subprocess.Popen([cron_status_path,"status"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - except Exception: - raise CannotDetermineCronStatusError("User cannot access the cron " \ - + "status.") - else: - # Because there will be various outputs depending on the OS and whether - # or not cron is installed, many conditions are needed to attempt to - # capture the cron status. - if not cron_status_stdout and not cron_status_stderr: - # If there is no output, then the status cannot be determined. - raise CannotDetermineCronStatusError("No output produced from the " \ - + "cron status command.") - elif "not running" in cron_status_stdout: - # If "not running" appears in the stdout output, return False. 
- return (False,cron_status_path) - elif cron_status_stdout and not cron_status_stderr: - # After those tests, if there is stdout output and no stderr output, - # return True to indicate that cron is running. - return (True,cron_status_path) - else: - # For any other unpredicted conditions, raise a - # CannotDetermineCronStatusError - raise CannotDetermineCronStatusError("The output produced by the " \ - + "cron status command could " \ - + "not be interpreted.") - - - - -def test_cron_accessibility(): - """ - - Find out if the user has access to use cron by examining the allow and deny - files for cron. Depending on the operating system, these files may be - located in various locations. Below is a list of probable locations - depending on the system (this list may not be complete). - - DEBIAN: /etc/cron.allow - SuSE: /var/spool/cron/allow - MAC: /usr/lib/cron/cron.allow - UNIX?: /etc/cron.d/cron.allow - BSD: /var/cron/allow - - Because there are so many options, this function searches for any of these - files on the system. If any of them exist, then we have found the location - of the accessibility files on this machine. If this function cannot find - any of these files, then we assume the accessibility files do not exist on - this machine, meaning that the user may or may not have access to cron - depending on the system on its specifications with cron (this cannot be - determined by us, so a CronAccessibilityFilesNotFoundError is raised). - - - None. - - - UnsupportedOSError if this not a Linux or Mac box. - CronAccessibilityFilesNotFoundError if the allow or deny files cannot be - found on this system. - DectectUserError if cron accessibility files are found but the user name - cannot be determined. - CronAccessibilityFilesPermissionDeniedError when the cron accessibility - files are found but the user does not have permission to read them. - - - None. - - - True if the user has access to use cron, - False otherwise. 
- """ - - if not OS == "Linux" and not OS == "Darwin": - raise UnsupportedOSError("This must be a Linux or Macintosh machine to " \ - + "test if cron is running.") - - cron_allow_path = None - cron_deny_path = None - cron_accessibility_paths = [("/etc/cron.allow","/etc/cron.deny"), - ("/var/spool/cron/allow","/var/spool/cron/deny"), - ("/usr/lib/cron/cron.allow", - "/usr/lib/cron/cron.allow"), - ("/etc/cron.d/cron.allow", - "/etc/cron.d/cron.deny"), - ("/var/cron/allow","/var/cron/deny")] - - # Try to figure out the location of the cron accessibility files. - for (possible_allow_path,possible_deny_path) in cron_accessibility_paths: - if os.path.exists(possible_allow_path) \ - or os.path.exists(possible_deny_path): - cron_allow_path = possible_allow_path - cron_deny_path = possible_deny_path - break - - - if cron_allow_path == None and cron_deny_path == None: - # The cron accessibility files do not exist. - raise CronAccessibilityFilesNotFoundError("Unable to detect existing " \ - + "cron.allow and " \ - + "cron.deny files.") - else: - try: - # Get the user name. - user_name = getpass.getuser() - except Exception,e: - # The user name cannot be determined, and thus the cron.allow and/or the - # cron.deny files cannot be checked to see if the username appears in - # them. - raise DetectUserError("At least one of the cron accessibility files " \ - + "were found, but they could not be searched " \ - + "because the user name could not be " \ - + "determined.") - - # If cron.allow exists, then the user MUST be listed therein in order to use - # cron. - if os.path.exists(cron_allow_path): - try: - found_in_allow = find_substring_in_a_file_line(cron_allow_path, - user_name) - except Exception,e: - raise CronAccessibilityFilesPermissionDeniedError(cron_allow_path) - else: - return (found_in_allow,None) - - # If cron.deny exists AND cron.allow does not exist, then the user must NOT - # be listed therein in order to use cron. 
- elif os.path.exists(cron_deny_path): - try: - found_in_deny = find_substring_in_a_file_line(cron_deny_path,user_name) - except Exception,e: - raise CronAccessibilityFilesPermissionDeniedError(cron_deny_path) - else: - return (not found_in_deny,cron_deny_path) - - - - -def find_mount_point_of_seattle_dir(): - """ - - Find the mount point of the directory in which seattle is currently being - installed. - - - None. - - - None. - - - None. - - - The mount point for the directory in which seattle is currently being - installed. - """ - - potential_mount_point = SEATTLE_FILES_DIR - - # To prevent a potential, yet unlikely, infinite loop from occuring, exit the - # while loop if the current potential mount point is the same as - # os.path.dirname(potential_mount_point). - while not os.path.ismount(potential_mount_point) \ - and potential_mount_point != os.path.dirname(potential_mount_point): - potential_mount_point = os.path.dirname(potential_mount_point) - - return potential_mount_point - - - - -def add_seattle_to_crontab(): - """ - - Adds an entry to the crontab to run seattle automatically at boot. - - HIGH-LEVEL DESCRIPTION OF CRONTAB ENTRY FUNCTIONALITY: - Check if the seattle start script exists: if so, start seattle. - Else if the mount point for the seattle directory isn't mounted, sit in - a 60 second sleep loop until the mount point has been mounted, then start - start seattle. - Otherwise, the seattle start script has been removed, so remove the - seattle crontab entry. - - *NOTE: Further functionality to check if the seattle start script has been - deleted once the mount point is detected was NOT added to the - crontab entry because it is already highly unlikely that cron will - be started before the directory is mounted. NFS appears to make - all directories appear mounted to the OS at all times. - - - None. - - - OSError if cron is not installed on this system. - - - Adds an entry to the crontab. 
- - - True if an entry for seattle was successfully added to the crontab, - False otherwise. - """ - # Check to see if the crontab has already been modified to run seattle. - crontab_contents_stdout,crontab_contents_stderr = \ - subprocess.Popen(["crontab", "-l"], stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - if get_starter_file_name() in crontab_contents_stdout: - raise AlreadyInstalledError("An entry for seattle was already detected " \ - + "in the crontab.") - - - # Since seattle is not already installed, modify crontab to run seattle at - # boot. - - # First, get the service vessel where standard error produced from cron will - # be written. - service_vessel = servicelogger.get_servicevessel() - - # Next, find the mount point which will be included in the seattle crontab - # entry in case the user installs on a network filesystem. This way the - # seattle entry will not automatically erroneously remove itself if the user's - # filesystem has not yet been mounted. - mount_point = find_mount_point_of_seattle_dir() - - - # The crontab entry automatically removes itself in the event that the seattle - # directory no longer exists (the user removed it without uninstalling). In - # this case, the crontab entry must use mktemp to create a file with - # secure permissions in which to store the modified crontab contents while the - # seattle entry is being removed from crontab. This prevents a malicious - # program on another user account from changing the modified crontab contents - # before it is read back into crontab. - # The mktemp command is different accross platforms, so we create a temp - # file using the '-t' option so mkfile is consistent for our purposes across - # platforms. On regular linux systems, the "XXXXX" in "tempcrontab.XXXXX" - # will be replaced by randomly chosen characters/numbers. 
On Mac and BSD, - # the "XXXXX" remain part of the file name, and a randomly chosen string of - # characters/numbers are appended to the file name. In both cases, a - # randomly generated file with secure permissions is created on the stop. - cron_line_entry = '@reboot if [ -e "' + SEATTLE_FILES_DIR + os.sep \ - + get_starter_file_name() + '" ]; then "' + SEATTLE_FILES_DIR + os.sep \ - + get_starter_file_name() + '" >> "' + SEATTLE_FILES_DIR + os.sep \ - + service_vessel + '/cronlog.txt" 2>&1; elif [ "`mount | ' \ - + 'grep -e \'[ ]' + mount_point + '[/]*[ ]\'`" = "" ]; then ' \ - + 'while [ "`mount | grep -e \'[ ]' + mount_point + '[/]*[ ]\'`" = ""]; '\ - + 'do sleep 60s; done && "' + SEATTLE_FILES_DIR + os.sep \ - + get_starter_file_name() + '" >> "' + SEATTLE_FILES_DIR + os.sep \ - + service_vessel + '/cronlog.txt" 2>&1; else ' \ - + 'modifiedCrontab=`mktemp -t tempcrontab.XXXXX` && crontab -l | ' \ - + 'sed \'/start_seattle.sh/d\' > ${modifiedCrontab} && ' \ - + 'crontab ${modifiedCrontab} && rm -rf ${modifiedCrontab}; fi' \ - + os.linesep - - # Generate a temp file with the user's crontab plus our task. - temp_crontab_file = tempfile.NamedTemporaryFile() - temp_crontab_file.write(crontab_contents_stdout) - temp_crontab_file.write(cron_line_entry) - temp_crontab_file.flush() - - # Now, replace the crontab with that temp file and remove(close) the - # tempfile. - replace_crontab = subprocess.Popen(["crontab",temp_crontab_file.name], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - replace_crontab.wait() - temp_crontab_file.close() - - - - - # Finally, confirm that seattle was successfully added to the crontab. - crontab_contents_stdout,crontab_contents_stderr = \ - subprocess.Popen(["crontab", "-l"], stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - if get_starter_file_name() in crontab_contents_stdout: - return True - else: - return False - - - - -# Derek Cheng: added function for setting up the startup of Seattle on the -# Nokia tablet. 
This is to be called from setup_linux_or_mac() since the Nokia -# runs on a Linux-based OS. -def setup_nokia_startup(): - """ - - Sets up seattle to run at startup on a Nokia tablet. It requires the user - to be currently on root access (checked in main()). It creates a short - shell script in /etc/init.d that will in turn run start_seattle.sh, and a - symlink in /etc/rc2.d that will link to the short script in /etc/init.d. - These two files will cause Seattle to run on startup. - - - None. - - - AlreadyInstalledError if seattle has already been installed on the system. - - - None. - - - True if the files are created successfully, - False otherwise. - """ - - - # Note to developers: If you need to change the path of the startup script or - # the path of the symlink, make sure you keep it consistent with those in - # test_seattle_is_installed() and seattleuninstaller.py. - - # The name of the startup script. - startup_script_name = "nokia_seattle_startup.sh" - # The directory where the startup script will reside. - startup_script_dir = "/etc/init.d/" - # The full path to the startup script. - startup_script_path = startup_script_dir + startup_script_name - - # The name of the symlink that links to the startup script. - symlink_name = "S99startseattle" - # The directory where the symlink to the startup script will reside. - symlink_dir = "/etc/rc2.d/" - # The full path to the symlink. - symlink_path = symlink_dir + symlink_name - - # The username of the user. This is assumed to be 'user'. - # However, if you do change your user name on the Nokia, - # you will need to modify the following line to match your user name. - username = "user" - - # If the startup script or the symlink already exists prior to this - # installation, an AlreadyInstalledError is raised. - if os.path.exists(startup_script_path) or \ - os.path.lexists(symlink_dir + symlink_name): - _output("The files that are required for running Seattle on startup " \ - + "already exists. 
If you would like a clean installation, " \ - + "please run the uninstaller first to remove those files.") - servicelogger.log("The startup files were not added to the /etc/ " \ - + "directories because they already existed prior to " \ - + "the installation.") - raise AlreadyInstalledError() - - # The contents of the startup script in its entirety. - # This line indicates that it is a shell script. - startup_script_content = "#! /bin/sh" + "\n" - # This line runs start_seattle.sh as "user". - startup_script_content += "su - " + username + " -c " \ - + os.path.realpath(get_starter_file_name()) + "\n" - - # Creates the startup script file. - try: - startup_script_handle = file(startup_script_path, 'w') - except: - _output("Cannot create startup script file. Make sure you have the " \ - + "permission to do so.") - servicelogger.log("Seattle was not configured to run on startup because " \ - + startup_script_path + " cannot be created.") - return False - - # Writes the startup script content to the startup script file. - try: - startup_script_handle.write(startup_script_content) - except: - _output("Cannot write to the startup script file. Make sure you have the " \ - + "permission to do so.") - servicelogger.log("Seattle was not configured to run on startup because " \ - + startup_script_path + " cannot be written to.") - return False - finally: - startup_script_handle.close() - - # Derek Cheng: This is for changing the permission bits on the startup scripts - # on the Nokia tablet. (i.e., stat.S_IXUSR gives owner permission to execute) - # JAC: this is only needed on the Nokia, so is imported here since stat isn't - # portable - import stat - # Changes the permission bits of the startup script to executable by owner. - try: - os.chmod(startup_script_path, stat.S_IXUSR) - except: - _output("Cannot change the startup script permission to executable. 
Make " \ - + "sure you have the permission to do so.") - servicelogger.log("Seattle was not configured to run on startup because " \ - + "permissions of " + startup_script_path + \ - " cannot be changed.") - # This is an attempt to clean up by removing the script file if chmod fails. - try: - os.remove(startup_script_path) - except: - pass - return False - - # Creates the symlink to the startup script at symlink_dir. - try: - os.symlink(startup_script_path, symlink_path) - except: - _output("Cannot create symlink to the startup script. Make sure you have " \ - + "the permission to do so.") - servicelogger.log("Seattle was not configured to run on startup because " \ - + " the symlink " + symlink_path + " cannot be " \ - + "created.") - # Attempt to clean up by removing the startup script. - try: - os.remove(startup_script_path) - except: - pass - return False - - servicelogger.log("Seattle has been configured to run on startup. Two " \ - + "files were created: " + startup_script_path + " and " \ - + symlink_path +".") - return True - - - - -def setup_linux_or_mac_startup(): - """ - - Sets up seattle to run at startup on this Linux or Macintosh machine. This - means adding an entry to crontab after running tests to make sure that cron - is running and that the user has the ability to modify the crontab. If any - of these tests show problems, the appropriate output is given to the user. - Otherwise, if seattle is successfully configured to run automatically at - machine boot, then no output is given to the user. - For Nokia N800/900 Tablets, crontab will not be used. Instead, two files - are created in the special directories (/etc/init.d and /etc/rc2.d by - default) that will cause Seattle to run on startup. - - - None. - - - UnsupportedOSError if the os is not supported. - AlreadyInstalledError if seattle has already been installed on the system. - cron nor crond are found on this system. - - - None. 
- - - True if the crontab was able to be modified, - False otherwise. - """ - - if OS != "Linux" and OS != "Darwin": - raise UnsupportedOSError - - # Derek Cheng: check to see if Seattle is being installed on a Nokia tablet. - #if platform.machine().startswith('armv'): - # return setup_nokia_startup() - - _output("Attempting to add an entry to the crontab...") - - # The error_output will only be displayed to the user if the ultimate attempt - # to add an entry to the crontab fails. - error_output = "" - - # First, check to see that cron is running. - # This variable is declared here because it is referenced later outside the - # try:block statement. In the unlikely event that an unpredicted exception is - # raised while checking if cron is running, it will be determined later by - # noting that the value of this variable is None rather than a boolean value. - cron_is_running = None - # If the following check raises a general exception, fall through to continue - # attempting to set up the crontab since we want the crontab to be set up - # properly in case this user is able to use cron in the future. - try: - cron_is_running,executable_cron_file = test_cron_is_running() - - except CannotDetermineCronStatusError: - # This exception means cron is installed, though whether or not it is - # running cannot be determined. - error_output = error_output + "It cannot be determined whether or not " \ - + "cron is installed and running. Please confirm with the root user " \ - + "that cron is installed and indeed running. If you believe cron is " \ - + "running on your system and seattle does not get configured to run " \ - + "automatically at startup, please read the following instructions " \ - + "or contact the seattle development team if no further " \ - + "instructions are given.\n" - - except Exception,e: - # If there is an unexpected exception raised when accessing cron, fall - # through the try: block to continue trying to set up the crontab. 
- pass - else: - if not cron_is_running: - _output("cron is not currently running on your system. Only the root " \ - + "user may start cron by running the following command:") - _output(str(executable_cron_file) + " start") - _output("An attempt to setup crontab to run seattle at startup will " \ - + "still be made, although seattle will not automatically " \ - + "run at startup until cron is started as described above.") - servicelogger.log("cron is not running on this system at install time.") - - - - - # Second, check that the user has permission to use cron. If this check raises - # a general exception, fall through to continue attempting to set up the - # crontab since we want the crontab to be set up properly in case this user - # is able to use cron in the future. - try: - crontab_accessible,cron_deny_permission_filepath = test_cron_accessibility() - - except CronAccessibilityFilesPermissionDeniedError,c: - error_output = error_output + "One or both of the files listing users " \ - + "who have access and who do not have access to use cron have been " \ - + "found, but this user does not have permission to read them. If " \ - + "seattle does not get configured to run automatically at machine " \ - + "boot, it is possible that it is because this user name must be " \ - + "listed in the cron 'allow' file which can be found in the man " \ - + "document for crontab (found by running the command 'man crontab' " \ - + "from the terminal).\n" - - except CronAccessibilityFilesNotFoundError,n: - error_output = error_output + "The cron allow and deny files, which " \ - + "specify which users have permission to use the cron service, " \ - + "cannot be found. If seattle is not able to be configured to " \ - + "run automatically at startup, it may be that your user name " \ - + "needs to be added to the cron allow file. 
The location of this " \ - + "cron allow file can be found in the man document for crontab " \ - + "(found by running the command 'man crontab' from the terminal).\n" - - except DetectUserError,d: - error_output = error_output + "The cron accessibility files were found, " \ - + "but the current user name could not be determined; therefore, the " \ - + "ability for this user to use the cron service could not be " \ - + "determined. If seattle fails to be configured to run " \ - + "automatically at startup, it is probable that the user name needs " \ - + "be added to the cron allow file. The location of the cron allow " \ - + "file can be found in the man document for crontab (found by " \ - + "running the command 'man crontab' from the terminal).\n" - - except Exception,e: - # If there is an unexpected exception raised when accessing the - # allow/deny files, fall through the try: block to continue trying to set up - # the crontab. - pass - else: - if not crontab_accessible: - _output("You do not have permission to use cron which makes seattle " \ - + "run automatically at startup. To get permission to use " \ - + "the cron service, the root user must remove your user " \ - + "name from the " + str(cron_deny_permission_filepath) \ - + " file.") - servicelogger.log("seattle was not added to the crontab because the " \ - + "user does not have permission to use cron.") - return False - - - - - # Lastly, add seattle to the crontab. - try: - successfully_added_to_crontab = add_seattle_to_crontab() - - except AlreadyInstalledError,a: - raise AlreadyInstalledError() - - except Exception: - if not error_output: - _output("seattle could not be configured to run automatically at " \ - + "startup on your machine for an unknown reason. It is " \ - + "that you do not have permission to access crontab. 
Please " \ - + "contact the seattle development team for more assistance.") - servicelogger.log("seattle could not be successfully added to the " \ - + "crontab for an unknown reason, although it is " \ - + "likely due to the user not having permission to " \ - + "use crontab since an exception was most likely " \ - + "raised when the 'crontab -l' command was run.") - else: - _output("seattle could not be configured to run automatically at " \ - + "machine boot. Following are more details:") - _output(error_output) - servicelogger.log("seattle could not be successfully added to the " \ - + "crontab. Following was the error output:") - servicelogger.log(error_output) - - return False - - else: - # Zack Boka: modify nodeman.cfg if the crontab was successfully installed so - # nmmain.py knows that the correct seattle crontab entry is - # installed. - if successfully_added_to_crontab: - configuration = persist.restore_object("nodeman.cfg") - configuration['crontab_updated_for_2009_installer'] = True - persist.commit_object(configuration,"nodeman.cfg") - - if cron_is_running == None: - _output("seattle was configured to start automatically at machine " \ - + "startup; however, an error occured when trying to " \ - + "detect if cron, the program that starts seattle at " \ - + "machine startup, is actually running. If cron is not " \ - + "running, then seattle will NOT automatically start up " \ - + "at machine boot. Please check with the root user to " \ - + "confirm that cron is installed and indeed running. Also " \ - + "confirm that you have access to use cron.") - return None - else: - return cron_is_running - - - else: - if cron_is_running and not error_output: - # Since cron is running, that could not have been the problem, so output - # to the user that it is unknown what seattle could not be configured to - # start at boot. - _output("seattle could not be configured to run automatically at " \ - + "startup on your machine for an unknown reason. 
Please " \ - + "contact the seattle development team for assistance.") - servicelogger.log("seattle could not be successfully added to the " \ - + "crontab for an unknown reason.") - elif not cron_is_running and not error_output: - # Despite cron not running, crontab could also not be modified for an - # unknown reason. We must output a message separate from above to not - # confuse the user since we already reported that cron is not running. - _output("seattle could not be configured to run automatically at " \ - + "startup on your machine for an unknown reason, despite " \ - + "cron not running. Please contact the seattle " \ - + "development team for assistance.") - servicelogger.log("seattle could not be successfully added to the " \ - + "crontab for an unknown reason, other than the " \ - + "face that cron is not running.") - return False - else: - _output("seattle could not be configured to run automatically at " \ - + "machine boot. Following are more details:") - _output(error_output) - servicelogger.log("seattle could not be successfully added to the " \ - + "crontab. Following was the error output:") - servicelogger.log(error_output) - - - # Although the default setting for - # config['crontab_updated_for_2009_installer'] = False, it should still be - # set in the event that there was a previous installer which set this - # value to True, but now for whatever reason, installation in the crontab - # failed. - configuration = persist.restore_object("nodeman.cfg") - config['crontab_updated_for_2009_installer'] = False - persist.commit_object(config,'nodeman.cfg') - - return False - - - - -def customize_win_batch_files(): - """ - - Preprocesses the Windows batch files to replace all instances of %PROG_PATH% - and %STARTER_FILE% with their appropriate specified values. 
- - %PROG_PATH% is used in the scripts to specify the absolute filepath to - the location to that batch file, primarily so the user does not have to be - in the seattle directory to use the scripts. - - %STARTER_FILE% is used in the scripts to specify the absolute filepath to - the location of the starter script in the startup folder (regardless of - whether or not the file actually exists in the startup folder). This is so - the starter batch file and uninstall batch file can appropriately remove - this file in the event that the starter file may still appear in the - startup folder even if install succeeds in installing seattle in the Windows - registry. (It will be rare that uninstall.bat will need this, but - start_seattle.bat may need this file path in case it must remove itself from - the startup folder in the even that the user deletes the seattle directory - without uninstalling.) - - Currently, only start_seattle_shortcut.bat and uninstall.bat are - preprocessed with this function. - - - None. - - - UnsupportedOSError if OS is not Windows\WindowsCE. - IOError may be called by child-function on being passed a bad file name. - - - Changes all instances of %PROG_PATH% and %STARTER_FILE% in the below- - specified files to the appropriate absolute filepath. - - - None. - """ - if OS != "Windows" and OS != "WindowsCE": - raise UnsupportedOSError("This must be a Windows system in order to " \ - + "modify Windows batch files.") - - _output("Customizing seattle batch files...") - - # Customize the start_seattle_shortcut.bat and uninstall.bat files. 
- full_startup_file_path,file_path_exists = \ - get_filepath_of_win_startup_folder_with_link_to_seattle() - for batchfile in [get_starter_shortcut_file_name(), - get_uninstaller_file_name()]: - preprocess_file(SEATTLE_FILES_DIR + os.sep + batchfile, - {"%PROG_PATH%": SEATTLE_FILES_DIR, - "%STARTER_FILE%": full_startup_file_path}) - - - - -def setup_sitecustomize(): - """ - - On Windows CE, edits the sitecustomize.py file to reference the right - program path, then copies it to the python directory. - - - None. - - - Raises UnsupportedOSError if the version is not Windows CE. - Raises IOError if the original sitecustomize.py file doesn't exist or - if the python path specified in repy_constants doesn't exist. - - - None. - - - None. - """ - original_fname = SEATTLE_FILES_DIR + os.sep + "sitecustomize.py" - if not OS == "WindowsCE": - raise UnsupportedOSError - elif not os.path.exists(original_fname): - raise IOError("Could not find sitecustomize.py under " + SEATTLE_FILES_DIR) - else: - python_dir = os.path.dirname(repy_constants.PATH_PYTHON_INSTALL) - if not os.path.isdir(python_dir): - raise IOError("Could not find repy_constants.PATH_PYTHON_INSTALL") - elif os.path.exists(python_dir + os.sep + "sitecustomize.py"): - raise IOError("sitecustomize.py already existed in python directory") - else: - preprocess_file(original_fname,{"%PROG_PATH%": SEATTLE_FILES_DIR}) - shutil.copy(original_fname, python_dir + os.sep + "sitecustomize.py") - - - - -def start_seattle(): - """ - - Starts seattle by running the starter file on any system. - - - None. - - - IOError if the starter file can not be found under SEATTLE_FILES_DIR. - - - None. - - - None. 
- """ - starter_file_path = [SEATTLE_FILES_DIR + os.sep + get_starter_file_name()] - if OS == "WindowsCE": - windows_api.launch_python_script(starter_file_path) - else: - if SILENT_MODE: - p = subprocess.Popen(starter_file_path,stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - else: - p = subprocess.Popen(starter_file_path) - - p.wait() - - - - -# Anthony Honstain's benchmarking function. -def perform_system_benchmarking(): - """ - - To call benchmark_resources.main (which performs the system - benchmarking (to find the resources available to the user installing), - calculate the amount that is to be donated, and then generate the - vessel resource files, vessel directories, and the vesseldict.) and - handle any exceptions that may be raised, logging information and - output useful information to the user installing seattle. - - - None - - - IOError if unable to create a log file. - - - May initialize the service logger 'installInfo'. - - Will create or append a temporary log file 'installer_benchmark.log' - that will be used during the benchmark process, if the benchmarking is - successful it will be removed. - - Creates the vessel resource files, vessel directories, and the - vesseldict. - - The benchmarking will look to several OS specific sources for - information, and perform benchmarking that includes retrieving - random numbers and creating a file to measure read/write rate. - WARNING: These benchmarks may take a noticeable amount of time - or consume more resources than normal. - - - Returns True if the benchmarking and creation of vessel structure - is complete, if those failed then False is returned. - - """ - # Run the benchmarks to benchmark system resources and generate - # resource files and the vesseldict. 
- _output("System benchmark starting...") - # Anthony - this file will be logged to until the v2 directory has - # been created, this will not happen until after the benchmarks - # have run and the majority of the installer state has been created. - benchmark_logfileobj = file("installer_benchmark.log", 'a+') - - try: - benchmark_resources.main(SEATTLE_FILES_DIR, RESOURCE_PERCENTAGE, - benchmark_logfileobj) - except benchmark_resources.BenchmarkingFailureError: - _output("Installation terminated.") - _output("Please email the Seattle project for additional support, and " \ - + "attach the installer_benchmark.log and vesselinfo files, " \ - + "found in the seattle_repy directory, in order to help us " \ - + "diagnose the issue.") - benchmark_logfileobj.close() - return False - except benchmark_resources.InsufficientResourceError: - exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() - traceback.print_exception(exceptionType, exceptionValue, \ - exceptionTraceback, file=benchmark_logfileobj) - _output("Failed.") - _output("This install cannot succeed because resources are insufficient. 
" \ - + "This could be because the percentage of donated resources " \ - + "is too small or because a custom install had too many " \ - + "vessels.") - _output("Please email the Seattle project for additional support, and " \ - + "attach the installer_benchmark.log and vesselinfo files, " \ - + "found in the seattle_repy directory, in order to help us " \ - + "diagnose the issue.") - benchmark_logfileobj.close() - return False - except Exception: - exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() - traceback.print_exception(exceptionType, exceptionValue, \ - exceptionTraceback, file=benchmark_logfileobj) - _output("Failed.") - _output("This install cannot succeed either because required " \ - + "installation info is corrupted or resources are insufficient.") - _output("Please email the Seattle project for additional support, and " \ - + "attach the installer_benchmark.log and vesselinfo files, " \ - + "found in the seattle_repy directory, in order to help us " \ - + "diagnose the issue.") - benchmark_logfileobj.close() - return False - - else: - # Transfer the contents of the file used to log the benchmark and creation - # of vessel states. The service logger cannot be used sooner because - # the seattle vessel directory has not yet been created. - benchmark_logfileobj.seek(0) - servicelogger.log(benchmark_logfileobj.read()) - benchmark_logfileobj.close() - os.remove(benchmark_logfileobj.name) - - _output("Benchmark complete and vessels created!") - return True - - - - -# Anthony Honstain's test urandom function. -def test_urandom_implemented(): - """ - - This will test if os.urandom is implemented on the OS - If we did not check here and os.urandom raised a NotImplementedError - then the install would surely fail when it attempted to generate - a RSA key (the key generation requires that os.urandom work). 
- - It should be noted that even if installation no longer required - key generation, currently all random numbers for vessels - come from this source, so when ever os.urandom is called it - would result in an internal error. - - - None - - - None - - - Make a call to a Operating System specific source of - cryptographically secure pseudo random numbers. - - Outputs instructions to the user installing seattle if their - system fails. - - - True if the test succeeded, - False otherwise. - """ - # Anthony - This will test if os.urandom is implemented on the OS - # If we did not check here and os.urandom raised a NotImplementedError - # the next step (setup_start) would surely fail when it tried - # to generate a key pair. - try: - os.urandom(1) - except NotImplementedError: - _output("Failed.") - _output("No source of OS-specific randomness") - _output("On a UNIX-like system this would be /dev/urandom, and on " \ - + "Windows it is CryptGenRandom.") - _output("Please email the Seattle project for additional support.") - return False - else: - # Test succeeded! - return True - - - - -def prepare_installation(options,arguments): - """ - - Prepare all necessary global variables and files for the actual installation - process. This includes combing through the arguments passed to the installer - to set the appropriate variables and setting the Node Manager configuration - information (in nodeman.cfg file). - - - options: - A list of tuples (flag,value) where flag is the argument name passed to - the installer (e.g., --nm-key-bitsize) and value is the value for that - particular flag (e.g., 1024). Example element that could appear in the - list described by options: ("--nm-key-bitsize","1024") - - arguments: - A list of arguments that did not have an argument name associated with it - (e.g., Specifying the install directory. See [install_dir] in usage()) - - - IOError if the specified install directory does not exist. 
- - - Changes default local and global variables, and injects relevant information - into the Node Manager configuration file (nodeman.cfg). - - - True if this entire prepare_installation() process finished, - False otherwise (meaning an argument was passed that calls for install to be - halted [e.g., --usage] or a value for one of the named arguments is - unreasonable [e.g., setting the resource percentage to be %0].). - """ - global SILENT_MODE - global RESOURCE_PERCENTAGE - global KEYBITSIZE - global DISABLE_STARTUP_SCRIPT - global DISABLE_INSTALL - - # Armon: Specify the variables that will be used to generate the Restrictions - # Information for the NM and Repy. - repy_restricted = False - repy_nootherips = False - repy_user_preference = [] - nm_restricted = False - nm_user_preference = [] - repy_prepend = [] - repy_prepend_dir = None - - # Iterate through and process the arguments, checking for IP/Iface - # restrictions. - for (flag, value) in options: - if flag == "-s": - SILENT_MODE = True - elif flag == "--onlynetwork": - disable_install = True - elif flag == "--percent": - # Check to see that the desired percentage of system resources is valid - # I do not see a reason someone couldn't donate 20.5 percent so it - # will be allowed for now. 
- try: - RESOURCE_PERCENTAGE = float(value) - except ValueError: - usage() - return False - if RESOURCE_PERCENTAGE <= 0.0 or RESOURCE_PERCENTAGE > 100.0: - usage() - return False - elif flag == "--nm-ip": - nm_restricted = True - nm_user_preference.append((True, value)) - elif flag == "--nm-iface": - nm_restricted = True - nm_user_preference.append((False, value)) - elif flag == "--repy-ip": - repy_restricted = True - repy_user_preference.append((True, value)) - elif flag == "--repy-iface": - repy_restricted = True - repy_user_preference.append((False,value)) - elif flag == "--repy-nootherips": - repy_restricted = True - repy_nootherips = True - elif flag == "--nm-key-bitsize": - KEYBITSIZE = int(value) - elif flag == "--disable-startup-script": - DISABLE_STARTUP_SCRIPT = True - elif flag == "--usage": - usage() - return False - elif flag == "--repy-prepend": - repy_prepend.extend(value.split()) - elif flag == "--repy-prepend-dir": - repy_prepend_dir = value - - # Print this notification after having processed all the arguments in case one - # of the arguments specifies silent mode. - if DISABLE_STARTUP_SCRIPT: - _output("Seattle will not be configured to run automatically at boot.") - - - # Build the configuration dictionary. - config = {} - config['nm_restricted'] = nm_restricted - config['nm_user_preference'] = nm_user_preference - config['repy_restricted'] = repy_restricted - config['repy_user_preference'] = repy_user_preference - config['repy_nootherips'] = repy_nootherips - - # Armon: Inject the configuration information. - configuration = persist.restore_object("nodeman.cfg") - configuration['networkrestrictions'] = config - configuration['repy_prepend'] = repy_prepend - configuration['repy_prepend_dir'] = repy_prepend_dir - persist.commit_object(configuration,"nodeman.cfg") - - # Tell the parent function that the passed-in arguments allow it to continue - # with the installation. 
def test_seattle_is_installed():
  """
  <Purpose>
    Tests to see if Seattle is already installed on this computer.

  <Arguments>
    None.

  <Exceptions>
    UnsupportedOSError if the os is not supported.

  <Side Effects>
    None.

  <Returns>
    True if Seattle is installed, False otherwise.
  """

  if OS == "Windows" or OS == "WindowsCE":

    # Tests if Seattle is set to run at user login.
    # See comments in add_to_win_registry_Current_User_key() for details.
    try:
      Current_User_key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
                          "Software\\Microsoft\\Windows\\CurrentVersion\\Run",
                          0, _winreg.KEY_ALL_ACCESS)
    except WindowsError:
      # Key could not be opened; fall through and try the machine-wide key.
      pass
    else:
      Current_User_key_exists = search_value_in_win_registry_key(
          Current_User_key, "seattle")
      # Always release the registry handle before deciding.
      _winreg.CloseKey(Current_User_key)
      if Current_User_key_exists:
        return True

    # Tests if Seattle is set to run at machine startup.
    # See comments in add_to_win_registry_Local_Machine_key() for details.
    try:
      Local_Machine_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                          "Software\\Microsoft\\Windows\\CurrentVersion\\Run",
                          0, _winreg.KEY_ALL_ACCESS)
    except WindowsError:
      pass
    else:
      Local_Machine_key_exists = search_value_in_win_registry_key(
          Local_Machine_key, "seattle")
      _winreg.CloseKey(Local_Machine_key)
      if Local_Machine_key_exists:
        return True

    # If neither registry key is present, then test if there is a shortcut
    # to Seattle in the startup folder to determine if Seattle is installed.
    full_startup_file_path,file_path_exists = \
        get_filepath_of_win_startup_folder_with_link_to_seattle()
    return file_path_exists

  elif OS == "Linux" or OS == "Darwin":

    # Check to see if Seattle is being installed on a Nokia tablet.
    #if platform.machine().startswith('armv'):
    #  # The full path to the startup script.
    #  startup_script_path = "/etc/init.d/nokia_seattle_startup.sh"
    #  # The full path to the symlink.
    #  symlink_path = "/etc/rc2.d/S99startseattle"
    #
    #  # If the startup script or the symlink exist, then Seattle was installed.
    #  return os.path.exists(startup_script_path) or \
    #      os.path.lexists(symlink_path)

    #else:
    # Check to see if the crontab has been modified to run seattle.
    # NOTE(review): "crontab -l" exits nonzero when the user has no crontab;
    # stderr is captured and ignored, so that case just yields an empty stdout.
    crontab_contents_stdout,crontab_contents_stderr = \
        subprocess.Popen(["crontab", "-l"], stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE).communicate()
    return get_starter_file_name() in crontab_contents_stdout

  else:
    raise UnsupportedOSError()
def usage():
  """
  Prints command line usage of script.

  Writes the invocation synopsis and a description of every supported flag
  to stdout. The leading command shown depends on the detected OS. Returns
  None and has no side effects beyond printing.
  """

  # The trailing commas below suppress the newline (Python 2 print), so the
  # platform-specific launcher name and the option synopsis share one line.
  if OS == "Windows" or OS == "WindowsCE":
    print "install.bat",
  elif OS == "Linux" or OS == "Darwin":
    print "install.sh",
  else:
    print "python seattleinstaller.py",

  print "[-s] [--usage] " \
      + "[--disable-startup-script] [--percent float] " \
      + "[--nm-key-bitsize bitsize] [--nm-ip ip] [--nm-iface iface] " \
      + "[--repy-ip ip] [--repy-iface iface] [--repy-nootherips] " \
      + "[--onlynetwork] [--repy-prepend args] [--repy-prepend-dir dir]"
  print "Info:"
  print "-s\t\t\t\tSilent mode: does not print output."
  print "--disable-startup-script\tDoes not install the Seattle startup " \
      + "script, meaning that Seattle will not automatically start running " \
      + "at machine start up. It is recommended that this option only be " \
      + "used in exceptional circumstances."
  print "--percent percent\t\tSpecifies the desired percentage of available " \
      + "system resources to donate. Default percentage: " \
      + str(RESOURCE_PERCENTAGE)
  print "--nm-key-bitsize bitsize\tSpecifies the desired bitsize of the Node " \
      + "Manager keys. Default bitsize: " + str(KEYBITSIZE)
  print "--nm-ip IP\t\t\tSpecifies a preferred IP for the NM. Multiple may " \
      + "be specified, they will be used in the specified order."
  print "--nm-iface iface\t\tSpecifies a preferred interface for the NM. " \
      + "Multiple may be specified, they will be used in the specified order."
  print "--repy-ip, --repy-iface. See --nm-ip and --nm-iface. These flags " \
      + "only affect repy and are separate from the Node Manager."
  print "--repy-nootherips\t\tSpecifies that repy is only allowed to use " \
      + "explicit IP's and interfaces."
  print "--onlynetwork\t\t\tDoes not install Seattle, but updates the " \
      + "network restrictions information."
  print "--repy-prepend args\t\tSpecifies a list of arguments to be " \
      + "prepended to any repy program run by the user. If multiple argument " \
      + "lists are specified, they will be concatenated."
  print "--repy-prepend-dir dir\t\tSpecifies a directory containing files to " \
      + "be copied to newly created vessels."
  print "See https://seattle.cs.washington.edu/wiki/SecurityLayers for " \
      + "details on using --repy-prepend and --repy-prepend-dir to " \
      + "construct custom security layers."
" \ - + "Multiple may be specified, they will be used in the specified order." - print "--repy-ip, --repy-iface. See --nm-ip and --nm-iface. These flags " \ - + "only affect repy and are separate from the Node Manager." - print "--repy-nootherips\t\tSpecifies that repy is only allowed to use " \ - + "explicit IP's and interfaces." - print "--onlynetwork\t\t\tDoes not install Seattle, but updates the " \ - + "network restrictions information." - print "--repy-prepend args\t\tSpecifies a list of arguments to be " \ - + "prepended to any repy program run by the user. If multiple argument " \ - + "lists are specified, they will be concatenated." - print "--repy-prepend-dir dir\t\tSpecifies a directory containing files to " \ - + "be copied to newly created vessels." - print "See https://seattle.cs.washington.edu/wiki/SecurityLayers for " \ - + "details on using --repy-prepend and --repy-prepend-dir to " \ - + "construct custom security layers." - - - - -def main(): - if OS not in SUPPORTED_OSES: - raise UnsupportedOSError("This operating system is not supported.") - - - # Begin pre-installation process. - - # Pre-install: parse the passed-in arguments. - try: - # Armon: Changed getopt to accept parameters for Repy and NM IP/Iface - # restrictions, also a special disable flag - opts, args = getopt.getopt(sys.argv[1:], "s", - ["percent=", "nm-key-bitsize=","nm-ip=", - "nm-iface=","repy-ip=","repy-iface=", - "repy-nootherips","onlynetwork", - "disable-startup-script","usage", - "repy-prepend=", "repy-prepend-dir="]) - except getopt.GetoptError, err: - print str(err) - usage() - return - - - # Check if Seattle is already installed. This needs to be done seperately - # from setting Seattle to run at startup because installation might fail - # during the pre-installation process. - if test_seattle_is_installed(): - _output("Seattle was already installed. You must run the uninstall " \ - + "script before reinstalling Seattle.") - return - - - # Initialize the service logger. 
- servicelogger.init('installInfo') - - # This catches Nokias/Androids/iPhones/iPads - if platform.machine().startswith('armv'): - # AR: The Android installer is a GUI, stdout/stderr are redirected to files. - try: - import android - global IS_ANDROID - IS_ANDROID = True - sys.stdout = open('installerstdout.log', 'w') - sys.stderr = open('installerstderr.log', 'w') - _output('Seattle is being installed on an Android compatible handset.') - - except ImportError: - IS_ANDROID = False - - # Derek Cheng: if the user is running a Nokia N800 tablet, we require them - # to be on root first in order to have files created in the /etc/init.d and - # /etc/rc2.d directories. - #if IS_ANDROID == False: - # _output('Seattle is being installed on a Nokia N800/900 Internet Tablet.') - # # JAC: I can't import this on Windows, so will do it here... - # import pwd - # # if the current user name is not 'root' - # if pwd.getpwuid(os.getuid())[0] != 'root': - # _output('Please run the installer as root. This can be done by ' \ - # + 'installing/using the rootsh or openssh package.') - # return - - # Pre-install: process the passed-in arguments, and set up the configuration - # dictionary. - continue_install = prepare_installation(opts,args) - if not continue_install: - return - - # Pre-install: run all tests and benchmarking. - # test_urandom_implemented() MUST be performed before - # perform_system_benchmarking() to get relevant results from the - # benchmarking. - urandom_test_succeeded = test_urandom_implemented() - if not urandom_test_succeeded: - return - benchmarking_succeeded = perform_system_benchmarking() - if not benchmarking_succeeded: - return - - - - # Begin installation. - if DISABLE_INSTALL: - return - - # First, customize any scripts since they may be copied to new locations when - # configuring seattle to run automatically at boot. - - - # If running on a Windows system, customize the batch files. 
- if OS == "Windows" or OS == "WindowsCE": - customize_win_batch_files() - _output("Done!") - - # If running on WindowsCE, setup the sitecustomize.py file. - if OS == "WindowsCE": - _output("Configuring python for WindowsCE...") - setup_sitecustomize() - _output("Done!") - - - - # Configure seattle to run at startup. - if not DISABLE_STARTUP_SCRIPT: - _output("Preparing Seattle to run automatically at startup...") - # This try: block attempts to install seattle to run at startup. If it - # fails, continue on with the rest of the install process since the seattle - # starter script may still be run even if seattle is not configured to run - # at boot. - try: - # Any errors generated while configuring seattle to run at startup will be - # printed in the child functions, unless an unexpected error is raised, - # which will be caught in the general except Exception: block below. - if OS == "Windows" or OS == "WindowsCE": - setup_win_startup() - _output("Seattle is setup to run at startup!") - elif OS == "Linux" or OS == "Darwin": - setup_success = setup_linux_or_mac_startup() - if setup_success == None: - # Do not print a final message to the user about setting up seattle to - # run automatically at startup. - pass - elif setup_success: - _output("Seattle is setup to run at startup!") - else: - # The reasons for which seattle was unable to be configured at startup - # will have been logged by the service logger in the - # setup_linux_or_mac_startup() function, and output for the possible - # reasons why configuration to run at startup failed will have already - # be given to the user from the setup_linux_or_mac_startup() function. - _output("Seattle failed to be configured to run automatically at " \ - + "startup.") - else: - raise UnsupportedOSError("This operating system is not supported.") - - except UnsupportedOSError,u: - raise UnsupportedOSError(u) - - # If an unpredicted error is raised while setting up seattle to run at - # startup, it is caught here. 
- except Exception,e: - _output("seattle could not be installed to run automatically at " \ - + "startup for the following reason: " + str(e)) - _output("Continuing with the installation process now. To manually " \ - + "run seattle at any time, just run " \ - + get_starter_file_name() + " from within the seattle " \ - + "directory.") - _output("Please contact the seattle project for further assistance.") - servicelogger.log(time.strftime(" seattle was NOT installed on this " \ - + "system for the following reason: " \ - + str(e) + ". %m-%d-%Y %H:%M:%S")) - - - - # Generate the Node Manager keys even if configuring seattle to run - # automatically at boot fails because Node Manager keys are needed for the - # seattle_starter script which can be run at any time. - _output("Generating the Node Manager RSA keys. This may take a few " \ - + "minutes...") - createnodekeys.initialize_keys(KEYBITSIZE, - nodemanager_directory=SEATTLE_FILES_DIR) - _output("Keys generated!") - - - - # Modify nodeman.cfg so the start_seattle script knows that seattle has been - # installed. This is a new feature that will require seattle to have been - # installed before it can be started. - configuration = persist.restore_object("nodeman.cfg") - configuration['seattle_installed'] = True - persist.commit_object(configuration,"nodeman.cfg") - - - - - # Everything has been installed, so start seattle and print concluding output - # messages. - # AR: On Android, the native installer/app takes care of starting Seattle - # after this script ends. (It collects our logs as well). - try: - if not IS_ANDROID: - _output("Starting seattle...") - start_seattle() - except Exception,e: - _output("seattle could not be started for the following reason: " + str(e)) - _output("Please contact the seattle project immediately for assistance.") - servicelogger.log(time.strftime(" seattle installation failed. 
seattle " \ - + "could not be started for the " \ - + "following reason: " + str(e) + " " \ - + "%m-%d-%Y %H:%M:%S")) - else: - _output("seattle has been installed!") - _output("To learn more about useful, optional scripts related to running " \ - + "seattle, see the README file.") - servicelogger.log(time.strftime(" seattle completed installation on: " \ - + "%m-%d-%Y %H:%M:%S")) - - - - -if __name__ == "__main__": - main() - From 5e183d685e1f06a245828053f2c3a6d8db75d4aa Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:34:00 -0500 Subject: [PATCH 13/17] Delete settings.py --- settings.py | 67 ----------------------------------------------------- 1 file changed, 67 deletions(-) delete mode 100644 settings.py diff --git a/settings.py b/settings.py deleted file mode 100644 index 0f6bb25..0000000 --- a/settings.py +++ /dev/null @@ -1,67 +0,0 @@ -from django.conf import settings - - -SETTINGS = { - 'THRESHOLD': 3, - 'PARTIALBYTES': 2, - 'SECRET_VERIFICATION_BYTES': 4, - 'SECRET_LENGTH': 32, - 'CACHE_ALIAS': 'pph' -} - -INSTALLED_APPS = ( - 'django.contrib.auth', - 'django_pph', -) - -PASSWORD_HASHERS = ( - 'django_pph.hashers.PolyPasswordHasher', -) - -CACHES={ - 'default': { - 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', - }, - # this will store non-persistent information (you can use the memcache if - # desired - 'pph': { - 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', - 'LOCATION': 'pph_cache', - 'TIMEOUT': None, - }, - # for persistent storage, only non sensitive information will be stored here - 'share_cache': { - 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', - 'LOCATION': 'share_table', - }, -} - -LOGGING = { - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - 'verbose': { - 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s', - }, - 'simple': { - 'format': '%(levelname)s %(message)s', - }, - }, - 'handlers': { - 'file': { - 'level': 'DEBUG', - 
'class': 'logging.FileHandler', - # set this path to a location specific to your project - 'filename': '/path/to/log.log', - 'formatter':'verbose', - }, - }, - 'loggers': { - 'django.security.PPH': { - 'handlers': ['file'], - 'level': 'DEBUG', - 'propagate': True, - }, - }, -} -SETTINGS.update(getattr(settings, 'PPH_SETTINGS', {})) From 151041cfab32c417694d194b15a782134df0c34f Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:34:09 -0500 Subject: [PATCH 14/17] Delete softwareupdater.py --- softwareupdater.py | 895 --------------------------------------------- 1 file changed, 895 deletions(-) delete mode 100644 softwareupdater.py diff --git a/softwareupdater.py b/softwareupdater.py deleted file mode 100644 index 9717c54..0000000 --- a/softwareupdater.py +++ /dev/null @@ -1,895 +0,0 @@ -""" -Author: Justin Cappos - -Start Date: August 4, 2008 - -Description: -A software updater for the node manager. The focus is to make it secure, -robust, and simple (in that order). - -Usage: ./softwareupdater.py - - -Updated 1/23/2009 use servicelogger to log errors - Xuanhua (Sean)s Ren - - -""" - - - -import sys -import os - -# AR: Determine whether we're running on Android -try: - import android - is_android = True -except ImportError: - is_android = False - - -import daemon - - -# this is being done so that the resources accounting doesn't interfere with logging -from repyportability import * -_context = locals() -add_dy_support(_context) - - -import urllib # to retrieve updates -import random -import shutil -import socket # we'll make it so we don't hang... -import tempfile -import traceback # For exception logging if the servicelogger fails. 
-import runonce -import harshexit # Used for portablekill -import portable_popen - - -# Import servicelogger to do logging -import servicelogger - - -dy_import_module_symbols("signeddata.r2py") -dy_import_module_symbols("sha.r2py") - -# Armon: The port that should be used to update our time using NTP -TIME_PORT = 51234 -TIME_PORT_2 = 42345 - -softwareurl = "http://127.0.0.1:12345/" - -# embedded this because it seems easier to update it along with this file -# Every computer running Seattle will have this same public key, and will trust -# files signed by this key. -softwareupdatepublickey = {'e':65537, 'n':526075948116080003280554006830575566023462590849832100901744697055909724371399194024610360988680486046658671654307101533713169322958855195024932225696648809267094385509182640667656030952352877629776268858749293991818804459040569204876661410565203782250931351017253716996260387854199214515388261871304887211777345182970665858555947328821458923790473012582536490466243608325281166196543516024989671304320341875597139114573471554153872815441938231903312324309479701306736658225495915950373259096521301612322772576928424944769271327302673419007616538975287650798649054715033859921179977857984108552092381645095446231461631170935130292951762423432838706384800441494986198233943613151543622465112191932047217540864954071072037120395588369723581690570804651327840494050484188839723959492722038728958484930385732791217088658096693158190570354016851080963054776310324073529686992784447640148360945347887172146977490914821481897511169067614133574943071540445508283388471120955347324291709922468281781067685260642235045759342607051334128852361259019773452414605095519296335514665086333152628000973843599475889487016749838695419693398953094596802530432730581909676277047978332827189769004841779937858967352911924408317662516009951588618286633791} - -# Whether the nodemanager should be told not to daemonize when it is restarted. -# This is only to assist our automated tests. 
run_nodemanager_in_foreground = False

# Whether the softwareupdater should run in the foreground or not. Default
# to yes.
run_softwareupdater_in_foreground = True

# If this is True, the software updater needs to restart itself. Once True, it
# will never be False again. This is global rather than in main() because the
# way that main() is currently written, an exception may escape from main and
# a loop in the global scope will catch it and call main() again.
restartme = False



# This code is in its own function called later rather than directly in the
# global scope right here because otherwise we need to ensure that the
# safe_log* methods are defined above this code or else they would cause a
# NameError because they aren't defined yet.
def safe_servicelogger_init():
  """
  This initializes the servicelogger in a way that will not throw an exception
  even if servicelogger.init() does.
  """
  # initialize the servicelogger to log on the softwareupdater file
  try:
    servicelogger.init("softwareupdater")
  except:
    # We assume that if servicelogger.init() fails, then safe_log will
    # fall back on using 'print' because servicelogger.log() calls
    # will throw a ValueError.
    safe_log("Servicelogger.init() failed. Will try to use stdout. The exception from servicelogger.init() was:")
    safe_log_last_exception()



def safe_log(message):
  """
  Log a message in a way that cannot throw an exception. First try to log using
  the servicelogger, then just try to print the message.
  """
  try:
    servicelogger.log(message)
  except:
    try:
      print message
    except:
      # As the standard output streams aren't closed, it would seem that this
      # should never happen. If it does, though, what can we do to log the
      # message, other than directly write to a file?
      pass



def safe_log_last_exception():
  """
  Log the last exception in a way that cannot throw an exception. First try to
  log using the servicelogger, then just try to print the message.
  """
  try:
    # Get the last exception in case the servicelogger fails.
    exceptionstr = traceback.format_exc()
  except:
    pass

  try:
    servicelogger.log_last_exception()
  except:
    try:
      print exceptionstr
    except:
      # As the standard output streams aren't closed, it would seem that this
      # should never happen. If it does, though, what can we do to log the
      # message, other than directly write to a file?
      pass
def get_file_hash(filename):
  # Return the hex SHA digest of the file's raw bytes, using the repy
  # sha library (sha_hexhash comes from dy_import of sha.r2py above).
  fileobj = file(filename, 'rb')
  filedata = fileobj.read()
  fileobj.close()

  return sha_hexhash(filedata)



# We'll use this to get a file. If it doesn't download in a reasonable time,
# we'll fail. (BUG: doesn't do this yet. I use timeouts, but they don't
# always work)
def safe_download(serverpath, filename, destdir, filesize):
  # Download serverpath/filename into destdir, returning True on success and
  # False on any failure (failures are logged, never raised).
  # TODO: filesize isn't being used.
  # TODO: raise an RsyncError from here if the download fails instead of
  # returning True/False.
  try:
    # Create the destination directory just in case
    fullpath = os.path.join(destdir, filename)
    destination_directory = os.path.dirname(fullpath)
    os.makedirs(destination_directory)
  except OSError:
    # Directory already exists (or cannot be made) -- proceed either way.
    pass

  try:
    # Fix for #1361. serverpath may not end in '/'
    urllib.urlretrieve(serverpath+'/'+filename,destdir+filename)
    return True

  except Exception,e:
    # Steven: these errors are common enough that they don't merit tracebacks
    if 'timed out' in str(e):
      safe_log('Retrieve timed out')
    elif 'Name or service not known' in str(e):
      safe_log('[Error] Name or service not known')
    elif 'Temporary failure in name resolution' in str(e):
      safe_log('[Error] Temporary failure in name resolution')
    else:
      safe_log_last_exception()

    safe_log('[safe_download] Failed to download ' + serverpath + filename)
    return False
# # how much we have left to download
# remainingsize = filesize
#
# # get a file-like object for the URL...
# safefo = urllib.urlopen(filename)
#
# # always close after this...
# try:
#   # download up to "filesize" worth of data...
#   # BUG: We also should check to see if this is too slow...
#   mydata
#
#
# finally:
#   try:
#     safefo.close()
#   except:
#     pass



def _copy(orig_filename, copy_filename):
  # AR: Wrap Android-specific shutil.copy() quirks. They seem to have a problem
  # setting the file access mode bits there, and shutil.copyfile() suffices
  # for the task at hand.

  try:
    # Create the destination directory just in case
    destination_directory = os.path.dirname(copy_filename)
    os.makedirs(destination_directory)
  except OSError:
    # Directory already exists; nothing to do.
    pass

  if not is_android:
    shutil.copy(orig_filename, copy_filename)
  else:
    shutil.copyfile(orig_filename, copy_filename)




################### Begin Rsync ###################
# I'd love to be able to put this in a separate module or repyify it, but
# I'd need urllib...

class RsyncError(Exception):
  # Raised when the metainfo file is malformed or a downloaded file's hash
  # does not match the signed metainfo.
  pass




def do_rsync(serverpath, destdir, tempdir):
  """
  <Purpose>
    This method is the one that attempts to download the metainfo file from
    the given serverpath, then uses that to attempt to do an update. This
    method makes sure that the downloaded metainfo file is valid and signed
    correctly before changing any files. Once the metainfo file is determined
    to be valid, it will then compare file hashes between the ones in the new
    metainfo file and the hashes of the files currently on disk. If there is
    a difference, the new file is downloaded and added to the updated list.
    Once all the new files have been downloaded, if they all did so
    successfully they are then copied over the old ones, replacing them and
    completing the update of the files. Then a list of the files updated is
    returned.

  <Arguments>
    serverpath - The url for the update site that we will try to contact.
                 This should be the url of the directory that contains all of
                 the files that are being pushed as an update.
    destdir - This is the directory where the new files will end up if
              everything goes well.
    tempdir - This is the directory where the new files will be initially
              downloaded to before their hashes are checked. This is not
              cleaned up after finishing.

  <Exceptions>
    Will throw various socket errors if there is trouble getting a file from
    the webserver.
    Will throw an RsyncError if the downloaded metainfo is malformed, or if
    the hash of a downloaded file does not match the one listed in the
    metainfo file.

  <Side Effects>
    Files will be downloaded to tempdir, and they might be copied over to
    destdir if everything is successful.

  <Returns>
    A list of files that have been updated. The list is empty if nothing is
    to be updated.
  """

  # get the metainfo (like a directory listing)
  metainfo_downloaded = safe_download(serverpath, "metainfo", tempdir, 1024*32)

  # if downloading the new metainfo failed, then we can't really do anything
  if not metainfo_downloaded:
    safe_log("[do_rsync] Failed to download metainfo. Not updating.")
    return []

  # read the file data into a string
  newmetafileobject = file(tempdir+"metainfo")
  newmetafiledata = newmetafileobject.read()
  newmetafileobject.close()

  # Incorrectly signed, we don't update...
  if not signeddata_issignedcorrectly(newmetafiledata, softwareupdatepublickey):
    safe_log("[do_rsync] New metainfo not signed correctly. Not updating.")
    return []

  try:
    # read in the old file
    oldmetafileobject = file(destdir+"metainfo")
    oldmetafiledata = oldmetafileobject.read()
    oldmetafileobject.close()
  except Exception:
    # The old file has problems. We'll use the new one since it's signed
    pass

  else:
    try:
      # Armon: Update our time via NTP, before we check the meta info
      time_updatetime(TIME_PORT)
    except Exception:
      try:
        time_updatetime(TIME_PORT_2)
      except Exception:
        # Steven: Sometimes we can't successfully update our time, so this is
        # better than generating a traceback.
        safe_log("[do_rsync] Unable to update ntp time. Not updating.")
        return []

    # they're both good. Let's compare them...
    shoulduse, reasons = signeddata_shouldtrust(oldmetafiledata,newmetafiledata,softwareupdatepublickey)

    if shoulduse == True:
      # great! All is well...
      pass
    elif shoulduse == None:
      # hmm, a warning...
      if len(reasons) == 1 and reasons[0] == 'Cannot check expiration':
        # we should probably allow this. The node may be offline
        # JCS: if it's offline, how is it downloading the metainfo or even
        # getting past the time_updatetime() calls above?
        safe_log("[do_rsync] Warning: " + str(reasons))
      elif 'Timestamps match' in reasons:
        # Already seen this one...
        safe_log("[do_rsync] The metainfo indicates no update is needed: " + str(reasons))
        return []

    elif shoulduse == False:
      if 'Public keys do not match' in reasons:
        # If the only complaint is that the oldmetafiledata and newmetafiledata
        # are signed by different keys, this is actually OK at this point. We
        # know that the newmetafiledata was correctly signed with the key held
        # within this softwareupdater, so this should actually only happen when
        # the oldmetafiledata has an out of date signature. However, we do
        # still need to make sure there weren't any other fatal errors that
        # we should distrust. - Brent
        reasons.remove('Public keys do not match')
        for comment in reasons:
          if comment in signeddata_fatal_comments:
            # If there is a different fatal comment still there, still log it
            # and don't perform the update.
            safe_log("[do_rsync] Serious problem with signed metainfo: " + str(reasons))
            return []

          if comment in signeddata_warning_comments:
            # If there is a different warning comment still there, log the
            # warning. We will take care of specific behavior shortly.
            safe_log("[do_rsync] " + str(comment))

        if 'Timestamps match' in reasons:
          # Act as we do above when timestamps match
          # Already seen this one...
          safe_log("[do_rsync] The metainfo indicates no update is needed: " + str(reasons))
          return []
      else:
        # Let's assume this is a bad thing and exit
        # NOTE(review): line structure was reconstructed; the original
        # attachment of this else-branch should be confirmed against history.
        safe_log("[do_rsync] Something is wrong with the metainfo: " + str(reasons))
        return []

  # now it's time to update
  updatedfiles = [ "metainfo" ]

  for line in file(tempdir+"metainfo"):

    # skip comments
    if line[0] == '#':
      continue

    # skip signature parts
    if line[0] == '!':
      continue

    # skip blank lines
    if line.strip() == '':
      continue

    # Each remaining line is expected to be: filename hash size
    linelist = line.split()
    if len(linelist)!= 3:
      raise RsyncError, "Malformed metainfo line: '"+line+"'"

    filename, filehash, filesize = linelist

    shoulddownloadfile = False

    # if the file is missing or the hash is different, we want to download...
    if not os.path.exists(destdir+filename):
      shoulddownloadfile = True
      safe_log("[do_rsync] Downloading file " + filename + " because it doesn't already exist at " + destdir+filename)
    elif get_file_hash(destdir+filename) != filehash:
      shoulddownloadfile = True
      safe_log("[do_rsync] Downloading file " + filename + " because the hash changed.")

    if shoulddownloadfile:
      # get the file
      safe_download(serverpath, filename, tempdir, filesize)

      # The hash doesn't match what we expected it to be according to the signed metainfo.
      if get_file_hash(tempdir+filename) != filehash:
        safe_log("[do_rsync] Hash mismatch on file '"+filename+"':" + filehash +
                 " vs " + get_file_hash(tempdir+filename))
        raise RsyncError, "Hash of file '"+filename+"' does not match information in metainfo file"

      # put this file in the list of files we need to update
      updatedfiles.append(filename)


  # copy the files to the local dir...
  safe_log("[do_rsync] Updating files: " + str(updatedfiles))
  for filename in updatedfiles:
    _copy(tempdir+filename, destdir+filename)

  # done! We updated the files
  return updatedfiles

################### End Rsync ###################
# MUTEX (how I prevent multiple copies)
# a new copy writes an "OK" file. if it's written the previous can exit.
# a previous copy writes a "stop" file. if it's written the new copy must exit
# each new program has its own stop and OK files (listed by mutex number)
#
# first program (fresh_software_updater)
#   get softwareupdater.new mutex
#   clean all mutex files
#   once in main, take softwareupdater.old, release softwareupdater.new
#   exit if we ever lose softwareupdater.old
#
# old program (restart_software_updater)
#   find an unused mutex
#   starts new with arg that is the mutex
#   wait for some time
#   if "OK" file exists, release softwareupdater.old, remove it and exit
#   else write "stop" file
#   continue normal operation
#
# new program: (software_updater_start)
#   take softwareupdater.new mutex
#   initializes
#   if "stop" file exists, then exit
#   write "OK" file
#   while "OK" file exists
#     if "stop" file exists, then exit
#   take softwareupdater.old, release softwareupdater.new
#   start normal operation
#


def init():
  """
  <Purpose>
    This method is here to do a runthrough of trying to update. The idea is
    that if there is going to be a fatal error, we want to die immediately
    rather than later. This way, when a node is updating to a flawed version,
    the old one won't die until we know the new one is working. Also goes
    through the magic explained in the comment block above.

  <Arguments>
    None

  <Exceptions>
    See fresh_software_updater and software_updater_start.

  <Side Effects>
    If we can't get the lock, we will exit.
    We will hold the softwareupdater.new lock while trying to start, but if
    all goes well, we will release that lock and acquire the
    softwareupdater.old lock.

  <Returns>
    None
  """
  # Note: be careful about making this init() method take too long. If it takes
  # longer to complete than the amount of time that restart_software_updater()
  # waits, then the new software updater will never be left running. Keep in
  # mind very slow systems and adjust the wait time in restart_software_updater()
  # if needed.

  gotlock = runonce.getprocesslock("softwareupdater.new")
  if gotlock == True:
    # I got the lock. All is well...
    pass
  else:
    # didn't get the lock, and we like to be real quiet, so lets
    # exit quietly
    sys.exit(55)

  # Close stdin because we don't read from stdin at all. We leave stdout and stderr
  # open because we haven't done anything to make sure that output to those (such as
  # uncaught python exceptions) go somewhere else useful.
  sys.stdin.close()

  # don't hang if the socket is slow (in some ways, this doesn't always work)
  # BUG: http://mail.python.org/pipermail/python-list/2008-January/471845.html
  socket.setdefaulttimeout(10)

  # time to handle startup (with respect to other copies of the updater
  if len(sys.argv) == 1:
    # I was called with no arguments, must be a fresh start...
    fresh_software_updater()
  else:
    # the first argument is our mutex number...
    software_updater_start(sys.argv[1])
def software_updater_start(mutexname):
  """
  <Purpose>
    When restarting the software updater, this method is called in the new
    one. It will write an OK file to let the original know it has started,
    then will wait for the original to acknowledge by either removing the OK
    file, meaning we should carry on, or by writing a stop file, meaning we
    should exit. Carrying on means getting the softwareupdater.old lock, and
    releasing the softwareupdater.new lock, then returning.

  <Arguments>
    mutexname - The new software updater was started with a given mutex name,
                which is used to uniquely identify the stop and OK files as
                coming from this softwareupdater. This way the old one can
                know that the softwareupdater it started is the one that is
                continuing on.

  <Exceptions>
    Possible Exception creating the OK file.

  <Side Effects>
    Acquires the softwareupdater.old lock and releases the softwareupdater.new
    lock.

  <Returns>
    None
  """

  safe_log("[software_updater_start] This is a new software updater process started by an existing one.")

  # if "stop" file exists, then exit
  if os.path.exists("softwareupdater.stop."+mutexname):
    safe_log("[software_updater_start] There's a stop file. Exiting.")
    sys.exit(2)

  # write "OK" file
  file("softwareupdater.OK."+mutexname,"w").close()

  # while "OK" file exists
  while os.path.exists("softwareupdater.OK."+mutexname):
    safe_log("[software_updater_start] Waiting for the file softwareupdater.OK."+mutexname+" to be removed.")
    sleep(1.0)
    # if "stop" file exists, then exit
    if os.path.exists("softwareupdater.stop."+mutexname):
      sys.exit(3)

  # Get the process lock for the main part of the program.
  gotlock = runonce.getprocesslock("softwareupdater.old")
  # Release the lock on the initialization part of the program
  runonce.releaseprocesslock('softwareupdater.new')
  if gotlock == True:
    # I got the lock. All is well...
    pass
  else:
    # NOTE(review): runonce.getprocesslock appears to return either True or a
    # pid-like value here; both branches below exit -- confirm against runonce.
    if gotlock:
      safe_log("[software_updater_start] Another software updater old process (pid: "+str(gotlock)+") is running")
      sys.exit(55)
    else:
      safe_log("[software_updater_start] Another software updater old process is running")
      sys.exit(55)

  safe_log("[software_updater_start] This software updater process is now taking over.")

  # start normal operation
  return
- - - Possible Exception creating the OK file. - - - Acquires the softwareupdater.old lock and releases the softwareupdater.new - lock. - - - None - """ - - safe_log("[software_updater_start] This is a new software updater process started by an existing one.") - - # if "stop" file exists, then exit - if os.path.exists("softwareupdater.stop."+mutexname): - safe_log("[software_updater_start] There's a stop file. Exiting.") - sys.exit(2) - - # write "OK" file - file("softwareupdater.OK."+mutexname,"w").close() - - # while "OK" file exists - while os.path.exists("softwareupdater.OK."+mutexname): - safe_log("[software_updater_start] Waiting for the file softwareupdater.OK."+mutexname+" to be removed.") - sleep(1.0) - # if "stop" file exists, then exit - if os.path.exists("softwareupdater.stop."+mutexname): - sys.exit(3) - - # Get the process lock for the main part of the program. - gotlock = runonce.getprocesslock("softwareupdater.old") - # Release the lock on the initialization part of the program - runonce.releaseprocesslock('softwareupdater.new') - if gotlock == True: - # I got the lock. All is well... - pass - else: - if gotlock: - safe_log("[software_updater_start] Another software updater old process (pid: "+str(gotlock)+") is running") - sys.exit(55) - else: - safe_log("[software_updater_start] Another software updater old process is running") - sys.exit(55) - - safe_log("[software_updater_start] This software updater process is now taking over.") - - # start normal operation - return - - -# this is called by either the installer or the program that handles starting -# up on boot -def fresh_software_updater(): - """ - - This function is ment to be called when starting a softwareupdater when no - other is currently running. It will clear away any outdated OK or stop - files, then release the softwareupdater.new lock and acquire the - softwareupdater.old lock. - - - None - - - Possible exception if there is a problem removing the OK/stop files. 
- - - The softwareupdater.new lock is released. - The softwareupdater.old lock is acquired. - All old OK and stop files are removed. - - - None - """ - # clean all mutex files - for filename in os.listdir('.'): - # Remove any outdated stop or OK files... - if filename.startswith('softwareupdater.OK.') or filename.startswith('softwareupdater.stop.'): - os.remove(filename) - - # Get the process lock for the main part of the program. - gotlock = runonce.getprocesslock("softwareupdater.old") - # Release the lock on the initialization part of the program - runonce.releaseprocesslock('softwareupdater.new') - if gotlock == True: - # I got the lock. All is well... - pass - else: - if gotlock: - safe_log("[fresh_software_updater] Another software updater old process (pid: "+str(gotlock)+") is running") - sys.exit(55) - else: - safe_log("[fresh_software_updater] Another software updater old process is running") - sys.exit(55) - # Should be ready to go... - - safe_log("[fresh_software_updater] Fresh software updater started.") - - -def get_mutex(): - # do this until we find an unused file mutex. we should find one - # immediately with overwhelming probability - while True: - randtoken = str(random.random()) - if not os.path.exists("softwareupdater.OK."+randtoken) and not os.path.exists("softwareupdater.stop."+randtoken): - return randtoken - - -def restart_software_updater(): - """ - - Attempts to start a new software updater, and will exit this one if the - new one seems to start successfully. If the new one does not start - successfully, then we just return. - - - None - - - Possible exception if there is problems writing the OK file. - - - If all goes well, a new softwareupdater will be started, and this one will - exit. - - - In the successful case, it will not return. If the new softwareupdater does - not start correctly, we will return None. 
- """ - - safe_log("[restart_software_updater] Attempting to restart software updater.") - - # find an unused mutex - thismutex = get_mutex() - - # starts new with arg that is the mutex - junkupdaterobject = portable_popen.Popen([sys.executable,"softwareupdater.py",thismutex]) - - # wait for some time (1 minute) for them to init and stop them if they don't - for junkcount in range(30): - sleep(2.0) - - # if "OK" file exists, release softwareupdater.old, remove OK file and exit - if os.path.exists("softwareupdater.OK."+thismutex): - runonce.releaseprocesslock('softwareupdater.old') - os.remove("softwareupdater.OK."+thismutex) - # I'm happy, it is taking over - safe_log("[restart_software_updater] The new instance of the software updater is running. This one is exiting.") - sys.exit(10) - - # else write "stop" file because it failed... - file("softwareupdater.stop."+thismutex,"w").close() - - safe_log("[restart_software_updater] Failed to restart software updater. This instance will continue.") - - # I continue normal operation - return - - - -def restart_client(filenamelist): - """ - - Restarts the node manager. - - - filenamelist - Currently not used, but is included for possible future use. - - - None - - - The current node manager is killed, and a new one is started. - - - None. - """ - # kill nmmain if it is currently running - retval = runonce.getprocesslock('seattlenodemanager') - if retval == True: - safe_log("[restart_client] Obtained the lock 'seattlenodemanager', it wasn't running.") - # I got the lock, it wasn't running... - # we want to start a new one, so lets release - runonce.releaseprocesslock('seattlenodemanager') - elif retval == False: - # Someone has the lock, but I can't do anything... - safe_log("[restart_client] The lock 'seattlenodemanager' is held by an unknown process. Will try to start it anyways.") - else: - safe_log("[restart_client] Stopping the nodemanager.") - # I know the process ID! Let's stop the process... 
- harshexit.portablekill(retval) - - safe_log("[restart_client] Starting the nodemanager.") - - # run the node manager. I rely on it to do the smart thing (handle multiple - # instances, etc.) - nm_restart_command_args_list = [sys.executable, "nmmain.py"] - - if run_nodemanager_in_foreground: - nm_restart_command_args_list.append('--foreground') - - junkprocessobject = portable_popen.Popen(nm_restart_command_args_list) - - # I don't do anything with the processobject. The process will run for some - # time, perhaps outliving me (if I'm updated first) - - -def main(): - """ - - Has an infinite loop where we sleep for 5-55 minutes, then check for - updates. If an update happens, we will restart ourselves and/or the - node manager as necesary. - - - None - - - Any non-RsyncError exceptions from do_rsync. - - - If there is an update on the update site we are checking, it will be - grabbed eventually. - - - Will not return. Either an exception will be thrown, we exit because we - are restarting, or we loop infinitely. - """ - - global restartme - - # This is similar to init only: - # 1) we loop / sleep - # 2) we restart ourselves if we are updated - # 3) we restart our client if they are updated - - while True: - # sleep for 5-55 minutes - for junk in range(random.randint(10, 12)): - # We need to wake up every 30 seconds otherwise we will take - # the full 5-55 minutes before we die when someone tries to - # kill us nicely. - sleep(30) - # Make sure we still have the process lock. - # If not, we should exit - if not runonce.stillhaveprocesslock('softwareupdater.old'): - safe_log('[main] We no longer have the processlock\n') - sys.exit(55) - - - # Make sure that if we failed somehow to restart, we keep trying before - # every time we try to update. - Brent - if restartme: - restart_software_updater() - - # where I'll put files... 
- tempdir = tempfile.mkdtemp()+"/" - - - # I'll clean this up in a minute - try: - updatedlist = do_rsync(softwareurl, "./",tempdir) - except RsyncError: - # oops, hopefully this will be fixed next time... - continue - - finally: - shutil.rmtree(tempdir) - - # no updates :) Let's wait again... - if updatedlist == []: - continue - - # if there were updates, the metainfo file should be one of them... - assert('metainfo' in updatedlist) - - clientlist = updatedlist[:] - - if 'softwareupdater.py' in clientlist: - restartme = True - clientlist.remove('softwareupdater.py') - - # if the client software changed, let's update it! - # AR: On Android, the native app takes care of starting/restarting - # the client and/or updater, depending on the exit code we return here. - if clientlist != []: - if not is_android: - restart_client(clientlist) - else: - sys.exit(200) # Native app should restart both client and updater - - # oh! I've changed too. I should restart... search for MUTEX for info - if restartme: - if not is_android: - restart_software_updater() - else: - sys.exit(201) # Native app should restart the updater - - - - - -def read_environmental_options(): - """ - This doesn't read command line options. It reads environment variable - options. The reason is because the software updater currently expects that - any first command line arg is the name of a mutex used by an already running - software updater. I don't see any good reason to risk changing more than is - needed until more major changes are being made to the software updater. - This also makes it so that we don't have to bother passing the option through - to restarts of the softwareupdater. 
- """ - try: - global run_nodemanager_in_foreground - global run_softwareupdater_in_foreground - if 'SEATTLE_RUN_NODEMANAGER_IN_FOREGROUND' in os.environ: - run_nodemanager_in_foreground = True - if os.environ.get('SEATTLE_RUN_SOFTWAREUPDATER_IN_FOREGROUND', True) == "False": - run_softwareupdater_in_foreground = False - except: - # The defaults here are safe, so if something went wrong in - # the code above, however unlikely, let's ignore it. - pass - - - - -if __name__ == '__main__': - read_environmental_options() - if not run_softwareupdater_in_foreground: - daemon.daemonize() - - # Initialize the service logger. - safe_servicelogger_init() - - # problems here are fatal. If they occur, the old updater won't stop... - try: - init() - except Exception, e: - safe_log_last_exception() - raise e - - # in case there is an unexpected exception, continue (we'll sleep first thing - # in main) - while True: - try: - main() - except SystemExit: - # If there is a SystemExit exception, we should probably actually exit... - raise - except Exception, e: - # Log the exception and let main() run again. - safe_log_last_exception() - # Sleep a little to prevent a fast loop if the exception is happening - # before any other calls to do_sleep(). 
- sleep(1.0) From ac2cb33099a36c0bfb482d52b42666123870c0a8 Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:34:16 -0500 Subject: [PATCH 15/17] Delete test_client.r2py --- test_client.r2py | 9 --------- 1 file changed, 9 deletions(-) delete mode 100644 test_client.r2py diff --git a/test_client.r2py b/test_client.r2py deleted file mode 100644 index ec826e4..0000000 --- a/test_client.r2py +++ /dev/null @@ -1,9 +0,0 @@ -if callfunc == 'initialize': - myip = getmyip() - myport = 12346 - try: - return_connection = openconnection('10.0.2.15',12345,myip,myport,10) - while True: - return_connection.send('Hello') - except Exception, err: - log("The Error is"+" "+str(err)) From 5f810e8a27183f5d5f73a8a6f1001a5eff9522d2 Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:34:23 -0500 Subject: [PATCH 16/17] Delete test_server.r2py --- test_server.r2py | 22 ---------------------- 1 file changed, 22 deletions(-) delete mode 100644 test_server.r2py diff --git a/test_server.r2py b/test_server.r2py deleted file mode 100644 index 59a4bb0..0000000 --- a/test_server.r2py +++ /dev/null @@ -1,22 +0,0 @@ -if callfunc == 'initialize': - serverip = getmyip() - serverport = 12345 - data = "" - MAX = 1400 - server_sock = listenforconnection(serverip, serverport) - log("Started Server listening on port - '%d' and IP - '%s'"%(serverport,serverip)) - while True: - try: - remote_ip, remote_port, sockobj = server_sock.getconnection() - log("Incoming connection from '%s and %d'"%(remote_ip,remote_port)) - while True: - free_space = MAX - len(data) - try: - data += sockobj.recv(free_space) - log(len(data)) - except SocketWouldBlockError: - pass - except SocketWouldBlockError: - pass - except Exception, err: - log("Error in get connnection "+str(err)) From 27887d066b28641b1cca8fa8fe5f29e6046d812d Mon Sep 17 00:00:00 2001 From: asm582 Date: Mon, 15 Dec 2014 09:34:29 -0500 Subject: [PATCH 17/17] Delete test_server_v2.r2py --- test_server_v2.r2py | 27 --------------------------- 
1 file changed, 27 deletions(-) delete mode 100644 test_server_v2.r2py diff --git a/test_server_v2.r2py b/test_server_v2.r2py deleted file mode 100644 index 818b69c..0000000 --- a/test_server_v2.r2py +++ /dev/null @@ -1,27 +0,0 @@ -if callfunc == 'initialize': - serverip = getmyip() - serverport = 12345 - data = "" - MAX = 1400 - server_sock = listenforconnection(serverip, serverport) - log("Started Server listening on port - '%d' and IP - '%s'"%(serverport,serverip)) - while True: - try: - remote_ip, remote_port, sockobj = server_sock.getconnection() - log("Incoming connection from '%s and %d'"%(remote_ip,remote_port)) - while True: - free_space = MAX - len(data) - log(free_space) - try: - while True: - data += sockobj.recv(free_space) - if("" == sockobj.recv(free_space)): - #log("buffer is full, remote side closing connection") - raise RepyArgumentError("buffer is full, remote side closing connection") - except SocketWouldBlockError: - pass - except SocketWouldBlockError: - pass - except Exception, err: - log(str(err)) -