SSL Status: {$ sslStatus $}
diff --git a/manageSSL/urls.py b/manageSSL/urls.py
index c10f03f31..58abdc6b9 100755
--- a/manageSSL/urls.py
+++ b/manageSSL/urls.py
@@ -6,6 +6,7 @@ urlpatterns = [
path('manageSSL', views.manageSSL, name='manageSSL'),
path('issueSSL', views.issueSSL, name='issueSSL'),
+ path('getSSLDetails', views.getSSLDetails, name='getSSLDetails'),
path('sslForHostName', views.sslForHostName, name='sslForHostName'),
path('obtainHostNameSSL', views.obtainHostNameSSL, name='obtainHostNameSSL'),
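
The new route is consumed with a POST whose JSON body names the target virtual host. A minimal client sketch, assuming the manageSSL URLs are mounted under /manageSSL/ and a valid panel session cookie is supplied (panel URL, cookie value, and domain below are placeholders):

```python
# Hypothetical call against the getSSLDetails route added above.
# The panel URL, session cookie, and domain are placeholders; without a valid
# logged-in session the view responds with {"status": 0, "error_message": "Not logged in"}.
import json

import requests

resp = requests.post(
    'https://panel.example.com:8090/manageSSL/getSSLDetails',
    data=json.dumps({'virtualHost': 'example.com'}),
    cookies={'sessionid': '<panel-session-cookie>'},
    verify=False,  # many panels run on a self-signed certificate
)
print(resp.json())
```
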
diff --git a/manageSSL/views.py b/manageSSL/views.py
index 9d92fcbd3..055774014 100755
--- a/manageSSL/views.py
+++ b/manageSSL/views.py
@@ -333,3 +333,73 @@ def obtainMailServerSSL(request):
'error_message': str(msg)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
+
+def getSSLDetails(request):
+ try:
+ userID = request.session['userID']
+ admin = Administrator.objects.get(pk=userID)
+ try:
+ if request.method == 'POST':
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 1:
+ pass
+ elif currentACL['manageSSL'] == 1:
+ pass
+ else:
+ return ACLManager.loadErrorJson('SSL', 0)
+
+ data = json.loads(request.body)
+ virtualHost = data['virtualHost']
+
+ if ACLManager.checkOwnership(virtualHost, admin, currentACL) == 1:
+ pass
+ else:
+ return ACLManager.loadErrorJson()
+
+ try:
+ website = ChildDomains.objects.get(domain=virtualHost)
+ except:
+ website = Websites.objects.get(domain=virtualHost)
+
+ try:
+ import OpenSSL
+ from datetime import datetime
+ filePath = '/etc/letsencrypt/live/%s/fullchain.pem' % (virtualHost)
+ x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
+ open(filePath, 'r').read())
+ expireData = x509.get_notAfter().decode('ascii')
+ finalDate = datetime.strptime(expireData, '%Y%m%d%H%M%SZ')
+
+ now = datetime.now()
+ diff = finalDate - now
+
+ data_ret = {
+ 'status': 1,
+ 'hasSSL': True,
+ 'days': str(diff.days),
+ 'authority': x509.get_issuer().get_components()[1][1].decode('utf-8'),
+ 'expiryDate': finalDate.strftime('%Y-%m-%d %H:%M:%S')
+ }
+
+ if data_ret['authority'] == 'Denial':
+ data_ret['authority'] = 'SELF-SIGNED SSL'
+
+ except BaseException as msg:
+ data_ret = {
+ 'status': 1,
+ 'hasSSL': False,
+ 'error_message': str(msg)
+ }
+
+ json_data = json.dumps(data_ret)
+ return HttpResponse(json_data)
+
+ except BaseException as msg:
+ data_ret = {'status': 0, 'error_message': str(msg)}
+ json_data = json.dumps(data_ret)
+ return HttpResponse(json_data)
+ except KeyError:
+ data_ret = {'status': 0, 'error_message': 'Not logged in'}
+ json_data = json.dumps(data_ret)
+ return HttpResponse(json_data)
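
getSSLDetails reads the live Let's Encrypt chain with pyOpenSSL and reports the days until expiry plus the issuing authority. For reference, a standalone sketch of that same parsing logic, assuming pyOpenSSL is installed and a fullchain.pem exists at the conventional path (the domain below is a placeholder):

```python
# Sketch only: mirrors the pyOpenSSL-based expiry check in getSSLDetails above.
# The domain and certificate path are illustrative.
from datetime import datetime

import OpenSSL


def ssl_details(virtual_host: str) -> dict:
    cert_path = f'/etc/letsencrypt/live/{virtual_host}/fullchain.pem'
    with open(cert_path) as f:
        cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, f.read())

    expires = datetime.strptime(cert.get_notAfter().decode('ascii'), '%Y%m%d%H%M%SZ')
    issuer = cert.get_issuer().get_components()[1][1].decode('utf-8')

    return {
        'days': (expires - datetime.now()).days,
        'authority': 'SELF-SIGNED SSL' if issuer == 'Denial' else issuer,
        'expiryDate': expires.strftime('%Y-%m-%d %H:%M:%S'),
    }


if __name__ == '__main__':
    print(ssl_details('example.com'))
```

The 'Denial' comparison mirrors the view above, which uses that issuer component to flag the panel's self-signed certificates.
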
diff --git a/plogical/acl.py b/plogical/acl.py
index f0aee9150..a9788791b 100644
--- a/plogical/acl.py
+++ b/plogical/acl.py
@@ -14,7 +14,7 @@ django.setup()
from loginSystem.models import Administrator, ACL
from django.shortcuts import HttpResponse
from packages.models import Package
-from websiteFunctions.models import Websites, ChildDomains, aliasDomains, DockerSites
+from websiteFunctions.models import Websites, ChildDomains, aliasDomains, DockerSites, WPSites
import json
from subprocess import call, CalledProcessError
from shlex import split
@@ -582,24 +582,44 @@ class ACLManager:
@staticmethod
def searchWebsiteObjects(currentACL, userID, searchTerm):
-
if currentACL['admin'] == 1:
- return Websites.objects.filter(domain__istartswith=searchTerm)
+ # Get websites that match the search term
+ websites = Websites.objects.filter(domain__istartswith=searchTerm)
+ # Get WordPress sites that match the search term
+ wp_sites = WPSites.objects.filter(title__icontains=searchTerm)
+ # Add WordPress sites' parent websites to the results
+ for wp in wp_sites:
+ if wp.owner not in websites:
+ websites = websites | Websites.objects.filter(pk=wp.owner.pk)
+ return websites
else:
websiteList = []
admin = Administrator.objects.get(pk=userID)
+ # Get websites that match the search term
websites = admin.websites_set.filter(domain__istartswith=searchTerm)
-
for items in websites:
websiteList.append(items)
- admins = Administrator.objects.filter(owner=admin.pk)
+ # Get WordPress sites that match the search term
+ wp_sites = WPSites.objects.filter(title__icontains=searchTerm)
+ for wp in wp_sites:
+ if wp.owner.admin == admin and wp.owner not in websiteList:
+ websiteList.append(wp.owner)
+ admins = Administrator.objects.filter(owner=admin.pk)
for items in admins:
+ # Get websites that match the search term
webs = items.websites_set.filter(domain__istartswith=searchTerm)
for web in webs:
- websiteList.append(web)
+ if web not in websiteList:
+ websiteList.append(web)
+
+ # Get WordPress sites that match the search term
+ wp_sites = WPSites.objects.filter(title__icontains=searchTerm)
+ for wp in wp_sites:
+ if wp.owner.admin == items and wp.owner not in websiteList:
+ websiteList.append(wp.owner)
return websiteList
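
The searchWebsiteObjects changes fold WordPress-title matches into the existing domain-prefix search while avoiding adding the same parent website twice. A minimal plain-Python sketch of that merge-and-deduplicate intent (the Site dataclass stands in for the Django models; this is not the PR's code):

```python
# Minimal sketch of the merge-and-deduplicate pattern behind searchWebsiteObjects:
# domain matches come first, then parent sites of matching WordPress installs,
# with primary keys used to filter out duplicates.
from dataclasses import dataclass


@dataclass(frozen=True)
class Site:
    pk: int
    domain: str


def merge_results(domain_matches, wp_parent_sites):
    seen = set()
    merged = []
    for site in list(domain_matches) + list(wp_parent_sites):
        if site.pk not in seen:
            seen.add(site.pk)
            merged.append(site)
    return merged


if __name__ == '__main__':
    a = [Site(1, 'example.com'), Site(2, 'blog.example.com')]
    b = [Site(2, 'blog.example.com'), Site(3, 'shop.example.com')]
    print([s.domain for s in merge_results(a, b)])
```
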
diff --git a/plogical/customACME.py b/plogical/customACME.py
new file mode 100644
index 000000000..e769e8c7d
--- /dev/null
+++ b/plogical/customACME.py
@@ -0,0 +1,934 @@
+import json
+import os
+import time
+import requests
+import base64
+import hashlib
+import logging
+from cryptography import x509
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import padding
+import OpenSSL
+from plogical import CyberCPLogFileWriter as logging
+from plogical.processUtilities import ProcessUtilities
+import socket
+
+class CustomACME:
+ def __init__(self, domain, admin_email, staging=False, provider='letsencrypt'):
+ """Initialize CustomACME"""
+ logging.CyberCPLogFileWriter.writeToFile(f'Initializing CustomACME for domain: {domain}, email: {admin_email}, staging: {staging}, provider: {provider}')
+ self.domain = domain
+ self.admin_email = admin_email
+ self.staging = staging
+ self.provider = provider
+
+ # Set the ACME directory URL based on provider and staging flag
+ if provider == 'zerossl':
+ if staging:
+ self.acme_directory = "https://acme-staging.zerossl.com/v2/DV90"
+ logging.CyberCPLogFileWriter.writeToFile('Using ZeroSSL staging ACME directory')
+ else:
+ self.acme_directory = "https://acme.zerossl.com/v2/DV90"
+ logging.CyberCPLogFileWriter.writeToFile('Using ZeroSSL production ACME directory')
+ else: # letsencrypt
+ if staging:
+ self.acme_directory = "https://acme-staging-v02.api.letsencrypt.org/directory"
+ logging.CyberCPLogFileWriter.writeToFile('Using Let\'s Encrypt staging ACME directory')
+ else:
+ self.acme_directory = "https://acme-v02.api.letsencrypt.org/directory"
+ logging.CyberCPLogFileWriter.writeToFile('Using Let\'s Encrypt production ACME directory')
+
+ self.account_key = None
+ self.account_url = None
+ self.directory = None
+ self.nonce = None
+ self.order_url = None
+ self.authorizations = []
+ self.finalize_url = None
+ self.certificate_url = None
+
+ # Initialize paths
+ self.cert_path = f'/etc/letsencrypt/live/{domain}'
+ self.challenge_path = '/usr/local/lsws/Example/html/.well-known/acme-challenge'
+ self.account_key_path = f'/etc/letsencrypt/accounts/{domain}.key'
+ logging.CyberCPLogFileWriter.writeToFile(f'Certificate path: {self.cert_path}, Challenge path: {self.challenge_path}')
+
+ # Create accounts directory if it doesn't exist
+ os.makedirs('/etc/letsencrypt/accounts', exist_ok=True)
+
+ def _generate_account_key(self):
+ """Generate RSA account key"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile('Generating RSA account key...')
+ key = rsa.generate_private_key(
+ public_exponent=65537,
+ key_size=2048,
+ backend=default_backend()
+ )
+ self.account_key = key
+ logging.CyberCPLogFileWriter.writeToFile('Successfully generated RSA account key')
+ return True
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error generating account key: {str(e)}')
+ return False
+
+ def _get_directory(self):
+ """Get ACME directory"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile(f'Fetching ACME directory from {self.acme_directory}')
+ response = requests.get(self.acme_directory)
+ self.directory = response.json()
+ logging.CyberCPLogFileWriter.writeToFile(f'Successfully fetched ACME directory: {json.dumps(self.directory)}')
+ return True
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error getting directory: {str(e)}')
+ return False
+
+ def _get_nonce(self):
+ """Get new nonce from ACME server"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile('Getting new nonce...')
+ response = requests.head(self.directory['newNonce'])
+ self.nonce = response.headers['Replay-Nonce']
+ logging.CyberCPLogFileWriter.writeToFile(f'Successfully got nonce: {self.nonce}')
+ return True
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error getting nonce: {str(e)}')
+ return False
+
+ def _create_jws(self, payload, url):
+ """Create JWS (JSON Web Signature)"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile(f'Creating JWS for URL: {url}')
+ if payload is not None:
+ logging.CyberCPLogFileWriter.writeToFile(f'Payload: {json.dumps(payload)}')
+
+ # Get a fresh nonce for this request
+ if not self._get_nonce():
+ logging.CyberCPLogFileWriter.writeToFile('Failed to get fresh nonce')
+ return None
+
+ # Get the private key numbers
+ logging.CyberCPLogFileWriter.writeToFile('Getting private key numbers...')
+ private_numbers = self.account_key.private_numbers()
+ public_numbers = private_numbers.public_numbers
+
+ # Convert numbers to bytes
+ logging.CyberCPLogFileWriter.writeToFile('Converting RSA numbers to bytes...')
+ n_bytes = public_numbers.n.to_bytes((public_numbers.n.bit_length() + 7) // 8, 'big')
+ e_bytes = public_numbers.e.to_bytes((public_numbers.e.bit_length() + 7) // 8, 'big')
+
+ # Create JWK
+ logging.CyberCPLogFileWriter.writeToFile('Creating JWK...')
+ jwk_key = {
+ "kty": "RSA",
+ "n": base64.urlsafe_b64encode(n_bytes).decode('utf-8').rstrip('='),
+ "e": base64.urlsafe_b64encode(e_bytes).decode('utf-8').rstrip('='),
+ "alg": "RS256"
+ }
+ logging.CyberCPLogFileWriter.writeToFile(f'Created JWK: {json.dumps(jwk_key)}')
+
+ # Create protected header
+ protected = {
+ "alg": "RS256",
+ "url": url,
+ "nonce": self.nonce
+ }
+
+ # Add either JWK or Key ID based on whether we have an account URL
+ if self.account_url and url != self.directory['newAccount']:
+ protected["kid"] = self.account_url
+ logging.CyberCPLogFileWriter.writeToFile(f'Using Key ID: {self.account_url}')
+ else:
+ protected["jwk"] = jwk_key
+ logging.CyberCPLogFileWriter.writeToFile('Using JWK for new account')
+
+ # Encode protected header
+ logging.CyberCPLogFileWriter.writeToFile('Encoding protected header...')
+ protected_b64 = base64.urlsafe_b64encode(
+ json.dumps(protected).encode('utf-8')
+ ).decode('utf-8').rstrip('=')
+
+ # For POST-as-GET requests, payload_b64 should be empty string
+ if payload is None:
+ payload_b64 = ""
+ logging.CyberCPLogFileWriter.writeToFile('Using empty payload for POST-as-GET request')
+ else:
+ # Encode payload
+ logging.CyberCPLogFileWriter.writeToFile('Encoding payload...')
+ payload_b64 = base64.urlsafe_b64encode(
+ json.dumps(payload).encode('utf-8')
+ ).decode('utf-8').rstrip('=')
+
+ # Create signature input
+ logging.CyberCPLogFileWriter.writeToFile('Creating signature input...')
+ signature_input = f"{protected_b64}.{payload_b64}".encode('utf-8')
+
+ # Sign the input
+ logging.CyberCPLogFileWriter.writeToFile('Signing input...')
+ signature = self.account_key.sign(
+ signature_input,
+ padding.PKCS1v15(),
+ hashes.SHA256()
+ )
+
+ # Encode signature
+ logging.CyberCPLogFileWriter.writeToFile('Encoding signature...')
+ signature_b64 = base64.urlsafe_b64encode(signature).decode('utf-8').rstrip('=')
+
+ # Create final JWS
+ logging.CyberCPLogFileWriter.writeToFile('Creating final JWS...')
+ jws = {
+ "protected": protected_b64,
+ "signature": signature_b64
+ }
+
+ # Only add payload if it exists
+ if payload is not None:
+ jws["payload"] = payload_b64
+
+ # Ensure the JWS is properly formatted
+ jws_str = json.dumps(jws, separators=(',', ':'))
+ logging.CyberCPLogFileWriter.writeToFile(f'Final JWS: {jws_str}')
+
+ return jws_str
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error creating JWS: {str(e)}')
+ return None
+
+ def _load_account_key(self):
+ """Load existing account key if available"""
+ try:
+ if os.path.exists(self.account_key_path):
+ logging.CyberCPLogFileWriter.writeToFile('Loading existing account key...')
+ with open(self.account_key_path, 'rb') as f:
+ key_data = f.read()
+ self.account_key = serialization.load_pem_private_key(
+ key_data,
+ password=None,
+ backend=default_backend()
+ )
+ logging.CyberCPLogFileWriter.writeToFile('Successfully loaded existing account key')
+ return True
+ return False
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error loading account key: {str(e)}')
+ return False
+
+ def _save_account_key(self):
+ """Save account key for future use"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile('Saving account key...')
+ key_data = self.account_key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.PKCS8,
+ encryption_algorithm=serialization.NoEncryption()
+ )
+ with open(self.account_key_path, 'wb') as f:
+ f.write(key_data)
+ logging.CyberCPLogFileWriter.writeToFile('Successfully saved account key')
+ return True
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error saving account key: {str(e)}')
+ return False
+
+ def _create_account(self):
+ """Create new ACME account"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile('Creating new ACME account...')
+ payload = {
+ "termsOfServiceAgreed": True,
+ "contact": [f"mailto:{self.admin_email}"]
+ }
+
+ jws = self._create_jws(payload, self.directory['newAccount'])
+ if not jws:
+ logging.CyberCPLogFileWriter.writeToFile('Failed to create JWS for account creation')
+ return False
+
+ logging.CyberCPLogFileWriter.writeToFile('Sending account creation request...')
+ headers = {
+ 'Content-Type': 'application/jose+json'
+ }
+ response = requests.post(self.directory['newAccount'], data=jws, headers=headers)
+ logging.CyberCPLogFileWriter.writeToFile(f'Account creation response status: {response.status_code}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Account creation response: {response.text}')
+
+ if response.status_code == 201:
+ self.account_url = response.headers['Location']
+ logging.CyberCPLogFileWriter.writeToFile(f'Successfully created account. Account URL: {self.account_url}')
+ # Save the account key for future use
+ self._save_account_key()
+ return True
+ elif response.status_code == 429:
+ logging.CyberCPLogFileWriter.writeToFile('Rate limit hit for account creation. Using staging environment...')
+ self.staging = True
+ self.acme_directory = "https://acme-staging-v02.api.letsencrypt.org/directory"
+ # Get new directory and nonce for staging
+ if not self._get_directory():
+ return False
+ if not self._get_nonce():
+ return False
+ # Try one more time with staging
+ return self._create_account()
+ elif response.status_code == 400 and "badNonce" in response.text:
+ logging.CyberCPLogFileWriter.writeToFile('Bad nonce, getting new nonce and retrying...')
+ if not self._get_nonce():
+ return False
+ return self._create_account()
+ return False
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error creating account: {str(e)}')
+ return False
+
+ def _create_order(self, domains):
+ """Create new order for domains"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile(f'Creating new order for domains: {domains}')
+ identifiers = [{"type": "dns", "value": domain} for domain in domains]
+ payload = {
+ "identifiers": identifiers
+ }
+
+ jws = self._create_jws(payload, self.directory['newOrder'])
+ if not jws:
+ logging.CyberCPLogFileWriter.writeToFile('Failed to create JWS for order creation')
+ return False
+
+ logging.CyberCPLogFileWriter.writeToFile('Sending order creation request...')
+ headers = {
+ 'Content-Type': 'application/jose+json'
+ }
+ response = requests.post(self.directory['newOrder'], data=jws, headers=headers)
+ logging.CyberCPLogFileWriter.writeToFile(f'Order creation response status: {response.status_code}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Order creation response: {response.text}')
+
+ if response.status_code == 201:
+ self.order_url = response.headers['Location']
+ self.authorizations = response.json()['authorizations']
+ self.finalize_url = response.json()['finalize']
+ logging.CyberCPLogFileWriter.writeToFile(f'Successfully created order. Order URL: {self.order_url}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Authorizations: {self.authorizations}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Finalize URL: {self.finalize_url}')
+ return True
+ return False
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error creating order: {str(e)}')
+ return False
+
+ def _handle_http_challenge(self, challenge):
+ """Handle HTTP-01 challenge"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile(f'Handling HTTP challenge: {json.dumps(challenge)}')
+
+ # Get key authorization
+ key_auth = self._get_key_authorization(challenge)
+ if not key_auth:
+ logging.CyberCPLogFileWriter.writeToFile('Failed to get key authorization')
+ return False
+
+ # Create challenge directory if it doesn't exist
+ if not os.path.exists(self.challenge_path):
+ logging.CyberCPLogFileWriter.writeToFile(f'Creating challenge directory: {self.challenge_path}')
+ os.makedirs(self.challenge_path)
+
+ # Write challenge file
+ challenge_file = os.path.join(self.challenge_path, challenge['token'])
+ logging.CyberCPLogFileWriter.writeToFile(f'Writing challenge file: {challenge_file}')
+
+ # Write only the key authorization to the file
+ with open(challenge_file, 'w') as f:
+ f.write(key_auth)
+
+ logging.CyberCPLogFileWriter.writeToFile('Successfully handled HTTP challenge')
+ return True
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error handling HTTP challenge: {str(e)}')
+ return False
+
+ def _handle_dns_challenge(self, challenge):
+ """Handle DNS-01 challenge (Cloudflare)"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile(f'Handling DNS challenge: {json.dumps(challenge)}')
+ # This is a placeholder - implement Cloudflare API integration
+ # You'll need to add your Cloudflare API credentials and implementation
+            return False  # DNS-01 not implemented yet; fail explicitly instead of returning None
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error handling DNS challenge: {str(e)}')
+ return False
+
+ def _get_key_authorization(self, challenge):
+ """Get key authorization for challenge"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile('Getting key authorization...')
+
+ # Get the private key numbers
+ private_numbers = self.account_key.private_numbers()
+ public_numbers = private_numbers.public_numbers
+
+ # Convert numbers to bytes
+ n_bytes = public_numbers.n.to_bytes((public_numbers.n.bit_length() + 7) // 8, 'big')
+ e_bytes = public_numbers.e.to_bytes((public_numbers.e.bit_length() + 7) // 8, 'big')
+
+ # Create JWK without alg field
+ jwk_key = {
+ "kty": "RSA",
+ "n": base64.urlsafe_b64encode(n_bytes).decode('utf-8').rstrip('='),
+ "e": base64.urlsafe_b64encode(e_bytes).decode('utf-8').rstrip('=')
+ }
+
+ # Calculate the JWK thumbprint according to RFC 7638
+ # The thumbprint is a hash of the JWK (JSON Web Key) in a specific format
+ # First, we create a dictionary with the required JWK parameters
+ jwk = {
+ "e": base64.urlsafe_b64encode(public_numbers.e.to_bytes(3, 'big')).decode('utf-8').rstrip('='),
+ "kty": "RSA", # Key type
+ "n": base64.urlsafe_b64encode(public_numbers.n.to_bytes(256, 'big')).decode('utf-8').rstrip('=')
+ }
+
+ # Sort the JWK parameters alphabetically by key name
+ # This ensures consistent thumbprint calculation regardless of parameter order
+ sorted_jwk = json.dumps(jwk, sort_keys=True, separators=(',', ':'))
+
+ # Calculate the SHA-256 hash of the sorted JWK
+ # Example of what sorted_jwk might look like:
+ # {"e":"AQAB","kty":"RSA","n":"tVKUtcx_n9rt5afY_2WFNVAu9fjD4xqX4Xm3dJz3XYb"}
+ # The thumbprint will be a 32-byte SHA-256 hash of this string
+ # For example, it might look like: b'x\x9c\x1d\x8f\x8b\x1b\x1e\x8b\x1b\x1e\x8b\x1b\x1e\x8b\x1b\x1e'
+ thumbprint = hashlib.sha256(sorted_jwk.encode('utf-8')).digest()
+
+ # Encode the thumbprint in base64url format (RFC 4648)
+ # This removes padding characters (=) and replaces + and / with - and _
+ # Example final thumbprint: "xJ0dj8sbHosbHosbHosbHos"
+ thumbprint = base64.urlsafe_b64encode(thumbprint).decode('utf-8').rstrip('=')
+
+ # Combine token and key authorization
+ key_auth = f"{challenge['token']}.{thumbprint}"
+ logging.CyberCPLogFileWriter.writeToFile(f'Key authorization: {key_auth}')
+ return key_auth
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error getting key authorization: {str(e)}')
+ return None
+
+ def _verify_challenge(self, challenge_url):
+ """Verify challenge completion with the ACME server
+
+ This function sends a POST request to the ACME server to verify that the challenge
+ has been completed successfully. The challenge URL is provided by the ACME server
+ when the challenge is created.
+
+ Example challenge_url:
+ "https://acme-v02.api.letsencrypt.org/acme/challenge/example.com/123456"
+
+ The verification process:
+    1. Creates an empty JSON payload ({}) to trigger validation
+ 2. Creates a JWS (JSON Web Signature) with the payload
+ 3. Sends the request to the ACME server
+ 4. Checks the response status
+
+ Returns:
+ bool: True if challenge is verified successfully, False otherwise
+ """
+ try:
+ logging.CyberCPLogFileWriter.writeToFile(f'Verifying challenge at URL: {challenge_url}')
+
+            # Send an empty JSON object ({}) to tell the ACME server to begin validating
+            # the challenge (RFC 8555 section 7.5.1); this is distinct from POST-as-GET,
+            # which signs an empty string payload and is only used to poll resources.
+ payload = {}
+
+ # Create JWS (JSON Web Signature) for the request
+ # Example JWS might look like:
+ # {
+ # "protected": "eyJhbGciOiJSUzI1NiIsIm5vbmNlIjoiMTIzNDU2Nzg5MCIsInVybCI6Imh0dHBzOi8vYWNtZS12MDIuYXBpLmxldHNlbmNyeXB0Lm9yZy9hY21lL2NoYWxsZW5nZS9leGFtcGxlLmNvbS8xMjM0NTYifQ",
+ # "signature": "c2lnbmF0dXJlX2hlcmU",
+ # "payload": ""
+ # }
+ jws = self._create_jws(payload, challenge_url)
+ if not jws:
+ logging.CyberCPLogFileWriter.writeToFile('Failed to create JWS for challenge verification')
+ return False
+
+ logging.CyberCPLogFileWriter.writeToFile('Sending challenge verification request...')
+
+ # Set headers for the request
+ # Content-Type: application/jose+json indicates we're sending a JWS
+ headers = {
+ 'Content-Type': 'application/jose+json'
+ }
+
+ # Send the verification request to the ACME server
+ # Example response might look like:
+ # {
+ # "type": "http-01",
+ # "status": "valid",
+ # "validated": "2024-03-20T12:00:00Z",
+ # "url": "https://acme-v02.api.letsencrypt.org/acme/challenge/example.com/123456"
+ # }
+ response = requests.post(challenge_url, data=jws, headers=headers)
+ logging.CyberCPLogFileWriter.writeToFile(f'Challenge verification response status: {response.status_code}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Challenge verification response: {response.text}')
+
+ # Check if the challenge was verified successfully
+ # Status code 200 indicates success
+ # The response will contain the challenge status and validation time
+ if response.status_code == 200:
+ logging.CyberCPLogFileWriter.writeToFile('Successfully verified challenge')
+ return True
+ return False
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error verifying challenge: {str(e)}')
+ return False
+
+ def _finalize_order(self, csr):
+ """Finalize order and get certificate"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile('Finalizing order...')
+ payload = {
+ "csr": base64.urlsafe_b64encode(csr).decode('utf-8').rstrip('=')
+ }
+
+ jws = self._create_jws(payload, self.finalize_url)
+ if not jws:
+ logging.CyberCPLogFileWriter.writeToFile('Failed to create JWS for order finalization')
+ return False
+
+ logging.CyberCPLogFileWriter.writeToFile('Sending order finalization request...')
+ headers = {
+ 'Content-Type': 'application/jose+json'
+ }
+ response = requests.post(self.finalize_url, data=jws, headers=headers)
+ logging.CyberCPLogFileWriter.writeToFile(f'Order finalization response status: {response.status_code}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Order finalization response: {response.text}')
+
+ if response.status_code == 200:
+ # Wait for order to be processed
+ max_attempts = 30
+ delay = 2
+ for attempt in range(max_attempts):
+ if not self._get_nonce():
+ logging.CyberCPLogFileWriter.writeToFile('Failed to get nonce for order status check')
+ return False
+
+ response = requests.get(self.order_url, headers=headers)
+ logging.CyberCPLogFileWriter.writeToFile(f'Order status check response: {response.text}')
+
+ if response.status_code == 200:
+ order_status = response.json().get('status')
+ if order_status == 'valid':
+ self.certificate_url = response.json().get('certificate')
+ logging.CyberCPLogFileWriter.writeToFile(f'Successfully finalized order. Certificate URL: {self.certificate_url}')
+ return True
+ elif order_status == 'invalid':
+ logging.CyberCPLogFileWriter.writeToFile('Order validation failed')
+ return False
+ elif order_status == 'processing':
+ logging.CyberCPLogFileWriter.writeToFile(f'Order still processing, attempt {attempt + 1}/{max_attempts}')
+ time.sleep(delay)
+ continue
+
+ logging.CyberCPLogFileWriter.writeToFile(f'Order status check failed, attempt {attempt + 1}/{max_attempts}')
+ time.sleep(delay)
+
+ logging.CyberCPLogFileWriter.writeToFile('Order processing timed out')
+ return False
+ return False
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error finalizing order: {str(e)}')
+ return False
+
+ def _download_certificate(self):
+ """Download certificate from ACME server"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile('Downloading certificate...')
+ logging.CyberCPLogFileWriter.writeToFile(f'Certificate URL: {self.certificate_url}')
+
+ # For certificate downloads, we can use a simple GET request
+ response = requests.get(self.certificate_url)
+ logging.CyberCPLogFileWriter.writeToFile(f'Certificate download response status: {response.status_code}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Certificate download response headers: {response.headers}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Certificate download response content: {response.text}')
+
+ if response.status_code == 200:
+ logging.CyberCPLogFileWriter.writeToFile('Successfully downloaded certificate')
+ return response.content
+ return None
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error downloading certificate: {str(e)}')
+ return None
+
+ def _wait_for_challenge_validation(self, challenge_url, max_attempts=30, delay=2):
+ """Wait for challenge to be validated by the ACME server"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile(f'Waiting for challenge validation at URL: {challenge_url}')
+ for attempt in range(max_attempts):
+ if not self._get_nonce():
+ logging.CyberCPLogFileWriter.writeToFile('Failed to get nonce for challenge status check')
+ return False
+
+ headers = {
+ 'Content-Type': 'application/jose+json'
+ }
+ response = requests.get(challenge_url, headers=headers)
+ logging.CyberCPLogFileWriter.writeToFile(f'Challenge status check response: {response.text}')
+
+ if response.status_code == 200:
+ challenge_status = response.json().get('status')
+ if challenge_status == 'valid':
+ logging.CyberCPLogFileWriter.writeToFile('Challenge validated successfully')
+ return True
+ elif challenge_status == 'invalid':
+ logging.CyberCPLogFileWriter.writeToFile('Challenge validation failed')
+ return False
+
+ logging.CyberCPLogFileWriter.writeToFile(f'Challenge still pending, attempt {attempt + 1}/{max_attempts}')
+ time.sleep(delay)
+
+ logging.CyberCPLogFileWriter.writeToFile('Challenge validation timed out')
+ return False
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error waiting for challenge validation: {str(e)}')
+ return False
+
+ def _check_dns_record(self, domain):
+ """Check if a domain has valid DNS records
+
+ This function performs multiple DNS checks to ensure the domain has valid DNS records.
+ It includes:
+ 1. A record (IPv4) check
+ 2. AAAA record (IPv6) check
+ 3. DNS caching prevention
+ 4. Multiple DNS server checks
+
+ Args:
+ domain (str): The domain to check
+
+ Returns:
+ bool: True if valid DNS records are found, False otherwise
+ """
+ try:
+ logging.CyberCPLogFileWriter.writeToFile(f'Checking DNS records for domain: {domain}')
+
+ # List of public DNS servers to check against
+ dns_servers = [
+ '8.8.8.8', # Google DNS
+ '1.1.1.1', # Cloudflare DNS
+ '208.67.222.222' # OpenDNS
+ ]
+
+ # Function to check DNS record with specific DNS server
+ def check_with_dns_server(server, record_type='A'):
+ try:
+ # Create a new socket for each check
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.settimeout(5) # 5 second timeout
+
+ # Set the DNS server
+ sock.connect((server, 53))
+
+ # Create DNS query
+ query = bytearray()
+ # DNS header
+ query += b'\x00\x01' # Transaction ID
+ query += b'\x01\x00' # Flags: Standard query
+ query += b'\x00\x01' # Questions: 1
+ query += b'\x00\x00' # Answer RRs: 0
+ query += b'\x00\x00' # Authority RRs: 0
+ query += b'\x00\x00' # Additional RRs: 0
+
+ # Domain name
+ for part in domain.split('.'):
+ query.append(len(part))
+ query.extend(part.encode())
+ query += b'\x00' # End of domain name
+
+ # Query type and class
+ if record_type == 'A':
+ query += b'\x00\x01' # Type: A
+ else: # AAAA
+ query += b'\x00\x1c' # Type: AAAA
+ query += b'\x00\x01' # Class: IN
+
+ # Send query
+ sock.send(query)
+
+ # Receive response
+ response = sock.recv(1024)
+
+ # Check if we got a valid response
+ if len(response) > 12: # Minimum DNS response size
+ # Check if there are answers in the response
+ answer_count = int.from_bytes(response[6:8], 'big')
+ if answer_count > 0:
+ return True
+
+ return False
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error checking DNS with server {server}: {str(e)}')
+ return False
+ finally:
+ sock.close()
+
+ # Check A records (IPv4) with multiple DNS servers
+ a_record_found = False
+ for server in dns_servers:
+ if check_with_dns_server(server, 'A'):
+ a_record_found = True
+ break
+
+ # Check AAAA records (IPv6) with multiple DNS servers
+ aaaa_record_found = False
+ for server in dns_servers:
+ if check_with_dns_server(server, 'AAAA'):
+ aaaa_record_found = True
+ break
+
+ # Also check with system's DNS resolver as a fallback
+ try:
+ # Try to resolve A record (IPv4)
+ socket.gethostbyname(domain)
+ a_record_found = True
+ except socket.gaierror:
+ pass
+
+ try:
+ # Try to resolve AAAA record (IPv6)
+ socket.getaddrinfo(domain, None, socket.AF_INET6)
+ aaaa_record_found = True
+ except socket.gaierror:
+ pass
+
+ # Log the results
+ if a_record_found:
+ logging.CyberCPLogFileWriter.writeToFile(f'IPv4 DNS record found for domain: {domain}')
+ if aaaa_record_found:
+ logging.CyberCPLogFileWriter.writeToFile(f'IPv6 DNS record found for domain: {domain}')
+
+ # Return True if either A or AAAA record is found
+ return a_record_found or aaaa_record_found
+
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error checking DNS records: {str(e)}')
+ return False
+
+ def _wait_for_order_processing(self, max_attempts=30, delay=2):
+ """Wait for order to be processed"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile('Waiting for order processing...')
+ for attempt in range(max_attempts):
+ if not self._get_nonce():
+ logging.CyberCPLogFileWriter.writeToFile('Failed to get nonce for order status check')
+ return False
+
+ headers = {
+ 'Content-Type': 'application/jose+json'
+ }
+ response = requests.get(self.order_url, headers=headers)
+ logging.CyberCPLogFileWriter.writeToFile(f'Order status check response: {response.text}')
+
+ if response.status_code == 200:
+ order_status = response.json().get('status')
+ if order_status == 'valid':
+ self.certificate_url = response.json().get('certificate')
+ logging.CyberCPLogFileWriter.writeToFile('Order validated successfully')
+ return True
+ elif order_status == 'invalid':
+ logging.CyberCPLogFileWriter.writeToFile('Order validation failed')
+ return False
+ elif order_status == 'processing':
+ logging.CyberCPLogFileWriter.writeToFile(f'Order still processing, attempt {attempt + 1}/{max_attempts}')
+ time.sleep(delay)
+ continue
+
+ logging.CyberCPLogFileWriter.writeToFile(f'Order status check failed, attempt {attempt + 1}/{max_attempts}')
+ time.sleep(delay)
+
+ logging.CyberCPLogFileWriter.writeToFile('Order processing timed out')
+ return False
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error waiting for order processing: {str(e)}')
+ return False
+
+ def issue_certificate(self, domains, use_dns=False):
+ """Main method to issue certificate"""
+ try:
+ logging.CyberCPLogFileWriter.writeToFile(f'Starting certificate issuance for domains: {domains}, use_dns: {use_dns}')
+
+ # Try to load existing account key first
+ if self._load_account_key():
+ logging.CyberCPLogFileWriter.writeToFile('Using existing account key')
+ else:
+ logging.CyberCPLogFileWriter.writeToFile('No existing account key found, will create new one')
+
+ # Filter domains to only include those with valid DNS records
+ valid_domains = []
+ for domain in domains:
+ if self._check_dns_record(domain):
+ valid_domains.append(domain)
+ else:
+ logging.CyberCPLogFileWriter.writeToFile(f'Skipping domain {domain} due to missing DNS records')
+
+ if not valid_domains:
+ logging.CyberCPLogFileWriter.writeToFile('No valid domains found with DNS records')
+ return False
+
+ # Initialize ACME
+ logging.CyberCPLogFileWriter.writeToFile('Step 1: Generating account key')
+ if not self._generate_account_key():
+ logging.CyberCPLogFileWriter.writeToFile('Failed to generate account key')
+ return False
+
+ logging.CyberCPLogFileWriter.writeToFile('Step 2: Getting ACME directory')
+ if not self._get_directory():
+ logging.CyberCPLogFileWriter.writeToFile('Failed to get ACME directory')
+ return False
+
+ logging.CyberCPLogFileWriter.writeToFile('Step 3: Getting nonce')
+ if not self._get_nonce():
+ logging.CyberCPLogFileWriter.writeToFile('Failed to get nonce')
+ return False
+
+ logging.CyberCPLogFileWriter.writeToFile('Step 4: Creating account')
+ if not self._create_account():
+ logging.CyberCPLogFileWriter.writeToFile('Failed to create account')
+ # If we failed to create account and we're not in staging, try staging
+ if not self.staging:
+ logging.CyberCPLogFileWriter.writeToFile('Switching to staging environment...')
+ self.staging = True
+ self.acme_directory = "https://acme-staging-v02.api.letsencrypt.org/directory"
+ if not self._get_directory():
+ return False
+ if not self._get_nonce():
+ return False
+ if not self._create_account():
+ return False
+ else:
+ return False
+
+ # Create order with only valid domains
+ logging.CyberCPLogFileWriter.writeToFile('Step 5: Creating order')
+ if not self._create_order(valid_domains):
+ logging.CyberCPLogFileWriter.writeToFile('Failed to create order')
+ return False
+
+ # Handle challenges
+ logging.CyberCPLogFileWriter.writeToFile('Step 6: Handling challenges')
+ for auth_url in self.authorizations:
+ logging.CyberCPLogFileWriter.writeToFile(f'Processing authorization URL: {auth_url}')
+ if not self._get_nonce():
+ logging.CyberCPLogFileWriter.writeToFile('Failed to get nonce for authorization')
+ return False
+
+ # Get authorization details with GET request
+ headers = {
+ 'Content-Type': 'application/jose+json'
+ }
+ response = requests.get(auth_url, headers=headers)
+ logging.CyberCPLogFileWriter.writeToFile(f'Authorization response status: {response.status_code}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Authorization response: {response.text}')
+
+ if response.status_code != 200:
+ logging.CyberCPLogFileWriter.writeToFile('Failed to get authorization')
+ return False
+
+ challenges = response.json()['challenges']
+ for challenge in challenges:
+ logging.CyberCPLogFileWriter.writeToFile(f'Processing challenge: {json.dumps(challenge)}')
+
+ # Only handle the challenge type we're using
+ if use_dns and challenge['type'] == 'dns-01':
+ if not self._handle_dns_challenge(challenge):
+ logging.CyberCPLogFileWriter.writeToFile('Failed to handle DNS challenge')
+ return False
+ if not self._verify_challenge(challenge['url']):
+ logging.CyberCPLogFileWriter.writeToFile('Failed to verify DNS challenge')
+ return False
+ if not self._wait_for_challenge_validation(challenge['url']):
+ logging.CyberCPLogFileWriter.writeToFile('DNS challenge validation failed')
+ return False
+ elif not use_dns and challenge['type'] == 'http-01':
+ if not self._handle_http_challenge(challenge):
+ logging.CyberCPLogFileWriter.writeToFile('Failed to handle HTTP challenge')
+ return False
+ if not self._verify_challenge(challenge['url']):
+ logging.CyberCPLogFileWriter.writeToFile('Failed to verify HTTP challenge')
+ return False
+ if not self._wait_for_challenge_validation(challenge['url']):
+ logging.CyberCPLogFileWriter.writeToFile('HTTP challenge validation failed')
+ return False
+ else:
+ logging.CyberCPLogFileWriter.writeToFile(f'Skipping {challenge["type"]} challenge')
+
+ # Generate CSR
+ logging.CyberCPLogFileWriter.writeToFile('Step 7: Generating CSR')
+ key = rsa.generate_private_key(
+ public_exponent=65537,
+ key_size=2048,
+ backend=default_backend()
+ )
+
+ # Get the domain from the order response
+ order_response = requests.get(self.order_url, headers=headers).json()
+ order_domains = [identifier['value'] for identifier in order_response['identifiers']]
+ logging.CyberCPLogFileWriter.writeToFile(f'Order domains: {order_domains}')
+
+ # Create CSR with exactly the domains from the order
+ csr = x509.CertificateSigningRequestBuilder().subject_name(
+ x509.Name([
+ x509.NameAttribute(x509.NameOID.COMMON_NAME, order_domains[0])
+ ])
+ ).add_extension(
+ x509.SubjectAlternativeName([
+ x509.DNSName(domain) for domain in order_domains
+ ]),
+ critical=False
+ ).sign(key, hashes.SHA256(), default_backend())
+
+ # Finalize order
+ logging.CyberCPLogFileWriter.writeToFile('Step 8: Finalizing order')
+ if not self._finalize_order(csr.public_bytes(serialization.Encoding.DER)):
+ logging.CyberCPLogFileWriter.writeToFile('Failed to finalize order')
+ return False
+
+ # Wait for order processing
+ logging.CyberCPLogFileWriter.writeToFile('Step 9: Waiting for order processing')
+ if not self._wait_for_order_processing():
+ logging.CyberCPLogFileWriter.writeToFile('Failed to process order')
+ return False
+
+ # Download certificate
+ logging.CyberCPLogFileWriter.writeToFile('Step 10: Downloading certificate')
+ certificate = self._download_certificate()
+ if not certificate:
+ logging.CyberCPLogFileWriter.writeToFile('Failed to download certificate')
+ return False
+
+ # Save certificate
+ logging.CyberCPLogFileWriter.writeToFile('Step 11: Saving certificate')
+ if not os.path.exists(self.cert_path):
+ logging.CyberCPLogFileWriter.writeToFile(f'Creating certificate directory: {self.cert_path}')
+ os.makedirs(self.cert_path)
+
+ cert_file = os.path.join(self.cert_path, 'fullchain.pem')
+ key_file = os.path.join(self.cert_path, 'privkey.pem')
+
+ logging.CyberCPLogFileWriter.writeToFile(f'Saving certificate to: {cert_file}')
+ with open(cert_file, 'wb') as f:
+ f.write(certificate)
+
+ logging.CyberCPLogFileWriter.writeToFile(f'Saving private key to: {key_file}')
+ with open(key_file, 'wb') as f:
+ f.write(key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.PKCS8,
+ encryption_algorithm=serialization.NoEncryption()
+ ))
+
+ logging.CyberCPLogFileWriter.writeToFile('Successfully completed certificate issuance')
+ return True
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error issuing certificate: {str(e)}')
+ return False
\ No newline at end of file
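
A hedged usage sketch of the CustomACME class added above: the domain and email are placeholders, staging=True targets the Let's Encrypt staging directory so test runs avoid production rate limits, and the process is assumed to have write access to /etc/letsencrypt and the webroot challenge directory.

```python
# Illustrative only: exercises CustomACME.issue_certificate as defined in this PR.
# Domain and email are placeholders; staging=True keeps requests against the
# Let's Encrypt staging environment.
from plogical.customACME import CustomACME

acme = CustomACME(
    domain='example.com',
    admin_email='admin@example.com',
    staging=True,
    provider='letsencrypt',
)

if acme.issue_certificate(['example.com', 'www.example.com'], use_dns=False):
    print('Certificate written to', acme.cert_path)
else:
    print('Issuance failed; see the CyberCP log file for details')
```

Note that use_dns=True would route through _handle_dns_challenge, which is still a placeholder in this PR, so HTTP-01 is the only working validation path.
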
diff --git a/plogical/customAcme.py b/plogical/customAcme.py
index b09c76b5e..e769e8c7d 100644
--- a/plogical/customAcme.py
+++ b/plogical/customAcme.py
@@ -16,24 +16,31 @@ from plogical import CyberCPLogFileWriter as logging
from plogical.processUtilities import ProcessUtilities
import socket
-
class CustomACME:
- def __init__(self, domain, admin_email, staging=False):
+ def __init__(self, domain, admin_email, staging=False, provider='letsencrypt'):
"""Initialize CustomACME"""
- logging.CyberCPLogFileWriter.writeToFile(
- f'Initializing CustomACME for domain: {domain}, email: {admin_email}, staging: {staging}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Initializing CustomACME for domain: {domain}, email: {admin_email}, staging: {staging}, provider: {provider}')
self.domain = domain
self.admin_email = admin_email
self.staging = staging
-
- # Set the ACME directory URL based on staging flag
- if staging:
- self.acme_directory = "https://acme-staging-v02.api.letsencrypt.org/directory"
- logging.CyberCPLogFileWriter.writeToFile('Using staging ACME directory')
- else:
- self.acme_directory = "https://acme-v02.api.letsencrypt.org/directory"
- logging.CyberCPLogFileWriter.writeToFile('Using production ACME directory')
-
+ self.provider = provider
+
+ # Set the ACME directory URL based on provider and staging flag
+ if provider == 'zerossl':
+ if staging:
+ self.acme_directory = "https://acme-staging.zerossl.com/v2/DV90"
+ logging.CyberCPLogFileWriter.writeToFile('Using ZeroSSL staging ACME directory')
+ else:
+ self.acme_directory = "https://acme.zerossl.com/v2/DV90"
+ logging.CyberCPLogFileWriter.writeToFile('Using ZeroSSL production ACME directory')
+ else: # letsencrypt
+ if staging:
+ self.acme_directory = "https://acme-staging-v02.api.letsencrypt.org/directory"
+ logging.CyberCPLogFileWriter.writeToFile('Using Let\'s Encrypt staging ACME directory')
+ else:
+ self.acme_directory = "https://acme-v02.api.letsencrypt.org/directory"
+ logging.CyberCPLogFileWriter.writeToFile('Using Let\'s Encrypt production ACME directory')
+
self.account_key = None
self.account_url = None
self.directory = None
@@ -42,17 +49,16 @@ class CustomACME:
self.authorizations = []
self.finalize_url = None
self.certificate_url = None
-
+
# Initialize paths
self.cert_path = f'/etc/letsencrypt/live/{domain}'
self.challenge_path = '/usr/local/lsws/Example/html/.well-known/acme-challenge'
self.account_key_path = f'/etc/letsencrypt/accounts/{domain}.key'
- logging.CyberCPLogFileWriter.writeToFile(
- f'Certificate path: {self.cert_path}, Challenge path: {self.challenge_path}')
-
+ logging.CyberCPLogFileWriter.writeToFile(f'Certificate path: {self.cert_path}, Challenge path: {self.challenge_path}')
+
# Create accounts directory if it doesn't exist
os.makedirs('/etc/letsencrypt/accounts', exist_ok=True)
-
+
def _generate_account_key(self):
"""Generate RSA account key"""
try:
@@ -75,8 +81,7 @@ class CustomACME:
logging.CyberCPLogFileWriter.writeToFile(f'Fetching ACME directory from {self.acme_directory}')
response = requests.get(self.acme_directory)
self.directory = response.json()
- logging.CyberCPLogFileWriter.writeToFile(
- f'Successfully fetched ACME directory: {json.dumps(self.directory)}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Successfully fetched ACME directory: {json.dumps(self.directory)}')
return True
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile(f'Error getting directory: {str(e)}')
@@ -100,22 +105,22 @@ class CustomACME:
logging.CyberCPLogFileWriter.writeToFile(f'Creating JWS for URL: {url}')
if payload is not None:
logging.CyberCPLogFileWriter.writeToFile(f'Payload: {json.dumps(payload)}')
-
+
# Get a fresh nonce for this request
if not self._get_nonce():
logging.CyberCPLogFileWriter.writeToFile('Failed to get fresh nonce')
return None
-
+
# Get the private key numbers
logging.CyberCPLogFileWriter.writeToFile('Getting private key numbers...')
private_numbers = self.account_key.private_numbers()
public_numbers = private_numbers.public_numbers
-
+
# Convert numbers to bytes
logging.CyberCPLogFileWriter.writeToFile('Converting RSA numbers to bytes...')
n_bytes = public_numbers.n.to_bytes((public_numbers.n.bit_length() + 7) // 8, 'big')
e_bytes = public_numbers.e.to_bytes((public_numbers.e.bit_length() + 7) // 8, 'big')
-
+
# Create JWK
logging.CyberCPLogFileWriter.writeToFile('Creating JWK...')
jwk_key = {
@@ -125,14 +130,14 @@ class CustomACME:
"alg": "RS256"
}
logging.CyberCPLogFileWriter.writeToFile(f'Created JWK: {json.dumps(jwk_key)}')
-
+
# Create protected header
protected = {
"alg": "RS256",
"url": url,
"nonce": self.nonce
}
-
+
# Add either JWK or Key ID based on whether we have an account URL
if self.account_url and url != self.directory['newAccount']:
protected["kid"] = self.account_url
@@ -140,13 +145,13 @@ class CustomACME:
else:
protected["jwk"] = jwk_key
logging.CyberCPLogFileWriter.writeToFile('Using JWK for new account')
-
+
# Encode protected header
logging.CyberCPLogFileWriter.writeToFile('Encoding protected header...')
protected_b64 = base64.urlsafe_b64encode(
json.dumps(protected).encode('utf-8')
).decode('utf-8').rstrip('=')
-
+
# For POST-as-GET requests, payload_b64 should be empty string
if payload is None:
payload_b64 = ""
@@ -157,11 +162,11 @@ class CustomACME:
payload_b64 = base64.urlsafe_b64encode(
json.dumps(payload).encode('utf-8')
).decode('utf-8').rstrip('=')
-
+
# Create signature input
logging.CyberCPLogFileWriter.writeToFile('Creating signature input...')
signature_input = f"{protected_b64}.{payload_b64}".encode('utf-8')
-
+
# Sign the input
logging.CyberCPLogFileWriter.writeToFile('Signing input...')
signature = self.account_key.sign(
@@ -169,26 +174,26 @@ class CustomACME:
padding.PKCS1v15(),
hashes.SHA256()
)
-
+
# Encode signature
logging.CyberCPLogFileWriter.writeToFile('Encoding signature...')
signature_b64 = base64.urlsafe_b64encode(signature).decode('utf-8').rstrip('=')
-
+
# Create final JWS
logging.CyberCPLogFileWriter.writeToFile('Creating final JWS...')
jws = {
"protected": protected_b64,
"signature": signature_b64
}
-
+
# Only add payload if it exists
if payload is not None:
jws["payload"] = payload_b64
-
+
# Ensure the JWS is properly formatted
jws_str = json.dumps(jws, separators=(',', ':'))
logging.CyberCPLogFileWriter.writeToFile(f'Final JWS: {jws_str}')
-
+
return jws_str
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile(f'Error creating JWS: {str(e)}')
@@ -238,12 +243,12 @@ class CustomACME:
"termsOfServiceAgreed": True,
"contact": [f"mailto:{self.admin_email}"]
}
-
+
jws = self._create_jws(payload, self.directory['newAccount'])
if not jws:
logging.CyberCPLogFileWriter.writeToFile('Failed to create JWS for account creation')
return False
-
+
logging.CyberCPLogFileWriter.writeToFile('Sending account creation request...')
headers = {
'Content-Type': 'application/jose+json'
@@ -251,17 +256,15 @@ class CustomACME:
response = requests.post(self.directory['newAccount'], data=jws, headers=headers)
logging.CyberCPLogFileWriter.writeToFile(f'Account creation response status: {response.status_code}')
logging.CyberCPLogFileWriter.writeToFile(f'Account creation response: {response.text}')
-
+
if response.status_code == 201:
self.account_url = response.headers['Location']
- logging.CyberCPLogFileWriter.writeToFile(
- f'Successfully created account. Account URL: {self.account_url}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Successfully created account. Account URL: {self.account_url}')
# Save the account key for future use
self._save_account_key()
return True
elif response.status_code == 429:
- logging.CyberCPLogFileWriter.writeToFile(
- 'Rate limit hit for account creation. Using staging environment...')
+ logging.CyberCPLogFileWriter.writeToFile('Rate limit hit for account creation. Using staging environment...')
self.staging = True
self.acme_directory = "https://acme-staging-v02.api.letsencrypt.org/directory"
# Get new directory and nonce for staging
@@ -289,12 +292,12 @@ class CustomACME:
payload = {
"identifiers": identifiers
}
-
+
jws = self._create_jws(payload, self.directory['newOrder'])
if not jws:
logging.CyberCPLogFileWriter.writeToFile('Failed to create JWS for order creation')
return False
-
+
logging.CyberCPLogFileWriter.writeToFile('Sending order creation request...')
headers = {
'Content-Type': 'application/jose+json'
@@ -302,7 +305,7 @@ class CustomACME:
response = requests.post(self.directory['newOrder'], data=jws, headers=headers)
logging.CyberCPLogFileWriter.writeToFile(f'Order creation response status: {response.status_code}')
logging.CyberCPLogFileWriter.writeToFile(f'Order creation response: {response.text}')
-
+
if response.status_code == 201:
self.order_url = response.headers['Location']
self.authorizations = response.json()['authorizations']
@@ -320,26 +323,26 @@ class CustomACME:
"""Handle HTTP-01 challenge"""
try:
logging.CyberCPLogFileWriter.writeToFile(f'Handling HTTP challenge: {json.dumps(challenge)}')
-
+
# Get key authorization
key_auth = self._get_key_authorization(challenge)
if not key_auth:
logging.CyberCPLogFileWriter.writeToFile('Failed to get key authorization')
return False
-
+
# Create challenge directory if it doesn't exist
if not os.path.exists(self.challenge_path):
logging.CyberCPLogFileWriter.writeToFile(f'Creating challenge directory: {self.challenge_path}')
os.makedirs(self.challenge_path)
-
+
# Write challenge file
challenge_file = os.path.join(self.challenge_path, challenge['token'])
logging.CyberCPLogFileWriter.writeToFile(f'Writing challenge file: {challenge_file}')
-
+
# Write only the key authorization to the file
with open(challenge_file, 'w') as f:
f.write(key_auth)
-
+
logging.CyberCPLogFileWriter.writeToFile('Successfully handled HTTP challenge')
return True
except Exception as e:
@@ -361,22 +364,22 @@ class CustomACME:
"""Get key authorization for challenge"""
try:
logging.CyberCPLogFileWriter.writeToFile('Getting key authorization...')
-
+
# Get the private key numbers
private_numbers = self.account_key.private_numbers()
public_numbers = private_numbers.public_numbers
-
+
# Convert numbers to bytes
n_bytes = public_numbers.n.to_bytes((public_numbers.n.bit_length() + 7) // 8, 'big')
e_bytes = public_numbers.e.to_bytes((public_numbers.e.bit_length() + 7) // 8, 'big')
-
+
# Create JWK without alg field
jwk_key = {
"kty": "RSA",
"n": base64.urlsafe_b64encode(n_bytes).decode('utf-8').rstrip('='),
"e": base64.urlsafe_b64encode(e_bytes).decode('utf-8').rstrip('=')
}
-
+
# Calculate the JWK thumbprint according to RFC 7638
# The thumbprint is a hash of the JWK (JSON Web Key) in a specific format
# First, we create a dictionary with the required JWK parameters
@@ -385,23 +388,23 @@ class CustomACME:
"kty": "RSA", # Key type
"n": base64.urlsafe_b64encode(public_numbers.n.to_bytes(256, 'big')).decode('utf-8').rstrip('=')
}
-
+
# Sort the JWK parameters alphabetically by key name
# This ensures consistent thumbprint calculation regardless of parameter order
sorted_jwk = json.dumps(jwk, sort_keys=True, separators=(',', ':'))
-
+
# Calculate the SHA-256 hash of the sorted JWK
# Example of what sorted_jwk might look like:
# {"e":"AQAB","kty":"RSA","n":"tVKUtcx_n9rt5afY_2WFNVAu9fjD4xqX4Xm3dJz3XYb"}
# The thumbprint will be a 32-byte SHA-256 hash of this string
# For example, it might look like: b'x\x9c\x1d\x8f\x8b\x1b\x1e\x8b\x1b\x1e\x8b\x1b\x1e\x8b\x1b\x1e'
thumbprint = hashlib.sha256(sorted_jwk.encode('utf-8')).digest()
-
+
# Encode the thumbprint in base64url format (RFC 4648)
# This removes padding characters (=) and replaces + and / with - and _
# Example final thumbprint: "xJ0dj8sbHosbHosbHosbHos"
thumbprint = base64.urlsafe_b64encode(thumbprint).decode('utf-8').rstrip('=')
-
+
# Combine token and key authorization
key_auth = f"{challenge['token']}.{thumbprint}"
logging.CyberCPLogFileWriter.writeToFile(f'Key authorization: {key_auth}')
@@ -412,31 +415,31 @@ class CustomACME:
def _verify_challenge(self, challenge_url):
"""Verify challenge completion with the ACME server
-
+
This function sends a POST request to the ACME server to verify that the challenge
has been completed successfully. The challenge URL is provided by the ACME server
when the challenge is created.
-
+
Example challenge_url:
"https://acme-v02.api.letsencrypt.org/acme/challenge/example.com/123456"
-
+
The verification process:
1. Creates an empty payload (POST-as-GET request)
2. Creates a JWS (JSON Web Signature) with the payload
3. Sends the request to the ACME server
4. Checks the response status
-
+
Returns:
bool: True if challenge is verified successfully, False otherwise
"""
try:
logging.CyberCPLogFileWriter.writeToFile(f'Verifying challenge at URL: {challenge_url}')
-
+
# Create empty payload for POST-as-GET request
# This is a special type of request where we want to GET a resource
# but need to include a signature, so we use POST with an empty payload
payload = {}
-
+
# Create JWS (JSON Web Signature) for the request
# Example JWS might look like:
# {
@@ -448,15 +451,15 @@ class CustomACME:
if not jws:
logging.CyberCPLogFileWriter.writeToFile('Failed to create JWS for challenge verification')
return False
-
+
logging.CyberCPLogFileWriter.writeToFile('Sending challenge verification request...')
-
+
# Set headers for the request
# Content-Type: application/jose+json indicates we're sending a JWS
headers = {
'Content-Type': 'application/jose+json'
}
-
+
# Send the verification request to the ACME server
# Example response might look like:
# {
@@ -468,7 +471,7 @@ class CustomACME:
response = requests.post(challenge_url, data=jws, headers=headers)
logging.CyberCPLogFileWriter.writeToFile(f'Challenge verification response status: {response.status_code}')
logging.CyberCPLogFileWriter.writeToFile(f'Challenge verification response: {response.text}')
-
+
# Check if the challenge was verified successfully
# Status code 200 indicates success
# The response will contain the challenge status and validation time
@@ -487,12 +490,12 @@ class CustomACME:
payload = {
"csr": base64.urlsafe_b64encode(csr).decode('utf-8').rstrip('=')
}
-
+
jws = self._create_jws(payload, self.finalize_url)
if not jws:
logging.CyberCPLogFileWriter.writeToFile('Failed to create JWS for order finalization')
return False
-
+
logging.CyberCPLogFileWriter.writeToFile('Sending order finalization request...')
headers = {
'Content-Type': 'application/jose+json'
@@ -500,7 +503,7 @@ class CustomACME:
response = requests.post(self.finalize_url, data=jws, headers=headers)
logging.CyberCPLogFileWriter.writeToFile(f'Order finalization response status: {response.status_code}')
logging.CyberCPLogFileWriter.writeToFile(f'Order finalization response: {response.text}')
-
+
if response.status_code == 200:
# Wait for order to be processed
max_attempts = 30
@@ -509,30 +512,27 @@ class CustomACME:
if not self._get_nonce():
logging.CyberCPLogFileWriter.writeToFile('Failed to get nonce for order status check')
return False
-
+
response = requests.get(self.order_url, headers=headers)
logging.CyberCPLogFileWriter.writeToFile(f'Order status check response: {response.text}')
-
+
if response.status_code == 200:
order_status = response.json().get('status')
if order_status == 'valid':
self.certificate_url = response.json().get('certificate')
- logging.CyberCPLogFileWriter.writeToFile(
- f'Successfully finalized order. Certificate URL: {self.certificate_url}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Successfully finalized order. Certificate URL: {self.certificate_url}')
return True
elif order_status == 'invalid':
logging.CyberCPLogFileWriter.writeToFile('Order validation failed')
return False
elif order_status == 'processing':
- logging.CyberCPLogFileWriter.writeToFile(
- f'Order still processing, attempt {attempt + 1}/{max_attempts}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Order still processing, attempt {attempt + 1}/{max_attempts}')
time.sleep(delay)
continue
-
- logging.CyberCPLogFileWriter.writeToFile(
- f'Order status check failed, attempt {attempt + 1}/{max_attempts}')
+
+ logging.CyberCPLogFileWriter.writeToFile(f'Order status check failed, attempt {attempt + 1}/{max_attempts}')
time.sleep(delay)
-
+
logging.CyberCPLogFileWriter.writeToFile('Order processing timed out')
return False
return False
@@ -545,13 +545,13 @@ class CustomACME:
try:
logging.CyberCPLogFileWriter.writeToFile('Downloading certificate...')
logging.CyberCPLogFileWriter.writeToFile(f'Certificate URL: {self.certificate_url}')
-
+
# For certificate downloads, we can use a simple GET request
response = requests.get(self.certificate_url)
logging.CyberCPLogFileWriter.writeToFile(f'Certificate download response status: {response.status_code}')
logging.CyberCPLogFileWriter.writeToFile(f'Certificate download response headers: {response.headers}')
logging.CyberCPLogFileWriter.writeToFile(f'Certificate download response content: {response.text}')
-
+
if response.status_code == 200:
logging.CyberCPLogFileWriter.writeToFile('Successfully downloaded certificate')
return response.content
@@ -568,13 +568,13 @@ class CustomACME:
if not self._get_nonce():
logging.CyberCPLogFileWriter.writeToFile('Failed to get nonce for challenge status check')
return False
-
+
headers = {
'Content-Type': 'application/jose+json'
}
response = requests.get(challenge_url, headers=headers)
logging.CyberCPLogFileWriter.writeToFile(f'Challenge status check response: {response.text}')
-
+
if response.status_code == 200:
challenge_status = response.json().get('status')
if challenge_status == 'valid':
@@ -583,11 +583,10 @@ class CustomACME:
elif challenge_status == 'invalid':
logging.CyberCPLogFileWriter.writeToFile('Challenge validation failed')
return False
-
- logging.CyberCPLogFileWriter.writeToFile(
- f'Challenge still pending, attempt {attempt + 1}/{max_attempts}')
+
+ logging.CyberCPLogFileWriter.writeToFile(f'Challenge still pending, attempt {attempt + 1}/{max_attempts}')
time.sleep(delay)
-
+
logging.CyberCPLogFileWriter.writeToFile('Challenge validation timed out')
return False
except Exception as e:
@@ -596,40 +595,40 @@ class CustomACME:
def _check_dns_record(self, domain):
"""Check if a domain has valid DNS records
-
+
This function performs multiple DNS checks to ensure the domain has valid DNS records.
It includes:
1. A record (IPv4) check
2. AAAA record (IPv6) check
3. DNS caching prevention
4. Multiple DNS server checks
-
+
Args:
domain (str): The domain to check
-
+
Returns:
bool: True if valid DNS records are found, False otherwise
"""
try:
logging.CyberCPLogFileWriter.writeToFile(f'Checking DNS records for domain: {domain}')
-
+
# List of public DNS servers to check against
dns_servers = [
- '8.8.8.8', # Google DNS
- '1.1.1.1', # Cloudflare DNS
- '208.67.222.222' # OpenDNS
+ '8.8.8.8', # Google DNS
+ '1.1.1.1', # Cloudflare DNS
+ '208.67.222.222' # OpenDNS
]
-
+
# Function to check DNS record with specific DNS server
def check_with_dns_server(server, record_type='A'):
try:
# Create a new socket for each check
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5) # 5 second timeout
-
+
# Set the DNS server
sock.connect((server, 53))
-
+
# Create DNS query
query = bytearray()
# DNS header
@@ -639,54 +638,54 @@ class CustomACME:
query += b'\x00\x00' # Answer RRs: 0
query += b'\x00\x00' # Authority RRs: 0
query += b'\x00\x00' # Additional RRs: 0
-
+
# Domain name
for part in domain.split('.'):
query.append(len(part))
query.extend(part.encode())
query += b'\x00' # End of domain name
-
+
# Query type and class
if record_type == 'A':
query += b'\x00\x01' # Type: A
else: # AAAA
query += b'\x00\x1c' # Type: AAAA
query += b'\x00\x01' # Class: IN
-
+
# Send query
sock.send(query)
-
+
# Receive response
response = sock.recv(1024)
-
+
# Check if we got a valid response
if len(response) > 12: # Minimum DNS response size
# Check if there are answers in the response
answer_count = int.from_bytes(response[6:8], 'big')
if answer_count > 0:
return True
-
+
return False
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile(f'Error checking DNS with server {server}: {str(e)}')
return False
finally:
sock.close()
-
+
# Check A records (IPv4) with multiple DNS servers
a_record_found = False
for server in dns_servers:
if check_with_dns_server(server, 'A'):
a_record_found = True
break
-
+
# Check AAAA records (IPv6) with multiple DNS servers
aaaa_record_found = False
for server in dns_servers:
if check_with_dns_server(server, 'AAAA'):
aaaa_record_found = True
break
-
+
# Also check with system's DNS resolver as a fallback
try:
# Try to resolve A record (IPv4)
@@ -694,23 +693,23 @@ class CustomACME:
a_record_found = True
except socket.gaierror:
pass
-
+
try:
# Try to resolve AAAA record (IPv6)
socket.getaddrinfo(domain, None, socket.AF_INET6)
aaaa_record_found = True
except socket.gaierror:
pass
-
+
# Log the results
if a_record_found:
logging.CyberCPLogFileWriter.writeToFile(f'IPv4 DNS record found for domain: {domain}')
if aaaa_record_found:
logging.CyberCPLogFileWriter.writeToFile(f'IPv6 DNS record found for domain: {domain}')
-
+
# Return True if either A or AAAA record is found
return a_record_found or aaaa_record_found
-
+
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile(f'Error checking DNS records: {str(e)}')
return False
@@ -723,13 +722,13 @@ class CustomACME:
if not self._get_nonce():
logging.CyberCPLogFileWriter.writeToFile('Failed to get nonce for order status check')
return False
-
+
headers = {
'Content-Type': 'application/jose+json'
}
response = requests.get(self.order_url, headers=headers)
logging.CyberCPLogFileWriter.writeToFile(f'Order status check response: {response.text}')
-
+
if response.status_code == 200:
order_status = response.json().get('status')
if order_status == 'valid':
@@ -740,15 +739,13 @@ class CustomACME:
logging.CyberCPLogFileWriter.writeToFile('Order validation failed')
return False
elif order_status == 'processing':
- logging.CyberCPLogFileWriter.writeToFile(
- f'Order still processing, attempt {attempt + 1}/{max_attempts}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Order still processing, attempt {attempt + 1}/{max_attempts}')
time.sleep(delay)
continue
-
- logging.CyberCPLogFileWriter.writeToFile(
- f'Order status check failed, attempt {attempt + 1}/{max_attempts}')
+
+ logging.CyberCPLogFileWriter.writeToFile(f'Order status check failed, attempt {attempt + 1}/{max_attempts}')
time.sleep(delay)
-
+
logging.CyberCPLogFileWriter.writeToFile('Order processing timed out')
return False
except Exception as e:
@@ -758,15 +755,14 @@ class CustomACME:
def issue_certificate(self, domains, use_dns=False):
"""Main method to issue certificate"""
try:
- logging.CyberCPLogFileWriter.writeToFile(
- f'Starting certificate issuance for domains: {domains}, use_dns: {use_dns}')
-
+ logging.CyberCPLogFileWriter.writeToFile(f'Starting certificate issuance for domains: {domains}, use_dns: {use_dns}')
+
# Try to load existing account key first
if self._load_account_key():
logging.CyberCPLogFileWriter.writeToFile('Using existing account key')
else:
logging.CyberCPLogFileWriter.writeToFile('No existing account key found, will create new one')
-
+
# Filter domains to only include those with valid DNS records
valid_domains = []
for domain in domains:
@@ -774,27 +770,27 @@ class CustomACME:
valid_domains.append(domain)
else:
logging.CyberCPLogFileWriter.writeToFile(f'Skipping domain {domain} due to missing DNS records')
-
+
if not valid_domains:
logging.CyberCPLogFileWriter.writeToFile('No valid domains found with DNS records')
return False
-
+
# Initialize ACME
logging.CyberCPLogFileWriter.writeToFile('Step 1: Generating account key')
if not self._generate_account_key():
logging.CyberCPLogFileWriter.writeToFile('Failed to generate account key')
return False
-
+
logging.CyberCPLogFileWriter.writeToFile('Step 2: Getting ACME directory')
if not self._get_directory():
logging.CyberCPLogFileWriter.writeToFile('Failed to get ACME directory')
return False
-
+
logging.CyberCPLogFileWriter.writeToFile('Step 3: Getting nonce')
if not self._get_nonce():
logging.CyberCPLogFileWriter.writeToFile('Failed to get nonce')
return False
-
+
logging.CyberCPLogFileWriter.writeToFile('Step 4: Creating account')
if not self._create_account():
logging.CyberCPLogFileWriter.writeToFile('Failed to create account')
@@ -811,13 +807,13 @@ class CustomACME:
return False
else:
return False
-
+
# Create order with only valid domains
logging.CyberCPLogFileWriter.writeToFile('Step 5: Creating order')
if not self._create_order(valid_domains):
logging.CyberCPLogFileWriter.writeToFile('Failed to create order')
return False
-
+
# Handle challenges
logging.CyberCPLogFileWriter.writeToFile('Step 6: Handling challenges')
for auth_url in self.authorizations:
@@ -825,7 +821,7 @@ class CustomACME:
if not self._get_nonce():
logging.CyberCPLogFileWriter.writeToFile('Failed to get nonce for authorization')
return False
-
+
# Get authorization details with GET request
headers = {
'Content-Type': 'application/jose+json'
@@ -833,15 +829,15 @@ class CustomACME:
response = requests.get(auth_url, headers=headers)
logging.CyberCPLogFileWriter.writeToFile(f'Authorization response status: {response.status_code}')
logging.CyberCPLogFileWriter.writeToFile(f'Authorization response: {response.text}')
-
+
if response.status_code != 200:
logging.CyberCPLogFileWriter.writeToFile('Failed to get authorization')
return False
-
+
challenges = response.json()['challenges']
for challenge in challenges:
logging.CyberCPLogFileWriter.writeToFile(f'Processing challenge: {json.dumps(challenge)}')
-
+
# Only handle the challenge type we're using
if use_dns and challenge['type'] == 'dns-01':
if not self._handle_dns_challenge(challenge):
@@ -865,7 +861,7 @@ class CustomACME:
return False
else:
logging.CyberCPLogFileWriter.writeToFile(f'Skipping {challenge["type"]} challenge')
-
+
# Generate CSR
logging.CyberCPLogFileWriter.writeToFile('Step 7: Generating CSR')
key = rsa.generate_private_key(
@@ -873,12 +869,12 @@ class CustomACME:
key_size=2048,
backend=default_backend()
)
-
+
# Get the domain from the order response
order_response = requests.get(self.order_url, headers=headers).json()
order_domains = [identifier['value'] for identifier in order_response['identifiers']]
logging.CyberCPLogFileWriter.writeToFile(f'Order domains: {order_domains}')
-
+
# Create CSR with exactly the domains from the order
csr = x509.CertificateSigningRequestBuilder().subject_name(
x509.Name([
@@ -890,39 +886,39 @@ class CustomACME:
]),
critical=False
).sign(key, hashes.SHA256(), default_backend())
-
+
# Finalize order
logging.CyberCPLogFileWriter.writeToFile('Step 8: Finalizing order')
if not self._finalize_order(csr.public_bytes(serialization.Encoding.DER)):
logging.CyberCPLogFileWriter.writeToFile('Failed to finalize order')
return False
-
+
# Wait for order processing
logging.CyberCPLogFileWriter.writeToFile('Step 9: Waiting for order processing')
if not self._wait_for_order_processing():
logging.CyberCPLogFileWriter.writeToFile('Failed to process order')
return False
-
+
# Download certificate
logging.CyberCPLogFileWriter.writeToFile('Step 10: Downloading certificate')
certificate = self._download_certificate()
if not certificate:
logging.CyberCPLogFileWriter.writeToFile('Failed to download certificate')
return False
-
+
# Save certificate
logging.CyberCPLogFileWriter.writeToFile('Step 11: Saving certificate')
if not os.path.exists(self.cert_path):
logging.CyberCPLogFileWriter.writeToFile(f'Creating certificate directory: {self.cert_path}')
os.makedirs(self.cert_path)
-
+
cert_file = os.path.join(self.cert_path, 'fullchain.pem')
key_file = os.path.join(self.cert_path, 'privkey.pem')
-
+
logging.CyberCPLogFileWriter.writeToFile(f'Saving certificate to: {cert_file}')
with open(cert_file, 'wb') as f:
f.write(certificate)
-
+
logging.CyberCPLogFileWriter.writeToFile(f'Saving private key to: {key_file}')
with open(key_file, 'wb') as f:
f.write(key.private_bytes(
@@ -930,9 +926,9 @@ class CustomACME:
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption()
))
-
+
logging.CyberCPLogFileWriter.writeToFile('Successfully completed certificate issuance')
return True
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile(f'Error issuing certificate: {str(e)}')
- return False
\ No newline at end of file
+ return False
\ No newline at end of file
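
For reference, the order-status and challenge-status loops in this module all repeat the same poll-until-terminal pattern; below is a minimal, self-contained sketch of that pattern (the names poll_until and fetch_status are illustrative, not part of the patch):

import time

def poll_until(fetch_status, terminal_states=('valid', 'invalid'),
               max_attempts=30, delay=2):
    """Call fetch_status() until it returns a terminal state or attempts run out.

    Returns the terminal state seen, or None if polling timed out.
    """
    for attempt in range(max_attempts):
        state = fetch_status()
        if state in terminal_states:
            return state
        # 'pending' / 'processing' and transient errors simply trigger another attempt
        time.sleep(delay)
    return None

# Stubbed usage: the third poll reports a terminal state.
states = iter(['pending', 'processing', 'valid'])
print(poll_until(lambda: next(states), delay=0))  # -> valid
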
diff --git a/plogical/processUtilities.py b/plogical/processUtilities.py
index 52a0c65ea..46cd51473 100755
--- a/plogical/processUtilities.py
+++ b/plogical/processUtilities.py
@@ -169,27 +169,31 @@ class ProcessUtilities(multi.Thread):
distroPath = '/etc/lsb-release'
distroPathAlma = '/etc/redhat-release'
- if os.path.exists(distroPath):
-
- ## this is check only
- if open(distroPath, 'r').read().find('22.04') > -1:
- ProcessUtilities.ubuntu22Check = 1
+ # First check if we're on Ubuntu
+ if os.path.exists('/etc/os-release'):
+ with open('/etc/os-release', 'r') as f:
+ content = f.read()
+ if 'Ubuntu' in content:
+ if '22.04' in content:
+ ProcessUtilities.ubuntu22Check = 1
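+ # 22.04 shares the ubuntu20 code path; ubuntu22Check records that it is specifically 22.04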
+ return ProcessUtilities.ubuntu20
+ elif '20.04' in content:
+ return ProcessUtilities.ubuntu20
+ return ProcessUtilities.ubuntu
- if open(distroPath, 'r').read().find('20.04') > -1 or open(distroPath, 'r').read().find('22.04'):
- return ProcessUtilities.ubuntu20
- return ProcessUtilities.ubuntu
- else:
- if open('/etc/redhat-release', 'r').read().find('CentOS Linux release 8') > -1 or open('/etc/redhat-release', 'r').read().find('AlmaLinux release 8') > -1 \
- or open('/etc/redhat-release', 'r').read().find('Rocky Linux release 8') > -1 \
- or open('/etc/redhat-release', 'r').read().find('Rocky Linux release 9') > -1 or open('/etc/redhat-release', 'r').read().find('AlmaLinux release 9') > -1 or \
- open('/etc/redhat-release', 'r').read().find('CloudLinux release 9') > -1 or open('/etc/redhat-release', 'r').read().find('CloudLinux release 8') > -1:
- ## this is check only
- if open(distroPathAlma, 'r').read().find('AlmaLinux release 9') > -1 or open(distroPathAlma, 'r').read().find('Rocky Linux release 9') > -1:
- ProcessUtilities.alma9check = 1
-
- return ProcessUtilities.cent8
- return ProcessUtilities.centos
+ # Check for RedHat-based distributions
+ if os.path.exists(distroPathAlma):
+ with open(distroPathAlma, 'r') as f:
+ content = f.read()
+ if any(x in content for x in ['CentOS Linux release 8', 'AlmaLinux release 8', 'Rocky Linux release 8',
+ 'Rocky Linux release 9', 'AlmaLinux release 9', 'CloudLinux release 9',
+ 'CloudLinux release 8']):
+ if any(x in content for x in ['AlmaLinux release 9', 'Rocky Linux release 9']):
+ ProcessUtilities.alma9check = 1
+ return ProcessUtilities.cent8
+ # Fall back to the Ubuntu code path when no supported RedHat-based (8/9) release is detected
+ return ProcessUtilities.ubuntu
@staticmethod
def containerCheck():
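
The distro detection above relies on substring checks against /etc/os-release and /etc/redhat-release; for comparison, a small sketch that parses /etc/os-release into key/value pairs (ID and VERSION_ID are standard os-release fields; the helper name is illustrative):

def read_os_release(path='/etc/os-release'):
    """Parse an os-release file into a dict of KEY -> value (surrounding quotes stripped)."""
    info = {}
    try:
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#') or '=' not in line:
                    continue
                key, _, value = line.partition('=')
                info[key] = value.strip('"')
    except OSError:
        pass
    return info

release = read_os_release()
if release.get('ID') == 'ubuntu' and release.get('VERSION_ID') == '22.04':
    print('Ubuntu 22.04 detected')
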
diff --git a/plogical/renew.py b/plogical/renew.py
index 4f2193652..251355e32 100644
--- a/plogical/renew.py
+++ b/plogical/renew.py
@@ -3,150 +3,113 @@ import os
import os.path
import sys
import django
+from typing import Union, Optional
+from datetime import datetime, timedelta
+import time
sys.path.append('/usr/local/CyberCP')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CyberCP.settings")
django.setup()
from plogical.CyberCPLogFileWriter import CyberCPLogFileWriter as logging
from websiteFunctions.models import Websites, ChildDomains
-from os import path
-from datetime import datetime
import OpenSSL
from plogical.virtualHostUtilities import virtualHostUtilities
+from plogical.processUtilities import ProcessUtilities
class Renew:
+ def _check_and_renew_ssl(self, domain: str, path: str, admin_email: str, is_child: bool = False) -> None:
+ """Helper method to check and renew SSL for a domain."""
+ try:
+ logging.writeToFile(f'Checking SSL for {domain}.', 0)
+ file_path = f'/etc/letsencrypt/live/{domain}/fullchain.pem'
+
+ if not os.path.exists(file_path):
+ logging.writeToFile(f'SSL does not exist for {domain}. Obtaining now..', 0)
+ virtualHostUtilities.issueSSL(domain, path, admin_email)
+ return
+
+ logging.writeToFile(f'SSL exists for {domain}. Checking if SSL will expire in 15 days..', 0)
+
+ with open(file_path, 'r') as cert_file:
+ x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_file.read())
+
+ expire_data = x509.get_notAfter().decode('ascii')
+ final_date = datetime.strptime(expire_data, '%Y%m%d%H%M%SZ')
+ now = datetime.now()
+ diff = final_date - now
+
+ ssl_provider = x509.get_issuer().get_components()[1][1].decode('utf-8')
+ logging.writeToFile(f'Provider: {ssl_provider}, Days until expiration: {diff.days}', 0)
+
+ if diff.days >= 15 and ssl_provider != 'Denial':
+ logging.writeToFile(f'SSL exists for {domain} and is not ready to renew, skipping..', 0)
+ return
+
+ if ssl_provider == 'Denial' or ssl_provider == "Let's Encrypt":
+ logging.writeToFile(f'SSL exists for {domain} and ready to renew..', 0)
+ logging.writeToFile(f'Renewing SSL for {domain}..', 0)
+ virtualHostUtilities.issueSSL(domain, path, admin_email)
+ elif ssl_provider != "Let's Encrypt":
+ logging.writeToFile(f'Custom SSL exists for {domain}, skipping automatic renewal..', 1)
+
+ except OpenSSL.crypto.Error as e:
+ logging.writeToFile(f'OpenSSL error for {domain}: {str(e)}', 1)
+ except Exception as e:
+ logging.writeToFile(f'Error processing SSL for {domain}: {str(e)}', 1)
+
+ def _restart_services(self) -> None:
+ """Helper method to restart required services."""
+ try:
+ logging.writeToFile('Restarting mail services for them to see new SSL.', 0)
+
+ commands = [
+ 'postmap -F hash:/etc/postfix/vmail_ssl.map',
+ 'systemctl restart postfix',
+ 'systemctl restart dovecot',
+ 'systemctl restart lscpd'
+ ]
+
+ for cmd in commands:
+ ProcessUtilities.normalExecutioner(cmd)
+ # Add a small delay between restarts
+ time.sleep(2)
+
+ except Exception as e:
+ logging.writeToFile(f'Error restarting services: {str(e)}', 1)
+
def SSLObtainer(self):
try:
logging.writeToFile('Running SSL Renew Utility')
- ## For Non-suspended websites only
-
+ # Process main domains
for website in Websites.objects.filter(state=1):
- logging.writeToFile('Checking SSL for %s.' % (website.domain), 0)
- filePath = '/etc/letsencrypt/live/%s/fullchain.pem' % (website.domain)
+ self._check_and_renew_ssl(
+ website.domain,
+ f'/home/{website.domain}/public_html',
+ website.adminEmail
+ )
- if path.exists(filePath):
- logging.writeToFile('SSL exists for %s. Checking if SSL will expire in 15 days..' % (website.domain), 0)
- x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
- open(filePath, 'r').read())
- expireData = x509.get_notAfter().decode('ascii')
- finalDate = datetime.strptime(expireData, '%Y%m%d%H%M%SZ')
- now = datetime.now()
- diff = finalDate - now
+ # Process child domains
+ for child in ChildDomains.objects.all():
+ self._check_and_renew_ssl(
+ child.domain,
+ child.path,
+ child.master.adminEmail,
+ is_child=True
+ )
- SSLProvider = x509.get_issuer().get_components()[1][1].decode('utf-8')
+ self._restart_services()
- print(f"Provider: {x509.get_issuer().get_components()[1][1].decode('utf-8')}, Days : {diff.days}")
-
- if int(diff.days) >= 15 and SSLProvider!='Denial':
- logging.writeToFile(
- 'SSL exists for %s and is not ready to renew, skipping..' % (website.domain), 0)
- print(
- f'SSL exists for %s and is not ready to renew, skipping..' % (website.domain))
- elif SSLProvider == 'Denial':
- logging.writeToFile(
- 'SSL exists for %s and ready to renew..' % (website.domain), 0)
- logging.writeToFile(
- 'Renewing SSL for %s..' % (website.domain), 0)
-
- print(
- f'SSL exists for %s and ready to renew..' % (website.domain))
-
- virtualHostUtilities.issueSSL(website.domain, '/home/%s/public_html' % (website.domain),
- website.adminEmail)
- elif SSLProvider != "Let's Encrypt":
- logging.writeToFile(
- 'Custom SSL exists for %s and ready to renew..' % (website.domain), 1)
- print(
- 'Custom SSL exists for %s and ready to renew..' % (website.domain))
- else:
- logging.writeToFile(
- 'SSL exists for %s and ready to renew..' % (website.domain), 0)
- logging.writeToFile(
- 'Renewing SSL for %s..' % (website.domain), 0)
-
- print(
- 'SSL exists for %s and ready to renew..' % (website.domain))
-
-
- virtualHostUtilities.issueSSL(website.domain, '/home/%s/public_html' % (website.domain), website.adminEmail)
- else:
- logging.writeToFile(
- 'SSL does not exist for %s. Obtaining now..' % (website.domain), 0)
- virtualHostUtilities.issueSSL(website.domain, '/home/%s/public_html' % (website.domain),
- website.adminEmail)
-
- ## For child-domains
-
- for website in ChildDomains.objects.all():
- logging.writeToFile('Checking SSL for %s.' % (website.domain), 0)
- filePath = '/etc/letsencrypt/live/%s/fullchain.pem' % (website.domain)
-
- if path.exists(filePath):
- logging.writeToFile(
- 'SSL exists for %s. Checking if SSL will expire in 15 days..' % (website.domain), 0)
- x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
- open(filePath, 'r').read())
- expireData = x509.get_notAfter().decode('ascii')
- finalDate = datetime.strptime(expireData, '%Y%m%d%H%M%SZ')
- now = datetime.now()
- diff = finalDate - now
-
- SSLProvider = x509.get_issuer().get_components()[1][1]
-
- print(f"Provider: {x509.get_issuer().get_components()[1][1].decode('utf-8')}, Days : {diff.days}")
-
- if int(diff.days) >= 15 and SSLProvider != 'Denial':
- logging.writeToFile(
- 'SSL exists for %s and is not ready to renew, skipping..' % (website.domain), 0)
- elif SSLProvider == 'Denial':
- logging.writeToFile(
- 'SSL exists for %s and ready to renew..' % (website.domain), 0)
- logging.writeToFile(
- 'Renewing SSL for %s..' % (website.domain), 0)
-
- virtualHostUtilities.issueSSL(website.domain, website.path,
- website.master.adminEmail)
- elif SSLProvider != "Let's Encrypt":
- logging.writeToFile(
- 'Custom SSL exists for %s and ready to renew..' % (website.domain), 1)
- else:
- logging.writeToFile(
- 'SSL exists for %s and ready to renew..' % (website.domain), 0)
- logging.writeToFile(
- 'Renewing SSL for %s..' % (website.domain), 0)
-
- virtualHostUtilities.issueSSL(website.domain, website.path,
- website.master.adminEmail)
- else:
- logging.writeToFile(
- 'SSL does not exist for %s. Obtaining now..' % (website.domain), 0)
- virtualHostUtilities.issueSSL(website.domain, website.path,
- website.master.adminEmail)
-
- self.file = logging.writeToFile('Restarting mail services for them to see new SSL.', 0)
-
- from plogical.processUtilities import ProcessUtilities
- command = 'postmap -F hash:/etc/postfix/vmail_ssl.map'
- ProcessUtilities.normalExecutioner(command)
-
- command = 'systemctl restart postfix'
- ProcessUtilities.normalExecutioner(command)
-
- command = 'systemctl restart dovecot'
- ProcessUtilities.normalExecutioner(command)
-
- command = 'systemctl restart lscpd'
- ProcessUtilities.normalExecutioner(command)
-
- except BaseException as msg:
- logging.writeToFile(str(msg) + '. Renew.SSLObtainer')
+ except Exception as e:
+ logging.writeToFile(f'Error in SSLObtainer: {str(e)}', 1)
@staticmethod
def FixMailSSL():
- for website in Websites.objects.all():
- virtualHostUtilities.setupAutoDiscover(1, '/home/cyberpanel/templogs', website.domain, website.admin)
-
+ try:
+ for website in Websites.objects.all():
+ virtualHostUtilities.setupAutoDiscover(1, '/home/cyberpanel/templogs', website.domain, website.admin)
+ except Exception as e:
+ logging.writeToFile(f'Error in FixMailSSL: {str(e)}', 1)
if __name__ == "__main__":
sslOB = Renew()
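
The expiry check inside _check_and_renew_ssl can be exercised on its own; a small sketch using the same pyOpenSSL calls, assuming a fullchain.pem exists at the given path (the issuer component index mirrors what the renew code uses):

from datetime import datetime
import OpenSSL

def days_until_expiry(pem_path):
    """Return (issuer_name, days_remaining) for a PEM certificate on disk."""
    with open(pem_path, 'r') as f:
        cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, f.read())
    not_after = datetime.strptime(cert.get_notAfter().decode('ascii'), '%Y%m%d%H%M%SZ')
    issuer = cert.get_issuer().get_components()[1][1].decode('utf-8')
    return issuer, (not_after - datetime.now()).days

# Illustrative usage (the path is an example, not a guaranteed location):
# issuer, days = days_until_expiry('/etc/letsencrypt/live/example.com/fullchain.pem')
# needs_renewal = days < 15 or issuer == 'Denial'
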
diff --git a/plogical/sslUtilities.py b/plogical/sslUtilities.py
index a59441b1d..11f616202 100755
--- a/plogical/sslUtilities.py
+++ b/plogical/sslUtilities.py
@@ -173,14 +173,14 @@ class sslUtilities:
@staticmethod
def PatchVhostConf(virtualHostName):
"""Patch the virtual host configuration to add ACME challenge support
-
+
This function adds the necessary configuration to handle ACME challenges
for both OpenLiteSpeed (OLS) and Apache configurations. It also checks
for potential configuration conflicts before making changes.
-
+
Args:
virtualHostName (str): The domain name to configure
-
+
Returns:
tuple: (status, message) where status is 1 for success, 0 for failure
"""
@@ -188,12 +188,12 @@ class sslUtilities:
# Construct paths
confPath = os.path.join(sslUtilities.Server_root, "conf", "vhosts", virtualHostName)
completePathToConfigFile = os.path.join(confPath, "vhost.conf")
-
+
# Check if file exists
if not os.path.exists(completePathToConfigFile):
logging.CyberCPLogFileWriter.writeToFile(f'Configuration file not found: {completePathToConfigFile}')
return 0, f'Configuration file not found: {completePathToConfigFile}'
-
+
# Read current configuration
try:
with open(completePathToConfigFile, 'r') as f:
@@ -201,42 +201,41 @@ class sslUtilities:
except IOError as e:
logging.CyberCPLogFileWriter.writeToFile(f'Error reading configuration file: {str(e)}')
return 0, f'Error reading configuration file: {str(e)}'
-
+
# Check for potential conflicts
conflicts = []
-
+
# Check if ACME challenge is already configured
if DataVhost.find('/.well-known/acme-challenge') != -1:
logging.CyberCPLogFileWriter.writeToFile(f'ACME challenge already configured for {virtualHostName}')
return 1, 'ACME challenge already configured'
-
+
# Check for conflicting rewrite rules
if DataVhost.find('rewrite') != -1 and DataVhost.find('enable 1') != -1:
conflicts.append('Active rewrite rules found that might interfere with ACME challenges')
-
+
# Check for conflicting location blocks
if DataVhost.find('location /.well-known') != -1:
conflicts.append('Existing location block for /.well-known found')
-
+
# Check for conflicting aliases
if DataVhost.find('Alias /.well-known') != -1:
conflicts.append('Existing alias for /.well-known found')
-
+
# Check for conflicting context blocks
if DataVhost.find('context /.well-known') != -1:
conflicts.append('Existing context block for /.well-known found')
-
+
# Check for conflicting access controls
if DataVhost.find('deny from all') != -1 and DataVhost.find('location') != -1:
conflicts.append('Global deny rules found that might block ACME challenges')
-
+
# If conflicts found, log them and return
if conflicts:
conflict_message = 'Configuration conflicts found: ' + '; '.join(conflicts)
- logging.CyberCPLogFileWriter.writeToFile(
- f'Configuration conflicts for {virtualHostName}: {conflict_message}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Configuration conflicts for {virtualHostName}: {conflict_message}')
return 0, conflict_message
-
+
# Create challenge directory if it doesn't exist
challenge_dir = '/usr/local/lsws/Example/html/.well-known/acme-challenge'
try:
@@ -246,7 +245,7 @@ class sslUtilities:
except OSError as e:
logging.CyberCPLogFileWriter.writeToFile(f'Error creating challenge directory: {str(e)}')
return 0, f'Error creating challenge directory: {str(e)}'
-
+
# Handle configuration based on server type
if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
# OpenLiteSpeed configuration
@@ -274,31 +273,29 @@ context /.well-known/acme-challenge {
# Read current configuration
with open(completePathToConfigFile, 'r') as f:
lines = f.readlines()
-
+
# Write new configuration
with open(completePathToConfigFile, 'w') as f:
check = 0
for line in lines:
f.write(line)
if line.find('DocumentRoot /home/') > -1 and check == 0:
- f.write(
- ' Alias /.well-known/acme-challenge /usr/local/lsws/Example/html/.well-known/acme-challenge\n')
+ f.write(' Alias /.well-known/acme-challenge /usr/local/lsws/Example/html/.well-known/acme-challenge\n')
check = 1
except IOError as e:
logging.CyberCPLogFileWriter.writeToFile(f'Error writing Apache configuration: {str(e)}')
return 0, f'Error writing Apache configuration: {str(e)}'
-
+
# Restart LiteSpeed
try:
from plogical import installUtilities
installUtilities.installUtilities.reStartLiteSpeed()
- logging.CyberCPLogFileWriter.writeToFile(
- f'Successfully configured ACME challenge for {virtualHostName}')
+ logging.CyberCPLogFileWriter.writeToFile(f'Successfully configured ACME challenge for {virtualHostName}')
return 1, 'Successfully configured ACME challenge'
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile(f'Error restarting LiteSpeed: {str(e)}')
return 0, f'Error restarting LiteSpeed: {str(e)}'
-
+
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile(f'Unexpected error in PatchVhostConf: {str(e)}')
return 0, f'Unexpected error: {str(e)}'
@@ -578,12 +575,12 @@ context /.well-known/acme-challenge {
command = f'chmod -R 755 /usr/local/lsws/Example/html'
ProcessUtilities.executioner(command)
- # Try custom ACME implementation first
+ # Try Let's Encrypt first
try:
domains = [virtualHostName, f'www.{virtualHostName}']
if aliasDomain:
domains.extend([aliasDomain, f'www.{aliasDomain}'])
-
+
# Check if Cloudflare is used
use_dns = False
try:
@@ -592,17 +589,32 @@ context /.well-known/acme-challenge {
use_dns = True
except:
pass
-
- acme = CustomACME(virtualHostName, adminEmail, staging=False) # Force production environment
+
+ acme = CustomACME(virtualHostName, adminEmail, staging=False, provider='letsencrypt')
if acme.issue_certificate(domains, use_dns=use_dns):
logging.CyberCPLogFileWriter.writeToFile(
- f"Successfully obtained SSL using custom ACME implementation for: {virtualHostName}")
+ f"Successfully obtained SSL using Let's Encrypt for: {virtualHostName}")
return 1
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile(
- f"Custom ACME implementation failed: {str(e)}. Falling back to acme.sh")
+ f"Let's Encrypt failed: {str(e)}. Trying ZeroSSL...")
- # Fallback to acme.sh if custom implementation fails
+ # Try ZeroSSL if Let's Encrypt fails
+ try:
+ domains = [virtualHostName, f'www.{virtualHostName}']
+ if aliasDomain:
+ domains.extend([aliasDomain, f'www.{aliasDomain}'])
+
+ acme = CustomACME(virtualHostName, adminEmail, staging=False, provider='zerossl')
+ if acme.issue_certificate(domains, use_dns=use_dns):
+ logging.CyberCPLogFileWriter.writeToFile(
+ f"Successfully obtained SSL using ZeroSSL for: {virtualHostName}")
+ return 1
+ except Exception as e:
+ logging.CyberCPLogFileWriter.writeToFile(
+ f"ZeroSSL failed: {str(e)}. Falling back to acme.sh")
+
+ # Fallback to acme.sh if both ACME providers fail
try:
acmePath = '/root/.acme.sh/acme.sh'
command = '%s --register-account -m %s' % (acmePath, adminEmail)
@@ -621,20 +633,19 @@ context /.well-known/acme-challenge {
command = acmePath + " --issue -d " + virtualHostName + " -d www." + virtualHostName \
+ ' --cert-file ' + existingCertPath + '/cert.pem' + ' --key-file ' + existingCertPath + '/privkey.pem' \
+ ' --fullchain-file ' + existingCertPath + '/fullchain.pem' + ' -w /usr/local/lsws/Example/html -k ec-256 --force --staging'
-
+
if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
result = subprocess.run(command, capture_output=True, universal_newlines=True, shell=True)
else:
- result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- universal_newlines=True, shell=True)
+ result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True)
if result.returncode == 0:
command = acmePath + " --issue -d " + virtualHostName + " -d www." + virtualHostName \
+ ' --cert-file ' + existingCertPath + '/cert.pem' + ' --key-file ' + existingCertPath + '/privkey.pem' \
+ ' --fullchain-file ' + existingCertPath + '/fullchain.pem' + ' -w /usr/local/lsws/Example/html -k ec-256 --force --server letsencrypt'
-
+
result = subprocess.run(command, capture_output=True, universal_newlines=True, shell=True)
-
+
if result.returncode == 0:
logging.CyberCPLogFileWriter.writeToFile(
"Successfully obtained SSL for: " + virtualHostName + " and: www." + virtualHostName, 0)
@@ -658,7 +669,7 @@ context /.well-known/acme-challenge {
+ ' --fullchain-file ' + existingCertPath + '/fullchain.pem' + ' -w /usr/local/lsws/Example/html -k ec-256 --force --server letsencrypt'
result = subprocess.run(command, capture_output=True, universal_newlines=True, shell=True)
-
+
if result.returncode == 0:
return 1
return 0
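
The Let's Encrypt -> ZeroSSL -> acme.sh chain above repeats the per-provider setup; the same flow can be expressed as a loop. CustomACME and its provider keyword are used as in the patch, while issue_via_acme_sh is a hypothetical stand-in for the acme.sh commands shown above:

def obtain_certificate(virtual_host, admin_email, domains, use_dns=False):
    """Try the ACME providers in order; fall back to acme.sh if both fail."""
    for provider in ('letsencrypt', 'zerossl'):
        try:
            acme = CustomACME(virtual_host, admin_email, staging=False, provider=provider)
            if acme.issue_certificate(domains, use_dns=use_dns):
                return 1
        except Exception as exc:
            # Log and move on to the next provider
            print(f'{provider} failed: {exc}')
    # Hypothetical wrapper around the acme.sh invocation used as the last resort
    return issue_via_acme_sh(virtual_host, domains, admin_email)
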
diff --git a/plogical/upgrade.py b/plogical/upgrade.py
index e7ea704e2..424096663 100755
--- a/plogical/upgrade.py
+++ b/plogical/upgrade.py
@@ -2207,6 +2207,10 @@ CREATE TABLE `websiteFunctions_backupsv2` (`id` integer AUTO_INCREMENT NOT NULL
if not Upgrade.executioner(command, command, 1):
return 0, 'Failed to execute %s' % (command)
+ command = 'git clean -f'
+ if not Upgrade.executioner(command, command, 1):
+ return 0, 'Failed to execute %s' % (command)
+
command = 'git pull'
if not Upgrade.executioner(command, command, 1):
return 0, 'Failed to execute %s' % (command)
@@ -3401,6 +3405,25 @@ pm.max_spare_servers = 3
WriteToFile.write(content)
WriteToFile.close()
+ @staticmethod
+ def setupPHPSymlink():
+ try:
+ # Remove existing PHP symlink if it exists
+ if os.path.exists('/usr/bin/php'):
+ os.remove('/usr/bin/php')
+
+ # Create symlink to PHP 8.0
+ command = 'ln -s /usr/local/lsws/lsphp80/bin/php /usr/bin/php'
+ Upgrade.executioner(command, 'Setup PHP Symlink', 0)
+
+ Upgrade.stdOut("PHP symlink created successfully.")
+
+ except BaseException as msg:
+ Upgrade.stdOut('[ERROR] ' + str(msg) + " [setupPHPSymlink]")
+ return 0
+
+ return 1
+
@staticmethod
def upgrade(branch):
@@ -3464,6 +3487,7 @@ pm.max_spare_servers = 3
Upgrade.executioner(command, 'tmp adjustment', 0)
Upgrade.dockerUsers()
+ Upgrade.setupPHPSymlink()
Upgrade.setupComposer()
##
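
setupPHPSymlink above shells out to ln; an equivalent standard-library-only sketch (the lsphp80 path is the one hard-coded in the patch):

import os

def setup_php_symlink(target='/usr/local/lsws/lsphp80/bin/php', link='/usr/bin/php'):
    """Point /usr/bin/php at the bundled lsphp binary, replacing any existing file or link."""
    try:
        if os.path.islink(link) or os.path.exists(link):
            os.remove(link)
        os.symlink(target, link)
        return 1
    except OSError as exc:
        print(f'[ERROR] {exc} [setup_php_symlink]')
        return 0
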
diff --git a/static/websiteFunctions/js/resource-monitoring.js b/static/websiteFunctions/js/resource-monitoring.js
new file mode 100644
index 000000000..6b0f3ed87
--- /dev/null
+++ b/static/websiteFunctions/js/resource-monitoring.js
@@ -0,0 +1,143 @@
+// Resource Monitoring
+let cpuChart, memoryChart, diskChart;
+let cpuData = [], memoryData = [], diskData = [];
+const maxDataPoints = 30;
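+// Keep a rolling window of 30 samples (about 2.5 minutes at the 5-second poll interval used below)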
+
+function initializeCharts() {
+ const chartOptions = {
+ responsive: true,
+ maintainAspectRatio: false,
+ scales: {
+ y: {
+ beginAtZero: true,
+ max: 100,
+ ticks: {
+ callback: function(value) {
+ return value + '%';
+ }
+ }
+ }
+ },
+ animation: {
+ duration: 750
+ }
+ };
+
+ // CPU Chart
+ const cpuCtx = document.getElementById('cpuChart').getContext('2d');
+ cpuChart = new Chart(cpuCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'CPU Usage (%)',
+ data: [],
+ borderColor: '#2563eb',
+ backgroundColor: 'rgba(37, 99, 235, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: chartOptions
+ });
+
+ // Memory Chart
+ const memoryCtx = document.getElementById('memoryChart').getContext('2d');
+ memoryChart = new Chart(memoryCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'Memory Usage (%)',
+ data: [],
+ borderColor: '#00b894',
+ backgroundColor: 'rgba(0, 184, 148, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: chartOptions
+ });
+
+ // Disk Chart
+ const diskCtx = document.getElementById('diskChart').getContext('2d');
+ diskChart = new Chart(diskCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'Disk Usage (%)',
+ data: [],
+ borderColor: '#ff9800',
+ backgroundColor: 'rgba(255, 152, 0, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: chartOptions
+ });
+}
+
+function updateCharts(data) {
+ const now = new Date();
+ const timeLabel = now.toLocaleTimeString();
+
+ // Update CPU Chart
+ cpuData.push(data.cpu_usage);
+ if (cpuData.length > maxDataPoints) cpuData.shift();
+ cpuChart.data.labels.push(timeLabel);
+ if (cpuChart.data.labels.length > maxDataPoints) cpuChart.data.labels.shift();
+ cpuChart.data.datasets[0].data = cpuData;
+ cpuChart.update('none'); // Use 'none' mode for better performance
+
+ // Update Memory Chart
+ memoryData.push(data.memory_usage);
+ if (memoryData.length > maxDataPoints) memoryData.shift();
+ memoryChart.data.labels.push(timeLabel);
+ if (memoryChart.data.labels.length > maxDataPoints) memoryChart.data.labels.shift();
+ memoryChart.data.datasets[0].data = memoryData;
+ memoryChart.update('none');
+
+ // Update Disk Chart
+ diskData.push(data.disk_percent);
+ if (diskData.length > maxDataPoints) diskData.shift();
+ diskChart.data.labels.push(timeLabel);
+ if (diskChart.data.labels.length > maxDataPoints) diskChart.data.labels.shift();
+ diskChart.data.datasets[0].data = diskData;
+ diskChart.update('none');
+}
+
+function fetchResourceUsage() {
+ $.ajax({
+ url: '/websites/get_website_resources/',
+ type: 'POST',
+ data: JSON.stringify({
+ 'domain': $('#domainNamePage').text().trim()
+ }),
+ contentType: 'application/json',
+ success: function(data) {
+ if (data.status === 1) {
+ updateCharts(data);
+ } else {
+ console.error('Error fetching resource data:', data.error_message);
+ }
+ },
+ error: function(xhr, status, error) {
+ console.error('Failed to fetch resource usage:', error);
+ }
+ });
+}
+
+// Initialize charts when the page loads
+$(document).ready(function() {
+ if (document.getElementById('cpuChart')) {
+ initializeCharts();
+ // Fetch resource usage every 5 seconds
+ setInterval(fetchResourceUsage, 5000);
+ // Initial fetch
+ fetchResourceUsage();
+ }
+});
\ No newline at end of file
diff --git a/websiteFunctions/resource_monitoring.py b/websiteFunctions/resource_monitoring.py
new file mode 100644
index 000000000..243dbc805
--- /dev/null
+++ b/websiteFunctions/resource_monitoring.py
@@ -0,0 +1,52 @@
+import psutil
+import os
+from plogical.processUtilities import ProcessUtilities
+from plogical.acl import ACLManager
+import plogical.CyberCPLogFileWriter as logging
+
+def get_website_resource_usage(externalApp):
+ try:
+ user = externalApp
+ if not user:
+ return {'status': 0, 'error_message': 'User not found'}
+
+ # Get CPU and Memory usage using ps command
+ command = f"ps -u {user} -o pcpu,pmem | grep -v CPU | awk '{{cpu += $1; mem += $2}} END {{print cpu, mem}}'"
+ result = ProcessUtilities.outputExecutioner(command)
+
+ try:
+ cpu_percent, memory_percent = map(float, result.split())
+ except:
+ cpu_percent = 0
+ memory_percent = 0
+
+ # Get disk usage using du command
+ website_path = f"/home/{user}/public_html"
+ if os.path.exists(website_path):
+ # Get disk usage in MB
+ command = f"du -sm {website_path} | cut -f1"
+ disk_used = float(ProcessUtilities.outputExecutioner(command))
+
+ # Get total disk space
+ command = f"df -m {website_path} | tail -1 | awk '{{print $2}}'"
+ disk_total = float(ProcessUtilities.outputExecutioner(command))
+
+ # Calculate percentage
+ disk_percent = (disk_used / disk_total) * 100 if disk_total > 0 else 0
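+ # Note: this is the site's share of the whole filesystem hosting its home directory, not of a per-site quota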
+ else:
+ disk_used = 0
+ disk_total = 0
+ disk_percent = 0
+
+ return {
+ 'status': 1,
+ 'cpu_usage': round(cpu_percent, 2),
+ 'memory_usage': round(memory_percent, 2),
+ 'disk_used': round(disk_used, 2),
+ 'disk_total': round(disk_total, 2),
+ 'disk_percent': round(disk_percent, 2)
+ }
+
+ except BaseException as msg:
+ logging.CyberCPLogFileWriter.writeToFile(f'Error in get_website_resource_usage: {str(msg)}')
+ return {'status': 0, 'error_message': str(msg)}
\ No newline at end of file
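
psutil is imported in resource_monitoring.py but not used in the function shown; a sketch of how the per-user CPU and memory figures could be gathered with it instead of the ps pipeline (it sums per-process percentages, as the ps version does; cpu_percent is measured since the previous call, so the first sample can read 0):

import psutil

def user_cpu_mem(username):
    """Sum cpu_percent and memory_percent over all processes owned by username."""
    cpu = mem = 0.0
    for proc in psutil.process_iter(['username', 'cpu_percent', 'memory_percent']):
        try:
            if proc.info['username'] == username:
                cpu += proc.info['cpu_percent'] or 0.0
                mem += proc.info['memory_percent'] or 0.0
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue
    return round(cpu, 2), round(mem, 2)
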
diff --git a/websiteFunctions/static/js/resource-monitoring.js b/websiteFunctions/static/js/resource-monitoring.js
new file mode 100644
index 000000000..6b0f3ed87
--- /dev/null
+++ b/websiteFunctions/static/js/resource-monitoring.js
@@ -0,0 +1,143 @@
+// Resource Monitoring
+let cpuChart, memoryChart, diskChart;
+let cpuData = [], memoryData = [], diskData = [];
+const maxDataPoints = 30;
+
+function initializeCharts() {
+ const chartOptions = {
+ responsive: true,
+ maintainAspectRatio: false,
+ scales: {
+ y: {
+ beginAtZero: true,
+ max: 100,
+ ticks: {
+ callback: function(value) {
+ return value + '%';
+ }
+ }
+ }
+ },
+ animation: {
+ duration: 750
+ }
+ };
+
+ // CPU Chart
+ const cpuCtx = document.getElementById('cpuChart').getContext('2d');
+ cpuChart = new Chart(cpuCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'CPU Usage (%)',
+ data: [],
+ borderColor: '#2563eb',
+ backgroundColor: 'rgba(37, 99, 235, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: chartOptions
+ });
+
+ // Memory Chart
+ const memoryCtx = document.getElementById('memoryChart').getContext('2d');
+ memoryChart = new Chart(memoryCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'Memory Usage (%)',
+ data: [],
+ borderColor: '#00b894',
+ backgroundColor: 'rgba(0, 184, 148, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: chartOptions
+ });
+
+ // Disk Chart
+ const diskCtx = document.getElementById('diskChart').getContext('2d');
+ diskChart = new Chart(diskCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'Disk Usage (%)',
+ data: [],
+ borderColor: '#ff9800',
+ backgroundColor: 'rgba(255, 152, 0, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: chartOptions
+ });
+}
+
+function updateCharts(data) {
+ const now = new Date();
+ const timeLabel = now.toLocaleTimeString();
+
+ // Update CPU Chart
+ cpuData.push(data.cpu_usage);
+ if (cpuData.length > maxDataPoints) cpuData.shift();
+ cpuChart.data.labels.push(timeLabel);
+ if (cpuChart.data.labels.length > maxDataPoints) cpuChart.data.labels.shift();
+ cpuChart.data.datasets[0].data = cpuData;
+ cpuChart.update('none'); // Use 'none' mode for better performance
+
+ // Update Memory Chart
+ memoryData.push(data.memory_usage);
+ if (memoryData.length > maxDataPoints) memoryData.shift();
+ memoryChart.data.labels.push(timeLabel);
+ if (memoryChart.data.labels.length > maxDataPoints) memoryChart.data.labels.shift();
+ memoryChart.data.datasets[0].data = memoryData;
+ memoryChart.update('none');
+
+ // Update Disk Chart
+ diskData.push(data.disk_percent);
+ if (diskData.length > maxDataPoints) diskData.shift();
+ diskChart.data.labels.push(timeLabel);
+ if (diskChart.data.labels.length > maxDataPoints) diskChart.data.labels.shift();
+ diskChart.data.datasets[0].data = diskData;
+ diskChart.update('none');
+}
+
+function fetchResourceUsage() {
+ $.ajax({
+ url: '/websites/get_website_resources/',
+ type: 'POST',
+ data: JSON.stringify({
+ 'domain': $('#domainNamePage').text().trim()
+ }),
+ contentType: 'application/json',
+ success: function(data) {
+ if (data.status === 1) {
+ updateCharts(data);
+ } else {
+ console.error('Error fetching resource data:', data.error_message);
+ }
+ },
+ error: function(xhr, status, error) {
+ console.error('Failed to fetch resource usage:', error);
+ }
+ });
+}
+
+// Initialize charts when the page loads
+$(document).ready(function() {
+ if (document.getElementById('cpuChart')) {
+ initializeCharts();
+ // Fetch resource usage every 5 seconds
+ setInterval(fetchResourceUsage, 5000);
+ // Initial fetch
+ fetchResourceUsage();
+ }
+});
\ No newline at end of file
diff --git a/websiteFunctions/static/js/websiteFunctions.js b/websiteFunctions/static/js/websiteFunctions.js
new file mode 100644
index 000000000..c7c4eca13
--- /dev/null
+++ b/websiteFunctions/static/js/websiteFunctions.js
@@ -0,0 +1,162 @@
+// Resource Monitoring
+let cpuChart, memoryChart, diskChart;
+let cpuData = [], memoryData = [], diskData = [];
+const maxDataPoints = 30;
+
+function initializeCharts() {
+ // CPU Chart
+ const cpuCtx = document.getElementById('cpuChart').getContext('2d');
+ cpuChart = new Chart(cpuCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'CPU Usage (%)',
+ data: [],
+ borderColor: '#2563eb',
+ backgroundColor: 'rgba(37, 99, 235, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: {
+ responsive: true,
+ maintainAspectRatio: false,
+ scales: {
+ y: {
+ beginAtZero: true,
+ max: 100,
+ ticks: {
+ callback: function(value) {
+ return value + '%';
+ }
+ }
+ }
+ }
+ }
+ });
+
+ // Memory Chart
+ const memoryCtx = document.getElementById('memoryChart').getContext('2d');
+ memoryChart = new Chart(memoryCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'Memory Usage (%)',
+ data: [],
+ borderColor: '#00b894',
+ backgroundColor: 'rgba(0, 184, 148, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: {
+ responsive: true,
+ maintainAspectRatio: false,
+ scales: {
+ y: {
+ beginAtZero: true,
+ max: 100,
+ ticks: {
+ callback: function(value) {
+ return value + '%';
+ }
+ }
+ }
+ }
+ }
+ });
+
+ // Disk Chart
+ const diskCtx = document.getElementById('diskChart').getContext('2d');
+ diskChart = new Chart(diskCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'Disk Usage (%)',
+ data: [],
+ borderColor: '#ff9800',
+ backgroundColor: 'rgba(255, 152, 0, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: {
+ responsive: true,
+ maintainAspectRatio: false,
+ scales: {
+ y: {
+ beginAtZero: true,
+ max: 100,
+ ticks: {
+ callback: function(value) {
+ return value + '%';
+ }
+ }
+ }
+ }
+ }
+ });
+}
+
+function updateCharts(data) {
+ const now = new Date();
+ const timeLabel = now.getHours() + ':' + now.getMinutes() + ':' + now.getSeconds();
+
+ // Update CPU Chart
+ cpuData.push(data.cpu_usage);
+ if (cpuData.length > maxDataPoints) cpuData.shift();
+ cpuChart.data.labels.push(timeLabel);
+ if (cpuChart.data.labels.length > maxDataPoints) cpuChart.data.labels.shift();
+ cpuChart.data.datasets[0].data = cpuData;
+ cpuChart.update();
+
+ // Update Memory Chart
+ memoryData.push(data.memory_usage);
+ if (memoryData.length > maxDataPoints) memoryData.shift();
+ memoryChart.data.labels.push(timeLabel);
+ if (memoryChart.data.labels.length > maxDataPoints) memoryChart.data.labels.shift();
+ memoryChart.data.datasets[0].data = memoryData;
+ memoryChart.update();
+
+ // Update Disk Chart
+ diskData.push(data.disk_percent);
+ if (diskData.length > maxDataPoints) diskData.shift();
+ diskChart.data.labels.push(timeLabel);
+ if (diskChart.data.labels.length > maxDataPoints) diskChart.data.labels.shift();
+ diskChart.data.datasets[0].data = diskData;
+ diskChart.update();
+}
+
+function fetchResourceUsage() {
+ $.ajax({
+ url: '/website/get_website_resources/',
+ type: 'POST',
+ data: JSON.stringify({
+ 'domain': $('#domainNamePage').text()
+ }),
+ contentType: 'application/json',
+ success: function(data) {
+ if (data.status === 1) {
+ updateCharts(data);
+ }
+ },
+ error: function() {
+ console.error('Error fetching resource usage data');
+ }
+ });
+}
+
+// Initialize charts when the page loads
+$(document).ready(function() {
+ initializeCharts();
+ // Fetch resource usage every 5 seconds
+ setInterval(fetchResourceUsage, 5000);
+ // Initial fetch
+ fetchResourceUsage();
+});
\ No newline at end of file
diff --git a/websiteFunctions/static/websiteFunctions/js/resource-monitoring.js b/websiteFunctions/static/websiteFunctions/js/resource-monitoring.js
new file mode 100644
index 000000000..6b0f3ed87
--- /dev/null
+++ b/websiteFunctions/static/websiteFunctions/js/resource-monitoring.js
@@ -0,0 +1,143 @@
+// Resource Monitoring
+let cpuChart, memoryChart, diskChart;
+let cpuData = [], memoryData = [], diskData = [];
+const maxDataPoints = 30;
+
+function initializeCharts() {
+ const chartOptions = {
+ responsive: true,
+ maintainAspectRatio: false,
+ scales: {
+ y: {
+ beginAtZero: true,
+ max: 100,
+ ticks: {
+ callback: function(value) {
+ return value + '%';
+ }
+ }
+ }
+ },
+ animation: {
+ duration: 750
+ }
+ };
+
+ // CPU Chart
+ const cpuCtx = document.getElementById('cpuChart').getContext('2d');
+ cpuChart = new Chart(cpuCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'CPU Usage (%)',
+ data: [],
+ borderColor: '#2563eb',
+ backgroundColor: 'rgba(37, 99, 235, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: chartOptions
+ });
+
+ // Memory Chart
+ const memoryCtx = document.getElementById('memoryChart').getContext('2d');
+ memoryChart = new Chart(memoryCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'Memory Usage (%)',
+ data: [],
+ borderColor: '#00b894',
+ backgroundColor: 'rgba(0, 184, 148, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: chartOptions
+ });
+
+ // Disk Chart
+ const diskCtx = document.getElementById('diskChart').getContext('2d');
+ diskChart = new Chart(diskCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'Disk Usage (%)',
+ data: [],
+ borderColor: '#ff9800',
+ backgroundColor: 'rgba(255, 152, 0, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: chartOptions
+ });
+}
+
+function updateCharts(data) {
+ const now = new Date();
+ const timeLabel = now.toLocaleTimeString();
+
+ // Update CPU Chart
+ cpuData.push(data.cpu_usage);
+ if (cpuData.length > maxDataPoints) cpuData.shift();
+ cpuChart.data.labels.push(timeLabel);
+ if (cpuChart.data.labels.length > maxDataPoints) cpuChart.data.labels.shift();
+ cpuChart.data.datasets[0].data = cpuData;
+ cpuChart.update('none'); // Use 'none' mode for better performance
+
+ // Update Memory Chart
+ memoryData.push(data.memory_usage);
+ if (memoryData.length > maxDataPoints) memoryData.shift();
+ memoryChart.data.labels.push(timeLabel);
+ if (memoryChart.data.labels.length > maxDataPoints) memoryChart.data.labels.shift();
+ memoryChart.data.datasets[0].data = memoryData;
+ memoryChart.update('none');
+
+ // Update Disk Chart
+ diskData.push(data.disk_percent);
+ if (diskData.length > maxDataPoints) diskData.shift();
+ diskChart.data.labels.push(timeLabel);
+ if (diskChart.data.labels.length > maxDataPoints) diskChart.data.labels.shift();
+ diskChart.data.datasets[0].data = diskData;
+ diskChart.update('none');
+}
+
+function fetchResourceUsage() {
+ $.ajax({
+ url: '/websites/get_website_resources/',
+ type: 'POST',
+ data: JSON.stringify({
+ 'domain': $('#domainNamePage').text().trim()
+ }),
+ contentType: 'application/json',
+ success: function(data) {
+ if (data.status === 1) {
+ updateCharts(data);
+ } else {
+ console.error('Error fetching resource data:', data.error_message);
+ }
+ },
+ error: function(xhr, status, error) {
+ console.error('Failed to fetch resource usage:', error);
+ }
+ });
+}
+
+// Initialize charts when the page loads
+$(document).ready(function() {
+ if (document.getElementById('cpuChart')) {
+ initializeCharts();
+ // Fetch resource usage every 5 seconds
+ setInterval(fetchResourceUsage, 5000);
+ // Initial fetch
+ fetchResourceUsage();
+ }
+});
\ No newline at end of file
diff --git a/websiteFunctions/static/websiteFunctions/js/websiteFunctions.js b/websiteFunctions/static/websiteFunctions/js/websiteFunctions.js
new file mode 100644
index 000000000..c7c4eca13
--- /dev/null
+++ b/websiteFunctions/static/websiteFunctions/js/websiteFunctions.js
@@ -0,0 +1,162 @@
+// Resource Monitoring
+let cpuChart, memoryChart, diskChart;
+let cpuData = [], memoryData = [], diskData = [];
+const maxDataPoints = 30;
+
+function initializeCharts() {
+ // CPU Chart
+ const cpuCtx = document.getElementById('cpuChart').getContext('2d');
+ cpuChart = new Chart(cpuCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'CPU Usage (%)',
+ data: [],
+ borderColor: '#2563eb',
+ backgroundColor: 'rgba(37, 99, 235, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: {
+ responsive: true,
+ maintainAspectRatio: false,
+ scales: {
+ y: {
+ beginAtZero: true,
+ max: 100,
+ ticks: {
+ callback: function(value) {
+ return value + '%';
+ }
+ }
+ }
+ }
+ }
+ });
+
+ // Memory Chart
+ const memoryCtx = document.getElementById('memoryChart').getContext('2d');
+ memoryChart = new Chart(memoryCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'Memory Usage (%)',
+ data: [],
+ borderColor: '#00b894',
+ backgroundColor: 'rgba(0, 184, 148, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: {
+ responsive: true,
+ maintainAspectRatio: false,
+ scales: {
+ y: {
+ beginAtZero: true,
+ max: 100,
+ ticks: {
+ callback: function(value) {
+ return value + '%';
+ }
+ }
+ }
+ }
+ }
+ });
+
+ // Disk Chart
+ const diskCtx = document.getElementById('diskChart').getContext('2d');
+ diskChart = new Chart(diskCtx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: 'Disk Usage (%)',
+ data: [],
+ borderColor: '#ff9800',
+ backgroundColor: 'rgba(255, 152, 0, 0.1)',
+ borderWidth: 2,
+ fill: true,
+ tension: 0.4
+ }]
+ },
+ options: {
+ responsive: true,
+ maintainAspectRatio: false,
+ scales: {
+ y: {
+ beginAtZero: true,
+ max: 100,
+ ticks: {
+ callback: function(value) {
+ return value + '%';
+ }
+ }
+ }
+ }
+ }
+ });
+}
+
+function updateCharts(data) {
+ const now = new Date();
+ const timeLabel = now.getHours() + ':' + now.getMinutes() + ':' + now.getSeconds();
+
+ // Update CPU Chart
+ cpuData.push(data.cpu_usage);
+ if (cpuData.length > maxDataPoints) cpuData.shift();
+ cpuChart.data.labels.push(timeLabel);
+ if (cpuChart.data.labels.length > maxDataPoints) cpuChart.data.labels.shift();
+ cpuChart.data.datasets[0].data = cpuData;
+ cpuChart.update();
+
+ // Update Memory Chart
+ memoryData.push(data.memory_usage);
+ if (memoryData.length > maxDataPoints) memoryData.shift();
+ memoryChart.data.labels.push(timeLabel);
+ if (memoryChart.data.labels.length > maxDataPoints) memoryChart.data.labels.shift();
+ memoryChart.data.datasets[0].data = memoryData;
+ memoryChart.update();
+
+ // Update Disk Chart
+ diskData.push(data.disk_percent);
+ if (diskData.length > maxDataPoints) diskData.shift();
+ diskChart.data.labels.push(timeLabel);
+ if (diskChart.data.labels.length > maxDataPoints) diskChart.data.labels.shift();
+ diskChart.data.datasets[0].data = diskData;
+ diskChart.update();
+}
+
+function fetchResourceUsage() {
+ $.ajax({
+ url: '/website/get_website_resources/',
+ type: 'POST',
+ data: JSON.stringify({
+ 'domain': $('#domainNamePage').text()
+ }),
+ contentType: 'application/json',
+ success: function(data) {
+ if (data.status === 1) {
+ updateCharts(data);
+ }
+ },
+ error: function() {
+ console.error('Error fetching resource usage data');
+ }
+ });
+}
+
+// Initialize charts when the page loads
+$(document).ready(function() {
+ initializeCharts();
+ // Fetch resource usage every 5 seconds
+ setInterval(fetchResourceUsage, 5000);
+ // Initial fetch
+ fetchResourceUsage();
+});
\ No newline at end of file
diff --git a/websiteFunctions/static/websiteFunctions/websiteFunctions.js b/websiteFunctions/static/websiteFunctions/websiteFunctions.js
index 143947cb5..ecf1752a9 100755
--- a/websiteFunctions/static/websiteFunctions/websiteFunctions.js
+++ b/websiteFunctions/static/websiteFunctions/websiteFunctions.js
@@ -16904,4 +16904,937 @@ app.controller('BuyAddons', function ($scope, $http) {
}
-})
\ No newline at end of file
+})
+
+app.controller('launchChild', function ($scope, $http) {
+
+ $scope.logFileLoading = true;
+ $scope.logsFeteched = true;
+ $scope.couldNotFetchLogs = true;
+ $scope.couldNotConnect = true;
+ $scope.fetchedData = true;
+ $scope.hideLogs = true;
+ $scope.hideErrorLogs = true;
+
+ $scope.hidelogsbtn = function () {
+ $scope.hideLogs = true;
+ };
+
+ $scope.hideErrorLogsbtn = function () {
+ $scope.hideErrorLogs = true;
+ };
+
+ $scope.fileManagerURL = "/filemanager/" + $("#domainNamePage").text();
+ $scope.previewUrl = "/preview/" + $("#childDomain").text() + "/";
+ $scope.wordPressInstallURL = "/websites/" + $("#childDomain").text() + "/wordpressInstall";
+ $scope.joomlaInstallURL = "/websites/" + $("#childDomain").text() + "/joomlaInstall";
+ $scope.setupGit = "/websites/" + $("#childDomain").text() + "/setupGit";
+ $scope.installPrestaURL = "/websites/" + $("#childDomain").text() + "/installPrestaShop";
+ $scope.installMagentoURL = "/websites/" + $("#childDomain").text() + "/installMagento";
+
+ var logType = 0;
+ $scope.pageNumber = 1;
+
+ $scope.fetchLogs = function (type) {
+
+ var pageNumber = $scope.pageNumber;
+
+
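+ // type 3 moves to the next page, type 4 to the previous page; any other value selects the log type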
+ if (type == 3) {
+ pageNumber = $scope.pageNumber + 1;
+ $scope.pageNumber = pageNumber;
+ } else if (type == 4) {
+ pageNumber = $scope.pageNumber - 1;
+ $scope.pageNumber = pageNumber;
+ } else {
+ logType = type;
+ }
+
+
+ $scope.logFileLoading = false;
+ $scope.logsFeteched = true;
+ $scope.couldNotFetchLogs = true;
+ $scope.couldNotConnect = true;
+ $scope.fetchedData = false;
+ $scope.hideErrorLogs = true;
+
+
+ url = "/websites/getDataFromLogFile";
+
+ var domainNamePage = $("#domainNamePage").text();
+
+
+ var data = {
+ logType: logType,
+ virtualHost: domainNamePage,
+ page: pageNumber,
+ };
+
+ var config = {
+ headers: {
+ 'X-CSRFToken': getCookie('csrftoken')
+ }
+ };
+
+ $http.post(url, data, config).then(ListInitialDatas, cantLoadInitialDatas);
+
+
+ function ListInitialDatas(response) {
+
+ if (response.data.logstatus === 1) {
+
+
+ $scope.logFileLoading = true;
+ $scope.logsFeteched = false;
+ $scope.couldNotFetchLogs = true;
+ $scope.couldNotConnect = true;
+ $scope.fetchedData = false;
+ $scope.hideLogs = false;
+
+
+ $scope.records = JSON.parse(response.data.data);
+
+ } else {
+
+ $scope.logFileLoading = true;
+ $scope.logsFeteched = true;
+ $scope.couldNotFetchLogs = false;
+ $scope.couldNotConnect = true;
+ $scope.fetchedData = true;
+ $scope.hideLogs = false;
+
+
+ $scope.errorMessage = response.data.error_message;
+ console.log(domainNamePage)
+
+ }
+
+
+ }
+
+ function cantLoadInitialDatas(response) {
+
+ $scope.logFileLoading = true;
+ $scope.logsFeteched = true;
+ $scope.couldNotFetchLogs = true;
+ $scope.couldNotConnect = false;
+ $scope.fetchedData = true;
+ $scope.hideLogs = false;
+
+ }
+
+
+ };
+
+ $scope.errorPageNumber = 1;
+
+
+ $scope.fetchErrorLogs = function (type) {
+
+ var errorPageNumber = $scope.errorPageNumber;
+
+
+ if (type === 3) {
+ errorPageNumber = $scope.errorPageNumber + 1;
+ $scope.errorPageNumber = errorPageNumber;
+ } else if (type === 4) {
+ errorPageNumber = $scope.errorPageNumber - 1;
+ $scope.errorPageNumber = errorPageNumber;
+ } else {
+ logType = type;
+ }
+
+ // notifications
+
+ $scope.logFileLoading = false;
+ $scope.logsFeteched = true;
+ $scope.couldNotFetchLogs = true;
+ $scope.couldNotConnect = true;
+ $scope.fetchedData = true;
+ $scope.hideErrorLogs = true;
+ $scope.hideLogs = false;
+
+
+ url = "/websites/fetchErrorLogs";
+
+ var domainNamePage = $("#domainNamePage").text();
+
+
+ var data = {
+ virtualHost: domainNamePage,
+ page: errorPageNumber,
+ };
+
+ var config = {
+ headers: {
+ 'X-CSRFToken': getCookie('csrftoken')
+ }
+ };
+
+ $http.post(url, data, config).then(ListInitialDatas, cantLoadInitialDatas);
+
+
+ function ListInitialDatas(response) {
+
+ if (response.data.logstatus === 1) {
+
+
+ // notifications
+
+ $scope.logFileLoading = true;
+ $scope.logsFeteched = false;
+ $scope.couldNotFetchLogs = true;
+ $scope.couldNotConnect = true;
+ $scope.fetchedData = true;
+ $scope.hideLogs = false;
+ $scope.hideErrorLogs = false;
+
+
+ $scope.errorLogsData = response.data.data;
+
+ } else {
+
+ // notifications
+
+ $scope.logFileLoading = true;
+ $scope.logsFeteched = true;
+ $scope.couldNotFetchLogs = false;
+ $scope.couldNotConnect = true;
+ $scope.fetchedData = true;
+ $scope.hideLogs = true;
+ $scope.hideErrorLogs = true;
+
+
+ $scope.errorMessage = response.data.error_message;
+
+ }
+
+
+ }
+
+ function cantLoadInitialDatas(response) {
+
+ // notifications
+
+ $scope.logFileLoading = true;
+ $scope.logsFeteched = true;
+ $scope.couldNotFetchLogs = true;
+ $scope.couldNotConnect = false;
+ $scope.fetchedData = true;
+ $scope.hideLogs = true;
+ $scope.hideErrorLogs = true;
+
+ }
+
+
+ };
+
+ ///////// Configurations Part
+
+ $scope.configurationsBox = true;
+ $scope.configsFetched = true;
+ $scope.couldNotFetchConfigs = true;
+ $scope.couldNotConnect = true;
+ $scope.fetchedConfigsData = true;
+ $scope.configFileLoading = true;
+ $scope.configSaved = true;
+ $scope.couldNotSaveConfigurations = true;
+
+ $scope.hideconfigbtn = function () {
+
+ $scope.configurationsBox = true;
+ };
+
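+ // Loads the raw vHost configuration for the child domain from
+ // /websites/getDataFromConfigFile into $scope.configData and opens the
+ // configuration editor while the other panels stay hidden.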
+ $scope.fetchConfigurations = function () {
+
+
+ $scope.hidsslconfigs = true;
+ $scope.configurationsBoxRewrite = true;
+ $scope.changePHPView = true;
+
+
+ //Rewrite rules
+ $scope.configurationsBoxRewrite = true;
+ $scope.rewriteRulesFetched = true;
+ $scope.couldNotFetchRewriteRules = true;
+ $scope.rewriteRulesSaved = true;
+ $scope.couldNotSaveRewriteRules = true;
+ $scope.fetchedRewriteRules = true;
+ $scope.saveRewriteRulesBTN = true;
+
+ ///
+
+ $scope.configFileLoading = false;
+
+
+ url = "/websites/getDataFromConfigFile";
+
+ var virtualHost = $("#childDomain").text();
+
+
+ var data = {
+ virtualHost: virtualHost,
+ };
+
+ var config = {
+ headers: {
+ 'X-CSRFToken': getCookie('csrftoken')
+ }
+ };
+
+ $http.post(url, data, config).then(ListInitialDatas, cantLoadInitialDatas);
+
+
+ function ListInitialDatas(response) {
+
+ if (response.data.configstatus === 1) {
+
+ //Rewrite rules
+
+ $scope.configurationsBoxRewrite = true;
+ $scope.rewriteRulesFetched = true;
+ $scope.couldNotFetchRewriteRules = true;
+ $scope.rewriteRulesSaved = true;
+ $scope.couldNotSaveRewriteRules = true;
+ $scope.fetchedRewriteRules = true;
+ $scope.saveRewriteRulesBTN = true;
+
+ ///
+
+ $scope.configurationsBox = false;
+ $scope.configsFetched = false;
+ $scope.couldNotFetchConfigs = true;
+ $scope.couldNotConnect = true;
+ $scope.fetchedConfigsData = false;
+ $scope.configFileLoading = true;
+ $scope.configSaved = true;
+ $scope.couldNotSaveConfigurations = true;
+ $scope.saveConfigBtn = false;
+
+
+ $scope.configData = response.data.configData;
+
+ } else {
+
+ //Rewrite rules
+ $scope.configurationsBoxRewrite = true;
+ $scope.rewriteRulesFetched = true;
+ $scope.couldNotFetchRewriteRules = true;
+ $scope.rewriteRulesSaved = true;
+ $scope.couldNotSaveRewriteRules = true;
+ $scope.fetchedRewriteRules = true;
+ $scope.saveRewriteRulesBTN = true;
+
+ ///
+ $scope.configurationsBox = false;
+ $scope.configsFetched = true;
+ $scope.couldNotFetchConfigs = false;
+ $scope.couldNotConnect = true;
+ $scope.fetchedConfigsData = true;
+ $scope.configFileLoading = true;
+ $scope.configSaved = true;
+ $scope.couldNotSaveConfigurations = true;
+
+
+ $scope.errorMessage = response.data.error_message;
+
+ }
+
+
+ }
+
+ function cantLoadInitialDatas(response) {
+
+ //Rewrite rules
+ $scope.configurationsBoxRewrite = true;
+ $scope.rewriteRulesFetched = true;
+ $scope.couldNotFetchRewriteRules = true;
+ $scope.rewriteRulesSaved = true;
+ $scope.couldNotSaveRewriteRules = true;
+ $scope.fetchedRewriteRules = true;
+ $scope.saveRewriteRulesBTN = true;
+ ///
+
+ $scope.configurationsBox = false;
+ $scope.configsFetched = true;
+ $scope.couldNotFetchConfigs = true;
+ $scope.couldNotConnect = false;
+ $scope.fetchedConfigsData = true;
+ $scope.configFileLoading = true;
+ $scope.configSaved = true;
+ $scope.couldNotSaveConfigurations = true;
+
+
+ }
+
+
+ };
+
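+ // Writes the edited configuration back through /websites/saveConfigsToFile
+ // and toggles the saved / could-not-save notices accordingly.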
+ $scope.saveCongiruations = function () {
+
+ $scope.configFileLoading = false;
+
+
+ url = "/websites/saveConfigsToFile";
+
+ var virtualHost = $("#childDomain").text();
+ var configData = $scope.configData;
+
+
+ var data = {
+ virtualHost: virtualHost,
+ configData: configData,
+ };
+
+ var config = {
+ headers: {
+ 'X-CSRFToken': getCookie('csrftoken')
+ }
+ };
+
+ $http.post(url, data, config).then(ListInitialDatas, cantLoadInitialDatas);
+
+
+ function ListInitialDatas(response) {
+
+ if (response.data.configstatus == 1) {
+
+ $scope.configurationsBox = false;
+ $scope.configsFetched = true;
+ $scope.couldNotFetchConfigs = true;
+ $scope.couldNotConnect = true;
+ $scope.fetchedConfigsData = true;
+ $scope.configFileLoading = true;
+ $scope.configSaved = false;
+ $scope.couldNotSaveConfigurations = true;
+ $scope.saveConfigBtn = true;
+
+
+ } else {
+ $scope.configurationsBox = false;
+ $scope.configsFetched = true;
+ $scope.couldNotFetchConfigs = true;
+ $scope.couldNotConnect = true;
+ $scope.fetchedConfigsData = false;
+ $scope.configFileLoading = true;
+ $scope.configSaved = true;
+ $scope.couldNotSaveConfigurations = false;
+
+
+ $scope.errorMessage = response.data.error_message;
+
+ }
+
+
+ }
+
+ function cantLoadInitialDatas(response) {
+
+ $scope.configurationsBox = false;
+ $scope.configsFetched = true;
+ $scope.couldNotFetchConfigs = true;
+ $scope.couldNotConnect = false;
+ $scope.fetchedConfigsData = true;
+ $scope.configFileLoading = true;
+ $scope.configSaved = true;
+ $scope.couldNotSaveConfigurations = true;
+
+
+ }
+
+
+ };
+
+
+ ///////// Rewrite Rules
+
+ $scope.configurationsBoxRewrite = true;
+ $scope.rewriteRulesFetched = true;
+ $scope.couldNotFetchRewriteRules = true;
+ $scope.rewriteRulesSaved = true;
+ $scope.couldNotSaveRewriteRules = true;
+ $scope.fetchedRewriteRules = true;
+ $scope.saveRewriteRulesBTN = true;
+
+ $scope.hideRewriteRulesbtn = function () {
+ $scope.configurationsBoxRewrite = true;
+ };
+
+
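+ // Loads the current rewrite rules from /websites/getRewriteRules into
+ // $scope.rewriteRules and switches to the rewrite-rules editor.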
+ $scope.fetchRewriteFules = function () {
+
+ $scope.hidsslconfigs = true;
+ $scope.configurationsBox = true;
+ $scope.changePHPView = true;
+
+
+ $scope.configurationsBox = true;
+ $scope.configsFetched = true;
+ $scope.couldNotFetchConfigs = true;
+ $scope.couldNotConnect = true;
+ $scope.fetchedConfigsData = true;
+ $scope.configFileLoading = true;
+ $scope.configSaved = true;
+ $scope.couldNotSaveConfigurations = true;
+ $scope.saveConfigBtn = true;
+
+ $scope.configFileLoading = false;
+
+
+ url = "/websites/getRewriteRules";
+
+ var virtualHost = $("#childDomain").text();
+
+
+ var data = {
+ virtualHost: virtualHost,
+ };
+
+ var config = {
+ headers: {
+ 'X-CSRFToken': getCookie('csrftoken')
+ }
+ };
+
+ $http.post(url, data, config).then(ListInitialDatas, cantLoadInitialDatas);
+
+
+ function ListInitialDatas(response) {
+
+ if (response.data.rewriteStatus == 1) {
+
+
+ // from main
+
+ $scope.configurationsBox = true;
+ $scope.configsFetched = true;
+ $scope.couldNotFetchConfigs = true;
+ $scope.fetchedConfigsData = true;
+ $scope.configSaved = true;
+ $scope.couldNotSaveConfigurations = true;
+ $scope.saveConfigBtn = true;
+
+ // main ends
+
+ $scope.configFileLoading = true;
+
+ //
+
+
+ $scope.configurationsBoxRewrite = false;
+ $scope.rewriteRulesFetched = false;
+ $scope.couldNotFetchRewriteRules = true;
+ $scope.rewriteRulesSaved = true;
+ $scope.couldNotSaveRewriteRules = true;
+ $scope.fetchedRewriteRules = false;
+ $scope.saveRewriteRulesBTN = false;
+ $scope.couldNotConnect = true;
+
+
+ $scope.rewriteRules = response.data.rewriteRules;
+
+ } else {
+ // from main
+ $scope.configurationsBox = true;
+ $scope.configsFetched = true;
+ $scope.couldNotFetchConfigs = true;
+ $scope.fetchedConfigsData = true;
+ $scope.configFileLoading = true;
+ $scope.configSaved = true;
+ $scope.couldNotSaveConfigurations = true;
+ $scope.saveConfigBtn = true;
+ // from main
+
+ $scope.configFileLoading = true;
+
+ ///
+
+ $scope.configurationsBoxRewrite = true;
+ $scope.rewriteRulesFetched = true;
+ $scope.couldNotFetchRewriteRules = false;
+ $scope.rewriteRulesSaved = true;
+ $scope.couldNotSaveRewriteRules = true;
+ $scope.fetchedRewriteRules = true;
+ $scope.saveRewriteRulesBTN = true;
+ $scope.couldNotConnect = true;
+
+
+ $scope.errorMessage = response.data.error_message;
+
+ }
+
+
+ }
+
+ function cantLoadInitialDatas(response) {
+ // from main
+
+ $scope.configurationsBox = true;
+ $scope.configsFetched = true;
+ $scope.couldNotFetchConfigs = true;
+ $scope.fetchedConfigsData = true;
+ $scope.configFileLoading = true;
+ $scope.configSaved = true;
+ $scope.couldNotSaveConfigurations = true;
+ $scope.saveConfigBtn = true;
+
+ // from main
+
+ $scope.configFileLoading = true;
+
+ ///
+
+ $scope.configurationsBoxRewrite = true;
+ $scope.rewriteRulesFetched = true;
+ $scope.couldNotFetchRewriteRules = true;
+ $scope.rewriteRulesSaved = true;
+ $scope.couldNotSaveRewriteRules = true;
+ $scope.fetchedRewriteRules = true;
+ $scope.saveRewriteRulesBTN = true;
+
+ $scope.couldNotConnect = false;
+
+
+ }
+
+
+ };
+
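+ // Persists $scope.rewriteRules through /websites/saveRewriteRules and shows
+ // the saved or could-not-save notice based on the response.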
+ $scope.saveRewriteRules = function () {
+
+ $scope.configFileLoading = false;
+
+
+ url = "/websites/saveRewriteRules";
+
+ var virtualHost = $("#childDomain").text();
+ var rewriteRules = $scope.rewriteRules;
+
+
+ var data = {
+ virtualHost: virtualHost,
+ rewriteRules: rewriteRules,
+ };
+
+ var config = {
+ headers: {
+ 'X-CSRFToken': getCookie('csrftoken')
+ }
+ };
+
+ $http.post(url, data, config).then(ListInitialDatas, cantLoadInitialDatas);
+
+
+ function ListInitialDatas(response) {
+
+ if (response.data.rewriteStatus == 1) {
+
+ $scope.configurationsBoxRewrite = false;
+ $scope.rewriteRulesFetched = true;
+ $scope.couldNotFetchRewriteRules = true;
+ $scope.rewriteRulesSaved = false;
+ $scope.couldNotSaveRewriteRules = true;
+ $scope.fetchedRewriteRules = true;
+ $scope.saveRewriteRulesBTN = true;
+ $scope.configFileLoading = true;
+
+
+ } else {
+ $scope.configurationsBoxRewrite = false;
+ $scope.rewriteRulesFetched = false;
+ $scope.couldNotFetchRewriteRules = true;
+ $scope.rewriteRulesSaved = true;
+ $scope.couldNotSaveRewriteRules = false;
+ $scope.fetchedRewriteRules = true;
+ $scope.saveRewriteRulesBTN = false;
+
+ $scope.configFileLoading = true;
+
+
+ $scope.errorMessage = response.data.error_message;
+
+ }
+
+
+ }
+
+ function cantLoadInitialDatas(response) {
+
+ $scope.configurationsBoxRewrite = false;
+ $scope.rewriteRulesFetched = false;
+ $scope.couldNotFetchRewriteRules = true;
+ $scope.rewriteRulesSaved = true;
+ $scope.couldNotSaveRewriteRules = true;
+ $scope.fetchedRewriteRules = true;
+ $scope.saveRewriteRulesBTN = false;
+
+ $scope.configFileLoading = true;
+
+ $scope.couldNotConnect = false;
+
+
+ }
+
+
+ };
+
+
+ //////// SSL Part
+
+ $scope.sslSaved = true;
+ $scope.couldNotSaveSSL = true;
+ $scope.hidsslconfigs = true;
+ $scope.couldNotConnect = true;
+
+
+ $scope.hidesslbtn = function () {
+ $scope.hidsslconfigs = true;
+ };
+
+ $scope.addSSL = function () {
+ $scope.hidsslconfigs = false;
+ $scope.configurationsBox = true;
+ $scope.configurationsBoxRewrite = true;
+ $scope.changePHPView = true;
+ };
+
+
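+ // Submits the pasted certificate and private key for the child domain to
+ // /websites/saveSSL.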
+ $scope.saveSSL = function () {
+
+
+ $scope.configFileLoading = false;
+
+ url = "/websites/saveSSL";
+
+ var virtualHost = $("#childDomain").text();
+ var cert = $scope.cert;
+ var key = $scope.key;
+
+
+ var data = {
+ virtualHost: virtualHost,
+ cert: cert,
+ key: key,
+ };
+
+ var config = {
+ headers: {
+ 'X-CSRFToken': getCookie('csrftoken')
+ }
+ };
+
+ $http.post(url, data, config).then(ListInitialDatas, cantLoadInitialDatas);
+
+
+ function ListInitialDatas(response) {
+
+ if (response.data.sslStatus === 1) {
+
+ $scope.sslSaved = false;
+ $scope.couldNotSaveSSL = true;
+ $scope.couldNotConnect = true;
+ $scope.configFileLoading = true;
+
+
+ } else {
+
+ $scope.sslSaved = true;
+ $scope.couldNotSaveSSL = false;
+ $scope.couldNotConnect = true;
+ $scope.configFileLoading = true;
+
+ $scope.errorMessage = response.data.error_message;
+
+ }
+
+
+ }
+
+ function cantLoadInitialDatas(response) {
+
+ $scope.sslSaved = true;
+ $scope.couldNotSaveSSL = true;
+ $scope.couldNotConnect = false;
+ $scope.configFileLoading = true;
+
+
+ }
+
+ };
+
+
+ //// Change PHP Master
+
+ $scope.failedToChangePHPMaster = true;
+ $scope.phpChangedMaster = true;
+ $scope.couldNotConnect = true;
+
+ $scope.changePHPView = true;
+
+
+ $scope.hideChangePHPMaster = function () {
+ $scope.changePHPView = true;
+ };
+
+ $scope.changePHPMaster = function () {
+ $scope.hidsslconfigs = true;
+ $scope.configurationsBox = true;
+ $scope.configurationsBoxRewrite = true;
+ $scope.changePHPView = false;
+ };
+
+
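+ // Switches the PHP version for the child domain via /websites/changePHP,
+ // using the version selected in $scope.phpSelectionMaster.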
+ $scope.changePHPVersionMaster = function (childDomain, phpSelection) {
+
+ // notifications
+
+ $scope.configFileLoading = false;
+
+ var url = "/websites/changePHP";
+
+ var data = {
+ childDomain: $("#childDomain").text(),
+ phpSelection: $scope.phpSelectionMaster,
+ };
+
+ var config = {
+ headers: {
+ 'X-CSRFToken': getCookie('csrftoken')
+ }
+ };
+
+ $http.post(url, data, config).then(ListInitialDatas, cantLoadInitialDatas);
+
+
+ function ListInitialDatas(response) {
+
+
+ if (response.data.changePHP === 1) {
+
+ $scope.configFileLoading = true;
+ $scope.websiteDomain = $("#childDomain").text();
+
+
+ // notifications
+
+ $scope.failedToChangePHPMaster = true;
+ $scope.phpChangedMaster = false;
+ $scope.couldNotConnect = true;
+
+
+ } else {
+
+ $scope.configFileLoading = true;
+ $scope.errorMessage = response.data.error_message;
+
+ // notifications
+
+ $scope.failedToChangePHPMaster = false;
+ $scope.phpChangedMaster = true;
+ $scope.couldNotConnect = true;
+
+ }
+
+
+ }
+
+ function cantLoadInitialDatas(response) {
+
+ $scope.configFileLoading = true;
+
+ // notifications
+
+ $scope.failedToChangePHPMaster = true;
+ $scope.phpChangedMaster = true;
+ $scope.couldNotConnect = false;
+
+ }
+
+ };
+
+
+ /// Open_basedir protection
+
+ $scope.baseDirLoading = true;
+ $scope.operationFailed = true;
+ $scope.operationSuccessfull = true;
+ $scope.couldNotConnect = true;
+ $scope.openBaseDirBox = true;
+
+
+ $scope.openBaseDirView = function () {
+ $scope.openBaseDirBox = false;
+ };
+
+ $scope.hideOpenBasedir = function () {
+ $scope.openBaseDirBox = true;
+ };
+
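+ // Applies the open_basedir setting selected in $scope.openBasedirValue to the
+ // child domain through /websites/changeOpenBasedir.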
+ $scope.applyOpenBasedirChanges = function (childDomain, phpSelection) {
+
+ // notifications
+
+ $scope.baseDirLoading = false;
+ $scope.operationFailed = true;
+ $scope.operationSuccessfull = true;
+ $scope.couldNotConnect = true;
+ $scope.openBaseDirBox = false;
+
+
+ var url = "/websites/changeOpenBasedir";
+
+ var data = {
+ domainName: $("#childDomain").text(),
+ openBasedirValue: $scope.openBasedirValue
+ };
+
+ var config = {
+ headers: {
+ 'X-CSRFToken': getCookie('csrftoken')
+ }
+ };
+
+ $http.post(url, data, config).then(ListInitialDatas, cantLoadInitialDatas);
+
+
+ function ListInitialDatas(response) {
+
+
+ if (response.data.changeOpenBasedir === 1) {
+
+ $scope.baseDirLoading = true;
+ $scope.operationFailed = true;
+ $scope.operationSuccessfull = false;
+ $scope.couldNotConnect = true;
+ $scope.openBaseDirBox = false;
+
+ } else {
+
+ $scope.baseDirLoading = true;
+ $scope.operationFailed = false;
+ $scope.operationSuccessfull = true;
+ $scope.couldNotConnect = true;
+ $scope.openBaseDirBox = false;
+
+ $scope.errorMessage = response.data.error_message;
+
+ }
+
+
+ }
+
+ function cantLoadInitialDatas(response) {
+
+ $scope.baseDirLoading = true;
+ $scope.operationFailed = true;
+ $scope.operationSuccessfull = true;
+ $scope.couldNotConnect = false;
+ $scope.openBaseDirBox = false;
+
+
+ }
+
+ };
+
+});
+
diff --git a/websiteFunctions/templates/websiteFunctions/WPsitesList.html b/websiteFunctions/templates/websiteFunctions/WPsitesList.html
index 14298a370..0a8f4f7fc 100644
--- a/websiteFunctions/templates/websiteFunctions/WPsitesList.html
+++ b/websiteFunctions/templates/websiteFunctions/WPsitesList.html
@@ -87,6 +87,42 @@
}
};
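+ // Calls the ScanWordpressSite endpoint to detect existing WordPress
+ // installations on the server and reloads the list once the scan succeeds.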
+ $scope.ScanWordpressSite = function () {
+ $('#cyberPanelLoading').show();
+ var url = "{% url 'ScanWordpressSite' %}";
+ var data = {};
+ var config = {
+ headers: {
+ 'X-CSRFToken': getCookie('csrftoken')
+ }
+ };
+
+ $http.post(url, data, config).then(function(response) {
+ $('#cyberPanelLoading').hide();
+ if (response.data.status === 1) {
+ new PNotify({
+ title: 'Success!',
+ text: 'WordPress sites scanned successfully!',
+ type: 'success'
+ });
+ location.reload();
+ } else {
+ new PNotify({
+ title: 'Operation Failed!',
+ text: response.data.error_message,
+ type: 'error'
+ });
+ }
+ }, function(response) {
+ $('#cyberPanelLoading').hide();
+ new PNotify({
+ title: 'Operation Failed!',
+ text: response.data.error_message,
+ type: 'error'
+ });
+ });
+ };
+
$scope.updateSetting = function(site, setting) {
var settingMap = {
'search-indexing': 'searchIndex',
@@ -355,6 +391,7 @@
{% trans "WordPress Sites" %}
+
Install WordPress
@@ -602,6 +639,4 @@
margin-left: 4px;
}
-{% endblock content %}
-
-
+{% endblock content %}
\ No newline at end of file
diff --git a/websiteFunctions/templates/websiteFunctions/listWebsites.html b/websiteFunctions/templates/websiteFunctions/listWebsites.html
index 44fbaf468..4e09feade 100755
--- a/websiteFunctions/templates/websiteFunctions/listWebsites.html
+++ b/websiteFunctions/templates/websiteFunctions/listWebsites.html
@@ -66,7 +66,7 @@