import os
import json
import time
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
import requests

# === Configuration ===
# OAuth client secrets and the cached user token for the Drive API.
CREDENTIALS_FILE = '/var/www/ScriptInstallationIndexaForContent/ScriptSynchroDocMagic/Credentials_Gdrive/credentials_Indexa.json'
TOKEN_GDRIVE = '/var/www/ScriptInstallationIndexaForContent/ScriptSynchroDocMagic/Credentials_Gdrive/token.json'
# Read-only scope: this script only lists revisions, it never writes to Drive.
SCOPES_DRIVE = ['https://www.googleapis.com/auth/drive.readonly']

# Directory tree scanned for *.magic files.
root_dir = '/var/www'
# Run report and the persisted doc-id -> revision-id map, stored next to this script.
output_file = os.path.join(os.path.dirname(__file__), 'docsSynchro.txt')
REVISION_TRACK_FILE = os.path.join(os.path.dirname(__file__), 'revisions.json')

# === Google Drive API authentication ===
def get_drive_service():
    """Return an authenticated Drive v3 service client.

    Reuses the token cached in TOKEN_GDRIVE when it is still valid,
    silently refreshes it when expired, and otherwise falls back to the
    interactive local-server OAuth flow.  Whenever new credentials are
    obtained, they are written back to TOKEN_GDRIVE for the next run.
    """
    credentials = None
    if os.path.exists(TOKEN_GDRIVE):
        credentials = Credentials.from_authorized_user_file(TOKEN_GDRIVE, SCOPES_DRIVE)

    needs_auth = not credentials or not credentials.valid
    if needs_auth:
        can_refresh = bool(credentials) and credentials.expired and credentials.refresh_token
        if can_refresh:
            credentials.refresh(Request())
        else:
            # Interactive consent flow (opens a browser on a random local port).
            oauth_flow = InstalledAppFlow.from_client_secrets_file(CREDENTIALS_FILE, SCOPES_DRIVE)
            credentials = oauth_flow.run_local_server(port=0)
        # Persist the (new or refreshed) token for subsequent runs.
        with open(TOKEN_GDRIVE, 'w') as token_file:
            token_file.write(credentials.to_json())

    return build('drive', 'v3', credentials=credentials)

# === Revision tracking persistence ===
def load_revision_ids():
    """Load the doc-id -> last-seen-revision-id map from REVISION_TRACK_FILE.

    Returns an empty dict on the first run (file not created yet).  Uses
    EAFP (try/except FileNotFoundError) instead of an exists() check to
    avoid the race between checking for the file and opening it.
    """
    try:
        with open(REVISION_TRACK_FILE, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        return {}

def save_revision_ids(revision_data):
    """Persist the doc-id -> revision-id map as pretty-printed UTF-8 JSON."""
    with open(REVISION_TRACK_FILE, 'w', encoding='utf-8') as track_file:
        json.dump(revision_data, track_file, ensure_ascii=False, indent=4)

# Throttle between Google Drive API calls: enforce at least 500 ms spacing.
last_call_time = 0  # wall-clock timestamp of the most recent Drive API call
call_lock = threading.Lock()  # serializes the throttle bookkeeping across worker threads
revision_lock = threading.Lock()  # guards the shared revision_ids dict (read/written in process_file)

def get_last_revision_info(service, file_id):
    """Return (revision_id, modified_time) for the newest revision of *file_id*.

    Return contract:
      - (id, modifiedTime) on success (last entry of the revisions list);
      - (None, None) when the document has no revisions;
      - (None, "[Erreur révisions: ...]") on any API error — note the
        second slot carries an error *string* in that case, so callers
        must treat a falsy first element as failure.
    """
    global last_call_time
    with call_lock:
        # Sleep while holding the lock so concurrent callers queue up and
        # leave at least 500 ms between successive Drive API requests.
        now = time.time()
        if now - last_call_time < 0.5:
            time.sleep(0.5 - (now - last_call_time))
        last_call_time = time.time()
    try:
        revisions = service.revisions().list(fileId=file_id, fields="revisions(id, modifiedTime)").execute()
        if 'revisions' in revisions and revisions['revisions']:
            last = revisions['revisions'][-1]
            return last['id'], last['modifiedTime']
        return None, None
    except Exception as e:
        # Broad catch by design: any API/auth failure is folded into the
        # error-string return instead of propagating to the worker thread.
        return None, f"[Erreur révisions: {e}]"

# === Utilities ===
def get_first_url(filepath):
    """Return the first http(s) URL found in *filepath*, rewritten so that
    'bypass' becomes the first path segment (scheme://host/bypass/...).

    Only the part before the first tab on the matching line is used.
    Returns "[Aucune URL trouvée]" when no line starts with 'http', and
    "[Erreur lecture: ...]" when the file cannot be read.
    """
    try:
        with open(filepath, 'r', encoding='utf-8') as handle:
            for raw_line in handle:
                stripped = raw_line.strip()
                if not stripped.startswith('http'):
                    continue
                url = stripped.split('\t')[0]
                segments = url.split('/')
                if len(segments) > 2:
                    # ['http:', '', 'host', ...] -> ['http:', '', 'host', 'bypass', ...]
                    url = '/'.join(segments[:3] + ['bypass'] + segments[3:])
                return url
    except Exception as e:
        return f"[Erreur lecture: {e}]"
    return "[Aucune URL trouvée]"

def call_url_twice(url):
    """Fire-and-forget: GET *url* twice from a background thread.

    Used to poke the bypass URL so the downstream consumer refreshes its
    content.  Failures are printed, never raised — the caller does not
    wait for or inspect the outcome.
    """
    def call():
        try:
            # Fix: the original calls had no timeout, so a hung server
            # would block this thread (and process exit) indefinitely.
            response1 = requests.get(url, timeout=30)
            response2 = requests.get(url, timeout=30)
            print(f"Appel à l'URL effectué deux fois: {url}")
            if response1.status_code == 200 and response2.status_code == 200:
                print("Les deux appels ont réussi.")
            else:
                print(f"Erreur lors de l'appel à l'URL: {url}")
        except requests.exceptions.RequestException as e:
            print(f"Erreur lors de l'appel HTTP à {url}: {e}")
    # Non-daemon thread: the interpreter waits for in-flight calls at exit.
    threading.Thread(target=call).start()

# === Per-file task (runs on a worker thread) ===
def process_file(full_path):
    """Check one .magic file's Drive document and return a status line.

    The file's basename minus the '.magic' suffix is treated as a Google
    Drive document id.  The document's latest revision is compared against
    the shared `revision_ids` dict; when it changed, the file's bypass URL
    is fired twice via call_url_twice().  Returns None for non-.magic
    paths, otherwise the status line (which is also printed).  Exceptions
    are caught and reported as an error line rather than propagated.
    """
    try:
        # NOTE(review): a fresh Drive service is built per file/thread;
        # concurrent token refreshes could race on TOKEN_GDRIVE — confirm
        # this is acceptable for the deployment.
        service = get_drive_service()
        filename = os.path.basename(full_path)
        if not filename.endswith('.magic'):
            return None

        url = get_first_url(full_path)
        # NOTE(review): replace() removes every '.magic' occurrence, not
        # just the suffix — harmless unless a doc id contains '.magic'.
        doc_id = filename.replace('.magic', '')
        revision_id, revision_time = get_last_revision_info(service, doc_id)

        # Snapshot the previously recorded revision under the shared lock.
        with revision_lock:
            previous_revision = revision_ids.get(doc_id)

        if revision_id and revision_time:
            if previous_revision:
                if revision_id != previous_revision:
                    line = f"{full_path} => {url} => Révision {revision_id} à {revision_time} (MODIFIÉ)"
                    call_url_twice(url)
                else:
                    line = f"{full_path} => {url} => Révision {revision_id} (AUCUN CHANGEMENT)"
            else:
                line = f"{full_path} => {url} => Révision {revision_id} (NOUVEAU DOCUMENT)"

            # Separate lock scope from the read above; a concurrent task for
            # the same doc_id could interleave, but each doc_id maps to a
            # single file path in practice.
            with revision_lock:
                revision_ids[doc_id] = revision_id
        else:
            line = f"{full_path} => {url} => Erreur récupération de la révision."
        print(line)
        return line
    except Exception as e:
        print(f"[ERREUR THREAD] {full_path}: {e}")
        return f"{full_path} => Erreur: {e}"

# === Main entry point ===
if __name__ == '__main__':
    # Shared state read/updated by the worker threads (under revision_lock).
    revision_ids = load_revision_ids()

    # Collect every *.magic file under root_dir.
    files_to_process = [
        os.path.join(dirpath, name)
        for dirpath, _, names in os.walk(root_dir)
        for name in names
        if name.endswith('.magic')
    ]

    # Fan the files out over a small thread pool and gather status lines
    # in completion order (order is not significant for the report).
    results = []
    with ThreadPoolExecutor(max_workers=5) as pool:
        pending = [pool.submit(process_file, path) for path in files_to_process]
        for finished in as_completed(pending):
            outcome = finished.result()
            if outcome:
                results.append(outcome)

    # Write the run report, one status line per file.
    with open(output_file, 'w', encoding='utf-8') as out_file:
        out_file.write(''.join(line + '\n' for line in results))

    save_revision_ids(revision_ids)
