import requests
import csv
import shutil
from datetime import datetime
import json
import os
import time
import logging

# Log errors (and worse) to main.log with timestamps; consumed by the
# catch-all handler in execute_job().
logging.basicConfig(filename='main.log', level=logging.ERROR,
                    format='%(asctime)s - %(levelname)s - %(message)s')


def csv_to_list(file_path):
    """Read the CSV at *file_path* and return all rows as a list of lists."""
    with open(file_path, 'r', newline="", encoding='utf-8', errors='replace') as handle:
        return [record for record in csv.reader(handle)]


def convert_to_numeric(row1):
    """Coerce each cell of *row1*: 'true'/'false' (any case) become bools,
    numeric strings become int or float (float iff a '.' is present), and
    everything else is kept as the original string."""
    result = []
    for cell in row1:
        folded = cell.lower()
        if folded == 'true':
            result.append(True)
        elif folded == 'false':
            result.append(False)
        else:
            try:
                number = float(cell) if '.' in cell else int(cell)
            except ValueError:
                result.append(cell)
            else:
                result.append(number)
    return result


def load_config():
    """Parse config.json from the current working directory into a dict."""
    with open('config.json') as config_file:
        settings = json.load(config_file)
    return settings


def create_session(config):
    """Return a requests.Session preloaded with the configured API key and a
    JSON content-type header, so every call made through it is authenticated."""
    headers = {
        'x-api-key': config['api_key'],
        'Content-Type': 'application/json',
    }
    session = requests.Session()
    session.headers.update(headers)
    return session


def get_user_identity_id(cycle, session, config):
    """Return the id of the user identity whose 'cycle' equals *cycle*, or
    None when no identity matches."""
    endpoint = config['url_link'] + '/api/v1/user_identities/'
    identities = session.get(endpoint).json()['user_identities']
    return next((entry['id'] for entry in identities if entry['cycle'] == cycle), None)


def get_field_type_by_id(data, search_id):
    """Find the 'field_type' of the entry in *data* whose 'id' equals
    int(search_id); returns None when search_id is non-numeric or absent."""
    try:
        wanted = int(search_id)
    except ValueError:
        return None
    hits = (entry['field_type'] for entry in data if entry['id'] == wanted)
    return next(hits, None)


def get_field_type(config, user_identity_id, questionID, programID, session):
    """Resolve the field type of *questionID* within a program's custom
    fields; returns "" when the program exposes no custom fields at all."""
    custom_fields = get_custom_fields(config, user_identity_id, programID, session)
    if custom_fields is None:
        return ""
    return get_field_type_by_id(custom_fields, questionID)


def get_custom_fields(config, user_identity_id, program_id, session):
    """Fetch the custom-field definitions for a program; None when the
    response body carries no 'custom_fields' key."""
    endpoint = (
        config['url_link']
        + f'/api/v1/user_identities/{user_identity_id}/programs/{program_id}/custom_fields'
    )
    payload = session.get(endpoint).json()
    return payload.get('custom_fields')


def update_custom_field(config, user_identity_id, programID, casID, questionID, questionType, value, session):
    """PUT a single custom-field answer for one applicant and return the raw
    HTTP response object so the caller can inspect the status code."""
    endpoint = (
        config['url_link']
        + f'/api/v1/user_identities/{user_identity_id}/programs/{programID}'
        + f'/applicants_by_cas_id/{casID}/custom_field_answers/{questionID}'
    )
    answer = {
        "custom_field_answer": {
            "field_type": questionType,
            "value": value,
        }
    }
    return session.put(endpoint, json=answer)


def archive_file(config, file_name):
    """Move the annotated working copy (test1.csv) into an Archive/ subfolder
    under a timestamped name, then delete the original input file.

    Args:
        config: dict providing 'working_dir' and 'file_name'.
        file_name: base name to use (after the timestamp) for the archived copy.
    """
    destination_directory = os.path.join(config['working_dir'], "Archive")
    os.makedirs(destination_directory, exist_ok=True)
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    destination_file = f"{timestamp}_{file_name}"
    destination_path = os.path.join(destination_directory, destination_file)
    # BUG FIX: the source path was previously built with string concatenation
    # (config['working_dir'] + "test1.csv"), which drops the path separator
    # whenever working_dir lacks a trailing slash. Every other site in this
    # module builds the path with os.path.join — do the same here.
    shutil.move(os.path.join(config['working_dir'], "test1.csv"), destination_path)
    os.remove(os.path.join(config['working_dir'], config['file_name']))


def execute_job():
    """Poll the working directory every 10 seconds for the configured CSV and
    push each answer cell to the API, appending the HTTP status code of every
    update to a working copy (test1.csv) before archiving. Runs forever; any
    exception in a cycle is logged and the loop continues.
    """
    config = load_config()
    # Full path to the input CSV named in config; its existence triggers a run.
    file_path = os.path.join(config['working_dir'], config['file_name'])

    while True:
        # A fresh session per cycle, closed in the finally below.
        session = create_session(config)
        try:

            if os.path.exists(file_path):
                # test1.csv is the working copy that accumulates status codes.
                shutil.copy(file_path, os.path.join(config['working_dir'], "test1.csv"))

                # First pass: header row only; columns from index 3 onward
                # look like question IDs — TODO confirm the file layout.
                with open(file_path, 'r', encoding='utf-8', errors='replace') as file:
                    csvreader = csv.reader(file)
                    readCSVFirstRow = next(csvreader)[3:]

                # Second pass: data rows, string cells coerced to
                # bool/int/float where possible.
                with open(file_path, 'r', encoding='utf-8', errors='replace') as file:
                    csvreader2 = csv.reader(file)
                    next(csvreader2)
                    converted_csv_data = [convert_to_numeric(row) for row in csvreader2]

                    for row_number, row in enumerate(converted_csv_data, start=1):
                        # Fixed leading columns: CAS id, program id, cycle.
                        cas_id = str(row[0])
                        program_id = str(row[1])
                        user_identity_id = get_user_identity_id(str(row[2]), session, config)

                        for i in range(len(readCSVFirstRow)):
                            # Rows shorter than the header yield None answers.
                            if i + 3 < len(row):
                                value = row[i + 3]
                            else:
                                value = None

                            question_id = str(readCSVFirstRow[i])
                            question_type = str(
                                get_field_type(config, user_identity_id, question_id, program_id, session))

                            response = update_custom_field(
                                config, user_identity_id, program_id, cas_id, question_id, question_type,
                                value, session
                            )

                            # NOTE(review): the working copy is re-read and
                            # fully rewritten once per field update — O(n^2)
                            # I/O — but this persists progress to disk after
                            # every single API call.
                            rows = csv_to_list(os.path.join(config['working_dir'], "test1.csv"))

                            # rows[0] is the header, so row_number (1-based)
                            # indexes the matching data row directly.
                            if 1 <= row_number <= len(rows) - 1:
                                rows[row_number].append(str(response.status_code).strip())
                            else:
                                break

                            with open(os.path.join(config['working_dir'], "test1.csv"), "w", newline="",
                                      encoding='utf-8', errors='replace') as file2:
                                writer = csv.writer(file2)
                                writer.writerows(rows)

                # Move the annotated copy into Archive/ and delete the input.
                archive_file(config, config['file_name'])
        except Exception as e:
            logging.error(f'An error occurred: {str(e)}', exc_info=True)
        finally:
            session.close()
        time.sleep(10)


if __name__ == "__main__":
    # Entry point: start the polling loop (execute_job never returns).
    execute_job()
