import os
import atexit
import copy
import uuid

from flask import Flask, jsonify, request
from flask_executor import Executor

from apscheduler.schedulers.background import BackgroundScheduler

from main import main
from lib.my_logging import logging
from lib.geonetwork_helper import RecordNotFound
from lib.locker import unlock

from tools.obsolete_dataset_remover import main as obsolete_dataset_remover
from tools.field_type_detector import main as field_type_detector
from tools.queues_remover import main as queues_remover
from tools.locks_remover import main as locks_remover

from yaml import load

# Prefer the faster C-based YAML loader when available
try:
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader

# Read and parse the configuration file
with open("config.yaml", 'r') as yamlfile:
    cfg = load(yamlfile, Loader=Loader)
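
# Minimal structure this script expects from config.yaml (inferred from the
# lookups below; the actual file may define more settings):
#
#   rabbitmq:
#     host: ...
#     port: 5672
#     exchange: ...
#   metadata_getter:
#     uuids_to_get: []
#   session:
#     working_directory: ...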

# Override broker settings from the environment: RMQ_HOST and RMQ_EXCHANGE
# are required; RMQ_PORT falls back to the default AMQP port
cfg['rabbitmq']['host'] = os.environ['RMQ_HOST']
cfg['rabbitmq']['port'] = int(os.environ.get('RMQ_PORT', 5672))
cfg['rabbitmq']['exchange'] = os.environ['RMQ_EXCHANGE']

# LOGLEVEL is required (e.g. DEBUG, INFO, WARNING)
logging.getLogger().setLevel(os.environ['LOGLEVEL'])


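# Nightly housekeeping: the obsolete dataset remover runs every day at 04:00.
# The other maintenance jobs are currently disabled.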
scheduler = BackgroundScheduler()
scheduler.add_job(obsolete_dataset_remover, name="obsolete_dataset_remover",
                  args=[cfg], trigger='cron', hour='4', minute='0')
#scheduler.add_job(field_type_detector, name="field_type_detector", args=[cfg], trigger='cron', hour='0', minute='0')
#scheduler.add_job(queues_remover, name="queues_remover", args=[cfg], trigger='cron', hour='6', minute='0')
#scheduler.add_job(locks_remover, name="locks_remover", args=[cfg['session']['working_directory']], trigger='cron', hour='6', minute='0')
scheduler.start()

# Shut down the scheduler when exiting the app
atexit.register(scheduler.shutdown)

api = Flask(__name__, static_url_path='')
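# Propagate exceptions raised in background tasks so failed indexations
# surface in the logs instead of disappearing in the thread pool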
api.config['EXECUTOR_PROPAGATE_EXCEPTIONS'] = True
executor = Executor(api)


@api.route("/uuid/<the_uuid>", methods=["GET"])
def _main(the_uuid):
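    """Trigger an asynchronous indexation of the record identified by the_uuid.

    Returns a JSON payload with the generated session id so the caller can
    track the submitted job.
    """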

    # force == "true" => remove any lock before triggering the indexation
    force = request.args.get('force', default='false', type=str)
    if force.lower() == 'true':
        unlock(cfg['session']['working_directory'], the_uuid)

    this_session_id = str(uuid.uuid4())

    # Work on a per-request deep copy so concurrent requests cannot mutate
    # the shared configuration before the executor picks the job up
    session_cfg = copy.deepcopy(cfg)
    session_cfg['metadata_getter']['uuids_to_get'] = [the_uuid]
    session_cfg['session']['id'] = this_session_id

    executor.submit(main, session_cfg)

    return jsonify({'id': this_session_id}), 200


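# Example call (hypothetical UUID; the service listens on port 8000 below):
#   curl "http://localhost:8000/uuid/<metadata-uuid>?force=true"
# Response: {"id": "<session-uuid>"} identifying the submitted indexation job.
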
if __name__ == '__main__':
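    # Development server only; run behind a production WSGI server when deploying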
    api.run(host='0.0.0.0', port=8000, debug=True)