#!/bin/bash

# Setting up COMPSs_HOME
# If COMPSS_HOME is not already defined in the environment, derive it from
# this script's location (three directories above the script's folder).
if [ -z "${COMPSS_HOME}" ]; then
  COMPSS_HOME="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../.. && pwd )/"
fi
# Ensure COMPSS_HOME ends with a trailing slash: the rest of the script
# concatenates paths as "${COMPSS_HOME}Runtime/...".
if [ ! "${COMPSS_HOME: -1}" = "/" ]; then
  COMPSS_HOME="${COMPSS_HOME}/"
fi
export COMPSS_HOME=${COMPSS_HOME}

# Load runtime helper modules (tracing, communication adaptors, schedulers)
# shellcheck source=../system/runtime/tracing.sh
# shellcheck disable=SC1091
source "${COMPSS_HOME}Runtime/scripts/system/runtime/tracing.sh"
# shellcheck source=../system/runtime/adaptors.sh
# shellcheck disable=SC1091
source "${COMPSS_HOME}Runtime/scripts/system/runtime/adaptors.sh"
# shellcheck source=../system/runtime/scheduler.sh
# shellcheck disable=SC1091
source "${COMPSS_HOME}Runtime/scripts/system/runtime/scheduler.sh"

#---------------------------------------------------------------------------------------
# SCRIPT CONSTANTS DECLARATION
#---------------------------------------------------------------------------------------
# Logical names used when referring to worker working directories
SHARED_DISK_NAME="shared_disk"
LOCAL_DISK_NAME="local_disk"

# Default supercomputer configuration (queues/supercomputers/<name>.cfg)
DEFAULT_SC_CFG="default"

# Next default values need to be consistent with runcompss
DEFAULT_LANGUAGE=java
DEFAULT_LIBRARY_PATH=$(pwd)
DEFAULT_APPDIR=$(pwd)
DEFAULT_CLASSPATH=$(pwd)
DEFAULT_PYTHONPATH=$(pwd)
DEFAULT_DEBUG=off
DEFAULT_DEBUG_ARG=debug
# NIO_ADAPTOR is provided by the sourced adaptors.sh helper
DEFAULT_COMMUNICATION_ADAPTOR=${NIO_ADAPTOR}
DEFAULT_TRACING_TASK_DEP=false
DEFAULT_CUSTOM_EXTRAE_FILE="null"
DEFAULT_CUSTOM_EXTRAE_FILE_PYTHON="null"
# Master/worker port configuration for the NIO adaptor
DEFAULT_MASTER_PORT_BASE=43000
DEFAULT_MASTER_PORT_RAND_RANGE=1000
DEFAULT_WORKER_MIN_PORT=43001
DEFAULT_WORKER_MAX_PORT=43005
# Binding of computing units to hardware resources
DEFAULT_CPU_AFFINITY="automatic"
DEFAULT_GPU_AFFINITY="automatic"
DEFAULT_FPGA_AFFINITY="automatic"
DEFAULT_FPGA_REPROGRAM=""
DEFAULT_TASK_EXECUTION=compss
DEFAULT_STORAGE_CONF=null
DEFAULT_STREAMING="null"
DEFAULT_TRACE_GENERATION="false"
DEFAULT_TRACE_DELETE_PACKAGE="false"
# Streaming master port is picked randomly within this base + range
BASE_STREAMING_PORT=49049
STREAMING_PORT_RAND_RANGE=100
DEFAULT_PERSISTENT_WORKER_C=false
DEFAULT_AUTOMATIC_SCALING=true
# Python worker defaults; an active virtualenv is propagated when present
DEFAULT_PYTHON_INTERPRETER=python3
if [ -z "${VIRTUAL_ENV}" ]; then
  DEFAULT_PYTHON_VIRTUAL_ENVIRONMENT="null"
else
  DEFAULT_PYTHON_VIRTUAL_ENVIRONMENT="${VIRTUAL_ENV}"
fi
DEFAULT_PYTHON_PROPAGATE_VIRTUAL_ENVIRONMENT=true
DEFAULT_PYTHON_MPI_WORKER=false
DEFAULT_PYTHON_MEMORY_PROFILE=false
DEFAULT_PYTHON_WORKER_CACHE=false
DEFAULT_PYTHON_CACHE_PROFILER=false
# Interactive (Jupyter / IPython) execution defaults
DEFAULT_JUPYTER_NOTEBOOK=false
DEFAULT_JUPYTER_NOTEBOOK_PATH=$HOME
DEFAULT_IPYTHON=false
# Scheduler implementations come from the sourced scheduler.sh helper
DEFAULT_SHARED_DISK_SCHEDULER=${LA_MT_SUCC_FIFO_SCHEDULER}
DEFAULT_LOCAL_DISK_SCHEDULER=${LA_MT_SUCC_LOCALITY_SCHEDULER}

#---------------------------------------------------------------------------------------
# ERROR CONSTANTS DECLARATION
#---------------------------------------------------------------------------------------

# Messages reported through display_error / action_error when argument
# validation or a launch action fails
ERROR_CFG_SC="SuperComputer CFG file doesn't exist"
ERROR_CFG_Q="Queue system CFG file doesn't exist"
ERROR_MASTER_NODE="Missing master node parameter"
ERROR_WORKER_NODES="Missing worker nodes parameter"
ERROR_NUM_CPUS="Invalid number of CPUS per node"
ERROR_NUM_IO_EXECUTORS="Invalid number of IO executors per node. Only integers >= 0 allowed"
ERROR_WORKER_WD="Invalid Worker Working Dir option"
ERROR_MASTER_WD="Invalid Master Working Dir option"
ERROR_NETWORK="Invalid network option"
ERROR_WORKER_IN_MASTER_CPUS="Parameter worker_in_master_cpus is bigger than the maximum number of cpus_per_node"
ERROR_WORKER_IN_MASTER_MEMORY="Incorrect worker_in_master_memory parameter. Only disabled or <int> allowed. I.e. 33000, 66000"
ERROR_WORKER_IN_MASTER_MEMORY_TOO_HIGH="Parameter worker_in_master_memory exceeds the node_memory limit"
ERROR_WORKER_IN_MASTER_MEMORY_NOT_SPECIFIED="Parameter worker_in_master_memory is mandatory if worker_in_master_cpus is not 0"
ERROR_NODE_STORAGE_BANDWIDTH_TOO_HIGH="Parameter node_storage_bandwidth exceeds available storage bandwidth limit"
ERROR_PROLOG_ACTION="Exception executing prolog action"
ERROR_EPILOG_ACTION="Exception executing epilog action"
ERROR_LANGUAGE="Value of option --lang must be: java, c or python"
# NOTE(review): naming differs from the ERROR_* convention above
JAVA_JRE_ERROR="Can't find JVM libraries in JAVA_HOME. Please check your Java JRE Installation."

#---------------------------------------------------------------------------------------
# GENERIC HELPER FUNCTIONS
#---------------------------------------------------------------------------------------

###############################################
# Displays usage
###############################################
usage() {
  # Prints the general usage banner and then delegates to show_opts, which
  # prints the full option list and terminates the script.
  # Arguments: $1 - exit code to terminate with (forwarded to show_opts)
  local exit_code=$1

  cat <<EOT
Usage: $0 [options] application_name application_arguments

* Options:
  General:
    --help, -h                              Print this help message

    --opts                                  Show available options

    --version, -v                           Print COMPSs version

    --sc_cfg=<name>                         SuperComputer configuration file to use. Must exist inside queues/cfgs/
                                            Mandatory
                                            Default: ${DEFAULT_SC_CFG}

    --master_node=<string>                  Node where to run the COMPSs Master
                                            Mandatory

    --worker_nodes="<string string...>"     Space separated nodes where to run the COMPSs Workers (Notice the quotes)
                                            Mandatory

  Launch configuration:
EOT

  show_opts "$exit_code"
}

###############################################
# Show Options
###############################################
show_opts() {
  # Prints the launch-configuration options with the default values taken
  # from the default supercomputer CFG and its queue-system CFG, then the
  # runcompss options, and exits with the given code.
  # Arguments: $1 - exit code to terminate with
  local exitValue=$1

  # Load default CFG for default values
  local defaultSC_cfg="${COMPSS_HOME}Runtime/scripts/queues/supercomputers/${DEFAULT_SC_CFG}.cfg"
  # shellcheck source=../queues/supercomputers/default.cfg
  # shellcheck disable=SC1091
  source "${defaultSC_cfg}"
  # QUEUE_SYSTEM is defined by the supercomputer CFG sourced above
  local defaultQS_cfg="${COMPSS_HOME}Runtime/scripts/queues/queue_systems/${QUEUE_SYSTEM}.cfg"
  # shellcheck source=../queues/queue_systems/slurm.cfg
  # shellcheck disable=SC1091
  source "${defaultQS_cfg}"

  # Show usage
  # BUGFIX: closed the unbalanced quote in --fpga_reprogram and aligned the
  # --worker_port_range lines (they used literal tabs) with the rest.
  cat <<EOT
    --cpus_per_node=<int>                   Available CPU computing units on each node
                                            Default: ${DEFAULT_CPUS_PER_NODE}
    --gpus_per_node=<int>                   Available GPU computing units on each node
                                            Default: ${DEFAULT_GPUS_PER_NODE}
    --fpgas_per_node=<int>                  Available FPGA computing units on each node
                                            Default: ${DEFAULT_FPGAS_PER_NODE}
    --io_executors=<int>                    Number of IO executors on each node
                                            Default: ${DEFAULT_IO_EXECUTORS}
    --fpga_reprogram="<string>"             Specify the full command that needs to be executed to reprogram the FPGA with
                                            the desired bitstream. The location must be an absolute path.
                                            Default: ${DEFAULT_FPGA_REPROGRAM}
    --max_tasks_per_node=<int>              Maximum number of simultaneous tasks running on a node
                                            Default: ${DEFAULT_MAX_TASKS_PER_NODE}
    --node_memory=<MB>                      Maximum node memory: disabled | <int> (MB)
                                            Default: ${DEFAULT_NODE_MEMORY}
    --node_storage_bandwidth=<MB>           Maximum node storage bandwidth: <int> (MB)
                                            Default: ${DEFAULT_NODE_STORAGE_BANDWIDTH}

    --network=<name>                        Communication network for transfers: default | ethernet | infiniband | data.
                                            Default: ${DEFAULT_NETWORK}

    --prolog="<string>"                     Task to execute before launching COMPSs (Notice the quotes)
                                            If the task has arguments split them by "," rather than spaces.
                                            This argument can appear multiple times for more than one prolog action
                                            Default: Empty
    --epilog="<string>"                     Task to execute after executing the COMPSs application (Notice the quotes)
                                            If the task has arguments split them by "," rather than spaces.
                                            This argument can appear multiple times for more than one epilog action
                                            Default: Empty

    --master_working_dir=<name | path>      Working directory of the application ${LOCAL_DISK_NAME} | ${SHARED_DISK_NAME} | <path>
                                            Default: ${DEFAULT_MASTER_WORKING_DIR}
    --worker_working_dir=<name | path>      Worker directory. Use: ${LOCAL_DISK_NAME} | ${SHARED_DISK_NAME} | <path>
                                            Default: ${DEFAULT_WORKER_WORKING_DIR}

    --worker_in_master_cpus=<int>           Maximum number of CPU computing units that the master node can run as worker. Cannot exceed cpus_per_node.
                                            Default: ${DEFAULT_WORKER_IN_MASTER_CPUS}
    --worker_in_master_memory=<int> MB      Maximum memory in master node assigned to the worker. Cannot exceed the node_memory.
                                            Mandatory if worker_in_master_cpus is specified.
                                            Default: ${DEFAULT_WORKER_IN_MASTER_MEMORY}
    --worker_port_range=<min>,<max>         Port range used by the NIO adaptor at the worker side
                                            Default: ${DEFAULT_WORKER_MIN_PORT},${DEFAULT_WORKER_MAX_PORT}
    --jvm_worker_in_master_opts="<string>"  Extra options for the JVM of the COMPSs Worker in the Master Node.
                                            Each option separed by "," and without blank spaces (Notice the quotes)
                                            Default: ${DEFAULT_JVM_WORKER_IN_MASTER}
    --container_image=<path>                Runs the application by means of a container engine image
                                            Default: Empty
    --container_compss_path=<path>          Path where compss is installed in the container image
                                            Default: /opt/COMPSs
    --container_opts="<string>"             Options to pass to the container engine
                                            Default: empty
    --elasticity=<max_extra_nodes>          Activate elasticity specifiying the maximum extra nodes (ONLY AVAILABLE FORM SLURM CLUSTERS WITH NIO ADAPTOR)
                                            Default: 0
    --automatic_scaling=<bool>              Enable or disable the runtime automatic scaling (for elasticity)
                                            Default: ${DEFAULT_AUTOMATIC_SCALING}
    --jupyter_notebook=<path>,              Swap the COMPSs master initialization with jupyter notebook from the specified path.
    --jupyter_notebook                      Default: ${DEFAULT_JUPYTER_NOTEBOOK}
    --ipython                               Swap the COMPSs master initialization with ipython.
                                            Default: empty


  Runcompss configuration:

EOT

  # Defaults consumed by runcompss when printing its own option list
  export DEFAULT_GENERATE_TRACE="false"
  export DEFAULT_TRACING_DELETE_PACKAGES="false"

  "${COMPSS_HOME}Runtime/scripts/user/runcompss" --opts

  exit "$exitValue"
}

###############################################
# Displays version
###############################################
display_version() {
  # Delegates version printing to runcompss and terminates the script.
  # Arguments: $1 - exit code to terminate with
  local exit_code=$1
  "${COMPSS_HOME}Runtime/scripts/user/runcompss" --version
  exit "$exit_code"
}

###############################################
# Displays errors when treating arguments
###############################################
display_error() {
  # Reports an argument-parsing error and shows the usage help, which
  # terminates the script with exit code 1.
  # Arguments: $1 - error message to print
  local msg=$1

  echo "$msg"
  echo " "

  usage 1
}

###############################################
# Displays errors when executing actions
###############################################
action_error() {
  # Reports an error raised while executing an action and aborts the
  # script with exit code 1 (no usage help, unlike display_error).
  # Arguments: $1 - error message to print
  local msg=$1

  echo "$msg"
  echo " "

  exit 1
}

###############################################
# Infers the language from the Application Path
# WARN: Sets global lang variable
###############################################
infer_language() {
  # Infers the application language from the application path.
  # WARN: Sets the global variable "lang" (python | c | ${DEFAULT_LANGUAGE}).
  # Arguments: $1 - full application path
  # Globals read: log_level, DEFAULT_LANGUAGE
  local fullApp
  local fileName
  local extension
  local isPython
  local isPythonCompiled
  local isC

  fullApp=$1
  fileName=$(basename "$fullApp")
  extension=${fileName##*.}
  if [ "$extension" == "py" ] || [ "$extension" == "pyc" ]; then
    if [ "${log_level}" != "off" ]; then
       echo "[  INFO] Inferred PYTHON language"
    fi
    lang=python
  else
    # Try to infer language by file command
    # BUGFIX: inspect the function argument (fullApp); the previous code read
    # the undeclared variable "fullAppPath" and only worked when the caller
    # happened to define it as a global.
    isPython=$(file "$fullApp" | grep "Python" | cat)
    isPythonCompiled=$(file "$fullApp" | grep "python" | cat)
    isC=$(file "$fullApp" | grep "executable" | cat)
    if [ -n "$isPython" ] || [ -n "$isPythonCompiled" ]; then
      if [ "${log_level}" != "off" ]; then
         echo "[  INFO] Inferred PYTHON language"
      fi
      lang=python
    elif [ -n "$isC" ]; then
      if [ "${log_level}" != "off" ]; then
         echo "[  INFO] Inferred C/C++ language"
      fi
      lang=c
    else
      # Lang cannot be inferred: fall back to the default (JAVA)
      if [ "${log_level}" != "off" ]; then
         echo "[  INFO] Using default language: ${DEFAULT_LANGUAGE}"
      fi
      lang=${DEFAULT_LANGUAGE}
    fi
  fi
}

###############################################
# Loads the tracing environment
###############################################
load_tracing_env() {
  # Selects the Extrae installation to use for tracing and exports
  # EXTRAE_HOME accordingly. A pre-set EXTRAE_HOME is respected.
  # The MPI flavour (Intel MPI / OpenMPI / none) is detected from the
  # currently loaded environment modules -- assumes the Environment
  # Modules "module" command is available; TODO confirm on module-less hosts.
  local module_tmp
  if [[ "$OSTYPE" == "darwin"* ]]; then
    # NOTE(review): on macOS "mktemp -t" expects a template prefix, so
    # "/tmp" here acts as a prefix, not a directory -- verify intent
    module_tmp=$(mktemp -t /tmp)
  else
    module_tmp=$(mktemp -p /tmp)
  fi
  # Capture both stdout and stderr: "module list" writes to stderr on
  # some systems
  module list &> "${module_tmp}"
  if [ -z "${EXTRAE_HOME}" ]; then
     # Look for openmpi / impi / none
     impi=$(grep -i "impi" "${module_tmp}" | cat)
     openmpi=$(grep -i "openmpi" "${module_tmp}" | cat)

     if [ ! -z "$impi" ] && [ -d "${COMPSS_HOME}/Dependencies/extrae-impi/" ]; then
       # Load Extrae IMPI
       export EXTRAE_HOME=${COMPSS_HOME}/Dependencies/extrae-impi/
     elif [ ! -z "$openmpi" ] && [ -d "${COMPSS_HOME}/Dependencies/extrae-openmpi/" ]; then
       # Load Extrae OpenMPI
       export EXTRAE_HOME=${COMPSS_HOME}/Dependencies/extrae-openmpi/
     else
       # Load sequential extrae
       export EXTRAE_HOME=${COMPSS_HOME}/Dependencies/extrae/
     fi
  fi

  # Clean tmp file
  rm -f "${module_tmp}"
}

###############################################
# Creates Worker CMD
###############################################
worker_cmd() {
  # Builds the command line that starts a persistent NIO worker on a node.
  # WARNING: SETS GLOBAL SCRIPT VARIABLE WCMD
  # The arguments after WCMD's script path are positional and their exact
  # order is fixed by persistent_worker_starter.sh -- do not reorder.
  # Also reads many launch globals (env_script, library_path, appdir, cp,
  # debug, uuid, lang, worker_working_dir, worker_install_dir, pythonpath,
  # tracing, storage_conf, container_image, container_opts, ...).
  local nodeId=$1
  local nodeName=$2
  local master_name=$3
  local jvm_opts_size=$4
  local jvm_opts_str=$5
  local fpga_reprog_size=$6
  local fpga_reprog_str=$7
  local cusCPU=$8
  local cusGPU=$9
  local cusFPGA=${10}
  local cpuMap=${11}
  local gpuMap=${12}
  local fpgaMap=${13}
  local ioExec=${14}
  local lot=${15}
  local streaming_backend=${16}
  local streaming_master_port=${17}

  if [ -z "${streaming_master_port}" ] || [ "${streaming_master_port}" == "null" ]; then
    # Value will be ignored, just add a random number so that the parser does not complain
    streaming_master_port=44000
  fi


  # Fixed NIO transfer concurrency limits
  local maxSend=5
  local maxReceive=5
  local worker_port=${worker_min_port}

  if [ -z "${master_name}" ]; then
     master_name="null"
  fi

  # Run the starter directly or inside the container image when one is given
  if [ -z "${container_image}" ]; then
     WCMD="${COMPSS_HOME}/Runtime/scripts/system/adaptors/nio/persistent_worker_starter.sh"
  else
     WCMD="singularity exec $container_opts $container_image ${cont_COMPSS_HOME}/Runtime/scripts/system/adaptors/nio/persistent_worker_starter.sh"
  fi

  # Optional hook to rewrite node names for the XML/network configuration
  if [ ! -z "${NODE_NAME_XML}" ]; then
     master_name=$(${NODE_NAME_XML} "${master_name}")
     nodeName=$(${NODE_NAME_XML} "${nodeName}")
  fi

  # Per-execution sandbox: <worker_working_dir>/<uuid>/<nodeName>
  local sandbox_worker_working_dir=${worker_working_dir}/${uuid}/${nodeName}

  WCMD="$WCMD ${env_script} \
            ${library_path} \
            ${appdir} \
            ${cp} \
            ${streaming_backend} \
            ${jvm_opts_size} \
            ${jvm_opts_str} \
            ${fpga_reprog_size} \
            ${fpga_reprog_str} \
            ${debug} \
            ${maxSend} \
            ${maxReceive} \
            ${nodeName} \
            ${worker_port} \
            ${master_name} \
            ${master_port} \
            ${streaming_master_port} \
            ${cusCPU} \
            ${cusGPU} \
            ${cusFPGA} \
            ${cpuMap} \
            ${gpuMap} \
            ${fpgaMap} \
            ${ioExec} \
            ${lot} \
            ${uuid} \
            ${lang} \
            ${sandbox_worker_working_dir} \
            ${worker_install_dir} \
            ${appdir} \
            ${library_path} \
            ${cp} \
            ${pythonpath} \
            ${tracing} \
            ${custom_extrae_file} \
            ${nodeId} \
            ${tracing_task_dependencies} \
            ${storage_conf} \
            ${taskExecution} \
            ${persistent_worker_c} \
            ${python_interpreter} \
            ${python_version} \
            ${DEFAULT_PYTHON_VIRTUAL_ENVIRONMENT} \
            ${python_propagate_virtual_environment} \
            ${custom_extrae_config_file_python} \
            ${python_mpi_worker} \
            ${python_worker_cache} \
            ${python_cache_profiler}"
}

###############################################
# Create Master CMD
###############################################
master_cmd() {
  # Builds the runcompss invocation that starts the COMPSs master.
  # Arguments: $1 - resolved master node name
  # Globals read: container_image, container_opts, cont_COMPSS_HOME,
  #   PROJECT_FILE, RESOURCES_FILE, uuid, specific_log_dir, master_port,
  #   streaming, streaming_master_port, jvm_master_opts, trace_label,
  #   jvm_workers_opts, master_working_dir.
  local master_name=$1

  # Run runcompss directly or inside the container image when one is given
  if [ -z "${container_image}" ]; then
    MCMD="${COMPSS_HOME}/Runtime/scripts/user/runcompss"
  else
    MCMD="singularity exec $container_opts $container_image ${cont_COMPSS_HOME}/Runtime/scripts/user/runcompss"
  fi

  # WARNING: SETS GLOBAL SCRIPT VARIABLE MCMD
  MCMD="$MCMD --project=${PROJECT_FILE} \
            --resources=${RESOURCES_FILE} \
            --uuid=${uuid} \
            --specific_log_dir=${specific_log_dir} \
            --master_name=${master_name} \
            --master_port=${master_port} \
            --streaming=${streaming} \
            --streaming_master_port=${streaming_master_port} \
            --jvm_master_opts=${jvm_master_opts} \
            --trace_label=${trace_label} \
            --jvm_workers_opts=${jvm_workers_opts}"
  # The master working dir flag is only appended when the user provided one
  if [ -n "${master_working_dir}" ]; then
    MCMD="$MCMD --master_working_dir=${master_working_dir}"
  fi
}

export_jvm_library(){
  # Locates the JVM shared library (libjvm.so on Linux, libjvm.dylib on
  # macOS) under JAVA_HOME and prepends its directory to LD_LIBRARY_PATH.
  # Aborts via action_error (JAVA_JRE_ERROR) when no JVM library is found.
  local jvm_lib_root
  local libjava
  local libjavafolder

  # Java 8 ships the JVM libraries under jre/lib; Java 9+ under lib.
  # Factoring the root removes the duplicated search logic per branch.
  if [ -d "${JAVA_HOME}/jre/lib/" ]; then
    jvm_lib_root="${JAVA_HOME}/jre/lib/"
  else
    jvm_lib_root="${JAVA_HOME}/lib/"
  fi

  libjava=$(find "${jvm_lib_root}" -name libjvm.so | head -n 1)
  if [ -z "$libjava" ]; then
    libjava=$(find "${jvm_lib_root}" -name libjvm.dylib | head -n 1)
    if [ -z "$libjava" ]; then
      action_error "${JAVA_JRE_ERROR}"
    fi
  fi

  # Declaration split from assignment so a dirname failure is not masked (SC2155)
  libjavafolder=$(dirname "$libjava")
  export LD_LIBRARY_PATH=$libjavafolder:${LD_LIBRARY_PATH}
}

#---------------------------------------------------------------------------------------
# XML HELPER FUNCTIONS (PROJECT.XML)
#---------------------------------------------------------------------------------------

###############################################
# Setup to generate project.xml
###############################################
xml_project_setup() {
  # Collects the shared disks visible to the nodes into the globals
  # shared_disks_names / shared_disks_info and starts the project file.
  # The second disk is only registered when defined and different from
  # the first one.
  shared_disks_names=""
  shared_disks_info=""
  if [ -n "${SHARED_DISK_PREFIX}" ]; then
    shared_disks_names=${SHARED_DISK_NAME}
    shared_disks_info="${SHARED_DISK_NAME}=${SHARED_DISK_PREFIX}"
  fi
  if [ -n "${SHARED_DISK_2_PREFIX}" ] && [ "${SHARED_DISK_2_PREFIX}" != "${SHARED_DISK_PREFIX}" ]; then
    local second_disk="${SHARED_DISK_NAME}2"
    shared_disks_names+=" ${second_disk}"
    shared_disks_info+=" ${second_disk}=${SHARED_DISK_2_PREFIX}"
  fi

  # Start writing the project file (init comes from generate_project.sh)
  init "${PROJECT_FILE}"
}

###############################################
# Add worker in master to project.xml
###############################################
xml_project_add_worker_in_master() {
  # Registers the master node as an additional worker in project.xml when
  # worker_in_master_cpus is non-zero. Optionally creates the worker
  # working directory on the master node through the launcher command.
  # Add worker in master if defined
  if [ "${worker_in_master_cpus}" -ne 0 ]; then
    local master_node_name
    # Rename master node if defined (XML hook takes precedence over queue hook)
    if [ -n "${NODE_NAME_XML}" ]; then
      master_node_name=$(${NODE_NAME_XML} "${master_node}" "${network}")
    elif [ -n "${NODE_NAME_QUEUE}" ]; then
      master_node_name=$(${NODE_NAME_QUEUE} "${master_node}")
    else
      master_node_name=${master_node}
    fi
    # Create working dirs (unless the queue CFG disables it)
    if [ -z "${CREATE_WORKING_DIRS}" ] || [ "${CREATE_WORKING_DIRS}" == "true" ]; then
      CMD="${LAUNCH_CMD} ${LAUNCH_EXTRA_FLAGS}${LAUNCH_PARAMS}${LAUNCH_SEPARATOR}${master_node} ${CMD_SEPARATOR}mkdir -m 755 -p ${worker_working_dir}${CMD_SEPARATOR}"
      if [ "${log_level}" == "debug" ] || [ "${log_level}" == "trace" ]; then
         echo "[LAUNCH_COMPSS] Worker WD mkdir: ${CMD}"
      fi
      # Intentionally unquoted: CMD must word-split into command + arguments
      # shellcheck disable=SC2086
      $CMD
    fi
    # Add compute node (add_compute_node comes from generate_project.sh)
    master_node_name=${master_node_name}${network}
    add_compute_node "${master_node_name}" "${worker_install_dir}" "${worker_working_dir}" "NULL" "NULL" "${library_path}" "NULL" "NULL" "${max_tasks_per_node}"
  fi
}

###############################################
# Add workers to project.xml
###############################################
xml_project_add_workers() {
  # Registers every worker node in project.xml and optionally creates its
  # working directory through the launcher command.
  # Add workers
  for worker_node in ${worker_nodes}; do
    local worker_node_name
    # Resolve the XML node name (renaming hooks take precedence)
    if [ ! -z "${NODE_NAME_XML}" ]; then
      worker_node_name=$(${NODE_NAME_XML} "${worker_node}" "${network}")
    elif [ -n "${NODE_NAME_QUEUE}" ]; then
      worker_node_name=$(${NODE_NAME_QUEUE} "${worker_node}")
    else
      worker_node_name=${worker_node}
    fi
    # Create working dirs (unless the queue CFG disables it)
    if [ -z "${CREATE_WORKING_DIRS}" ] || [ "${CREATE_WORKING_DIRS}" == "true" ]; then
      CMD="${LAUNCH_CMD} ${LAUNCH_EXTRA_FLAGS}${LAUNCH_PARAMS}${LAUNCH_SEPARATOR}${worker_node} ${CMD_SEPARATOR}mkdir -m 755 -p ${worker_working_dir}${CMD_SEPARATOR}"
      if [ "${log_level}" == "debug" ] || [ "${log_level}" == "trace" ]; then
         # NOTE(review): message lacks the [LAUNCH_COMPSS] prefix used by the
         # equivalent message in xml_project_add_worker_in_master
         echo "Worker WD mkdir: ${CMD}"
      fi
      # Intentionally unquoted: CMD must word-split into command + arguments
      # shellcheck disable=SC2086
      ${CMD}
    fi
    # Add compute node (add_compute_node comes from generate_project.sh)
    worker_node_name=${worker_node_name}${network}
    add_compute_node "${worker_node_name}" "${worker_install_dir}" "${worker_working_dir}" "NULL" "NULL" "${library_path}" "NULL" "NULL" "${max_tasks_per_node}"
  done
}

###############################################
# Add heterogeneity to project.xml
###############################################
xml_project_add_elasticity() {
  # Adds the SLURM cloud provider section to project.xml when elasticity
  # (dynamic node provisioning) is requested via the global "elasticity".
  # Reads many launch globals (master_node, master_port, debug, network,
  # jvm_workers_opts, fpga_prog, queue, reservation, qos, constraints,
  # licenses, container_*, cpu_affinity, automatic_scaling, LAUNCH_CMD, ...)
  # and calls add_cloud (from generate_project.sh).
  if [ -n "${elasticity}" ]; then
    local jvm_workers_opts_str
    local jvm_workers_opts_size
    local fpga_reprogram_str
    local fpga_reprogram_size
    jvm_workers_opts_str=$(echo "${jvm_workers_opts}" | tr "," " ")
    jvm_workers_opts_size=$(echo "${jvm_workers_opts_str}" | wc -w)
    fpga_reprogram_str=$(echo "${fpga_prog}" | tr "," " ")
    # BUGFIX: count words on the expanded (space-separated) string, mirroring
    # the JVM-opts computation above; counting the comma-separated
    # ${fpga_prog} always yielded 1 regardless of the number of arguments.
    fpga_reprogram_size=$(echo "${fpga_reprogram_str}" | wc -w)

    # Build the connector properties (space-separated key=value pairs)
    local props
    props="master_name=${master_node}"
    props="$props master_port=${master_port}"
    props="$props worker_debug=${debug}"
    props="$props jvm_opts_size=${jvm_workers_opts_size}"
    props="$props jvm_opts_str=${jvm_workers_opts_str}"
    props="$props fpga_reprogram_size=${fpga_reprogram_size}"
    props="$props fpga_reprogram_str=${fpga_reprogram_str}"
    props="$props network=${network}"
    props="$props estimated-creation-time=10"
    if [ -z "${container_image}" ]; then
      props="$props slurm_over_ssh=false"
    else
      props="$props slurm_over_ssh=true"
    fi
    if [ -n "${queue}" ]; then
      props="$props queue=${queue}"
    fi
    if [ -n "${reservation}" ]; then
      props="$props reservation=${reservation}"
    fi
    if [ -n "${qos}" ]; then
      props="$props qos=${qos}"
    fi
    if [ -n "${constraints}" ]; then
      # Encode spaces as '#' and '=' as '%' so the value survives the
      # space-separated key=value property format
      constraints_prop=$(echo ${constraints} | tr " " "#" | tr "=" "%")
      props="$props constraints=${constraints_prop}"
    fi
    if [ -n "${licenses}" ]; then
      props="$props licenses=${licenses}"
    fi
    if [ -n "${container_opts}" ]; then
      container_opts_prop=$(echo ${container_opts} | tr " " "#" | tr "=" "%")
      props="$props container_opts=${container_opts_prop}"
    fi
    if [ -n "${cpu_affinity}" ]; then
      props="$props cpu_affinity=${cpu_affinity}"
    fi
    if [ -n "${automatic_scaling}" ]; then
      props="$props automatic-scaling=${automatic_scaling}"
    fi
    if [ -n "${ELASTICITY_BATCH}" ]; then
      props="$props batch_job=${ELASTICITY_BATCH}"
    fi
    if [ -n "${cpus_per_task}" ] && [ "${cpus_per_task}" == "true" ]; then
      if [ -n "${cpus_per_node}" ] && [ "${cpus_per_node}" != "0" ]; then
        AUXILIAR_LAUNCH_PARAMS="${QARG_CPUS_PER_TASK} ${cpus_per_node} "
      fi
    fi
    # Encode the full launch command the same way as the other properties
    launch_command=$(echo "${LAUNCH_CMD} ${AUXILIAR_LAUNCH_PARAMS}${LAUNCH_EXTRA_FLAGS}${LAUNCH_PARAMS}${LAUNCH_SEPARATOR}" | tr " " "#" | tr "=" "%")
    props="$props launch_command=${launch_command}"

    add_cloud 0 0 "${elasticity}" "SLURM-Cluster" "${props}" "${container_image}" "${worker_install_dir}" "${worker_working_dir}" "NULL" "${appdir:-NULL}" "${library_path:-NULL}" "${cp:-NULL}" "${pythonpath:-NULL}" "${max_tasks_per_node}" "default"
  fi
}

###############################################
# Create complete project.xml
###############################################
create_xml_project() {
  # Generates the complete project.xml by chaining the helper stages:
  # setup, header, master node, workers (master-as-worker + regular),
  # elasticity section, and the closing footer.
  # add_header / add_master_node / add_footer come from generate_project.sh.
  xml_project_setup
  add_header
  add_master_node "0" "0" "0" "NULL" "${shared_disks_info}"
  xml_project_add_worker_in_master
  xml_project_add_workers
  xml_project_add_elasticity
  add_footer
}


#---------------------------------------------------------------------------------------
# XML HELPER FUNCTIONS (RESOURCES.XML)
#---------------------------------------------------------------------------------------

###############################################
# Setup to generate resources.xml
###############################################
xml_resources_setup() {
  # Collects the shared disks visible to the nodes into the globals
  # shared_disks_names / shared_disks_info and starts the resources file.
  # The second disk is only registered when defined and different from
  # the first one.
  shared_disks_names=""
  shared_disks_info=""
  if [ -n "${SHARED_DISK_PREFIX}" ]; then
    shared_disks_names=${SHARED_DISK_NAME}
    shared_disks_info="${SHARED_DISK_NAME}=${SHARED_DISK_PREFIX}"
  fi
  if [ -n "${SHARED_DISK_2_PREFIX}" ] && [ "${SHARED_DISK_2_PREFIX}" != "${SHARED_DISK_PREFIX}" ]; then
    local second_disk="${SHARED_DISK_NAME}2"
    shared_disks_names+=" ${second_disk}"
    shared_disks_info+=" ${second_disk}=${SHARED_DISK_2_PREFIX}"
  fi

  # Start writing the resources file (init comes from generate_resources.sh)
  init "${RESOURCES_FILE}"
}

###############################################
# Add worker in master to resources.xml
###############################################
xml_resources_add_worker_in_master() {
  # Registers the master node as an additional worker in resources.xml
  # when worker_in_master_cpus is non-zero.
  if [ "${worker_in_master_cpus}" -eq 0 ]; then
    return
  fi

  # Resolve the XML node name (renaming hooks take precedence)
  local master_node_name
  if [ -n "${NODE_NAME_XML}" ]; then
    master_node_name=$(${NODE_NAME_XML} "${master_node}" "${network}")
  elif [ -n "${NODE_NAME_QUEUE}" ]; then
    master_node_name=$(${NODE_NAME_QUEUE} "${master_node}")
  else
    master_node_name=${master_node}
  fi

  # Register the node with its network suffix
  # (add_compute_node comes from generate_resources.sh)
  master_node_name=${master_node_name}${network}
  add_compute_node "${master_node_name}" "${worker_in_master_cpus}" "${gpus_per_node}" "${fpgas_per_node}" "${worker_in_master_memory}" "${node_storage_bandwidth}" "${worker_min_port}" "${worker_max_port}" "${REMOTE_EXECUTOR:-NULL}" "${shared_disks_info}"
}

###############################################
# Add workers to resources.xml
###############################################
xml_resources_add_workers() {
  # Registers every worker node (space-separated global worker_nodes) in
  # resources.xml with its capacity and port configuration.
  local worker_node_name
  for worker_node in ${worker_nodes}; do
    # Resolve the XML node name (renaming hooks take precedence)
    if [ -n "${NODE_NAME_XML}" ]; then
      worker_node_name=$(${NODE_NAME_XML} "${worker_node}" "${network}")
    elif [ -n "${NODE_NAME_QUEUE}" ]; then
      worker_node_name=$(${NODE_NAME_QUEUE} "${worker_node}")
    else
      worker_node_name=${worker_node}
    fi
    # Register the node with its network suffix
    # (add_compute_node comes from generate_resources.sh)
    add_compute_node "${worker_node_name}${network}" "${cpus_per_node}" "${gpus_per_node}" "${fpgas_per_node}" "${node_memory}" "${node_storage_bandwidth}" "${worker_min_port}" "${worker_max_port}" "${REMOTE_EXECUTOR:-NULL}" "${shared_disks_info}"
  done
}

###############################################
# Add heterogeneity to resources.xml
###############################################
xml_resources_add_elasticity() {
  # Declares the SLURM cloud connector in resources.xml when elasticity is
  # enabled. A single "default" instance type describes one compute node.
  if [ -z "${elasticity}" ]; then
    return
  fi
  local instance_types
  instance_types="default:${cpus_per_node}:${gpus_per_node}:${fpgas_per_node}:${node_memory}:1:0.085"
  # add_cloud comes from generate_resources.sh
  add_cloud "SLURM-Cluster" "NULL" "slurm-conn.jar" "es.bsc.conn.slurm.SlurmConnector" "${container_image}" "${shared_disks_info:-NULL}" "10" "${worker_min_port}" "${worker_max_port}" "${REMOTE_EXECUTOR:-NULL}" "${instance_types}"
}

###############################################
# Create complete resources.xml
###############################################
create_xml_resources() {
  # Generates the complete resources.xml by chaining the helper stages:
  # setup, header, shared disks, workers (master-as-worker + regular),
  # elasticity section, and the closing footer.
  # add_header / add_shared_disks / add_footer come from generate_resources.sh.
  xml_resources_setup
  add_header
  add_shared_disks "${shared_disks_names}"
  xml_resources_add_worker_in_master
  xml_resources_add_workers
  xml_resources_add_elasticity
  add_footer
}


#---------------------------------------------------------------------------------------
# XML MAIN GENERATION FUNCTIONS
#---------------------------------------------------------------------------------------

###############################################
# Create XML files
###############################################
create_xml_files() {
  # Generates the project.xml and resources.xml used by runcompss.
  # Sources the generator helpers, writes both files into the worker
  # working dir (names suffixed with the current epoch seconds), and
  # exports PROJECT_FILE / RESOURCES_FILE for master_cmd.
  # A user-provided resources file (global "resources") is used as-is.
  if [ "${log_level}" != "off" ]; then
     echo "[LAUNCH_COMPSS] XML - Begin generation of project and resources files"
     # Log Master Node
     echo "[LAUNCH_COMPSS] Master will run in ${master_node}"
     # Log List of Workers
     echo "[LAUNCH_COMPSS] List of workers: ${worker_nodes}"
  fi
  # Timestamp used to give both files a unique, matching suffix
  local sec
  sec=$(date +%s)

  # Generate project file
  # shellcheck source=../system/xmls/generate_project.sh
  # shellcheck disable=SC1091
  source "${COMPSS_HOME}Runtime/scripts/system/xmls/generate_project.sh"
  PROJECT_FILE=${worker_working_dir}/project_$sec.xml
  if [ "${log_level}" != "off" ]; then
     echo "[LAUNCH_COMPSS] XML - Generating project at ${PROJECT_FILE}"
  fi
  export PROJECT_FILE
  create_xml_project

  # Generate resources file
  if [ -n "$resources" ]; then
    # Custom user resources.xml file
    if [ "${log_level}" != "off" ]; then
        echo "[LAUNCH_COMPSS] XML - Using user-defined resources.xml file at ${resources}"
    fi
    RESOURCES_FILE=${resources}
  else
    # Generate file
    # shellcheck source=../system/xmls/generate_resources.sh
    # shellcheck disable=SC1091
    source "${COMPSS_HOME}Runtime/scripts/system/xmls/generate_resources.sh"
    RESOURCES_FILE=${worker_working_dir}/resources_$sec.xml
    if [ "${log_level}" != "off" ]; then
        echo "[LAUNCH_COMPSS] XML - Generating resources at ${RESOURCES_FILE}"
    fi
    export RESOURCES_FILE
    create_xml_resources
  fi

  # LOG AND FINISH
  if [ "${log_level}" != "off" ]; then
     echo "[LAUNCH_COMPSS] XML - Generation of project and resources files finished"
     echo "[LAUNCH_COMPSS] XML - Project.xml:   ${PROJECT_FILE}"
     echo "[LAUNCH_COMPSS] XML - Resources.xml: ${RESOURCES_FILE}"
  fi
}

###############################################
# Initialize Heterogeneous XML files
# First phase of heterogeneous generation: writes the XML
# headers, the master node and the worker-in-master entries
# into ${PWD}/project_<suffix>.xml and resources_<suffix>.xml.
# Globals read:  log_level, master_node, worker_nodes, COMPSS_HOME,
#                shared_disks_info, shared_disks_names, elasticity
# Globals set:   PROJECT_FILE, RESOURCES_FILE (exported)
###############################################
init_het_xml_files() {
  local tag=$1

  if [[ "${log_level}" != "off" ]]; then
    # Log Master Node
    echo "[LAUNCH_COMPSS] Master will run in ${master_node}"
    # Log List of Workers
    echo "[LAUNCH_COMPSS] List of workers: ${worker_nodes}"

    # LOG
    echo "[LAUNCH_COMPSS] HT-XML - Generating project and resources files"
  fi

  # --- Project file ---
  # shellcheck source=../system/xmls/generate_project.sh
  # shellcheck disable=SC1091
  source "${COMPSS_HOME}Runtime/scripts/system/xmls/generate_project.sh"
  PROJECT_FILE="${PWD}/project_${tag}.xml"
  export PROJECT_FILE
  if [[ "${log_level}" != "off" ]]; then
    echo "[LAUNCH_COMPSS] HT-XML - Project.xml:   ${PROJECT_FILE}"
  fi
  # Setup project variables and scripts
  xml_project_setup
  # Header + master + workers (helpers come from generate_project.sh)
  add_header
  add_master_node "0" "0" "0" "NULL" "${shared_disks_info}"
  xml_project_add_worker_in_master
  xml_project_add_workers
  # Elasticity is not supported together with heterogeneity yet
  if [[ -n "${elasticity}" ]]; then
    echo " WARNING: Heterogeneity with elasticity not yet supported !"
  fi

  # --- Resources file ---
  # shellcheck source=../system/xmls/generate_resources.sh
  # shellcheck disable=SC1091
  source "${COMPSS_HOME}Runtime/scripts/system/xmls/generate_resources.sh"
  RESOURCES_FILE="${PWD}/resources_${tag}.xml"
  export RESOURCES_FILE
  if [[ "${log_level}" != "off" ]]; then
    echo "[LAUNCH_COMPSS] HT-XML Resources.xml: ${RESOURCES_FILE}"
  fi
  # Setup resources variables and scripts
  xml_resources_setup
  # Header + shared disks + workers (helpers come from generate_resources.sh)
  add_header
  add_shared_disks "${shared_disks_names}"
  xml_resources_add_worker_in_master
  xml_resources_add_workers
  # Elasticity is not supported together with heterogeneity yet
  if [[ -n "${elasticity}" ]]; then
    echo " WARNING: Heterogeneity with elasticity not yet supported !"
  fi
}

###############################################
# Add worker nodes to Heterogeneous XML files
# Intermediate phase of heterogeneous generation: re-sources
# the generators and appends the current worker set to the
# already-open project/resources XML files for <suffix>.
# Globals read:  log_level, worker_nodes, COMPSS_HOME
# Globals set:   PROJECT_FILE, RESOURCES_FILE (exported)
###############################################
add_het_xml_files() {
  local tag=$1

  if [[ "${log_level}" != "off" ]]; then
    echo "[LAUNCH_COMPSS] HT-XML - Continue generating project and resources files"
    echo "[LAUNCH_COMPSS] HT-XML - List of workers: ${worker_nodes}"
  fi

  # --- Project file: append worker descriptions ---
  # shellcheck source=../system/xmls/generate_project.sh
  # shellcheck disable=SC1091
  source "${COMPSS_HOME}Runtime/scripts/system/xmls/generate_project.sh"
  PROJECT_FILE="${PWD}/project_${tag}.xml"
  export PROJECT_FILE
  if [[ "${log_level}" != "off" ]]; then
    echo "[LAUNCH_COMPSS] HT-XML - Project.xml:   ${PROJECT_FILE}"
  fi
  xml_project_setup
  xml_project_add_workers

  # --- Resources file: append worker descriptions ---
  # shellcheck source=../system/xmls/generate_resources.sh
  # shellcheck disable=SC1091
  source "${COMPSS_HOME}Runtime/scripts/system/xmls/generate_resources.sh"
  RESOURCES_FILE="${PWD}/resources_${tag}.xml"
  export RESOURCES_FILE
  if [[ "${log_level}" != "off" ]]; then
    echo "[LAUNCH_COMPSS] HT-XML - Resources.xml: ${RESOURCES_FILE}"
  fi
  xml_resources_setup
  xml_resources_add_workers
}

###############################################
# Finish Heterogeneous XML files
# Last phase of heterogeneous generation: re-sources the
# generators and appends the closing footer to both the
# project and resources XML files for <suffix>.
# Globals read:  log_level, COMPSS_HOME
# Globals set:   PROJECT_FILE, RESOURCES_FILE (exported)
###############################################
fini_het_xml_files() {
  local suffix=$1
  if [ "${log_level}" != "off" ]; then
    # LOG
    echo "[LAUNCH_COMPSS] HT-XML - Finishing generation of project and resources files"
    echo "[LAUNCH_COMPSS] HT-XML - Project.xml:   ${PROJECT_FILE}"
    echo "[LAUNCH_COMPSS] HT-XML - Resources.xml: ${RESOURCES_FILE}"
  fi

  # GENERATE PROJECT FILE
  # shellcheck source=../system/xmls/generate_project.sh
  # shellcheck disable=SC1091
  source "${COMPSS_HOME}Runtime/scripts/system/xmls/generate_project.sh"
  PROJECT_FILE=${PWD}/project_$suffix.xml
  export PROJECT_FILE
  if [ "${log_level}" != "off" ]; then
    echo "[LAUNCH_COMPSS] HT-XML - Project.xml:   ${PROJECT_FILE}"
  fi

  # Setup project variables and scripts
  xml_project_setup
  # Close project (from generate_project.sh)
  add_footer

  # GENERATE RESOURCES FILE
  # shellcheck source=../system/xmls/generate_resources.sh
  # shellcheck disable=SC1091
  source "${COMPSS_HOME}Runtime/scripts/system/xmls/generate_resources.sh"
  RESOURCES_FILE=${PWD}/resources_$suffix.xml
  export RESOURCES_FILE
  if [ "${log_level}" != "off" ]; then
    echo "[LAUNCH_COMPSS] HT-XML - Resources.xml: ${RESOURCES_FILE}"
  fi

  # Setup resources variables and scripts
  xml_resources_setup
  # Close resources (from generate_resources.sh)
  add_footer

  # LOG
  if [ "${log_level}" != "off" ]; then
    # Fixed log message: was missing the space after the dash
    echo "[LAUNCH_COMPSS] HT-XML - Generation of project and resources files finished"
  fi
}


#---------------------------------------------------------------------------------------
# MAIN FUNCTIONS
#---------------------------------------------------------------------------------------

###############################################
# Function to get the arguments
# Parses short flags via getopts and long options through the
# "-" pseudo-flag trick (--name=value arrives as OPTARG).
# Options that runcompss must also receive are re-appended to
# args_pass; options consumed by the launcher are only stored
# in their own global variable.
# Globals set: one variable per option (master_node, worker_nodes,
#   cpus_per_node, ...), plus args_pass, fullAppPath, command_to_run.
###############################################
get_args() {
  # Avoid enqueue if there is no application
  if [ $# -eq 0 ]; then
    usage 1
  fi

  # Parse COMPSs Options
  while getopts hvgtmdp-: flag; do
    # Treat the argument
    case "$flag" in
      h)
        # Display help
        usage 0
        ;;
      v)
        # Display version
        display_version 0
        ;;
      d)
        log_level=${DEFAULT_DEBUG_ARG}
        # Keep it for runcompss (to add them to master)
        args_pass="$args_pass -$flag"
        ;;
      t)
        tracing=${TRACING_ENABLED}
        # Keep it for runcompss (to add them to master)
        args_pass="$args_pass -$flag"
        ;;
      -)
        # Check more complex arguments
        case "$OPTARG" in
          help)
            # Display help
            usage 0
            ;;
          version)
            # Display compss version
            display_version 0
            ;;
          opts)
            # Display options
            show_opts 0
            ;;
          master_node=*)
            if [ -z "${master_node}" ]; then
              master_node=${OPTARG//master_node=/}
            else
              action_error "Duplicated master_node parameter. Please check command arguments."
            fi
            ;;
          worker_nodes=*)
            if [ -z "${worker_nodes}" ]; then
              worker_nodes=${OPTARG//worker_nodes=/}
            else
              action_error "Duplicated worker_nodes parameter. Please check command arguments."
            fi
            ;;
          resources=*)
            resources=${OPTARG//resources=/}
            ;;
          sc_cfg=*)
            sc_cfg=${OPTARG//sc_cfg=/}
            ;;
          cpus_per_node=*)
            cpus_per_node=${OPTARG//cpus_per_node=/}
            ;;
          gpus_per_node=*)
            gpus_per_node=${OPTARG//gpus_per_node=/}
            ;;
          fpgas_per_node=*)
            fpgas_per_node=${OPTARG//fpgas_per_node=/}
            ;;
          io_executors=*)
            io_executors=${OPTARG//io_executors=/}
            ;;
          max_tasks_per_node=*)
            # Fixed: added the trailing "/" for consistency with the other
            # substitutions (behavior is identical: the match is deleted)
            max_tasks_per_node=${OPTARG//max_tasks_per_node=/}
            ;;
          cpu_affinity=*)
            # Fixed: added the trailing "/" for consistency (same behavior)
            cpu_affinity=${OPTARG//cpu_affinity=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          gpu_affinity=*)
            gpu_affinity=${OPTARG//gpu_affinity=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          fpga_affinity=*)
            fpga_affinity=${OPTARG//fpga_affinity=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          fpga_reprogram=*)
            fpga_prog=${OPTARG//fpga_reprogram=/}
            ;;
          master_working_dir=*)
            master_working_dir=${OPTARG//master_working_dir=/}
            ;;
          worker_working_dir=*)
            worker_working_dir=${OPTARG//worker_working_dir=/}
            ;;
          worker_in_master_cpus=*)
            worker_in_master_cpus=${OPTARG//worker_in_master_cpus=/}
            ;;
          worker_in_master_memory=*)
            worker_in_master_memory=${OPTARG//worker_in_master_memory=/}
            ;;
          node_memory=*)
            node_memory=${OPTARG//node_memory=/}
            ;;
          node_storage_bandwidth=*)
            node_storage_bandwidth=${OPTARG//node_storage_bandwidth=/}
            ;;
          network=*)
            network=${OPTARG//network=/}
            ;;
          lang=*)
            lang=${OPTARG//lang=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          streaming=*)
            streaming=${OPTARG//streaming=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          streaming_master_port=*)
            streaming_master_port=${OPTARG//streaming_master_port=/}
            # Keep the argument because we will overwrite it
            ;;
          library_path=*)
            library_path=${OPTARG//library_path=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          classpath=*)
            cp=${OPTARG//classpath=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          pythonpath=*)
            pythonpath=${OPTARG//pythonpath=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          appdir=*)
            appdir=${OPTARG//appdir=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          env_script=*)
            # Multiple scripts accumulate as a ":"-separated list
            if [ -z "${env_script}" ]; then
              env_script=${OPTARG//env_script=/}
            else
              env_script="${env_script}:${OPTARG//env_script=/}"
            fi
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          log_level=*)
            log_level=${OPTARG//log_level=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          debug)
            log_level=${DEFAULT_DEBUG_ARG}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          tracing=*)
            tracing=${OPTARG//tracing=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          tracing)
            tracing=${TRACING_ENABLED}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          extrae_config_file=*)
            custom_extrae_file=${OPTARG//extrae_config_file=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          extrae_config_file_python=*)
            custom_extrae_config_file_python=${OPTARG//extrae_config_file_python=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          trace_label=*)
            trace_label=${OPTARG//trace_label=/}
            ;;
          tracing_task_dependencies=*)
            # Activates the communication tracing of task dependencies for the first worker node
            tracing_task_dependencies=${OPTARG//tracing_task_dependencies=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          generate_trace=*)
            # Specify if the tracing system merges the events in a trace file
            tracing_generate_trace=${OPTARG//generate_trace=/}
            args_pass="$args_pass --$OPTARG"
            ;;
          delete_trace_packages=*)
            # Specify if the tracing system deletes the packages after merging
            tracing_delete_packages=${OPTARG//delete_trace_packages=/}
            args_pass="$args_pass --$OPTARG"
            ;;
          custom_threads=*)
            # Specify if the tracing system re-organizes the threads
            tracing_custom_threads=${OPTARG//custom_threads=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          comm=*)
            comm=${OPTARG//comm=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          jvm_master_opts=*)
            jvm_master_opts=${OPTARG//jvm_master_opts=/}
            ;;
          jvm_workers_opts=*)
            jvm_workers_opts=${OPTARG//jvm_workers_opts=/}
            ;;
          jvm_worker_in_master_opts=*)
            jvm_worker_in_master_opts=${OPTARG//jvm_worker_in_master_opts=/}
            ;;
          storage_conf=*)
            storage_conf=${OPTARG//storage_conf=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          task_execution=*)
            taskExecution=${OPTARG//task_execution=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          persistent_worker_c=*)
            persistent_worker_c=${OPTARG//persistent_worker_c=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          python_interpreter=*)
            python_interpreter=${OPTARG//python_interpreter=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          python_propagate_virtual_environment=*)
            python_propagate_virtual_environment=${OPTARG//python_propagate_virtual_environment=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          python_mpi_worker=*)
            python_mpi_worker=${OPTARG//python_mpi_worker=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          python_memory_profile)
            python_memory_profile=true
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          python_worker_cache=*)
            python_worker_cache=${OPTARG//python_worker_cache=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          python_cache_profiler=*)
            python_cache_profiler=${OPTARG//python_cache_profiler=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          scheduler=*)
            scheduler=${OPTARG//scheduler=/}
            # Keep it for runcompss (to add them to master)
            args_pass="$args_pass --$OPTARG"
            ;;
          prolog=*)
            action=${OPTARG//prolog=/}
            prologActions[numPrologActions]="$action"
            numPrologActions=$((numPrologActions + 1))
            ;;
          epilog=*)
            action=${OPTARG//epilog=/}
            epilogActions[numEpilogActions]="$action"
            numEpilogActions=$((numEpilogActions + 1))
            ;;
          uuid=*)
            # Modified for heterogeneity
            uuid=${OPTARG//uuid=/}
            ;;
          specific_log_dir=*)
            # Specific log dir is automatically generated by launch_compss.sh. Remove it from COMPSs flags
            echo "WARNING: specific_log_dir is automatically generated. Omitting parameter. Define log_dir instead."
            ;;
          log_dir=*)
            log_dir=${OPTARG//log_dir=/}
            ;;
          base_log_dir=*)
            log_dir=${OPTARG//base_log_dir=/}
            echo "WARNING: base_log_dir is deprecated. Please, change it to log_dir."
            ;;
          master_name=*)
            # Master name is automatically generated by launch_compss.sh. Remove it from COMPSs flags
            echo "WARNING: master_name is automatically generated. Omitting parameter"
            ;;
          master_port=*)
            # Remove from runcompss since launcher will add it
            master_port=${OPTARG//master_port=/}
            ;;
          worker_port_range=*)
            # Remove from runcompss since launcher will add it
            worker_port_range=${OPTARG//worker_port_range=/}
            ;;
          container_image=*)
            container_image=${OPTARG//container_image=/}
            ;;
          container_compss_path=*)
            container_compss_path=${OPTARG//container_compss_path=/}
            ;;
          container_opts=*)
            container_opts=${OPTARG//container_opts=/}
            ;;
          elasticity=*)
            elasticity=${OPTARG//elasticity=/}
            args_pass="$args_pass --conn=es.bsc.compss.connectors.DefaultNoSSHConnector"
            ;;
          automatic_scaling=*)
            automatic_scaling=${OPTARG//automatic_scaling=/}
            ;;
          queue=*)
            # Added for elasticity
            queue=${OPTARG//queue=/}
            ;;
          reservation=*)
            # Added for elasticity
            reservation=${OPTARG//reservation=/}
            ;;
          qos=*)
            # Added for elasticity
            qos=${OPTARG//qos=/}
            ;;
          forward_cpus_per_node=*)
            cpus_per_task=${OPTARG//forward_cpus_per_node=/}
            ;;
          constraints=*)
            # Added for elasticity
            constraints=${OPTARG//constraints=/}
            ;;
          licenses=*)
            # Added for lenox/slurm
            licenses=${OPTARG//licenses=/}
            ;;
          jupyter_notebook)
            jupyter_notebook=true
            jupyter_notebook_path=${DEFAULT_JUPYTER_NOTEBOOK_PATH}
            ;;
          jupyter_notebook=*)
            jupyter_notebook=true
            jupyter_notebook_path=${OPTARG//jupyter_notebook=/}
            ;;
          ipython)
            ipython=true
            ;;
          command)
            run_command=true
            ;;
          xmls_phase=*)
            # Added for heterogeneity
            xmls_phase=${OPTARG//xmls_phase=/}
            ;;
          xmls_suffix=*)
            # Added for heterogeneity
            xmls_suffix=${OPTARG//xmls_suffix=/}
            ;;
          initial_hostid=*)
            # Added for tracing in heterogeneity
            initial_hostid=${OPTARG//initial_hostid=/}
            ;;
          gen_coredump)
            # Generate the coredump
            gen_coredump=true
            args_pass="$args_pass --$OPTARG"
            ;;
          keep_workingdir)
            # Keep the working dir after execution
            keep_wd=true
            args_pass="$args_pass --$OPTARG"
            ;;
          *)
            # Flag didn't match any pattern. Add to COMPSs
            args_pass="$args_pass --$OPTARG"
            ;;
        esac
        ;;
      *)
        # Flag didn't match any pattern. End of COMPSs flags
        args_pass="$args_pass -$flag"
        ;;
    esac
  done

  # Shift COMPSs arguments
  shift $((OPTIND-1))

  # Wrap full app path for lang inference
  fullAppPath=$1

  # Forward trace generation/packaging defaults if the user did not set them
  if [ -z "${tracing_generate_trace}" ]; then
    args_pass="$args_pass --generate_trace=${DEFAULT_TRACE_GENERATION}"
  fi

  if [ -z "${tracing_delete_packages}" ]; then
    args_pass="$args_pass --delete_trace_packages=${DEFAULT_TRACE_DELETE_PACKAGE}"
  fi
  # Pass application name and args
  args_pass="$args_pass $*"
  command_to_run="$*"
}

###############################################
# Function to check the arguments
# Validates every option parsed by get_args, fills in defaults,
# sources the supercomputer and queue-system configuration files
# and derives helper values (ports, number of workers, paths).
# Aborts via display_error on invalid combinations.
# Globals read/written: virtually every option variable set by
# get_args, plus the DEFAULT_* constants from this file and the
# sourced cfg files.
###############################################
check_args() {
  #
  # SC Configuration checks
  #
  # Check sc configuration argument
  if [ -z "${sc_cfg}" ]; then
    sc_cfg=${DEFAULT_SC_CFG}
  fi
  if [ -f "${sc_cfg}" ]; then
    # sc_cfg is a file
    local scCfgFullPath=${sc_cfg}
  else
    # If not a file, check if it is one of the already installed configurations
    if [[ ${sc_cfg} != *cfg ]]; then
      # Add cfg suffix
      sc_cfg=${sc_cfg}.cfg
    fi

    local scCfgFullPath="${COMPSS_HOME}Runtime/scripts/queues/supercomputers/${sc_cfg}"

    if [ ! -f "${scCfgFullPath}" ]; then
      # CFG file doesn't exist
      display_error "${ERROR_CFG_SC}"
    fi
  fi

  # Source SC CFG env
  # shellcheck source=../queues/supercomputers/default.cfg
  # shellcheck disable=SC1091
  source "${scCfgFullPath}"

  # Check queue configuration env (QUEUE_SYSTEM comes from the SC cfg)
  local queueCfgFullPath="${COMPSS_HOME}Runtime/scripts/queues/queue_systems/${QUEUE_SYSTEM}.cfg"
  if [ ! -f "${queueCfgFullPath}" ]; then
    # CFG file doesn't exist
    display_error "${ERROR_CFG_Q}"
  fi

  # Source queue system CFG env
  # shellcheck source=../queues/queue_systems/slurm.cfg
  # shellcheck disable=SC1091
  source "${queueCfgFullPath}"

  #
  # Infrastructure checks
  #
  # Changed error by warning because in heterogeneous there can be a launch of just workers
  if [ -z "${master_node}" ]; then
    echo "${ERROR_MASTER_NODE}"
  fi

  if [ -z "${network}" ]; then
    network=${DEFAULT_NETWORK}
  elif [ "${network}" == "default" ]; then
    network=${DEFAULT_NETWORK}
  elif [ "${network}" != "ethernet" ] && [ "${network}" != "infiniband" ] && [ "${network}" != "data" ]; then
    display_error "${ERROR_NETWORK}"
  fi

  if [ -z "${storage_conf}" ]; then
    storage_conf=${DEFAULT_STORAGE_CONF}
  fi

  if [ -z "${taskExecution}" ]; then
    taskExecution=${DEFAULT_TASK_EXECUTION}
  fi

  if [ -z "${persistent_worker_c}" ]; then
    persistent_worker_c=${DEFAULT_PERSISTENT_WORKER_C}
  fi

  if [ -z "${python_interpreter}" ]; then
    python_interpreter=${DEFAULT_PYTHON_INTERPRETER}
    args_pass="--python_interpreter=${python_interpreter} ${args_pass}"
  fi
  python_version=$( ${python_interpreter} -c "import sys; print(sys.version_info[:][0])" )

  if [ -z "${python_propagate_virtual_environment}" ]; then
    python_propagate_virtual_environment=${DEFAULT_PYTHON_PROPAGATE_VIRTUAL_ENVIRONMENT}
  fi

  if [ -z "${custom_extrae_config_file_python}" ]; then
    custom_extrae_config_file_python=${DEFAULT_CUSTOM_EXTRAE_FILE_PYTHON}
  fi

  if [ -z "${python_mpi_worker}" ]; then
    python_mpi_worker=${DEFAULT_PYTHON_MPI_WORKER}
  fi

  if [ -z "${python_memory_profile}" ]; then
    python_memory_profile=${DEFAULT_PYTHON_MEMORY_PROFILE}
  fi

  if [ -z "${python_worker_cache}" ]; then
    python_worker_cache=${DEFAULT_PYTHON_WORKER_CACHE}
  fi

  if [ -z "${python_cache_profiler}" ]; then
    python_cache_profiler=${DEFAULT_PYTHON_CACHE_PROFILER}
  fi

  if [ -z "${automatic_scaling}" ]; then
    automatic_scaling=${DEFAULT_AUTOMATIC_SCALING}
  fi

  #
  # Node checks
  #
  if [ -z "${max_tasks_per_node}" ]; then
    max_tasks_per_node=${DEFAULT_MAX_TASKS_PER_NODE}
  fi

  if [ -z "${cpus_per_node}" ]; then
    cpus_per_node=${DEFAULT_CPUS_PER_NODE}
  fi

  if [ "${cpus_per_node}" -lt "${MINIMUM_CPUS_PER_NODE}" ]; then
    display_error "${ERROR_NUM_CPUS}"
  fi

  if [ -z "${cpus_per_task}" ]; then
    cpus_per_task=${DEFAULT_FORWARD_CPUS_PER_NODE}
  fi

  if [ -z "${gpus_per_node}" ]; then
    gpus_per_node=${DEFAULT_GPUS_PER_NODE}
  fi

  if [ -z "${fpgas_per_node}" ]; then
    fpgas_per_node=${DEFAULT_FPGAS_PER_NODE}
  fi

  if [ -z "${io_executors}" ]; then
    io_executors=${DEFAULT_IO_EXECUTORS}
  fi

  if [ "${io_executors}" -lt 0 ]; then
    display_error "${ERROR_NUM_IO_EXECUTORS}"
  fi

  if [ -z "${cpu_affinity}" ]; then
    cpu_affinity=${DEFAULT_CPU_AFFINITY}
  fi

  if [ -z "${gpu_affinity}" ]; then
    gpu_affinity=${DEFAULT_GPU_AFFINITY}
  fi

  if [ -z "${fpga_affinity}" ]; then
    fpga_affinity=${DEFAULT_FPGA_AFFINITY}
  fi

  if [ -z "${fpga_prog}" ]; then
    fpga_prog=${DEFAULT_FPGA_REPROGRAM}
  fi

  if [ -z "${node_memory}" ]; then
    node_memory=${DEFAULT_NODE_MEMORY}
  elif [ "${node_storage_bandwidth}" ]; then
    : # placeholder removed; see storage bandwidth checks below
  fi

  if [ -z "${node_storage_bandwidth}" ]; then
    node_storage_bandwidth=${DEFAULT_NODE_STORAGE_BANDWIDTH}
  elif [ "${node_storage_bandwidth}" -gt "${DEFAULT_NODE_STORAGE_BANDWIDTH}" ]; then
    display_error "${ERROR_NODE_STORAGE_BANDWIDTH_TOO_HIGH} ${node_storage_bandwidth} > ${DEFAULT_NODE_STORAGE_BANDWIDTH}"
  elif [ "${node_storage_bandwidth}" -le 0 ]; then
    display_error "${ERROR_NODE_STORAGE_BANDWIDTH_INVALID} ${node_storage_bandwidth} <= 0"
  fi

  if [ -z "${worker_in_master_cpus}" ]; then
    worker_in_master_cpus=${DEFAULT_WORKER_IN_MASTER_CPUS}
  fi

  if [ "${worker_in_master_cpus}" -gt "${cpus_per_node}" ]; then
    display_error "${ERROR_WORKER_IN_MASTER_CPUS}"
  fi

  # Check worker nodes and calculate number of workers
  if [ -z "${worker_nodes}" ]; then
    if [ "${worker_in_master_cpus}" -lt "1" ]; then
      display_error "${ERROR_WORKER_NODES}"
    else
      worker_nodes=""
      total_num_workers=1
    fi
  else
    # Intentional word-splitting: worker_nodes is a space-separated list
    # shellcheck disable=SC2206
    local words=( ${worker_nodes} )
    total_num_workers=${#words[@]}
    if [ "${worker_in_master_cpus}" -gt "0" ]; then
      total_num_workers=$((total_num_workers+1))
    fi
  fi

  if [ -z "${worker_in_master_memory}" ]; then
    worker_in_master_memory=${DEFAULT_WORKER_IN_MASTER_MEMORY}
  elif [ "${worker_in_master_memory}" != "disabled" ] && ! [[ "${worker_in_master_memory}" =~ ^[0-9]+$ ]]; then
    display_error "${ERROR_WORKER_IN_MASTER_MEMORY}"
  fi
  if [ "${worker_in_master_memory}" != "${DEFAULT_WORKER_IN_MASTER_MEMORY}" ] && [ "${node_memory}" != "${DEFAULT_NODE_MEMORY}" ]; then
    if [ "${worker_in_master_memory}" -gt "${node_memory}" ]; then
      display_error "${ERROR_WORKER_IN_MASTER_MEMORY_TOO_HIGH} ${worker_in_master_memory} < ${node_memory} "
    fi
  fi

  if [ "${worker_in_master_cpus}" -gt 0 ] && [ "${worker_in_master_memory}" -le 0 ]; then
    display_error "${ERROR_WORKER_IN_MASTER_MEMORY_NOT_SPECIFIED}"
  fi

  if [ -z "${master_port}" ]; then
    rand_num=$RANDOM
    offset=$((rand_num % DEFAULT_MASTER_PORT_RAND_RANGE))
    master_port=$((DEFAULT_MASTER_PORT_BASE + offset))
  fi

  # Skip ports already in use or in TIME_WAIT (fixed: quoted expansions, -n instead of ! -z)
  while [ "$(netstat | grep -v CLOSED | grep -c "${master_port}")" -gt 0 ] || [ -n "$(lsof -i :"${master_port}")" ]; do
    echo "Port ${master_port} is already in use or time_wait, incrementing port by 1"
    master_port=$((master_port+1))
  done

  #
  # Worker port range
  #
  if [ -z "${worker_port_range}" ]; then
    worker_max_port=${DEFAULT_WORKER_MAX_PORT}
    worker_min_port=${DEFAULT_WORKER_MIN_PORT}
  else
    worker_max_port=$(echo "${worker_port_range}" | tr "," "\\t" | awk '{ print $2 }')
    worker_min_port=$(echo "${worker_port_range}" | tr "," "\\t" | awk '{ print $1 }')
  fi

  #
  # Working Directory Checks
  #
  if [ -z "${master_working_dir}" ]; then
    if [ -n "${DEFAULT_MASTER_WORKING_DIR}" ]; then
      master_working_dir=${DEFAULT_MASTER_WORKING_DIR}
    fi
  elif [ "${master_working_dir}" != "${LOCAL_DISK_NAME}" ] && [ "${master_working_dir}" != "${SHARED_DISK_NAME}" ] && [[ ${master_working_dir} != /* ]]; then
    display_error "${ERROR_MASTER_WD}"
  fi

  if [ -z "${worker_working_dir}" ]; then
    worker_working_dir=${DEFAULT_WORKER_WORKING_DIR}
  elif [ "${worker_working_dir}" != "${LOCAL_DISK_NAME}" ] && [ "${worker_working_dir}" != "${SHARED_DISK_NAME}" ] && [[ ${worker_working_dir} != /* ]]; then
    display_error "${ERROR_WORKER_WD}"
  fi

  # Pick the scheduler that matches the working-dir disk type when unset
  if [ -z "${scheduler}" ]; then
    if [ "${worker_working_dir}" != "$LOCAL_DISK_NAME" ]; then
      scheduler=${DEFAULT_SHARED_DISK_SCHEDULER}
    else
      scheduler=${DEFAULT_LOCAL_DISK_SCHEDULER}
    fi
    args_pass="--scheduler=${scheduler} ${args_pass}"
  fi

  #
  # JVM Checks
  #
  if [ -z "${jvm_master_opts}" ]; then
    jvm_master_opts=${DEFAULT_JVM_MASTER}
  fi
  jvm_master_opts=${jvm_master_opts//\"/}

  if [ -z "${jvm_workers_opts}" ]; then
    jvm_workers_opts=${DEFAULT_JVM_WORKERS}
  fi
  jvm_workers_opts=${jvm_workers_opts//\"/}

  if [ -z "${jvm_worker_in_master_opts}" ] && [ -n "${worker_in_master_cpus}" ]; then
    jvm_worker_in_master_opts=${DEFAULT_JVM_WORKER_IN_MASTER}
  fi
  jvm_worker_in_master_opts=${jvm_worker_in_master_opts//\"/}

  #
  # Runtime and Tools Checks
  #
  if [ -z "${log_level}" ]; then
    log_level=${DEFAULT_DEBUG}
  fi

  if [ -z "${comm}" ]; then
    comm=${DEFAULT_COMMUNICATION_ADAPTOR}
  fi

  if [ -z "${tracing}" ]; then
    tracing=${DEFAULT_TRACING}
  fi

  if [ -z "${tracing_task_dependencies}" ]; then
    tracing_task_dependencies=${DEFAULT_TRACING_TASK_DEP}
  fi

  if [ -z "${custom_extrae_file}" ]; then
    custom_extrae_file=${DEFAULT_CUSTOM_EXTRAE_FILE}
  fi

  if [ -z "${trace_label}" ]; then
    # ENV_VAR_JOB_ID holds the *name* of the queue system's job-id variable
    trace_label=${total_num_workers}workers_${cpus_per_node}cpus_${!ENV_VAR_JOB_ID}
  fi

  if [ -z "${jupyter_notebook}" ]; then
    jupyter_notebook=${DEFAULT_JUPYTER_NOTEBOOK}
    jupyter_notebook_path=${DEFAULT_JUPYTER_NOTEBOOK_PATH}
  fi

  if [ -z "${ipython}" ]; then
    ipython=${DEFAULT_IPYTHON}
  fi

  if [ -z "${run_command}" ]; then
    run_command=false
  fi

  #
  # Application Checks
  #
  # Lang
  if [ -z "$lang" ]; then
    # Try to infer language
    infer_language "$fullAppPath"
  elif [ "$lang" = "java" ]; then
    lang=java
  elif [ "$lang" = "c" ]; then
    lang=c
  elif [ "$lang" = "python" ]; then
    lang=python
  else
    display_error "${ERROR_LANGUAGE}"
  fi

  if [ -z "${env_script}" ]; then
    env_script="null"
  fi

  # Library path
  if [ -z "${library_path}" ]; then
    library_path=${DEFAULT_LIBRARY_PATH}
  fi

  # Classpath: turn relative entries into absolute paths
  if [ -z "$cp" ]; then
    cp=${DEFAULT_CLASSPATH}
  else
    fcp=""
    for currcp in ${cp//:/$'\n'}; do
      if [ ! "${currcp:0:1}" == '/' ]; then            # Relative paths to abs
        if [ -d "$currcp" ] || [ -f "$currcp" ]; then  # If the dir/file exists
          absdir="$(cd "$(dirname "$currcp")" && pwd)" # Get absolute dir
          file="$(basename "$currcp")"
          currcp="$absdir/$file"
        else
          echo "[ WARNING ]: Classpath \"$currcp\" does not exist..."
        fi
      fi
      fcp="${fcp}:$currcp"
    done
    cp="$(echo "$fcp" | cut -c2-)"
  fi

  # Pythonpath
  if [ -z "$pythonpath" ]; then
    if [ -z "$fullAppPath" ]; then
      pythonpath=${DEFAULT_PYTHONPATH}
    else
      # Fixed: -e instead of the deprecated -a file-existence test
      if [ -e "${fullAppPath%/*}/__init__.py" ]; then
        pythonpath=${DEFAULT_PYTHONPATH}
      else
        pythonpath=${fullAppPath%/*}
      fi
    fi
  fi

  # AppDir
  if [ -z "$appdir" ]; then
    appdir=${DEFAULT_APPDIR}
  fi

  # Streaming
  if [ -z "${streaming}" ]; then
    streaming=${DEFAULT_STREAMING}
  fi
  if [ "${streaming}" != "null" ] && [ "${streaming}" != "NONE" ]; then
    if [ -z "${streaming_master_port}" ]; then
      streaming_master_port=$((BASE_STREAMING_PORT + RANDOM % STREAMING_PORT_RAND_RANGE))
    fi
  else
    streaming_master_port="null"
  fi

  if [ -n "${keep_wd}" ]; then
    prepare_keep_workingdir
  fi

  if [ -n "${gen_coredump}" ]; then
    prepare_coredump_generation
  fi
}

###############################################
# Enable worker coredump generation
# Appends -Dcompss.worker.gen_coredump=true to the worker and
# worker-in-master JVM option strings (comma-separated).
###############################################
prepare_coredump_generation() {
    local prop="-Dcompss.worker.gen_coredump=true"
    # An unset/empty value or the literal two-character string "" means
    # no previous options: start fresh instead of appending.
    case "${jvm_workers_opts}" in
        ""|'""') jvm_workers_opts="${prop}" ;;
        *)       jvm_workers_opts="${jvm_workers_opts},${prop}" ;;
    esac
    case "${jvm_worker_in_master_opts}" in
        ""|'""') jvm_worker_in_master_opts="${prop}" ;;
        *)       jvm_worker_in_master_opts="${jvm_worker_in_master_opts},${prop}" ;;
    esac
}

###############################################
# Keep the worker working directory after execution
# Appends -Dcompss.worker.removeWD=false to the worker and
# worker-in-master JVM option strings (comma-separated).
###############################################
prepare_keep_workingdir() {
    local prop="-Dcompss.worker.removeWD=false"
    # An unset/empty value or the literal two-character string "" means
    # no previous options: start fresh instead of appending.
    case "${jvm_workers_opts}" in
        ""|'""') jvm_workers_opts="${prop}" ;;
        *)       jvm_workers_opts="${jvm_workers_opts},${prop}" ;;
    esac
    case "${jvm_worker_in_master_opts}" in
        ""|'""') jvm_worker_in_master_opts="${prop}" ;;
        *)       jvm_worker_in_master_opts="${jvm_worker_in_master_opts},${prop}" ;;
    esac
}

###############################################
# Sets job variables
###############################################
set_variables() {
  # Derives all job runtime variables: installation paths (native vs container),
  # the per-job log directory, master/worker working directories, network name
  # suffix, memory figures, NIO debug/tracing flags and the execution UUID.
  # Reads many globals set by get_args/check_args and the sourced runtime files.

  # Set script variables
  if [ -z "${container_image}" ]; then
    # Native execution: use the COMPSs installation available on the host
    export COMPSS_HOME=${COMPSS_HOME}
    export GAT_LOCATION=${COMPSS_HOME}/Dependencies/JAVA_GAT
    worker_install_dir=${COMPSS_HOME}
  else
    # Container execution. SINGULARITYENV_* variables are presumably forwarded
    # into the container environment by Singularity -- TODO confirm
    if [ -z "${container_compss_path}" ]; then
      # No custom path given: assume COMPSs is installed at /opt/COMPSs inside
      # the container image

      export SINGULARITYENV_COMPSS_HOME=/opt/COMPSs/
      export SINGULARITYENV_COMPSS_MASTER_WORKING_DIR=$COMPSS_MASTER_WORKING_DIR
      export SINGULARITYENV_GAT_LOCATION=/opt/COMPSs/Dependencies/JAVA_GAT

      export MASTER_CONTAINER_IMAGE=${container_image}
      export MPI_RUNNER_SCRIPT=/opt/COMPSs/Runtime/scripts/utils/mpi_run.sh
      export SINGULARITYENV_MASTER_CONTAINER_IMAGE=${container_image}
      export SINGULARITYENV_MPI_RUNNER_SCRIPT=/opt/COMPSs/Runtime/scripts/utils/mpi_run.sh
      # to load the modules when ssh'ing
      export SINGULARITYENV_ENV_SCRIPT=$ENV_LOAD_MODULES_SCRIPT
      export SINGULARITYENV_SLURM_JOBID=$SLURM_JOBID
      export SINGULARITYENV_SLURM_JOB_ID=$SLURM_JOB_ID


      worker_install_dir=/opt/COMPSs
      cont_COMPSS_HOME=/opt/COMPSs/
    else
      # Custom in-container COMPSs installation path provided by the user
      export SINGULARITYENV_COMPSS_HOME=${container_compss_path}/
      export SINGULARITYENV_COMPSS_MASTER_WORKING_DIR=$COMPSS_MASTER_WORKING_DIR
      export SINGULARITYENV_GAT_LOCATION=${container_compss_path}/Dependencies/JAVA_GAT

      export MASTER_CONTAINER_IMAGE=${container_image}
      export MPI_RUNNER_SCRIPT=${container_compss_path}/Runtime/scripts/utils/mpi_run.sh
      export SINGULARITYENV_MASTER_CONTAINER_IMAGE=${container_image}
      export SINGULARITYENV_MPI_RUNNER_SCRIPT=${container_compss_path}/Runtime/scripts/utils/mpi_run.sh
      # to load the modules when ssh'ing
      export SINGULARITYENV_ENV_SCRIPT=$ENV_LOAD_MODULES_SCRIPT
      export SINGULARITYENV_SLURM_JOBID=$SLURM_JOBID
      export SINGULARITYENV_SLURM_JOB_ID=$SLURM_JOB_ID

      worker_install_dir=${container_compss_path}/
      cont_COMPSS_HOME=${container_compss_path}/
    fi
  fi

  # Create .COMPSs log dir for application execution
  if [ -z "${log_dir}" ]; then
        log_dir=$HOME
  fi
  # ${!ENV_VAR_JOB_ID} is an indirect expansion: ENV_VAR_JOB_ID holds the NAME
  # of the queue system's job-id environment variable (e.g. SLURM_JOB_ID)
  specific_log_dir=${log_dir}/.COMPSs/${!ENV_VAR_JOB_ID}/
  mkdir -p "${specific_log_dir}"

  # treat master_working_dir
  # SharedDisk variables
  if [ -n "${master_working_dir}" ]; then
    if [ "${master_working_dir}" == "${SHARED_DISK_NAME}" ]; then
      # "shared_disk" keyword: create a fresh temp dir.
      # NOTE(review): "${SHARED_DISK_PREFIX}${HOME}" concatenates the prefix
      # directly with $HOME -- verify this builds the intended path/template.
      # macOS mktemp has no -p; -t is used there instead.
      if [[ "$OSTYPE" == "darwin"* ]]; then
        master_working_dir=$(mktemp -d -t "${SHARED_DISK_PREFIX}${HOME}")
      else
        master_working_dir=$(mktemp -d -p "${SHARED_DISK_PREFIX}${HOME}")
      fi
    elif [ "${master_working_dir}" == "${LOCAL_DISK_NAME}" ]; then
      # "local_disk" keyword: per-job directory on the node-local disk
      master_working_dir=${LOCAL_DISK_PREFIX}/${!ENV_VAR_JOB_ID}
      mkdir -p "${master_working_dir}"
    else
      # Custom absolute path: just make sure it exists
      mkdir -p "${master_working_dir}"
    fi
  fi


  # SharedDisk variables
  # Same keyword resolution for the workers' working directory; note that,
  # unlike the master case, a custom path also gets a temp subdirectory
  if [ "${worker_working_dir}" == "${SHARED_DISK_NAME}" ]; then
    if [[ "$OSTYPE" == "darwin"* ]]; then
      worker_working_dir=$(mktemp -d -t "${SHARED_DISK_PREFIX}${HOME}")
    else
      worker_working_dir=$(mktemp -d -p "${SHARED_DISK_PREFIX}${HOME}")
    fi
  elif [ "${worker_working_dir}" == "${LOCAL_DISK_NAME}" ]; then
    worker_working_dir=${LOCAL_DISK_PREFIX}/${!ENV_VAR_JOB_ID}
    mkdir -p "${worker_working_dir}"
  else
    # The working dir is a custom absolute path, create tmp
    if [[ "$OSTYPE" == "darwin"* ]]; then
      worker_working_dir=$(mktemp -d -t "${worker_working_dir}")
    else
      worker_working_dir=$(mktemp -d -p "${worker_working_dir}")
    fi
  fi

  # Network variables
  # Translate the user-facing network keyword into the hostname suffix used by
  # the cluster (suffixes come from the sourced supercomputer configuration)
  if [ "${network}" == "ethernet" ]; then
    network=""
  elif [ "${network}" == "infiniband" ]; then
    network=${NETWORK_INFINIBAND_SUFFIX}
  elif [ "${network}" == "data" ]; then
    network=${NETWORK_DATA_SUFFIX}
  fi

  # Memory variables
  if [ "${node_memory}" == "${DEFAULT_NODE_MEMORY}" ]; then
    # Default value
    node_memory=${DEFAULT_NODE_MEMORY_SIZE}
  else
    # Change from MB to GB, reserving 4 GB for the system
    node_memory=$(( node_memory / 1024 - 4))
  fi

  # Disk Storage variables
  # NOTE(review): both branches leave node_storage_bandwidth with the same
  # value, so this conditional is effectively dead code -- confirm intent
  if [ "${node_storage_bandwidth}" == "${DEFAULT_NODE_STORAGE_BANDWIDTH}" ]; then
    # Default value
    node_storage_bandwidth=${DEFAULT_NODE_STORAGE_BANDWIDTH}
  else
    node_storage_bandwidth=${node_storage_bandwidth}
  fi

  # NOTE(review): both branches assign DEFAULT_NODE_MEMORY_SIZE; the else
  # branch's comment claims an MB->GB conversion like node_memory's above,
  # which suggests a copy/paste bug -- confirm the intended behaviour
  if [ "${worker_in_master_memory}" == "${DEFAULT_WORKER_IN_MASTER_MEMORY}" ]; then
    # Default value
    worker_in_master_memory=${DEFAULT_NODE_MEMORY_SIZE}
  else
    # Change from MB to GB
    worker_in_master_memory=${DEFAULT_NODE_MEMORY_SIZE}
  fi

  # Load tracing and debug only for NIO
  # ("${comm/NIO}" != "${comm}" is true iff ${comm} contains the substring NIO)
  if [ "${comm/NIO}" != "${comm}" ]; then
    # Adapting tracing flag to worker tracing level
    if [ "${tracing}" == "${TRACING_ENABLED}" ]; then
        load_tracing_env
    fi

    # Adapt debug flag to worker script
    if [ "${log_level}" == "debug" ] || [ "${log_level}" == "trace" ]; then
      debug="true"
    else
      debug="false"
    fi
  fi

  if [ -z "${uuid}" ]; then
    # Generate a UUID for workers and runcompss (Linux-only kernel interface)
    uuid=$(cat /proc/sys/kernel/random/uuid)
  fi
}

###############################################
# Log execution variables
###############################################
log_variables() {
  # Print every launch parameter to stdout (enabled when log_level is
  # debug/trace). A single here-document replaces the per-line echoes;
  # the expanded text is identical.
  cat <<EOF
-------- Launch arguments --------
Master:                    ${master_node}
Workers:                   ${worker_nodes}
Tasks per Node:            ${max_tasks_per_node}
CPUs per Node:             ${cpus_per_node}
GPUs per Node:             ${gpus_per_node}
FPGAs per Node:            ${fpgas_per_node}
IO Executors:              ${io_executors}
CPU Affinity:              ${cpu_affinity}
GPU Affinity:              ${gpu_affinity}
FPGA Affinity:             ${fpga_affinity}
FPGA reprogram command:    ${fpga_prog}
Network:                   ${network}
Worker in Master CPUs:     ${worker_in_master_cpus}
Worker in Master Memory:   ${worker_in_master_memory}
Master Port:               ${master_port}
Master WD:                 ${master_working_dir}
Worker WD:                 ${worker_working_dir}
Master JVM Opts:           ${jvm_master_opts}
Workers JVM Opts:          ${jvm_workers_opts}
Worker in Master JVM Opts: ${jvm_worker_in_master_opts}
Streaming Backend:         ${streaming}
Streaming Master Port:     ${streaming_master_port}
Library Path:              ${library_path}
Classpath:                 ${cp}
Pythonpath:                ${pythonpath}
Appdir:                    ${appdir}
Env. Script:               ${env_script}
Lang:                      ${lang}
Python Interpreter:        ${python_interpreter}
Python version:            ${python_version}
Python virtual env:        ${DEFAULT_PYTHON_VIRTUAL_ENVIRONMENT}
Python propagate virt env. ${python_propagate_virtual_environment}
Python extrae config file  ${custom_extrae_config_file_python}
Python use MPI worker      ${python_mpi_worker}
Python memory profile      ${python_memory_profile}
Python worker cache        ${python_worker_cache}
Python profiler cache      ${python_cache_profiler}
COMM:                      ${comm}
Prolog:                    ${prologActions[*]}
Epilog:                    ${epilogActions[*]}
Storage conf:              ${storage_conf}
Task execution:            ${taskExecution}
To COMPSs:                 ${args_pass}
-----------------------------------
 
EOF
}


###############################################
# Write usage to log file
###############################################
write_log_usage() {
  # Appends a "<date>\t<user>" record to ${COMPSS_HOME}/usage.log, creating the
  # file with a header line on first use. Skipped silently when the COMPSs
  # installation directory is not writable by the current user.
  # BUGFIX: ${COMPSS_HOME} is now quoted (SC2086); previously an empty/unset
  # value degenerated into '[ -w ]', which is always true, and the script then
  # tried to write "/usage.log".
  if [ -w "${COMPSS_HOME}" ]; then
    # The folder is writable

    # Create file with header if required
    local log_usage_file="${COMPSS_HOME}/usage.log"
    if [ ! -f "${log_usage_file}" ]; then
      echo -e "USAGE DATE\\t\\t\\tUSER" > "${log_usage_file}"
    fi

    # Write usage message
    msg="$(date)\\t$(whoami)"
    echo -e "$msg" >> "${log_usage_file}"

    # Ensure file is public so every user of the shared installation can append
    chmod 777 "${log_usage_file}"
  fi
}


###############################################
# Launches the application
###############################################
launch() {
  # Spawns the persistent workers (NIO adaptor only) in the background, waits
  # for them to come up, then runs the master (or an interactive frontend) on
  # the master node. Relies on worker_cmd/master_cmd (defined elsewhere) to
  # build the WCMD/MCMD command lines -- NOTE(review): inferred from usage
  # below; confirm against the sourced adaptor scripts.
  echo "------ Launching COMPSs application ------"

  # Launch workers separately if they are persistent
  # ("${comm/NIO}" != "${comm}" is true iff ${comm} contains the substring NIO)
  if [ "${comm/NIO}" != "${comm}" ]; then
    # Start workers' processes
    local hostid=1
    if [ ! -z "${initial_hostid}" ]; then
      if [ "${log_level}" != "off" ]; then
         echo "[LAUNCH_COMPSS] Host id set to ${initial_hostid}"
      fi
      hostid=${initial_hostid}
    fi
    if [ ! -z "${master_node}" ]; then
      if [ "${worker_in_master_cpus}" -ne 0 ]; then
        # Worker in master node
        local jvm_worker_in_master_opts_str
        local jvm_worker_in_master_opts_size
        local fpga_reprogram_str
        local fpga_reprogram_size
        # Comma-separated lists are converted to space-separated words and
        # passed to worker_cmd preceded by their word count
        jvm_worker_in_master_opts_str=$(echo "${jvm_worker_in_master_opts}" | tr "," " ")
        jvm_worker_in_master_opts_size=$(echo "${jvm_worker_in_master_opts_str}" | wc -w)
        fpga_reprogram_str=$(echo "${fpga_prog}" | tr "," " ")
        fpga_reprogram_size=$(echo "${fpga_prog}" | wc -w)
        # worker_cmd id name master_name jvmOptsSize jvmOpts fpgaReprogramSize fpgaReprogramStr cpus gpus fpgas cpuMap gpuMap fpgaMap ioExecutors limitOfTasks streaming_backend streaming_master_port
        worker_cmd "$hostid" "${master_node}${network}" "${master_node}${network}" "${jvm_worker_in_master_opts_size}" "${jvm_worker_in_master_opts_str}" "${fpga_reprogram_size}" "${fpga_reprogram_str}" "${worker_in_master_cpus}" "${gpus_per_node}" "${fpgas_per_node}" "${cpu_affinity}" "${gpu_affinity}" "${fpga_affinity}" "${io_executors}" "${max_tasks_per_node}" "${streaming}" "${streaming_master_port}"

        # NODE_NAME_QUEUE presumably maps a hostname to the queue system's
        # node name -- TODO confirm with the supercomputer configuration
        if [ ! -z "${NODE_NAME_QUEUE}" ]; then
          master_node=$(${NODE_NAME_QUEUE} "${master_node}")
        fi

        if [ -n "${cpus_per_task}" ] && [ "${cpus_per_task}" == "true" ]; then
            if [ -n "${worker_in_master_cpus}" ] && [ "${worker_in_master_cpus}" != "0" ]; then
                AUXILIAR_LAUNCH_PARAMS="${QARG_CPUS_PER_TASK} ${worker_in_master_cpus} "
            fi
        fi

        # ${WCMD} on the right-hand side is expected to have been set by the
        # worker_cmd call above -- NOTE(review): verify
        WCMD="${LAUNCH_CMD} ${AUXILIAR_LAUNCH_PARAMS}${LAUNCH_EXTRA_FLAGS}${LAUNCH_PARAMS}${LAUNCH_SEPARATOR}${master_node} ${PRE_CMD}${WCMD}${POST_CMD}"
        if [ "${log_level}" != "off" ]; then
	    echo "[LAUNCH_COMPSS] Launching worker in master with id $hostid and command: $WCMD"
        fi
        $WCMD&
        hostid=$((hostid+1))
      fi
    fi

    if [ ! -z "${worker_nodes}" ]; then
      local jvm_workers_opts_str
      local jvm_workers_opts_size
      jvm_workers_opts_str=$(echo "${jvm_workers_opts}" | tr "," " ")
      jvm_workers_opts_size=$(echo "${jvm_workers_opts_str}" | wc -w)
      fpga_reprogram_str=$(echo "${fpga_prog}" | tr "," " ")
      fpga_reprogram_size=$(echo "${fpga_prog}" | wc -w)
      for node in ${worker_nodes}; do
        # worker_cmd id name master_name jvmOptsSize jvmOpts fpgaReprogramSize fpgaReprogramStr cpus gpus fpgas cpuMap gpuMap fpgaMap ioExecutors limitOfTasks streaming_backend streaming_master_port
        worker_cmd $hostid "$node${network}" "${master_node}${network}" "${jvm_workers_opts_size}" "${jvm_workers_opts_str}" "${fpga_reprogram_size}" "${fpga_reprogram_str}" "${cpus_per_node}" "${gpus_per_node}" "${fpgas_per_node}" "${cpu_affinity}" "${gpu_affinity}" "${fpga_affinity}" "${io_executors}" "${max_tasks_per_node}" "${streaming}" "${streaming_master_port}"

        if [ ! -z "${NODE_NAME_QUEUE}" ]; then
            node=$(${NODE_NAME_QUEUE} ${node})
        fi

        if [ -n "${cpus_per_task}" ] && [ "${cpus_per_task}" == "true" ]; then
            if [ -n "${cpus_per_node}" ] && [ "${cpus_per_node}" != "0" ]; then
                AUXILIAR_LAUNCH_PARAMS="${QARG_CPUS_PER_TASK} ${cpus_per_node} "
            fi
        fi

        WCMD="${LAUNCH_CMD} ${AUXILIAR_LAUNCH_PARAMS}${LAUNCH_EXTRA_FLAGS}${LAUNCH_PARAMS}${LAUNCH_SEPARATOR}${node} ${PRE_CMD}${WCMD}${POST_CMD}"
        if [ "${log_level}" != "off" ]; then
	    echo "[LAUNCH_COMPSS] Launching worker with id $hostid and command: $WCMD"
        fi
        $WCMD&
        hostid=$((hostid+1))
      done
    fi

    # Sleep a little before launching master so workers have time to start:
    # 10s up to 99 workers, num_workers/10 seconds up to 799, capped at 80s
    local num_workers=$((hostid-1))
    local sleep_time
    if [ ${num_workers} -lt 100 ]; then
      sleep_time="10s"
    else
      if [ ${num_workers} -ge 100 ] && [ ${num_workers} -lt 800 ]; then
        sleep_time="$((num_workers/10))s"
      else
        sleep_time="80s"
      fi
    fi
    if [ "${log_level}" != "off" ]; then
        echo "[LAUNCH_COMPSS] Number of spawned workers: $num_workers"
        echo "[LAUNCH_COMPSS] Waiting ${sleep_time} ..."
    fi
    sleep ${sleep_time}
  fi

  # Launching the master
  if [ ! -z "${master_node}" ]; then
    # Launch master
    if [ "${jupyter_notebook}" = false ] && [ "${ipython}" = false ] && [ "${run_command}" = false ]; then
      master_cmd "${master_node}${network}"
      MCMD="${MCMD} ${args_pass}"
      if [ "${log_level}" != "off" ]; then
          echo "[LAUNCH_COMPSS] Executing Master with command: $MCMD"
      fi
      # The master runs in the foreground; a non-zero exit aborts the job
      $MCMD
      ev=$?
      if [ $ev -ne 0 ]; then
        echo "Master execution failed. Exiting job."
        exit $ev
      fi
    else
      # Interactive modes: do not start the master here; hand control to the
      # user (the master is started later from the notebook/shell)
      load_external_compss_environment
      if [ "${ipython}" = true ]; then
      	 ipython
      elif [ "${run_command}" = true ]; then
	 echo "Running command: ${command_to_run}"
         eval ${command_to_run}
      elif [ "${jupyter_notebook}" = true ]; then
         # Run jupyter notebook instead of the master within the jupyter_notebook_path
         # The master will be started when the user wants later from the notebook (the workers have already been launched)
         (cd "${jupyter_notebook_path}" && jupyter notebook --no-browser --ip=127.0.0.1 --port=8888)
      fi
    fi
    no_wait=false
  else
    echo "No master to run..."
    no_wait=true
  fi
}

load_external_compss_environment() {
  # Prepares the current environment so PyCOMPSs can be started manually
  # (ipython / jupyter / custom command) instead of launching the master here:
  # exposes the bindings on PYTHONPATH/LD_LIBRARY_PATH and exports the runtime
  # configuration (XMLs, master node/port, workers, uuid, log dir) for the
  # Python binding to pick up.
  export PYTHONPATH=${jupyter_notebook_path}:${COMPSS_HOME}/Bindings/python/${python_version}/:${COMPSS_HOME}/Bindings/bindings-common/lib/:$PYTHONPATH
  # JAVA_HOME variable must exist
  export LD_LIBRARY_PATH=${COMPSS_HOME}/Bindings/bindings-common/lib/:${COMPSS_HOME}/Runtime/compss-engine.jar:$LD_LIBRARY_PATH
  export_jvm_library
  unset XDG_RUNTIME_DIR  # Required for Jupyter in MN4. Supposed to be necessary also in other supercomputers. Otherwise, move this line to the supercomputer "module"
  export COMPSS_LOG_LEVEL=${log_level}
  export COMPSS_TRACING=${tracing}
  # BUGFIX: the previous test was '[ -v "$tracing" ]', which checks whether a
  # variable *named by the value* of ${tracing} exists (e.g. a variable called
  # "true"), so the tracing environment was effectively never loaded. Test the
  # value itself instead.
  if [ -n "${tracing}" ] && [ "${tracing}" != "false" ]; then
    load_tracing_env
  fi
  # Export project and resources xmls as environment variables to be used from PyCOMPSs binding
  # Also export the master name, master port, and worker nodes to be used from PyCOMPSs binding
  export COMPSS_PROJECT_XML="${PROJECT_FILE}"
  export COMPSS_RESOURCES_XML="${RESOURCES_FILE}"
  export COMPSS_MASTER_NODE="${master_node}"
  export COMPSS_MASTER_PORT="${master_port}"
  export COMPSS_WORKER_NODES="${worker_nodes}"
  export COMPSS_UUID="${uuid}"
  export COMPSS_LOG_DIR="${specific_log_dir}"
  if [ -z "${master_working_dir}" ]; then
    export COMPSS_MASTER_WORKING_DIR="${specific_log_dir}/tmpFiles/"
  else
    export COMPSS_MASTER_WORKING_DIR="${master_working_dir}/tmpFiles/"
  fi
  export COMPSS_STORAGE_CONF="${storage_conf}"
}

###############################################
# Prolog actions
###############################################
prolog() {
  # Runs each user-defined prolog action (stored comma-separated in
  # prologActions) on every worker node and then on the current node,
  # aborting via action_error if the local execution fails.
  if [ "${log_level}" != "off" ]; then
      echo "---- Executing Prolog actions ----"
  fi
  for action in "${prologActions[@]}"; do
    # Actions are stored with "," as the argument separator
    realAction=$(echo "${action}" | tr "," " ")
    for node in ${worker_nodes}; do
        # CONSISTENCY FIX: quote the node argument (SC2086), matching the
        # quoted NODE_NAME_QUEUE call used when launching the workers
        if [ ! -z "${NODE_NAME_QUEUE}" ]; then
            node=$(${NODE_NAME_QUEUE} "${node}")
        fi
        w_realAction="${LAUNCH_CMD} ${AUXILIAR_LAUNCH_PARAMS}${LAUNCH_EXTRA_FLAGS}${LAUNCH_PARAMS}${LAUNCH_SEPARATOR}${node} ${PRE_CMD}${realAction}${POST_CMD}"
        if [ "${log_level}" != "off" ]; then
            # CONSISTENCY FIX: log the full remote command like epilog does
            echo "- Prolog in $node: $w_realAction"
        fi
        ${w_realAction}
    done
    if [ "${log_level}" != "off" ]; then
       echo "- Prolog: $realAction"
    fi
    $realAction
    local actionExitValue=$?
    if [ $actionExitValue -ne 0 ]; then
      action_error "$ERROR_PROLOG_ACTION"
    fi
  done
}

###############################################
# Epilog actions
###############################################
epilog() {
  # Runs each user-defined epilog action (stored comma-separated in
  # epilogActions) on every worker node and then on the current node,
  # aborting via action_error if the local execution fails.
  if [ "${log_level}" != "off" ]; then
      echo "---- Executing Epilog actions ----"
  fi
  for action in "${epilogActions[@]}"; do
    # Actions are stored with "," as the argument separator
    realAction=$(echo "${action}" | tr "," " ")
    for node in ${worker_nodes}; do
        # CONSISTENCY FIX: quote the node argument (SC2086), matching the
        # quoted NODE_NAME_QUEUE call used when launching the workers
        if [ ! -z "${NODE_NAME_QUEUE}" ]; then
            node=$(${NODE_NAME_QUEUE} "${node}")
        fi
        w_realAction="${LAUNCH_CMD} ${AUXILIAR_LAUNCH_PARAMS}${LAUNCH_EXTRA_FLAGS}${LAUNCH_PARAMS}${LAUNCH_SEPARATOR}${node} ${PRE_CMD}${realAction}${POST_CMD}"
        if [ "${log_level}" != "off" ]; then
            echo "- Epilog in $node: $w_realAction"
        fi
        ${w_realAction}
    done
    if [ "${log_level}" != "off" ]; then
       echo "- Epilog: $realAction"
    fi
    $realAction
    local actionExitValue=$?
    if [ $actionExitValue -ne 0 ]; then
      action_error "$ERROR_EPILOG_ACTION"
    fi
  done
}

###############################################
# Wait for execution end
###############################################
wait_for_completion() {
  # Block until every background process spawned by launch (master and
  # persistent workers) has terminated.
  printf '%s\n' "[LAUNCH_COMPSS] Waiting for application completion"
  wait
}

###############################################
# Clean function for trap
###############################################
cleanup() {
  # EXIT trap handler: removes each worker's temporary working directory
  # (via the queue launcher command) and the generated XML files, unless the
  # user asked to keep the working directories.
  # Avoid clean up if removeWD=false worker flag is set
  if [[ -n "$keep_wd" ]]; then
    echo "[WARN] Flag removeWD set to false. Not cleaning Workers WD"
    echo "[WARN] Flag removeWD set to false. Not cleaning XML files"
  else
    # Cleanup
    if [ "${log_level}" != "off" ]; then
       echo "[LAUNCH_COMPSS] Cleanup Worker TMP files"
    fi
    for node in ${worker_nodes}; do
      local sandboxWD=${worker_working_dir}/
      if [ ! -z "${NODE_NAME_QUEUE}" ]; then
        node=$(${NODE_NAME_QUEUE} ${node})
      fi
      if [ "${log_level}" != "off" ]; then
          echo "[LAUNCH_COMPSS] Removing ${sandboxWD}"
      fi
      # Deliberately unquoted: the whole line is a remote command assembled
      # from launcher fragments (LAUNCH_CMD/CMD_SEPARATOR) that must word-split
      # shellcheck disable=SC2086
      ${LAUNCH_CMD} ${LAUNCH_EXTRA_FLAGS}${LAUNCH_PARAMS}${LAUNCH_SEPARATOR}${node} ${CMD_SEPARATOR}rm -rf ${sandboxWD}${CMD_SEPARATOR}
    done
    if [ "${log_level}" != "off" ]; then
        echo "[LAUNCH_COMPSS] Cleanup TMP files"
    fi
    # XML files are only removed for full runs or the final phase ("fini") of
    # heterogeneous multi-phase jobs
    if [ -z "${xmls_phase}" ] || [ "${xmls_phase}" = "all" ] || [ "${xmls_phase}" = "fini" ]; then
    	rm -f "${PROJECT_FILE}"
    	rm -f "${RESOURCES_FILE}"
    fi
  fi
}


#---------------------------------------------------------------------------------------
# MAIN EXECUTION
#---------------------------------------------------------------------------------------

  # ---- Main execution flow ----
  numPrologActions=0
  declare -a prologActions
  numEpilogActions=0
  declare -a epilogActions

  # Parse and validate the command-line arguments
  get_args "$@"
  check_args

  # Derive all job variables (paths, working dirs, memory, uuid, ...)
  set_variables

  # Dump the full configuration when debugging
  case "${log_level}" in
    debug|trace) log_variables ;;
  esac

  # Record this launch in the shared installation's usage log
  write_log_usage

  # Ensure temporary files are removed on every exit path
  trap cleanup EXIT

  # Generate the project/resources XML files for the requested phase
  CREATE_WORKING_DIRS=true  # used by xml_project_add_worker_in_master and xml_project_add_workers
  case "${xmls_phase}" in
    ""|all) create_xml_files ;;
    init)   init_het_xml_files "${xmls_suffix}" ;;
    add)    add_het_xml_files "${xmls_suffix}" ;;
    fini)   fini_het_xml_files "${xmls_suffix}" ;;
  esac

  # User prolog actions
  prolog

  # Launch workers and master
  launch

  # Wait for completion unless there was no master to run
  if [ "${no_wait}" = false ]; then
     wait_for_completion
  fi

  # User epilog actions
  epilog