# Configuration file
# These values can be overridden by ~/.astkrc/prefs

#-------------------------------------------------------------------------------
# [REMOTE SERVERS]
# Code_Aster web server (for updates)
http_server_ip : www.code-aster.org

# User on Code_Aster web server (for updates)
http_server_user : anonymous

# Repository of update files on the http server
http_rep_maj : /FICHIERS

# EDF development server (only for intranet usage)
devel_server_ip : aster4.cla.edfgdf.fr

# User on EDF development server should be set in ~/.astkrc/config
devel_server_user :

# Local repository where update files can be downloaded before installation
local_rep_maj : /tmp


#-------------------------------------------------------------------------------
# [NETWORK CONFIGURATION]
# The protocol the clients must use to connect to this server.
protocol_exec : asrun.plugins.server.SSHServer

# The protocol the clients must use to copy data files onto this server.
protocol_copyto : asrun.plugins.server.SCPServer

# The protocol the clients must use to download results files from this server.
protocol_copyfrom : asrun.plugins.server.SCPServer

# Directory shared by all compute nodes and used as a proxy between the
# clients and the server : clients upload data files into this directory and
# download results files from it. For example : /export/tmp/PROXY_DIR...
proxy_dir : /tmp


#-------------------------------------------------------------------------------
# [PLUGINS CONFIGURATION]
# Default values are defined in services.py
# Schema used by the client to refresh jobs status
schema_actu :

# Schema used by the client to edit job outputs
schema_edit :

# Schema used by the client to retrieve the job results
schema_get_results :

# Schema used by the client to get server information
schema_info :

# Schema used by the client to purge flasheur
schema_purge_flash :

# Schema used by the client to send a mail
schema_sendmail :

# Schema used by the client to call the server to start a new job
schema_serv :

# Schema used by the client to interrupt and delete a job
schema_stop_del :

# Schema used by the client to get tail of job output
schema_tail :

# Schema used by the client to build an export file
schema_get_export :

# Schema used by the client to create a new issue
schema_create_issue :

# Schema used by the client to insert a study in the database
schema_insert_in_db :

# Schema used by the server to modify the profile before running the execution
schema_profile_modifier :

# Schema used by the server to adjust parameters for execution
schema_calcul :

# Schema used by the server to adjust the Code_Aster command line
schema_execute :


#-------------------------------------------------------------------------------
# ----- local configuration
# message of the day
#motd : $ASTER_ETC/codeaster/motd

# root of temporary space for astk services
rep_tmp : /tmp

# "Aster" platform
# (one of SOLARIS, SOLARIS64, TRU64, IRIX, P_LINUX)
plate-forme : ?IFDEF?

# Temporary directory for Aster executions
rep_trav : /tmp

# Temporary directory for Aster executions shared by all processors
# (used by mpi executions). For example : /export/tmp, /home/tmp...
shared_tmp : /tmp

# shared folders = for which remote copy is never necessary (comma separated)
shared_folders :

# choose True to create symbolic links instead of copying executable and catalogs
symlink : True

# text editor
editor : ?EDITOR?

# terminal for interactive calculation following output
#    @E will be replaced by the command line
terminal : ?TERMINAL?

#-------------------------------------------------------------------------------
# nodes of the cluster for interactive calculation or
# to call batch commands
noeud : ?NODE?

# add frontal machine (which is astk server in GUI) as a compute node
serv_as_node : True

# ...and just keep it (ignore 'noeud' list).
# This has no effect if serv_as_node is False.
only_serv_as_node : False

#-------------------------------------------------------------------------------
# ----- batch/interactive configuration
# Only LSF and Sun Grid Engine are supported now
# PBS support is still experimental

# batch : oui/non Choose 'oui' if a batch scheduler is available
# batch_memmax : memory limit in batch mode (MB)
# batch_tpsmax : cpu time limit in batch mode (hh:mm:ss)
# batch_nbpmax : max proc number in batch mode (OpenMP)
# batch_mpi_nbpmax : max proc number in batch mode (MPI)
batch : non
# automatically estimate from localhost resource if not defined
#batch_memmax : 12000
#batch_tpsmax : 9999:00:00
#batch_nbpmax : 16
batch_mpi_nbpmax : 32

# groups of batch queues and queues in each group (batch_queue_'group name')
batch_queue_group : urgent dvp
batch_queue_urgent : urgent
batch_queue_dvp : dev128M_3m dev256M_2h dev512M_2h dev2G_5h

# interactif : oui/non Choose 'non' to prohibit interactive calculation
# memory limit in interactive mode (MB)
# cpu time limit in interactive mode (hh:mm:ss)
# max proc number in interactive mode (OpenMP)
# max proc number in interactive mode (MPI)
interactif : oui
# automatically estimate from localhost resource if not defined
#interactif_memmax : 2048
#interactif_tpsmax : 9999:00:00
#interactif_nbpmax : 16
interactif_mpi_nbpmax : 32

#-------------------------------------------------------------------------------
# ----- batch commands
# batch_nom : one of LSF, SunGE, PBS
# batch_ini : initialisation (shell script, sh/ksh syntax)
# batch_sub : command to submit a job
# batch_jid : variable to get the job number
# batch_job : to display information about jobs
# batch_kil : to delete jobs

# --- batch configuration (see examples below)
batch_nom : SunGE
batch_ini : /opt/SunGE6.0/default/common/settings.sh
batch_sub : /opt/SunGE6.0/bin/lx24-x86/qsub
batch_jid : JOB_ID
batch_job : /opt/SunGE6.0/bin/lx24-x86/qstat
batch_kil : /opt/SunGE6.0/bin/lx24-x86/qdel

# --- Sun Grid Engine example
#batch_nom : SunGE
#batch_ini : /opt/SunGE6.0/default/common/settings.sh
#batch_sub : /opt/SunGE6.0/bin/lx24-x86/qsub
#batch_jid : JOB_ID
#batch_job : /opt/SunGE6.0/bin/lx24-x86/qstat
#batch_kil : /opt/SunGE6.0/bin/lx24-x86/qdel

# --- LSF example
#batch_nom : LSF
#batch_ini : /usr/share/lsf/conf/profile.lsf
#batch_sub : /usr/share/lsf/6.2/linux2.6-glibc2.3-ia64/bin/bsub
#batch_jid : LSB_JOBID
#batch_job : /usr/share/lsf/6.2/linux2.6-glibc2.3-ia64/bin/bjobs
#batch_kil : /usr/share/lsf/6.2/linux2.6-glibc2.3-ia64/bin/bkill

# --- PBS example
#batch_nom : PBS
#batch_ini :
#batch_sub : /usr/bin/qsub
#batch_jid : PBS_JOBID
#batch_job : /usr/bin/qstat
#batch_kil : /usr/bin/qdel

#-------------------------------------------------------------------------------
# ----- MPI commands and parameters
# mpirun
#    available arguments are : mpi_hostfile, mpi_nbnoeud, mpi_nbcpu
#    (use Python string formatting style)
mpirun_cmd : ?MPIRUN? -np %(mpi_nbcpu)s --hostfile %(mpi_hostfile)s %(program)s

# file which contains list of hosts (REQUIRED even if it is not used in mpirun_cmd)
mpi_hostfile : $ASTER_ETC/codeaster/mpi_hostfile

# command called to initialize MPI environment (for example for mpich2)
#mpi_ini : mpdboot --totalnum=%(mpi_nbnoeud)s --file=%(mpi_hostfile)s ; sleep 10

# command called to close the MPI session (for example for mpich2)
#mpi_end : mpdallexit

# shell command to get processor id
# LAM/MPI : echo $LAMRANK
# OpenMPI (1.2) : echo $OMPI_MCA_ns_nds_vpid
# OpenMPI (1.3) : echo $OMPI_MCA_orte_ess_vpid
# OpenMPI (1.3.4) : echo $OMPI_COMM_WORLD_RANK
# Mpich2  : echo $PMI_RANK
mpi_get_procid_cmd : echo $PMI_RANK

# should heavily decrease the elapsed time copying the environment
# for a lot of processors (but not yet very qualified, may fail)
use_parallel_cp : no

#-------------------------------------------------------------------------------
# ------ default parameters for distributed calculations (ex. astout)
# uncomment these lines
#batch_distrib_hostfile : $ASTER_ETC/codeaster/batch_distrib_hostfile
#interactif_distrib_hostfile : $ASTER_ETC/codeaster/interactif_distrib_hostfile

#-------------------------------------------------------------------------------
# command lines for 'exectool' (see Options menu in astk)
# uncomment these lines
#memcheck : valgrind --tool=memcheck --leak-check=full --error-limit=no --track-origins=yes --suppressions=$HOME/python.supp

#-------------------------------------------------------------------------------
# ps commands : uncomment lines corresponding to your platform
#ps_cpu : /bin/ps -e --width=512 -ocputime -ocommand
#ps_pid : /bin/ps -e --width=512 -opid -ocommand
ps_cpu : ?PS_COMMAND_CPU?
ps_pid : ?PS_COMMAND_PID?

#-------------------------------------------------------------------------------
# debug command :
#  @E will be replaced by the name of the executable
#  @C will be replaced by the name of the corefile
#  @D will be replaced by the filename which contains "where+quit"
#  @d will be replaced by the string 'where ; quit'
#  @a will be replaced by the arguments of Code_Aster command line
#
#cmd_post : gdb -batch --command=@D @E @C
#cmd_dbg : gdb --command=@D @E
#cmd_dbg : ddd --debugger gdb --command=@D @E @C
#cmd_dbg : idb -gui -gdb -command @D -exec @E
#cmd_dbg : nemiver @E @a
cmd_post : ?DEBUGGER_COMMAND_POST?
cmd_dbg : ?DEBUGGER_COMMAND?

#-------------------------------------------------------------------------------
# [OPTIONAL]
# style of ctags used (optional, leave commented to ignore ctags generation)
# Valid values : 'exuberant' or 'emacs'
#?CTAGS_STYLE?
#ctags_style : exuberant

# Allow to print compiler output (to see all warnings for example)
#print_compiler_output : yes
print_compiler_output : no

# statistics of usage (this file must be rw for all users)
#log_usage_version : $ASTER_ROOT/log/usage_version.log

