Hi everyone,
I'm trying to port CESM to a new machine (a Linux server with no batch system). First, I installed CESM2.1.5 following the steps described here (Downloading CESM2 (CESM2.1) — CESM CESM2.1 documentation), then created a ~/.cime/config_machines.xml for this machine by following the description here (6. Porting and validating CIME on a new platform — CIME master documentation). The configuration template is /data/warmcold/my_cesm/cime/config/xml_schemas/config_machines_template.xml.
The part of the configuration that confuses me is:
<module_system type="module">
<init_path lang="perl">/glade/u/apps/ch/opt/lmod/7.2.1/lmod/lmod/init/perl</init_path>
<init_path lang="python">/glade/u/apps/ch/opt/lmod/7.2.1/lmod/lmod/init/env_modules_python.py</init_path>
<init_path lang="csh">/glade/u/apps/ch/opt/lmod/7.2.1/lmod/lmod/init/csh</init_path>
<init_path lang="sh">/glade/u/apps/ch/opt/lmod/7.2.1/lmod/lmod/init/sh</init_path>
<cmd_path lang="perl">/glade/u/apps/ch/opt/lmod/7.2.1/lmod/lmod/libexec/lmod perl</cmd_path>
<cmd_path lang="python">/glade/u/apps/ch/opt/lmod/7.2.1/lmod/lmod/libexec/lmod python</cmd_path>
<cmd_path lang="sh">module</cmd_path>
<cmd_path lang="csh">module</cmd_path>
......
The Perl (v5.32.1) and Python (3.9.18) that I'm using come from my conda environment, and I don't know how to specify them in ~/.cime/config_machines.xml. I'm also not sure whether I included the csh and sh entries correctly. When I run scripts_regression_tests.py to test my setup, it reports many failures (the test output is attached below).
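If I understand the template correctly, the perl and python entries are supposed to point at the module system's own init files (the ones that let Perl/Python scripts run "module load"), not at my conda interpreters. So for plain Environment Modules under /usr/share/Modules I was guessing something roughly like the lines below, but the perl.pm / python.py file names and the /usr/bin/modulecmd path are just guesses for my server and I haven't been able to confirm them:
<module_system type="module">
  <!-- guessed paths: with classic Environment Modules the perl/python init files
       and the modulecmd executable may live elsewhere on this server -->
  <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
  <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
  <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
  <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
  <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
  <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
  <cmd_path lang="sh">module</cmd_path>
  <cmd_path lang="csh">module</cmd_path>
  <!-- the existing <modules> load commands would stay here -->
</module_system>
Is that the right idea, or do these entries need to reference the perl and python from my conda environment somehow?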
It would be great if someone could give me some guidance on these problems. Thank you in advance!
Best,
Nuanliang
My ~/.cime/config_machines.xml is shown below:
<?xml version="1.0"?>
<config_machines version="2.0">
<machine MACH="whirls">
<DESC>group server with 104 pes in 2 nodes, no batch system</DESC>
<NODENAME_REGEX>whirls.uchicago.edu</NODENAME_REGEX>
<OS>LINUX</OS>
<COMPILERS>intel</COMPILERS>
<MPILIBS>intelmpi</MPILIBS>
<PROJECT>nobatch</PROJECT>
<SAVE_TIMING_DIR> </SAVE_TIMING_DIR>
<SAVE_TIMING_DIR_PROJECTS> </SAVE_TIMING_DIR_PROJECTS>
<CIME_OUTPUT_ROOT>/data/warmcold/project2/dry_held_suarez</CIME_OUTPUT_ROOT>
<DIN_LOC_ROOT>/data/warmcold/project2/inputdata</DIN_LOC_ROOT>
<DIN_LOC_ROOT_CLMFORC>/data/warmcold/project2/clmforcing</DIN_LOC_ROOT_CLMFORC>
<DOUT_S_ROOT>/data/warmcold/project2/archive</DOUT_S_ROOT>
<BASELINE_ROOT>/data/warmcold/project2/cesm_baselines</BASELINE_ROOT>
<CCSM_CPRNC>$ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc.cheyenne</CCSM_CPRNC>
<GMAKE></GMAKE>
<GMAKE_J>8</GMAKE_J>
<BATCH_SYSTEM>none</BATCH_SYSTEM>
<SUPPORTED_BY>nuanliang: warmcold@uchicago.edu</SUPPORTED_BY>
<MAX_TASKS_PER_NODE>40</MAX_TASKS_PER_NODE>
<MAX_MPITASKS_PER_NODE>40</MAX_MPITASKS_PER_NODE>
<PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
<mpirun mpilib="intelmpi">
<executable>mpirun</executable>
<arguments>
<arg name="num_tasks">-n {{ total_tasks }}</arg>
</arguments>
</mpirun>
<module_system type="module">
<init_path lang="sh">/usr/share/Modules/init/sh</init_path>
<init_path lang="csh">/usr/share/Modules/init/csh</init_path>
<cmd_path lang="sh">module</cmd_path>
<cmd_path lang="csh">module</cmd_path>
<modules compiler='intel'>
<command name="purge"/>
<command name="load">intel/2020</command>
<command name="load">hdf5/1.12.2</command>
<command name="load">mkl/2020</command>
<command name="load">intelmpi/2020</command>
<command name="load">netcdf/4.9.0+intel-2020</command>
</modules>
</module_system>
<!-- environment variables, a blank entry will unset a variable -->
<environment_variables>
<env name="OMP_STACKSIZE">64M</env>
<env name="MPI_TYPE_DEPTH">16</env>
</environment_variables>
<!-- resource settings as defined in https://docs.python.org/2/library/resource.html -->
<resource_limits>
<resource name="RLIMIT_STACK">-1</resource>
</resource_limits>
  </machine>
</config_machines>