Hi everyone,
I am running a test experiment in CESM2. The steps I followed are shown below:
*************************************************
cd cime/scripts
./create_newcase --case mycase --compset X --res f19_g16
cd mycase
./case.setup
./case.build
*************************************************
However, when I execute "./case.build" I get errors (only part of the output is shown below, since the errors all say "undefined reference to `netcdf_mp_nf90_*'"):
*************************************************
/mnt/scratch/nfs_fs02/yangx2/mycase/bld/intel/openmpi/nodebug/nothreads/pio/pio1/pio//home/yangx2/my_cesm_sandbox/cime/src/externals/pio1/pio/pionfwrite_mod.F90.in:185: undefined reference to `netcdf_mp_nf90_put_var_1d_eightbytereal_'
/mnt/scratch/nfs_fs02/yangx2/mycase/bld/intel/openmpi/nodebug/nothreads/pio/pio1/pio//home/yangx2/my_cesm_sandbox/cime/src/externals/pio1/pio/pionfwrite_mod.F90.in:220: undefined reference to `netcdf_mp_nf90_put_var_1d_eightbytereal_'
make: *** [/mnt/scratch/nfs_fs02/yangx2/mycase/bld/cesm.exe] Error 1
*************************************************
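In case it helps with diagnosing, this is the check I am planning to run to see whether the netCDF-Fortran library loaded by my module actually exports the symbols the linker cannot find (just a sketch; "libnetcdff.so" and the exact library path are my assumptions, to be taken from whatever "module show" reports):
*************************************************
# Find where the netCDF-Fortran module installs its library:
module show data/netCDF-Fortran/4.4.5-intel-2018.5.274

# Check whether the missing symbol is actually exported; replace
# <netcdf-fortran-root> with the root directory reported above.
# (Use plain "nm" on libnetcdff.a if only a static library exists.)
nm -D <netcdf-fortran-root>/lib/libnetcdff.so | grep -i nf90_put_var_1d_eightbytereal
*************************************************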
I thought it might be related to OpenMPI, so my config_machines.xml is shown below:
*************************************************
<?xml version="1.0"?>
<!-- This is an ordered list, not all fields are required, optional fields are noted below. -->
<config_machines version="2.0">
<!-- MACH is the name that you will use in machine options -->
<machine MACH="yangx2">
<!-- DESC: a text description of the machine; this field is currently not used in code-->
<DESC>University of Hawaii HPC</DESC>
<!-- NODENAME_REGEX: a regular expression used to identify this machine
it must work on compute nodes as well as login nodes, use machine option
to create_test or create_newcase if this flag is not available -->
<NODENAME_REGEX>node</NODENAME_REGEX>
<!-- OS: the operating system of this machine. Passed to cppflags for
compiled programs as -DVALUE; recognized values are LINUX, AIX, Darwin, CNL -->
<OS>LINUX</OS>
<!-- PROXY: optional http proxy for access to the internet-->
<PROXY> https://howto.get.out </PROXY>
<!-- COMPILERS: compilers supported on this machine, comma separated list, first is default -->
<COMPILERS>intel,gnu</COMPILERS>
<!-- MPILIBS: mpilibs supported on this machine, comma separated list,
first is default, mpi-serial is assumed and not required in this list-->
<!--<MPILIBS>openmpi,impi</MPILIBS> -->
<MPILIBS compiler="intel" >openmpi</MPILIBS>
<!-- PROJECT: A project or account number used for batch jobs
This value is used for directory names. If different from
actual accounting project id, use CHARGE_ACCOUNT
can be overridden in environment or $HOME/.cime/config -->
<PROJECT>none</PROJECT>
<!-- CHARGE_ACCOUNT: A project or account number used for batch jobs
This is the actual project used for cost accounting set in
the batch script (ex. #PBS -A charge_account). Will default
to PROJECT if not set.
can be overridden in environment or $HOME/.cime/config -->
<!--<CHARGE_ACCOUNT></CHARGE_ACCOUNT> yxy -->
<!-- SAVE_TIMING_DIR: (Acme only) directory for archiving timing output -->
<SAVE_TIMING_DIR> </SAVE_TIMING_DIR>
<!-- SAVE_TIMING_DIR_PROJECTS: (Acme only) projects whose jobs archive timing output -->
<SAVE_TIMING_DIR_PROJECTS> </SAVE_TIMING_DIR_PROJECTS>
<!-- CIME_OUTPUT_ROOT: Base directory for case output,
the case/bld and case/run directories are written below here -->
<CIME_OUTPUT_ROOT>/mnt/scratch/nfs_fs02/$USER</CIME_OUTPUT_ROOT>
<!-- DIN_LOC_ROOT: location of the inputdata data directory
inputdata is downloaded automatically on a case by case basis as
long as the user has write access to this directory. We recommend that
all cime model users on a system share an inputdata directory
as it can be quite large -->
<!--<DIN_LOC_ROOT>$ENV{CESMDATAROOT}/inputdata</DIN_LOC_ROOT> -->
<DIN_LOC_ROOT>/home/yangx2/my_cesm_sandbox/inputdata</DIN_LOC_ROOT>
<!-- DIN_LOC_ROOT_CLMFORC: override of DIN_LOC_ROOT specific to CLM
forcing data -->
<DIN_LOC_ROOT_CLMFORC>$ENV{CESMDATAROOT}/lmwg</DIN_LOC_ROOT_CLMFORC>
<!-- DOUT_S_ROOT: root directory of short term archive files, short term
archiving moves model output data out of the run directory, but
keeps it on disk-->
<DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
<!-- BASELINE_ROOT: Root directory for system test baseline files -->
<BASELINE_ROOT>$ENV{CESMDATAROOT}/cesm_baselines</BASELINE_ROOT>
<!-- CCSM_CPRNC: location of the cprnc tool, compares model output in testing-->
<CCSM_CPRNC>$ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc</CCSM_CPRNC>
<!-- GMAKE: gnu compatible make tool, default is 'gmake' -->
<GMAKE>make</GMAKE>
<!-- GMAKE_J: optional number of threads to pass to the gmake flag -->
<GMAKE_J>8</GMAKE_J>
<!-- BATCH_SYSTEM: batch system used on this machine,
supported values are: none, cobalt, lsf, pbs, slurm -->
<BATCH_SYSTEM>slurm</BATCH_SYSTEM>
<!-- SUPPORTED_BY: contact information for support for this system
this field is not used in code -->
<SUPPORTED_BY>UHM</SUPPORTED_BY>
<!-- MAX_TASKS_PER_NODE: maximum number of threads*tasks per
shared memory node on this machine,
should always be >= MAX_MPITASKS_PER_NODE -->
<MAX_TASKS_PER_NODE>20</MAX_TASKS_PER_NODE>
<!-- MAX_MPITASKS_PER_NODE: number of physical PEs per shared node on
this machine, in practice the MPI tasks per node will not exceed this value -->
<MAX_MPITASKS_PER_NODE>20</MAX_MPITASKS_PER_NODE>
<!-- PROJECT_REQUIRED: Does this machine require a project to be specified to
the batch system? See PROJECT above -->
<PROJECT_REQUIRED>False</PROJECT_REQUIRED>
<!-- mpirun: The mpi exec to start a job on this machine, supported values
are values listed in MPILIBS above, default and mpi-serial -->
<mpirun mpilib="openmpi">
<executable>mpirun</executable>
<arguments>
<arg name="num_tasks">-np {{ total_tasks }}</arg>
<!-- <arg name="num_tasks">-np 40 </arg> -->
</arguments>
</mpirun>
<!-- module system: allowed module_system type values are:
module http://www.tacc.utexas.edu/tacc-projects/mclay/lmod
soft softenv-intro - An introduction to the SoftEnv system and how to use it
none
-->
<module_system type="module">
<init_path lang="perl">/opt/ohpc/admin/lmod/8.1.18/init/perl</init_path>
<init_path lang="python">/opt/ohpc/admin/lmod/8.1.18/init/env_modules_python.py</init_path>
<init_path lang="sh">/opt/ohpc/admin/lmod/8.1.18/init/sh</init_path>
<init_path lang="csh">/opt/ohpc/admin/lmod/8.1.18/init/csh</init_path>
<cmd_path lang="perl">/opt/ohpc/admin/lmod/8.1.18/libexec/lmod perl</cmd_path>
<cmd_path lang="python">/opt/ohpc/admin/lmod/8.1.18/libexec/lmod python</cmd_path>
<cmd_path lang="sh">module</cmd_path>
<cmd_path lang="csh">module</cmd_path>
<modules>
<command name="purge"/>
</modules>
<modules compiler="intel">
<command name="load">toolchain/intel/2018.5.274</command>
<command name="load">devel/CMake/3.12.1-intel-2018.5.274</command>
<command name="load">data/netCDF/4.6.2-intel-2018.5.274</command>
<command name="load">data/netCDF-Fortran/4.4.5-intel-2018.5.274</command>
<!--<command name="load">devel/CMake/3.15.3</command>-->
<command name="load">geo/ESMF/7.1.0r-intel-2018.5.274</command>
<command name="load">lib/libfabric/1.7.1</command>
<command name="load">bio/BioPerl</command>
<command name="load">compiler/ifort/2018.5.274-GCC-6.3.0-2.26</command>
<command name="load">data/PnetCDF/1.9.0-intel-2018.5.274</command>
</modules>
<modules compiler="gnu">
<command name="load">toolchain/intel/2018.5.274</command>
<command name="load">compiler/ifort/2018.5.274-GCC-6.3.0-2.26</command>
<command name="load">data/netCDF/4.6.2-intel-2018.5.274</command>
<command name="load">data/netCDF-Fortran/4.4.5-intel-2018.5.274</command>
<command name="load">devel/CMake/3.15.3</command>
<command name="load">lib/libfabric/1.7.1</command>
<command name="load">lang/Perl/5.28.1-GCCcore-6.3.0</command>
<command name="load">lib/libxml2/2.9.8-GCCcore-6.3.0</command>
<command name="load">bio/BioPerl/1.7.2-foss-2018b-Perl-5.28.0</command>
<command name="load">data/PnetCDF/1.9.0-intel-2018.5.274</command>
</modules>
<modules mpilib="openmpi">
<command name="load">mpi/OpenMPI/3.1.1-iccifort-2018.5.274-GCC-6.3.0-2.26</command>
</modules>
</module_system>
<environment_variables>
<env name="OMP_STACKSIZE">256M</env>
<env name="I_MPI_FABRICS">shm:ofi</env>
<env name="FI_PROVIDER">psm</env>
<env name="FI_PSM_TAGGED_RMA">0</env>
<env name="FI_PSM_AM_MSG">1</env>
</environment_variables>
<resource_limits>
<resource name="RLIMIT_STACK">-1</resource>
</resource_limits>
</machine>
</config_machines>
*************************************************
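Since netCDF (C) and netCDF-Fortran are separate modules on this system, I also wonder whether the link line is simply missing -lnetcdff. If that is the likely cause, would adding something like the following to the machine entry in config_compilers.xml be the right fix? (This is only a sketch; the $ENV{...} variable names are guesses for my setup and would need to match whatever the modules actually export.)
*************************************************
<compiler MACH="yangx2" COMPILER="intel">
  <!-- Sketch: explicitly link the separate netCDF-Fortran library.
       NETCDF_FORTRAN_PATH and NETCDF_PATH are assumed names; check
       "module show" output for the real environment variables. -->
  <SLIBS>
    <append>-L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{NETCDF_PATH}/lib -lnetcdf</append>
  </SLIBS>
</compiler>
*************************************************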
Any hints for solving this problem would be helpful! Thanks in advance!
Best,
Xinyi