yaozhixiong@foxmail_com
Member
Hi, I am trying to install cesm1_2_0 on the grex machine of WestGrid. I can run compset X successfully, but it fails with compset B. Here are my settings and the error information:

***env_mach_specific***
set netcdf = /global/software/netcdf/netcdf-4.2.1.1-intel-rh6
set mpich = /global/software/openmpi-1.6.5-intel
setenv INC_NETCDF ${netcdf}/include
setenv LIB_NETCDF ${netcdf}/lib
setenv INC_MPI ${mpich}/include
setenv LIB_MPI ${mpich}/lib

***Macros***
CPPDEFS += -DFORTRANUNDERSCORE -DNO_R16 -DLINUX -DCPRINTEL
SLIBS += -L$(LIB_NETCDF) -lnetcdf -lnetcdff
CFLAGS := -O2 -fp-model precise
CONFIG_ARGS :=
CXX_LDFLAGS := -cxxlib
CXX_LINKER := FORTRAN
ESMF_LIBDIR :=
FC_AUTO_R8 := -r8
FFLAGS := -O2 -fp-model source -convert big_endian -assume byterecl -ftz -traceback
FFLAGS_NOOPT := -O0
FIXEDFLAGS := -fixed -132
FREEFLAGS := -free
MPICC := mpicc
MPICXX := mpicxx
MPIFC := mpif90
MPI_LIB_NAME :=
MPI_PATH := /global/software/openmpi-1.6.5-intel
NETCDF_PATH := /global/software/netcdf/netcdf-4.2.1.1-intel-rh6
PNETCDF_PATH :=
SCC := icc
SCXX := icpc
SFC := ifort
SUPPORTS_CXX := TRUE
ifeq ($(DEBUG), TRUE)
FFLAGS += -g -CU -check pointers -fpe0
endif
ifeq ($(compile_threaded), true)
LDFLAGS += -openmp
CFLAGS += -openmp
FFLAGS += -openmp
endif

***part of error information***
CalcWorkPerBlock: Total blocks: 290 Ice blocks: 113 IceFree blocks: 147 Land blocks: 30
forrtl: No such file or directory
forrtl: severe (29): file not found, unit 82, file /global/scratch/zhixiong/CESM/input_data/ice/cice/iced.0001-01-01.gx3v7_20080212
Image PC Routine Line Source
cesm.exe 0000000001A85AAA Unknown Unknown Unknown
cesm.exe 0000000001A845A6 Unknown Unknown Unknown
cesm.exe 0000000001A3A930 Unknown Unknown Unknown
cesm.exe 00000000019CECCE Unknown Unknown Unknown
cesm.exe 00000000019CE20F Unknown Unknown Unknown
cesm.exe 00000000019E5B0D Unknown Unknown Unknown
cesm.exe 0000000000D2CE00 ice_read_write_mp 86 ice_read_write.F90
cesm.exe 0000000000D336B4 ice_restart_mp_re 722 ice_restart.F90
cesm.exe 0000000000DC10BF cice_initmod_mp_c 132 CICE_InitMod.F90
cesm.exe 0000000000C49D0B ice_comp_mct_mp_i 254 ice_comp_mct.F90
cesm.exe 0000000000510803 ccsm_comp_mod_mp_ 1152 ccsm_comp_mod.F90
cesm.exe 0000000000517396 MAIN__ 90 ccsm_driver.F90
cesm.exe 00000000004F3D0C Unknown Unknown Unknown
libc.so.6 000000336361ECDD Unknown Unknown Unknown
cesm.exe 00000000004F3C09 Unknown Unknown Unknown
--------------------------------------------------------------------------
mpirun has exited due to process rank 0 with PID 21770 on
node n225 exiting improperly. There are two reasons this could occur:

1. this process did not call "init" before exiting, but others in
the job did. This can cause a job to hang indefinitely while it waits
for all processes to call "init". By rule, if one process calls "init",
then ALL processes must call "init" prior to termination.

2. this process called "init", but exited without calling "finalize".
By rule, all processes that call "init" MUST call "finalize" prior to
exiting or it will be considered an "abnormal termination".

This may have caused other processes in the application to be
terminated by signals sent by mpirun (as reported here).
--------------------------------------------------------------------------

Thank you very much!

Best regards,
yao zhixiong
set mpich = /global/software/openmpi-1.6.5-intel
setenv INC_NETCDF ${netcdf}/include
setenv LIB_NETCDF ${netcdf}/lib
setenv INC_MPI ${mpich}/include
setenv LIB_MPI ${mpich}/lib

***Macros***
CPPDEFS += -DFORTRANUNDERSCORE -DNO_R16 -DLINUX -DCPRINTEL
SLIBS += -L$(LIB_NETCDF) -lnetcdf -lnetcdff
CFLAGS := -O2 -fp-model precise
CONFIG_ARGS :=
CXX_LDFLAGS := -cxxlib
CXX_LINKER := FORTRAN
ESMF_LIBDIR :=
FC_AUTO_R8 := -r8
FFLAGS := -O2 -fp-model source -convert big_endian -assume byterecl -ftz -traceback
FFLAGS_NOOPT := -O0
FIXEDFLAGS := -fixed -132
FREEFLAGS := -free
MPICC := mpicc
MPICXX := mpicxx
MPIFC := mpif90
MPI_LIB_NAME :=
MPI_PATH := /global/software/openmpi-1.6.5-intel
NETCDF_PATH := /global/software/netcdf/netcdf-4.2.1.1-intel-rh6
PNETCDF_PATH :=
SCC := icc
SCXX := icpc
SFC := ifort
SUPPORTS_CXX := TRUE
ifeq ($(DEBUG), TRUE)
FFLAGS += -g -CU -check pointers -fpe0
endif
ifeq ($(compile_threaded), true)
LDFLAGS += -openmp
CFLAGS += -openmp
FFLAGS += -openmp
endif

***part of error information***
CalcWorkPerBlock: Total blocks: 290 Ice blocks: 113 IceFree blocks: 147 Land blocks: 30
forrtl: No such file or directory
forrtl: severe (29): file not found, unit 82, file /global/scratch/zhixiong/CESM/input_data/ice/cice/iced.0001-01-01.gx3v7_20080212
Image PC Routine Line Source
cesm.exe 0000000001A85AAA Unknown Unknown Unknown
cesm.exe 0000000001A845A6 Unknown Unknown Unknown
cesm.exe 0000000001A3A930 Unknown Unknown Unknown
cesm.exe 00000000019CECCE Unknown Unknown Unknown
cesm.exe 00000000019CE20F Unknown Unknown Unknown
cesm.exe 00000000019E5B0D Unknown Unknown Unknown
cesm.exe 0000000000D2CE00 ice_read_write_mp 86 ice_read_write.F90
cesm.exe 0000000000D336B4 ice_restart_mp_re 722 ice_restart.F90
cesm.exe 0000000000DC10BF cice_initmod_mp_c 132 CICE_InitMod.F90
cesm.exe 0000000000C49D0B ice_comp_mct_mp_i 254 ice_comp_mct.F90
cesm.exe 0000000000510803 ccsm_comp_mod_mp_ 1152 ccsm_comp_mod.F90
cesm.exe 0000000000517396 MAIN__ 90 ccsm_driver.F90
cesm.exe 00000000004F3D0C Unknown Unknown Unknown
libc.so.6 000000336361ECDD Unknown Unknown Unknown
cesm.exe 00000000004F3C09 Unknown Unknown Unknown
--------------------------------------------------------------------------
mpirun has exited due to process rank 0 with PID 21770 on
node n225 exiting improperly. There are two reasons this could occur:

1. this process did not call "init" before exiting, but others in
the job did. This can cause a job to hang indefinitely while it waits
for all processes to call "init". By rule, if one process calls "init",
then ALL processes must call "init" prior to termination.

2. this process called "init", but exited without calling "finalize".
By rule, all processes that call "init" MUST call "finalize" prior to
exiting or it will be considered an "abnormal termination".

This may have caused other processes in the application to be
terminated by signals sent by mpirun (as reported here).
--------------------------------------------------------------------------

Thank you very much!

Best regards,
yao zhixiong