Monday, September 14, 2020

Taiwania: NWChem 7.0.0 (release 20200226) compile log (Intel Compiler 18.0.1.163)

 clogin2: Red Hat Enterprise Linux Server release 7.3 (Maipo)
Intel Compiler Version 18.0.1.163 Build 20171018
gcc v6.3.0, Python v2.7.5

module load intel/2018_u1 
module load cuda/8.0.61 
module load mvapich2/gcc/64/2.2rc1 
# If you want to use gcc 6.3.0 to work with Intel Compiler 18
module load gcc/6.3.0 
echo $MKLROOT

# Should be /pkg/intel/2018_u1/compilers_and_libraries_2018.1.163/linux/mkl

# Get the master release if CUDA is not being compiled.
cd /home/molpro/src
mkdir nwchem-6.8.1.opa.scalapack.cuda-tce
cd nwchem-6.8.1.opa.scalapack.cuda-tce
unzip ../nwchem-6.8.1-20180206.zip

mv nwchem-master nwchem-6.8.1

 
# Or get the 6.8.1 branch to support multiple CUDA cards within a single node. Thanks to Edoardo Aprà!
git clone -b hotfix/release-6-8 https://github.com/nwchemgit/nwchem nwchem-6.8.1


# Get NWChem 7.0.0 master release from github.com
mkdir /lscratch/nwchem ; cd /lscratch/nwchem  

wget https://github.com/nwchemgit/nwchem/releases/download/v7.0.0-release/nwchem-7.0.0-release.revision-2c9a1c7c-src.2020-02-26.tar.bz2

mkdir -p Casper/i18gcc6

tar jxf nwchem-7.0.0-release.revision-2c9a1c7c-src.2020-02-26.tar.bz2 -C Casper/i18gcc6/

cd Casper/i18gcc6

export NWCHEM_ROOT=/lscratch/nwchem/Casper/i18gcc6
cd $NWCHEM_ROOT

Intel MPI
export MPI_ROOT=$I_MPI_ROOT/intel64
export MPICC=$MPI_ROOT/bin/mpiicc
export MPICXX=$MPI_ROOT/bin/mpiicpc
export MPIFC=$MPI_ROOT/bin/mpiifort

# Starting here, refer to Jeff Hammond's page for NWChem with the Intel Omni-Path Architecture
#    https://github.com/jeffhammond/HPCInfo/blob/master/ofi/NWChem-OPA.md
# Required minimum versions of tools (already installed under /pkg/chem/sys/bin):
# M4_VERSION=1.4.17
# LIBTOOL_VERSION=2.4.4
# AUTOCONF_VERSION=2.69
# AUTOMAKE_VERSION=1.15
export PATH="/pkg/chem/sys/bin:$PATH"
export NWCHEM_ROOT=/lscratch/nwchem/Casper/i18gcc6
cd $NWCHEM_ROOT
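
# Quick optional check that the tools picked up from /pkg/chem/sys/bin really meet the minimum versions above:
which m4 libtool autoconf automake
m4 --version | head -1 ; libtool --version | head -1
autoconf --version | head -1 ; automake --version | head -1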


libfabric and ARMCI-MPI are required for the Intel Omni-Path Architecture; Casper is optional.

###
libfabric:

#  /cm/local/apps/gcc/6.3.0/lib64 must come before /cm/local/apps/gcc/6.3.0/lib
#  in the $LD_LIBRARY_PATH variable, otherwise the build will fail!
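
One way to get that ordering (a sketch; the path comes from the note above and assumes the gcc/6.3.0 module is loaded) is simply to prepend the lib64 directory:

# put the gcc 6.3.0 lib64 directory ahead of everything else, including .../lib
export LD_LIBRARY_PATH="/cm/local/apps/gcc/6.3.0/lib64:${LD_LIBRARY_PATH}"
echo $LD_LIBRARY_PATH    # verify lib64 now appears before lib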

wget https://github.com/ofiwg/libfabric/archive/master.zip
unzip master.zip
mv libfabric-master libfabric
cd $NWCHEM_ROOT/libfabric/
./autogen.sh
mkdir $NWCHEM_ROOT/libfabric/build

cd $NWCHEM_ROOT/libfabric/build
../configure CC=icc CXX=icpc --enable-psm2 --disable-udp --disable-sockets --disable-rxm \
   --prefix=$NWCHEM_ROOT/deps
## The default gcc 4.8.3 does not work; using gcc 6.3.0 fixed the problem.
make -j 16 >& make.log &
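# (the build runs in the background; wait for it to finish, e.g. with "wait" or by checking make.log, before running make install)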
make install
cd $NWCHEM_ROOT

# Choose either ARMCI-MPI only, or ARMCI-MPI with additional Casper support.
# If Casper support is chosen, install Casper before ARMCI-MPI!

### Casper: See https://www.mcs.anl.gov/project/casper for details of asynchronous progress model
cd $NWCHEM_ROOT

git clone https://github.com/pmodels/casper
cd $NWCHEM_ROOT/casper
Ming Si's instructions to resolve the hwloc problem:

git submodule init
git submodule update
# Fall back to Jeff's instructions:
./autogen.sh
mkdir $NWCHEM_ROOT/casper/build

cd $NWCHEM_ROOT/casper/build
../configure CC=$MPICC --prefix=$NWCHEM_ROOT/deps
make -j 16 >& make.log &
make install
cd $NWCHEM_ROOT 
### End of Casper 

### ARMCI-MPI:
git clone --depth 10 https://github.com/jeffhammond/armci-mpi.git || \
wget https://github.com/jeffhammond/armci-mpi/archive/master.zip && \
unzip master.zip
cd armci-mpi
./autogen.sh
mkdir $NWCHEM_ROOT/armci-mpi/build

cd $NWCHEM_ROOT/armci-mpi/build
../configure MPICC=$MPICC MPIEXEC=$MPI_ROOT/bin/mpirun --enable-win-allocate --enable-explicit-progress \
  --prefix=$NWCHEM_ROOT/deps
configure: WARNING: unrecognized options: --enable-win-allocate, --enable-explicit-progress
make -j 16 >& make.log &
make install
# Now testing ARMCI-MPI
make checkprogs -j8 | tee checkprogs.log
make check MPIEXEC="$MPI_ROOT/bin/mpirun -n 2" | tee check-mpiexec.log
# Avoid loading the mvapich2 module to eliminate the following three test failures:
FAIL:  3
# FAIL: tests/test_malloc
# FAIL: tests/test_malloc_irreg
# FAIL: tests/contrib/armci-test

### End of ARMCI-MPI

 

# Continue to compile NWChem. gcc versions > 5 (such as 6.3.0) cannot compile CUDA's memory.cu,
# so set "nvcc --compiler-bindir=<path to older GCC>" to use the older gcc 4.8.5; see the bashrc below.
module unload gcc/6.3.0
cd $NWCHEM_ROOT
source ~nwchem/nwchem-config-backup/bashrc.nwchem700.opa.scalapack.cuda-tce.Casper
cd $NWCHEM_TOP/src
make nwchem_config >& nwchem_config.log &
make -j 32 >& make.log &
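
# Optional sanity check that the TCE CUDA code was linked in (binary path follows NWCHEM_TOP and NWCHEM_TARGET above):
ldd $NWCHEM_TOP/bin/LINUX64/nwchem | grep -E 'cudart|cublas'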

# End of NWChem compilation #
# Refer to Jeff Hammond's page above to set up the mpirun wrapper script that works with Casper.
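
A minimal sketch of such a wrapper (illustrative only -- follow Jeff's page for the exact recipe; it assumes Casper's CSP_NG variable sets the number of ghost processes per node, and that those ghost ranks must be added to the job size passed to mpirun):

#!/bin/bash
# run-nwchem-casper.sh (hypothetical name)
export CSP_NG=${CSP_NG:-1}              # ghost processes per node for asynchronous progress
PPN=${PPN:-36}                          # compute ranks per node (illustrative)
NODES=${NODES:-1}
TOTAL=$(( NODES * (PPN + CSP_NG) ))     # Casper's ghost ranks are part of the MPI job size
$MPI_ROOT/bin/mpirun -n $TOTAL -ppn $(( PPN + CSP_NG )) \
    $NWCHEM_TOP/bin/LINUX64/nwchem "$@"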

Contents of ~nwchem/nwchem-config-backup/bashrc.nwchem700.opa.scalapack.cuda-tce.Casper
export NWCHEM_ROOT=/lscratch/nwchem/Casper/i18gcc6
export NWCHEM_TOP="${NWCHEM_ROOT}/nwchem-7.0.0"
export NWCHEM_TARGET=LINUX64
export USE_PYTHONCONFIG=y
export USE_PYTHON64=y
export PYTHONVERSION=2.7
export PYTHONHOME=/usr

export NWCHEM_MODULES="all python"
export MRCC_METHODS=TRUE

export EACCSD=y
export IPCCSD=y


export CUDA="nvcc --compiler-bindir=/usr/bin"
export TCE_CUDA=Y
export CUDA_LIBS="-L/pkg/cuda/8.0.61/lib64 -lcudart -lcublas -lstdc++"
export CUDA_FLAGS="-arch sm_60 "
export CUDA_ARCH="-arch sm60"
export CUDA_INCLUDE="-I. -I/pkg/cuda/8.0.61/include"

export USE_OPENMP=T
export ARMCI_NETWORK=ARMCI
export EXTERNAL_ARMCI_PATH=${NWCHEM_ROOT}/deps
MPI_DIR=${MPI_ROOT}
export USE_MPI=y
export USE_MPIF=y
export USE_MPIF4=y
export MPI_LIB="${MPI_DIR}/lib"
export MPI_INCLUDE="${MPI_DIR}/include"
MPICH_LIBS="-lmpifort -lmpi"
SYS_LIBS="-ldl -lrt -lpthread -static-intel"
export LIBMPI="-L${MPI_DIR}/lib -Wl,-rpath -Wl,${MPI_DIR}/lib ${MPICH_LIBS} ${SYS_LIBS}"
export CC=icc
export CXX=icpc
export FC=ifort
export F77=ifort

export BLAS_SIZE=8
export BLASOPT="-mkl=parallel -qopenmp"
export LAPACK_SIZE=8
export LAPACK_LIB="$BLASOPT"
export LAPACK_LIBS="$BLASOPT"
export USE_SCALAPACK=y
export SCALAPACK_SIZE=8
export SCALAPACK="-L${MKLROOT}/lib/intel64 -lmkl_scalapack_ilp64 -lmkl_intel_ilp64 -lmkl_intel_thread \
 -lmkl_core -lmkl_blacs_intelmpi_ilp64 -liomp5 -lpthread -lm -ldl"

# End of ~nwchem/nwchem-config-backup/bashrc.nwchem700.opa.scalapack.cuda-tce.Casper



Thursday, May 14, 2020

Wild: Old disk note and new machine configuration



OLD wild disk note: Slackware 10.2.0 32bit kernel 3.2.42smp
Gigabyte GA-G33M-DS2R BIOS F9a

/dev/sda: 250GB
ATA device, with non-removable media
        Model Number:       WDC WD2500KS-00MJB0
        Serial Number:      WD-WCANK1766523
        Firmware Revision:  02.01C03

/dev/sdb: 250GB
ATA device, with non-removable media
        Model Number:       WDC WD2500KS-00MJB0
        Serial Number:      WD-WCANK1799460
        Firmware Revision:  02.01C03

/dev/sdc: 80GB
ATA device, with non-removable media
        Model Number:       ST380023AS
        Serial Number:      3KB11LPQ
        Firmware Revision:  3.01

/dev/sdd: 80GB (disk close to failing)
ATA device, with non-removable media
        Model Number:       ST380013AS
        Serial Number:      3JV5H92P
        Firmware Revision:  3.05

/dev/sde: 750GB
ATA device, with non-removable media
        Model Number:       WDC WD7500BPKX-22HPJT0
        Serial Number:      WD-WXC1A84E65DT
        Firmware Revision:  01.01A01

/dev/sdf: 750GB
ATA device, with non-removable media
        Model Number:       WDC WD7500BPKX-22HPJT0
        Serial Number:      WD-WXL1E840S5UV
        Firmware Revision:  01.01A01
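
Since /dev/sdd is close to failing, a quick health check before trusting md4 (a minimal sketch; smartmontools and mdadm are assumed to be installed):

# SMART self-assessment and error log of the suspect disk
smartctl -H -a /dev/sdd
# current state of the mirror that still uses it
mdadm --detail /dev/md4
# if it has to be pulled: mark the member faulty, then remove it
# mdadm /dev/md4 --fail /dev/sdd2 --remove /dev/sdd2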


# cat /proc/mdstat
Personalities : [linear] [raid1]
md1 : active raid1 sdb3[1] sda3[0]
      20008832 blocks [2/2] [UU]

md5 : active raid1 sdb5[0] sda5[1]
      203173952 blocks [2/2] [UU]

md4 : active raid1 sdd2[1] sdc2[0]
      77150208 blocks [2/2] [UU]

md10 : active raid1 sdf2[0] sde2[1]
      250011456 blocks [2/2] [UU]

md11 : active raid1 sdf3[0] sde3[1]
      466929152 blocks [2/2] [UU]

md0 : active raid1 sdb2[1] sda2[0]
      20008832 blocks [2/2] [UU]


# mount
/dev/md1 on / type reiserfs (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
/dev/md10 on /user type xfs (rw)
/dev/md5 on /user1 type xfs (rw)
/dev/md11 on /rd11 type xfs (rw)
usbfs on /proc/bus/usb type usbfs (rw)



# cat /etc/fstab 
/dev/sda1       swap            swap    defaults        0 0
/dev/sdb1       swap            swap    defaults        0 0
/dev/sdc1       swap            swap    defaults        0 0
/dev/sdd1       swap            swap    defaults        0 0
# /dev/md1: UUID="4b0da317-e066-459d-9785-724527b3d6b5" TYPE="reiserfs"
/dev/md1         /                reiserfs    defaults         1   1
/dev/cdrom       /mnt/cdrom       auto        noauto,owner,ro  0   0
/dev/fd0         /mnt/floppy      auto        noauto,owner     0   0
devpts           /dev/pts         devpts      gid=5,mode=620   0   0
proc             /proc            proc        defaults         0   0
# md4: UUID="4898a5e7-6890-47fb-8fc1-94837fdd9cf7" SEC_TYPE="ext2" TYPE="ext3"
# /dev/md4       /user            ext3        defaults          1   2
# 20200222: switched md4 to md10 due to /dev/sdd disk failure
# md10: UUID="1271ca1f-4b62-4cb8-8d56-c372f10ed8be" TYPE="xfs"
/dev/md10       /user            xfs         defaults          1   2
# /dev/md5: UUID="abb266a0-19f8-455d-b7cd-a9eb90dff396" TYPE="xfs"
/dev/md5       /user1           xfs    defaults          1   2
# /dev/md11: UUID="24ec35de-912a-488b-91fe-cb9540515953" TYPE="xfs"
#/dev/md11       /rd11           xfs    defaults          1   2
UUID="24ec35de-912a-488b-91fe-cb9540515953" /rd11 xfs defaults 1   2
# mount smbfs by hand
# mount -t smbfs -o username=ccu //172.22.222.241/disk1_pt1 /user/disk1_pt1


# df
Filesystem           1K-blocks      Used Available Use% Mounted on
/dev/md1              20008216  12256988   7751228  62% /
/dev/md10            249889384  62302000 187587384  25% /user
/dev/md5             203074688  56987000 146087688  29% /user1
/dev/md11            466798080  67465728 399332352  15% /rd11



NEW wild disk note: Ubuntu Desktop 20.04 64bit kernel 5.4.0-26-generic SMP
Gigabyte GA-EG45M-DS2H BIOS F4

/dev/sda: 250GB
ATA device, with non-removable media
        Model Number:       WDC WD2500KS-00MJB0
        Serial Number:      WD-WCANK1766523
        Firmware Revision:  02.01C03

/dev/sdb: 250GB
ATA device, with non-removable media
        Model Number:       WDC WD2500KS-00MJB0
        Serial Number:      WD-WCANK1799460
        Firmware Revision:  02.01C03

/dev/sdc: 80GB
ATA device, with non-removable media
        Model Number:       ST380023AS
        Serial Number:      3KB11LPQ
        Firmware Revision:  3.01

/dev/sdd: 80GB (disk close to failing)
ATA device, with non-removable media
        Model Number:       ST380013AS
        Serial Number:      3JV5H92P
        Firmware Revision:  3.05

/dev/sde: 750GB
ATA device, with non-removable media
        Model Number:       WDC WD7500BPKX-22HPJT0
        Serial Number:      WD-WXC1A84E65DT
        Firmware Revision:  01.01A01

/dev/sdf: 750GB
ATA device, with non-removable media
        Model Number:       WDC WD7500BPKX-22HPJT0
        Serial Number:      WD-WXL1E840S5UV
        Firmware Revision:  01.01A01


# cat /proc/mdstat
Personalities : [linear] [raid1]
md1 : active raid1 sdb3[1] sda3[0]
      20008832 blocks [2/2] [UU]

md5 : active raid1 sdb5[0] sda5[1]
      203173952 blocks [2/2] [UU]

md4 : active raid1 sdd2[1] sdc2[0]
      77150208 blocks [2/2] [UU]

md10 : active raid1 sdf2[0] sde2[1]

# uptime
 14:10:21 up 10 days, 18:02,  1 user,  load average: 0.00, 0.01, 0.05





Monday, July 02, 2018

Compile MOLCAS 8.2 at ccuf1 with OpenMPI 3.0.0 and libhdf5-1.10.2

Note that the libhdf5 installed at ccuf1 supports OpenMPI, not MPICH2:
apt-get install libhdf5-openmpi-dev

Use the new libhdf5-1.10.2 compiled at ccuf1 and installed under
/pkg1/local/lib 
(libhdf5 configured with
./configure --prefix=/pkg1/local --enable-parallel --enable-fortran --enable-direct-vfd, and compiled with Intel Fortran)

Enter bash
. /pkg1/intel/compilers_and_libraries_2018.1.163/linux/bin/compilervars.sh intel64
export PATH="/pkg1/local/openmpi-3.0.0-i18/bin:$PATH"
export LD_LIBRARY_PATH="/pkg1/local/openmpi-3.0.0-i18/lib:$LD_LIBRARY_PATH"

mkdir /temp/molcas82.hdf5
cd /temp/molcas82.hdf5
tar zxf /f01/source/chem/molcas/molcas82.tar.gz
cd molcas82
cp /f01/source/chem/molcas/license.dat.gz .
gzip -d license.dat.gz 
vi cfg/intel.comp
   and add -prec-sqrt to OPT='-O3 -no-prec-div -static -xHost' if -speed fast is set
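
With that change, the OPT line for -speed fast in cfg/intel.comp should end up reading roughly:
OPT='-O3 -no-prec-div -prec-sqrt -static -xHost'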

(Run ./setup first to determine the configure parameters; the following was the conclusion:)
./configure -64 -parallel -compiler intel -mpiroot /pkg1/local/openmpi-3.0.0-i18 -mpirun /pkg1/local/openmpi-3.0.0-i18/bin -blas MKL -blas_lib BEGINLIST -Wl,--no-as-needed -L/pkg1/intel/compilers_and_libraries_2018.1.163/linux/mkl/lib/intel64 -lmkl_intel_ilp64 -lmkl_core -lmkl_sequential -lpthread -lm ENDLIST -hdf5_lib /pkg1/local/lib -hdf5_inc /pkg1/local/include

make -j 8 >& make.log &

If this fails, run make >& make.log2 & again without -j parallelism. After a successful build,
copy the whole directory to /pkg1/chem/molcas/molcas82.i18.ompi3.hdf5

Put the following line
/pkg1/chem/molcas/molcas82.i18.ompi3.hdf5
under your $HOME/.Molcas/molcas 
and run with the script
/usr/local/bin/molcas82.i18.ompi3.hdf5

Also include /pkg1/local/lib in $LD_LIBRARY_PATH at run time if libhdf5.so.101 is not found.
In addition, pay attention to the ld warning at the linking stage:
ld: warning: libmpi.so.12, needed by /pkg1/local/lib/libhdf5.so, may conflict with libmpi.so.40 
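
A minimal sketch of what the wrapper script above could look like (illustrative only; the installed script and the driver location may differ), mainly to show where /pkg1/local/lib enters LD_LIBRARY_PATH at run time:

#!/bin/bash
# /usr/local/bin/molcas82.i18.ompi3.hdf5 -- illustrative wrapper
export PATH="/pkg1/local/openmpi-3.0.0-i18/bin:$PATH"
export LD_LIBRARY_PATH="/pkg1/local/lib:/pkg1/local/openmpi-3.0.0-i18/lib:$LD_LIBRARY_PATH"
# driver path is an assumption; $HOME/.Molcas/molcas already points at this install
exec /pkg1/chem/molcas/molcas82.i18.ompi3.hdf5/molcas "$@"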

************ NECI test compile with MOLCAS 8.4  ************
Test 1:
cmake -DENABLE_HDF5=ON -DFFTW=ON -DMPI=ON -DSHARED_MEMORY=ON \
      -DCMAKE_C_COMPILER=mpicc -DCMAKE_CXX_COMPILER=mpicxx \
      -DCMAKE_Fortran_COMPILER=mpif77 ../../neci/



Test 2:

Tuesday, April 03, 2018

Taiwania: NWChem 6.8.1 (release 20180206) compile log (Intel Compiler 18.0.1.163)

glogin1: Red Hat Enterprise Linux Server release 7.3 (Maipo)
Intel Compiler Version 18.0.1.163 Build 20171018
gcc v4.8.5, Python v2.7.5

module load intel/2018_u1 
module load cuda/8.0.61 
module load mvapich2/gcc/64/2.2rc1 
# If you want to use gcc 6.3.0 to work with Intel Compiler 18
module load gcc/6.3.0 
echo $MKLROOT

/pkg/intel/2018_u1/compilers_and_libraries_2018.1.163/linux/mkl

# Get the master release if CUDA is not being compiled.
cd /home/molpro/src
mkdir nwchem-6.8.1.opa.scalapack.cuda-tce
cd nwchem-6.8.1.opa.scalapack.cuda-tce
unzip ../nwchem-6.8.1-20180206.zip

mv nwchem-master nwchem-6.8.1


# Or get the 6.8.1 branch to support multiple CUDA cards within a single node. Thanks to Edoardo Aprà!
git clone -b hotfix/release-6-8 https://github.com/nwchemgit/nwchem nwchem-6.8.1

# Starting here, refer to Jeff Hammond's page 
# https://github.com/jeffhammond/HPCInfo/blob/master/ofi/NWChem-OPA.md
# Required minimum versions of tools:
# M4_VERSION=1.4.17
# LIBTOOL_VERSION=2.4.4
# AUTOCONF_VERSION=2.69
# AUTOMAKE_VERSION=1.15
export PATH="$HOME/local/bin:$PATH"
export NWCHEM_ROOT=/home/molpro/src/nwchem-6.8.1.opa.scalapack.cuda-tce
cd $NWCHEM_ROOT

libfabric
wget https://github.com/ofiwg/libfabric/archive/master.zip
unzip master.zip
mv libfabric-master libfabric
cd $NWCHEM_ROOT/libfabric/
./autogen.sh
mkdir $NWCHEM_ROOT/libfabric/build

cd $NWCHEM_ROOT/libfabric/build
../configure CC=icc CXX=icpc --enable-psm2 --disable-udp --disable-sockets --disable-rxm \
   --prefix=$NWCHEM_ROOT/deps
## The default gcc 4.8.3 does not work; using gcc 6.3.0 fixed it.
make -j 16 >& make.log &
make install
cd $NWCHEM_ROOT

Intel MPI
export MPI_ROOT=$I_MPI_ROOT/intel64
export MPICC=$MPI_ROOT/bin/mpiicc
export MPICXX=$MPI_ROOT/bin/mpiicpc
export MPIFC=$MPI_ROOT/bin/mpiifort

Casper
cd $NWCHEM_ROOT
git clone https://github.com/pmodels/casper
cd $NWCHEM_ROOT/casper
Ming Si's instructions:
git submodule init
git submodule update
# Fall back to Jeff's instructions:
./autogen.sh
mkdir $NWCHEM_ROOT/casper/build

cd $NWCHEM_ROOT/casper/build
../configure CC=$MPICC --prefix=$NWCHEM_ROOT/deps
make -j 16 >& make.log &
make install
cd $NWCHEM_ROOT

ARMCI-MPI
git clone --depth 10 https://github.com/jeffhammond/armci-mpi.git || \
wget https://github.com/jeffhammond/armci-mpi/archive/master.zip && \
unzip master.zip
cd armci-mpi
./autogen.sh
mkdir $NWCHEM_ROOT/armci-mpi/build

cd $NWCHEM_ROOT/armci-mpi/build
../configure MPICC=$MPICC MPIEXEC=$MPI_ROOT/bin/mpirun --enable-win-allocate --enable-explicit-progress \
  --prefix=$NWCHEM_ROOT/deps
configure: WARNING: unrecognized options: --enable-win-allocate, --enable-explicit-progress
make -j 16 >& make.log &
make install
# Now testing ARMCI-MPI
make checkprogs -j8 | tee checkprogs.log
make check MPIEXEC="$MPI_ROOT/bin/mpirun -n 2" | tee check-mpiexec.log
# Not loading the mvapich2 module eliminates the following three errors:
FAIL:  3
# FAIL: tests/test_malloc
# FAIL: tests/test_malloc_irreg
# FAIL: tests/contrib/armci-test

# Continue to compile NWChem. gcc versions > 5 (such as 6.3.0) cannot compile CUDA's memory.cu,
# so set "nvcc --compiler-bindir=<path to older GCC>" to use the older gcc.
module unload gcc/6.3.0
cd $NWCHEM_ROOT
source ../bashrc.nwchem.opa.scalapack.cuda-tce
cd $NWCHEM_TOP/src
make nwchem_config >& nwchem_config.log &
make -j 32 >& make.log &

# End of NWChem compilation #
# Refer to Jeff Hammond's page to set up the mpirun wrapper script that works with Casper.

Contents of bashrc.nwchem.opa.scalapack.cuda-tce
export NWCHEM_ROOT=/home/molpro/src/nwchem-6.8.1.opa.scalapack.cuda-tce
export NWCHEM_TOP="${NWCHEM_ROOT}/nwchem-6.8.1"
export NWCHEM_TARGET=LINUX64
export USE_PYTHONCONFIG=y
export USE_PYTHON64=y
export PYTHONVERSION=2.7
export PYTHONHOME=/usr

export NWCHEM_MODULES="all python"
export MRCC_METHODS=TRUE

export CUDA="nvcc --compiler-bindir=/usr/bin"
export TCE_CUDA=Y
export CUDA_LIBS="-L/pkg/cuda/8.0.61/lib64 -lcudart -lcublas -lstdc++"
export CUDA_FLAGS="-arch sm_60 "
export CUDA_ARCH="-arch sm60"
export CUDA_INCLUDE="-I. -I/pkg/cuda/8.0.61/include"

export USE_OPENMP=T
export ARMCI_NETWORK=ARMCI
export EXTERNAL_ARMCI_PATH=${NWCHEM_ROOT}/deps
MPI_DIR=${MPI_ROOT}
export USE_MPI=y
export USE_MPIF=y
export USE_MPIF4=y
export MPI_LIB="${MPI_DIR}/lib"
export MPI_INCLUDE="${MPI_DIR}/include"
MPICH_LIBS="-lmpifort -lmpi"
SYS_LIBS="-ldl -lrt -lpthread -static-intel"
export LIBMPI="-L${MPI_DIR}/lib -Wl,-rpath -Wl,${MPI_DIR}/lib ${MPICH_LIBS} ${SYS_LIBS}"
export CC=icc
export CXX=icpc
export FC=ifort
export F77=ifort

export BLAS_SIZE=8
export BLASOPT="-mkl=parallel -qopenmp"
export LAPACK_SIZE=8
export LAPACK_LIB="$BLASOPT"
export LAPACK_LIBS="$BLASOPT"
export USE_SCALAPACK=y
export SCALAPACK_SIZE=8
export SCALAPACK="-L${MKLROOT}/lib/intel64 -lmkl_scalapack_ilp64 -lmkl_intel_ilp64 -lmkl_intel_thread \
 -lmkl_core -lmkl_blacs_intelmpi_ilp64 -liomp5 -lpthread -lm -ldl"



Tuesday, February 06, 2018

NWChem 6.8 for Intel Xeon Phi 7120P (KNC) compile log -- must use Intel Compiler 2017.4.196

q183 Phi: CentOS 6.6 (Final)
Intel Compiler Version 18.0.1.163 Build 20171018
Python v2.6.6
(see also http://www.nwchem-sw.org/index.php/Compiling_NWChem)

. /opt/intel/compilers_and_libraries_2018.1.163/linux/bin/compilervars.sh intel64
echo $MKLROOT

/opt/intel/compilers_and_libraries_2018.1.163/linux/mkl

cd /phi
tar jxf /f01/source/chem/nwchem/nwchem-6.8-release.revision-v6.8-47-gdf6c956-srconly.2017-12-14.tar.bz2
cd nwchem-6.8/src

# apply patches

# Compilation setup for Phi
export NWCHEM_TOP=/phi/nwchem-6.8
export USE_MPI=y
export USE_MPIF=y
export USE_MPIF4=y
export NWCHEM_TARGET=LINUX64
export USE_PYTHONCONFIG=y
export PYTHONHOME=/usr
export PYTHONVERSION=2.6
export FC=ifort
export CC=icc
export CXX=icpc

export USE_OPENMP=1
export USE_OFFLOAD=1

export BLASOPT="-mkl -qopenmp   -lpthread -lm"
export USE_SCALAPACK=y
export SCALAPACK="-mkl -qopenmp -lmkl_scalapack_ilp64 -lmkl_blacs_intelmpi_ilp64 -lpthread -lm"

export NWCHEM_MODULES="all python"
export MRCC_METHODS=TRUE

### compilation error @libtce.a(ccsd_t.o): Intel Compiler 18 does not support KNC offload
### Trying Intel Compiler 17.0.4.196 Build 20170411

. /opt/intel/compilers_and_libraries_2017.4.196/linux/bin/compilervars.sh intel64
echo $MKLROOT

/opt/intel/compilers_and_libraries_2017.4.196/linux/mkl
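
Before rebuilding everything, a quick check that this compiler can still build a KNC offload region (a minimal sketch using the pragma-based offload model; nothing NWChem-specific, and offload-related compiler flags are left at their defaults):

cat > offload_test.c << 'EOF'
#include <stdio.h>
int main(void) {
    int on_mic = 0;
    /* offload region: runs on the coprocessor when one is present */
    #pragma offload target(mic) inout(on_mic)
    {
#ifdef __MIC__
        on_mic = 1;
#endif
    }
    printf("offload region %s\n", on_mic ? "ran on the MIC" : "fell back to the host");
    return 0;
}
EOF
icc offload_test.c -o offload_test && ./offload_test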

cd /phi/jsyu/git
git clone https://github.com/nwchemgit/nwchem.git

Initialized empty Git repository in /phi/jsyu/git/nwchem/.git/
remote: Counting objects: 238809, done.
remote: Compressing objects: 100% (63/63), done.
remote: Total 238809 (delta 45), reused 44 (delta 23), pack-reused 238723
Receiving objects: 100% (238809/238809), 280.07 MiB | 13.34 MiB/s, done.
Resolving deltas: 100% (191961/191961), done.

cd nwchem/src 

# this is nwchem-6.8.1

# Compilation setup for Phi
export NWCHEM_TOP=/phi/jsyu/git/nwchem
export USE_MPI=y
export USE_MPIF=y
export USE_MPIF4=y
export NWCHEM_TARGET=LINUX64
export USE_PYTHONCONFIG=y
export PYTHONHOME=/usr
export PYTHONVERSION=2.6
export FC=ifort
export CC=icc
export CXX=icpc

export USE_OPENMP=1
export USE_OFFLOAD=1

export BLASOPT="-mkl -qopenmp   -lpthread -lm"
export USE_SCALAPACK=y
export SCALAPACK="-mkl -qopenmp -lmkl_scalapack_ilp64 -lmkl_blacs_intelmpi_ilp64 -lpthread -lm"

export NWCHEM_MODULES="all python"
export MRCC_METHODS=TRUE

make nwchem_config >& nwchem_config.log &
make -j 20 >& make.log &