diff --git a/Doxyfile b/Doxyfile
index bc4d1c21..32b87fcc 100644
--- a/Doxyfile
+++ b/Doxyfile
@@ -919,6 +919,7 @@ FILE_PATTERNS = *.c \
                          *.py \
                          *.pyw \
                          *.f90 \
+                         *.F90 \
                          *.f95 \
                          *.f03 \
                          *.f08 \
diff --git a/Makefile b/Makefile
index 1b0ba938..df13c167 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,3 @@
-#!/bin/sh
-
 ############################################
 # This is the "classic" Makefile for SPEC. #
 ############################################
@@ -9,14 +7,26 @@
 # are defined there as well.
 include SPECfile

-# files to be preprocessed by m4
-PREPROC=$(ALLSPEC:=_m.F90)
-
 ROBJS=$(SPECFILES:=_r.o)
 DOBJS=$(SPECFILES:=_d.o)

-ROBJS_IO=$(IOFILES:=_r.o)
-DOBJS_IO=$(IOFILES:=_d.o)
+ROBJS_LEVEL_0=$(LEVEL_0:=_r.o)
+DOBJS_LEVEL_0=$(LEVEL_0:=_d.o)
+
+ROBJS_LEVEL_1=$(LEVEL_1:=_r.o)
+DOBJS_LEVEL_1=$(LEVEL_1:=_d.o)
+
+ROBJS_LEVEL_2=$(LEVEL_2:=_r.o)
+DOBJS_LEVEL_2=$(LEVEL_2:=_d.o)
+
+ROBJS_LEVEL_3=$(LEVEL_3:=_r.o)
+DOBJS_LEVEL_3=$(LEVEL_3:=_d.o)
+
+ROBJS_LEVEL_4=$(LEVEL_4:=_r.o)
+DOBJS_LEVEL_4=$(LEVEL_4:=_d.o)
+
+ROBJS_BASE=$(BASEFILES:=_r.o)
+DOBJS_BASE=$(BASEFILES:=_d.o)

 ###############################################################################################################################################################
@@ -24,123 +34,110 @@
 date:=$(shell date)
 text:=$(shell date +%F)

 ###############################################################################################################################################################
+# main executables

-xspec: $(addsuffix _r.o,$(ALLFILES)) $(MACROS) Makefile
+xspec: $(addsuffix _r.o,$(ALLFILES)) Makefile
 	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o xspec $(addsuffix _r.o,$(ALLFILES)) $(LINKS)

-dspec: $(addsuffix _d.o,$(ALLFILES)) $(MACROS) Makefile
+dspec: $(addsuffix _d.o,$(ALLFILES)) Makefile
 	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o dspec $(addsuffix _d.o,$(ALLFILES)) $(LINKS)

 ###############################################################################################################################################################

-# inputlist needs special handling: expansion of DSCREENLIST and NSCREENLIST using awk (not anymore !!!)
+# sfiles: contrib

-inputlist_r.o: %_r.o: src/inputlist.f90 $(MACROS)
-	m4 -P $(MACROS) src/inputlist.f90 > inputlist_m.F90
-	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o inputlist_r.o -c inputlist_m.F90 $(LIBS)
-	@wc -l -L -w inputlist_m.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+%_r.o: src/%.f
+	$(FC) $(FLAGS) $(RFLAGS) -o $*_r.o -c src/$*.f
+	@wc -l -L -w src/$*.f | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

-inputlist_d.o: %_d.o: src/inputlist.f90 $(MACROS)
-	m4 -P $(MACROS) src/inputlist.f90 > inputlist_m.F90
-	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o inputlist_d.o -c inputlist_m.F90 $(LIBS)
-	@wc -l -L -w inputlist_m.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+%_d.o: src/%.f
+	$(FC) $(FLAGS) $(DFLAGS) -o $*_d.o -c src/$*.f
+	@wc -l -L -w src/$*.f | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

 ###############################################################################################################################################################

-# global needs special handling: expansion of CPUVARIABLE, BSCREENLIST and WSCREENLIST using awk (not anymore !!!)
+# BASEFILES, depending on one another

-global_r.o: %_r.o: inputlist_r.o src/global.f90 $(MACROS)
-	m4 -P $(MACROS) src/global.f90 > global_m.F90
-	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o global_r.o -c global_m.F90 $(LIBS)
-	@wc -l -L -w global_m.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+$(ROBJS_LEVEL_0): %_r.o: src/%.F90
+	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o $*_r.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

-global_d.o: %_d.o: inputlist_d.o src/global.f90 $(MACROS)
-	m4 -P $(MACROS) src/global.f90 > global_m.F90
-	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o global_d.o -c global_m.F90 $(LIBS)
-	@wc -l -L -w global_m.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+$(DOBJS_LEVEL_0): %_d.o: src/%.F90
+	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o $*_d.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

-###############################################################################################################################################################
+$(ROBJS_LEVEL_1): %_r.o: src/%.F90 $(addsuffix _r.o,$(LEVEL_0))
+	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o $*_r.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+	@echo ''

-%_r.o: src/%.f
-	$(FC) $(FLAGS) $(RFLAGS) -o $*_r.o -c src/$*.f
-	@wc -l -L -w src/$*.f | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+$(DOBJS_LEVEL_1): %_d.o: src/%.F90 $(addsuffix _d.o,$(LEVEL_0))
+	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o $*_d.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

-%_d.o: src/%.f
-	$(FC) $(FLAGS) $(DFLAGS) -o $*_d.o -c src/$*.f
-	@wc -l -L -w src/$*.f | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+$(ROBJS_LEVEL_2): %_r.o: src/%.F90 mod_kinds_r.o $(addsuffix _r.o,$(LEVEL_1))
+	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o $*_r.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

-###############################################################################################################################################################
+$(DOBJS_LEVEL_2): %_d.o: src/%.F90 mod_kinds_d.o $(addsuffix _d.o,$(LEVEL_1))
+	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o $*_d.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+	@echo ''

-$(PREPROC): %_m.F90: src/%.f90 $(MACROS)
-	@awk -v file=$*.f90 '{ gsub("__LINE__", NR); gsub("__FILE__",file); print }' src/$*.f90 > $*_p.f90
-	m4 -P $(MACROS) $*_p.f90 > $*_m.F90
+$(ROBJS_LEVEL_3): %_r.o: src/%.F90 mod_kinds_r.o $(addsuffix _r.o,$(LEVEL_2))
+	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o $*_r.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+	@echo ''
+$(DOBJS_LEVEL_3): %_d.o: src/%.F90 mod_kinds_d.o $(addsuffix _d.o,$(LEVEL_2))
+	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o $*_d.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+	@echo ''

-$(ROBJS_IO): %_r.o: %_m.F90 $(addsuffix _r.o,$(BASEFILES)) $(MACROS)
-	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o $*_r.o -c $*_m.F90 $(LIBS)
-	@wc -l -L -w $*_m.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+$(ROBJS_LEVEL_4): %_r.o: src/%.F90 mod_kinds_r.o $(addsuffix _r.o,$(LEVEL_3))
+	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o $*_r.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

-$(DOBJS_IO): %_d.o: %_m.F90 $(addsuffix _d.o,$(BASEFILES)) $(MACROS)
-	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o $*_d.o -c $*_m.F90 $(LIBS)
-	@wc -l -L -w $*_m.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+$(DOBJS_LEVEL_4): %_d.o: src/%.F90 mod_kinds_d.o $(addsuffix _d.o,$(LEVEL_3))
+	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o $*_d.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

+###############################################################################################################################################################
+# SPECFILES: main physics part of SPEC

-$(ROBJS): %_r.o: %_m.F90 $(addsuffix _r.o,$(BASEFILES)) $(addsuffix _r.o,$(IOFILES)) $(MACROS)
-	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o $*_r.o -c $*_m.F90 $(LIBS)
-	@wc -l -L -w $*_m.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+$(ROBJS): %_r.o: src/%.F90 $(addsuffix _r.o,$(BASEFILES))
+	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o $*_r.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

-$(DOBJS): %_d.o: %_m.F90 $(addsuffix _d.o,$(BASEFILES)) $(addsuffix _d.o,$(IOFILES)) $(MACROS)
-	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o $*_d.o -c $*_m.F90 $(LIBS)
-	@wc -l -L -w $*_m.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+$(DOBJS): %_d.o: src/%.F90 $(addsuffix _d.o,$(BASEFILES))
+	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o $*_d.o -c src/$*.F90 $(LIBS)
+	@wc -l -L -w src/$*.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

 ###############################################################################################################################################################

-xspech_r.o: src/xspech.f90 global_r.o sphdf5_r.o $(addsuffix _r.o,$(files)) $(MACROS)
-	@awk -v date='$(date)' -v pwd='$(PWD)' -v macros='$(MACROS)' -v fc='$(FC)' -v flags='$(FLAGS) $(CFLAGS) $(RFLAGS)' -v allfiles='$(ALLFILES)' \
-	'BEGIN{nfiles=split(allfiles,files," ")} \
-	{if($$2=="COMPILATION") {print " write(ounit,*)\" : compiled : date = "date" ; \"" ; \
-	print " write(ounit,*)\" : : srcdir = "pwd" ; \"" ; \
-	print " write(ounit,*)\" : : macros = "macros" ; \"" ; \
-	print " write(ounit,*)\" : : fc = "fc" ; \"" ; \
-	print " write(ounit,*)\" : : flags = "flags" ; \"" }} \
-	{if($$2=="SUMTIME") {for (i=1;i<=nfiles;i++) print " SUMTIME("files[i]")"}}\
-	{if($$2=="PRTTIME") {for (i=1;i<=nfiles;i++) print " PRTTIME("files[i]")"}}\
-	{print}' src/xspech.f90 > mspech.f90
-	m4 -P $(MACROS) mspech.f90 > xspech_m.F90
-	@rm -f mspech.f90
-	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o xspech_r.o -c xspech_m.F90 $(LIBS)
-	@wc -l -L -w xspech_m.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
-	@echo ''
-
-xspech_d.o: src/xspech.f90 global_d.o sphdf5_d.o $(addsuffix _d.o,$(files)) $(MACROS)
-	@awk -v date='$(date)' -v pwd='$(PWD)' -v macros='$(MACROS)' -v fc='$(FC)' -v flags='$(FLAGS) $(CFLAGS) $(DFLAGS)' -v allfiles='$(ALLFILES)' \
-	'BEGIN{nfiles=split(allfiles,files," ")} \
-	{if($$2=="COMPILATION") {print " write(ounit,*)\" : compiled : date = "date" ; \"" ; \
-	print " write(ounit,*)\" : : srcdir = "pwd" ; \"" ; \
-	print " write(ounit,*)\" : : macros = "macros" ; \"" ; \
-	print " write(ounit,*)\" : : fc = "fc" ; \"" ; \
-	print " write(ounit,*)\" : : flags = "flags" ; \"" }} \
-	{if($$2=="SUMTIME") {for (i=1;i<=nfiles;i++) print " SUMTIME("files[i]")"}}\
-	{if($$2=="PRTTIME") {for (i=1;i<=nfiles;i++) print " PRTTIME("files[i]")"}}\
-	{print}' src/xspech.f90 > mspech.f90
-	m4 -P $(MACROS) mspech.f90 > xspech_m.F90
-	@rm -f mspech.f90
-	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o xspech_d.o -c xspech_m.F90 $(LIBS)
-	@wc -l -L -w xspech_m.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+xspech_r.o: src/xspech.F90 $(ROBJS)
+	$(FC) $(FLAGS) $(CFLAGS) $(RFLAGS) -o xspech_r.o -c src/xspech.F90 $(LIBS)
+	@wc -l -L -w src/xspech.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
+	@echo ''
+
+xspech_d.o: src/xspech.F90 $(DOBJS)
+	$(FC) $(FLAGS) $(CFLAGS) $(DFLAGS) -o xspech_d.o -c src/xspech.F90 $(LIBS)
+	@wc -l -L -w src/xspech.F90 | awk '{print $$4" has "$$1" lines, "$$2" words, and the longest line is "$$3" characters ;"}'
 	@echo ''

 ###############################################################################################################################################################

 clean:
-	rm -f *.o *.mod *_p.f90 *_m.F90 .*.h *.pdf *.dvi *.out *.bbl *.toc .*.date ; rm -rf ./docs/html ./docs/latex
+	rm -f *.o *.mod *.pdf *.dvi *.out *.bbl *.toc .*.date *_m.F90
+	rm -rf ./docs/html ./docs/latex

 ###############################################################################################################################################################
diff --git a/SPECfile b/SPECfile
index 6e71272f..95b12c79 100644
--- a/SPECfile
+++ b/SPECfile
@@ -12,8 +12,11 @@
 # basis of SPEC: input variables, global workspace, HDF5 output file writing
 # these are split off since they require special treatment (needed by all others and/or special macros)
-BASEFILES=inputlist global
-IOFILES=sphdf5
+LEVEL_0=mod_kinds
+LEVEL_1=inputlist
+LEVEL_2=global
+LEVEL_3=h5utils
+LEVEL_4=sphdf5

 # (most of) physics part of SPEC
 afiles=preset manual rzaxis packxi volume coords basefn memory
@@ -24,26 +27,25 @@
 efiles=jo00aa pp00aa pp00ab bfield stzxyz
 ffiles=hesian ra00aa numrec

 # externally provided libraries
-# below assumes the .f files are double precision; the CFLAGS = -r8 option is not required;
+# below assumes the .f files are double precision
+# the CFLAGS = -r8 option is not required
 sfiles=dcuhre minpack iqpack rksuite i1mach d1mach ilut iters

 ###############################################################################################################################################################

+BASEFILES=$(LEVEL_0) $(LEVEL_1) $(LEVEL_2) $(LEVEL_3) $(LEVEL_4)
+
 # all of SPEC except BASEFILES
 SPECFILES=$(afiles) $(bfiles) $(cfiles) $(dfiles) $(efiles) $(ffiles)

 # all of "our" (vs. contributed) files needed for the "core" of SPEC
-ALLSPEC=$(BASEFILES) $(IOFILES) $(SPECFILES)
+ALLSPEC=$(BASEFILES) $(SPECFILES)

 # *ALL* files needed for the main SPEC executable
 ALLFILES=$(sfiles) $(ALLSPEC) xspech

 ###############################################################################################################################################################

-MACROS=src/macros
-
-###############################################################################################################################################################
-
 # if want to use gfortran: make BUILD_ENV=gfortran (x/d)spec
 # default: use Intel compiler
 BUILD_ENV?=intel
diff --git a/Utilities/python_wrapper/CMakeLists.txt b/Utilities/python_wrapper/CMakeLists.txt
index 96658fe1..c7d3582f 100644
--- a/Utilities/python_wrapper/CMakeLists.txt
+++ b/Utilities/python_wrapper/CMakeLists.txt
@@ -4,7 +4,7 @@
 find_package(NumPy REQUIRED)
 find_package(F2PY REQUIRED)
 find_package(F90Wrap REQUIRED)

-# Fortran preprocessing compiler 
+# Fortran preprocessing compiler
 if(CMAKE_Fortran_COMPILER_ID STREQUAL Intel)
   set(FPP_COMPILER fpp)
   set(FPP_COMPILE_FLAGS "")
@@ -17,63 +17,64 @@
 get_directory_property(COMP_DEFS COMPILE_DEFINITIONS)
 message(STATUS "Compile definitions for preprocessor are ${COMP_DEFS}")
 string(REPLACE ";" " " COMP_DEF_STR "${COMPILE_DEFINITIONS}")
-
 function(preprocess_fortran outvar)
   message(STATUS "preprocess_fortran arguments: ${outvar}, followed by ${ARGN}")
   set(srcs)
   foreach(f ${ARGN})
     # is it a Fortran file?
     if(f MATCHES "\\.[Ff](9[05])?")
-      message(STATUS "Got fortran file: ${f}")
-      # construct output filename
-      if(NOT IS_ABSOLUTE "${f}")
-        get_filename_component(f "${f}" ABSOLUTE)
-      endif()
-      file(RELATIVE_PATH r "${CMAKE_CURRENT_SOURCE_DIR}" "${f}")
-      get_filename_component(e "${r}" EXT)
-      get_filename_component(n "${r}" NAME_WE)
-      get_filename_component(p "${r}" PATH)
-      set(of "${CMAKE_CURRENT_BINARY_DIR}/${n}_fpp${e}")
-      message(STATUS "Output name: ${of}")
-      # preprocess the thing
-      if (CMAKE_Fortran_COMPILER_ID STREQUAL Intel)
-        add_custom_command(OUTPUT "${of}"
-          COMMAND ${FPP_COMPILER} ${FPP_COMPILE_FLAGS} ${COMP_DEF_STR} "${f}" "${of}"
-          IMPLICIT_DEPENDS Fortran "${f}"
-          COMMENT "Preprocessing ${f}"
-          VERBATIM
-        )
-      else()
-        add_custom_command(OUTPUT "${of}"
-          COMMAND ${FPP_COMPILER} ${FPP_COMPILE_FLAGS} ${COMP_DEF_STR} "${f}" -o "${of}"
-          IMPLICIT_DEPENDS Fortran "${f}"
-          COMMENT "Preprocessing ${f}"
-          VERBATIM
-        )
-      endif()
-      list(APPEND srcs "${of}")
-      #else()
-      # list(APPEND srcs "${f}")
-      endif()
+      message(STATUS "Got fortran file: ${f}")
+      # construct output filename
+      if(NOT IS_ABSOLUTE "${f}")
+        get_filename_component(f "${f}" ABSOLUTE)
+      endif()
+      file(RELATIVE_PATH r "${CMAKE_CURRENT_SOURCE_DIR}" "${f}")
+      get_filename_component(e "${r}" EXT)
+      get_filename_component(n "${r}" NAME_WE)
+      get_filename_component(p "${r}" PATH)
+      set(of "${CMAKE_CURRENT_BINARY_DIR}/${n}_fpp${e}")
+      message(STATUS "Output name: ${of}")
+      # preprocess the thing
+      if (CMAKE_Fortran_COMPILER_ID STREQUAL Intel)
+        add_custom_command(OUTPUT "${of}"
+          COMMAND ${FPP_COMPILER} ${FPP_COMPILE_FLAGS} ${COMP_DEF_STR} "${f}" "${of}"
+          IMPLICIT_DEPENDS Fortran "${f}"
+          COMMENT "Preprocessing ${f}"
+          VERBATIM
+        )
+      else()
+        add_custom_command(OUTPUT "${of}"
+          COMMAND ${FPP_COMPILER} ${FPP_COMPILE_FLAGS} ${COMP_DEF_STR} "${f}" -o "${of}"
+          IMPLICIT_DEPENDS Fortran "${f}"
+          COMMENT "Preprocessing ${f}"
+          VERBATIM
+        )
+      endif()
+      list(APPEND srcs "${of}")
+      #else()
+      # list(APPEND srcs "${f}")
+      endif()
endforeach() # return the (preprocessed) sources set(${outvar} "${srcs}" PARENT_SCOPE) endfunction() -#message(STATUS "fortran_src_files is ${fortran_src_files}") +message(STATUS "fortran_src_files is ${fortran_src_files}") preprocess_fortran(fpp_files ${fortran_src_files}) -#message(STATUS "fpp_files is ${fpp_files}") +message(STATUS "fpp_files is ${fpp_files}") # ---------------------------------------------------------------------------- # NOTE: There is no way to identify the f90wrap---.f90 files ahead of running f90wrap # NOTE: The files produced have no one->one relation with the source files. -# NOTE: So giving the names of f90wrap_---.f90 files manually +# NOTE: So giving the names of f90wrap_---.f90 files manually #----------------------------------------------------------------------------- -set(f90wrap_output_files ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_global_m_fpp.f90 - ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_inputlist_m_fpp.f90 - ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_intghs_m_fpp.f90 - ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_msphdf5_m_fpp.f90 - ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_newton_m_fpp.f90 +set(f90wrap_output_files ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_global_fpp.f90 + ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_h5utils_fpp.f90 + ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_inputlist_fpp.f90 + ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_intghs_fpp.f90 + ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_mod_kinds_fpp.f90 + ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_newton_fpp.f90 + ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_sphdf5_fpp.f90 ${CMAKE_CURRENT_BINARY_DIR}/f90wrap_toplevel.f90 ) @@ -81,22 +82,18 @@ set(kind_map_file ${CMAKE_CURRENT_SOURCE_DIR}/kind_map) set(python_mod_name spec_f90wrapped) set(python_mod_file ${CMAKE_CURRENT_BINARY_DIR}/${python_mod_name}.py) -add_custom_target(preprocessing ALL - DEPENDS ${fpp_files} -) - -add_custom_command(OUTPUT ${python_mod_file} ${f90wrap_output_files} - COMMAND "${F90Wrap_EXECUTABLE}" -m "${python_mod_name}" ${fpp_files} -k "${kind_map_file}" +add_custom_command( + OUTPUT ${python_mod_file} ${f90wrap_output_files} + COMMAND "${F90Wrap_EXECUTABLE}" + -m "${python_mod_name}" + -k "${kind_map_file}" + ${fpp_files} #IMPLICIT_DEPENDS Fortran ${fpp_files} DEPENDS ${fpp_files} ${kind_map_file} COMMENT "Executing F90Wrap for" ${fpp_files} VERBATIM ) -#add_custom_target("${python_mod_name}_pymod" -# DEPENDS ${python_mod_file} ${f90wrap_output_files} -#) - set(f2py_module_name "_${python_mod_name}") set(generated_module_file ${CMAKE_CURRENT_BINARY_DIR}/${f2py_module_name}${PYTHON_EXTENSION_MODULE_SUFFIX}) message(STATUS "Python exten suffix expansion: ${PYTHON_EXTENSION_MODULE_SUFFIX}") @@ -111,20 +108,20 @@ add_custom_target(${f2py_module_name} ALL add_custom_command( OUTPUT ${generated_module_file} - COMMAND ${F2PY_EXECUTABLE} - -m ${f2py_module_name} - --build-dir ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${F90Wrap_F2PY_EXECUTABLE} + -m ${f2py_module_name} + --build-dir ${CMAKE_CURRENT_BINARY_DIR} --f90exec=${CMAKE_Fortran_COMPILER} --f77exec=${CMAKE_Fortran_COMPILER} --f90flags="-fopenmp" -lgomp - -c + -c #${SCALAPACK_LIB} ${NETCDF_F} ${NETCDF_C} - ${f90wrap_output_files} + ${f90wrap_output_files} -I${CMAKE_BINARY_DIR}/build/modules/spec_modules -I${HDF5_Fortran_INCLUDE_DIRS} --verbose - ${CMAKE_BINARY_DIR}/build/lib/libspec.a + ${CMAKE_BINARY_DIR}/build/lib/libspec.a ${SPEC_LINK_LIB} #IMPLICIT_DEPENDS Fortran ${f90wrap_output_files} DEPENDS spec ${f90wrap_output_files} diff --git a/Utilities/python_wrapper/Makefile b/Utilities/python_wrapper/Makefile index 21b4a35a..6e4ceb81 100644 --- 
a/Utilities/python_wrapper/Makefile +++ b/Utilities/python_wrapper/Makefile @@ -21,7 +21,7 @@ PYTHON = python3 ifeq ($(F90),gfortran) FPP = $(F90) -E - FPP_F90FLAGS = -x f95-cpp-input -fPIC + FPP_F90FLAGS = -x f95-cpp-input -fPIC endif ifeq ($(F90),ifort) @@ -60,10 +60,7 @@ TMP_FILES := ${ALLSPEC} xspech LIBSRC_WRAP_SOURCES := $(TMP_FILES) # file names -LIBSRC_WRAP_FILES = $(addsuffix .f90,${LIBSRC_WRAP_SOURCES}) - -# macro_explained files -LIBSRC_WRAP_MFILES = $(addsuffix _m.F90,${LIBSRC_WRAP_SOURCES}) +LIBSRC_WRAP_FILES = $(addsuffix .F90,${LIBSRC_WRAP_SOURCES}) # object files LIBSRC_WRAP_OBJECTS = $(addsuffix _r.o,${LIBSRC_WRAP_SOURCES}) @@ -75,7 +72,7 @@ LIBSRC_WRAP_FPP_FILES = $(addsuffix .fpp,${LIBSRC_WRAP_SOURCES}) # Relevant suffixes #======================================================================= -.SUFFIXES: .f90 .fpp +.SUFFIXES: .F90 .f90 .fpp #======================================================================= # @@ -95,7 +92,7 @@ f90wrap_clean: -rm -rf src.*/ .f2py_f2cmap .libs/ __pycache__/ -rm -rf $(LIB_NAME) -$(LIBSRC_WRAP_FPP_FILES): %.fpp: ${SOURCES}/%_m.F90 +$(LIBSRC_WRAP_FPP_FILES): %.fpp: ${SOURCES}/src/%.F90 ${FPP} ${FPP_F90FLAGS} $< -o $@ $(LIB_NAME): @@ -124,4 +121,4 @@ compile_test: @echo "LINKS = ${LINKS}" test: - @${PYTHON} -c "from spec import spec; print('SPEC version: {:}'.format(spec.constants.version))" + @${PYTHON} -c "import spec.spec_f90wrapped as spec; print('SPEC version: {:}'.format(spec.constants.version))" diff --git a/Utilities/python_wrapper/kind_map b/Utilities/python_wrapper/kind_map index 32500b4e..8f2e01f6 100644 --- a/Utilities/python_wrapper/kind_map +++ b/Utilities/python_wrapper/kind_map @@ -4,6 +4,7 @@ 'isp' : 'float', '8' : 'double', 'dp' : 'double', + 'wp' : 'double', 'idp' : 'double'}, 'complex' : { '' : 'complex_float', '8' : 'complex_double', diff --git a/Utilities/python_wrapper/libspec.a b/Utilities/python_wrapper/libspec.a new file mode 100644 index 00000000..54353bfb Binary files /dev/null and b/Utilities/python_wrapper/libspec.a differ diff --git a/Utilities/python_wrapper/spec.py b/Utilities/python_wrapper/spec.py new file mode 100644 index 00000000..43958bae --- /dev/null +++ b/Utilities/python_wrapper/spec.py @@ -0,0 +1,15046 @@ +from __future__ import print_function, absolute_import, division +import _spec +import f90wrap.runtime +import logging + +class Inputlist(f90wrap.runtime.FortranModule): + """ + Module inputlist + + + Defined at inputlist.fpp lines 11-975 + + """ + @staticmethod + def initialize_inputs(): + """ + initialize_inputs() + + + Defined at inputlist.fpp lines 845-974 + + + """ + _spec.f90wrap_initialize_inputs() + + @property + def mnvol(self): + """ + Element mnvol ftype=integer pytype=int + + + Defined at inputlist.fpp line 18 + + """ + return _spec.f90wrap_inputlist__get__mnvol() + + @property + def mmpol(self): + """ + Element mmpol ftype=integer pytype=int + + + Defined at inputlist.fpp line 19 + + """ + return _spec.f90wrap_inputlist__get__mmpol() + + @property + def mntor(self): + """ + Element mntor ftype=integer pytype=int + + + Defined at inputlist.fpp line 20 + + """ + return _spec.f90wrap_inputlist__get__mntor() + + @property + def igeometry(self): + """ + Element igeometry ftype=integer pytype=int + + + Defined at inputlist.fpp line 23 + + """ + return _spec.f90wrap_inputlist__get__igeometry() + + @igeometry.setter + def igeometry(self, igeometry): + _spec.f90wrap_inputlist__set__igeometry(igeometry) + + @property + def istellsym(self): + """ + Element istellsym ftype=integer 
pytype=int + + + Defined at inputlist.fpp line 24 + + """ + return _spec.f90wrap_inputlist__get__istellsym() + + @istellsym.setter + def istellsym(self, istellsym): + _spec.f90wrap_inputlist__set__istellsym(istellsym) + + @property + def lfreebound(self): + """ + Element lfreebound ftype=integer pytype=int + + + Defined at inputlist.fpp line 25 + + """ + return _spec.f90wrap_inputlist__get__lfreebound() + + @lfreebound.setter + def lfreebound(self, lfreebound): + _spec.f90wrap_inputlist__set__lfreebound(lfreebound) + + @property + def phiedge(self): + """ + Element phiedge ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 26 + + """ + return _spec.f90wrap_inputlist__get__phiedge() + + @phiedge.setter + def phiedge(self, phiedge): + _spec.f90wrap_inputlist__set__phiedge(phiedge) + + @property + def curtor(self): + """ + Element curtor ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 27 + + """ + return _spec.f90wrap_inputlist__get__curtor() + + @curtor.setter + def curtor(self, curtor): + _spec.f90wrap_inputlist__set__curtor(curtor) + + @property + def curpol(self): + """ + Element curpol ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 28 + + """ + return _spec.f90wrap_inputlist__get__curpol() + + @curpol.setter + def curpol(self, curpol): + _spec.f90wrap_inputlist__set__curpol(curpol) + + @property + def gamma(self): + """ + Element gamma ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 29 + + """ + return _spec.f90wrap_inputlist__get__gamma() + + @gamma.setter + def gamma(self, gamma): + _spec.f90wrap_inputlist__set__gamma(gamma) + + @property + def nfp(self): + """ + Element nfp ftype=integer pytype=int + + + Defined at inputlist.fpp line 30 + + """ + return _spec.f90wrap_inputlist__get__nfp() + + @nfp.setter + def nfp(self, nfp): + _spec.f90wrap_inputlist__set__nfp(nfp) + + @property + def nvol(self): + """ + Element nvol ftype=integer pytype=int + + + Defined at inputlist.fpp line 31 + + """ + return _spec.f90wrap_inputlist__get__nvol() + + @nvol.setter + def nvol(self, nvol): + _spec.f90wrap_inputlist__set__nvol(nvol) + + @property + def mpol(self): + """ + Element mpol ftype=integer pytype=int + + + Defined at inputlist.fpp line 32 + + """ + return _spec.f90wrap_inputlist__get__mpol() + + @mpol.setter + def mpol(self, mpol): + _spec.f90wrap_inputlist__set__mpol(mpol) + + @property + def ntor(self): + """ + Element ntor ftype=integer pytype=int + + + Defined at inputlist.fpp line 33 + + """ + return _spec.f90wrap_inputlist__get__ntor() + + @ntor.setter + def ntor(self, ntor): + _spec.f90wrap_inputlist__set__ntor(ntor) + + @property + def lrad(self): + """ + Element lrad ftype=integer pytype=int + + + Defined at inputlist.fpp line 34 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__lrad(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lrad = self._arrays[array_handle] + else: + lrad = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__lrad) + self._arrays[array_handle] = lrad + return lrad + + @lrad.setter + def lrad(self, lrad): + self.lrad[...] 
= lrad + + @property + def lconstraint(self): + """ + Element lconstraint ftype=integer pytype=int + + + Defined at inputlist.fpp line 35 + + """ + return _spec.f90wrap_inputlist__get__lconstraint() + + @lconstraint.setter + def lconstraint(self, lconstraint): + _spec.f90wrap_inputlist__set__lconstraint(lconstraint) + + @property + def tflux(self): + """ + Element tflux ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 36 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__tflux(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tflux = self._arrays[array_handle] + else: + tflux = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__tflux) + self._arrays[array_handle] = tflux + return tflux + + @tflux.setter + def tflux(self, tflux): + self.tflux[...] = tflux + + @property + def pflux(self): + """ + Element pflux ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 37 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__pflux(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + pflux = self._arrays[array_handle] + else: + pflux = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__pflux) + self._arrays[array_handle] = pflux + return pflux + + @pflux.setter + def pflux(self, pflux): + self.pflux[...] = pflux + + @property + def helicity(self): + """ + Element helicity ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 38 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__helicity(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + helicity = self._arrays[array_handle] + else: + helicity = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__helicity) + self._arrays[array_handle] = helicity + return helicity + + @helicity.setter + def helicity(self, helicity): + self.helicity[...] = helicity + + @property + def pscale(self): + """ + Element pscale ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 39 + + """ + return _spec.f90wrap_inputlist__get__pscale() + + @pscale.setter + def pscale(self, pscale): + _spec.f90wrap_inputlist__set__pscale(pscale) + + @property + def pressure(self): + """ + Element pressure ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 40 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__pressure(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + pressure = self._arrays[array_handle] + else: + pressure = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__pressure) + self._arrays[array_handle] = pressure + return pressure + + @pressure.setter + def pressure(self, pressure): + self.pressure[...] 
= pressure + + @property + def ladiabatic(self): + """ + Element ladiabatic ftype=integer pytype=int + + + Defined at inputlist.fpp line 41 + + """ + return _spec.f90wrap_inputlist__get__ladiabatic() + + @ladiabatic.setter + def ladiabatic(self, ladiabatic): + _spec.f90wrap_inputlist__set__ladiabatic(ladiabatic) + + @property + def adiabatic(self): + """ + Element adiabatic ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 42 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__adiabatic(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + adiabatic = self._arrays[array_handle] + else: + adiabatic = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__adiabatic) + self._arrays[array_handle] = adiabatic + return adiabatic + + @adiabatic.setter + def adiabatic(self, adiabatic): + self.adiabatic[...] = adiabatic + + @property + def mu(self): + """ + Element mu ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 43 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__mu(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + mu = self._arrays[array_handle] + else: + mu = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__mu) + self._arrays[array_handle] = mu + return mu + + @mu.setter + def mu(self, mu): + self.mu[...] = mu + + @property + def ivolume(self): + """ + Element ivolume ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 44 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__ivolume(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ivolume = self._arrays[array_handle] + else: + ivolume = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__ivolume) + self._arrays[array_handle] = ivolume + return ivolume + + @ivolume.setter + def ivolume(self, ivolume): + self.ivolume[...] = ivolume + + @property + def isurf(self): + """ + Element isurf ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 45 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__isurf(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + isurf = self._arrays[array_handle] + else: + isurf = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__isurf) + self._arrays[array_handle] = isurf + return isurf + + @isurf.setter + def isurf(self, isurf): + self.isurf[...] = isurf + + @property + def pl(self): + """ + Element pl ftype=integer pytype=int + + + Defined at inputlist.fpp line 46 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__pl(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + pl = self._arrays[array_handle] + else: + pl = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__pl) + self._arrays[array_handle] = pl + return pl + + @pl.setter + def pl(self, pl): + self.pl[...] 
= pl + + @property + def ql(self): + """ + Element ql ftype=integer pytype=int + + + Defined at inputlist.fpp line 47 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__ql(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ql = self._arrays[array_handle] + else: + ql = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__ql) + self._arrays[array_handle] = ql + return ql + + @ql.setter + def ql(self, ql): + self.ql[...] = ql + + @property + def pr(self): + """ + Element pr ftype=integer pytype=int + + + Defined at inputlist.fpp line 48 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__pr(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + pr = self._arrays[array_handle] + else: + pr = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__pr) + self._arrays[array_handle] = pr + return pr + + @pr.setter + def pr(self, pr): + self.pr[...] = pr + + @property + def qr(self): + """ + Element qr ftype=integer pytype=int + + + Defined at inputlist.fpp line 49 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__qr(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + qr = self._arrays[array_handle] + else: + qr = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__qr) + self._arrays[array_handle] = qr + return qr + + @qr.setter + def qr(self, qr): + self.qr[...] = qr + + @property + def iota(self): + """ + Element iota ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 50 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__iota(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + iota = self._arrays[array_handle] + else: + iota = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__iota) + self._arrays[array_handle] = iota + return iota + + @iota.setter + def iota(self, iota): + self.iota[...] = iota + + @property + def lp(self): + """ + Element lp ftype=integer pytype=int + + + Defined at inputlist.fpp line 51 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__lp(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lp = self._arrays[array_handle] + else: + lp = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__lp) + self._arrays[array_handle] = lp + return lp + + @lp.setter + def lp(self, lp): + self.lp[...] = lp + + @property + def lq(self): + """ + Element lq ftype=integer pytype=int + + + Defined at inputlist.fpp line 52 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__lq(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lq = self._arrays[array_handle] + else: + lq = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__lq) + self._arrays[array_handle] = lq + return lq + + @lq.setter + def lq(self, lq): + self.lq[...] 
= lq + + @property + def rp(self): + """ + Element rp ftype=integer pytype=int + + + Defined at inputlist.fpp line 53 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__rp(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + rp = self._arrays[array_handle] + else: + rp = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__rp) + self._arrays[array_handle] = rp + return rp + + @rp.setter + def rp(self, rp): + self.rp[...] = rp + + @property + def rq(self): + """ + Element rq ftype=integer pytype=int + + + Defined at inputlist.fpp line 54 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__rq(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + rq = self._arrays[array_handle] + else: + rq = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__rq) + self._arrays[array_handle] = rq + return rq + + @rq.setter + def rq(self, rq): + self.rq[...] = rq + + @property + def oita(self): + """ + Element oita ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 55 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__oita(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + oita = self._arrays[array_handle] + else: + oita = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__oita) + self._arrays[array_handle] = oita + return oita + + @oita.setter + def oita(self, oita): + self.oita[...] = oita + + @property + def rpol(self): + """ + Element rpol ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 56 + + """ + return _spec.f90wrap_inputlist__get__rpol() + + @rpol.setter + def rpol(self, rpol): + _spec.f90wrap_inputlist__set__rpol(rpol) + + @property + def rtor(self): + """ + Element rtor ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 57 + + """ + return _spec.f90wrap_inputlist__get__rtor() + + @rtor.setter + def rtor(self, rtor): + _spec.f90wrap_inputlist__set__rtor(rtor) + + @property + def rac(self): + """ + Element rac ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 58 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__rac(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + rac = self._arrays[array_handle] + else: + rac = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__rac) + self._arrays[array_handle] = rac + return rac + + @rac.setter + def rac(self, rac): + self.rac[...] = rac + + @property + def zas(self): + """ + Element zas ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 59 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__zas(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + zas = self._arrays[array_handle] + else: + zas = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__zas) + self._arrays[array_handle] = zas + return zas + + @zas.setter + def zas(self, zas): + self.zas[...] 
= zas + + @property + def ras(self): + """ + Element ras ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 60 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__ras(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ras = self._arrays[array_handle] + else: + ras = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__ras) + self._arrays[array_handle] = ras + return ras + + @ras.setter + def ras(self, ras): + self.ras[...] = ras + + @property + def zac(self): + """ + Element zac ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 61 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__zac(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + zac = self._arrays[array_handle] + else: + zac = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__zac) + self._arrays[array_handle] = zac + return zac + + @zac.setter + def zac(self, zac): + self.zac[...] = zac + + @property + def rbc(self): + """ + Element rbc ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 62 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__rbc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + rbc = self._arrays[array_handle] + else: + rbc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__rbc) + self._arrays[array_handle] = rbc + return rbc + + @rbc.setter + def rbc(self, rbc): + self.rbc[...] = rbc + + @property + def zbs(self): + """ + Element zbs ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 63 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__zbs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + zbs = self._arrays[array_handle] + else: + zbs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__zbs) + self._arrays[array_handle] = zbs + return zbs + + @zbs.setter + def zbs(self, zbs): + self.zbs[...] = zbs + + @property + def rbs(self): + """ + Element rbs ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 64 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__rbs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + rbs = self._arrays[array_handle] + else: + rbs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__rbs) + self._arrays[array_handle] = rbs + return rbs + + @rbs.setter + def rbs(self, rbs): + self.rbs[...] = rbs + + @property + def zbc(self): + """ + Element zbc ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 65 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__zbc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + zbc = self._arrays[array_handle] + else: + zbc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__zbc) + self._arrays[array_handle] = zbc + return zbc + + @zbc.setter + def zbc(self, zbc): + self.zbc[...] 
= zbc + + @property + def rwc(self): + """ + Element rwc ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 66 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__rwc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + rwc = self._arrays[array_handle] + else: + rwc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__rwc) + self._arrays[array_handle] = rwc + return rwc + + @rwc.setter + def rwc(self, rwc): + self.rwc[...] = rwc + + @property + def zws(self): + """ + Element zws ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 67 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__zws(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + zws = self._arrays[array_handle] + else: + zws = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__zws) + self._arrays[array_handle] = zws + return zws + + @zws.setter + def zws(self, zws): + self.zws[...] = zws + + @property + def rws(self): + """ + Element rws ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 68 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__rws(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + rws = self._arrays[array_handle] + else: + rws = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__rws) + self._arrays[array_handle] = rws + return rws + + @rws.setter + def rws(self, rws): + self.rws[...] = rws + + @property + def zwc(self): + """ + Element zwc ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 69 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__zwc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + zwc = self._arrays[array_handle] + else: + zwc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__zwc) + self._arrays[array_handle] = zwc + return zwc + + @zwc.setter + def zwc(self, zwc): + self.zwc[...] = zwc + + @property + def vns(self): + """ + Element vns ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 70 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__vns(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + vns = self._arrays[array_handle] + else: + vns = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__vns) + self._arrays[array_handle] = vns + return vns + + @vns.setter + def vns(self, vns): + self.vns[...] = vns + + @property + def bns(self): + """ + Element bns ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 71 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__bns(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bns = self._arrays[array_handle] + else: + bns = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__bns) + self._arrays[array_handle] = bns + return bns + + @bns.setter + def bns(self, bns): + self.bns[...] 
= bns + + @property + def vnc(self): + """ + Element vnc ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 72 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__vnc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + vnc = self._arrays[array_handle] + else: + vnc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__vnc) + self._arrays[array_handle] = vnc + return vnc + + @vnc.setter + def vnc(self, vnc): + self.vnc[...] = vnc + + @property + def bnc(self): + """ + Element bnc ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 73 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__bnc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bnc = self._arrays[array_handle] + else: + bnc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__bnc) + self._arrays[array_handle] = bnc + return bnc + + @bnc.setter + def bnc(self, bnc): + self.bnc[...] = bnc + + @property + def mupftol(self): + """ + Element mupftol ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 74 + + """ + return _spec.f90wrap_inputlist__get__mupftol() + + @mupftol.setter + def mupftol(self, mupftol): + _spec.f90wrap_inputlist__set__mupftol(mupftol) + + @property + def mupfits(self): + """ + Element mupfits ftype=integer pytype=int + + + Defined at inputlist.fpp line 75 + + """ + return _spec.f90wrap_inputlist__get__mupfits() + + @mupfits.setter + def mupfits(self, mupfits): + _spec.f90wrap_inputlist__set__mupfits(mupfits) + + @property + def lreflect(self): + """ + Element lreflect ftype=integer pytype=int + + + Defined at inputlist.fpp line 76 + + """ + return _spec.f90wrap_inputlist__get__lreflect() + + @lreflect.setter + def lreflect(self, lreflect): + _spec.f90wrap_inputlist__set__lreflect(lreflect) + + @property + def linitialize(self): + """ + Element linitialize ftype=integer pytype=int + + + Defined at inputlist.fpp line 79 + + """ + return _spec.f90wrap_inputlist__get__linitialize() + + @linitialize.setter + def linitialize(self, linitialize): + _spec.f90wrap_inputlist__set__linitialize(linitialize) + + @property + def lautoinitbn(self): + """ + Element lautoinitbn ftype=integer pytype=int + + + Defined at inputlist.fpp line 80 + + """ + return _spec.f90wrap_inputlist__get__lautoinitbn() + + @lautoinitbn.setter + def lautoinitbn(self, lautoinitbn): + _spec.f90wrap_inputlist__set__lautoinitbn(lautoinitbn) + + @property + def lzerovac(self): + """ + Element lzerovac ftype=integer pytype=int + + + Defined at inputlist.fpp line 81 + + """ + return _spec.f90wrap_inputlist__get__lzerovac() + + @lzerovac.setter + def lzerovac(self, lzerovac): + _spec.f90wrap_inputlist__set__lzerovac(lzerovac) + + @property + def ndiscrete(self): + """ + Element ndiscrete ftype=integer pytype=int + + + Defined at inputlist.fpp line 82 + + """ + return _spec.f90wrap_inputlist__get__ndiscrete() + + @ndiscrete.setter + def ndiscrete(self, ndiscrete): + _spec.f90wrap_inputlist__set__ndiscrete(ndiscrete) + + @property + def nquad(self): + """ + Element nquad ftype=integer pytype=int + + + Defined at inputlist.fpp line 83 + + """ + return _spec.f90wrap_inputlist__get__nquad() + + @nquad.setter + def nquad(self, nquad): + _spec.f90wrap_inputlist__set__nquad(nquad) + + @property + def impol(self): + """ + Element impol ftype=integer pytype=int + + + 
Defined at inputlist.fpp line 84 + + """ + return _spec.f90wrap_inputlist__get__impol() + + @impol.setter + def impol(self, impol): + _spec.f90wrap_inputlist__set__impol(impol) + + @property + def intor(self): + """ + Element intor ftype=integer pytype=int + + + Defined at inputlist.fpp line 85 + + """ + return _spec.f90wrap_inputlist__get__intor() + + @intor.setter + def intor(self, intor): + _spec.f90wrap_inputlist__set__intor(intor) + + @property + def lsparse(self): + """ + Element lsparse ftype=integer pytype=int + + + Defined at inputlist.fpp line 86 + + """ + return _spec.f90wrap_inputlist__get__lsparse() + + @lsparse.setter + def lsparse(self, lsparse): + _spec.f90wrap_inputlist__set__lsparse(lsparse) + + @property + def lsvdiota(self): + """ + Element lsvdiota ftype=integer pytype=int + + + Defined at inputlist.fpp line 87 + + """ + return _spec.f90wrap_inputlist__get__lsvdiota() + + @lsvdiota.setter + def lsvdiota(self, lsvdiota): + _spec.f90wrap_inputlist__set__lsvdiota(lsvdiota) + + @property + def imethod(self): + """ + Element imethod ftype=integer pytype=int + + + Defined at inputlist.fpp line 88 + + """ + return _spec.f90wrap_inputlist__get__imethod() + + @imethod.setter + def imethod(self, imethod): + _spec.f90wrap_inputlist__set__imethod(imethod) + + @property + def iorder(self): + """ + Element iorder ftype=integer pytype=int + + + Defined at inputlist.fpp line 89 + + """ + return _spec.f90wrap_inputlist__get__iorder() + + @iorder.setter + def iorder(self, iorder): + _spec.f90wrap_inputlist__set__iorder(iorder) + + @property + def iprecon(self): + """ + Element iprecon ftype=integer pytype=int + + + Defined at inputlist.fpp line 90 + + """ + return _spec.f90wrap_inputlist__get__iprecon() + + @iprecon.setter + def iprecon(self, iprecon): + _spec.f90wrap_inputlist__set__iprecon(iprecon) + + @property + def iotatol(self): + """ + Element iotatol ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 91 + + """ + return _spec.f90wrap_inputlist__get__iotatol() + + @iotatol.setter + def iotatol(self, iotatol): + _spec.f90wrap_inputlist__set__iotatol(iotatol) + + @property + def lextrap(self): + """ + Element lextrap ftype=integer pytype=int + + + Defined at inputlist.fpp line 92 + + """ + return _spec.f90wrap_inputlist__get__lextrap() + + @lextrap.setter + def lextrap(self, lextrap): + _spec.f90wrap_inputlist__set__lextrap(lextrap) + + @property + def mregular(self): + """ + Element mregular ftype=integer pytype=int + + + Defined at inputlist.fpp line 93 + + """ + return _spec.f90wrap_inputlist__get__mregular() + + @mregular.setter + def mregular(self, mregular): + _spec.f90wrap_inputlist__set__mregular(mregular) + + @property + def lrzaxis(self): + """ + Element lrzaxis ftype=integer pytype=int + + + Defined at inputlist.fpp line 94 + + """ + return _spec.f90wrap_inputlist__get__lrzaxis() + + @lrzaxis.setter + def lrzaxis(self, lrzaxis): + _spec.f90wrap_inputlist__set__lrzaxis(lrzaxis) + + @property + def ntoraxis(self): + """ + Element ntoraxis ftype=integer pytype=int + + + Defined at inputlist.fpp line 95 + + """ + return _spec.f90wrap_inputlist__get__ntoraxis() + + @ntoraxis.setter + def ntoraxis(self, ntoraxis): + _spec.f90wrap_inputlist__set__ntoraxis(ntoraxis) + + @property + def lbeltrami(self): + """ + Element lbeltrami ftype=integer pytype=int + + + Defined at inputlist.fpp line 98 + + """ + return _spec.f90wrap_inputlist__get__lbeltrami() + + @lbeltrami.setter + def lbeltrami(self, lbeltrami): + _spec.f90wrap_inputlist__set__lbeltrami(lbeltrami) + + 
@property + def linitgues(self): + """ + Element linitgues ftype=integer pytype=int + + + Defined at inputlist.fpp line 99 + + """ + return _spec.f90wrap_inputlist__get__linitgues() + + @linitgues.setter + def linitgues(self, linitgues): + _spec.f90wrap_inputlist__set__linitgues(linitgues) + + @property + def lposdef(self): + """ + Element lposdef ftype=integer pytype=int + + + Defined at inputlist.fpp line 100 + + """ + return _spec.f90wrap_inputlist__get__lposdef() + + @lposdef.setter + def lposdef(self, lposdef): + _spec.f90wrap_inputlist__set__lposdef(lposdef) + + @property + def maxrndgues(self): + """ + Element maxrndgues ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 101 + + """ + return _spec.f90wrap_inputlist__get__maxrndgues() + + @maxrndgues.setter + def maxrndgues(self, maxrndgues): + _spec.f90wrap_inputlist__set__maxrndgues(maxrndgues) + + @property + def lmatsolver(self): + """ + Element lmatsolver ftype=integer pytype=int + + + Defined at inputlist.fpp line 102 + + """ + return _spec.f90wrap_inputlist__get__lmatsolver() + + @lmatsolver.setter + def lmatsolver(self, lmatsolver): + _spec.f90wrap_inputlist__set__lmatsolver(lmatsolver) + + @property + def nitergmres(self): + """ + Element nitergmres ftype=integer pytype=int + + + Defined at inputlist.fpp line 103 + + """ + return _spec.f90wrap_inputlist__get__nitergmres() + + @nitergmres.setter + def nitergmres(self, nitergmres): + _spec.f90wrap_inputlist__set__nitergmres(nitergmres) + + @property + def epsgmres(self): + """ + Element epsgmres ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 104 + + """ + return _spec.f90wrap_inputlist__get__epsgmres() + + @epsgmres.setter + def epsgmres(self, epsgmres): + _spec.f90wrap_inputlist__set__epsgmres(epsgmres) + + @property + def lgmresprec(self): + """ + Element lgmresprec ftype=integer pytype=int + + + Defined at inputlist.fpp line 105 + + """ + return _spec.f90wrap_inputlist__get__lgmresprec() + + @lgmresprec.setter + def lgmresprec(self, lgmresprec): + _spec.f90wrap_inputlist__set__lgmresprec(lgmresprec) + + @property + def epsilu(self): + """ + Element epsilu ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 106 + + """ + return _spec.f90wrap_inputlist__get__epsilu() + + @epsilu.setter + def epsilu(self, epsilu): + _spec.f90wrap_inputlist__set__epsilu(epsilu) + + @property + def lfindzero(self): + """ + Element lfindzero ftype=integer pytype=int + + + Defined at inputlist.fpp line 109 + + """ + return _spec.f90wrap_inputlist__get__lfindzero() + + @lfindzero.setter + def lfindzero(self, lfindzero): + _spec.f90wrap_inputlist__set__lfindzero(lfindzero) + + @property + def escale(self): + """ + Element escale ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 110 + + """ + return _spec.f90wrap_inputlist__get__escale() + + @escale.setter + def escale(self, escale): + _spec.f90wrap_inputlist__set__escale(escale) + + @property + def opsilon(self): + """ + Element opsilon ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 111 + + """ + return _spec.f90wrap_inputlist__get__opsilon() + + @opsilon.setter + def opsilon(self, opsilon): + _spec.f90wrap_inputlist__set__opsilon(opsilon) + + @property + def pcondense(self): + """ + Element pcondense ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 112 + + """ + return _spec.f90wrap_inputlist__get__pcondense() + + @pcondense.setter + def pcondense(self, pcondense): + _spec.f90wrap_inputlist__set__pcondense(pcondense) + + @property + def epsilon(self): + """ + Element 
epsilon ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 113 + + """ + return _spec.f90wrap_inputlist__get__epsilon() + + @epsilon.setter + def epsilon(self, epsilon): + _spec.f90wrap_inputlist__set__epsilon(epsilon) + + @property + def wpoloidal(self): + """ + Element wpoloidal ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 114 + + """ + return _spec.f90wrap_inputlist__get__wpoloidal() + + @wpoloidal.setter + def wpoloidal(self, wpoloidal): + _spec.f90wrap_inputlist__set__wpoloidal(wpoloidal) + + @property + def upsilon(self): + """ + Element upsilon ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 115 + + """ + return _spec.f90wrap_inputlist__get__upsilon() + + @upsilon.setter + def upsilon(self, upsilon): + _spec.f90wrap_inputlist__set__upsilon(upsilon) + + @property + def forcetol(self): + """ + Element forcetol ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 116 + + """ + return _spec.f90wrap_inputlist__get__forcetol() + + @forcetol.setter + def forcetol(self, forcetol): + _spec.f90wrap_inputlist__set__forcetol(forcetol) + + @property + def c05xmax(self): + """ + Element c05xmax ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 117 + + """ + return _spec.f90wrap_inputlist__get__c05xmax() + + @c05xmax.setter + def c05xmax(self, c05xmax): + _spec.f90wrap_inputlist__set__c05xmax(c05xmax) + + @property + def c05xtol(self): + """ + Element c05xtol ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 118 + + """ + return _spec.f90wrap_inputlist__get__c05xtol() + + @c05xtol.setter + def c05xtol(self, c05xtol): + _spec.f90wrap_inputlist__set__c05xtol(c05xtol) + + @property + def c05factor(self): + """ + Element c05factor ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 119 + + """ + return _spec.f90wrap_inputlist__get__c05factor() + + @c05factor.setter + def c05factor(self, c05factor): + _spec.f90wrap_inputlist__set__c05factor(c05factor) + + @property + def lreadgf(self): + """ + Element lreadgf ftype=logical pytype=bool + + + Defined at inputlist.fpp line 120 + + """ + return _spec.f90wrap_inputlist__get__lreadgf() + + @lreadgf.setter + def lreadgf(self, lreadgf): + _spec.f90wrap_inputlist__set__lreadgf(lreadgf) + + @property + def mfreeits(self): + """ + Element mfreeits ftype=integer pytype=int + + + Defined at inputlist.fpp line 121 + + """ + return _spec.f90wrap_inputlist__get__mfreeits() + + @mfreeits.setter + def mfreeits(self, mfreeits): + _spec.f90wrap_inputlist__set__mfreeits(mfreeits) + + @property + def bnstol(self): + """ + Element bnstol ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 122 + + """ + return _spec.f90wrap_inputlist__get__bnstol() + + @bnstol.setter + def bnstol(self, bnstol): + _spec.f90wrap_inputlist__set__bnstol(bnstol) + + @property + def bnsblend(self): + """ + Element bnsblend ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 123 + + """ + return _spec.f90wrap_inputlist__get__bnsblend() + + @bnsblend.setter + def bnsblend(self, bnsblend): + _spec.f90wrap_inputlist__set__bnsblend(bnsblend) + + @property + def gbntol(self): + """ + Element gbntol ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 124 + + """ + return _spec.f90wrap_inputlist__get__gbntol() + + @gbntol.setter + def gbntol(self, gbntol): + _spec.f90wrap_inputlist__set__gbntol(gbntol) + + @property + def gbnbld(self): + """ + Element gbnbld ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 125 + + """ + return _spec.f90wrap_inputlist__get__gbnbld() + + 
@gbnbld.setter + def gbnbld(self, gbnbld): + _spec.f90wrap_inputlist__set__gbnbld(gbnbld) + + @property + def vcasingeps(self): + """ + Element vcasingeps ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 126 + + """ + return _spec.f90wrap_inputlist__get__vcasingeps() + + @vcasingeps.setter + def vcasingeps(self, vcasingeps): + _spec.f90wrap_inputlist__set__vcasingeps(vcasingeps) + + @property + def vcasingtol(self): + """ + Element vcasingtol ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 127 + + """ + return _spec.f90wrap_inputlist__get__vcasingtol() + + @vcasingtol.setter + def vcasingtol(self, vcasingtol): + _spec.f90wrap_inputlist__set__vcasingtol(vcasingtol) + + @property + def vcasingits(self): + """ + Element vcasingits ftype=integer pytype=int + + + Defined at inputlist.fpp line 128 + + """ + return _spec.f90wrap_inputlist__get__vcasingits() + + @vcasingits.setter + def vcasingits(self, vcasingits): + _spec.f90wrap_inputlist__set__vcasingits(vcasingits) + + @property + def vcasingper(self): + """ + Element vcasingper ftype=integer pytype=int + + + Defined at inputlist.fpp line 129 + + """ + return _spec.f90wrap_inputlist__get__vcasingper() + + @vcasingper.setter + def vcasingper(self, vcasingper): + _spec.f90wrap_inputlist__set__vcasingper(vcasingper) + + @property + def mcasingcal(self): + """ + Element mcasingcal ftype=integer pytype=int + + + Defined at inputlist.fpp line 130 + + """ + return _spec.f90wrap_inputlist__get__mcasingcal() + + @mcasingcal.setter + def mcasingcal(self, mcasingcal): + _spec.f90wrap_inputlist__set__mcasingcal(mcasingcal) + + @property + def odetol(self): + """ + Element odetol ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 133 + + """ + return _spec.f90wrap_inputlist__get__odetol() + + @odetol.setter + def odetol(self, odetol): + _spec.f90wrap_inputlist__set__odetol(odetol) + + @property + def absreq(self): + """ + Element absreq ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 134 + + """ + return _spec.f90wrap_inputlist__get__absreq() + + @absreq.setter + def absreq(self, absreq): + _spec.f90wrap_inputlist__set__absreq(absreq) + + @property + def relreq(self): + """ + Element relreq ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 135 + + """ + return _spec.f90wrap_inputlist__get__relreq() + + @relreq.setter + def relreq(self, relreq): + _spec.f90wrap_inputlist__set__relreq(relreq) + + @property + def absacc(self): + """ + Element absacc ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 136 + + """ + return _spec.f90wrap_inputlist__get__absacc() + + @absacc.setter + def absacc(self, absacc): + _spec.f90wrap_inputlist__set__absacc(absacc) + + @property + def epsr(self): + """ + Element epsr ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 137 + + """ + return _spec.f90wrap_inputlist__get__epsr() + + @epsr.setter + def epsr(self, epsr): + _spec.f90wrap_inputlist__set__epsr(epsr) + + @property + def nppts(self): + """ + Element nppts ftype=integer pytype=int + + + Defined at inputlist.fpp line 138 + + """ + return _spec.f90wrap_inputlist__get__nppts() + + @nppts.setter + def nppts(self, nppts): + _spec.f90wrap_inputlist__set__nppts(nppts) + + @property + def ppts(self): + """ + Element ppts ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 139 + + """ + return _spec.f90wrap_inputlist__get__ppts() + + @ppts.setter + def ppts(self, ppts): + _spec.f90wrap_inputlist__set__ppts(ppts) + + @property + def nptrj(self): + """ + Element nptrj 
ftype=integer pytype=int + + + Defined at inputlist.fpp line 140 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_inputlist__array__nptrj(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + nptrj = self._arrays[array_handle] + else: + nptrj = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_inputlist__array__nptrj) + self._arrays[array_handle] = nptrj + return nptrj + + @nptrj.setter + def nptrj(self, nptrj): + self.nptrj[...] = nptrj + + @property + def lhevalues(self): + """ + Element lhevalues ftype=logical pytype=bool + + + Defined at inputlist.fpp line 141 + + """ + return _spec.f90wrap_inputlist__get__lhevalues() + + @lhevalues.setter + def lhevalues(self, lhevalues): + _spec.f90wrap_inputlist__set__lhevalues(lhevalues) + + @property + def lhevectors(self): + """ + Element lhevectors ftype=logical pytype=bool + + + Defined at inputlist.fpp line 142 + + """ + return _spec.f90wrap_inputlist__get__lhevectors() + + @lhevectors.setter + def lhevectors(self, lhevectors): + _spec.f90wrap_inputlist__set__lhevectors(lhevectors) + + @property + def lhmatrix(self): + """ + Element lhmatrix ftype=logical pytype=bool + + + Defined at inputlist.fpp line 143 + + """ + return _spec.f90wrap_inputlist__get__lhmatrix() + + @lhmatrix.setter + def lhmatrix(self, lhmatrix): + _spec.f90wrap_inputlist__set__lhmatrix(lhmatrix) + + @property + def lperturbed(self): + """ + Element lperturbed ftype=integer pytype=int + + + Defined at inputlist.fpp line 144 + + """ + return _spec.f90wrap_inputlist__get__lperturbed() + + @lperturbed.setter + def lperturbed(self, lperturbed): + _spec.f90wrap_inputlist__set__lperturbed(lperturbed) + + @property + def dpp(self): + """ + Element dpp ftype=integer pytype=int + + + Defined at inputlist.fpp line 145 + + """ + return _spec.f90wrap_inputlist__get__dpp() + + @dpp.setter + def dpp(self, dpp): + _spec.f90wrap_inputlist__set__dpp(dpp) + + @property + def dqq(self): + """ + Element dqq ftype=integer pytype=int + + + Defined at inputlist.fpp line 146 + + """ + return _spec.f90wrap_inputlist__get__dqq() + + @dqq.setter + def dqq(self, dqq): + _spec.f90wrap_inputlist__set__dqq(dqq) + + @property + def lerrortype(self): + """ + Element lerrortype ftype=integer pytype=int + + + Defined at inputlist.fpp line 147 + + """ + return _spec.f90wrap_inputlist__get__lerrortype() + + @lerrortype.setter + def lerrortype(self, lerrortype): + _spec.f90wrap_inputlist__set__lerrortype(lerrortype) + + @property + def ngrid(self): + """ + Element ngrid ftype=integer pytype=int + + + Defined at inputlist.fpp line 148 + + """ + return _spec.f90wrap_inputlist__get__ngrid() + + @ngrid.setter + def ngrid(self, ngrid): + _spec.f90wrap_inputlist__set__ngrid(ngrid) + + @property + def drz(self): + """ + Element drz ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 149 + + """ + return _spec.f90wrap_inputlist__get__drz() + + @drz.setter + def drz(self, drz): + _spec.f90wrap_inputlist__set__drz(drz) + + @property + def lcheck(self): + """ + Element lcheck ftype=integer pytype=int + + + Defined at inputlist.fpp line 150 + + """ + return _spec.f90wrap_inputlist__get__lcheck() + + @lcheck.setter + def lcheck(self, lcheck): + _spec.f90wrap_inputlist__set__lcheck(lcheck) + + @property + def ltiming(self): + """ + Element ltiming ftype=logical pytype=bool + + + Defined at inputlist.fpp line 151 + + """ + return _spec.f90wrap_inputlist__get__ltiming() + + @ltiming.setter + def ltiming(self, 
ltiming): + _spec.f90wrap_inputlist__set__ltiming(ltiming) + + @property + def fudge(self): + """ + Element fudge ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 152 + + """ + return _spec.f90wrap_inputlist__get__fudge() + + @fudge.setter + def fudge(self, fudge): + _spec.f90wrap_inputlist__set__fudge(fudge) + + @property + def scaling(self): + """ + Element scaling ftype=real(8) pytype=float + + + Defined at inputlist.fpp line 153 + + """ + return _spec.f90wrap_inputlist__get__scaling() + + @scaling.setter + def scaling(self, scaling): + _spec.f90wrap_inputlist__set__scaling(scaling) + + @property + def wdcuhre(self): + """ + Element wdcuhre ftype=logical pytype=bool + + + Defined at inputlist.fpp line 156 + + """ + return _spec.f90wrap_inputlist__get__wdcuhre() + + @wdcuhre.setter + def wdcuhre(self, wdcuhre): + _spec.f90wrap_inputlist__set__wdcuhre(wdcuhre) + + @property + def wminpack(self): + """ + Element wminpack ftype=logical pytype=bool + + + Defined at inputlist.fpp line 157 + + """ + return _spec.f90wrap_inputlist__get__wminpack() + + @wminpack.setter + def wminpack(self, wminpack): + _spec.f90wrap_inputlist__set__wminpack(wminpack) + + @property + def wiqpack(self): + """ + Element wiqpack ftype=logical pytype=bool + + + Defined at inputlist.fpp line 158 + + """ + return _spec.f90wrap_inputlist__get__wiqpack() + + @wiqpack.setter + def wiqpack(self, wiqpack): + _spec.f90wrap_inputlist__set__wiqpack(wiqpack) + + @property + def wrksuite(self): + """ + Element wrksuite ftype=logical pytype=bool + + + Defined at inputlist.fpp line 159 + + """ + return _spec.f90wrap_inputlist__get__wrksuite() + + @wrksuite.setter + def wrksuite(self, wrksuite): + _spec.f90wrap_inputlist__set__wrksuite(wrksuite) + + @property + def wi1mach(self): + """ + Element wi1mach ftype=logical pytype=bool + + + Defined at inputlist.fpp line 160 + + """ + return _spec.f90wrap_inputlist__get__wi1mach() + + @wi1mach.setter + def wi1mach(self, wi1mach): + _spec.f90wrap_inputlist__set__wi1mach(wi1mach) + + @property + def wd1mach(self): + """ + Element wd1mach ftype=logical pytype=bool + + + Defined at inputlist.fpp line 161 + + """ + return _spec.f90wrap_inputlist__get__wd1mach() + + @wd1mach.setter + def wd1mach(self, wd1mach): + _spec.f90wrap_inputlist__set__wd1mach(wd1mach) + + @property + def wilut(self): + """ + Element wilut ftype=logical pytype=bool + + + Defined at inputlist.fpp line 162 + + """ + return _spec.f90wrap_inputlist__get__wilut() + + @wilut.setter + def wilut(self, wilut): + _spec.f90wrap_inputlist__set__wilut(wilut) + + @property + def witers(self): + """ + Element witers ftype=logical pytype=bool + + + Defined at inputlist.fpp line 163 + + """ + return _spec.f90wrap_inputlist__get__witers() + + @witers.setter + def witers(self, witers): + _spec.f90wrap_inputlist__set__witers(witers) + + @property + def winputlist(self): + """ + Element winputlist ftype=logical pytype=bool + + + Defined at inputlist.fpp line 164 + + """ + return _spec.f90wrap_inputlist__get__winputlist() + + @winputlist.setter + def winputlist(self, winputlist): + _spec.f90wrap_inputlist__set__winputlist(winputlist) + + @property + def wglobal(self): + """ + Element wglobal ftype=logical pytype=bool + + + Defined at inputlist.fpp line 165 + + """ + return _spec.f90wrap_inputlist__get__wglobal() + + @wglobal.setter + def wglobal(self, wglobal): + _spec.f90wrap_inputlist__set__wglobal(wglobal) + + @property + def wsphdf5(self): + """ + Element wsphdf5 ftype=logical pytype=bool + + + Defined at inputlist.fpp line 
166 + + """ + return _spec.f90wrap_inputlist__get__wsphdf5() + + @wsphdf5.setter + def wsphdf5(self, wsphdf5): + _spec.f90wrap_inputlist__set__wsphdf5(wsphdf5) + + @property + def wpreset(self): + """ + Element wpreset ftype=logical pytype=bool + + + Defined at inputlist.fpp line 167 + + """ + return _spec.f90wrap_inputlist__get__wpreset() + + @wpreset.setter + def wpreset(self, wpreset): + _spec.f90wrap_inputlist__set__wpreset(wpreset) + + @property + def wmanual(self): + """ + Element wmanual ftype=logical pytype=bool + + + Defined at inputlist.fpp line 168 + + """ + return _spec.f90wrap_inputlist__get__wmanual() + + @wmanual.setter + def wmanual(self, wmanual): + _spec.f90wrap_inputlist__set__wmanual(wmanual) + + @property + def wrzaxis(self): + """ + Element wrzaxis ftype=logical pytype=bool + + + Defined at inputlist.fpp line 169 + + """ + return _spec.f90wrap_inputlist__get__wrzaxis() + + @wrzaxis.setter + def wrzaxis(self, wrzaxis): + _spec.f90wrap_inputlist__set__wrzaxis(wrzaxis) + + @property + def wpackxi(self): + """ + Element wpackxi ftype=logical pytype=bool + + + Defined at inputlist.fpp line 170 + + """ + return _spec.f90wrap_inputlist__get__wpackxi() + + @wpackxi.setter + def wpackxi(self, wpackxi): + _spec.f90wrap_inputlist__set__wpackxi(wpackxi) + + @property + def wvolume(self): + """ + Element wvolume ftype=logical pytype=bool + + + Defined at inputlist.fpp line 171 + + """ + return _spec.f90wrap_inputlist__get__wvolume() + + @wvolume.setter + def wvolume(self, wvolume): + _spec.f90wrap_inputlist__set__wvolume(wvolume) + + @property + def wcoords(self): + """ + Element wcoords ftype=logical pytype=bool + + + Defined at inputlist.fpp line 172 + + """ + return _spec.f90wrap_inputlist__get__wcoords() + + @wcoords.setter + def wcoords(self, wcoords): + _spec.f90wrap_inputlist__set__wcoords(wcoords) + + @property + def wbasefn(self): + """ + Element wbasefn ftype=logical pytype=bool + + + Defined at inputlist.fpp line 173 + + """ + return _spec.f90wrap_inputlist__get__wbasefn() + + @wbasefn.setter + def wbasefn(self, wbasefn): + _spec.f90wrap_inputlist__set__wbasefn(wbasefn) + + @property + def wmemory(self): + """ + Element wmemory ftype=logical pytype=bool + + + Defined at inputlist.fpp line 174 + + """ + return _spec.f90wrap_inputlist__get__wmemory() + + @wmemory.setter + def wmemory(self, wmemory): + _spec.f90wrap_inputlist__set__wmemory(wmemory) + + @property + def wmetrix(self): + """ + Element wmetrix ftype=logical pytype=bool + + + Defined at inputlist.fpp line 175 + + """ + return _spec.f90wrap_inputlist__get__wmetrix() + + @wmetrix.setter + def wmetrix(self, wmetrix): + _spec.f90wrap_inputlist__set__wmetrix(wmetrix) + + @property + def wma00aa(self): + """ + Element wma00aa ftype=logical pytype=bool + + + Defined at inputlist.fpp line 176 + + """ + return _spec.f90wrap_inputlist__get__wma00aa() + + @wma00aa.setter + def wma00aa(self, wma00aa): + _spec.f90wrap_inputlist__set__wma00aa(wma00aa) + + @property + def wmatrix(self): + """ + Element wmatrix ftype=logical pytype=bool + + + Defined at inputlist.fpp line 177 + + """ + return _spec.f90wrap_inputlist__get__wmatrix() + + @wmatrix.setter + def wmatrix(self, wmatrix): + _spec.f90wrap_inputlist__set__wmatrix(wmatrix) + + @property + def wspsmat(self): + """ + Element wspsmat ftype=logical pytype=bool + + + Defined at inputlist.fpp line 178 + + """ + return _spec.f90wrap_inputlist__get__wspsmat() + + @wspsmat.setter + def wspsmat(self, wspsmat): + _spec.f90wrap_inputlist__set__wspsmat(wspsmat) + + @property + def 
wspsint(self): + """ + Element wspsint ftype=logical pytype=bool + + + Defined at inputlist.fpp line 179 + + """ + return _spec.f90wrap_inputlist__get__wspsint() + + @wspsint.setter + def wspsint(self, wspsint): + _spec.f90wrap_inputlist__set__wspsint(wspsint) + + @property + def wmp00ac(self): + """ + Element wmp00ac ftype=logical pytype=bool + + + Defined at inputlist.fpp line 180 + + """ + return _spec.f90wrap_inputlist__get__wmp00ac() + + @wmp00ac.setter + def wmp00ac(self, wmp00ac): + _spec.f90wrap_inputlist__set__wmp00ac(wmp00ac) + + @property + def wma02aa(self): + """ + Element wma02aa ftype=logical pytype=bool + + + Defined at inputlist.fpp line 181 + + """ + return _spec.f90wrap_inputlist__get__wma02aa() + + @wma02aa.setter + def wma02aa(self, wma02aa): + _spec.f90wrap_inputlist__set__wma02aa(wma02aa) + + @property + def wpackab(self): + """ + Element wpackab ftype=logical pytype=bool + + + Defined at inputlist.fpp line 182 + + """ + return _spec.f90wrap_inputlist__get__wpackab() + + @wpackab.setter + def wpackab(self, wpackab): + _spec.f90wrap_inputlist__set__wpackab(wpackab) + + @property + def wtr00ab(self): + """ + Element wtr00ab ftype=logical pytype=bool + + + Defined at inputlist.fpp line 183 + + """ + return _spec.f90wrap_inputlist__get__wtr00ab() + + @wtr00ab.setter + def wtr00ab(self, wtr00ab): + _spec.f90wrap_inputlist__set__wtr00ab(wtr00ab) + + @property + def wcurent(self): + """ + Element wcurent ftype=logical pytype=bool + + + Defined at inputlist.fpp line 184 + + """ + return _spec.f90wrap_inputlist__get__wcurent() + + @wcurent.setter + def wcurent(self, wcurent): + _spec.f90wrap_inputlist__set__wcurent(wcurent) + + @property + def wdf00ab(self): + """ + Element wdf00ab ftype=logical pytype=bool + + + Defined at inputlist.fpp line 185 + + """ + return _spec.f90wrap_inputlist__get__wdf00ab() + + @wdf00ab.setter + def wdf00ab(self, wdf00ab): + _spec.f90wrap_inputlist__set__wdf00ab(wdf00ab) + + @property + def wlforce(self): + """ + Element wlforce ftype=logical pytype=bool + + + Defined at inputlist.fpp line 186 + + """ + return _spec.f90wrap_inputlist__get__wlforce() + + @wlforce.setter + def wlforce(self, wlforce): + _spec.f90wrap_inputlist__set__wlforce(wlforce) + + @property + def wintghs(self): + """ + Element wintghs ftype=logical pytype=bool + + + Defined at inputlist.fpp line 187 + + """ + return _spec.f90wrap_inputlist__get__wintghs() + + @wintghs.setter + def wintghs(self, wintghs): + _spec.f90wrap_inputlist__set__wintghs(wintghs) + + @property + def wmtrxhs(self): + """ + Element wmtrxhs ftype=logical pytype=bool + + + Defined at inputlist.fpp line 188 + + """ + return _spec.f90wrap_inputlist__get__wmtrxhs() + + @wmtrxhs.setter + def wmtrxhs(self, wmtrxhs): + _spec.f90wrap_inputlist__set__wmtrxhs(wmtrxhs) + + @property + def wlbpol(self): + """ + Element wlbpol ftype=logical pytype=bool + + + Defined at inputlist.fpp line 189 + + """ + return _spec.f90wrap_inputlist__get__wlbpol() + + @wlbpol.setter + def wlbpol(self, wlbpol): + _spec.f90wrap_inputlist__set__wlbpol(wlbpol) + + @property + def wbrcast(self): + """ + Element wbrcast ftype=logical pytype=bool + + + Defined at inputlist.fpp line 190 + + """ + return _spec.f90wrap_inputlist__get__wbrcast() + + @wbrcast.setter + def wbrcast(self, wbrcast): + _spec.f90wrap_inputlist__set__wbrcast(wbrcast) + + @property + def wdfp100(self): + """ + Element wdfp100 ftype=logical pytype=bool + + + Defined at inputlist.fpp line 191 + + """ + return _spec.f90wrap_inputlist__get__wdfp100() + + @wdfp100.setter + def 
wdfp100(self, wdfp100): + _spec.f90wrap_inputlist__set__wdfp100(wdfp100) + + @property + def wdfp200(self): + """ + Element wdfp200 ftype=logical pytype=bool + + + Defined at inputlist.fpp line 192 + + """ + return _spec.f90wrap_inputlist__get__wdfp200() + + @wdfp200.setter + def wdfp200(self, wdfp200): + _spec.f90wrap_inputlist__set__wdfp200(wdfp200) + + @property + def wdforce(self): + """ + Element wdforce ftype=logical pytype=bool + + + Defined at inputlist.fpp line 193 + + """ + return _spec.f90wrap_inputlist__get__wdforce() + + @wdforce.setter + def wdforce(self, wdforce): + _spec.f90wrap_inputlist__set__wdforce(wdforce) + + @property + def wnewton(self): + """ + Element wnewton ftype=logical pytype=bool + + + Defined at inputlist.fpp line 194 + + """ + return _spec.f90wrap_inputlist__get__wnewton() + + @wnewton.setter + def wnewton(self, wnewton): + _spec.f90wrap_inputlist__set__wnewton(wnewton) + + @property + def wcasing(self): + """ + Element wcasing ftype=logical pytype=bool + + + Defined at inputlist.fpp line 195 + + """ + return _spec.f90wrap_inputlist__get__wcasing() + + @wcasing.setter + def wcasing(self, wcasing): + _spec.f90wrap_inputlist__set__wcasing(wcasing) + + @property + def wbnorml(self): + """ + Element wbnorml ftype=logical pytype=bool + + + Defined at inputlist.fpp line 196 + + """ + return _spec.f90wrap_inputlist__get__wbnorml() + + @wbnorml.setter + def wbnorml(self, wbnorml): + _spec.f90wrap_inputlist__set__wbnorml(wbnorml) + + @property + def wjo00aa(self): + """ + Element wjo00aa ftype=logical pytype=bool + + + Defined at inputlist.fpp line 197 + + """ + return _spec.f90wrap_inputlist__get__wjo00aa() + + @wjo00aa.setter + def wjo00aa(self, wjo00aa): + _spec.f90wrap_inputlist__set__wjo00aa(wjo00aa) + + @property + def wpp00aa(self): + """ + Element wpp00aa ftype=logical pytype=bool + + + Defined at inputlist.fpp line 198 + + """ + return _spec.f90wrap_inputlist__get__wpp00aa() + + @wpp00aa.setter + def wpp00aa(self, wpp00aa): + _spec.f90wrap_inputlist__set__wpp00aa(wpp00aa) + + @property + def wpp00ab(self): + """ + Element wpp00ab ftype=logical pytype=bool + + + Defined at inputlist.fpp line 199 + + """ + return _spec.f90wrap_inputlist__get__wpp00ab() + + @wpp00ab.setter + def wpp00ab(self, wpp00ab): + _spec.f90wrap_inputlist__set__wpp00ab(wpp00ab) + + @property + def wbfield(self): + """ + Element wbfield ftype=logical pytype=bool + + + Defined at inputlist.fpp line 200 + + """ + return _spec.f90wrap_inputlist__get__wbfield() + + @wbfield.setter + def wbfield(self, wbfield): + _spec.f90wrap_inputlist__set__wbfield(wbfield) + + @property + def wstzxyz(self): + """ + Element wstzxyz ftype=logical pytype=bool + + + Defined at inputlist.fpp line 201 + + """ + return _spec.f90wrap_inputlist__get__wstzxyz() + + @wstzxyz.setter + def wstzxyz(self, wstzxyz): + _spec.f90wrap_inputlist__set__wstzxyz(wstzxyz) + + @property + def whesian(self): + """ + Element whesian ftype=logical pytype=bool + + + Defined at inputlist.fpp line 202 + + """ + return _spec.f90wrap_inputlist__get__whesian() + + @whesian.setter + def whesian(self, whesian): + _spec.f90wrap_inputlist__set__whesian(whesian) + + @property + def wra00aa(self): + """ + Element wra00aa ftype=logical pytype=bool + + + Defined at inputlist.fpp line 203 + + """ + return _spec.f90wrap_inputlist__get__wra00aa() + + @wra00aa.setter + def wra00aa(self, wra00aa): + _spec.f90wrap_inputlist__set__wra00aa(wra00aa) + + @property + def wnumrec(self): + """ + Element wnumrec ftype=logical pytype=bool + + + Defined at 
inputlist.fpp line 204 + + """ + return _spec.f90wrap_inputlist__get__wnumrec() + + @wnumrec.setter + def wnumrec(self, wnumrec): + _spec.f90wrap_inputlist__set__wnumrec(wnumrec) + + @property + def wxspech(self): + """ + Element wxspech ftype=logical pytype=bool + + + Defined at inputlist.fpp line 205 + + """ + return _spec.f90wrap_inputlist__get__wxspech() + + @wxspech.setter + def wxspech(self, wxspech): + _spec.f90wrap_inputlist__set__wxspech(wxspech) + + @property + def wbuild_vector_potential(self): + """ + Element wbuild_vector_potential ftype=logical pytype=bool + + + Defined at inputlist.fpp line 207 + + """ + return _spec.f90wrap_inputlist__get__wbuild_vector_potential() + + @wbuild_vector_potential.setter + def wbuild_vector_potential(self, wbuild_vector_potential): + _spec.f90wrap_inputlist__set__wbuild_vector_potential(wbuild_vector_potential) + + @property + def wreadin(self): + """ + Element wreadin ftype=logical pytype=bool + + + Defined at inputlist.fpp line 208 + + """ + return _spec.f90wrap_inputlist__get__wreadin() + + @wreadin.setter + def wreadin(self, wreadin): + _spec.f90wrap_inputlist__set__wreadin(wreadin) + + @property + def wwritin(self): + """ + Element wwritin ftype=logical pytype=bool + + + Defined at inputlist.fpp line 209 + + """ + return _spec.f90wrap_inputlist__get__wwritin() + + @wwritin.setter + def wwritin(self, wwritin): + _spec.f90wrap_inputlist__set__wwritin(wwritin) + + @property + def wwrtend(self): + """ + Element wwrtend ftype=logical pytype=bool + + + Defined at inputlist.fpp line 210 + + """ + return _spec.f90wrap_inputlist__get__wwrtend() + + @wwrtend.setter + def wwrtend(self, wwrtend): + _spec.f90wrap_inputlist__set__wwrtend(wwrtend) + + @property + def wmacros(self): + """ + Element wmacros ftype=logical pytype=bool + + + Defined at inputlist.fpp line 211 + + """ + return _spec.f90wrap_inputlist__get__wmacros() + + @wmacros.setter + def wmacros(self, wmacros): + _spec.f90wrap_inputlist__set__wmacros(wmacros) + + def __str__(self): + ret = ['{\n'] + ret.append(' mnvol : ') + ret.append(repr(self.mnvol)) + ret.append(',\n mmpol : ') + ret.append(repr(self.mmpol)) + ret.append(',\n mntor : ') + ret.append(repr(self.mntor)) + ret.append(',\n igeometry : ') + ret.append(repr(self.igeometry)) + ret.append(',\n istellsym : ') + ret.append(repr(self.istellsym)) + ret.append(',\n lfreebound : ') + ret.append(repr(self.lfreebound)) + ret.append(',\n phiedge : ') + ret.append(repr(self.phiedge)) + ret.append(',\n curtor : ') + ret.append(repr(self.curtor)) + ret.append(',\n curpol : ') + ret.append(repr(self.curpol)) + ret.append(',\n gamma : ') + ret.append(repr(self.gamma)) + ret.append(',\n nfp : ') + ret.append(repr(self.nfp)) + ret.append(',\n nvol : ') + ret.append(repr(self.nvol)) + ret.append(',\n mpol : ') + ret.append(repr(self.mpol)) + ret.append(',\n ntor : ') + ret.append(repr(self.ntor)) + ret.append(',\n lrad : ') + ret.append(repr(self.lrad)) + ret.append(',\n lconstraint : ') + ret.append(repr(self.lconstraint)) + ret.append(',\n tflux : ') + ret.append(repr(self.tflux)) + ret.append(',\n pflux : ') + ret.append(repr(self.pflux)) + ret.append(',\n helicity : ') + ret.append(repr(self.helicity)) + ret.append(',\n pscale : ') + ret.append(repr(self.pscale)) + ret.append(',\n pressure : ') + ret.append(repr(self.pressure)) + ret.append(',\n ladiabatic : ') + ret.append(repr(self.ladiabatic)) + ret.append(',\n adiabatic : ') + ret.append(repr(self.adiabatic)) + ret.append(',\n mu : ') + ret.append(repr(self.mu)) + ret.append(',\n ivolume 
: ') + ret.append(repr(self.ivolume)) + ret.append(',\n isurf : ') + ret.append(repr(self.isurf)) + ret.append(',\n pl : ') + ret.append(repr(self.pl)) + ret.append(',\n ql : ') + ret.append(repr(self.ql)) + ret.append(',\n pr : ') + ret.append(repr(self.pr)) + ret.append(',\n qr : ') + ret.append(repr(self.qr)) + ret.append(',\n iota : ') + ret.append(repr(self.iota)) + ret.append(',\n lp : ') + ret.append(repr(self.lp)) + ret.append(',\n lq : ') + ret.append(repr(self.lq)) + ret.append(',\n rp : ') + ret.append(repr(self.rp)) + ret.append(',\n rq : ') + ret.append(repr(self.rq)) + ret.append(',\n oita : ') + ret.append(repr(self.oita)) + ret.append(',\n rpol : ') + ret.append(repr(self.rpol)) + ret.append(',\n rtor : ') + ret.append(repr(self.rtor)) + ret.append(',\n rac : ') + ret.append(repr(self.rac)) + ret.append(',\n zas : ') + ret.append(repr(self.zas)) + ret.append(',\n ras : ') + ret.append(repr(self.ras)) + ret.append(',\n zac : ') + ret.append(repr(self.zac)) + ret.append(',\n rbc : ') + ret.append(repr(self.rbc)) + ret.append(',\n zbs : ') + ret.append(repr(self.zbs)) + ret.append(',\n rbs : ') + ret.append(repr(self.rbs)) + ret.append(',\n zbc : ') + ret.append(repr(self.zbc)) + ret.append(',\n rwc : ') + ret.append(repr(self.rwc)) + ret.append(',\n zws : ') + ret.append(repr(self.zws)) + ret.append(',\n rws : ') + ret.append(repr(self.rws)) + ret.append(',\n zwc : ') + ret.append(repr(self.zwc)) + ret.append(',\n vns : ') + ret.append(repr(self.vns)) + ret.append(',\n bns : ') + ret.append(repr(self.bns)) + ret.append(',\n vnc : ') + ret.append(repr(self.vnc)) + ret.append(',\n bnc : ') + ret.append(repr(self.bnc)) + ret.append(',\n mupftol : ') + ret.append(repr(self.mupftol)) + ret.append(',\n mupfits : ') + ret.append(repr(self.mupfits)) + ret.append(',\n lreflect : ') + ret.append(repr(self.lreflect)) + ret.append(',\n linitialize : ') + ret.append(repr(self.linitialize)) + ret.append(',\n lautoinitbn : ') + ret.append(repr(self.lautoinitbn)) + ret.append(',\n lzerovac : ') + ret.append(repr(self.lzerovac)) + ret.append(',\n ndiscrete : ') + ret.append(repr(self.ndiscrete)) + ret.append(',\n nquad : ') + ret.append(repr(self.nquad)) + ret.append(',\n impol : ') + ret.append(repr(self.impol)) + ret.append(',\n intor : ') + ret.append(repr(self.intor)) + ret.append(',\n lsparse : ') + ret.append(repr(self.lsparse)) + ret.append(',\n lsvdiota : ') + ret.append(repr(self.lsvdiota)) + ret.append(',\n imethod : ') + ret.append(repr(self.imethod)) + ret.append(',\n iorder : ') + ret.append(repr(self.iorder)) + ret.append(',\n iprecon : ') + ret.append(repr(self.iprecon)) + ret.append(',\n iotatol : ') + ret.append(repr(self.iotatol)) + ret.append(',\n lextrap : ') + ret.append(repr(self.lextrap)) + ret.append(',\n mregular : ') + ret.append(repr(self.mregular)) + ret.append(',\n lrzaxis : ') + ret.append(repr(self.lrzaxis)) + ret.append(',\n ntoraxis : ') + ret.append(repr(self.ntoraxis)) + ret.append(',\n lbeltrami : ') + ret.append(repr(self.lbeltrami)) + ret.append(',\n linitgues : ') + ret.append(repr(self.linitgues)) + ret.append(',\n lposdef : ') + ret.append(repr(self.lposdef)) + ret.append(',\n maxrndgues : ') + ret.append(repr(self.maxrndgues)) + ret.append(',\n lmatsolver : ') + ret.append(repr(self.lmatsolver)) + ret.append(',\n nitergmres : ') + ret.append(repr(self.nitergmres)) + ret.append(',\n epsgmres : ') + ret.append(repr(self.epsgmres)) + ret.append(',\n lgmresprec : ') + ret.append(repr(self.lgmresprec)) + ret.append(',\n epsilu : ') + 
ret.append(repr(self.epsilu)) + ret.append(',\n lfindzero : ') + ret.append(repr(self.lfindzero)) + ret.append(',\n escale : ') + ret.append(repr(self.escale)) + ret.append(',\n opsilon : ') + ret.append(repr(self.opsilon)) + ret.append(',\n pcondense : ') + ret.append(repr(self.pcondense)) + ret.append(',\n epsilon : ') + ret.append(repr(self.epsilon)) + ret.append(',\n wpoloidal : ') + ret.append(repr(self.wpoloidal)) + ret.append(',\n upsilon : ') + ret.append(repr(self.upsilon)) + ret.append(',\n forcetol : ') + ret.append(repr(self.forcetol)) + ret.append(',\n c05xmax : ') + ret.append(repr(self.c05xmax)) + ret.append(',\n c05xtol : ') + ret.append(repr(self.c05xtol)) + ret.append(',\n c05factor : ') + ret.append(repr(self.c05factor)) + ret.append(',\n lreadgf : ') + ret.append(repr(self.lreadgf)) + ret.append(',\n mfreeits : ') + ret.append(repr(self.mfreeits)) + ret.append(',\n bnstol : ') + ret.append(repr(self.bnstol)) + ret.append(',\n bnsblend : ') + ret.append(repr(self.bnsblend)) + ret.append(',\n gbntol : ') + ret.append(repr(self.gbntol)) + ret.append(',\n gbnbld : ') + ret.append(repr(self.gbnbld)) + ret.append(',\n vcasingeps : ') + ret.append(repr(self.vcasingeps)) + ret.append(',\n vcasingtol : ') + ret.append(repr(self.vcasingtol)) + ret.append(',\n vcasingits : ') + ret.append(repr(self.vcasingits)) + ret.append(',\n vcasingper : ') + ret.append(repr(self.vcasingper)) + ret.append(',\n mcasingcal : ') + ret.append(repr(self.mcasingcal)) + ret.append(',\n odetol : ') + ret.append(repr(self.odetol)) + ret.append(',\n absreq : ') + ret.append(repr(self.absreq)) + ret.append(',\n relreq : ') + ret.append(repr(self.relreq)) + ret.append(',\n absacc : ') + ret.append(repr(self.absacc)) + ret.append(',\n epsr : ') + ret.append(repr(self.epsr)) + ret.append(',\n nppts : ') + ret.append(repr(self.nppts)) + ret.append(',\n ppts : ') + ret.append(repr(self.ppts)) + ret.append(',\n nptrj : ') + ret.append(repr(self.nptrj)) + ret.append(',\n lhevalues : ') + ret.append(repr(self.lhevalues)) + ret.append(',\n lhevectors : ') + ret.append(repr(self.lhevectors)) + ret.append(',\n lhmatrix : ') + ret.append(repr(self.lhmatrix)) + ret.append(',\n lperturbed : ') + ret.append(repr(self.lperturbed)) + ret.append(',\n dpp : ') + ret.append(repr(self.dpp)) + ret.append(',\n dqq : ') + ret.append(repr(self.dqq)) + ret.append(',\n lerrortype : ') + ret.append(repr(self.lerrortype)) + ret.append(',\n ngrid : ') + ret.append(repr(self.ngrid)) + ret.append(',\n drz : ') + ret.append(repr(self.drz)) + ret.append(',\n lcheck : ') + ret.append(repr(self.lcheck)) + ret.append(',\n ltiming : ') + ret.append(repr(self.ltiming)) + ret.append(',\n fudge : ') + ret.append(repr(self.fudge)) + ret.append(',\n scaling : ') + ret.append(repr(self.scaling)) + ret.append(',\n wdcuhre : ') + ret.append(repr(self.wdcuhre)) + ret.append(',\n wminpack : ') + ret.append(repr(self.wminpack)) + ret.append(',\n wiqpack : ') + ret.append(repr(self.wiqpack)) + ret.append(',\n wrksuite : ') + ret.append(repr(self.wrksuite)) + ret.append(',\n wi1mach : ') + ret.append(repr(self.wi1mach)) + ret.append(',\n wd1mach : ') + ret.append(repr(self.wd1mach)) + ret.append(',\n wilut : ') + ret.append(repr(self.wilut)) + ret.append(',\n witers : ') + ret.append(repr(self.witers)) + ret.append(',\n winputlist : ') + ret.append(repr(self.winputlist)) + ret.append(',\n wglobal : ') + ret.append(repr(self.wglobal)) + ret.append(',\n wsphdf5 : ') + ret.append(repr(self.wsphdf5)) + ret.append(',\n wpreset : ') + 
ret.append(repr(self.wpreset)) + ret.append(',\n wmanual : ') + ret.append(repr(self.wmanual)) + ret.append(',\n wrzaxis : ') + ret.append(repr(self.wrzaxis)) + ret.append(',\n wpackxi : ') + ret.append(repr(self.wpackxi)) + ret.append(',\n wvolume : ') + ret.append(repr(self.wvolume)) + ret.append(',\n wcoords : ') + ret.append(repr(self.wcoords)) + ret.append(',\n wbasefn : ') + ret.append(repr(self.wbasefn)) + ret.append(',\n wmemory : ') + ret.append(repr(self.wmemory)) + ret.append(',\n wmetrix : ') + ret.append(repr(self.wmetrix)) + ret.append(',\n wma00aa : ') + ret.append(repr(self.wma00aa)) + ret.append(',\n wmatrix : ') + ret.append(repr(self.wmatrix)) + ret.append(',\n wspsmat : ') + ret.append(repr(self.wspsmat)) + ret.append(',\n wspsint : ') + ret.append(repr(self.wspsint)) + ret.append(',\n wmp00ac : ') + ret.append(repr(self.wmp00ac)) + ret.append(',\n wma02aa : ') + ret.append(repr(self.wma02aa)) + ret.append(',\n wpackab : ') + ret.append(repr(self.wpackab)) + ret.append(',\n wtr00ab : ') + ret.append(repr(self.wtr00ab)) + ret.append(',\n wcurent : ') + ret.append(repr(self.wcurent)) + ret.append(',\n wdf00ab : ') + ret.append(repr(self.wdf00ab)) + ret.append(',\n wlforce : ') + ret.append(repr(self.wlforce)) + ret.append(',\n wintghs : ') + ret.append(repr(self.wintghs)) + ret.append(',\n wmtrxhs : ') + ret.append(repr(self.wmtrxhs)) + ret.append(',\n wlbpol : ') + ret.append(repr(self.wlbpol)) + ret.append(',\n wbrcast : ') + ret.append(repr(self.wbrcast)) + ret.append(',\n wdfp100 : ') + ret.append(repr(self.wdfp100)) + ret.append(',\n wdfp200 : ') + ret.append(repr(self.wdfp200)) + ret.append(',\n wdforce : ') + ret.append(repr(self.wdforce)) + ret.append(',\n wnewton : ') + ret.append(repr(self.wnewton)) + ret.append(',\n wcasing : ') + ret.append(repr(self.wcasing)) + ret.append(',\n wbnorml : ') + ret.append(repr(self.wbnorml)) + ret.append(',\n wjo00aa : ') + ret.append(repr(self.wjo00aa)) + ret.append(',\n wpp00aa : ') + ret.append(repr(self.wpp00aa)) + ret.append(',\n wpp00ab : ') + ret.append(repr(self.wpp00ab)) + ret.append(',\n wbfield : ') + ret.append(repr(self.wbfield)) + ret.append(',\n wstzxyz : ') + ret.append(repr(self.wstzxyz)) + ret.append(',\n whesian : ') + ret.append(repr(self.whesian)) + ret.append(',\n wra00aa : ') + ret.append(repr(self.wra00aa)) + ret.append(',\n wnumrec : ') + ret.append(repr(self.wnumrec)) + ret.append(',\n wxspech : ') + ret.append(repr(self.wxspech)) + ret.append(',\n wbuild_vector_potential : ') + ret.append(repr(self.wbuild_vector_potential)) + ret.append(',\n wreadin : ') + ret.append(repr(self.wreadin)) + ret.append(',\n wwritin : ') + ret.append(repr(self.wwritin)) + ret.append(',\n wwrtend : ') + ret.append(repr(self.wwrtend)) + ret.append(',\n wmacros : ') + ret.append(repr(self.wmacros)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + +inputlist = Inputlist() + +class Constants(f90wrap.runtime.FortranModule): + """ + Module constants + + + Defined at global.fpp lines 24-51 + + """ + @property + def zero(self): + """ + Element zero ftype=real(8) pytype=float + + + Defined at global.fpp line 26 + + """ + return _spec.f90wrap_constants__get__zero() + + @property + def one(self): + """ + Element one ftype=real(8) pytype=float + + + Defined at global.fpp line 27 + + """ + return _spec.f90wrap_constants__get__one() + + @property + def two(self): + """ + Element two ftype=real(8) pytype=float + + + Defined at global.fpp line 28 + + """ + return _spec.f90wrap_constants__get__two() + + @property 
+ def three(self): + """ + Element three ftype=real(8) pytype=float + + + Defined at global.fpp line 29 + + """ + return _spec.f90wrap_constants__get__three() + + @property + def four(self): + """ + Element four ftype=real(8) pytype=float + + + Defined at global.fpp line 30 + + """ + return _spec.f90wrap_constants__get__four() + + @property + def five(self): + """ + Element five ftype=real(8) pytype=float + + + Defined at global.fpp line 31 + + """ + return _spec.f90wrap_constants__get__five() + + @property + def six(self): + """ + Element six ftype=real(8) pytype=float + + + Defined at global.fpp line 32 + + """ + return _spec.f90wrap_constants__get__six() + + @property + def seven(self): + """ + Element seven ftype=real(8) pytype=float + + + Defined at global.fpp line 33 + + """ + return _spec.f90wrap_constants__get__seven() + + @property + def eight(self): + """ + Element eight ftype=real(8) pytype=float + + + Defined at global.fpp line 34 + + """ + return _spec.f90wrap_constants__get__eight() + + @property + def nine(self): + """ + Element nine ftype=real(8) pytype=float + + + Defined at global.fpp line 35 + + """ + return _spec.f90wrap_constants__get__nine() + + @property + def ten(self): + """ + Element ten ftype=real(8) pytype=float + + + Defined at global.fpp line 36 + + """ + return _spec.f90wrap_constants__get__ten() + + @property + def eleven(self): + """ + Element eleven ftype=real(8) pytype=float + + + Defined at global.fpp line 37 + + """ + return _spec.f90wrap_constants__get__eleven() + + @property + def twelve(self): + """ + Element twelve ftype=real(8) pytype=float + + + Defined at global.fpp line 38 + + """ + return _spec.f90wrap_constants__get__twelve() + + @property + def hundred(self): + """ + Element hundred ftype=real(8) pytype=float + + + Defined at global.fpp line 39 + + """ + return _spec.f90wrap_constants__get__hundred() + + @property + def thousand(self): + """ + Element thousand ftype=real(8) pytype=float + + + Defined at global.fpp line 40 + + """ + return _spec.f90wrap_constants__get__thousand() + + @property + def half(self): + """ + Element half ftype=real(8) pytype=float + + + Defined at global.fpp line 41 + + """ + return _spec.f90wrap_constants__get__half() + + @property + def third(self): + """ + Element third ftype=real(8) pytype=float + + + Defined at global.fpp line 42 + + """ + return _spec.f90wrap_constants__get__third() + + @property + def quart(self): + """ + Element quart ftype=real(8) pytype=float + + + Defined at global.fpp line 43 + + """ + return _spec.f90wrap_constants__get__quart() + + @property + def fifth(self): + """ + Element fifth ftype=real(8) pytype=float + + + Defined at global.fpp line 44 + + """ + return _spec.f90wrap_constants__get__fifth() + + @property + def sixth(self): + """ + Element sixth ftype=real(8) pytype=float + + + Defined at global.fpp line 45 + + """ + return _spec.f90wrap_constants__get__sixth() + + @property + def pi2(self): + """ + Element pi2 ftype=real(8) pytype=float + + + Defined at global.fpp line 46 + + """ + return _spec.f90wrap_constants__get__pi2() + + @property + def pi(self): + """ + Element pi ftype=real(8) pytype=float + + + Defined at global.fpp line 47 + + """ + return _spec.f90wrap_constants__get__pi() + + @property + def mu0(self): + """ + Element mu0 ftype=real(8) pytype=float + + + Defined at global.fpp line 48 + + """ + return _spec.f90wrap_constants__get__mu0() + + @property + def goldenmean(self): + """ + Element goldenmean ftype=real(8) pytype=float + + + Defined at global.fpp line 49 + + 
""" + return _spec.f90wrap_constants__get__goldenmean() + + @property + def version(self): + """ + Element version ftype=real(8) pytype=float + + + Defined at global.fpp line 51 + + """ + return _spec.f90wrap_constants__get__version() + + def __str__(self): + ret = ['{\n'] + ret.append(' zero : ') + ret.append(repr(self.zero)) + ret.append(',\n one : ') + ret.append(repr(self.one)) + ret.append(',\n two : ') + ret.append(repr(self.two)) + ret.append(',\n three : ') + ret.append(repr(self.three)) + ret.append(',\n four : ') + ret.append(repr(self.four)) + ret.append(',\n five : ') + ret.append(repr(self.five)) + ret.append(',\n six : ') + ret.append(repr(self.six)) + ret.append(',\n seven : ') + ret.append(repr(self.seven)) + ret.append(',\n eight : ') + ret.append(repr(self.eight)) + ret.append(',\n nine : ') + ret.append(repr(self.nine)) + ret.append(',\n ten : ') + ret.append(repr(self.ten)) + ret.append(',\n eleven : ') + ret.append(repr(self.eleven)) + ret.append(',\n twelve : ') + ret.append(repr(self.twelve)) + ret.append(',\n hundred : ') + ret.append(repr(self.hundred)) + ret.append(',\n thousand : ') + ret.append(repr(self.thousand)) + ret.append(',\n half : ') + ret.append(repr(self.half)) + ret.append(',\n third : ') + ret.append(repr(self.third)) + ret.append(',\n quart : ') + ret.append(repr(self.quart)) + ret.append(',\n fifth : ') + ret.append(repr(self.fifth)) + ret.append(',\n sixth : ') + ret.append(repr(self.sixth)) + ret.append(',\n pi2 : ') + ret.append(repr(self.pi2)) + ret.append(',\n pi : ') + ret.append(repr(self.pi)) + ret.append(',\n mu0 : ') + ret.append(repr(self.mu0)) + ret.append(',\n goldenmean : ') + ret.append(repr(self.goldenmean)) + ret.append(',\n version : ') + ret.append(repr(self.version)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + +constants = Constants() + +class Numerical(f90wrap.runtime.FortranModule): + """ + Module numerical + + + Defined at global.fpp lines 55-65 + + """ + @property + def machprec(self): + """ + Element machprec ftype=real(8) pytype=float + + + Defined at global.fpp line 61 + + """ + return _spec.f90wrap_numerical__get__machprec() + + @property + def vsmall(self): + """ + Element vsmall ftype=real(8) pytype=float + + + Defined at global.fpp line 62 + + """ + return _spec.f90wrap_numerical__get__vsmall() + + @property + def small(self): + """ + Element small ftype=real(8) pytype=float + + + Defined at global.fpp line 63 + + """ + return _spec.f90wrap_numerical__get__small() + + @property + def sqrtmachprec(self): + """ + Element sqrtmachprec ftype=real(8) pytype=float + + + Defined at global.fpp line 64 + + """ + return _spec.f90wrap_numerical__get__sqrtmachprec() + + @property + def logtolerance(self): + """ + Element logtolerance ftype=real(8) pytype=float + + + Defined at global.fpp line 65 + + """ + return _spec.f90wrap_numerical__get__logtolerance() + + def __str__(self): + ret = ['{\n'] + ret.append(' machprec : ') + ret.append(repr(self.machprec)) + ret.append(',\n vsmall : ') + ret.append(repr(self.vsmall)) + ret.append(',\n small : ') + ret.append(repr(self.small)) + ret.append(',\n sqrtmachprec : ') + ret.append(repr(self.sqrtmachprec)) + ret.append(',\n logtolerance : ') + ret.append(repr(self.logtolerance)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + +numerical = Numerical() + +class Fileunits(f90wrap.runtime.FortranModule): + """ + Module fileunits + + + Defined at global.fpp lines 68-97 + + """ + @staticmethod + def mute(action): + """ + mute(action) 
+ + + Defined at global.fpp lines 81-96 + + Parameters + ---------- + action : int + + """ + _spec.f90wrap_mute(action=action) + + @property + def iunit(self): + """ + Element iunit ftype=integer pytype=int + + + Defined at global.fpp line 70 + + """ + return _spec.f90wrap_fileunits__get__iunit() + + @iunit.setter + def iunit(self, iunit): + _spec.f90wrap_fileunits__set__iunit(iunit) + + @property + def ounit(self): + """ + Element ounit ftype=integer pytype=int + + + Defined at global.fpp line 71 + + """ + return _spec.f90wrap_fileunits__get__ounit() + + @ounit.setter + def ounit(self, ounit): + _spec.f90wrap_fileunits__set__ounit(ounit) + + @property + def gunit(self): + """ + Element gunit ftype=integer pytype=int + + + Defined at global.fpp line 72 + + """ + return _spec.f90wrap_fileunits__get__gunit() + + @gunit.setter + def gunit(self, gunit): + _spec.f90wrap_fileunits__set__gunit(gunit) + + @property + def aunit(self): + """ + Element aunit ftype=integer pytype=int + + + Defined at global.fpp line 73 + + """ + return _spec.f90wrap_fileunits__get__aunit() + + @aunit.setter + def aunit(self, aunit): + _spec.f90wrap_fileunits__set__aunit(aunit) + + @property + def dunit(self): + """ + Element dunit ftype=integer pytype=int + + + Defined at global.fpp line 74 + + """ + return _spec.f90wrap_fileunits__get__dunit() + + @dunit.setter + def dunit(self, dunit): + _spec.f90wrap_fileunits__set__dunit(dunit) + + @property + def hunit(self): + """ + Element hunit ftype=integer pytype=int + + + Defined at global.fpp line 75 + + """ + return _spec.f90wrap_fileunits__get__hunit() + + @hunit.setter + def hunit(self, hunit): + _spec.f90wrap_fileunits__set__hunit(hunit) + + @property + def munit(self): + """ + Element munit ftype=integer pytype=int + + + Defined at global.fpp line 76 + + """ + return _spec.f90wrap_fileunits__get__munit() + + @munit.setter + def munit(self, munit): + _spec.f90wrap_fileunits__set__munit(munit) + + @property + def lunit(self): + """ + Element lunit ftype=integer pytype=int + + + Defined at global.fpp line 77 + + """ + return _spec.f90wrap_fileunits__get__lunit() + + @lunit.setter + def lunit(self, lunit): + _spec.f90wrap_fileunits__set__lunit(lunit) + + @property + def vunit(self): + """ + Element vunit ftype=integer pytype=int + + + Defined at global.fpp line 78 + + """ + return _spec.f90wrap_fileunits__get__vunit() + + @vunit.setter + def vunit(self, vunit): + _spec.f90wrap_fileunits__set__vunit(vunit) + + def __str__(self): + ret = ['{\n'] + ret.append(' iunit : ') + ret.append(repr(self.iunit)) + ret.append(',\n ounit : ') + ret.append(repr(self.ounit)) + ret.append(',\n gunit : ') + ret.append(repr(self.gunit)) + ret.append(',\n aunit : ') + ret.append(repr(self.aunit)) + ret.append(',\n dunit : ') + ret.append(repr(self.dunit)) + ret.append(',\n hunit : ') + ret.append(repr(self.hunit)) + ret.append(',\n munit : ') + ret.append(repr(self.munit)) + ret.append(',\n lunit : ') + ret.append(repr(self.lunit)) + ret.append(',\n vunit : ') + ret.append(repr(self.vunit)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + +fileunits = Fileunits() + +class Cputiming(f90wrap.runtime.FortranModule): + """ + Module cputiming + + + Defined at global.fpp lines 100-154 + + """ + @property + def tdcuhre(self): + """ + Element tdcuhre ftype=real(8) pytype=float + + + Defined at global.fpp line 101 + + """ + return _spec.f90wrap_cputiming__get__tdcuhre() + + @tdcuhre.setter + def tdcuhre(self, tdcuhre): + _spec.f90wrap_cputiming__set__tdcuhre(tdcuhre) + + 
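For reference, a minimal usage sketch of the wrapper API generated in this diff; the import name `spec` and the example values are assumptions for illustration only, not part of the patch, and this assumes the compiled _spec extension has been built and is importable:

    # minimal sketch, assuming the generated module is importable as `spec`
    import spec

    # scalar namelist variables are exposed as properties on module-level
    # singletons; reads and writes pass straight through to the Fortran side
    spec.inputlist.lfindzero = 2
    spec.inputlist.forcetol = 1.0e-12
    print(spec.inputlist.nvol, spec.constants.pi, spec.numerical.machprec)

    # array-valued elements (e.g. nptrj) are returned as views of the
    # underlying Fortran arrays, so they are assigned element-wise / in place
    spec.inputlist.nptrj[0] = 10

    # each module class defines __str__, so printing a singleton dumps
    # every wrapped namelist/module variable
    print(spec.inputlist)
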
@property + def dcuhret(self): + """ + Element dcuhret ftype=real(8) pytype=float + + + Defined at global.fpp line 101 + + """ + return _spec.f90wrap_cputiming__get__dcuhret() + + @dcuhret.setter + def dcuhret(self, dcuhret): + _spec.f90wrap_cputiming__set__dcuhret(dcuhret) + + @property + def tminpack(self): + """ + Element tminpack ftype=real(8) pytype=float + + + Defined at global.fpp line 102 + + """ + return _spec.f90wrap_cputiming__get__tminpack() + + @tminpack.setter + def tminpack(self, tminpack): + _spec.f90wrap_cputiming__set__tminpack(tminpack) + + @property + def minpackt(self): + """ + Element minpackt ftype=real(8) pytype=float + + + Defined at global.fpp line 102 + + """ + return _spec.f90wrap_cputiming__get__minpackt() + + @minpackt.setter + def minpackt(self, minpackt): + _spec.f90wrap_cputiming__set__minpackt(minpackt) + + @property + def tiqpack(self): + """ + Element tiqpack ftype=real(8) pytype=float + + + Defined at global.fpp line 103 + + """ + return _spec.f90wrap_cputiming__get__tiqpack() + + @tiqpack.setter + def tiqpack(self, tiqpack): + _spec.f90wrap_cputiming__set__tiqpack(tiqpack) + + @property + def iqpackt(self): + """ + Element iqpackt ftype=real(8) pytype=float + + + Defined at global.fpp line 103 + + """ + return _spec.f90wrap_cputiming__get__iqpackt() + + @iqpackt.setter + def iqpackt(self, iqpackt): + _spec.f90wrap_cputiming__set__iqpackt(iqpackt) + + @property + def trksuite(self): + """ + Element trksuite ftype=real(8) pytype=float + + + Defined at global.fpp line 104 + + """ + return _spec.f90wrap_cputiming__get__trksuite() + + @trksuite.setter + def trksuite(self, trksuite): + _spec.f90wrap_cputiming__set__trksuite(trksuite) + + @property + def rksuitet(self): + """ + Element rksuitet ftype=real(8) pytype=float + + + Defined at global.fpp line 104 + + """ + return _spec.f90wrap_cputiming__get__rksuitet() + + @rksuitet.setter + def rksuitet(self, rksuitet): + _spec.f90wrap_cputiming__set__rksuitet(rksuitet) + + @property + def ti1mach(self): + """ + Element ti1mach ftype=real(8) pytype=float + + + Defined at global.fpp line 105 + + """ + return _spec.f90wrap_cputiming__get__ti1mach() + + @ti1mach.setter + def ti1mach(self, ti1mach): + _spec.f90wrap_cputiming__set__ti1mach(ti1mach) + + @property + def i1macht(self): + """ + Element i1macht ftype=real(8) pytype=float + + + Defined at global.fpp line 105 + + """ + return _spec.f90wrap_cputiming__get__i1macht() + + @i1macht.setter + def i1macht(self, i1macht): + _spec.f90wrap_cputiming__set__i1macht(i1macht) + + @property + def td1mach(self): + """ + Element td1mach ftype=real(8) pytype=float + + + Defined at global.fpp line 106 + + """ + return _spec.f90wrap_cputiming__get__td1mach() + + @td1mach.setter + def td1mach(self, td1mach): + _spec.f90wrap_cputiming__set__td1mach(td1mach) + + @property + def d1macht(self): + """ + Element d1macht ftype=real(8) pytype=float + + + Defined at global.fpp line 106 + + """ + return _spec.f90wrap_cputiming__get__d1macht() + + @d1macht.setter + def d1macht(self, d1macht): + _spec.f90wrap_cputiming__set__d1macht(d1macht) + + @property + def tilut(self): + """ + Element tilut ftype=real(8) pytype=float + + + Defined at global.fpp line 107 + + """ + return _spec.f90wrap_cputiming__get__tilut() + + @tilut.setter + def tilut(self, tilut): + _spec.f90wrap_cputiming__set__tilut(tilut) + + @property + def ilutt(self): + """ + Element ilutt ftype=real(8) pytype=float + + + Defined at global.fpp line 107 + + """ + return _spec.f90wrap_cputiming__get__ilutt() + + @ilutt.setter + 
def ilutt(self, ilutt): + _spec.f90wrap_cputiming__set__ilutt(ilutt) + + @property + def titers(self): + """ + Element titers ftype=real(8) pytype=float + + + Defined at global.fpp line 108 + + """ + return _spec.f90wrap_cputiming__get__titers() + + @titers.setter + def titers(self, titers): + _spec.f90wrap_cputiming__set__titers(titers) + + @property + def iterst(self): + """ + Element iterst ftype=real(8) pytype=float + + + Defined at global.fpp line 108 + + """ + return _spec.f90wrap_cputiming__get__iterst() + + @iterst.setter + def iterst(self, iterst): + _spec.f90wrap_cputiming__set__iterst(iterst) + + @property + def tinputlist(self): + """ + Element tinputlist ftype=real(8) pytype=float + + + Defined at global.fpp line 109 + + """ + return _spec.f90wrap_cputiming__get__tinputlist() + + @tinputlist.setter + def tinputlist(self, tinputlist): + _spec.f90wrap_cputiming__set__tinputlist(tinputlist) + + @property + def inputlistt(self): + """ + Element inputlistt ftype=real(8) pytype=float + + + Defined at global.fpp line 109 + + """ + return _spec.f90wrap_cputiming__get__inputlistt() + + @inputlistt.setter + def inputlistt(self, inputlistt): + _spec.f90wrap_cputiming__set__inputlistt(inputlistt) + + @property + def tglobal(self): + """ + Element tglobal ftype=real(8) pytype=float + + + Defined at global.fpp line 110 + + """ + return _spec.f90wrap_cputiming__get__tglobal() + + @tglobal.setter + def tglobal(self, tglobal): + _spec.f90wrap_cputiming__set__tglobal(tglobal) + + @property + def globalt(self): + """ + Element globalt ftype=real(8) pytype=float + + + Defined at global.fpp line 110 + + """ + return _spec.f90wrap_cputiming__get__globalt() + + @globalt.setter + def globalt(self, globalt): + _spec.f90wrap_cputiming__set__globalt(globalt) + + @property + def tsphdf5(self): + """ + Element tsphdf5 ftype=real(8) pytype=float + + + Defined at global.fpp line 111 + + """ + return _spec.f90wrap_cputiming__get__tsphdf5() + + @tsphdf5.setter + def tsphdf5(self, tsphdf5): + _spec.f90wrap_cputiming__set__tsphdf5(tsphdf5) + + @property + def sphdf5t(self): + """ + Element sphdf5t ftype=real(8) pytype=float + + + Defined at global.fpp line 111 + + """ + return _spec.f90wrap_cputiming__get__sphdf5t() + + @sphdf5t.setter + def sphdf5t(self, sphdf5t): + _spec.f90wrap_cputiming__set__sphdf5t(sphdf5t) + + @property + def tpreset(self): + """ + Element tpreset ftype=real(8) pytype=float + + + Defined at global.fpp line 112 + + """ + return _spec.f90wrap_cputiming__get__tpreset() + + @tpreset.setter + def tpreset(self, tpreset): + _spec.f90wrap_cputiming__set__tpreset(tpreset) + + @property + def presett(self): + """ + Element presett ftype=real(8) pytype=float + + + Defined at global.fpp line 112 + + """ + return _spec.f90wrap_cputiming__get__presett() + + @presett.setter + def presett(self, presett): + _spec.f90wrap_cputiming__set__presett(presett) + + @property + def tmanual(self): + """ + Element tmanual ftype=real(8) pytype=float + + + Defined at global.fpp line 113 + + """ + return _spec.f90wrap_cputiming__get__tmanual() + + @tmanual.setter + def tmanual(self, tmanual): + _spec.f90wrap_cputiming__set__tmanual(tmanual) + + @property + def manualt(self): + """ + Element manualt ftype=real(8) pytype=float + + + Defined at global.fpp line 113 + + """ + return _spec.f90wrap_cputiming__get__manualt() + + @manualt.setter + def manualt(self, manualt): + _spec.f90wrap_cputiming__set__manualt(manualt) + + @property + def trzaxis(self): + """ + Element trzaxis ftype=real(8) pytype=float + + + Defined at 
global.fpp line 114 + + """ + return _spec.f90wrap_cputiming__get__trzaxis() + + @trzaxis.setter + def trzaxis(self, trzaxis): + _spec.f90wrap_cputiming__set__trzaxis(trzaxis) + + @property + def rzaxist(self): + """ + Element rzaxist ftype=real(8) pytype=float + + + Defined at global.fpp line 114 + + """ + return _spec.f90wrap_cputiming__get__rzaxist() + + @rzaxist.setter + def rzaxist(self, rzaxist): + _spec.f90wrap_cputiming__set__rzaxist(rzaxist) + + @property + def tpackxi(self): + """ + Element tpackxi ftype=real(8) pytype=float + + + Defined at global.fpp line 115 + + """ + return _spec.f90wrap_cputiming__get__tpackxi() + + @tpackxi.setter + def tpackxi(self, tpackxi): + _spec.f90wrap_cputiming__set__tpackxi(tpackxi) + + @property + def packxit(self): + """ + Element packxit ftype=real(8) pytype=float + + + Defined at global.fpp line 115 + + """ + return _spec.f90wrap_cputiming__get__packxit() + + @packxit.setter + def packxit(self, packxit): + _spec.f90wrap_cputiming__set__packxit(packxit) + + @property + def tvolume(self): + """ + Element tvolume ftype=real(8) pytype=float + + + Defined at global.fpp line 116 + + """ + return _spec.f90wrap_cputiming__get__tvolume() + + @tvolume.setter + def tvolume(self, tvolume): + _spec.f90wrap_cputiming__set__tvolume(tvolume) + + @property + def volumet(self): + """ + Element volumet ftype=real(8) pytype=float + + + Defined at global.fpp line 116 + + """ + return _spec.f90wrap_cputiming__get__volumet() + + @volumet.setter + def volumet(self, volumet): + _spec.f90wrap_cputiming__set__volumet(volumet) + + @property + def tcoords(self): + """ + Element tcoords ftype=real(8) pytype=float + + + Defined at global.fpp line 117 + + """ + return _spec.f90wrap_cputiming__get__tcoords() + + @tcoords.setter + def tcoords(self, tcoords): + _spec.f90wrap_cputiming__set__tcoords(tcoords) + + @property + def coordst(self): + """ + Element coordst ftype=real(8) pytype=float + + + Defined at global.fpp line 117 + + """ + return _spec.f90wrap_cputiming__get__coordst() + + @coordst.setter + def coordst(self, coordst): + _spec.f90wrap_cputiming__set__coordst(coordst) + + @property + def tbasefn(self): + """ + Element tbasefn ftype=real(8) pytype=float + + + Defined at global.fpp line 118 + + """ + return _spec.f90wrap_cputiming__get__tbasefn() + + @tbasefn.setter + def tbasefn(self, tbasefn): + _spec.f90wrap_cputiming__set__tbasefn(tbasefn) + + @property + def basefnt(self): + """ + Element basefnt ftype=real(8) pytype=float + + + Defined at global.fpp line 118 + + """ + return _spec.f90wrap_cputiming__get__basefnt() + + @basefnt.setter + def basefnt(self, basefnt): + _spec.f90wrap_cputiming__set__basefnt(basefnt) + + @property + def tmemory(self): + """ + Element tmemory ftype=real(8) pytype=float + + + Defined at global.fpp line 119 + + """ + return _spec.f90wrap_cputiming__get__tmemory() + + @tmemory.setter + def tmemory(self, tmemory): + _spec.f90wrap_cputiming__set__tmemory(tmemory) + + @property + def memoryt(self): + """ + Element memoryt ftype=real(8) pytype=float + + + Defined at global.fpp line 119 + + """ + return _spec.f90wrap_cputiming__get__memoryt() + + @memoryt.setter + def memoryt(self, memoryt): + _spec.f90wrap_cputiming__set__memoryt(memoryt) + + @property + def tmetrix(self): + """ + Element tmetrix ftype=real(8) pytype=float + + + Defined at global.fpp line 120 + + """ + return _spec.f90wrap_cputiming__get__tmetrix() + + @tmetrix.setter + def tmetrix(self, tmetrix): + _spec.f90wrap_cputiming__set__tmetrix(tmetrix) + + @property + def 
metrixt(self): + """ + Element metrixt ftype=real(8) pytype=float + + + Defined at global.fpp line 120 + + """ + return _spec.f90wrap_cputiming__get__metrixt() + + @metrixt.setter + def metrixt(self, metrixt): + _spec.f90wrap_cputiming__set__metrixt(metrixt) + + @property + def tma00aa(self): + """ + Element tma00aa ftype=real(8) pytype=float + + + Defined at global.fpp line 121 + + """ + return _spec.f90wrap_cputiming__get__tma00aa() + + @tma00aa.setter + def tma00aa(self, tma00aa): + _spec.f90wrap_cputiming__set__tma00aa(tma00aa) + + @property + def ma00aat(self): + """ + Element ma00aat ftype=real(8) pytype=float + + + Defined at global.fpp line 121 + + """ + return _spec.f90wrap_cputiming__get__ma00aat() + + @ma00aat.setter + def ma00aat(self, ma00aat): + _spec.f90wrap_cputiming__set__ma00aat(ma00aat) + + @property + def tmatrix(self): + """ + Element tmatrix ftype=real(8) pytype=float + + + Defined at global.fpp line 122 + + """ + return _spec.f90wrap_cputiming__get__tmatrix() + + @tmatrix.setter + def tmatrix(self, tmatrix): + _spec.f90wrap_cputiming__set__tmatrix(tmatrix) + + @property + def matrixt(self): + """ + Element matrixt ftype=real(8) pytype=float + + + Defined at global.fpp line 122 + + """ + return _spec.f90wrap_cputiming__get__matrixt() + + @matrixt.setter + def matrixt(self, matrixt): + _spec.f90wrap_cputiming__set__matrixt(matrixt) + + @property + def tspsmat(self): + """ + Element tspsmat ftype=real(8) pytype=float + + + Defined at global.fpp line 123 + + """ + return _spec.f90wrap_cputiming__get__tspsmat() + + @tspsmat.setter + def tspsmat(self, tspsmat): + _spec.f90wrap_cputiming__set__tspsmat(tspsmat) + + @property + def spsmatt(self): + """ + Element spsmatt ftype=real(8) pytype=float + + + Defined at global.fpp line 123 + + """ + return _spec.f90wrap_cputiming__get__spsmatt() + + @spsmatt.setter + def spsmatt(self, spsmatt): + _spec.f90wrap_cputiming__set__spsmatt(spsmatt) + + @property + def tspsint(self): + """ + Element tspsint ftype=real(8) pytype=float + + + Defined at global.fpp line 124 + + """ + return _spec.f90wrap_cputiming__get__tspsint() + + @tspsint.setter + def tspsint(self, tspsint): + _spec.f90wrap_cputiming__set__tspsint(tspsint) + + @property + def spsintt(self): + """ + Element spsintt ftype=real(8) pytype=float + + + Defined at global.fpp line 124 + + """ + return _spec.f90wrap_cputiming__get__spsintt() + + @spsintt.setter + def spsintt(self, spsintt): + _spec.f90wrap_cputiming__set__spsintt(spsintt) + + @property + def tmp00ac(self): + """ + Element tmp00ac ftype=real(8) pytype=float + + + Defined at global.fpp line 125 + + """ + return _spec.f90wrap_cputiming__get__tmp00ac() + + @tmp00ac.setter + def tmp00ac(self, tmp00ac): + _spec.f90wrap_cputiming__set__tmp00ac(tmp00ac) + + @property + def mp00act(self): + """ + Element mp00act ftype=real(8) pytype=float + + + Defined at global.fpp line 125 + + """ + return _spec.f90wrap_cputiming__get__mp00act() + + @mp00act.setter + def mp00act(self, mp00act): + _spec.f90wrap_cputiming__set__mp00act(mp00act) + + @property + def tma02aa(self): + """ + Element tma02aa ftype=real(8) pytype=float + + + Defined at global.fpp line 126 + + """ + return _spec.f90wrap_cputiming__get__tma02aa() + + @tma02aa.setter + def tma02aa(self, tma02aa): + _spec.f90wrap_cputiming__set__tma02aa(tma02aa) + + @property + def ma02aat(self): + """ + Element ma02aat ftype=real(8) pytype=float + + + Defined at global.fpp line 126 + + """ + return _spec.f90wrap_cputiming__get__ma02aat() + + @ma02aat.setter + def ma02aat(self, 
ma02aat): + _spec.f90wrap_cputiming__set__ma02aat(ma02aat) + + @property + def tpackab(self): + """ + Element tpackab ftype=real(8) pytype=float + + + Defined at global.fpp line 127 + + """ + return _spec.f90wrap_cputiming__get__tpackab() + + @tpackab.setter + def tpackab(self, tpackab): + _spec.f90wrap_cputiming__set__tpackab(tpackab) + + @property + def packabt(self): + """ + Element packabt ftype=real(8) pytype=float + + + Defined at global.fpp line 127 + + """ + return _spec.f90wrap_cputiming__get__packabt() + + @packabt.setter + def packabt(self, packabt): + _spec.f90wrap_cputiming__set__packabt(packabt) + + @property + def ttr00ab(self): + """ + Element ttr00ab ftype=real(8) pytype=float + + + Defined at global.fpp line 128 + + """ + return _spec.f90wrap_cputiming__get__ttr00ab() + + @ttr00ab.setter + def ttr00ab(self, ttr00ab): + _spec.f90wrap_cputiming__set__ttr00ab(ttr00ab) + + @property + def tr00abt(self): + """ + Element tr00abt ftype=real(8) pytype=float + + + Defined at global.fpp line 128 + + """ + return _spec.f90wrap_cputiming__get__tr00abt() + + @tr00abt.setter + def tr00abt(self, tr00abt): + _spec.f90wrap_cputiming__set__tr00abt(tr00abt) + + @property + def tcurent(self): + """ + Element tcurent ftype=real(8) pytype=float + + + Defined at global.fpp line 129 + + """ + return _spec.f90wrap_cputiming__get__tcurent() + + @tcurent.setter + def tcurent(self, tcurent): + _spec.f90wrap_cputiming__set__tcurent(tcurent) + + @property + def curentt(self): + """ + Element curentt ftype=real(8) pytype=float + + + Defined at global.fpp line 129 + + """ + return _spec.f90wrap_cputiming__get__curentt() + + @curentt.setter + def curentt(self, curentt): + _spec.f90wrap_cputiming__set__curentt(curentt) + + @property + def tdf00ab(self): + """ + Element tdf00ab ftype=real(8) pytype=float + + + Defined at global.fpp line 130 + + """ + return _spec.f90wrap_cputiming__get__tdf00ab() + + @tdf00ab.setter + def tdf00ab(self, tdf00ab): + _spec.f90wrap_cputiming__set__tdf00ab(tdf00ab) + + @property + def df00abt(self): + """ + Element df00abt ftype=real(8) pytype=float + + + Defined at global.fpp line 130 + + """ + return _spec.f90wrap_cputiming__get__df00abt() + + @df00abt.setter + def df00abt(self, df00abt): + _spec.f90wrap_cputiming__set__df00abt(df00abt) + + @property + def tlforce(self): + """ + Element tlforce ftype=real(8) pytype=float + + + Defined at global.fpp line 131 + + """ + return _spec.f90wrap_cputiming__get__tlforce() + + @tlforce.setter + def tlforce(self, tlforce): + _spec.f90wrap_cputiming__set__tlforce(tlforce) + + @property + def lforcet(self): + """ + Element lforcet ftype=real(8) pytype=float + + + Defined at global.fpp line 131 + + """ + return _spec.f90wrap_cputiming__get__lforcet() + + @lforcet.setter + def lforcet(self, lforcet): + _spec.f90wrap_cputiming__set__lforcet(lforcet) + + @property + def tintghs(self): + """ + Element tintghs ftype=real(8) pytype=float + + + Defined at global.fpp line 132 + + """ + return _spec.f90wrap_cputiming__get__tintghs() + + @tintghs.setter + def tintghs(self, tintghs): + _spec.f90wrap_cputiming__set__tintghs(tintghs) + + @property + def intghst(self): + """ + Element intghst ftype=real(8) pytype=float + + + Defined at global.fpp line 132 + + """ + return _spec.f90wrap_cputiming__get__intghst() + + @intghst.setter + def intghst(self, intghst): + _spec.f90wrap_cputiming__set__intghst(intghst) + + @property + def tmtrxhs(self): + """ + Element tmtrxhs ftype=real(8) pytype=float + + + Defined at global.fpp line 133 + + """ + return 
_spec.f90wrap_cputiming__get__tmtrxhs() + + @tmtrxhs.setter + def tmtrxhs(self, tmtrxhs): + _spec.f90wrap_cputiming__set__tmtrxhs(tmtrxhs) + + @property + def mtrxhst(self): + """ + Element mtrxhst ftype=real(8) pytype=float + + + Defined at global.fpp line 133 + + """ + return _spec.f90wrap_cputiming__get__mtrxhst() + + @mtrxhst.setter + def mtrxhst(self, mtrxhst): + _spec.f90wrap_cputiming__set__mtrxhst(mtrxhst) + + @property + def tlbpol(self): + """ + Element tlbpol ftype=real(8) pytype=float + + + Defined at global.fpp line 134 + + """ + return _spec.f90wrap_cputiming__get__tlbpol() + + @tlbpol.setter + def tlbpol(self, tlbpol): + _spec.f90wrap_cputiming__set__tlbpol(tlbpol) + + @property + def lbpolt(self): + """ + Element lbpolt ftype=real(8) pytype=float + + + Defined at global.fpp line 134 + + """ + return _spec.f90wrap_cputiming__get__lbpolt() + + @lbpolt.setter + def lbpolt(self, lbpolt): + _spec.f90wrap_cputiming__set__lbpolt(lbpolt) + + @property + def tbrcast(self): + """ + Element tbrcast ftype=real(8) pytype=float + + + Defined at global.fpp line 135 + + """ + return _spec.f90wrap_cputiming__get__tbrcast() + + @tbrcast.setter + def tbrcast(self, tbrcast): + _spec.f90wrap_cputiming__set__tbrcast(tbrcast) + + @property + def brcastt(self): + """ + Element brcastt ftype=real(8) pytype=float + + + Defined at global.fpp line 135 + + """ + return _spec.f90wrap_cputiming__get__brcastt() + + @brcastt.setter + def brcastt(self, brcastt): + _spec.f90wrap_cputiming__set__brcastt(brcastt) + + @property + def tdfp100(self): + """ + Element tdfp100 ftype=real(8) pytype=float + + + Defined at global.fpp line 136 + + """ + return _spec.f90wrap_cputiming__get__tdfp100() + + @tdfp100.setter + def tdfp100(self, tdfp100): + _spec.f90wrap_cputiming__set__tdfp100(tdfp100) + + @property + def dfp100t(self): + """ + Element dfp100t ftype=real(8) pytype=float + + + Defined at global.fpp line 136 + + """ + return _spec.f90wrap_cputiming__get__dfp100t() + + @dfp100t.setter + def dfp100t(self, dfp100t): + _spec.f90wrap_cputiming__set__dfp100t(dfp100t) + + @property + def tdfp200(self): + """ + Element tdfp200 ftype=real(8) pytype=float + + + Defined at global.fpp line 137 + + """ + return _spec.f90wrap_cputiming__get__tdfp200() + + @tdfp200.setter + def tdfp200(self, tdfp200): + _spec.f90wrap_cputiming__set__tdfp200(tdfp200) + + @property + def dfp200t(self): + """ + Element dfp200t ftype=real(8) pytype=float + + + Defined at global.fpp line 137 + + """ + return _spec.f90wrap_cputiming__get__dfp200t() + + @dfp200t.setter + def dfp200t(self, dfp200t): + _spec.f90wrap_cputiming__set__dfp200t(dfp200t) + + @property + def tdforce(self): + """ + Element tdforce ftype=real(8) pytype=float + + + Defined at global.fpp line 138 + + """ + return _spec.f90wrap_cputiming__get__tdforce() + + @tdforce.setter + def tdforce(self, tdforce): + _spec.f90wrap_cputiming__set__tdforce(tdforce) + + @property + def dforcet(self): + """ + Element dforcet ftype=real(8) pytype=float + + + Defined at global.fpp line 138 + + """ + return _spec.f90wrap_cputiming__get__dforcet() + + @dforcet.setter + def dforcet(self, dforcet): + _spec.f90wrap_cputiming__set__dforcet(dforcet) + + @property + def tnewton(self): + """ + Element tnewton ftype=real(8) pytype=float + + + Defined at global.fpp line 139 + + """ + return _spec.f90wrap_cputiming__get__tnewton() + + @tnewton.setter + def tnewton(self, tnewton): + _spec.f90wrap_cputiming__set__tnewton(tnewton) + + @property + def newtont(self): + """ + Element newtont ftype=real(8) 
pytype=float + + + Defined at global.fpp line 139 + + """ + return _spec.f90wrap_cputiming__get__newtont() + + @newtont.setter + def newtont(self, newtont): + _spec.f90wrap_cputiming__set__newtont(newtont) + + @property + def tcasing(self): + """ + Element tcasing ftype=real(8) pytype=float + + + Defined at global.fpp line 140 + + """ + return _spec.f90wrap_cputiming__get__tcasing() + + @tcasing.setter + def tcasing(self, tcasing): + _spec.f90wrap_cputiming__set__tcasing(tcasing) + + @property + def casingt(self): + """ + Element casingt ftype=real(8) pytype=float + + + Defined at global.fpp line 140 + + """ + return _spec.f90wrap_cputiming__get__casingt() + + @casingt.setter + def casingt(self, casingt): + _spec.f90wrap_cputiming__set__casingt(casingt) + + @property + def tbnorml(self): + """ + Element tbnorml ftype=real(8) pytype=float + + + Defined at global.fpp line 141 + + """ + return _spec.f90wrap_cputiming__get__tbnorml() + + @tbnorml.setter + def tbnorml(self, tbnorml): + _spec.f90wrap_cputiming__set__tbnorml(tbnorml) + + @property + def bnormlt(self): + """ + Element bnormlt ftype=real(8) pytype=float + + + Defined at global.fpp line 141 + + """ + return _spec.f90wrap_cputiming__get__bnormlt() + + @bnormlt.setter + def bnormlt(self, bnormlt): + _spec.f90wrap_cputiming__set__bnormlt(bnormlt) + + @property + def tjo00aa(self): + """ + Element tjo00aa ftype=real(8) pytype=float + + + Defined at global.fpp line 142 + + """ + return _spec.f90wrap_cputiming__get__tjo00aa() + + @tjo00aa.setter + def tjo00aa(self, tjo00aa): + _spec.f90wrap_cputiming__set__tjo00aa(tjo00aa) + + @property + def jo00aat(self): + """ + Element jo00aat ftype=real(8) pytype=float + + + Defined at global.fpp line 142 + + """ + return _spec.f90wrap_cputiming__get__jo00aat() + + @jo00aat.setter + def jo00aat(self, jo00aat): + _spec.f90wrap_cputiming__set__jo00aat(jo00aat) + + @property + def tpp00aa(self): + """ + Element tpp00aa ftype=real(8) pytype=float + + + Defined at global.fpp line 143 + + """ + return _spec.f90wrap_cputiming__get__tpp00aa() + + @tpp00aa.setter + def tpp00aa(self, tpp00aa): + _spec.f90wrap_cputiming__set__tpp00aa(tpp00aa) + + @property + def pp00aat(self): + """ + Element pp00aat ftype=real(8) pytype=float + + + Defined at global.fpp line 143 + + """ + return _spec.f90wrap_cputiming__get__pp00aat() + + @pp00aat.setter + def pp00aat(self, pp00aat): + _spec.f90wrap_cputiming__set__pp00aat(pp00aat) + + @property + def tpp00ab(self): + """ + Element tpp00ab ftype=real(8) pytype=float + + + Defined at global.fpp line 144 + + """ + return _spec.f90wrap_cputiming__get__tpp00ab() + + @tpp00ab.setter + def tpp00ab(self, tpp00ab): + _spec.f90wrap_cputiming__set__tpp00ab(tpp00ab) + + @property + def pp00abt(self): + """ + Element pp00abt ftype=real(8) pytype=float + + + Defined at global.fpp line 144 + + """ + return _spec.f90wrap_cputiming__get__pp00abt() + + @pp00abt.setter + def pp00abt(self, pp00abt): + _spec.f90wrap_cputiming__set__pp00abt(pp00abt) + + @property + def tbfield(self): + """ + Element tbfield ftype=real(8) pytype=float + + + Defined at global.fpp line 145 + + """ + return _spec.f90wrap_cputiming__get__tbfield() + + @tbfield.setter + def tbfield(self, tbfield): + _spec.f90wrap_cputiming__set__tbfield(tbfield) + + @property + def bfieldt(self): + """ + Element bfieldt ftype=real(8) pytype=float + + + Defined at global.fpp line 145 + + """ + return _spec.f90wrap_cputiming__get__bfieldt() + + @bfieldt.setter + def bfieldt(self, bfieldt): + _spec.f90wrap_cputiming__set__bfieldt(bfieldt) 
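
The block above is the f90wrap-generated mapping of the paired CPU-timing accumulators in SPEC's `cputiming` module: every Fortran variable becomes a Python property whose getter and setter forward to the compiled `_spec` extension, and the `__str__` method further down prints the full set of timers. A minimal sketch of how these timers could be read from Python, assuming the generated wrapper and the compiled extension are importable as the `spec` package (the name used in the class registrations):

    # Sketch only: inspect SPEC's per-routine CPU timers through the f90wrap bindings.
    # Assumes the built `_spec` extension and this generated wrapper import as `spec`.
    import spec

    ct = spec.cputiming            # module-level instance created by this wrapper
    print(ct.tdforce, ct.dforcet)  # paired real(8) accumulators for the dforce routine
    ct.tdforce = 0.0               # setter forwards to _spec.f90wrap_cputiming__set__tdforce
    print(ct)                      # __str__ lists every timer as '{ name : value, ... }'
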
+ + @property + def tstzxyz(self): + """ + Element tstzxyz ftype=real(8) pytype=float + + + Defined at global.fpp line 146 + + """ + return _spec.f90wrap_cputiming__get__tstzxyz() + + @tstzxyz.setter + def tstzxyz(self, tstzxyz): + _spec.f90wrap_cputiming__set__tstzxyz(tstzxyz) + + @property + def stzxyzt(self): + """ + Element stzxyzt ftype=real(8) pytype=float + + + Defined at global.fpp line 146 + + """ + return _spec.f90wrap_cputiming__get__stzxyzt() + + @stzxyzt.setter + def stzxyzt(self, stzxyzt): + _spec.f90wrap_cputiming__set__stzxyzt(stzxyzt) + + @property + def thesian(self): + """ + Element thesian ftype=real(8) pytype=float + + + Defined at global.fpp line 147 + + """ + return _spec.f90wrap_cputiming__get__thesian() + + @thesian.setter + def thesian(self, thesian): + _spec.f90wrap_cputiming__set__thesian(thesian) + + @property + def hesiant(self): + """ + Element hesiant ftype=real(8) pytype=float + + + Defined at global.fpp line 147 + + """ + return _spec.f90wrap_cputiming__get__hesiant() + + @hesiant.setter + def hesiant(self, hesiant): + _spec.f90wrap_cputiming__set__hesiant(hesiant) + + @property + def tra00aa(self): + """ + Element tra00aa ftype=real(8) pytype=float + + + Defined at global.fpp line 148 + + """ + return _spec.f90wrap_cputiming__get__tra00aa() + + @tra00aa.setter + def tra00aa(self, tra00aa): + _spec.f90wrap_cputiming__set__tra00aa(tra00aa) + + @property + def ra00aat(self): + """ + Element ra00aat ftype=real(8) pytype=float + + + Defined at global.fpp line 148 + + """ + return _spec.f90wrap_cputiming__get__ra00aat() + + @ra00aat.setter + def ra00aat(self, ra00aat): + _spec.f90wrap_cputiming__set__ra00aat(ra00aat) + + @property + def tnumrec(self): + """ + Element tnumrec ftype=real(8) pytype=float + + + Defined at global.fpp line 149 + + """ + return _spec.f90wrap_cputiming__get__tnumrec() + + @tnumrec.setter + def tnumrec(self, tnumrec): + _spec.f90wrap_cputiming__set__tnumrec(tnumrec) + + @property + def numrect(self): + """ + Element numrect ftype=real(8) pytype=float + + + Defined at global.fpp line 149 + + """ + return _spec.f90wrap_cputiming__get__numrect() + + @numrect.setter + def numrect(self, numrect): + _spec.f90wrap_cputiming__set__numrect(numrect) + + @property + def txspech(self): + """ + Element txspech ftype=real(8) pytype=float + + + Defined at global.fpp line 150 + + """ + return _spec.f90wrap_cputiming__get__txspech() + + @txspech.setter + def txspech(self, txspech): + _spec.f90wrap_cputiming__set__txspech(txspech) + + @property + def xspecht(self): + """ + Element xspecht ftype=real(8) pytype=float + + + Defined at global.fpp line 150 + + """ + return _spec.f90wrap_cputiming__get__xspecht() + + @xspecht.setter + def xspecht(self, xspecht): + _spec.f90wrap_cputiming__set__xspecht(xspecht) + + @property + def treadin(self): + """ + Element treadin ftype=real(8) pytype=float + + + Defined at global.fpp line 152 + + """ + return _spec.f90wrap_cputiming__get__treadin() + + @treadin.setter + def treadin(self, treadin): + _spec.f90wrap_cputiming__set__treadin(treadin) + + @property + def twritin(self): + """ + Element twritin ftype=real(8) pytype=float + + + Defined at global.fpp line 153 + + """ + return _spec.f90wrap_cputiming__get__twritin() + + @twritin.setter + def twritin(self, twritin): + _spec.f90wrap_cputiming__set__twritin(twritin) + + @property + def twrtend(self): + """ + Element twrtend ftype=real(8) pytype=float + + + Defined at global.fpp line 154 + + """ + return _spec.f90wrap_cputiming__get__twrtend() + + @twrtend.setter + def 
twrtend(self, twrtend): + _spec.f90wrap_cputiming__set__twrtend(twrtend) + + def __str__(self): + ret = ['{\n'] + ret.append(' tdcuhre : ') + ret.append(repr(self.tdcuhre)) + ret.append(',\n dcuhret : ') + ret.append(repr(self.dcuhret)) + ret.append(',\n tminpack : ') + ret.append(repr(self.tminpack)) + ret.append(',\n minpackt : ') + ret.append(repr(self.minpackt)) + ret.append(',\n tiqpack : ') + ret.append(repr(self.tiqpack)) + ret.append(',\n iqpackt : ') + ret.append(repr(self.iqpackt)) + ret.append(',\n trksuite : ') + ret.append(repr(self.trksuite)) + ret.append(',\n rksuitet : ') + ret.append(repr(self.rksuitet)) + ret.append(',\n ti1mach : ') + ret.append(repr(self.ti1mach)) + ret.append(',\n i1macht : ') + ret.append(repr(self.i1macht)) + ret.append(',\n td1mach : ') + ret.append(repr(self.td1mach)) + ret.append(',\n d1macht : ') + ret.append(repr(self.d1macht)) + ret.append(',\n tilut : ') + ret.append(repr(self.tilut)) + ret.append(',\n ilutt : ') + ret.append(repr(self.ilutt)) + ret.append(',\n titers : ') + ret.append(repr(self.titers)) + ret.append(',\n iterst : ') + ret.append(repr(self.iterst)) + ret.append(',\n tinputlist : ') + ret.append(repr(self.tinputlist)) + ret.append(',\n inputlistt : ') + ret.append(repr(self.inputlistt)) + ret.append(',\n tglobal : ') + ret.append(repr(self.tglobal)) + ret.append(',\n globalt : ') + ret.append(repr(self.globalt)) + ret.append(',\n tsphdf5 : ') + ret.append(repr(self.tsphdf5)) + ret.append(',\n sphdf5t : ') + ret.append(repr(self.sphdf5t)) + ret.append(',\n tpreset : ') + ret.append(repr(self.tpreset)) + ret.append(',\n presett : ') + ret.append(repr(self.presett)) + ret.append(',\n tmanual : ') + ret.append(repr(self.tmanual)) + ret.append(',\n manualt : ') + ret.append(repr(self.manualt)) + ret.append(',\n trzaxis : ') + ret.append(repr(self.trzaxis)) + ret.append(',\n rzaxist : ') + ret.append(repr(self.rzaxist)) + ret.append(',\n tpackxi : ') + ret.append(repr(self.tpackxi)) + ret.append(',\n packxit : ') + ret.append(repr(self.packxit)) + ret.append(',\n tvolume : ') + ret.append(repr(self.tvolume)) + ret.append(',\n volumet : ') + ret.append(repr(self.volumet)) + ret.append(',\n tcoords : ') + ret.append(repr(self.tcoords)) + ret.append(',\n coordst : ') + ret.append(repr(self.coordst)) + ret.append(',\n tbasefn : ') + ret.append(repr(self.tbasefn)) + ret.append(',\n basefnt : ') + ret.append(repr(self.basefnt)) + ret.append(',\n tmemory : ') + ret.append(repr(self.tmemory)) + ret.append(',\n memoryt : ') + ret.append(repr(self.memoryt)) + ret.append(',\n tmetrix : ') + ret.append(repr(self.tmetrix)) + ret.append(',\n metrixt : ') + ret.append(repr(self.metrixt)) + ret.append(',\n tma00aa : ') + ret.append(repr(self.tma00aa)) + ret.append(',\n ma00aat : ') + ret.append(repr(self.ma00aat)) + ret.append(',\n tmatrix : ') + ret.append(repr(self.tmatrix)) + ret.append(',\n matrixt : ') + ret.append(repr(self.matrixt)) + ret.append(',\n tspsmat : ') + ret.append(repr(self.tspsmat)) + ret.append(',\n spsmatt : ') + ret.append(repr(self.spsmatt)) + ret.append(',\n tspsint : ') + ret.append(repr(self.tspsint)) + ret.append(',\n spsintt : ') + ret.append(repr(self.spsintt)) + ret.append(',\n tmp00ac : ') + ret.append(repr(self.tmp00ac)) + ret.append(',\n mp00act : ') + ret.append(repr(self.mp00act)) + ret.append(',\n tma02aa : ') + ret.append(repr(self.tma02aa)) + ret.append(',\n ma02aat : ') + ret.append(repr(self.ma02aat)) + ret.append(',\n tpackab : ') + ret.append(repr(self.tpackab)) + ret.append(',\n packabt : ') + 
ret.append(repr(self.packabt)) + ret.append(',\n ttr00ab : ') + ret.append(repr(self.ttr00ab)) + ret.append(',\n tr00abt : ') + ret.append(repr(self.tr00abt)) + ret.append(',\n tcurent : ') + ret.append(repr(self.tcurent)) + ret.append(',\n curentt : ') + ret.append(repr(self.curentt)) + ret.append(',\n tdf00ab : ') + ret.append(repr(self.tdf00ab)) + ret.append(',\n df00abt : ') + ret.append(repr(self.df00abt)) + ret.append(',\n tlforce : ') + ret.append(repr(self.tlforce)) + ret.append(',\n lforcet : ') + ret.append(repr(self.lforcet)) + ret.append(',\n tintghs : ') + ret.append(repr(self.tintghs)) + ret.append(',\n intghst : ') + ret.append(repr(self.intghst)) + ret.append(',\n tmtrxhs : ') + ret.append(repr(self.tmtrxhs)) + ret.append(',\n mtrxhst : ') + ret.append(repr(self.mtrxhst)) + ret.append(',\n tlbpol : ') + ret.append(repr(self.tlbpol)) + ret.append(',\n lbpolt : ') + ret.append(repr(self.lbpolt)) + ret.append(',\n tbrcast : ') + ret.append(repr(self.tbrcast)) + ret.append(',\n brcastt : ') + ret.append(repr(self.brcastt)) + ret.append(',\n tdfp100 : ') + ret.append(repr(self.tdfp100)) + ret.append(',\n dfp100t : ') + ret.append(repr(self.dfp100t)) + ret.append(',\n tdfp200 : ') + ret.append(repr(self.tdfp200)) + ret.append(',\n dfp200t : ') + ret.append(repr(self.dfp200t)) + ret.append(',\n tdforce : ') + ret.append(repr(self.tdforce)) + ret.append(',\n dforcet : ') + ret.append(repr(self.dforcet)) + ret.append(',\n tnewton : ') + ret.append(repr(self.tnewton)) + ret.append(',\n newtont : ') + ret.append(repr(self.newtont)) + ret.append(',\n tcasing : ') + ret.append(repr(self.tcasing)) + ret.append(',\n casingt : ') + ret.append(repr(self.casingt)) + ret.append(',\n tbnorml : ') + ret.append(repr(self.tbnorml)) + ret.append(',\n bnormlt : ') + ret.append(repr(self.bnormlt)) + ret.append(',\n tjo00aa : ') + ret.append(repr(self.tjo00aa)) + ret.append(',\n jo00aat : ') + ret.append(repr(self.jo00aat)) + ret.append(',\n tpp00aa : ') + ret.append(repr(self.tpp00aa)) + ret.append(',\n pp00aat : ') + ret.append(repr(self.pp00aat)) + ret.append(',\n tpp00ab : ') + ret.append(repr(self.tpp00ab)) + ret.append(',\n pp00abt : ') + ret.append(repr(self.pp00abt)) + ret.append(',\n tbfield : ') + ret.append(repr(self.tbfield)) + ret.append(',\n bfieldt : ') + ret.append(repr(self.bfieldt)) + ret.append(',\n tstzxyz : ') + ret.append(repr(self.tstzxyz)) + ret.append(',\n stzxyzt : ') + ret.append(repr(self.stzxyzt)) + ret.append(',\n thesian : ') + ret.append(repr(self.thesian)) + ret.append(',\n hesiant : ') + ret.append(repr(self.hesiant)) + ret.append(',\n tra00aa : ') + ret.append(repr(self.tra00aa)) + ret.append(',\n ra00aat : ') + ret.append(repr(self.ra00aat)) + ret.append(',\n tnumrec : ') + ret.append(repr(self.tnumrec)) + ret.append(',\n numrect : ') + ret.append(repr(self.numrect)) + ret.append(',\n txspech : ') + ret.append(repr(self.txspech)) + ret.append(',\n xspecht : ') + ret.append(repr(self.xspecht)) + ret.append(',\n treadin : ') + ret.append(repr(self.treadin)) + ret.append(',\n twritin : ') + ret.append(repr(self.twritin)) + ret.append(',\n twrtend : ') + ret.append(repr(self.twrtend)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + +cputiming = Cputiming() + +class Typedefns(f90wrap.runtime.FortranModule): + """ + Module typedefns + + + Defined at global.fpp lines 157-173 + + """ + @f90wrap.runtime.register_class("spec.subgrid") + class subgrid(f90wrap.runtime.FortranDerivedType): + """ + Type(name=subgrid) + + + Defined at global.fpp lines 
158-160 + + """ + def __init__(self, handle=None): + """ + self = Subgrid() + + + Defined at global.fpp lines 158-160 + + + Returns + ------- + this : Subgrid + Object to be constructed + + + Automatically generated constructor for subgrid + """ + f90wrap.runtime.FortranDerivedType.__init__(self) + result = _spec.f90wrap_subgrid_initialise() + self._handle = result[0] if isinstance(result, tuple) else result + + def __del__(self): + """ + Destructor for class Subgrid + + + Defined at global.fpp lines 158-160 + + Parameters + ---------- + this : Subgrid + Object to be destructed + + + Automatically generated destructor for subgrid + """ + if self._alloc: + _spec.f90wrap_subgrid_finalise(this=self._handle) + + @property + def s(self): + """ + Element s ftype=real(8) pytype=float + + + Defined at global.fpp line 159 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_subgrid__array__s(self._handle) + if array_handle in self._arrays: + s = self._arrays[array_handle] + else: + s = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_subgrid__array__s) + self._arrays[array_handle] = s + return s + + @s.setter + def s(self, s): + self.s[...] = s + + @property + def i(self): + """ + Element i ftype=integer pytype=int + + + Defined at global.fpp line 160 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_subgrid__array__i(self._handle) + if array_handle in self._arrays: + i = self._arrays[array_handle] + else: + i = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_subgrid__array__i) + self._arrays[array_handle] = i + return i + + @i.setter + def i(self, i): + self.i[...] = i + + def __str__(self): + ret = ['{\n'] + ret.append(' s : ') + ret.append(repr(self.s)) + ret.append(',\n i : ') + ret.append(repr(self.i)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + + @f90wrap.runtime.register_class("spec.MatrixLU") + class MatrixLU(f90wrap.runtime.FortranDerivedType): + """ + Type(name=matrixlu) + + + Defined at global.fpp lines 162-164 + + """ + def __init__(self, handle=None): + """ + self = Matrixlu() + + + Defined at global.fpp lines 162-164 + + + Returns + ------- + this : Matrixlu + Object to be constructed + + + Automatically generated constructor for matrixlu + """ + f90wrap.runtime.FortranDerivedType.__init__(self) + result = _spec.f90wrap_matrixlu_initialise() + self._handle = result[0] if isinstance(result, tuple) else result + + def __del__(self): + """ + Destructor for class Matrixlu + + + Defined at global.fpp lines 162-164 + + Parameters + ---------- + this : Matrixlu + Object to be destructed + + + Automatically generated destructor for matrixlu + """ + if self._alloc: + _spec.f90wrap_matrixlu_finalise(this=self._handle) + + @property + def mat(self): + """ + Element mat ftype=real(8) pytype=float + + + Defined at global.fpp line 163 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_matrixlu__array__mat(self._handle) + if array_handle in self._arrays: + mat = self._arrays[array_handle] + else: + mat = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_matrixlu__array__mat) + self._arrays[array_handle] = mat + return mat + + @mat.setter + def mat(self, mat): + self.mat[...] 
= mat + + @property + def ipivot(self): + """ + Element ipivot ftype=integer pytype=int + + + Defined at global.fpp line 164 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_matrixlu__array__ipivot(self._handle) + if array_handle in self._arrays: + ipivot = self._arrays[array_handle] + else: + ipivot = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_matrixlu__array__ipivot) + self._arrays[array_handle] = ipivot + return ipivot + + @ipivot.setter + def ipivot(self, ipivot): + self.ipivot[...] = ipivot + + def __str__(self): + ret = ['{\n'] + ret.append(' mat : ') + ret.append(repr(self.mat)) + ret.append(',\n ipivot : ') + ret.append(repr(self.ipivot)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + + @f90wrap.runtime.register_class("spec.derivative") + class derivative(f90wrap.runtime.FortranDerivedType): + """ + Type(name=derivative) + + + Defined at global.fpp lines 166-172 + + """ + def __init__(self, handle=None): + """ + self = Derivative() + + + Defined at global.fpp lines 166-172 + + + Returns + ------- + this : Derivative + Object to be constructed + + + Automatically generated constructor for derivative + """ + f90wrap.runtime.FortranDerivedType.__init__(self) + result = _spec.f90wrap_derivative_initialise() + self._handle = result[0] if isinstance(result, tuple) else result + + def __del__(self): + """ + Destructor for class Derivative + + + Defined at global.fpp lines 166-172 + + Parameters + ---------- + this : Derivative + Object to be destructed + + + Automatically generated destructor for derivative + """ + if self._alloc: + _spec.f90wrap_derivative_finalise(this=self._handle) + + @property + def l(self): + """ + Element l ftype=logical pytype=bool + + + Defined at global.fpp line 167 + + """ + return _spec.f90wrap_derivative__get__l(self._handle) + + @l.setter + def l(self, l): + _spec.f90wrap_derivative__set__l(self._handle, l) + + @property + def vol(self): + """ + Element vol ftype=integer pytype=int + + + Defined at global.fpp line 168 + + """ + return _spec.f90wrap_derivative__get__vol(self._handle) + + @vol.setter + def vol(self, vol): + _spec.f90wrap_derivative__set__vol(self._handle, vol) + + @property + def innout(self): + """ + Element innout ftype=integer pytype=int + + + Defined at global.fpp line 169 + + """ + return _spec.f90wrap_derivative__get__innout(self._handle) + + @innout.setter + def innout(self, innout): + _spec.f90wrap_derivative__set__innout(self._handle, innout) + + @property + def ii(self): + """ + Element ii ftype=integer pytype=int + + + Defined at global.fpp line 170 + + """ + return _spec.f90wrap_derivative__get__ii(self._handle) + + @ii.setter + def ii(self, ii): + _spec.f90wrap_derivative__set__ii(self._handle, ii) + + @property + def irz(self): + """ + Element irz ftype=integer pytype=int + + + Defined at global.fpp line 171 + + """ + return _spec.f90wrap_derivative__get__irz(self._handle) + + @irz.setter + def irz(self, irz): + _spec.f90wrap_derivative__set__irz(self._handle, irz) + + @property + def issym(self): + """ + Element issym ftype=integer pytype=int + + + Defined at global.fpp line 172 + + """ + return _spec.f90wrap_derivative__get__issym(self._handle) + + @issym.setter + def issym(self, issym): + _spec.f90wrap_derivative__set__issym(self._handle, issym) + + def __str__(self): + ret = ['{\n'] + ret.append(' l : ') + ret.append(repr(self.l)) + ret.append(',\n vol : ') + ret.append(repr(self.vol)) + ret.append(',\n innout : ') + 
ret.append(repr(self.innout)) + ret.append(',\n ii : ') + ret.append(repr(self.ii)) + ret.append(',\n irz : ') + ret.append(repr(self.irz)) + ret.append(',\n issym : ') + ret.append(repr(self.issym)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + + _dt_array_initialisers = [] + + +typedefns = Typedefns() + +class Allglobal(f90wrap.runtime.FortranModule): + """ + Module allglobal + + + Defined at global.fpp lines 176-2271 + + """ + @staticmethod + def build_vector_potential(lvol, iocons, aderiv, tderiv): + """ + build_vector_potential(lvol, iocons, aderiv, tderiv) + + + Defined at global.fpp lines 536-589 + + Parameters + ---------- + lvol : int + iocons : int + aderiv : int + tderiv : int + + """ + _spec.f90wrap_build_vector_potential(lvol=lvol, iocons=iocons, aderiv=aderiv, \ + tderiv=tderiv) + + @staticmethod + def set_mpi_comm(comm): + """ + set_mpi_comm(comm) + + + Defined at global.fpp lines 595-607 + + Parameters + ---------- + comm : int + + """ + _spec.f90wrap_set_mpi_comm(comm=comm) + + @staticmethod + def read_inputlists_from_file(): + """ + read_inputlists_from_file() + + + Defined at global.fpp lines 610-733 + + + """ + _spec.f90wrap_read_inputlists_from_file() + + @staticmethod + def check_inputs(): + """ + check_inputs() + + + Defined at global.fpp lines 737-1126 + + + """ + _spec.f90wrap_check_inputs() + + @staticmethod + def broadcast_inputs(): + """ + broadcast_inputs() + + + Defined at global.fpp lines 1130-1927 + + + """ + _spec.f90wrap_broadcast_inputs() + + @staticmethod + def wrtend(): + """ + wrtend() + + + Defined at global.fpp lines 1931-2235 + + + """ + _spec.f90wrap_wrtend() + + @staticmethod + def ismyvolume(vvol): + """ + ismyvolume(vvol) + + + Defined at global.fpp lines 2238-2255 + + Parameters + ---------- + vvol : int + + """ + _spec.f90wrap_ismyvolume(vvol=vvol) + + @staticmethod + def whichcpuid(vvol, cpu_id): + """ + whichcpuid(vvol, cpu_id) + + + Defined at global.fpp lines 2258-2269 + + Parameters + ---------- + vvol : int + cpu_id : int + + """ + _spec.f90wrap_whichcpuid(vvol=vvol, cpu_id=cpu_id) + + @property + def myid(self): + """ + Element myid ftype=integer pytype=int + + + Defined at global.fpp line 181 + + """ + return _spec.f90wrap_allglobal__get__myid() + + @myid.setter + def myid(self, myid): + _spec.f90wrap_allglobal__set__myid(myid) + + @property + def ncpu(self): + """ + Element ncpu ftype=integer pytype=int + + + Defined at global.fpp line 181 + + """ + return _spec.f90wrap_allglobal__get__ncpu() + + @ncpu.setter + def ncpu(self, ncpu): + _spec.f90wrap_allglobal__set__ncpu(ncpu) + + @property + def mpi_comm_spec(self): + """ + Element mpi_comm_spec ftype=integer pytype=int + + + Defined at global.fpp line 181 + + """ + return _spec.f90wrap_allglobal__get__mpi_comm_spec() + + @mpi_comm_spec.setter + def mpi_comm_spec(self, mpi_comm_spec): + _spec.f90wrap_allglobal__set__mpi_comm_spec(mpi_comm_spec) + + @property + def ismyvolumevalue(self): + """ + Element ismyvolumevalue ftype=integer pytype=int + + + Defined at global.fpp line 182 + + """ + return _spec.f90wrap_allglobal__get__ismyvolumevalue() + + @ismyvolumevalue.setter + def ismyvolumevalue(self, ismyvolumevalue): + _spec.f90wrap_allglobal__set__ismyvolumevalue(ismyvolumevalue) + + @property + def cpus(self): + """ + Element cpus ftype=real(8) pytype=float + + + Defined at global.fpp line 183 + + """ + return _spec.f90wrap_allglobal__get__cpus() + + @cpus.setter + def cpus(self, cpus): + _spec.f90wrap_allglobal__set__cpus(cpus) + + @property + def 
skip_write(self): + """ + Element skip_write ftype=logical pytype=bool + + + Defined at global.fpp line 184 + + """ + return _spec.f90wrap_allglobal__get__skip_write() + + @skip_write.setter + def skip_write(self, skip_write): + _spec.f90wrap_allglobal__set__skip_write(skip_write) + + @property + def pi2nfp(self): + """ + Element pi2nfp ftype=real(8) pytype=float + + + Defined at global.fpp line 185 + + """ + return _spec.f90wrap_allglobal__get__pi2nfp() + + @pi2nfp.setter + def pi2nfp(self, pi2nfp): + _spec.f90wrap_allglobal__set__pi2nfp(pi2nfp) + + @property + def pi2pi2nfp(self): + """ + Element pi2pi2nfp ftype=real(8) pytype=float + + + Defined at global.fpp line 186 + + """ + return _spec.f90wrap_allglobal__get__pi2pi2nfp() + + @pi2pi2nfp.setter + def pi2pi2nfp(self, pi2pi2nfp): + _spec.f90wrap_allglobal__set__pi2pi2nfp(pi2pi2nfp) + + @property + def pi2pi2nfphalf(self): + """ + Element pi2pi2nfphalf ftype=real(8) pytype=float + + + Defined at global.fpp line 187 + + """ + return _spec.f90wrap_allglobal__get__pi2pi2nfphalf() + + @pi2pi2nfphalf.setter + def pi2pi2nfphalf(self, pi2pi2nfphalf): + _spec.f90wrap_allglobal__set__pi2pi2nfphalf(pi2pi2nfphalf) + + @property + def pi2pi2nfpquart(self): + """ + Element pi2pi2nfpquart ftype=real(8) pytype=float + + + Defined at global.fpp line 188 + + """ + return _spec.f90wrap_allglobal__get__pi2pi2nfpquart() + + @pi2pi2nfpquart.setter + def pi2pi2nfpquart(self, pi2pi2nfpquart): + _spec.f90wrap_allglobal__set__pi2pi2nfpquart(pi2pi2nfpquart) + + @property + def ext(self): + """ + Element ext ftype=character(len=100) pytype=str + + + Defined at global.fpp line 190 + + """ + return _spec.f90wrap_allglobal__get__ext() + + @ext.setter + def ext(self, ext): + _spec.f90wrap_allglobal__set__ext(ext) + + @property + def forceerr(self): + """ + Element forceerr ftype=real(8) pytype=float + + + Defined at global.fpp line 191 + + """ + return _spec.f90wrap_allglobal__get__forceerr() + + @forceerr.setter + def forceerr(self, forceerr): + _spec.f90wrap_allglobal__set__forceerr(forceerr) + + @property + def energy(self): + """ + Element energy ftype=real(8) pytype=float + + + Defined at global.fpp line 191 + + """ + return _spec.f90wrap_allglobal__get__energy() + + @energy.setter + def energy(self, energy): + _spec.f90wrap_allglobal__set__energy(energy) + + @property + def ipdt(self): + """ + Element ipdt ftype=real(8) pytype=float + + + Defined at global.fpp line 192 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ipdt(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ipdt = self._arrays[array_handle] + else: + ipdt = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ipdt) + self._arrays[array_handle] = ipdt + return ipdt + + @ipdt.setter + def ipdt(self, ipdt): + self.ipdt[...] = ipdt + + @property + def ipdtdpf(self): + """ + Element ipdtdpf ftype=real(8) pytype=float + + + Defined at global.fpp line 192 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ipdtdpf(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ipdtdpf = self._arrays[array_handle] + else: + ipdtdpf = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ipdtdpf) + self._arrays[array_handle] = ipdtdpf + return ipdtdpf + + @ipdtdpf.setter + def ipdtdpf(self, ipdtdpf): + self.ipdtdpf[...] 
= ipdtdpf + + @property + def mvol(self): + """ + Element mvol ftype=integer pytype=int + + + Defined at global.fpp line 193 + + """ + return _spec.f90wrap_allglobal__get__mvol() + + @mvol.setter + def mvol(self, mvol): + _spec.f90wrap_allglobal__set__mvol(mvol) + + @property + def yesstellsym(self): + """ + Element yesstellsym ftype=logical pytype=bool + + + Defined at global.fpp line 194 + + """ + return _spec.f90wrap_allglobal__get__yesstellsym() + + @yesstellsym.setter + def yesstellsym(self, yesstellsym): + _spec.f90wrap_allglobal__set__yesstellsym(yesstellsym) + + @property + def notstellsym(self): + """ + Element notstellsym ftype=logical pytype=bool + + + Defined at global.fpp line 194 + + """ + return _spec.f90wrap_allglobal__get__notstellsym() + + @notstellsym.setter + def notstellsym(self, notstellsym): + _spec.f90wrap_allglobal__set__notstellsym(notstellsym) + + @property + def yesmatrixfree(self): + """ + Element yesmatrixfree ftype=logical pytype=bool + + + Defined at global.fpp line 195 + + """ + return _spec.f90wrap_allglobal__get__yesmatrixfree() + + @yesmatrixfree.setter + def yesmatrixfree(self, yesmatrixfree): + _spec.f90wrap_allglobal__set__yesmatrixfree(yesmatrixfree) + + @property + def notmatrixfree(self): + """ + Element notmatrixfree ftype=logical pytype=bool + + + Defined at global.fpp line 195 + + """ + return _spec.f90wrap_allglobal__get__notmatrixfree() + + @notmatrixfree.setter + def notmatrixfree(self, notmatrixfree): + _spec.f90wrap_allglobal__set__notmatrixfree(notmatrixfree) + + @property + def cheby(self): + """ + Element cheby ftype=real(8) pytype=float + + + Defined at global.fpp line 196 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__cheby(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + cheby = self._arrays[array_handle] + else: + cheby = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__cheby) + self._arrays[array_handle] = cheby + return cheby + + @cheby.setter + def cheby(self, cheby): + self.cheby[...] = cheby + + @property + def zernike(self): + """ + Element zernike ftype=real(8) pytype=float + + + Defined at global.fpp line 196 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__zernike(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + zernike = self._arrays[array_handle] + else: + zernike = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__zernike) + self._arrays[array_handle] = zernike + return zernike + + @zernike.setter + def zernike(self, zernike): + self.zernike[...] = zernike + + @property + def tt(self): + """ + Element tt ftype=real(8) pytype=float + + + Defined at global.fpp line 197 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tt(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tt = self._arrays[array_handle] + else: + tt = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tt) + self._arrays[array_handle] = tt + return tt + + @tt.setter + def tt(self, tt): + self.tt[...] 
= tt + + @property + def rtt(self): + """ + Element rtt ftype=real(8) pytype=float + + + Defined at global.fpp line 197 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__rtt(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + rtt = self._arrays[array_handle] + else: + rtt = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__rtt) + self._arrays[array_handle] = rtt + return rtt + + @rtt.setter + def rtt(self, rtt): + self.rtt[...] = rtt + + @property + def rtm(self): + """ + Element rtm ftype=real(8) pytype=float + + + Defined at global.fpp line 198 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__rtm(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + rtm = self._arrays[array_handle] + else: + rtm = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__rtm) + self._arrays[array_handle] = rtm + return rtm + + @rtm.setter + def rtm(self, rtm): + self.rtm[...] = rtm + + @property + def zernikedof(self): + """ + Element zernikedof ftype=real(8) pytype=float + + + Defined at global.fpp line 199 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__zernikedof(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + zernikedof = self._arrays[array_handle] + else: + zernikedof = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__zernikedof) + self._arrays[array_handle] = zernikedof + return zernikedof + + @zernikedof.setter + def zernikedof(self, zernikedof): + self.zernikedof[...] = zernikedof + + @property + def imagneticok(self): + """ + Element imagneticok ftype=logical pytype=bool + + + Defined at global.fpp line 200 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__imagneticok(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + imagneticok = self._arrays[array_handle] + else: + imagneticok = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__imagneticok) + self._arrays[array_handle] = imagneticok + return imagneticok + + @imagneticok.setter + def imagneticok(self, imagneticok): + self.imagneticok[...] = imagneticok + + @property + def iconstraintok(self): + """ + Element iconstraintok ftype=logical pytype=bool + + + Defined at global.fpp line 201 + + """ + return _spec.f90wrap_allglobal__get__iconstraintok() + + @iconstraintok.setter + def iconstraintok(self, iconstraintok): + _spec.f90wrap_allglobal__set__iconstraintok(iconstraintok) + + @property + def beltramierror(self): + """ + Element beltramierror ftype=real(8) pytype=float + + + Defined at global.fpp line 202 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__beltramierror(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + beltramierror = self._arrays[array_handle] + else: + beltramierror = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__beltramierror) + self._arrays[array_handle] = beltramierror + return beltramierror + + @beltramierror.setter + def beltramierror(self, beltramierror): + self.beltramierror[...] 
= beltramierror + + @property + def mn(self): + """ + Element mn ftype=integer pytype=int + + + Defined at global.fpp line 207 + + """ + return _spec.f90wrap_allglobal__get__mn() + + @mn.setter + def mn(self, mn): + _spec.f90wrap_allglobal__set__mn(mn) + + @property + def im(self): + """ + Element im ftype=integer pytype=int + + + Defined at global.fpp line 208 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__im(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + im = self._arrays[array_handle] + else: + im = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__im) + self._arrays[array_handle] = im + return im + + @im.setter + def im(self, im): + self.im[...] = im + + @property + def in_(self): + """ + Element in_ ftype=integer pytype=int + + + Defined at global.fpp line 208 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__in_(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + in_ = self._arrays[array_handle] + else: + in_ = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__in_) + self._arrays[array_handle] = in_ + return in_ + + @in_.setter + def in_(self, in_): + self.in_[...] = in_ + + @property + def halfmm(self): + """ + Element halfmm ftype=real(8) pytype=float + + + Defined at global.fpp line 209 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__halfmm(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + halfmm = self._arrays[array_handle] + else: + halfmm = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__halfmm) + self._arrays[array_handle] = halfmm + return halfmm + + @halfmm.setter + def halfmm(self, halfmm): + self.halfmm[...] = halfmm + + @property + def regumm(self): + """ + Element regumm ftype=real(8) pytype=float + + + Defined at global.fpp line 209 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__regumm(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + regumm = self._arrays[array_handle] + else: + regumm = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__regumm) + self._arrays[array_handle] = regumm + return regumm + + @regumm.setter + def regumm(self, regumm): + self.regumm[...] = regumm + + @property + def rscale(self): + """ + Element rscale ftype=real(8) pytype=float + + + Defined at global.fpp line 210 + + """ + return _spec.f90wrap_allglobal__get__rscale() + + @rscale.setter + def rscale(self, rscale): + _spec.f90wrap_allglobal__set__rscale(rscale) + + @property + def psifactor(self): + """ + Element psifactor ftype=real(8) pytype=float + + + Defined at global.fpp line 211 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__psifactor(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + psifactor = self._arrays[array_handle] + else: + psifactor = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__psifactor) + self._arrays[array_handle] = psifactor + return psifactor + + @psifactor.setter + def psifactor(self, psifactor): + self.psifactor[...] 
= psifactor + + @property + def inifactor(self): + """ + Element inifactor ftype=real(8) pytype=float + + + Defined at global.fpp line 211 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__inifactor(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + inifactor = self._arrays[array_handle] + else: + inifactor = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__inifactor) + self._arrays[array_handle] = inifactor + return inifactor + + @inifactor.setter + def inifactor(self, inifactor): + self.inifactor[...] = inifactor + + @property + def bbweight(self): + """ + Element bbweight ftype=real(8) pytype=float + + + Defined at global.fpp line 212 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bbweight(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bbweight = self._arrays[array_handle] + else: + bbweight = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bbweight) + self._arrays[array_handle] = bbweight + return bbweight + + @bbweight.setter + def bbweight(self, bbweight): + self.bbweight[...] = bbweight + + @property + def mmpp(self): + """ + Element mmpp ftype=real(8) pytype=float + + + Defined at global.fpp line 213 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__mmpp(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + mmpp = self._arrays[array_handle] + else: + mmpp = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__mmpp) + self._arrays[array_handle] = mmpp + return mmpp + + @mmpp.setter + def mmpp(self, mmpp): + self.mmpp[...] = mmpp + + @property + def mne(self): + """ + Element mne ftype=integer pytype=int + + + Defined at global.fpp line 217 + + """ + return _spec.f90wrap_allglobal__get__mne() + + @mne.setter + def mne(self, mne): + _spec.f90wrap_allglobal__set__mne(mne) + + @property + def ime(self): + """ + Element ime ftype=integer pytype=int + + + Defined at global.fpp line 218 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ime(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ime = self._arrays[array_handle] + else: + ime = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ime) + self._arrays[array_handle] = ime + return ime + + @ime.setter + def ime(self, ime): + self.ime[...] = ime + + @property + def ine(self): + """ + Element ine ftype=integer pytype=int + + + Defined at global.fpp line 218 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ine(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ine = self._arrays[array_handle] + else: + ine = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ine) + self._arrays[array_handle] = ine + return ine + + @ine.setter + def ine(self, ine): + self.ine[...] 
= ine + + @property + def mns(self): + """ + Element mns ftype=integer pytype=int + + + Defined at global.fpp line 223 + + """ + return _spec.f90wrap_allglobal__get__mns() + + @mns.setter + def mns(self, mns): + _spec.f90wrap_allglobal__set__mns(mns) + + @property + def ims(self): + """ + Element ims ftype=integer pytype=int + + + Defined at global.fpp line 224 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ims(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ims = self._arrays[array_handle] + else: + ims = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ims) + self._arrays[array_handle] = ims + return ims + + @ims.setter + def ims(self, ims): + self.ims[...] = ims + + @property + def ins(self): + """ + Element ins ftype=integer pytype=int + + + Defined at global.fpp line 224 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ins(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ins = self._arrays[array_handle] + else: + ins = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ins) + self._arrays[array_handle] = ins + return ins + + @ins.setter + def ins(self, ins): + self.ins[...] = ins + + @property + def lmpol(self): + """ + Element lmpol ftype=integer pytype=int + + + Defined at global.fpp line 225 + + """ + return _spec.f90wrap_allglobal__get__lmpol() + + @lmpol.setter + def lmpol(self, lmpol): + _spec.f90wrap_allglobal__set__lmpol(lmpol) + + @property + def lntor(self): + """ + Element lntor ftype=integer pytype=int + + + Defined at global.fpp line 225 + + """ + return _spec.f90wrap_allglobal__get__lntor() + + @lntor.setter + def lntor(self, lntor): + _spec.f90wrap_allglobal__set__lntor(lntor) + + @property + def smpol(self): + """ + Element smpol ftype=integer pytype=int + + + Defined at global.fpp line 225 + + """ + return _spec.f90wrap_allglobal__get__smpol() + + @smpol.setter + def smpol(self, smpol): + _spec.f90wrap_allglobal__set__smpol(smpol) + + @property + def sntor(self): + """ + Element sntor ftype=integer pytype=int + + + Defined at global.fpp line 225 + + """ + return _spec.f90wrap_allglobal__get__sntor() + + @sntor.setter + def sntor(self, sntor): + _spec.f90wrap_allglobal__set__sntor(sntor) + + @property + def xoffset(self): + """ + Element xoffset ftype=real(8) pytype=float + + + Defined at global.fpp line 227 + + """ + return _spec.f90wrap_allglobal__get__xoffset() + + @xoffset.setter + def xoffset(self, xoffset): + _spec.f90wrap_allglobal__set__xoffset(xoffset) + + @property + def irbc(self): + """ + Element irbc ftype=real(8) pytype=float + + + Defined at global.fpp line 234 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__irbc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + irbc = self._arrays[array_handle] + else: + irbc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__irbc) + self._arrays[array_handle] = irbc + return irbc + + @irbc.setter + def irbc(self, irbc): + self.irbc[...] 
= irbc + + @property + def izbs(self): + """ + Element izbs ftype=real(8) pytype=float + + + Defined at global.fpp line 234 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__izbs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + izbs = self._arrays[array_handle] + else: + izbs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__izbs) + self._arrays[array_handle] = izbs + return izbs + + @izbs.setter + def izbs(self, izbs): + self.izbs[...] = izbs + + @property + def irbs(self): + """ + Element irbs ftype=real(8) pytype=float + + + Defined at global.fpp line 235 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__irbs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + irbs = self._arrays[array_handle] + else: + irbs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__irbs) + self._arrays[array_handle] = irbs + return irbs + + @irbs.setter + def irbs(self, irbs): + self.irbs[...] = irbs + + @property + def izbc(self): + """ + Element izbc ftype=real(8) pytype=float + + + Defined at global.fpp line 235 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__izbc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + izbc = self._arrays[array_handle] + else: + izbc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__izbc) + self._arrays[array_handle] = izbc + return izbc + + @izbc.setter + def izbc(self, izbc): + self.izbc[...] = izbc + + @property + def drbc(self): + """ + Element drbc ftype=real(8) pytype=float + + + Defined at global.fpp line 236 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__drbc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + drbc = self._arrays[array_handle] + else: + drbc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__drbc) + self._arrays[array_handle] = drbc + return drbc + + @drbc.setter + def drbc(self, drbc): + self.drbc[...] = drbc + + @property + def dzbs(self): + """ + Element dzbs ftype=real(8) pytype=float + + + Defined at global.fpp line 236 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dzbs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dzbs = self._arrays[array_handle] + else: + dzbs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dzbs) + self._arrays[array_handle] = dzbs + return dzbs + + @dzbs.setter + def dzbs(self, dzbs): + self.dzbs[...] = dzbs + + @property + def drbs(self): + """ + Element drbs ftype=real(8) pytype=float + + + Defined at global.fpp line 237 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__drbs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + drbs = self._arrays[array_handle] + else: + drbs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__drbs) + self._arrays[array_handle] = drbs + return drbs + + @drbs.setter + def drbs(self, drbs): + self.drbs[...] 
= drbs + + @property + def dzbc(self): + """ + Element dzbc ftype=real(8) pytype=float + + + Defined at global.fpp line 237 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dzbc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dzbc = self._arrays[array_handle] + else: + dzbc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dzbc) + self._arrays[array_handle] = dzbc + return dzbc + + @dzbc.setter + def dzbc(self, dzbc): + self.dzbc[...] = dzbc + + @property + def irij(self): + """ + Element irij ftype=real(8) pytype=float + + + Defined at global.fpp line 238 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__irij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + irij = self._arrays[array_handle] + else: + irij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__irij) + self._arrays[array_handle] = irij + return irij + + @irij.setter + def irij(self, irij): + self.irij[...] = irij + + @property + def izij(self): + """ + Element izij ftype=real(8) pytype=float + + + Defined at global.fpp line 238 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__izij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + izij = self._arrays[array_handle] + else: + izij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__izij) + self._arrays[array_handle] = izij + return izij + + @izij.setter + def izij(self, izij): + self.izij[...] = izij + + @property + def drij(self): + """ + Element drij ftype=real(8) pytype=float + + + Defined at global.fpp line 239 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__drij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + drij = self._arrays[array_handle] + else: + drij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__drij) + self._arrays[array_handle] = drij + return drij + + @drij.setter + def drij(self, drij): + self.drij[...] = drij + + @property + def dzij(self): + """ + Element dzij ftype=real(8) pytype=float + + + Defined at global.fpp line 239 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dzij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dzij = self._arrays[array_handle] + else: + dzij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dzij) + self._arrays[array_handle] = dzij + return dzij + + @dzij.setter + def dzij(self, dzij): + self.dzij[...] = dzij + + @property + def trij(self): + """ + Element trij ftype=real(8) pytype=float + + + Defined at global.fpp line 240 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__trij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + trij = self._arrays[array_handle] + else: + trij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__trij) + self._arrays[array_handle] = trij + return trij + + @trij.setter + def trij(self, trij): + self.trij[...] 
= trij + + @property + def tzij(self): + """ + Element tzij ftype=real(8) pytype=float + + + Defined at global.fpp line 240 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tzij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tzij = self._arrays[array_handle] + else: + tzij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tzij) + self._arrays[array_handle] = tzij + return tzij + + @tzij.setter + def tzij(self, tzij): + self.tzij[...] = tzij + + @property + def ivns(self): + """ + Element ivns ftype=real(8) pytype=float + + + Defined at global.fpp line 241 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ivns(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ivns = self._arrays[array_handle] + else: + ivns = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ivns) + self._arrays[array_handle] = ivns + return ivns + + @ivns.setter + def ivns(self, ivns): + self.ivns[...] = ivns + + @property + def ibns(self): + """ + Element ibns ftype=real(8) pytype=float + + + Defined at global.fpp line 242 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ibns(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ibns = self._arrays[array_handle] + else: + ibns = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ibns) + self._arrays[array_handle] = ibns + return ibns + + @ibns.setter + def ibns(self, ibns): + self.ibns[...] = ibns + + @property + def ivnc(self): + """ + Element ivnc ftype=real(8) pytype=float + + + Defined at global.fpp line 243 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ivnc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ivnc = self._arrays[array_handle] + else: + ivnc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ivnc) + self._arrays[array_handle] = ivnc + return ivnc + + @ivnc.setter + def ivnc(self, ivnc): + self.ivnc[...] = ivnc + + @property + def ibnc(self): + """ + Element ibnc ftype=real(8) pytype=float + + + Defined at global.fpp line 244 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ibnc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ibnc = self._arrays[array_handle] + else: + ibnc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ibnc) + self._arrays[array_handle] = ibnc + return ibnc + + @ibnc.setter + def ibnc(self, ibnc): + self.ibnc[...] = ibnc + + @property + def lrbc(self): + """ + Element lrbc ftype=real(8) pytype=float + + + Defined at global.fpp line 245 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lrbc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lrbc = self._arrays[array_handle] + else: + lrbc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lrbc) + self._arrays[array_handle] = lrbc + return lrbc + + @lrbc.setter + def lrbc(self, lrbc): + self.lrbc[...] 
= lrbc + + @property + def lzbs(self): + """ + Element lzbs ftype=real(8) pytype=float + + + Defined at global.fpp line 245 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lzbs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lzbs = self._arrays[array_handle] + else: + lzbs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lzbs) + self._arrays[array_handle] = lzbs + return lzbs + + @lzbs.setter + def lzbs(self, lzbs): + self.lzbs[...] = lzbs + + @property + def lrbs(self): + """ + Element lrbs ftype=real(8) pytype=float + + + Defined at global.fpp line 246 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lrbs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lrbs = self._arrays[array_handle] + else: + lrbs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lrbs) + self._arrays[array_handle] = lrbs + return lrbs + + @lrbs.setter + def lrbs(self, lrbs): + self.lrbs[...] = lrbs + + @property + def lzbc(self): + """ + Element lzbc ftype=real(8) pytype=float + + + Defined at global.fpp line 246 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lzbc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lzbc = self._arrays[array_handle] + else: + lzbc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lzbc) + self._arrays[array_handle] = lzbc + return lzbc + + @lzbc.setter + def lzbc(self, lzbc): + self.lzbc[...] = lzbc + + @property + def num_modes(self): + """ + Element num_modes ftype=integer pytype=int + + + Defined at global.fpp line 248 + + """ + return _spec.f90wrap_allglobal__get__num_modes() + + @num_modes.setter + def num_modes(self, num_modes): + _spec.f90wrap_allglobal__set__num_modes(num_modes) + + @property + def mmrzrz(self): + """ + Element mmrzrz ftype=integer pytype=int + + + Defined at global.fpp line 249 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__mmrzrz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + mmrzrz = self._arrays[array_handle] + else: + mmrzrz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__mmrzrz) + self._arrays[array_handle] = mmrzrz + return mmrzrz + + @mmrzrz.setter + def mmrzrz(self, mmrzrz): + self.mmrzrz[...] = mmrzrz + + @property + def nnrzrz(self): + """ + Element nnrzrz ftype=integer pytype=int + + + Defined at global.fpp line 249 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__nnrzrz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + nnrzrz = self._arrays[array_handle] + else: + nnrzrz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__nnrzrz) + self._arrays[array_handle] = nnrzrz + return nnrzrz + + @nnrzrz.setter + def nnrzrz(self, nnrzrz): + self.nnrzrz[...] 
= nnrzrz + + @property + def allrzrz(self): + """ + Element allrzrz ftype=real(8) pytype=float + + + Defined at global.fpp line 250 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__allrzrz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + allrzrz = self._arrays[array_handle] + else: + allrzrz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__allrzrz) + self._arrays[array_handle] = allrzrz + return allrzrz + + @allrzrz.setter + def allrzrz(self, allrzrz): + self.allrzrz[...] = allrzrz + + @property + def nt(self): + """ + Element nt ftype=integer pytype=int + + + Defined at global.fpp line 256 + + """ + return _spec.f90wrap_allglobal__get__nt() + + @nt.setter + def nt(self, nt): + _spec.f90wrap_allglobal__set__nt(nt) + + @property + def nz(self): + """ + Element nz ftype=integer pytype=int + + + Defined at global.fpp line 256 + + """ + return _spec.f90wrap_allglobal__get__nz() + + @nz.setter + def nz(self, nz): + _spec.f90wrap_allglobal__set__nz(nz) + + @property + def ntz(self): + """ + Element ntz ftype=integer pytype=int + + + Defined at global.fpp line 256 + + """ + return _spec.f90wrap_allglobal__get__ntz() + + @ntz.setter + def ntz(self, ntz): + _spec.f90wrap_allglobal__set__ntz(ntz) + + @property + def hnt(self): + """ + Element hnt ftype=integer pytype=int + + + Defined at global.fpp line 256 + + """ + return _spec.f90wrap_allglobal__get__hnt() + + @hnt.setter + def hnt(self, hnt): + _spec.f90wrap_allglobal__set__hnt(hnt) + + @property + def hnz(self): + """ + Element hnz ftype=integer pytype=int + + + Defined at global.fpp line 256 + + """ + return _spec.f90wrap_allglobal__get__hnz() + + @hnz.setter + def hnz(self, hnz): + _spec.f90wrap_allglobal__set__hnz(hnz) + + @property + def sontz(self): + """ + Element sontz ftype=real(8) pytype=float + + + Defined at global.fpp line 257 + + """ + return _spec.f90wrap_allglobal__get__sontz() + + @sontz.setter + def sontz(self, sontz): + _spec.f90wrap_allglobal__set__sontz(sontz) + + @property + def rij(self): + """ + Element rij ftype=real(8) pytype=float + + + Defined at global.fpp line 263 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__rij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + rij = self._arrays[array_handle] + else: + rij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__rij) + self._arrays[array_handle] = rij + return rij + + @rij.setter + def rij(self, rij): + self.rij[...] = rij + + @property + def zij(self): + """ + Element zij ftype=real(8) pytype=float + + + Defined at global.fpp line 263 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__zij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + zij = self._arrays[array_handle] + else: + zij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__zij) + self._arrays[array_handle] = zij + return zij + + @zij.setter + def zij(self, zij): + self.zij[...] 
= zij + + @property + def xij(self): + """ + Element xij ftype=real(8) pytype=float + + + Defined at global.fpp line 263 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__xij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + xij = self._arrays[array_handle] + else: + xij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__xij) + self._arrays[array_handle] = xij + return xij + + @xij.setter + def xij(self, xij): + self.xij[...] = xij + + @property + def yij(self): + """ + Element yij ftype=real(8) pytype=float + + + Defined at global.fpp line 263 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__yij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + yij = self._arrays[array_handle] + else: + yij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__yij) + self._arrays[array_handle] = yij + return yij + + @yij.setter + def yij(self, yij): + self.yij[...] = yij + + @property + def sg(self): + """ + Element sg ftype=real(8) pytype=float + + + Defined at global.fpp line 263 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__sg(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + sg = self._arrays[array_handle] + else: + sg = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__sg) + self._arrays[array_handle] = sg + return sg + + @sg.setter + def sg(self, sg): + self.sg[...] = sg + + @property + def guvij(self): + """ + Element guvij ftype=real(8) pytype=float + + + Defined at global.fpp line 263 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__guvij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + guvij = self._arrays[array_handle] + else: + guvij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__guvij) + self._arrays[array_handle] = guvij + return guvij + + @guvij.setter + def guvij(self, guvij): + self.guvij[...] = guvij + + @property + def gvuij(self): + """ + Element gvuij ftype=real(8) pytype=float + + + Defined at global.fpp line 263 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gvuij(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gvuij = self._arrays[array_handle] + else: + gvuij = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gvuij) + self._arrays[array_handle] = gvuij + return gvuij + + @gvuij.setter + def gvuij(self, gvuij): + self.gvuij[...] = gvuij + + @property + def guvijsave(self): + """ + Element guvijsave ftype=real(8) pytype=float + + + Defined at global.fpp line 264 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__guvijsave(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + guvijsave = self._arrays[array_handle] + else: + guvijsave = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__guvijsave) + self._arrays[array_handle] = guvijsave + return guvijsave + + @guvijsave.setter + def guvijsave(self, guvijsave): + self.guvijsave[...] 
= guvijsave + + @property + def ki(self): + """ + Element ki ftype=integer pytype=int + + + Defined at global.fpp line 265 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ki(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ki = self._arrays[array_handle] + else: + ki = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ki) + self._arrays[array_handle] = ki + return ki + + @ki.setter + def ki(self, ki): + self.ki[...] = ki + + @property + def kijs(self): + """ + Element kijs ftype=integer pytype=int + + + Defined at global.fpp line 265 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__kijs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + kijs = self._arrays[array_handle] + else: + kijs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__kijs) + self._arrays[array_handle] = kijs + return kijs + + @kijs.setter + def kijs(self, kijs): + self.kijs[...] = kijs + + @property + def kija(self): + """ + Element kija ftype=integer pytype=int + + + Defined at global.fpp line 265 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__kija(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + kija = self._arrays[array_handle] + else: + kija = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__kija) + self._arrays[array_handle] = kija + return kija + + @kija.setter + def kija(self, kija): + self.kija[...] = kija + + @property + def iotakkii(self): + """ + Element iotakkii ftype=integer pytype=int + + + Defined at global.fpp line 266 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__iotakkii(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + iotakkii = self._arrays[array_handle] + else: + iotakkii = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__iotakkii) + self._arrays[array_handle] = iotakkii + return iotakkii + + @iotakkii.setter + def iotakkii(self, iotakkii): + self.iotakkii[...] = iotakkii + + @property + def iotaksub(self): + """ + Element iotaksub ftype=integer pytype=int + + + Defined at global.fpp line 266 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__iotaksub(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + iotaksub = self._arrays[array_handle] + else: + iotaksub = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__iotaksub) + self._arrays[array_handle] = iotaksub + return iotaksub + + @iotaksub.setter + def iotaksub(self, iotaksub): + self.iotaksub[...] 
= iotaksub + + @property + def iotakadd(self): + """ + Element iotakadd ftype=integer pytype=int + + + Defined at global.fpp line 266 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__iotakadd(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + iotakadd = self._arrays[array_handle] + else: + iotakadd = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__iotakadd) + self._arrays[array_handle] = iotakadd + return iotakadd + + @iotakadd.setter + def iotakadd(self, iotakadd): + self.iotakadd[...] = iotakadd + + @property + def iotaksgn(self): + """ + Element iotaksgn ftype=integer pytype=int + + + Defined at global.fpp line 266 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__iotaksgn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + iotaksgn = self._arrays[array_handle] + else: + iotaksgn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__iotaksgn) + self._arrays[array_handle] = iotaksgn + return iotaksgn + + @iotaksgn.setter + def iotaksgn(self, iotaksgn): + self.iotaksgn[...] = iotaksgn + + @property + def efmn(self): + """ + Element efmn ftype=real(8) pytype=float + + + Defined at global.fpp line 267 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__efmn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + efmn = self._arrays[array_handle] + else: + efmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__efmn) + self._arrays[array_handle] = efmn + return efmn + + @efmn.setter + def efmn(self, efmn): + self.efmn[...] = efmn + + @property + def ofmn(self): + """ + Element ofmn ftype=real(8) pytype=float + + + Defined at global.fpp line 267 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ofmn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ofmn = self._arrays[array_handle] + else: + ofmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ofmn) + self._arrays[array_handle] = ofmn + return ofmn + + @ofmn.setter + def ofmn(self, ofmn): + self.ofmn[...] = ofmn + + @property + def cfmn(self): + """ + Element cfmn ftype=real(8) pytype=float + + + Defined at global.fpp line 267 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__cfmn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + cfmn = self._arrays[array_handle] + else: + cfmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__cfmn) + self._arrays[array_handle] = cfmn + return cfmn + + @cfmn.setter + def cfmn(self, cfmn): + self.cfmn[...] 
= cfmn + + @property + def sfmn(self): + """ + Element sfmn ftype=real(8) pytype=float + + + Defined at global.fpp line 267 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__sfmn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + sfmn = self._arrays[array_handle] + else: + sfmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__sfmn) + self._arrays[array_handle] = sfmn + return sfmn + + @sfmn.setter + def sfmn(self, sfmn): + self.sfmn[...] = sfmn + + @property + def evmn(self): + """ + Element evmn ftype=real(8) pytype=float + + + Defined at global.fpp line 268 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__evmn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + evmn = self._arrays[array_handle] + else: + evmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__evmn) + self._arrays[array_handle] = evmn + return evmn + + @evmn.setter + def evmn(self, evmn): + self.evmn[...] = evmn + + @property + def odmn(self): + """ + Element odmn ftype=real(8) pytype=float + + + Defined at global.fpp line 268 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__odmn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + odmn = self._arrays[array_handle] + else: + odmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__odmn) + self._arrays[array_handle] = odmn + return odmn + + @odmn.setter + def odmn(self, odmn): + self.odmn[...] = odmn + + @property + def comn(self): + """ + Element comn ftype=real(8) pytype=float + + + Defined at global.fpp line 268 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__comn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + comn = self._arrays[array_handle] + else: + comn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__comn) + self._arrays[array_handle] = comn + return comn + + @comn.setter + def comn(self, comn): + self.comn[...] = comn + + @property + def simn(self): + """ + Element simn ftype=real(8) pytype=float + + + Defined at global.fpp line 268 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__simn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + simn = self._arrays[array_handle] + else: + simn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__simn) + self._arrays[array_handle] = simn + return simn + + @simn.setter + def simn(self, simn): + self.simn[...] = simn + + @property + def ijreal(self): + """ + Element ijreal ftype=real(8) pytype=float + + + Defined at global.fpp line 269 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ijreal(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ijreal = self._arrays[array_handle] + else: + ijreal = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ijreal) + self._arrays[array_handle] = ijreal + return ijreal + + @ijreal.setter + def ijreal(self, ijreal): + self.ijreal[...] 
= ijreal + + @property + def ijimag(self): + """ + Element ijimag ftype=real(8) pytype=float + + + Defined at global.fpp line 269 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ijimag(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ijimag = self._arrays[array_handle] + else: + ijimag = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ijimag) + self._arrays[array_handle] = ijimag + return ijimag + + @ijimag.setter + def ijimag(self, ijimag): + self.ijimag[...] = ijimag + + @property + def jireal(self): + """ + Element jireal ftype=real(8) pytype=float + + + Defined at global.fpp line 269 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__jireal(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + jireal = self._arrays[array_handle] + else: + jireal = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__jireal) + self._arrays[array_handle] = jireal + return jireal + + @jireal.setter + def jireal(self, jireal): + self.jireal[...] = jireal + + @property + def jiimag(self): + """ + Element jiimag ftype=real(8) pytype=float + + + Defined at global.fpp line 269 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__jiimag(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + jiimag = self._arrays[array_handle] + else: + jiimag = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__jiimag) + self._arrays[array_handle] = jiimag + return jiimag + + @jiimag.setter + def jiimag(self, jiimag): + self.jiimag[...] = jiimag + + @property + def jkreal(self): + """ + Element jkreal ftype=real(8) pytype=float + + + Defined at global.fpp line 270 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__jkreal(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + jkreal = self._arrays[array_handle] + else: + jkreal = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__jkreal) + self._arrays[array_handle] = jkreal + return jkreal + + @jkreal.setter + def jkreal(self, jkreal): + self.jkreal[...] = jkreal + + @property + def jkimag(self): + """ + Element jkimag ftype=real(8) pytype=float + + + Defined at global.fpp line 270 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__jkimag(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + jkimag = self._arrays[array_handle] + else: + jkimag = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__jkimag) + self._arrays[array_handle] = jkimag + return jkimag + + @jkimag.setter + def jkimag(self, jkimag): + self.jkimag[...] 
= jkimag + + @property + def kjreal(self): + """ + Element kjreal ftype=real(8) pytype=float + + + Defined at global.fpp line 270 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__kjreal(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + kjreal = self._arrays[array_handle] + else: + kjreal = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__kjreal) + self._arrays[array_handle] = kjreal + return kjreal + + @kjreal.setter + def kjreal(self, kjreal): + self.kjreal[...] = kjreal + + @property + def kjimag(self): + """ + Element kjimag ftype=real(8) pytype=float + + + Defined at global.fpp line 270 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__kjimag(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + kjimag = self._arrays[array_handle] + else: + kjimag = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__kjimag) + self._arrays[array_handle] = kjimag + return kjimag + + @kjimag.setter + def kjimag(self, kjimag): + self.kjimag[...] = kjimag + + @property + def bsupumn(self): + """ + Element bsupumn ftype=real(8) pytype=float + + + Defined at global.fpp line 271 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bsupumn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bsupumn = self._arrays[array_handle] + else: + bsupumn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bsupumn) + self._arrays[array_handle] = bsupumn + return bsupumn + + @bsupumn.setter + def bsupumn(self, bsupumn): + self.bsupumn[...] = bsupumn + + @property + def bsupvmn(self): + """ + Element bsupvmn ftype=real(8) pytype=float + + + Defined at global.fpp line 271 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bsupvmn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bsupvmn = self._arrays[array_handle] + else: + bsupvmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bsupvmn) + self._arrays[array_handle] = bsupvmn + return bsupvmn + + @bsupvmn.setter + def bsupvmn(self, bsupvmn): + self.bsupvmn[...] = bsupvmn + + @property + def goomne(self): + """ + Element goomne ftype=real(8) pytype=float + + + Defined at global.fpp line 273 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__goomne(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + goomne = self._arrays[array_handle] + else: + goomne = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__goomne) + self._arrays[array_handle] = goomne + return goomne + + @goomne.setter + def goomne(self, goomne): + self.goomne[...] 
= goomne + + @property + def goomno(self): + """ + Element goomno ftype=real(8) pytype=float + + + Defined at global.fpp line 273 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__goomno(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + goomno = self._arrays[array_handle] + else: + goomno = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__goomno) + self._arrays[array_handle] = goomno + return goomno + + @goomno.setter + def goomno(self, goomno): + self.goomno[...] = goomno + + @property + def gssmne(self): + """ + Element gssmne ftype=real(8) pytype=float + + + Defined at global.fpp line 274 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gssmne(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gssmne = self._arrays[array_handle] + else: + gssmne = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gssmne) + self._arrays[array_handle] = gssmne + return gssmne + + @gssmne.setter + def gssmne(self, gssmne): + self.gssmne[...] = gssmne + + @property + def gssmno(self): + """ + Element gssmno ftype=real(8) pytype=float + + + Defined at global.fpp line 274 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gssmno(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gssmno = self._arrays[array_handle] + else: + gssmno = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gssmno) + self._arrays[array_handle] = gssmno + return gssmno + + @gssmno.setter + def gssmno(self, gssmno): + self.gssmno[...] = gssmno + + @property + def gstmne(self): + """ + Element gstmne ftype=real(8) pytype=float + + + Defined at global.fpp line 275 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gstmne(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gstmne = self._arrays[array_handle] + else: + gstmne = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gstmne) + self._arrays[array_handle] = gstmne + return gstmne + + @gstmne.setter + def gstmne(self, gstmne): + self.gstmne[...] = gstmne + + @property + def gstmno(self): + """ + Element gstmno ftype=real(8) pytype=float + + + Defined at global.fpp line 275 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gstmno(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gstmno = self._arrays[array_handle] + else: + gstmno = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gstmno) + self._arrays[array_handle] = gstmno + return gstmno + + @gstmno.setter + def gstmno(self, gstmno): + self.gstmno[...] 
= gstmno + + @property + def gszmne(self): + """ + Element gszmne ftype=real(8) pytype=float + + + Defined at global.fpp line 276 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gszmne(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gszmne = self._arrays[array_handle] + else: + gszmne = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gszmne) + self._arrays[array_handle] = gszmne + return gszmne + + @gszmne.setter + def gszmne(self, gszmne): + self.gszmne[...] = gszmne + + @property + def gszmno(self): + """ + Element gszmno ftype=real(8) pytype=float + + + Defined at global.fpp line 276 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gszmno(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gszmno = self._arrays[array_handle] + else: + gszmno = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gszmno) + self._arrays[array_handle] = gszmno + return gszmno + + @gszmno.setter + def gszmno(self, gszmno): + self.gszmno[...] = gszmno + + @property + def gttmne(self): + """ + Element gttmne ftype=real(8) pytype=float + + + Defined at global.fpp line 277 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gttmne(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gttmne = self._arrays[array_handle] + else: + gttmne = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gttmne) + self._arrays[array_handle] = gttmne + return gttmne + + @gttmne.setter + def gttmne(self, gttmne): + self.gttmne[...] = gttmne + + @property + def gttmno(self): + """ + Element gttmno ftype=real(8) pytype=float + + + Defined at global.fpp line 277 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gttmno(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gttmno = self._arrays[array_handle] + else: + gttmno = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gttmno) + self._arrays[array_handle] = gttmno + return gttmno + + @gttmno.setter + def gttmno(self, gttmno): + self.gttmno[...] = gttmno + + @property + def gtzmne(self): + """ + Element gtzmne ftype=real(8) pytype=float + + + Defined at global.fpp line 278 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gtzmne(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gtzmne = self._arrays[array_handle] + else: + gtzmne = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gtzmne) + self._arrays[array_handle] = gtzmne + return gtzmne + + @gtzmne.setter + def gtzmne(self, gtzmne): + self.gtzmne[...] 
= gtzmne + + @property + def gtzmno(self): + """ + Element gtzmno ftype=real(8) pytype=float + + + Defined at global.fpp line 278 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gtzmno(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gtzmno = self._arrays[array_handle] + else: + gtzmno = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gtzmno) + self._arrays[array_handle] = gtzmno + return gtzmno + + @gtzmno.setter + def gtzmno(self, gtzmno): + self.gtzmno[...] = gtzmno + + @property + def gzzmne(self): + """ + Element gzzmne ftype=real(8) pytype=float + + + Defined at global.fpp line 279 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gzzmne(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gzzmne = self._arrays[array_handle] + else: + gzzmne = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gzzmne) + self._arrays[array_handle] = gzzmne + return gzzmne + + @gzzmne.setter + def gzzmne(self, gzzmne): + self.gzzmne[...] = gzzmne + + @property + def gzzmno(self): + """ + Element gzzmno ftype=real(8) pytype=float + + + Defined at global.fpp line 279 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gzzmno(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gzzmno = self._arrays[array_handle] + else: + gzzmno = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gzzmno) + self._arrays[array_handle] = gzzmno + return gzzmno + + @gzzmno.setter + def gzzmno(self, gzzmno): + self.gzzmno[...] = gzzmno + + @property + def dtoocc(self): + """ + Element dtoocc ftype=real(8) pytype=float + + + Defined at global.fpp line 291 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dtoocc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dtoocc = self._arrays[array_handle] + else: + dtoocc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dtoocc) + self._arrays[array_handle] = dtoocc + return dtoocc + + @dtoocc.setter + def dtoocc(self, dtoocc): + self.dtoocc[...] = dtoocc + + @property + def dtoocs(self): + """ + Element dtoocs ftype=real(8) pytype=float + + + Defined at global.fpp line 291 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dtoocs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dtoocs = self._arrays[array_handle] + else: + dtoocs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dtoocs) + self._arrays[array_handle] = dtoocs + return dtoocs + + @dtoocs.setter + def dtoocs(self, dtoocs): + self.dtoocs[...] 
= dtoocs + + @property + def dtoosc(self): + """ + Element dtoosc ftype=real(8) pytype=float + + + Defined at global.fpp line 291 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dtoosc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dtoosc = self._arrays[array_handle] + else: + dtoosc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dtoosc) + self._arrays[array_handle] = dtoosc + return dtoosc + + @dtoosc.setter + def dtoosc(self, dtoosc): + self.dtoosc[...] = dtoosc + + @property + def dtooss(self): + """ + Element dtooss ftype=real(8) pytype=float + + + Defined at global.fpp line 291 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dtooss(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dtooss = self._arrays[array_handle] + else: + dtooss = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dtooss) + self._arrays[array_handle] = dtooss + return dtooss + + @dtooss.setter + def dtooss(self, dtooss): + self.dtooss[...] = dtooss + + @property + def ttsscc(self): + """ + Element ttsscc ftype=real(8) pytype=float + + + Defined at global.fpp line 292 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ttsscc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ttsscc = self._arrays[array_handle] + else: + ttsscc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ttsscc) + self._arrays[array_handle] = ttsscc + return ttsscc + + @ttsscc.setter + def ttsscc(self, ttsscc): + self.ttsscc[...] = ttsscc + + @property + def ttsscs(self): + """ + Element ttsscs ftype=real(8) pytype=float + + + Defined at global.fpp line 292 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ttsscs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ttsscs = self._arrays[array_handle] + else: + ttsscs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ttsscs) + self._arrays[array_handle] = ttsscs + return ttsscs + + @ttsscs.setter + def ttsscs(self, ttsscs): + self.ttsscs[...] = ttsscs + + @property + def ttsssc(self): + """ + Element ttsssc ftype=real(8) pytype=float + + + Defined at global.fpp line 292 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ttsssc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ttsssc = self._arrays[array_handle] + else: + ttsssc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ttsssc) + self._arrays[array_handle] = ttsssc + return ttsssc + + @ttsssc.setter + def ttsssc(self, ttsssc): + self.ttsssc[...] 
= ttsssc + + @property + def ttssss(self): + """ + Element ttssss ftype=real(8) pytype=float + + + Defined at global.fpp line 292 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ttssss(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ttssss = self._arrays[array_handle] + else: + ttssss = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ttssss) + self._arrays[array_handle] = ttssss + return ttssss + + @ttssss.setter + def ttssss(self, ttssss): + self.ttssss[...] = ttssss + + @property + def tdstcc(self): + """ + Element tdstcc ftype=real(8) pytype=float + + + Defined at global.fpp line 293 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tdstcc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tdstcc = self._arrays[array_handle] + else: + tdstcc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tdstcc) + self._arrays[array_handle] = tdstcc + return tdstcc + + @tdstcc.setter + def tdstcc(self, tdstcc): + self.tdstcc[...] = tdstcc + + @property + def tdstcs(self): + """ + Element tdstcs ftype=real(8) pytype=float + + + Defined at global.fpp line 293 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tdstcs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tdstcs = self._arrays[array_handle] + else: + tdstcs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tdstcs) + self._arrays[array_handle] = tdstcs + return tdstcs + + @tdstcs.setter + def tdstcs(self, tdstcs): + self.tdstcs[...] = tdstcs + + @property + def tdstsc(self): + """ + Element tdstsc ftype=real(8) pytype=float + + + Defined at global.fpp line 293 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tdstsc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tdstsc = self._arrays[array_handle] + else: + tdstsc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tdstsc) + self._arrays[array_handle] = tdstsc + return tdstsc + + @tdstsc.setter + def tdstsc(self, tdstsc): + self.tdstsc[...] = tdstsc + + @property + def tdstss(self): + """ + Element tdstss ftype=real(8) pytype=float + + + Defined at global.fpp line 293 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tdstss(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tdstss = self._arrays[array_handle] + else: + tdstss = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tdstss) + self._arrays[array_handle] = tdstss + return tdstss + + @tdstss.setter + def tdstss(self, tdstss): + self.tdstss[...] 
= tdstss + + @property + def tdszcc(self): + """ + Element tdszcc ftype=real(8) pytype=float + + + Defined at global.fpp line 294 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tdszcc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tdszcc = self._arrays[array_handle] + else: + tdszcc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tdszcc) + self._arrays[array_handle] = tdszcc + return tdszcc + + @tdszcc.setter + def tdszcc(self, tdszcc): + self.tdszcc[...] = tdszcc + + @property + def tdszcs(self): + """ + Element tdszcs ftype=real(8) pytype=float + + + Defined at global.fpp line 294 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tdszcs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tdszcs = self._arrays[array_handle] + else: + tdszcs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tdszcs) + self._arrays[array_handle] = tdszcs + return tdszcs + + @tdszcs.setter + def tdszcs(self, tdszcs): + self.tdszcs[...] = tdszcs + + @property + def tdszsc(self): + """ + Element tdszsc ftype=real(8) pytype=float + + + Defined at global.fpp line 294 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tdszsc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tdszsc = self._arrays[array_handle] + else: + tdszsc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tdszsc) + self._arrays[array_handle] = tdszsc + return tdszsc + + @tdszsc.setter + def tdszsc(self, tdszsc): + self.tdszsc[...] = tdszsc + + @property + def tdszss(self): + """ + Element tdszss ftype=real(8) pytype=float + + + Defined at global.fpp line 294 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tdszss(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tdszss = self._arrays[array_handle] + else: + tdszss = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tdszss) + self._arrays[array_handle] = tdszss + return tdszss + + @tdszss.setter + def tdszss(self, tdszss): + self.tdszss[...] = tdszss + + @property + def ddttcc(self): + """ + Element ddttcc ftype=real(8) pytype=float + + + Defined at global.fpp line 295 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddttcc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddttcc = self._arrays[array_handle] + else: + ddttcc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddttcc) + self._arrays[array_handle] = ddttcc + return ddttcc + + @ddttcc.setter + def ddttcc(self, ddttcc): + self.ddttcc[...] 
= ddttcc + + @property + def ddttcs(self): + """ + Element ddttcs ftype=real(8) pytype=float + + + Defined at global.fpp line 295 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddttcs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddttcs = self._arrays[array_handle] + else: + ddttcs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddttcs) + self._arrays[array_handle] = ddttcs + return ddttcs + + @ddttcs.setter + def ddttcs(self, ddttcs): + self.ddttcs[...] = ddttcs + + @property + def ddttsc(self): + """ + Element ddttsc ftype=real(8) pytype=float + + + Defined at global.fpp line 295 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddttsc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddttsc = self._arrays[array_handle] + else: + ddttsc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddttsc) + self._arrays[array_handle] = ddttsc + return ddttsc + + @ddttsc.setter + def ddttsc(self, ddttsc): + self.ddttsc[...] = ddttsc + + @property + def ddttss(self): + """ + Element ddttss ftype=real(8) pytype=float + + + Defined at global.fpp line 295 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddttss(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddttss = self._arrays[array_handle] + else: + ddttss = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddttss) + self._arrays[array_handle] = ddttss + return ddttss + + @ddttss.setter + def ddttss(self, ddttss): + self.ddttss[...] = ddttss + + @property + def ddtzcc(self): + """ + Element ddtzcc ftype=real(8) pytype=float + + + Defined at global.fpp line 296 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddtzcc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddtzcc = self._arrays[array_handle] + else: + ddtzcc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddtzcc) + self._arrays[array_handle] = ddtzcc + return ddtzcc + + @ddtzcc.setter + def ddtzcc(self, ddtzcc): + self.ddtzcc[...] = ddtzcc + + @property + def ddtzcs(self): + """ + Element ddtzcs ftype=real(8) pytype=float + + + Defined at global.fpp line 296 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddtzcs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddtzcs = self._arrays[array_handle] + else: + ddtzcs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddtzcs) + self._arrays[array_handle] = ddtzcs + return ddtzcs + + @ddtzcs.setter + def ddtzcs(self, ddtzcs): + self.ddtzcs[...] 
= ddtzcs + + @property + def ddtzsc(self): + """ + Element ddtzsc ftype=real(8) pytype=float + + + Defined at global.fpp line 296 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddtzsc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddtzsc = self._arrays[array_handle] + else: + ddtzsc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddtzsc) + self._arrays[array_handle] = ddtzsc + return ddtzsc + + @ddtzsc.setter + def ddtzsc(self, ddtzsc): + self.ddtzsc[...] = ddtzsc + + @property + def ddtzss(self): + """ + Element ddtzss ftype=real(8) pytype=float + + + Defined at global.fpp line 296 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddtzss(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddtzss = self._arrays[array_handle] + else: + ddtzss = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddtzss) + self._arrays[array_handle] = ddtzss + return ddtzss + + @ddtzss.setter + def ddtzss(self, ddtzss): + self.ddtzss[...] = ddtzss + + @property + def ddzzcc(self): + """ + Element ddzzcc ftype=real(8) pytype=float + + + Defined at global.fpp line 297 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddzzcc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddzzcc = self._arrays[array_handle] + else: + ddzzcc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddzzcc) + self._arrays[array_handle] = ddzzcc + return ddzzcc + + @ddzzcc.setter + def ddzzcc(self, ddzzcc): + self.ddzzcc[...] = ddzzcc + + @property + def ddzzcs(self): + """ + Element ddzzcs ftype=real(8) pytype=float + + + Defined at global.fpp line 297 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddzzcs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddzzcs = self._arrays[array_handle] + else: + ddzzcs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddzzcs) + self._arrays[array_handle] = ddzzcs + return ddzzcs + + @ddzzcs.setter + def ddzzcs(self, ddzzcs): + self.ddzzcs[...] = ddzzcs + + @property + def ddzzsc(self): + """ + Element ddzzsc ftype=real(8) pytype=float + + + Defined at global.fpp line 297 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddzzsc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddzzsc = self._arrays[array_handle] + else: + ddzzsc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddzzsc) + self._arrays[array_handle] = ddzzsc + return ddzzsc + + @ddzzsc.setter + def ddzzsc(self, ddzzsc): + self.ddzzsc[...] 
= ddzzsc + + @property + def ddzzss(self): + """ + Element ddzzss ftype=real(8) pytype=float + + + Defined at global.fpp line 297 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ddzzss(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ddzzss = self._arrays[array_handle] + else: + ddzzss = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ddzzss) + self._arrays[array_handle] = ddzzss + return ddzzss + + @ddzzss.setter + def ddzzss(self, ddzzss): + self.ddzzss[...] = ddzzss + + @property + def tsc(self): + """ + Element tsc ftype=real(8) pytype=float + + + Defined at global.fpp line 299 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tsc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tsc = self._arrays[array_handle] + else: + tsc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tsc) + self._arrays[array_handle] = tsc + return tsc + + @tsc.setter + def tsc(self, tsc): + self.tsc[...] = tsc + + @property + def tss(self): + """ + Element tss ftype=real(8) pytype=float + + + Defined at global.fpp line 299 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tss(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tss = self._arrays[array_handle] + else: + tss = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tss) + self._arrays[array_handle] = tss + return tss + + @tss.setter + def tss(self, tss): + self.tss[...] = tss + + @property + def dtc(self): + """ + Element dtc ftype=real(8) pytype=float + + + Defined at global.fpp line 299 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dtc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dtc = self._arrays[array_handle] + else: + dtc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dtc) + self._arrays[array_handle] = dtc + return dtc + + @dtc.setter + def dtc(self, dtc): + self.dtc[...] = dtc + + @property + def dts(self): + """ + Element dts ftype=real(8) pytype=float + + + Defined at global.fpp line 299 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dts(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dts = self._arrays[array_handle] + else: + dts = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dts) + self._arrays[array_handle] = dts + return dts + + @dts.setter + def dts(self, dts): + self.dts[...] = dts + + @property + def dzc(self): + """ + Element dzc ftype=real(8) pytype=float + + + Defined at global.fpp line 299 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dzc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dzc = self._arrays[array_handle] + else: + dzc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dzc) + self._arrays[array_handle] = dzc + return dzc + + @dzc.setter + def dzc(self, dzc): + self.dzc[...] 
= dzc + + @property + def dzs(self): + """ + Element dzs ftype=real(8) pytype=float + + + Defined at global.fpp line 299 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dzs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dzs = self._arrays[array_handle] + else: + dzs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dzs) + self._arrays[array_handle] = dzs + return dzs + + @dzs.setter + def dzs(self, dzs): + self.dzs[...] = dzs + + @property + def ttc(self): + """ + Element ttc ftype=real(8) pytype=float + + + Defined at global.fpp line 300 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ttc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ttc = self._arrays[array_handle] + else: + ttc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ttc) + self._arrays[array_handle] = ttc + return ttc + + @ttc.setter + def ttc(self, ttc): + self.ttc[...] = ttc + + @property + def tzc(self): + """ + Element tzc ftype=real(8) pytype=float + + + Defined at global.fpp line 300 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tzc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tzc = self._arrays[array_handle] + else: + tzc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tzc) + self._arrays[array_handle] = tzc + return tzc + + @tzc.setter + def tzc(self, tzc): + self.tzc[...] = tzc + + @property + def tts(self): + """ + Element tts ftype=real(8) pytype=float + + + Defined at global.fpp line 300 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tts(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tts = self._arrays[array_handle] + else: + tts = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tts) + self._arrays[array_handle] = tts + return tts + + @tts.setter + def tts(self, tts): + self.tts[...] = tts + + @property + def tzs(self): + """ + Element tzs ftype=real(8) pytype=float + + + Defined at global.fpp line 300 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tzs(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tzs = self._arrays[array_handle] + else: + tzs = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tzs) + self._arrays[array_handle] = tzs + return tzs + + @tzs.setter + def tzs(self, tzs): + self.tzs[...] = tzs + + @property + def dtflux(self): + """ + Element dtflux ftype=real(8) pytype=float + + + Defined at global.fpp line 302 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dtflux(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dtflux = self._arrays[array_handle] + else: + dtflux = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dtflux) + self._arrays[array_handle] = dtflux + return dtflux + + @dtflux.setter + def dtflux(self, dtflux): + self.dtflux[...] 
= dtflux + + @property + def dpflux(self): + """ + Element dpflux ftype=real(8) pytype=float + + + Defined at global.fpp line 302 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dpflux(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dpflux = self._arrays[array_handle] + else: + dpflux = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dpflux) + self._arrays[array_handle] = dpflux + return dpflux + + @dpflux.setter + def dpflux(self, dpflux): + self.dpflux[...] = dpflux + + @property + def sweight(self): + """ + Element sweight ftype=real(8) pytype=float + + + Defined at global.fpp line 304 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__sweight(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + sweight = self._arrays[array_handle] + else: + sweight = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__sweight) + self._arrays[array_handle] = sweight + return sweight + + @sweight.setter + def sweight(self, sweight): + self.sweight[...] = sweight + + @property + def nadof(self): + """ + Element nadof ftype=integer pytype=int + + + Defined at global.fpp line 310 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__nadof(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + nadof = self._arrays[array_handle] + else: + nadof = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__nadof) + self._arrays[array_handle] = nadof + return nadof + + @nadof.setter + def nadof(self, nadof): + self.nadof[...] = nadof + + @property + def nfielddof(self): + """ + Element nfielddof ftype=integer pytype=int + + + Defined at global.fpp line 311 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__nfielddof(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + nfielddof = self._arrays[array_handle] + else: + nfielddof = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__nfielddof) + self._arrays[array_handle] = nfielddof + return nfielddof + + @nfielddof.setter + def nfielddof(self, nfielddof): + self.nfielddof[...] = nfielddof + + @property + def lma(self): + """ + Element lma ftype=integer pytype=int + + + Defined at global.fpp line 328 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lma(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lma = self._arrays[array_handle] + else: + lma = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lma) + self._arrays[array_handle] = lma + return lma + + @lma.setter + def lma(self, lma): + self.lma[...] 
= lma + + @property + def lmb(self): + """ + Element lmb ftype=integer pytype=int + + + Defined at global.fpp line 328 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmb(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmb = self._arrays[array_handle] + else: + lmb = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmb) + self._arrays[array_handle] = lmb + return lmb + + @lmb.setter + def lmb(self, lmb): + self.lmb[...] = lmb + + @property + def lmc(self): + """ + Element lmc ftype=integer pytype=int + + + Defined at global.fpp line 328 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmc(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmc = self._arrays[array_handle] + else: + lmc = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmc) + self._arrays[array_handle] = lmc + return lmc + + @lmc.setter + def lmc(self, lmc): + self.lmc[...] = lmc + + @property + def lmd(self): + """ + Element lmd ftype=integer pytype=int + + + Defined at global.fpp line 328 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmd(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmd = self._arrays[array_handle] + else: + lmd = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmd) + self._arrays[array_handle] = lmd + return lmd + + @lmd.setter + def lmd(self, lmd): + self.lmd[...] = lmd + + @property + def lme(self): + """ + Element lme ftype=integer pytype=int + + + Defined at global.fpp line 328 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lme(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lme = self._arrays[array_handle] + else: + lme = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lme) + self._arrays[array_handle] = lme + return lme + + @lme.setter + def lme(self, lme): + self.lme[...] = lme + + @property + def lmf(self): + """ + Element lmf ftype=integer pytype=int + + + Defined at global.fpp line 328 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmf(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmf = self._arrays[array_handle] + else: + lmf = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmf) + self._arrays[array_handle] = lmf + return lmf + + @lmf.setter + def lmf(self, lmf): + self.lmf[...] = lmf + + @property + def lmg(self): + """ + Element lmg ftype=integer pytype=int + + + Defined at global.fpp line 328 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmg(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmg = self._arrays[array_handle] + else: + lmg = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmg) + self._arrays[array_handle] = lmg + return lmg + + @lmg.setter + def lmg(self, lmg): + self.lmg[...] 
= lmg + + @property + def lmh(self): + """ + Element lmh ftype=integer pytype=int + + + Defined at global.fpp line 328 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmh(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmh = self._arrays[array_handle] + else: + lmh = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmh) + self._arrays[array_handle] = lmh + return lmh + + @lmh.setter + def lmh(self, lmh): + self.lmh[...] = lmh + + @property + def lmavalue(self): + """ + Element lmavalue ftype=real(8) pytype=float + + + Defined at global.fpp line 329 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmavalue(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmavalue = self._arrays[array_handle] + else: + lmavalue = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmavalue) + self._arrays[array_handle] = lmavalue + return lmavalue + + @lmavalue.setter + def lmavalue(self, lmavalue): + self.lmavalue[...] = lmavalue + + @property + def lmbvalue(self): + """ + Element lmbvalue ftype=real(8) pytype=float + + + Defined at global.fpp line 329 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmbvalue(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmbvalue = self._arrays[array_handle] + else: + lmbvalue = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmbvalue) + self._arrays[array_handle] = lmbvalue + return lmbvalue + + @lmbvalue.setter + def lmbvalue(self, lmbvalue): + self.lmbvalue[...] = lmbvalue + + @property + def lmcvalue(self): + """ + Element lmcvalue ftype=real(8) pytype=float + + + Defined at global.fpp line 329 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmcvalue(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmcvalue = self._arrays[array_handle] + else: + lmcvalue = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmcvalue) + self._arrays[array_handle] = lmcvalue + return lmcvalue + + @lmcvalue.setter + def lmcvalue(self, lmcvalue): + self.lmcvalue[...] = lmcvalue + + @property + def lmdvalue(self): + """ + Element lmdvalue ftype=real(8) pytype=float + + + Defined at global.fpp line 329 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmdvalue(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmdvalue = self._arrays[array_handle] + else: + lmdvalue = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmdvalue) + self._arrays[array_handle] = lmdvalue + return lmdvalue + + @lmdvalue.setter + def lmdvalue(self, lmdvalue): + self.lmdvalue[...] 
= lmdvalue + + @property + def lmevalue(self): + """ + Element lmevalue ftype=real(8) pytype=float + + + Defined at global.fpp line 329 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmevalue(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmevalue = self._arrays[array_handle] + else: + lmevalue = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmevalue) + self._arrays[array_handle] = lmevalue + return lmevalue + + @lmevalue.setter + def lmevalue(self, lmevalue): + self.lmevalue[...] = lmevalue + + @property + def lmfvalue(self): + """ + Element lmfvalue ftype=real(8) pytype=float + + + Defined at global.fpp line 329 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmfvalue(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmfvalue = self._arrays[array_handle] + else: + lmfvalue = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmfvalue) + self._arrays[array_handle] = lmfvalue + return lmfvalue + + @lmfvalue.setter + def lmfvalue(self, lmfvalue): + self.lmfvalue[...] = lmfvalue + + @property + def lmgvalue(self): + """ + Element lmgvalue ftype=real(8) pytype=float + + + Defined at global.fpp line 330 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmgvalue(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmgvalue = self._arrays[array_handle] + else: + lmgvalue = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmgvalue) + self._arrays[array_handle] = lmgvalue + return lmgvalue + + @lmgvalue.setter + def lmgvalue(self, lmgvalue): + self.lmgvalue[...] = lmgvalue + + @property + def lmhvalue(self): + """ + Element lmhvalue ftype=real(8) pytype=float + + + Defined at global.fpp line 330 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lmhvalue(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lmhvalue = self._arrays[array_handle] + else: + lmhvalue = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lmhvalue) + self._arrays[array_handle] = lmhvalue + return lmhvalue + + @lmhvalue.setter + def lmhvalue(self, lmhvalue): + self.lmhvalue[...] = lmhvalue + + @property + def fso(self): + """ + Element fso ftype=integer pytype=int + + + Defined at global.fpp line 333 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__fso(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + fso = self._arrays[array_handle] + else: + fso = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__fso) + self._arrays[array_handle] = fso + return fso + + @fso.setter + def fso(self, fso): + self.fso[...] 
= fso + + @property + def fse(self): + """ + Element fse ftype=integer pytype=int + + + Defined at global.fpp line 333 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__fse(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + fse = self._arrays[array_handle] + else: + fse = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__fse) + self._arrays[array_handle] = fse + return fse + + @fse.setter + def fse(self, fse): + self.fse[...] = fse + + @property + def lcoordinatesingularity(self): + """ + Element lcoordinatesingularity ftype=logical pytype=bool + + + Defined at global.fpp line 334 + + """ + return _spec.f90wrap_allglobal__get__lcoordinatesingularity() + + @lcoordinatesingularity.setter + def lcoordinatesingularity(self, lcoordinatesingularity): + _spec.f90wrap_allglobal__set__lcoordinatesingularity(lcoordinatesingularity) + + @property + def lplasmaregion(self): + """ + Element lplasmaregion ftype=logical pytype=bool + + + Defined at global.fpp line 334 + + """ + return _spec.f90wrap_allglobal__get__lplasmaregion() + + @lplasmaregion.setter + def lplasmaregion(self, lplasmaregion): + _spec.f90wrap_allglobal__set__lplasmaregion(lplasmaregion) + + @property + def lvacuumregion(self): + """ + Element lvacuumregion ftype=logical pytype=bool + + + Defined at global.fpp line 334 + + """ + return _spec.f90wrap_allglobal__get__lvacuumregion() + + @lvacuumregion.setter + def lvacuumregion(self, lvacuumregion): + _spec.f90wrap_allglobal__set__lvacuumregion(lvacuumregion) + + @property + def lsavedguvij(self): + """ + Element lsavedguvij ftype=logical pytype=bool + + + Defined at global.fpp line 335 + + """ + return _spec.f90wrap_allglobal__get__lsavedguvij() + + @lsavedguvij.setter + def lsavedguvij(self, lsavedguvij): + _spec.f90wrap_allglobal__set__lsavedguvij(lsavedguvij) + + @property + def localconstraint(self): + """ + Element localconstraint ftype=logical pytype=bool + + + Defined at global.fpp line 336 + + """ + return _spec.f90wrap_allglobal__get__localconstraint() + + @localconstraint.setter + def localconstraint(self, localconstraint): + _spec.f90wrap_allglobal__set__localconstraint(localconstraint) + + @property + def dma(self): + """ + Element dma ftype=real(8) pytype=float + + + Defined at global.fpp line 347 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dma(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dma = self._arrays[array_handle] + else: + dma = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dma) + self._arrays[array_handle] = dma + return dma + + @dma.setter + def dma(self, dma): + self.dma[...] = dma + + @property + def dmb(self): + """ + Element dmb ftype=real(8) pytype=float + + + Defined at global.fpp line 347 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dmb(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dmb = self._arrays[array_handle] + else: + dmb = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dmb) + self._arrays[array_handle] = dmb + return dmb + + @dmb.setter + def dmb(self, dmb): + self.dmb[...] 
= dmb + + @property + def dmd(self): + """ + Element dmd ftype=real(8) pytype=float + + + Defined at global.fpp line 348 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dmd(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dmd = self._arrays[array_handle] + else: + dmd = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dmd) + self._arrays[array_handle] = dmd + return dmd + + @dmd.setter + def dmd(self, dmd): + self.dmd[...] = dmd + + @property + def dmas(self): + """ + Element dmas ftype=real(8) pytype=float + + + Defined at global.fpp line 349 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dmas(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dmas = self._arrays[array_handle] + else: + dmas = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dmas) + self._arrays[array_handle] = dmas + return dmas + + @dmas.setter + def dmas(self, dmas): + self.dmas[...] = dmas + + @property + def dmds(self): + """ + Element dmds ftype=real(8) pytype=float + + + Defined at global.fpp line 349 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dmds(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dmds = self._arrays[array_handle] + else: + dmds = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dmds) + self._arrays[array_handle] = dmds + return dmds + + @dmds.setter + def dmds(self, dmds): + self.dmds[...] = dmds + + @property + def idmas(self): + """ + Element idmas ftype=integer pytype=int + + + Defined at global.fpp line 350 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__idmas(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + idmas = self._arrays[array_handle] + else: + idmas = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__idmas) + self._arrays[array_handle] = idmas + return idmas + + @idmas.setter + def idmas(self, idmas): + self.idmas[...] = idmas + + @property + def jdmas(self): + """ + Element jdmas ftype=integer pytype=int + + + Defined at global.fpp line 350 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__jdmas(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + jdmas = self._arrays[array_handle] + else: + jdmas = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__jdmas) + self._arrays[array_handle] = jdmas + return jdmas + + @jdmas.setter + def jdmas(self, jdmas): + self.jdmas[...] 
= jdmas + + @property + def ndmasmax(self): + """ + Element ndmasmax ftype=integer pytype=int + + + Defined at global.fpp line 351 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ndmasmax(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ndmasmax = self._arrays[array_handle] + else: + ndmasmax = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ndmasmax) + self._arrays[array_handle] = ndmasmax + return ndmasmax + + @ndmasmax.setter + def ndmasmax(self, ndmasmax): + self.ndmasmax[...] = ndmasmax + + @property + def ndmas(self): + """ + Element ndmas ftype=integer pytype=int + + + Defined at global.fpp line 351 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ndmas(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ndmas = self._arrays[array_handle] + else: + ndmas = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ndmas) + self._arrays[array_handle] = ndmas + return ndmas + + @ndmas.setter + def ndmas(self, ndmas): + self.ndmas[...] = ndmas + + @property + def dmg(self): + """ + Element dmg ftype=real(8) pytype=float + + + Defined at global.fpp line 352 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dmg(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dmg = self._arrays[array_handle] + else: + dmg = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dmg) + self._arrays[array_handle] = dmg + return dmg + + @dmg.setter + def dmg(self, dmg): + self.dmg[...] = dmg + + @property + def solution(self): + """ + Element solution ftype=real(8) pytype=float + + + Defined at global.fpp line 353 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__solution(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + solution = self._arrays[array_handle] + else: + solution = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__solution) + self._arrays[array_handle] = solution + return solution + + @solution.setter + def solution(self, solution): + self.solution[...] = solution + + @property + def gmreslastsolution(self): + """ + Element gmreslastsolution ftype=real(8) pytype=float + + + Defined at global.fpp line 354 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gmreslastsolution(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gmreslastsolution = self._arrays[array_handle] + else: + gmreslastsolution = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gmreslastsolution) + self._arrays[array_handle] = gmreslastsolution + return gmreslastsolution + + @gmreslastsolution.setter + def gmreslastsolution(self, gmreslastsolution): + self.gmreslastsolution[...] 
= gmreslastsolution + + @property + def mbpsi(self): + """ + Element mbpsi ftype=real(8) pytype=float + + + Defined at global.fpp line 356 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__mbpsi(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + mbpsi = self._arrays[array_handle] + else: + mbpsi = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__mbpsi) + self._arrays[array_handle] = mbpsi + return mbpsi + + @mbpsi.setter + def mbpsi(self, mbpsi): + self.mbpsi[...] = mbpsi + + @property + def liluprecond(self): + """ + Element liluprecond ftype=logical pytype=bool + + + Defined at global.fpp line 359 + + """ + return _spec.f90wrap_allglobal__get__liluprecond() + + @liluprecond.setter + def liluprecond(self, liluprecond): + _spec.f90wrap_allglobal__set__liluprecond(liluprecond) + + @property + def beltramiinverse(self): + """ + Element beltramiinverse ftype=real(8) pytype=float + + + Defined at global.fpp line 360 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__beltramiinverse(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + beltramiinverse = self._arrays[array_handle] + else: + beltramiinverse = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__beltramiinverse) + self._arrays[array_handle] = beltramiinverse + return beltramiinverse + + @beltramiinverse.setter + def beltramiinverse(self, beltramiinverse): + self.beltramiinverse[...] = beltramiinverse + + @property + def diotadxup(self): + """ + Element diotadxup ftype=real(8) pytype=float + + + Defined at global.fpp line 362 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__diotadxup(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + diotadxup = self._arrays[array_handle] + else: + diotadxup = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__diotadxup) + self._arrays[array_handle] = diotadxup + return diotadxup + + @diotadxup.setter + def diotadxup(self, diotadxup): + self.diotadxup[...] = diotadxup + + @property + def ditgpdxtp(self): + """ + Element ditgpdxtp ftype=real(8) pytype=float + + + Defined at global.fpp line 363 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ditgpdxtp(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ditgpdxtp = self._arrays[array_handle] + else: + ditgpdxtp = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ditgpdxtp) + self._arrays[array_handle] = ditgpdxtp + return ditgpdxtp + + @ditgpdxtp.setter + def ditgpdxtp(self, ditgpdxtp): + self.ditgpdxtp[...] 
= ditgpdxtp + + @property + def glambda(self): + """ + Element glambda ftype=real(8) pytype=float + + + Defined at global.fpp line 364 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__glambda(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + glambda = self._arrays[array_handle] + else: + glambda = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__glambda) + self._arrays[array_handle] = glambda + return glambda + + @glambda.setter + def glambda(self, glambda): + self.glambda[...] = glambda + + @property + def lmns(self): + """ + Element lmns ftype=integer pytype=int + + + Defined at global.fpp line 365 + + """ + return _spec.f90wrap_allglobal__get__lmns() + + @lmns.setter + def lmns(self, lmns): + _spec.f90wrap_allglobal__set__lmns(lmns) + + @property + def bemn(self): + """ + Element bemn ftype=real(8) pytype=float + + + Defined at global.fpp line 371 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bemn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bemn = self._arrays[array_handle] + else: + bemn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bemn) + self._arrays[array_handle] = bemn + return bemn + + @bemn.setter + def bemn(self, bemn): + self.bemn[...] = bemn + + @property + def iomn(self): + """ + Element iomn ftype=real(8) pytype=float + + + Defined at global.fpp line 371 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__iomn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + iomn = self._arrays[array_handle] + else: + iomn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__iomn) + self._arrays[array_handle] = iomn + return iomn + + @iomn.setter + def iomn(self, iomn): + self.iomn[...] = iomn + + @property + def somn(self): + """ + Element somn ftype=real(8) pytype=float + + + Defined at global.fpp line 371 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__somn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + somn = self._arrays[array_handle] + else: + somn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__somn) + self._arrays[array_handle] = somn + return somn + + @somn.setter + def somn(self, somn): + self.somn[...] = somn + + @property + def pomn(self): + """ + Element pomn ftype=real(8) pytype=float + + + Defined at global.fpp line 371 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__pomn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + pomn = self._arrays[array_handle] + else: + pomn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__pomn) + self._arrays[array_handle] = pomn + return pomn + + @pomn.setter + def pomn(self, pomn): + self.pomn[...] 
= pomn + + @property + def bomn(self): + """ + Element bomn ftype=real(8) pytype=float + + + Defined at global.fpp line 372 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bomn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bomn = self._arrays[array_handle] + else: + bomn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bomn) + self._arrays[array_handle] = bomn + return bomn + + @bomn.setter + def bomn(self, bomn): + self.bomn[...] = bomn + + @property + def iemn(self): + """ + Element iemn ftype=real(8) pytype=float + + + Defined at global.fpp line 372 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__iemn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + iemn = self._arrays[array_handle] + else: + iemn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__iemn) + self._arrays[array_handle] = iemn + return iemn + + @iemn.setter + def iemn(self, iemn): + self.iemn[...] = iemn + + @property + def semn(self): + """ + Element semn ftype=real(8) pytype=float + + + Defined at global.fpp line 372 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__semn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + semn = self._arrays[array_handle] + else: + semn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__semn) + self._arrays[array_handle] = semn + return semn + + @semn.setter + def semn(self, semn): + self.semn[...] = semn + + @property + def pemn(self): + """ + Element pemn ftype=real(8) pytype=float + + + Defined at global.fpp line 372 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__pemn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + pemn = self._arrays[array_handle] + else: + pemn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__pemn) + self._arrays[array_handle] = pemn + return pemn + + @pemn.setter + def pemn(self, pemn): + self.pemn[...] = pemn + + @property + def bbe(self): + """ + Element bbe ftype=real(8) pytype=float + + + Defined at global.fpp line 373 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bbe(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bbe = self._arrays[array_handle] + else: + bbe = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bbe) + self._arrays[array_handle] = bbe + return bbe + + @bbe.setter + def bbe(self, bbe): + self.bbe[...] = bbe + + @property + def iio(self): + """ + Element iio ftype=real(8) pytype=float + + + Defined at global.fpp line 373 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__iio(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + iio = self._arrays[array_handle] + else: + iio = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__iio) + self._arrays[array_handle] = iio + return iio + + @iio.setter + def iio(self, iio): + self.iio[...] 
= iio + + @property + def bbo(self): + """ + Element bbo ftype=real(8) pytype=float + + + Defined at global.fpp line 373 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bbo(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bbo = self._arrays[array_handle] + else: + bbo = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bbo) + self._arrays[array_handle] = bbo + return bbo + + @bbo.setter + def bbo(self, bbo): + self.bbo[...] = bbo + + @property + def iie(self): + """ + Element iie ftype=real(8) pytype=float + + + Defined at global.fpp line 373 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__iie(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + iie = self._arrays[array_handle] + else: + iie = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__iie) + self._arrays[array_handle] = iie + return iie + + @iie.setter + def iie(self, iie): + self.iie[...] = iie + + @property + def btemn(self): + """ + Element btemn ftype=real(8) pytype=float + + + Defined at global.fpp line 379 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__btemn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + btemn = self._arrays[array_handle] + else: + btemn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__btemn) + self._arrays[array_handle] = btemn + return btemn + + @btemn.setter + def btemn(self, btemn): + self.btemn[...] = btemn + + @property + def bzemn(self): + """ + Element bzemn ftype=real(8) pytype=float + + + Defined at global.fpp line 379 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bzemn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bzemn = self._arrays[array_handle] + else: + bzemn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bzemn) + self._arrays[array_handle] = bzemn + return bzemn + + @bzemn.setter + def bzemn(self, bzemn): + self.bzemn[...] = bzemn + + @property + def btomn(self): + """ + Element btomn ftype=real(8) pytype=float + + + Defined at global.fpp line 379 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__btomn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + btomn = self._arrays[array_handle] + else: + btomn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__btomn) + self._arrays[array_handle] = btomn + return btomn + + @btomn.setter + def btomn(self, btomn): + self.btomn[...] = btomn + + @property + def bzomn(self): + """ + Element bzomn ftype=real(8) pytype=float + + + Defined at global.fpp line 379 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bzomn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bzomn = self._arrays[array_handle] + else: + bzomn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bzomn) + self._arrays[array_handle] = bzomn + return bzomn + + @bzomn.setter + def bzomn(self, bzomn): + self.bzomn[...] 
= bzomn + + @property + def bloweremn(self): + """ + Element bloweremn ftype=real(8) pytype=float + + + Defined at global.fpp line 385 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bloweremn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bloweremn = self._arrays[array_handle] + else: + bloweremn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bloweremn) + self._arrays[array_handle] = bloweremn + return bloweremn + + @bloweremn.setter + def bloweremn(self, bloweremn): + self.bloweremn[...] = bloweremn + + @property + def bloweromn(self): + """ + Element bloweromn ftype=real(8) pytype=float + + + Defined at global.fpp line 385 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__bloweromn(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + bloweromn = self._arrays[array_handle] + else: + bloweromn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__bloweromn) + self._arrays[array_handle] = bloweromn + return bloweromn + + @bloweromn.setter + def bloweromn(self, bloweromn): + self.bloweromn[...] = bloweromn + + @property + def lgdof(self): + """ + Element lgdof ftype=integer pytype=int + + + Defined at global.fpp line 391 + + """ + return _spec.f90wrap_allglobal__get__lgdof() + + @lgdof.setter + def lgdof(self, lgdof): + _spec.f90wrap_allglobal__set__lgdof(lgdof) + + @property + def ngdof(self): + """ + Element ngdof ftype=integer pytype=int + + + Defined at global.fpp line 392 + + """ + return _spec.f90wrap_allglobal__get__ngdof() + + @ngdof.setter + def ngdof(self, ngdof): + _spec.f90wrap_allglobal__set__ngdof(ngdof) + + @property + def dbbdrz(self): + """ + Element dbbdrz ftype=real(8) pytype=float + + + Defined at global.fpp line 400 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dbbdrz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dbbdrz = self._arrays[array_handle] + else: + dbbdrz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dbbdrz) + self._arrays[array_handle] = dbbdrz + return dbbdrz + + @dbbdrz.setter + def dbbdrz(self, dbbdrz): + self.dbbdrz[...] = dbbdrz + + @property + def diidrz(self): + """ + Element diidrz ftype=real(8) pytype=float + + + Defined at global.fpp line 401 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__diidrz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + diidrz = self._arrays[array_handle] + else: + diidrz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__diidrz) + self._arrays[array_handle] = diidrz + return diidrz + + @diidrz.setter + def diidrz(self, diidrz): + self.diidrz[...] 
= diidrz + + @property + def dffdrz(self): + """ + Element dffdrz ftype=real(8) pytype=float + + + Defined at global.fpp line 402 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dffdrz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dffdrz = self._arrays[array_handle] + else: + dffdrz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dffdrz) + self._arrays[array_handle] = dffdrz + return dffdrz + + @dffdrz.setter + def dffdrz(self, dffdrz): + self.dffdrz[...] = dffdrz + + @property + def dbbdmp(self): + """ + Element dbbdmp ftype=real(8) pytype=float + + + Defined at global.fpp line 403 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dbbdmp(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dbbdmp = self._arrays[array_handle] + else: + dbbdmp = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dbbdmp) + self._arrays[array_handle] = dbbdmp + return dbbdmp + + @dbbdmp.setter + def dbbdmp(self, dbbdmp): + self.dbbdmp[...] = dbbdmp + + @property + def dmupfdx(self): + """ + Element dmupfdx ftype=real(8) pytype=float + + + Defined at global.fpp line 445 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dmupfdx(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dmupfdx = self._arrays[array_handle] + else: + dmupfdx = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dmupfdx) + self._arrays[array_handle] = dmupfdx + return dmupfdx + + @dmupfdx.setter + def dmupfdx(self, dmupfdx): + self.dmupfdx[...] = dmupfdx + + @property + def lhessianallocated(self): + """ + Element lhessianallocated ftype=logical pytype=bool + + + Defined at global.fpp line 453 + + """ + return _spec.f90wrap_allglobal__get__lhessianallocated() + + @lhessianallocated.setter + def lhessianallocated(self, lhessianallocated): + _spec.f90wrap_allglobal__set__lhessianallocated(lhessianallocated) + + @property + def hessian(self): + """ + Element hessian ftype=real(8) pytype=float + + + Defined at global.fpp line 454 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__hessian(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + hessian = self._arrays[array_handle] + else: + hessian = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__hessian) + self._arrays[array_handle] = hessian + return hessian + + @hessian.setter + def hessian(self, hessian): + self.hessian[...] = hessian + + @property + def dessian(self): + """ + Element dessian ftype=real(8) pytype=float + + + Defined at global.fpp line 455 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dessian(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dessian = self._arrays[array_handle] + else: + dessian = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dessian) + self._arrays[array_handle] = dessian + return dessian + + @dessian.setter + def dessian(self, dessian): + self.dessian[...] 
= dessian + + @property + def cosi(self): + """ + Element cosi ftype=real(8) pytype=float + + + Defined at global.fpp line 462 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__cosi(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + cosi = self._arrays[array_handle] + else: + cosi = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__cosi) + self._arrays[array_handle] = cosi + return cosi + + @cosi.setter + def cosi(self, cosi): + self.cosi[...] = cosi + + @property + def sini(self): + """ + Element sini ftype=real(8) pytype=float + + + Defined at global.fpp line 462 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__sini(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + sini = self._arrays[array_handle] + else: + sini = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__sini) + self._arrays[array_handle] = sini + return sini + + @sini.setter + def sini(self, sini): + self.sini[...] = sini + + @property + def gteta(self): + """ + Element gteta ftype=real(8) pytype=float + + + Defined at global.fpp line 462 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gteta(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gteta = self._arrays[array_handle] + else: + gteta = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gteta) + self._arrays[array_handle] = gteta + return gteta + + @gteta.setter + def gteta(self, gteta): + self.gteta[...] = gteta + + @property + def gzeta(self): + """ + Element gzeta ftype=real(8) pytype=float + + + Defined at global.fpp line 462 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gzeta(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gzeta = self._arrays[array_handle] + else: + gzeta = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gzeta) + self._arrays[array_handle] = gzeta + return gzeta + + @gzeta.setter + def gzeta(self, gzeta): + self.gzeta[...] = gzeta + + @property + def ajk(self): + """ + Element ajk ftype=real(8) pytype=float + + + Defined at global.fpp line 463 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__ajk(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + ajk = self._arrays[array_handle] + else: + ajk = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__ajk) + self._arrays[array_handle] = ajk + return ajk + + @ajk.setter + def ajk(self, ajk): + self.ajk[...] = ajk + + @property + def dradr(self): + """ + Element dradr ftype=real(8) pytype=float + + + Defined at global.fpp line 464 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dradr(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dradr = self._arrays[array_handle] + else: + dradr = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dradr) + self._arrays[array_handle] = dradr + return dradr + + @dradr.setter + def dradr(self, dradr): + self.dradr[...] 
= dradr + + @property + def dradz(self): + """ + Element dradz ftype=real(8) pytype=float + + + Defined at global.fpp line 464 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dradz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dradz = self._arrays[array_handle] + else: + dradz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dradz) + self._arrays[array_handle] = dradz + return dradz + + @dradz.setter + def dradz(self, dradz): + self.dradz[...] = dradz + + @property + def dzadr(self): + """ + Element dzadr ftype=real(8) pytype=float + + + Defined at global.fpp line 464 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dzadr(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dzadr = self._arrays[array_handle] + else: + dzadr = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dzadr) + self._arrays[array_handle] = dzadr + return dzadr + + @dzadr.setter + def dzadr(self, dzadr): + self.dzadr[...] = dzadr + + @property + def dzadz(self): + """ + Element dzadz ftype=real(8) pytype=float + + + Defined at global.fpp line 464 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dzadz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dzadz = self._arrays[array_handle] + else: + dzadz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dzadz) + self._arrays[array_handle] = dzadz + return dzadz + + @dzadz.setter + def dzadz(self, dzadz): + self.dzadz[...] = dzadz + + @property + def drodr(self): + """ + Element drodr ftype=real(8) pytype=float + + + Defined at global.fpp line 465 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__drodr(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + drodr = self._arrays[array_handle] + else: + drodr = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__drodr) + self._arrays[array_handle] = drodr + return drodr + + @drodr.setter + def drodr(self, drodr): + self.drodr[...] = drodr + + @property + def drodz(self): + """ + Element drodz ftype=real(8) pytype=float + + + Defined at global.fpp line 465 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__drodz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + drodz = self._arrays[array_handle] + else: + drodz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__drodz) + self._arrays[array_handle] = drodz + return drodz + + @drodz.setter + def drodz(self, drodz): + self.drodz[...] 
= drodz + + @property + def dzodr(self): + """ + Element dzodr ftype=real(8) pytype=float + + + Defined at global.fpp line 465 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dzodr(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dzodr = self._arrays[array_handle] + else: + dzodr = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dzodr) + self._arrays[array_handle] = dzodr + return dzodr + + @dzodr.setter + def dzodr(self, dzodr): + self.dzodr[...] = dzodr + + @property + def dzodz(self): + """ + Element dzodz ftype=real(8) pytype=float + + + Defined at global.fpp line 465 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dzodz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dzodz = self._arrays[array_handle] + else: + dzodz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dzodz) + self._arrays[array_handle] = dzodz + return dzodz + + @dzodz.setter + def dzodz(self, dzodz): + self.dzodz[...] = dzodz + + @property + def djkp(self): + """ + Element djkp ftype=integer pytype=int + + + Defined at global.fpp line 466 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__djkp(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + djkp = self._arrays[array_handle] + else: + djkp = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__djkp) + self._arrays[array_handle] = djkp + return djkp + + @djkp.setter + def djkp(self, djkp): + self.djkp[...] = djkp + + @property + def djkm(self): + """ + Element djkm ftype=integer pytype=int + + + Defined at global.fpp line 466 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__djkm(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + djkm = self._arrays[array_handle] + else: + djkm = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__djkm) + self._arrays[array_handle] = djkm + return djkm + + @djkm.setter + def djkm(self, djkm): + self.djkm[...] = djkm + + @property + def lbbintegral(self): + """ + Element lbbintegral ftype=real(8) pytype=float + + + Defined at global.fpp line 498 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__lbbintegral(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + lbbintegral = self._arrays[array_handle] + else: + lbbintegral = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__lbbintegral) + self._arrays[array_handle] = lbbintegral + return lbbintegral + + @lbbintegral.setter + def lbbintegral(self, lbbintegral): + self.lbbintegral[...] 
= lbbintegral + + @property + def labintegral(self): + """ + Element labintegral ftype=real(8) pytype=float + + + Defined at global.fpp line 499 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__labintegral(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + labintegral = self._arrays[array_handle] + else: + labintegral = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__labintegral) + self._arrays[array_handle] = labintegral + return labintegral + + @labintegral.setter + def labintegral(self, labintegral): + self.labintegral[...] = labintegral + + @property + def vvolume(self): + """ + Element vvolume ftype=real(8) pytype=float + + + Defined at global.fpp line 503 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__vvolume(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + vvolume = self._arrays[array_handle] + else: + vvolume = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__vvolume) + self._arrays[array_handle] = vvolume + return vvolume + + @vvolume.setter + def vvolume(self, vvolume): + self.vvolume[...] = vvolume + + @property + def dvolume(self): + """ + Element dvolume ftype=real(8) pytype=float + + + Defined at global.fpp line 504 + + """ + return _spec.f90wrap_allglobal__get__dvolume() + + @dvolume.setter + def dvolume(self, dvolume): + _spec.f90wrap_allglobal__set__dvolume(dvolume) + + @property + def ivol(self): + """ + Element ivol ftype=integer pytype=int + + + Defined at global.fpp line 507 + + """ + return _spec.f90wrap_allglobal__get__ivol() + + @ivol.setter + def ivol(self, ivol): + _spec.f90wrap_allglobal__set__ivol(ivol) + + @property + def gbzeta(self): + """ + Element gbzeta ftype=real(8) pytype=float + + + Defined at global.fpp line 508 + + """ + return _spec.f90wrap_allglobal__get__gbzeta() + + @gbzeta.setter + def gbzeta(self, gbzeta): + _spec.f90wrap_allglobal__set__gbzeta(gbzeta) + + @property + def iquad(self): + """ + Element iquad ftype=integer pytype=int + + + Defined at global.fpp line 509 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__iquad(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + iquad = self._arrays[array_handle] + else: + iquad = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__iquad) + self._arrays[array_handle] = iquad + return iquad + + @iquad.setter + def iquad(self, iquad): + self.iquad[...] = iquad + + @property + def gaussianweight(self): + """ + Element gaussianweight ftype=real(8) pytype=float + + + Defined at global.fpp line 510 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gaussianweight(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gaussianweight = self._arrays[array_handle] + else: + gaussianweight = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gaussianweight) + self._arrays[array_handle] = gaussianweight + return gaussianweight + + @gaussianweight.setter + def gaussianweight(self, gaussianweight): + self.gaussianweight[...] 
= gaussianweight + + @property + def gaussianabscissae(self): + """ + Element gaussianabscissae ftype=real(8) pytype=float + + + Defined at global.fpp line 510 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__gaussianabscissae(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + gaussianabscissae = self._arrays[array_handle] + else: + gaussianabscissae = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__gaussianabscissae) + self._arrays[array_handle] = gaussianabscissae + return gaussianabscissae + + @gaussianabscissae.setter + def gaussianabscissae(self, gaussianabscissae): + self.gaussianabscissae[...] = gaussianabscissae + + @property + def lblinear(self): + """ + Element lblinear ftype=logical pytype=bool + + + Defined at global.fpp line 511 + + """ + return _spec.f90wrap_allglobal__get__lblinear() + + @lblinear.setter + def lblinear(self, lblinear): + _spec.f90wrap_allglobal__set__lblinear(lblinear) + + @property + def lbnewton(self): + """ + Element lbnewton ftype=logical pytype=bool + + + Defined at global.fpp line 511 + + """ + return _spec.f90wrap_allglobal__get__lbnewton() + + @lbnewton.setter + def lbnewton(self, lbnewton): + _spec.f90wrap_allglobal__set__lbnewton(lbnewton) + + @property + def lbsequad(self): + """ + Element lbsequad ftype=logical pytype=bool + + + Defined at global.fpp line 511 + + """ + return _spec.f90wrap_allglobal__get__lbsequad() + + @lbsequad.setter + def lbsequad(self, lbsequad): + _spec.f90wrap_allglobal__set__lbsequad(lbsequad) + + @property + def orzp(self): + """ + Element orzp ftype=real(8) pytype=float + + + Defined at global.fpp line 512 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__orzp(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + orzp = self._arrays[array_handle] + else: + orzp = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__orzp) + self._arrays[array_handle] = orzp + return orzp + + @orzp.setter + def orzp(self, orzp): + self.orzp[...] = orzp + + @property + def globaljk(self): + """ + Element globaljk ftype=integer pytype=int + + + Defined at global.fpp line 520 + + """ + return _spec.f90wrap_allglobal__get__globaljk() + + @globaljk.setter + def globaljk(self, globaljk): + _spec.f90wrap_allglobal__set__globaljk(globaljk) + + @property + def dxyz(self): + """ + Element dxyz ftype=real(8) pytype=float + + + Defined at global.fpp line 521 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__dxyz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + dxyz = self._arrays[array_handle] + else: + dxyz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__dxyz) + self._arrays[array_handle] = dxyz + return dxyz + + @dxyz.setter + def dxyz(self, dxyz): + self.dxyz[...] 
= dxyz + + @property + def nxyz(self): + """ + Element nxyz ftype=real(8) pytype=float + + + Defined at global.fpp line 522 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__nxyz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + nxyz = self._arrays[array_handle] + else: + nxyz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__nxyz) + self._arrays[array_handle] = nxyz + return nxyz + + @nxyz.setter + def nxyz(self, nxyz): + self.nxyz[...] = nxyz + + @property + def jxyz(self): + """ + Element jxyz ftype=real(8) pytype=float + + + Defined at global.fpp line 523 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__jxyz(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + jxyz = self._arrays[array_handle] + else: + jxyz = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__jxyz) + self._arrays[array_handle] = jxyz + return jxyz + + @jxyz.setter + def jxyz(self, jxyz): + self.jxyz[...] = jxyz + + @property + def tetazeta(self): + """ + Element tetazeta ftype=real(8) pytype=float + + + Defined at global.fpp line 524 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_allglobal__array__tetazeta(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + tetazeta = self._arrays[array_handle] + else: + tetazeta = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_allglobal__array__tetazeta) + self._arrays[array_handle] = tetazeta + return tetazeta + + @tetazeta.setter + def tetazeta(self, tetazeta): + self.tetazeta[...] 
= tetazeta + + @property + def virtualcasingfactor(self): + """ + Element virtualcasingfactor ftype=real(8) pytype=float + + + Defined at global.fpp line 526 + + """ + return _spec.f90wrap_allglobal__get__virtualcasingfactor() + + @virtualcasingfactor.setter + def virtualcasingfactor(self, virtualcasingfactor): + _spec.f90wrap_allglobal__set__virtualcasingfactor(virtualcasingfactor) + + @property + def iberror(self): + """ + Element iberror ftype=integer pytype=int + + + Defined at global.fpp line 527 + + """ + return _spec.f90wrap_allglobal__get__iberror() + + @iberror.setter + def iberror(self, iberror): + _spec.f90wrap_allglobal__set__iberror(iberror) + + @property + def nfreeboundaryiterations(self): + """ + Element nfreeboundaryiterations ftype=integer pytype=int + + + Defined at global.fpp line 528 + + """ + return _spec.f90wrap_allglobal__get__nfreeboundaryiterations() + + @nfreeboundaryiterations.setter + def nfreeboundaryiterations(self, nfreeboundaryiterations): + _spec.f90wrap_allglobal__set__nfreeboundaryiterations(nfreeboundaryiterations) + + @property + def node(self): + """ + Element node ftype=integer pytype=int + + + Defined at global.fpp line 530 + + """ + return _spec.f90wrap_allglobal__get__node() + + @property + def first_free_bound(self): + """ + Element first_free_bound ftype=logical pytype=bool + + + Defined at global.fpp line 532 + + """ + return _spec.f90wrap_allglobal__get__first_free_bound() + + @first_free_bound.setter + def first_free_bound(self, first_free_bound): + _spec.f90wrap_allglobal__set__first_free_bound(first_free_bound) + + def __str__(self): + ret = ['{\n'] + ret.append(' myid : ') + ret.append(repr(self.myid)) + ret.append(',\n ncpu : ') + ret.append(repr(self.ncpu)) + ret.append(',\n mpi_comm_spec : ') + ret.append(repr(self.mpi_comm_spec)) + ret.append(',\n ismyvolumevalue : ') + ret.append(repr(self.ismyvolumevalue)) + ret.append(',\n cpus : ') + ret.append(repr(self.cpus)) + ret.append(',\n skip_write : ') + ret.append(repr(self.skip_write)) + ret.append(',\n pi2nfp : ') + ret.append(repr(self.pi2nfp)) + ret.append(',\n pi2pi2nfp : ') + ret.append(repr(self.pi2pi2nfp)) + ret.append(',\n pi2pi2nfphalf : ') + ret.append(repr(self.pi2pi2nfphalf)) + ret.append(',\n pi2pi2nfpquart : ') + ret.append(repr(self.pi2pi2nfpquart)) + ret.append(',\n ext : ') + ret.append(repr(self.ext)) + ret.append(',\n forceerr : ') + ret.append(repr(self.forceerr)) + ret.append(',\n energy : ') + ret.append(repr(self.energy)) + ret.append(',\n ipdt : ') + ret.append(repr(self.ipdt)) + ret.append(',\n ipdtdpf : ') + ret.append(repr(self.ipdtdpf)) + ret.append(',\n mvol : ') + ret.append(repr(self.mvol)) + ret.append(',\n yesstellsym : ') + ret.append(repr(self.yesstellsym)) + ret.append(',\n notstellsym : ') + ret.append(repr(self.notstellsym)) + ret.append(',\n yesmatrixfree : ') + ret.append(repr(self.yesmatrixfree)) + ret.append(',\n notmatrixfree : ') + ret.append(repr(self.notmatrixfree)) + ret.append(',\n cheby : ') + ret.append(repr(self.cheby)) + ret.append(',\n zernike : ') + ret.append(repr(self.zernike)) + ret.append(',\n tt : ') + ret.append(repr(self.tt)) + ret.append(',\n rtt : ') + ret.append(repr(self.rtt)) + ret.append(',\n rtm : ') + ret.append(repr(self.rtm)) + ret.append(',\n zernikedof : ') + ret.append(repr(self.zernikedof)) + ret.append(',\n imagneticok : ') + ret.append(repr(self.imagneticok)) + ret.append(',\n iconstraintok : ') + ret.append(repr(self.iconstraintok)) + ret.append(',\n beltramierror : ') + ret.append(repr(self.beltramierror)) + 
ret.append(',\n mn : ') + ret.append(repr(self.mn)) + ret.append(',\n im : ') + ret.append(repr(self.im)) + ret.append(',\n in_ : ') + ret.append(repr(self.in_)) + ret.append(',\n halfmm : ') + ret.append(repr(self.halfmm)) + ret.append(',\n regumm : ') + ret.append(repr(self.regumm)) + ret.append(',\n rscale : ') + ret.append(repr(self.rscale)) + ret.append(',\n psifactor : ') + ret.append(repr(self.psifactor)) + ret.append(',\n inifactor : ') + ret.append(repr(self.inifactor)) + ret.append(',\n bbweight : ') + ret.append(repr(self.bbweight)) + ret.append(',\n mmpp : ') + ret.append(repr(self.mmpp)) + ret.append(',\n mne : ') + ret.append(repr(self.mne)) + ret.append(',\n ime : ') + ret.append(repr(self.ime)) + ret.append(',\n ine : ') + ret.append(repr(self.ine)) + ret.append(',\n mns : ') + ret.append(repr(self.mns)) + ret.append(',\n ims : ') + ret.append(repr(self.ims)) + ret.append(',\n ins : ') + ret.append(repr(self.ins)) + ret.append(',\n lmpol : ') + ret.append(repr(self.lmpol)) + ret.append(',\n lntor : ') + ret.append(repr(self.lntor)) + ret.append(',\n smpol : ') + ret.append(repr(self.smpol)) + ret.append(',\n sntor : ') + ret.append(repr(self.sntor)) + ret.append(',\n xoffset : ') + ret.append(repr(self.xoffset)) + ret.append(',\n irbc : ') + ret.append(repr(self.irbc)) + ret.append(',\n izbs : ') + ret.append(repr(self.izbs)) + ret.append(',\n irbs : ') + ret.append(repr(self.irbs)) + ret.append(',\n izbc : ') + ret.append(repr(self.izbc)) + ret.append(',\n drbc : ') + ret.append(repr(self.drbc)) + ret.append(',\n dzbs : ') + ret.append(repr(self.dzbs)) + ret.append(',\n drbs : ') + ret.append(repr(self.drbs)) + ret.append(',\n dzbc : ') + ret.append(repr(self.dzbc)) + ret.append(',\n irij : ') + ret.append(repr(self.irij)) + ret.append(',\n izij : ') + ret.append(repr(self.izij)) + ret.append(',\n drij : ') + ret.append(repr(self.drij)) + ret.append(',\n dzij : ') + ret.append(repr(self.dzij)) + ret.append(',\n trij : ') + ret.append(repr(self.trij)) + ret.append(',\n tzij : ') + ret.append(repr(self.tzij)) + ret.append(',\n ivns : ') + ret.append(repr(self.ivns)) + ret.append(',\n ibns : ') + ret.append(repr(self.ibns)) + ret.append(',\n ivnc : ') + ret.append(repr(self.ivnc)) + ret.append(',\n ibnc : ') + ret.append(repr(self.ibnc)) + ret.append(',\n lrbc : ') + ret.append(repr(self.lrbc)) + ret.append(',\n lzbs : ') + ret.append(repr(self.lzbs)) + ret.append(',\n lrbs : ') + ret.append(repr(self.lrbs)) + ret.append(',\n lzbc : ') + ret.append(repr(self.lzbc)) + ret.append(',\n num_modes : ') + ret.append(repr(self.num_modes)) + ret.append(',\n mmrzrz : ') + ret.append(repr(self.mmrzrz)) + ret.append(',\n nnrzrz : ') + ret.append(repr(self.nnrzrz)) + ret.append(',\n allrzrz : ') + ret.append(repr(self.allrzrz)) + ret.append(',\n nt : ') + ret.append(repr(self.nt)) + ret.append(',\n nz : ') + ret.append(repr(self.nz)) + ret.append(',\n ntz : ') + ret.append(repr(self.ntz)) + ret.append(',\n hnt : ') + ret.append(repr(self.hnt)) + ret.append(',\n hnz : ') + ret.append(repr(self.hnz)) + ret.append(',\n sontz : ') + ret.append(repr(self.sontz)) + ret.append(',\n rij : ') + ret.append(repr(self.rij)) + ret.append(',\n zij : ') + ret.append(repr(self.zij)) + ret.append(',\n xij : ') + ret.append(repr(self.xij)) + ret.append(',\n yij : ') + ret.append(repr(self.yij)) + ret.append(',\n sg : ') + ret.append(repr(self.sg)) + ret.append(',\n guvij : ') + ret.append(repr(self.guvij)) + ret.append(',\n gvuij : ') + ret.append(repr(self.gvuij)) + ret.append(',\n guvijsave : ') + 
ret.append(repr(self.guvijsave)) + ret.append(',\n ki : ') + ret.append(repr(self.ki)) + ret.append(',\n kijs : ') + ret.append(repr(self.kijs)) + ret.append(',\n kija : ') + ret.append(repr(self.kija)) + ret.append(',\n iotakkii : ') + ret.append(repr(self.iotakkii)) + ret.append(',\n iotaksub : ') + ret.append(repr(self.iotaksub)) + ret.append(',\n iotakadd : ') + ret.append(repr(self.iotakadd)) + ret.append(',\n iotaksgn : ') + ret.append(repr(self.iotaksgn)) + ret.append(',\n efmn : ') + ret.append(repr(self.efmn)) + ret.append(',\n ofmn : ') + ret.append(repr(self.ofmn)) + ret.append(',\n cfmn : ') + ret.append(repr(self.cfmn)) + ret.append(',\n sfmn : ') + ret.append(repr(self.sfmn)) + ret.append(',\n evmn : ') + ret.append(repr(self.evmn)) + ret.append(',\n odmn : ') + ret.append(repr(self.odmn)) + ret.append(',\n comn : ') + ret.append(repr(self.comn)) + ret.append(',\n simn : ') + ret.append(repr(self.simn)) + ret.append(',\n ijreal : ') + ret.append(repr(self.ijreal)) + ret.append(',\n ijimag : ') + ret.append(repr(self.ijimag)) + ret.append(',\n jireal : ') + ret.append(repr(self.jireal)) + ret.append(',\n jiimag : ') + ret.append(repr(self.jiimag)) + ret.append(',\n jkreal : ') + ret.append(repr(self.jkreal)) + ret.append(',\n jkimag : ') + ret.append(repr(self.jkimag)) + ret.append(',\n kjreal : ') + ret.append(repr(self.kjreal)) + ret.append(',\n kjimag : ') + ret.append(repr(self.kjimag)) + ret.append(',\n bsupumn : ') + ret.append(repr(self.bsupumn)) + ret.append(',\n bsupvmn : ') + ret.append(repr(self.bsupvmn)) + ret.append(',\n goomne : ') + ret.append(repr(self.goomne)) + ret.append(',\n goomno : ') + ret.append(repr(self.goomno)) + ret.append(',\n gssmne : ') + ret.append(repr(self.gssmne)) + ret.append(',\n gssmno : ') + ret.append(repr(self.gssmno)) + ret.append(',\n gstmne : ') + ret.append(repr(self.gstmne)) + ret.append(',\n gstmno : ') + ret.append(repr(self.gstmno)) + ret.append(',\n gszmne : ') + ret.append(repr(self.gszmne)) + ret.append(',\n gszmno : ') + ret.append(repr(self.gszmno)) + ret.append(',\n gttmne : ') + ret.append(repr(self.gttmne)) + ret.append(',\n gttmno : ') + ret.append(repr(self.gttmno)) + ret.append(',\n gtzmne : ') + ret.append(repr(self.gtzmne)) + ret.append(',\n gtzmno : ') + ret.append(repr(self.gtzmno)) + ret.append(',\n gzzmne : ') + ret.append(repr(self.gzzmne)) + ret.append(',\n gzzmno : ') + ret.append(repr(self.gzzmno)) + ret.append(',\n dtoocc : ') + ret.append(repr(self.dtoocc)) + ret.append(',\n dtoocs : ') + ret.append(repr(self.dtoocs)) + ret.append(',\n dtoosc : ') + ret.append(repr(self.dtoosc)) + ret.append(',\n dtooss : ') + ret.append(repr(self.dtooss)) + ret.append(',\n ttsscc : ') + ret.append(repr(self.ttsscc)) + ret.append(',\n ttsscs : ') + ret.append(repr(self.ttsscs)) + ret.append(',\n ttsssc : ') + ret.append(repr(self.ttsssc)) + ret.append(',\n ttssss : ') + ret.append(repr(self.ttssss)) + ret.append(',\n tdstcc : ') + ret.append(repr(self.tdstcc)) + ret.append(',\n tdstcs : ') + ret.append(repr(self.tdstcs)) + ret.append(',\n tdstsc : ') + ret.append(repr(self.tdstsc)) + ret.append(',\n tdstss : ') + ret.append(repr(self.tdstss)) + ret.append(',\n tdszcc : ') + ret.append(repr(self.tdszcc)) + ret.append(',\n tdszcs : ') + ret.append(repr(self.tdszcs)) + ret.append(',\n tdszsc : ') + ret.append(repr(self.tdszsc)) + ret.append(',\n tdszss : ') + ret.append(repr(self.tdszss)) + ret.append(',\n ddttcc : ') + ret.append(repr(self.ddttcc)) + ret.append(',\n ddttcs : ') + ret.append(repr(self.ddttcs)) + 
ret.append(',\n ddttsc : ') + ret.append(repr(self.ddttsc)) + ret.append(',\n ddttss : ') + ret.append(repr(self.ddttss)) + ret.append(',\n ddtzcc : ') + ret.append(repr(self.ddtzcc)) + ret.append(',\n ddtzcs : ') + ret.append(repr(self.ddtzcs)) + ret.append(',\n ddtzsc : ') + ret.append(repr(self.ddtzsc)) + ret.append(',\n ddtzss : ') + ret.append(repr(self.ddtzss)) + ret.append(',\n ddzzcc : ') + ret.append(repr(self.ddzzcc)) + ret.append(',\n ddzzcs : ') + ret.append(repr(self.ddzzcs)) + ret.append(',\n ddzzsc : ') + ret.append(repr(self.ddzzsc)) + ret.append(',\n ddzzss : ') + ret.append(repr(self.ddzzss)) + ret.append(',\n tsc : ') + ret.append(repr(self.tsc)) + ret.append(',\n tss : ') + ret.append(repr(self.tss)) + ret.append(',\n dtc : ') + ret.append(repr(self.dtc)) + ret.append(',\n dts : ') + ret.append(repr(self.dts)) + ret.append(',\n dzc : ') + ret.append(repr(self.dzc)) + ret.append(',\n dzs : ') + ret.append(repr(self.dzs)) + ret.append(',\n ttc : ') + ret.append(repr(self.ttc)) + ret.append(',\n tzc : ') + ret.append(repr(self.tzc)) + ret.append(',\n tts : ') + ret.append(repr(self.tts)) + ret.append(',\n tzs : ') + ret.append(repr(self.tzs)) + ret.append(',\n dtflux : ') + ret.append(repr(self.dtflux)) + ret.append(',\n dpflux : ') + ret.append(repr(self.dpflux)) + ret.append(',\n sweight : ') + ret.append(repr(self.sweight)) + ret.append(',\n nadof : ') + ret.append(repr(self.nadof)) + ret.append(',\n nfielddof : ') + ret.append(repr(self.nfielddof)) + ret.append(',\n lma : ') + ret.append(repr(self.lma)) + ret.append(',\n lmb : ') + ret.append(repr(self.lmb)) + ret.append(',\n lmc : ') + ret.append(repr(self.lmc)) + ret.append(',\n lmd : ') + ret.append(repr(self.lmd)) + ret.append(',\n lme : ') + ret.append(repr(self.lme)) + ret.append(',\n lmf : ') + ret.append(repr(self.lmf)) + ret.append(',\n lmg : ') + ret.append(repr(self.lmg)) + ret.append(',\n lmh : ') + ret.append(repr(self.lmh)) + ret.append(',\n lmavalue : ') + ret.append(repr(self.lmavalue)) + ret.append(',\n lmbvalue : ') + ret.append(repr(self.lmbvalue)) + ret.append(',\n lmcvalue : ') + ret.append(repr(self.lmcvalue)) + ret.append(',\n lmdvalue : ') + ret.append(repr(self.lmdvalue)) + ret.append(',\n lmevalue : ') + ret.append(repr(self.lmevalue)) + ret.append(',\n lmfvalue : ') + ret.append(repr(self.lmfvalue)) + ret.append(',\n lmgvalue : ') + ret.append(repr(self.lmgvalue)) + ret.append(',\n lmhvalue : ') + ret.append(repr(self.lmhvalue)) + ret.append(',\n fso : ') + ret.append(repr(self.fso)) + ret.append(',\n fse : ') + ret.append(repr(self.fse)) + ret.append(',\n lcoordinatesingularity : ') + ret.append(repr(self.lcoordinatesingularity)) + ret.append(',\n lplasmaregion : ') + ret.append(repr(self.lplasmaregion)) + ret.append(',\n lvacuumregion : ') + ret.append(repr(self.lvacuumregion)) + ret.append(',\n lsavedguvij : ') + ret.append(repr(self.lsavedguvij)) + ret.append(',\n localconstraint : ') + ret.append(repr(self.localconstraint)) + ret.append(',\n dma : ') + ret.append(repr(self.dma)) + ret.append(',\n dmb : ') + ret.append(repr(self.dmb)) + ret.append(',\n dmd : ') + ret.append(repr(self.dmd)) + ret.append(',\n dmas : ') + ret.append(repr(self.dmas)) + ret.append(',\n dmds : ') + ret.append(repr(self.dmds)) + ret.append(',\n idmas : ') + ret.append(repr(self.idmas)) + ret.append(',\n jdmas : ') + ret.append(repr(self.jdmas)) + ret.append(',\n ndmasmax : ') + ret.append(repr(self.ndmasmax)) + ret.append(',\n ndmas : ') + ret.append(repr(self.ndmas)) + ret.append(',\n dmg : ') + 
ret.append(repr(self.dmg)) + ret.append(',\n solution : ') + ret.append(repr(self.solution)) + ret.append(',\n gmreslastsolution : ') + ret.append(repr(self.gmreslastsolution)) + ret.append(',\n mbpsi : ') + ret.append(repr(self.mbpsi)) + ret.append(',\n liluprecond : ') + ret.append(repr(self.liluprecond)) + ret.append(',\n beltramiinverse : ') + ret.append(repr(self.beltramiinverse)) + ret.append(',\n diotadxup : ') + ret.append(repr(self.diotadxup)) + ret.append(',\n ditgpdxtp : ') + ret.append(repr(self.ditgpdxtp)) + ret.append(',\n glambda : ') + ret.append(repr(self.glambda)) + ret.append(',\n lmns : ') + ret.append(repr(self.lmns)) + ret.append(',\n bemn : ') + ret.append(repr(self.bemn)) + ret.append(',\n iomn : ') + ret.append(repr(self.iomn)) + ret.append(',\n somn : ') + ret.append(repr(self.somn)) + ret.append(',\n pomn : ') + ret.append(repr(self.pomn)) + ret.append(',\n bomn : ') + ret.append(repr(self.bomn)) + ret.append(',\n iemn : ') + ret.append(repr(self.iemn)) + ret.append(',\n semn : ') + ret.append(repr(self.semn)) + ret.append(',\n pemn : ') + ret.append(repr(self.pemn)) + ret.append(',\n bbe : ') + ret.append(repr(self.bbe)) + ret.append(',\n iio : ') + ret.append(repr(self.iio)) + ret.append(',\n bbo : ') + ret.append(repr(self.bbo)) + ret.append(',\n iie : ') + ret.append(repr(self.iie)) + ret.append(',\n btemn : ') + ret.append(repr(self.btemn)) + ret.append(',\n bzemn : ') + ret.append(repr(self.bzemn)) + ret.append(',\n btomn : ') + ret.append(repr(self.btomn)) + ret.append(',\n bzomn : ') + ret.append(repr(self.bzomn)) + ret.append(',\n bloweremn : ') + ret.append(repr(self.bloweremn)) + ret.append(',\n bloweromn : ') + ret.append(repr(self.bloweromn)) + ret.append(',\n lgdof : ') + ret.append(repr(self.lgdof)) + ret.append(',\n ngdof : ') + ret.append(repr(self.ngdof)) + ret.append(',\n dbbdrz : ') + ret.append(repr(self.dbbdrz)) + ret.append(',\n diidrz : ') + ret.append(repr(self.diidrz)) + ret.append(',\n dffdrz : ') + ret.append(repr(self.dffdrz)) + ret.append(',\n dbbdmp : ') + ret.append(repr(self.dbbdmp)) + ret.append(',\n dmupfdx : ') + ret.append(repr(self.dmupfdx)) + ret.append(',\n lhessianallocated : ') + ret.append(repr(self.lhessianallocated)) + ret.append(',\n hessian : ') + ret.append(repr(self.hessian)) + ret.append(',\n dessian : ') + ret.append(repr(self.dessian)) + ret.append(',\n cosi : ') + ret.append(repr(self.cosi)) + ret.append(',\n sini : ') + ret.append(repr(self.sini)) + ret.append(',\n gteta : ') + ret.append(repr(self.gteta)) + ret.append(',\n gzeta : ') + ret.append(repr(self.gzeta)) + ret.append(',\n ajk : ') + ret.append(repr(self.ajk)) + ret.append(',\n dradr : ') + ret.append(repr(self.dradr)) + ret.append(',\n dradz : ') + ret.append(repr(self.dradz)) + ret.append(',\n dzadr : ') + ret.append(repr(self.dzadr)) + ret.append(',\n dzadz : ') + ret.append(repr(self.dzadz)) + ret.append(',\n drodr : ') + ret.append(repr(self.drodr)) + ret.append(',\n drodz : ') + ret.append(repr(self.drodz)) + ret.append(',\n dzodr : ') + ret.append(repr(self.dzodr)) + ret.append(',\n dzodz : ') + ret.append(repr(self.dzodz)) + ret.append(',\n djkp : ') + ret.append(repr(self.djkp)) + ret.append(',\n djkm : ') + ret.append(repr(self.djkm)) + ret.append(',\n lbbintegral : ') + ret.append(repr(self.lbbintegral)) + ret.append(',\n labintegral : ') + ret.append(repr(self.labintegral)) + ret.append(',\n vvolume : ') + ret.append(repr(self.vvolume)) + ret.append(',\n dvolume : ') + ret.append(repr(self.dvolume)) + ret.append(',\n ivol : ') + 
ret.append(repr(self.ivol)) + ret.append(',\n gbzeta : ') + ret.append(repr(self.gbzeta)) + ret.append(',\n iquad : ') + ret.append(repr(self.iquad)) + ret.append(',\n gaussianweight : ') + ret.append(repr(self.gaussianweight)) + ret.append(',\n gaussianabscissae : ') + ret.append(repr(self.gaussianabscissae)) + ret.append(',\n lblinear : ') + ret.append(repr(self.lblinear)) + ret.append(',\n lbnewton : ') + ret.append(repr(self.lbnewton)) + ret.append(',\n lbsequad : ') + ret.append(repr(self.lbsequad)) + ret.append(',\n orzp : ') + ret.append(repr(self.orzp)) + ret.append(',\n globaljk : ') + ret.append(repr(self.globaljk)) + ret.append(',\n dxyz : ') + ret.append(repr(self.dxyz)) + ret.append(',\n nxyz : ') + ret.append(repr(self.nxyz)) + ret.append(',\n jxyz : ') + ret.append(repr(self.jxyz)) + ret.append(',\n tetazeta : ') + ret.append(repr(self.tetazeta)) + ret.append(',\n virtualcasingfactor : ') + ret.append(repr(self.virtualcasingfactor)) + ret.append(',\n iberror : ') + ret.append(repr(self.iberror)) + ret.append(',\n nfreeboundaryiterations : ') + ret.append(repr(self.nfreeboundaryiterations)) + ret.append(',\n node : ') + ret.append(repr(self.node)) + ret.append(',\n first_free_bound : ') + ret.append(repr(self.first_free_bound)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + +allglobal = Allglobal() + +class Fftw_Interface(f90wrap.runtime.FortranModule): + """ + Module fftw_interface + + + Defined at global.fpp lines 2274-2279 + + """ + @property + def cplxin(self): + """ + Element cplxin ftype=complex(c_double_complex) pytype=complex + + + Defined at global.fpp line 2279 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_fftw_interface__array__cplxin(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + cplxin = self._arrays[array_handle] + else: + cplxin = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_fftw_interface__array__cplxin) + self._arrays[array_handle] = cplxin + return cplxin + + @cplxin.setter + def cplxin(self, cplxin): + self.cplxin[...] = cplxin + + @property + def cplxout(self): + """ + Element cplxout ftype=complex(c_double_complex) pytype=complex + + + Defined at global.fpp line 2279 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_fftw_interface__array__cplxout(f90wrap.runtime.empty_handle) + if array_handle in self._arrays: + cplxout = self._arrays[array_handle] + else: + cplxout = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + f90wrap.runtime.empty_handle, + _spec.f90wrap_fftw_interface__array__cplxout) + self._arrays[array_handle] = cplxout + return cplxout + + @cplxout.setter + def cplxout(self, cplxout): + self.cplxout[...] 
= cplxout + + def __str__(self): + ret = ['{\n'] + ret.append(' cplxin : ') + ret.append(repr(self.cplxin)) + ret.append(',\n cplxout : ') + ret.append(repr(self.cplxout)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + +fftw_interface = Fftw_Interface() + +class Intghs_Module(f90wrap.runtime.FortranModule): + """ + Module intghs_module + + + Defined at intghs.fpp lines 40-49 + + """ + @f90wrap.runtime.register_class("spec.intghs_workspace") + class intghs_workspace(f90wrap.runtime.FortranDerivedType): + """ + Type(name=intghs_workspace) + + + Defined at intghs.fpp lines 41-47 + + """ + def __init__(self, handle=None): + """ + self = Intghs_Workspace() + + + Defined at intghs.fpp lines 41-47 + + + Returns + ------- + this : Intghs_Workspace + Object to be constructed + + + Automatically generated constructor for intghs_workspace + """ + f90wrap.runtime.FortranDerivedType.__init__(self) + result = _spec.f90wrap_intghs_workspace_initialise() + self._handle = result[0] if isinstance(result, tuple) else result + + def __del__(self): + """ + Destructor for class Intghs_Workspace + + + Defined at intghs.fpp lines 41-47 + + Parameters + ---------- + this : Intghs_Workspace + Object to be destructed + + + Automatically generated destructor for intghs_workspace + """ + if self._alloc: + _spec.f90wrap_intghs_workspace_finalise(this=self._handle) + + @property + def efmn(self): + """ + Element efmn ftype=real(8) pytype=float + + + Defined at intghs.fpp line 42 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__efmn(self._handle) + if array_handle in self._arrays: + efmn = self._arrays[array_handle] + else: + efmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__efmn) + self._arrays[array_handle] = efmn + return efmn + + @efmn.setter + def efmn(self, efmn): + self.efmn[...] = efmn + + @property + def ofmn(self): + """ + Element ofmn ftype=real(8) pytype=float + + + Defined at intghs.fpp line 42 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__ofmn(self._handle) + if array_handle in self._arrays: + ofmn = self._arrays[array_handle] + else: + ofmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__ofmn) + self._arrays[array_handle] = ofmn + return ofmn + + @ofmn.setter + def ofmn(self, ofmn): + self.ofmn[...] = ofmn + + @property + def cfmn(self): + """ + Element cfmn ftype=real(8) pytype=float + + + Defined at intghs.fpp line 42 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__cfmn(self._handle) + if array_handle in self._arrays: + cfmn = self._arrays[array_handle] + else: + cfmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__cfmn) + self._arrays[array_handle] = cfmn + return cfmn + + @cfmn.setter + def cfmn(self, cfmn): + self.cfmn[...] 
= cfmn + + @property + def sfmn(self): + """ + Element sfmn ftype=real(8) pytype=float + + + Defined at intghs.fpp line 42 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__sfmn(self._handle) + if array_handle in self._arrays: + sfmn = self._arrays[array_handle] + else: + sfmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__sfmn) + self._arrays[array_handle] = sfmn + return sfmn + + @sfmn.setter + def sfmn(self, sfmn): + self.sfmn[...] = sfmn + + @property + def evmn(self): + """ + Element evmn ftype=real(8) pytype=float + + + Defined at intghs.fpp line 43 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__evmn(self._handle) + if array_handle in self._arrays: + evmn = self._arrays[array_handle] + else: + evmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__evmn) + self._arrays[array_handle] = evmn + return evmn + + @evmn.setter + def evmn(self, evmn): + self.evmn[...] = evmn + + @property + def odmn(self): + """ + Element odmn ftype=real(8) pytype=float + + + Defined at intghs.fpp line 43 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__odmn(self._handle) + if array_handle in self._arrays: + odmn = self._arrays[array_handle] + else: + odmn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__odmn) + self._arrays[array_handle] = odmn + return odmn + + @odmn.setter + def odmn(self, odmn): + self.odmn[...] = odmn + + @property + def ijreal(self): + """ + Element ijreal ftype=real(8) pytype=float + + + Defined at intghs.fpp line 44 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__ijreal(self._handle) + if array_handle in self._arrays: + ijreal = self._arrays[array_handle] + else: + ijreal = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__ijreal) + self._arrays[array_handle] = ijreal + return ijreal + + @ijreal.setter + def ijreal(self, ijreal): + self.ijreal[...] = ijreal + + @property + def jireal(self): + """ + Element jireal ftype=real(8) pytype=float + + + Defined at intghs.fpp line 44 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__jireal(self._handle) + if array_handle in self._arrays: + jireal = self._arrays[array_handle] + else: + jireal = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__jireal) + self._arrays[array_handle] = jireal + return jireal + + @jireal.setter + def jireal(self, jireal): + self.jireal[...] = jireal + + @property + def jkreal(self): + """ + Element jkreal ftype=real(8) pytype=float + + + Defined at intghs.fpp line 44 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__jkreal(self._handle) + if array_handle in self._arrays: + jkreal = self._arrays[array_handle] + else: + jkreal = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__jkreal) + self._arrays[array_handle] = jkreal + return jkreal + + @jkreal.setter + def jkreal(self, jkreal): + self.jkreal[...] 
= jkreal + + @property + def kjreal(self): + """ + Element kjreal ftype=real(8) pytype=float + + + Defined at intghs.fpp line 44 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__kjreal(self._handle) + if array_handle in self._arrays: + kjreal = self._arrays[array_handle] + else: + kjreal = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__kjreal) + self._arrays[array_handle] = kjreal + return kjreal + + @kjreal.setter + def kjreal(self, kjreal): + self.kjreal[...] = kjreal + + @property + def bloweremn(self): + """ + Element bloweremn ftype=real(8) pytype=float + + + Defined at intghs.fpp line 45 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__bloweremn(self._handle) + if array_handle in self._arrays: + bloweremn = self._arrays[array_handle] + else: + bloweremn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__bloweremn) + self._arrays[array_handle] = bloweremn + return bloweremn + + @bloweremn.setter + def bloweremn(self, bloweremn): + self.bloweremn[...] = bloweremn + + @property + def bloweromn(self): + """ + Element bloweromn ftype=real(8) pytype=float + + + Defined at intghs.fpp line 45 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__bloweromn(self._handle) + if array_handle in self._arrays: + bloweromn = self._arrays[array_handle] + else: + bloweromn = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__bloweromn) + self._arrays[array_handle] = bloweromn + return bloweromn + + @bloweromn.setter + def bloweromn(self, bloweromn): + self.bloweromn[...] = bloweromn + + @property + def gbupper(self): + """ + Element gbupper ftype=real(8) pytype=float + + + Defined at intghs.fpp line 46 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__gbupper(self._handle) + if array_handle in self._arrays: + gbupper = self._arrays[array_handle] + else: + gbupper = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__gbupper) + self._arrays[array_handle] = gbupper + return gbupper + + @gbupper.setter + def gbupper(self, gbupper): + self.gbupper[...] = gbupper + + @property + def blower(self): + """ + Element blower ftype=real(8) pytype=float + + + Defined at intghs.fpp line 46 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__blower(self._handle) + if array_handle in self._arrays: + blower = self._arrays[array_handle] + else: + blower = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__blower) + self._arrays[array_handle] = blower + return blower + + @blower.setter + def blower(self, blower): + self.blower[...] 
= blower + + @property + def basis(self): + """ + Element basis ftype=real(8) pytype=float + + + Defined at intghs.fpp line 47 + + """ + array_ndim, array_type, array_shape, array_handle = \ + _spec.f90wrap_intghs_workspace__array__basis(self._handle) + if array_handle in self._arrays: + basis = self._arrays[array_handle] + else: + basis = f90wrap.runtime.get_array(f90wrap.runtime.sizeof_fortran_t, + self._handle, + _spec.f90wrap_intghs_workspace__array__basis) + self._arrays[array_handle] = basis + return basis + + @basis.setter + def basis(self, basis): + self.basis[...] = basis + + def __str__(self): + ret = ['{\n'] + ret.append(' efmn : ') + ret.append(repr(self.efmn)) + ret.append(',\n ofmn : ') + ret.append(repr(self.ofmn)) + ret.append(',\n cfmn : ') + ret.append(repr(self.cfmn)) + ret.append(',\n sfmn : ') + ret.append(repr(self.sfmn)) + ret.append(',\n evmn : ') + ret.append(repr(self.evmn)) + ret.append(',\n odmn : ') + ret.append(repr(self.odmn)) + ret.append(',\n ijreal : ') + ret.append(repr(self.ijreal)) + ret.append(',\n jireal : ') + ret.append(repr(self.jireal)) + ret.append(',\n jkreal : ') + ret.append(repr(self.jkreal)) + ret.append(',\n kjreal : ') + ret.append(repr(self.kjreal)) + ret.append(',\n bloweremn : ') + ret.append(repr(self.bloweremn)) + ret.append(',\n bloweromn : ') + ret.append(repr(self.bloweromn)) + ret.append(',\n gbupper : ') + ret.append(repr(self.gbupper)) + ret.append(',\n blower : ') + ret.append(repr(self.blower)) + ret.append(',\n basis : ') + ret.append(repr(self.basis)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + + _dt_array_initialisers = [] + + +intghs_module = Intghs_Module() + +class Newtontime(f90wrap.runtime.FortranModule): + """ + Module newtontime + + + Defined at newton.fpp lines 38-40 + + """ + @property + def nfcalls(self): + """ + Element nfcalls ftype=integer pytype=int + + + Defined at newton.fpp line 39 + + """ + return _spec.f90wrap_newtontime__get__nfcalls() + + @nfcalls.setter + def nfcalls(self, nfcalls): + _spec.f90wrap_newtontime__set__nfcalls(nfcalls) + + @property + def ndcalls(self): + """ + Element ndcalls ftype=integer pytype=int + + + Defined at newton.fpp line 39 + + """ + return _spec.f90wrap_newtontime__get__ndcalls() + + @ndcalls.setter + def ndcalls(self, ndcalls): + _spec.f90wrap_newtontime__set__ndcalls(ndcalls) + + @property + def lastcpu(self): + """ + Element lastcpu ftype=real(8) pytype=float + + + Defined at newton.fpp line 40 + + """ + return _spec.f90wrap_newtontime__get__lastcpu() + + @lastcpu.setter + def lastcpu(self, lastcpu): + _spec.f90wrap_newtontime__set__lastcpu(lastcpu) + + def __str__(self): + ret = ['{\n'] + ret.append(' nfcalls : ') + ret.append(repr(self.nfcalls)) + ret.append(',\n ndcalls : ') + ret.append(repr(self.ndcalls)) + ret.append(',\n lastcpu : ') + ret.append(repr(self.lastcpu)) + ret.append('}') + return ''.join(ret) + + _dt_array_initialisers = [] + + +newtontime = Newtontime() + +def preset(): + """ + preset() + + + Defined at preset.fpp lines 16-2667 + + + """ + _spec.f90wrap_preset() + +def manual(): + """ + manual() + + + Defined at manual.fpp lines 198-230 + + + """ + _spec.f90wrap_manual() + +def rzaxis(mvol, mn, inrbc, inzbs, inrbs, inzbc, ivol, lcomputederivatives): + """ + rzaxis(mvol, mn, inrbc, inzbs, inrbs, inzbc, ivol, lcomputederivatives) + + + Defined at rzaxis.fpp lines 60-696 + + Parameters + ---------- + mvol : int + mn : int + inrbc : float array + inzbs : float array + inrbs : float array + inzbc : float array + ivol : int + 
lcomputederivatives : bool + + """ + _spec.f90wrap_rzaxis(mvol=mvol, mn=mn, inrbc=inrbc, inzbs=inzbs, inrbs=inrbs, \ + inzbc=inzbc, ivol=ivol, lcomputederivatives=lcomputederivatives) + +def packxi(ngdof, position, mvol, mn, irbc, izbs, irbs, izbc, packorunpack, \ + lcomputederivatives, lcomputeaxis): + """ + packxi(ngdof, position, mvol, mn, irbc, izbs, irbs, izbc, packorunpack, \ + lcomputederivatives, lcomputeaxis) + + + Defined at packxi.fpp lines 66-165 + + Parameters + ---------- + ngdof : int + position : float array + mvol : int + mn : int + irbc : float array + izbs : float array + irbs : float array + izbc : float array + packorunpack : str + lcomputederivatives : bool + lcomputeaxis : bool + + """ + _spec.f90wrap_packxi(ngdof=ngdof, position=position, mvol=mvol, mn=mn, \ + irbc=irbc, izbs=izbs, irbs=irbs, izbc=izbc, packorunpack=packorunpack, \ + lcomputederivatives=lcomputederivatives, lcomputeaxis=lcomputeaxis) + +def volume(lvol, vflag): + """ + volume(lvol, vflag) + + + Defined at volume.fpp lines 31-239 + + Parameters + ---------- + lvol : int + vflag : int + + """ + _spec.f90wrap_volume(lvol=lvol, vflag=vflag) + +def coords(lvol, lss, lcurvature, ntz, mn): + """ + coords(lvol, lss, lcurvature, ntz, mn) + + + Defined at coords.fpp lines 115-552 + + Parameters + ---------- + lvol : int + lss : float + lcurvature : int + ntz : int + mn : int + + """ + _spec.f90wrap_coords(lvol=lvol, lss=lss, lcurvature=lcurvature, ntz=ntz, mn=mn) + +def get_cheby(lss, lrad, cheby): + """ + get_cheby(lss, lrad, cheby) + + + Defined at basefn.fpp lines 9-35 + + Parameters + ---------- + lss : float + lrad : int + cheby : float array + + """ + _spec.f90wrap_get_cheby(lss=lss, lrad=lrad, cheby=cheby) + +def get_cheby_d2(lss, lrad, cheby): + """ + get_cheby_d2(lss, lrad, cheby) + + + Defined at basefn.fpp lines 37-65 + + Parameters + ---------- + lss : float + lrad : int + cheby : float array + + """ + _spec.f90wrap_get_cheby_d2(lss=lss, lrad=lrad, cheby=cheby) + +def get_zernike(r, lrad, mpol, zernike): + """ + get_zernike(r, lrad, mpol, zernike) + + + Defined at basefn.fpp lines 67-119 + + Parameters + ---------- + r : float + lrad : int + mpol : int + zernike : float array + + """ + _spec.f90wrap_get_zernike(r=r, lrad=lrad, mpol=mpol, zernike=zernike) + +def get_zernike_d2(r, lrad, mpol, zernike): + """ + get_zernike_d2(r, lrad, mpol, zernike) + + + Defined at basefn.fpp lines 121-181 + + Parameters + ---------- + r : float + lrad : int + mpol : int + zernike : float array + + """ + _spec.f90wrap_get_zernike_d2(r=r, lrad=lrad, mpol=mpol, zernike=zernike) + +def get_zernike_rm(r, lrad, mpol, zernike): + """ + get_zernike_rm(r, lrad, mpol, zernike) + + + Defined at basefn.fpp lines 183-226 + + Parameters + ---------- + r : float + lrad : int + mpol : int + zernike : float array + + """ + _spec.f90wrap_get_zernike_rm(r=r, lrad=lrad, mpol=mpol, zernike=zernike) + +def allocate_beltrami_matrices(vvol, lcomputederivatives): + """ + allocate_beltrami_matrices(vvol, lcomputederivatives) + + + Defined at memory.fpp lines 13-126 + + Parameters + ---------- + vvol : int + lcomputederivatives : bool + + """ + _spec.f90wrap_allocate_beltrami_matrices(vvol=vvol, \ + lcomputederivatives=lcomputederivatives) + +def deallocate_beltrami_matrices(lcomputederivatives): + """ + deallocate_beltrami_matrices(lcomputederivatives) + + + Defined at memory.fpp lines 129-218 + + Parameters + ---------- + lcomputederivatives : bool + + """ + \ + 
_spec.f90wrap_deallocate_beltrami_matrices(lcomputederivatives=lcomputederivatives) + +def allocate_geometry_matrices(vvol, lcomputederivatives): + """ + allocate_geometry_matrices(vvol, lcomputederivatives) + + + Defined at memory.fpp lines 221-587 + + Parameters + ---------- + vvol : int + lcomputederivatives : bool + + """ + _spec.f90wrap_allocate_geometry_matrices(vvol=vvol, \ + lcomputederivatives=lcomputederivatives) + +def deallocate_geometry_matrices(lcomputederivatives): + """ + deallocate_geometry_matrices(lcomputederivatives) + + + Defined at memory.fpp lines 590-853 + + Parameters + ---------- + lcomputederivatives : bool + + """ + \ + _spec.f90wrap_deallocate_geometry_matrices(lcomputederivatives=lcomputederivatives) + +def metrix(lquad, lvol): + """ + metrix(lquad, lvol) + + + Defined at metrix.fpp lines 35-110 + + Parameters + ---------- + lquad : int + lvol : int + + """ + _spec.f90wrap_metrix(lquad=lquad, lvol=lvol) + +def compute_guvijsave(lquad, vvol, ideriv, lcurvature): + """ + compute_guvijsave(lquad, vvol, ideriv, lcurvature) + + + Defined at metrix.fpp lines 113-129 + + Parameters + ---------- + lquad : int + vvol : int + ideriv : int + lcurvature : int + + """ + _spec.f90wrap_compute_guvijsave(lquad=lquad, vvol=vvol, ideriv=ideriv, \ + lcurvature=lcurvature) + +def ma00aa(lquad, mn, lvol, lrad): + """ + ma00aa(lquad, mn, lvol, lrad) + + + Defined at ma00aa.fpp lines 40-317 + + Parameters + ---------- + lquad : int + mn : int + lvol : int + lrad : int + + """ + _spec.f90wrap_ma00aa(lquad=lquad, mn=mn, lvol=lvol, lrad=lrad) + +def matrix(lvol, mn, lrad): + """ + matrix(lvol, mn, lrad) + + + Defined at matrix.fpp lines 232-499 + + Parameters + ---------- + lvol : int + mn : int + lrad : int + + """ + _spec.f90wrap_matrix(lvol=lvol, mn=mn, lrad=lrad) + +def matrixbg(lvol, mn, lrad): + """ + matrixbg(lvol, mn, lrad) + + + Defined at matrix.fpp lines 502-533 + + Parameters + ---------- + lvol : int + mn : int + lrad : int + + """ + _spec.f90wrap_matrixbg(lvol=lvol, mn=mn, lrad=lrad) + +def spsmat(lvol, mn, lrad): + """ + spsmat(lvol, mn, lrad) + + + Defined at spsmat.fpp lines 12-421 + + Parameters + ---------- + lvol : int + mn : int + lrad : int + + """ + _spec.f90wrap_spsmat(lvol=lvol, mn=mn, lrad=lrad) + +def push_back(iq, nq, nn, va, vd, vja, qa, qd, qja): + """ + push_back(iq, nq, nn, va, vd, vja, qa, qd, qja) + + + Defined at spsmat.fpp lines 425-446 + + Parameters + ---------- + iq : int + nq : int array + nn : int + va : float + vd : float + vja : int + qa : float array + qd : float array + qja : int array + + """ + _spec.f90wrap_push_back(iq=iq, nq=nq, nn=nn, va=va, vd=vd, vja=vja, qa=qa, \ + qd=qd, qja=qja) + +def clean_queue(nq, nn, qa, qd, qja): + """ + clean_queue(nq, nn, qa, qd, qja) + + + Defined at spsmat.fpp lines 448-459 + + Parameters + ---------- + nq : int array + nn : int + qa : float array + qd : float array + qja : int array + + """ + _spec.f90wrap_clean_queue(nq=nq, nn=nn, qa=qa, qd=qd, qja=qja) + +def addline(nq, nn, qa, qd, qja, ns, nrow, dmas, dmds, jdmas, idmas): + """ + addline(nq, nn, qa, qd, qja, ns, nrow, dmas, dmds, jdmas, idmas) + + + Defined at spsmat.fpp lines 461-477 + + Parameters + ---------- + nq : int array + nn : int + qa : float array + qd : float array + qja : int array + ns : int + nrow : int + dmas : float array + dmds : float array + jdmas : int array + idmas : int array + + """ + _spec.f90wrap_addline(nq=nq, nn=nn, qa=qa, qd=qd, qja=qja, ns=ns, nrow=nrow, \ + dmas=dmas, dmds=dmds, jdmas=jdmas, idmas=idmas) + +def 
spsint(lquad, mn, lvol, lrad): + """ + spsint(lquad, mn, lvol, lrad) + + + Defined at spsint.fpp lines 12-220 + + Parameters + ---------- + lquad : int + mn : int + lvol : int + lrad : int + + """ + _spec.f90wrap_spsint(lquad=lquad, mn=mn, lvol=lvol, lrad=lrad) + +def mp00ac(ndof, xdof, fdof, ddof, ldfjac, iflag): + """ + mp00ac(ndof, xdof, fdof, ddof, ldfjac, iflag) + + + Defined at mp00ac.fpp lines 89-811 + + Parameters + ---------- + ndof : int + xdof : float array + fdof : float array + ddof : float array + ldfjac : int + iflag : int + + """ + _spec.f90wrap_mp00ac(ndof=ndof, xdof=xdof, fdof=fdof, ddof=ddof, ldfjac=ldfjac, \ + iflag=iflag) + +def rungmres(n, nrestart, mu, vvol, rhs, sol, ipar, fpar, wk, nw, guess, a, au, \ + jau, ju, iperm, ierr): + """ + rungmres(n, nrestart, mu, vvol, rhs, sol, ipar, fpar, wk, nw, guess, a, au, jau, \ + ju, iperm, ierr) + + + Defined at mp00ac.fpp lines 814-868 + + Parameters + ---------- + n : int + nrestart : int + mu : float + vvol : int + rhs : float array + sol : float array + ipar : int array + fpar : float array + wk : float array + nw : int + guess : float array + a : float array + au : float array + jau : int array + ju : int array + iperm : int array + ierr : int + + """ + _spec.f90wrap_rungmres(n=n, nrestart=nrestart, mu=mu, vvol=vvol, rhs=rhs, \ + sol=sol, ipar=ipar, fpar=fpar, wk=wk, nw=nw, guess=guess, a=a, au=au, \ + jau=jau, ju=ju, iperm=iperm, ierr=ierr) + +def matvec(n, x, ax, a, mu, vvol): + """ + matvec(n, x, ax, a, mu, vvol) + + + Defined at mp00ac.fpp lines 870-893 + + Parameters + ---------- + n : int + x : float array + ax : float array + a : float array + mu : float + vvol : int + + """ + _spec.f90wrap_matvec(n=n, x=x, ax=ax, a=a, mu=mu, vvol=vvol) + +def prec_solve(n, vecin, vecout, au, jau, ju, iperm): + """ + prec_solve(n, vecin, vecout, au, jau, ju, iperm) + + + Defined at mp00ac.fpp lines 895-907 + + Parameters + ---------- + n : int + vecin : float array + vecout : float array + au : float array + jau : int array + ju : int array + iperm : int array + + """ + _spec.f90wrap_prec_solve(n=n, vecin=vecin, vecout=vecout, au=au, jau=jau, ju=ju, \ + iperm=iperm) + +def ma02aa(lvol, nn): + """ + ma02aa(lvol, nn) + + + Defined at ma02aa.fpp lines 16-673 + + Parameters + ---------- + lvol : int + nn : int + + """ + _spec.f90wrap_ma02aa(lvol=lvol, nn=nn) + +def packab(packorunpack, lvol, nn, solution, ideriv): + """ + packab(packorunpack, lvol, nn, solution, ideriv) + + + Defined at packab.fpp lines 28-197 + + Parameters + ---------- + packorunpack : str + lvol : int + nn : int + solution : float array + ideriv : int + + """ + _spec.f90wrap_packab(packorunpack=packorunpack, lvol=lvol, nn=nn, \ + solution=solution, ideriv=ideriv) + +def tr00ab(lvol, mn, nn, nt, nz, iflag, ldiota): + """ + tr00ab(lvol, mn, nn, nt, nz, iflag, ldiota) + + + Defined at tr00ab.fpp lines 58-503 + + Parameters + ---------- + lvol : int + mn : int + nn : int + nt : int + nz : int + iflag : int + ldiota : float array + + """ + _spec.f90wrap_tr00ab(lvol=lvol, mn=mn, nn=nn, nt=nt, nz=nz, iflag=iflag, \ + ldiota=ldiota) + +def curent(lvol, mn, nt, nz, iflag, lditgp): + """ + curent(lvol, mn, nt, nz, iflag, lditgp) + + + Defined at curent.fpp lines 54-163 + + Parameters + ---------- + lvol : int + mn : int + nt : int + nz : int + iflag : int + lditgp : float array + + """ + _spec.f90wrap_curent(lvol=lvol, mn=mn, nt=nt, nz=nz, iflag=iflag, lditgp=lditgp) + +def df00ab(pnn, xi, fxi, dfxi, ldfjac, iflag): + """ + df00ab(pnn, xi, fxi, dfxi, ldfjac, iflag) + + + 
Defined at df00ab.fpp lines 16-82 + + Parameters + ---------- + pnn : int + xi : float array + fxi : float array + dfxi : float array + ldfjac : int + iflag : int + + """ + _spec.f90wrap_df00ab(pnn=pnn, xi=xi, fxi=fxi, dfxi=dfxi, ldfjac=ldfjac, \ + iflag=iflag) + +def lforce(lvol, iocons, ideriv, ntz, dbb, xx, yy, length, ddl, mml, iflag): + """ + lforce(lvol, iocons, ideriv, ntz, dbb, xx, yy, length, ddl, mml, iflag) + + + Defined at lforce.fpp lines 125-295 + + Parameters + ---------- + lvol : int + iocons : int + ideriv : int + ntz : int + dbb : float array + xx : float array + yy : float array + length : float array + ddl : float + mml : float + iflag : int + + """ + _spec.f90wrap_lforce(lvol=lvol, iocons=iocons, ideriv=ideriv, ntz=ntz, dbb=dbb, \ + xx=xx, yy=yy, length=length, ddl=ddl, mml=mml, iflag=iflag) + +def intghs(lquad, mn, lvol, lrad, idx): + """ + intghs(lquad, mn, lvol, lrad, idx) + + + Defined at intghs.fpp lines 52-252 + + Parameters + ---------- + lquad : int + mn : int + lvol : int + lrad : int + idx : int + + """ + _spec.f90wrap_intghs(lquad=lquad, mn=mn, lvol=lvol, lrad=lrad, idx=idx) + +def intghs_workspace_init(lvol): + """ + intghs_workspace_init(lvol) + + + Defined at intghs.fpp lines 255-404 + + Parameters + ---------- + lvol : int + + """ + _spec.f90wrap_intghs_workspace_init(lvol=lvol) + +def intghs_workspace_destroy(): + """ + intghs_workspace_destroy() + + + Defined at intghs.fpp lines 406-520 + + + """ + _spec.f90wrap_intghs_workspace_destroy() + +def mtrxhs(lvol, mn, lrad, resulta, resultd, idx): + """ + mtrxhs(lvol, mn, lrad, resulta, resultd, idx) + + + Defined at mtrxhs.fpp lines 12-204 + + Parameters + ---------- + lvol : int + mn : int + lrad : int + resulta : float array + resultd : float array + idx : int + + """ + _spec.f90wrap_mtrxhs(lvol=lvol, mn=mn, lrad=lrad, resulta=resulta, \ + resultd=resultd, idx=idx) + +def lbpol(lvol, bt00, ideriv, iocons): + """ + lbpol(lvol, bt00, ideriv, iocons) + + + Defined at lbpol.fpp lines 28-127 + + Parameters + ---------- + lvol : int + bt00 : float array + ideriv : int + iocons : int + + """ + _spec.f90wrap_lbpol(lvol=lvol, bt00=bt00, ideriv=ideriv, iocons=iocons) + +def brcast(lvol): + """ + brcast(lvol) + + + Defined at brcast.fpp lines 25-228 + + Parameters + ---------- + lvol : int + + """ + _spec.f90wrap_brcast(lvol=lvol) + +def dfp100(ndofgl, x, fvec, lcomputederivatives): + """ + dfp100(ndofgl, x, fvec, lcomputederivatives) + + + Defined at dfp100.fpp lines 31-311 + + Parameters + ---------- + ndofgl : int + x : float array + fvec : float array + lcomputederivatives : bool + + ------ + vvol: loop index on volumes + Ndofgl: Input parameter necessary for the use of hybrd1. Unused otherwise. + iflag: Flag changed by hybrd1 + cpu_send_one, cpu_send_two: CPU IDs, used for MPI communications + status: MPI status + Fvec: Global constraint values + x: Degrees of freedom of hybrd1. 
For now contains only the poloidal flux + """ + _spec.f90wrap_dfp100(ndofgl=ndofgl, x=x, fvec=fvec, \ + lcomputederivatives=lcomputederivatives) + +def dfp200(lcomputederivatives, vvol): + """ + dfp200(lcomputederivatives, vvol) + + + Defined at dfp200.fpp lines 38-781 + + Parameters + ---------- + lcomputederivatives : bool + vvol : int + + """ + _spec.f90wrap_dfp200(lcomputederivatives=lcomputederivatives, vvol=vvol) + +def get_lu_beltrami_matrices(vvol, obi, nn): + """ + get_lu_beltrami_matrices(vvol, obi, nn) + + + Defined at dfp200.fpp lines 786-891 + + Parameters + ---------- + vvol : int + obi : Matrixlu + nn : int + + """ + _spec.f90wrap_get_lu_beltrami_matrices(vvol=vvol, obi=obi._handle, nn=nn) + +def get_perturbed_solution(vvol, obi, nn): + """ + get_perturbed_solution(vvol, obi, nn) + + + Defined at dfp200.fpp lines 894-961 + + Parameters + ---------- + vvol : int + obi : Matrixlu + nn : int + + ------ + """ + _spec.f90wrap_get_perturbed_solution(vvol=vvol, obi=obi._handle, nn=nn) + +def evaluate_dmupfdx(innout, idof, ii, issym, irz): + """ + evaluate_dmupfdx(innout, idof, ii, issym, irz) + + + Defined at dfp200.fpp lines 964-1303 + + Parameters + ---------- + innout : int + idof : int + ii : int + issym : int + irz : int + + """ + _spec.f90wrap_evaluate_dmupfdx(innout=innout, idof=idof, ii=ii, issym=issym, \ + irz=irz) + +def evaluate_dbb(lvol, idof, innout, issym, irz, ii, dbb, xx, yy, length, drr, \ + dzz, dii, dll, dpp, ntz): + """ + evaluate_dbb(lvol, idof, innout, issym, irz, ii, dbb, xx, yy, length, drr, dzz, \ + dii, dll, dpp, ntz) + + + Defined at dfp200.fpp lines 1306-1615 + + Parameters + ---------- + lvol : int + idof : int + innout : int + issym : int + irz : int + ii : int + dbb : float array + xx : float array + yy : float array + length : float array + drr : float array + dzz : float array + dii : float array + dll : float array + dpp : float array + ntz : int + + ------ + """ + _spec.f90wrap_evaluate_dbb(lvol=lvol, idof=idof, innout=innout, issym=issym, \ + irz=irz, ii=ii, dbb=dbb, xx=xx, yy=yy, length=length, drr=drr, dzz=dzz, \ + dii=dii, dll=dll, dpp=dpp, ntz=ntz) + +def dforce(ngdof, position, force, lcomputederivatives, lcomputeaxis): + """ + dforce(ngdof, position, force, lcomputederivatives, lcomputeaxis) + + + Defined at dforce.fpp lines 80-656 + + Parameters + ---------- + ngdof : int + position : float array + force : float array + lcomputederivatives : bool + lcomputeaxis : bool + + """ + _spec.f90wrap_dforce(ngdof=ngdof, position=position, force=force, \ + lcomputederivatives=lcomputederivatives, lcomputeaxis=lcomputeaxis) + +def newton(ngdof, position): + """ + ihybrd = newton(ngdof, position) + + + Defined at newton.fpp lines 43-356 + + Parameters + ---------- + ngdof : int + position : float array + + Returns + ------- + ihybrd : int + + """ + ihybrd = _spec.f90wrap_newton(ngdof=ngdof, position=position) + return ihybrd + +def writereadgf(readorwrite, ngdof): + """ + ireadhessian = writereadgf(readorwrite, ngdof) + + + Defined at newton.fpp lines 360-485 + + Parameters + ---------- + readorwrite : str + ngdof : int + + Returns + ------- + ireadhessian : int + + """ + ireadhessian = _spec.f90wrap_writereadgf(readorwrite=readorwrite, ngdof=ngdof) + return ireadhessian + +def fcn1(ngdof, xx, fvec, irevcm): + """ + fcn1(ngdof, xx, fvec, irevcm) + + + Defined at newton.fpp lines 489-615 + + Parameters + ---------- + ngdof : int + xx : float array + fvec : float array + irevcm : int + + """ + _spec.f90wrap_fcn1(ngdof=ngdof, xx=xx, fvec=fvec, 
irevcm=irevcm) + +def fcn2(ngdof, xx, fvec, fjac, ldfjac, irevcm): + """ + fcn2(ngdof, xx, fvec, fjac, ldfjac, irevcm) + + + Defined at newton.fpp lines 619-782 + + Parameters + ---------- + ngdof : int + xx : float array + fvec : float array + fjac : float array + ldfjac : int + irevcm : int + + """ + _spec.f90wrap_fcn2(ngdof=ngdof, xx=xx, fvec=fvec, fjac=fjac, ldfjac=ldfjac, \ + irevcm=irevcm) + +def casing(teta, zeta, icasing): + """ + gbn = casing(teta, zeta, icasing) + + + Defined at casing.fpp lines 77-207 + + Parameters + ---------- + teta : float + zeta : float + icasing : int + + Returns + ------- + gbn : float + + """ + gbn = _spec.f90wrap_casing(teta=teta, zeta=zeta, icasing=icasing) + return gbn + +def dvcfield(ndim, tz, nfun, vcintegrand): + """ + dvcfield(ndim, tz, nfun, vcintegrand) + + + Defined at casing.fpp lines 234-442 + + Parameters + ---------- + ndim : int + tz : float array + nfun : int + vcintegrand : float array + + """ + _spec.f90wrap_dvcfield(ndim=ndim, tz=tz, nfun=nfun, vcintegrand=vcintegrand) + +def bnorml(mn, ntz, efmn, ofmn): + """ + bnorml(mn, ntz, efmn, ofmn) + + + Defined at bnorml.fpp lines 61-289 + + Parameters + ---------- + mn : int + ntz : int + efmn : float array + ofmn : float array + + """ + _spec.f90wrap_bnorml(mn=mn, ntz=ntz, efmn=efmn, ofmn=ofmn) + +def vcintegrand(lteta, lzeta): + """ + vcintegrand = vcintegrand(lteta, lzeta) + + + Defined at bnorml.fpp lines 370-559 + + Parameters + ---------- + lteta : float + lzeta : float + + Returns + ------- + vcintegrand : float + + """ + vcintegrand = _spec.f90wrap_vcintegrand(lteta=lteta, lzeta=lzeta) + return vcintegrand + +def zetalow(teta): + """ + zetalow = zetalow(teta) + + + Defined at bnorml.fpp lines 563-574 + + Parameters + ---------- + teta : float + + Returns + ------- + zetalow : float + + """ + zetalow = _spec.f90wrap_zetalow(teta=teta) + return zetalow + +def zetaupp(teta): + """ + zetaupp = zetaupp(teta) + + + Defined at bnorml.fpp lines 578-589 + + Parameters + ---------- + teta : float + + Returns + ------- + zetaupp : float + + """ + zetaupp = _spec.f90wrap_zetaupp(teta=teta) + return zetaupp + +def jo00aa(lvol, ntz, lquad, mn): + """ + jo00aa(lvol, ntz, lquad, mn) + + + Defined at jo00aa.fpp lines 46-374 + + Parameters + ---------- + lvol : int + ntz : int + lquad : int + mn : int + + """ + _spec.f90wrap_jo00aa(lvol=lvol, ntz=ntz, lquad=lquad, mn=mn) + +def pp00aa(): + """ + pp00aa() + + + Defined at pp00aa.fpp lines 69-306 + + + """ + _spec.f90wrap_pp00aa() + +def pp00ab(lvol, sti, nz, nppts, poincaredata, fittedtransform): + """ + utflag = pp00ab(lvol, sti, nz, nppts, poincaredata, fittedtransform) + + + Defined at pp00ab.fpp lines 33-159 + + Parameters + ---------- + lvol : int + sti : float array + nz : int + nppts : int + poincaredata : float array + fittedtransform : float array + + Returns + ------- + utflag : int + + """ + utflag = _spec.f90wrap_pp00ab(lvol=lvol, sti=sti, nz=nz, nppts=nppts, \ + poincaredata=poincaredata, fittedtransform=fittedtransform) + return utflag + +def bfield(zeta, st, bst): + """ + bfield(zeta, st, bst) + + + Defined at bfield.fpp lines 30-137 + + Parameters + ---------- + zeta : float + st : float array + bst : float array + + """ + _spec.f90wrap_bfield(zeta=zeta, st=st, bst=bst) + +def bfield_tangent(zeta, st, bst): + """ + bfield_tangent(zeta, st, bst) + + + Defined at bfield.fpp lines 140-277 + + Parameters + ---------- + zeta : float + st : float array + bst : float array + + """ + _spec.f90wrap_bfield_tangent(zeta=zeta, st=st, bst=bst) + +def 
stzxyz(lvol, stz, rpz): + """ + stzxyz(lvol, stz, rpz) + + + Defined at stzxyz.fpp lines 22-119 + + Parameters + ---------- + lvol : int + stz : float array + rpz : float array + + """ + _spec.f90wrap_stzxyz(lvol=lvol, stz=stz, rpz=rpz) + +def hesian(ngdof, position, mvol, mn, lgdof): + """ + hesian(ngdof, position, mvol, mn, lgdof) + + + Defined at hesian.fpp lines 17-556 + + Parameters + ---------- + ngdof : int + position : float array + mvol : int + mn : int + lgdof : int + + """ + _spec.f90wrap_hesian(ngdof=ngdof, position=position, mvol=mvol, mn=mn, \ + lgdof=lgdof) + +def ra00aa(writeorread): + """ + ra00aa(writeorread) + + + Defined at ra00aa.fpp lines 37-282 + + Parameters + ---------- + writeorread : str + + """ + _spec.f90wrap_ra00aa(writeorread=writeorread) + +def gi00ab(mpol, ntor, nfp, mn, im, in_): + """ + gi00ab(mpol, ntor, nfp, mn, im, in_) + + + Defined at numrec.fpp lines 48-64 + + Parameters + ---------- + mpol : int + ntor : int + nfp : int + mn : int + im : int array + in_ : int array + + """ + _spec.f90wrap_gi00ab(mpol=mpol, ntor=ntor, nfp=nfp, mn=mn, im=im, in_=in_) + +def getimn(mpol, ntor, nfp, mi, ni): + """ + idx = getimn(mpol, ntor, nfp, mi, ni) + + + Defined at numrec.fpp lines 67-78 + + Parameters + ---------- + mpol : int + ntor : int + nfp : int + mi : int + ni : int + + Returns + ------- + idx : int + + """ + idx = _spec.f90wrap_getimn(mpol=mpol, ntor=ntor, nfp=nfp, mi=mi, ni=ni) + return idx + +def tfft(nt, nz, ijreal, ijimag, mn, im, in_, efmn, ofmn, cfmn, sfmn, ifail): + """ + tfft(nt, nz, ijreal, ijimag, mn, im, in_, efmn, ofmn, cfmn, sfmn, ifail) + + + Defined at numrec.fpp lines 94-140 + + Parameters + ---------- + nt : int + nz : int + ijreal : float array + ijimag : float array + mn : int + im : int array + in_ : int array + efmn : float array + ofmn : float array + cfmn : float array + sfmn : float array + ifail : int + + """ + _spec.f90wrap_tfft(nt=nt, nz=nz, ijreal=ijreal, ijimag=ijimag, mn=mn, im=im, \ + in_=in_, efmn=efmn, ofmn=ofmn, cfmn=cfmn, sfmn=sfmn, ifail=ifail) + +def invfft(mn, im, in_, efmn, ofmn, cfmn, sfmn, nt, nz, ijreal, ijimag): + """ + invfft(mn, im, in_, efmn, ofmn, cfmn, sfmn, nt, nz, ijreal, ijimag) + + + Defined at numrec.fpp lines 148-176 + + Parameters + ---------- + mn : int + im : int array + in_ : int array + efmn : float array + ofmn : float array + cfmn : float array + sfmn : float array + nt : int + nz : int + ijreal : float array + ijimag : float array + + """ + _spec.f90wrap_invfft(mn=mn, im=im, in_=in_, efmn=efmn, ofmn=ofmn, cfmn=cfmn, \ + sfmn=sfmn, nt=nt, nz=nz, ijreal=ijreal, ijimag=ijimag) + +def gauleg(n, weight, abscis): + """ + ifail = gauleg(n, weight, abscis) + + + Defined at numrec.fpp lines 184-224 + + Parameters + ---------- + n : int + weight : float array + abscis : float array + + Returns + ------- + ifail : int + + """ + ifail = _spec.f90wrap_gauleg(n=n, weight=weight, abscis=abscis) + return ifail + +def read_command_args(): + """ + read_command_args() + + + Defined at xspech.fpp lines 150-206 + + + """ + _spec.f90wrap_read_command_args() + +def spec(): + """ + spec() + + + Defined at xspech.fpp lines 210-671 + + + """ + _spec.f90wrap_spec() + +def final_diagnostics(): + """ + final_diagnostics() + + + Defined at xspech.fpp lines 681-856 + + + """ + _spec.f90wrap_final_diagnostics() + +def ending(): + """ + ending() + + + Defined at xspech.fpp lines 860-1196 + + + """ + _spec.f90wrap_ending() + diff --git a/Utilities/python_wrapper/spec/__init__.py b/Utilities/python_wrapper/spec/__init__.py index 
183e796b..8249f03e 100644 --- a/Utilities/python_wrapper/spec/__init__.py +++ b/Utilities/python_wrapper/spec/__init__.py @@ -7,4 +7,4 @@ if not path_to_spec_f90wrapped in sys.path: sys.path.append(path_to_spec_f90wrapped) -# import spec_f90wrapped as spec +#import spec_f90wrapped as spec diff --git a/setup.py b/setup.py index 8af1e4f2..ca2f8982 100644 --- a/setup.py +++ b/setup.py @@ -27,7 +27,6 @@ # Include additional parameters from CMAKE_ARGS environment variable. # This is the way Anaconda tells CMake its specific needs. if 'CMAKE_ARGS' in os.environ: - print("CMAKE_ARGS = '%s'"%(os.environ['CMAKE_ARGS'])) for cmake_arg in os.environ['CMAKE_ARGS'].split(" "): d['cmake_args'].append(cmake_arg) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index fcc9bfb3..fa094b83 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,85 +1,6 @@ - -# ATM the sources are added as it is. For some files, preprocessing is required. Use custom command to do preprocessing once it is understood. - -set(MACROS ${CMAKE_CURRENT_SOURCE_DIR}/macros) - -function(preprocess_m4 outvar) - message(STATUS "preprocess_fortran arguments: ${outvar}, followed by ${ARGN}") - set(srcs) - foreach(f ${ARGN}) - message(STATUS "Got file: ${f}") - # construct output filename - if(NOT IS_ABSOLUTE "${f}") - get_filename_component(f "${f}" ABSOLUTE) - endif() - file(RELATIVE_PATH r "${CMAKE_CURRENT_SOURCE_DIR}" "${f}") - get_filename_component(e "${r}" EXT) - get_filename_component(n "${r}" NAME_WE) - get_filename_component(p "${r}" PATH) - string(TOUPPER "${e}" e) - set(of "${CMAKE_CURRENT_BINARY_DIR}/${n}_m${e}") - #set(of "${n}_m${e}") - message(STATUS "Output name: ${of}") - # preprocess the thing - add_custom_command(OUTPUT ${of} - COMMAND m4 - ARGS -P ${MACROS} ${f} > ${of} - DEPENDS ${MACROS} ${f} - COMMENT "Preprocessing ${f}" - VERBATIM - ) - list(APPEND srcs "${of}") - endforeach() - # return the (preprocessed) sources - set(${outvar} "${srcs}" PARENT_SCOPE) -endfunction() - -set(unprocessed_src_files - manual.f90 - rzaxis.f90 - packxi.f90 - volume.f90 - coords.f90 - basefn.f90 - memory.f90 - metrix.f90 - ma00aa.f90 - matrix.f90 - spsmat.f90 - spsint.f90 - mp00ac.f90 - ma02aa.f90 - packab.f90 - tr00ab.f90 - curent.f90 - df00ab.f90 - lforce.f90 - intghs.f90 - mtrxhs.f90 - lbpol.f90 - brcast.f90 - dfp100.f90 - dfp200.f90 - dforce.f90 - newton.f90 - casing.f90 - bnorml.f90 - jo00aa.f90 - pp00aa.f90 - pp00ab.f90 - bfield.f90 - stzxyz.f90 - hesian.f90 - ra00aa.f90 - numrec.f90 - preset.f90 - #sphdf5.f90 - global.f90 - inputlist.f90 - #${CMAKE_CURRENT_SOURCE_DIR}/xspech.f90 -) # below assumes the .f files are double precision; the CFLAGS = -r8 option is not required; - -set(f_src_files +# below assumes the .f files are double precision +# the CFLAGS = -r8 option is not required +set(src_spec_contrib ${CMAKE_CURRENT_SOURCE_DIR}/dcuhre.f ${CMAKE_CURRENT_SOURCE_DIR}/minpack.f ${CMAKE_CURRENT_SOURCE_DIR}/iqpack.f @@ -90,134 +11,67 @@ set(f_src_files ${CMAKE_CURRENT_SOURCE_DIR}/iters.f ) -preprocess_m4(srcs ${unprocessed_src_files}) - -set(SPHDF5_FILE "${CMAKE_CURRENT_SOURCE_DIR}/sphdf5.f90") -set(SPHDF5_AWK_FILE "${CMAKE_CURRENT_SOURCE_DIR}/msphdf5.f90") -message(STATUS "sphdf5_FILE is ${SPHDF5_FILE}") -if(AWK MATCHES ".+-NOTFOUND") - message(FATAL_ERROR "FATAL: awk (and mawk and gawk) could not be found (${AWK}).") -else() - execute_process( - COMMAND /bin/sh -c "\"${AWK}\" -v file=sphdf5.f90 \ - '{ gsub(\"__LINE__\", NR); gsub(\"__FILE__\",file); print }' \ - \"${SPHDF5_FILE}\"" - RESULT_VARIABLE AWK_EXITCODE - 
OUTPUT_FILE "${SPHDF5_AWK_FILE}" - ) - message(STATUS "Exit code from awk: ${AWK_EXITCODE}") -endif() -preprocess_m4(sphdf5_src ${SPHDF5_AWK_FILE}) -message(STATUS "sphdf5_src is ${sphdf5_src}") - -#set(srcs) -#foreach(f ${unprocessed_src_files}) -# # is it a Fortran file? -# if(f MATCHES "\\.[Ff](9[05])?") -# message(STATUS "Got fortran file: ${f}") -# # construct output filename -# if(NOT IS_ABSOLUTE "${f}") -# get_filename_component(f "${f}" ABSOLUTE) -# endif() -# file(RELATIVE_PATH r "${CMAKE_CURRENT_SOURCE_DIR}" "${f}") -# get_filename_component(e "${r}" EXT) -# get_filename_component(n "${r}" NAME_WE) -# get_filename_component(p "${r}" PATH) -# #set(of1 "${n}_m${e}") -# set(of "${CMAKE_CURRENT_BINARY_DIR}/${n}${e}") -# #set(of "${n}_m${e}") -# message(STATUS "Output name: ${of}") -# # preprocess the thing -# add_custom_command(OUTPUT ${of} -# COMMAND m4 -# ARGS -P ${MACROS} ${f} > ${of} -# DEPENDS ${MACROS} ${f} -# COMMENT "Preprocessing ${f}" -# #VERBATIM -# ) -# list(APPEND srcs "${of}") -# endif() -#endforeach() - - -#set_source_files_properties(${m4_output_files} -#set_source_files_properties(${src} -# PROPERTIES GENERATED true) - -#add_custom_target(m4_process ALL - #DEPENDS ${m4_output_files} -# DEPENDS ${src} -#) -#add_dependencies(spec m4_process) - - -list(APPEND srcs "${sphdf5_src}") - -add_library(spec3p OBJECT ${f_src_files}) -target_compile_options(spec3p - PRIVATE - "-cpp" - $<$:-ffree-line-length-none> +add_library(spec_contrib OBJECT ${src_spec_contrib}) +target_compile_options(spec_contrib + PRIVATE + #"-cpp" + #$<$:-ffree-line-length-none> # $<$:-fdefault-real-8> $<$:-fbounds-check> - # $<$:-std=legacy> + $<$:-std=legacy> $<$:-fexternal-blas> # $<$:-r8> ) -set(ALLFILES manual rzaxis packxi volume coords basefn memory metrix ma00aa matrix spsmat spsint mp00ac ma02aa packab tr00ab curent df00ab lforce intghs mtrxhs lbpol brcast dfp100 dfp200 dforce newton casing bnorml jo00aa pp00aa pp00ab bfield stzxyz hesian ra00aa numrec dcuhre minpack iqpack rksuite i1mach d1mach ilut iters sphdf5 preset global xspech) -string(REPLACE ";" " " ALLFILES_STR "${ALLFILES}") - -# Build spec executable -set(XSPEC_FILE "${CMAKE_CURRENT_SOURCE_DIR}/xspech.f90") -set(XSPEC_AWK_FILE "${CMAKE_CURRENT_SOURCE_DIR}/mxspech.f90") - -if(AWK MATCHES ".+-NOTFOUND") - message(FATAL_ERROR "FATAL: awk (and mawk and gawk) could not be found (${AWK}).") -else() - execute_process( - COMMAND bash -c "date" - OUTPUT_VARIABLE DATE - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - execute_process( - COMMAND /bin/sh -c "\"${AWK}\" -v date=\"${DATE}\" -v pwd=\"$ENV{PWD}\" -v macros=\"${MACROS}\" \ - -v fc=\"${CMAKE_Fortran_COMPILER}\" -v flags=\"${COMP_DEFS}\" -v allfiles=\"${ALLFILES_STR}\" \ - 'BEGIN{nfiles=split(allfiles,files,\" \")} \ - {if($2==\"COMPILATION\") { \ - print \" write(ounit,*)\\\" : compiled : date = \"date\" ; \\\"\" ; \ - print \" write(ounit,*)\\\" : : srcdir = \"pwd\" ; \\\"\" ; \ - print \" write(ounit,*)\\\" : : macros = \"macros\" ; \\\"\" ; \ - print \" write(ounit,*)\\\" : : fc = \"fc\" ; \\\"\" ; \ - print \" write(ounit,*)\\\" : : flags = \"flags\" ; \\\"\" }} \ - {if($2==\"SUMTIME\") {for (i=1;i<=nfiles;i++) print \" SUMTIME(\"files[i]\")\"}}\ - {if($2==\"PRTTIME\") {for (i=1;i<=nfiles;i++) print \" PRTTIME(\"files[i]\")\"}}\ - {print}' \"${XSPEC_FILE}\"" - RESULT_VARIABLE AWK_EXITCODE - OUTPUT_FILE "${XSPEC_AWK_FILE}" - ) - message(STATUS "Exit code from awk: ${AWK_EXITCODE}") -endif() - -preprocess_m4(XSPEC_OUT_FILE ${XSPEC_AWK_FILE}) -message(STATUS "XSPEC_OUT_FILE is 
${XSPEC_OUT_FILE}") - -list(APPEND srcs "${XSPEC_OUT_FILE}") -if(SKBUILD) - set(fortran_src_files "${srcs}" PARENT_SCOPE) -endif() -message(STATUS "srcs variable is ${srcs}") - - -add_library(spec - ${srcs} "$" +set(src_spec + ${CMAKE_CURRENT_SOURCE_DIR}/basefn.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/bfield.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/bnorml.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/brcast.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/casing.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/coords.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/curent.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/df00ab.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/dforce.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/dfp100.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/dfp200.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/global.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/h5utils.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/hesian.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/inputlist.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/intghs.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/jo00aa.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/lbpol.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/lforce.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/ma00aa.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/ma02aa.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/manual.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/matrix.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/memory.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/metrix.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/mod_kinds.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/mp00ac.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/mtrxhs.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/newton.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/numrec.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/packab.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/packxi.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/pp00aa.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/pp00ab.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/preset.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/ra00aa.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/rzaxis.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/sphdf5.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/spsint.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/spsmat.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/stzxyz.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/tr00ab.F90 + ${CMAKE_CURRENT_SOURCE_DIR}/volume.F90 ) - -#target_compile_options(spec PUBLIC "-cpp") -# For gfortran set the -ffree-line-length-none option +add_library(spec ${src_spec} "$") target_compile_options(spec - PUBLIC + PUBLIC "-cpp" $<$:-ffree-line-length-none> $<$:-fdefault-real-8> @@ -227,6 +81,11 @@ target_compile_options(spec $<$:-r8> ) +if(SKBUILD) + set(fortran_src_files "${src_spec}" PARENT_SCOPE) +endif() +message(STATUS "fortran_src_files variable is ${fortran_src_files}") + set_target_properties (spec PROPERTIES Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/spec_modules) target_include_directories(spec PUBLIC ${CMAKE_Fortran_MODULE_DIRECTORY}/spec_modules) @@ -239,8 +98,6 @@ endif() if (OpenMP_Fortran_FOUND AND NOT SKBUILD) target_link_libraries(spec PUBLIC OpenMP::OpenMP_Fortran) target_compile_definitions(spec PUBLIC OPENMP) - target_link_libraries(spec3p PUBLIC OpenMP::OpenMP_Fortran) - target_compile_definitions(spec3p PUBLIC OPENMP) endif() # Add threads @@ -260,12 +117,12 @@ target_include_directories(spec PUBLIC ${HDF5_C_INCLUDE_DIRS} ${HDF5_Fortran_INC # Add FFTW, LAPACK and BLAS libraries. 
# MKL could be used for all the three -target_link_libraries(spec - PUBLIC +target_link_libraries(spec + PUBLIC ${FFTW_LIBRARIES} ${LAPACK_LIBRARIES} ${BLAS_LIBRARIES} ) -target_include_directories(spec - PUBLIC +target_include_directories(spec + PUBLIC ${FFTW_INCLUDE_DIRS} ${LAPACK_INCLUDE_DIRS} ${BLAS_INCLUDE_DIRS} ) @@ -273,7 +130,7 @@ target_include_directories(spec #set_target_properties(spec PROPERTIES POSITION_INDEPENDENT_CODE ON) #endif() -get_property(COMP_DEFS +get_property(COMP_DEFS TARGET spec PROPERTY COMPILE_OPTIONS ) @@ -281,19 +138,21 @@ get_property(COMP_DEFS # Get the spec library linker properties and pass them to python wrapper get_target_property(SPEC_LINK_LIB spec LINK_LIBRARIES) message(STATUS "spec linked libraries are ${SPEC_LINK_LIB}") + get_target_property(SPEC_COMPILE_OPTIONS spec COMPILE_OPTIONS) message(STATUS "spec compile options are ${SPEC_COMPILE_OPTIONS}") + get_target_property(SPEC_COMPILE_DEFS spec COMPILE_DEFINITIONS) message(STATUS "spec compile definitions are ${SPEC_COMPILE_DEFS}") # export linker flags for spec to parent scope # for re-use when building python wrapper -if(SKBUILD) +if(SKBUILD) # get_target_property(SPEC_LINK_LIB spec LINK_LIBRARIES) # redundant from debug out above set(SPEC_LINK_LIB ${SPEC_LINK_LIB} PARENT_SCOPE) endif() -add_executable(xspec ${XSPEC_OUT_FILE}) +add_executable(xspec xspech.F90) target_link_libraries(xspec PUBLIC spec) set_target_properties(xspec PROPERTIES POSITION_INDEPENDENT_CODE ON) diff --git a/src/basefn.f90 b/src/basefn.F90 similarity index 89% rename from src/basefn.f90 rename to src/basefn.F90 index 375ff633..72ebecc1 100644 --- a/src/basefn.f90 +++ b/src/basefn.F90 @@ -20,14 +20,14 @@ !> @param[in] lrad radial resolution !> @param[out] cheby the value, first derivative of Chebyshev polynomial subroutine get_cheby(lss, lrad, cheby) - + use mod_kinds, only: wp => dp use constants, only : zero, one, two implicit none - REAL,intent(in) :: lss - INTEGER, intent(in) :: lrad - REAL, intent(inout) :: cheby(0:lrad,0:1) + real(wp),intent(in) :: lss + integer, intent(in) :: lrad + real(wp), intent(inout) :: cheby(0:lrad,0:1) integer :: ll @@ -57,14 +57,14 @@ end subroutine get_cheby !> @param[in] lrad radial resolution !> @param[out] cheby the value, first and second derivative of Chebyshev polynomial subroutine get_cheby_d2(lss, lrad, cheby) - + use mod_kinds, only: wp => dp use constants, only : zero, one, two implicit none - REAL,intent(in) :: lss - INTEGER, intent(in) :: lrad - REAL, intent(inout) :: cheby(0:lrad,0:2) + real(wp),intent(in) :: lss + integer, intent(in) :: lrad + real(wp), intent(inout) :: cheby(0:lrad,0:2) integer :: ll @@ -128,18 +128,18 @@ end subroutine get_cheby_d2 !> @param[in] mpol poloidal resolution !> @param[out] zernike the value, first derivative of Zernike polynomial subroutine get_zernike(r, lrad, mpol, zernike) - + use mod_kinds, only: wp => dp use constants, only : zero, one, two implicit none - REAL,intent(in) :: r - INTEGER, intent(in) :: lrad, mpol - REAL, intent(inout) :: zernike(0:lrad,0:mpol,0:1) + real(wp),intent(in) :: r + integer, intent(in) :: lrad, mpol + real(wp), intent(inout) :: zernike(0:lrad,0:mpol,0:1) - REAL :: rm, rm1 ! r to the power of m'th and m-1'th - REAL :: factor1, factor2, factor3, factor4 - INTEGER :: m, n ! Zernike R^m_n + real(wp) :: rm, rm1 ! r to the power of m'th and m-1'th + real(wp) :: factor1, factor2, factor3, factor4 + integer :: m, n ! Zernike R^m_n rm = one ! r to the power of m'th rm1 = zero ! 
r to the power of m-1'th @@ -196,18 +196,18 @@ end subroutine get_zernike !> @param[in] mpol poloidal resolution !> @param[out] zernike the value, first/second derivative of Zernike polynomial subroutine get_zernike_d2(r, lrad, mpol, zernike) - + use mod_kinds, only: wp => dp use constants, only : zero, one, two implicit none - REAL,intent(in) :: r - INTEGER, intent(in) :: lrad, mpol - REAL, intent(inout) :: zernike(0:lrad,0:mpol,0:2) + real(wp),intent(in) :: r + integer, intent(in) :: lrad, mpol + real(wp), intent(inout) :: zernike(0:lrad,0:mpol,0:2) - REAL :: rm, rm1, rm2 ! r to the power of m'th, m-1'th and m-2'th - REAL :: factor1, factor2, factor3, factor4 - INTEGER :: m, n ! Zernike R^m_n + real(wp) :: rm, rm1, rm2 ! r to the power of m'th, m-1'th and m-2'th + real(wp) :: factor1, factor2, factor3, factor4 + integer :: m, n ! Zernike R^m_n rm = one ! r to the power of m'th rm1 = zero ! r to the power of m-1'th @@ -270,17 +270,17 @@ end subroutine get_zernike_d2 !> @param[in] mpol poloidal resolution !> @param[out] zernike the value subroutine get_zernike_rm(r, lrad, mpol, zernike) - + use mod_kinds, only: wp => dp use constants, only : zero, one, two implicit none - REAL,intent(in) :: r - INTEGER, intent(in) :: lrad, mpol - REAL, intent(inout) :: zernike(0:lrad,0:mpol) + real(wp),intent(in) :: r + integer, intent(in) :: lrad, mpol + real(wp), intent(inout) :: zernike(0:lrad,0:mpol) - REAL :: factor1, factor2, factor3, factor4 - INTEGER :: m, n ! Zernike R^m_n + real(wp) :: factor1, factor2, factor3, factor4 + integer :: m, n ! Zernike R^m_n zernike(:,:) = zero do m = 0, mpol diff --git a/src/bfield.f90 b/src/bfield.F90 similarity index 83% rename from src/bfield.f90 rename to src/bfield.F90 index f0d000a5..99fd2f04 100644 --- a/src/bfield.f90 +++ b/src/bfield.F90 @@ -49,7 +49,7 @@ !> @param[in] st radial coordinate \f$s\f$ and poloidal angle \f$\theta\f$ !> @param[out] Bst tangential magnetic field directions \f$B_s, B_\theta\f$ subroutine bfield( zeta, st, Bst ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, half, two @@ -71,23 +71,45 @@ subroutine bfield( zeta, st, Bst ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - REAL, intent(in) :: zeta, st(1:Node) - REAL, intent(out) :: Bst(1:Node) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + - INTEGER :: lvol, ii, ll, mi, ni, ideriv - REAL :: teta, lss, sbar, sbarhm(0:1), arg, carg, sarg, dBu(1:3) - REAL :: cheby(0:Lrad(ivol),0:1), zernike(0:Lrad(1),0:Mpol,0:1) + real(wp), intent(in) :: zeta, st(1:Node) + real(wp), intent(out) :: Bst(1:Node) - REAL :: TT(0:Lrad(ivol),0:1) ! this is almost identical to cheby; 17 Dec 15; + integer :: lvol, ii, ll, mi, ni, ideriv + real(wp) :: teta, lss, sbar, sbarhm(0:1), arg, carg, sarg, dBu(1:3) + real(wp) :: cheby(0:Lrad(ivol),0:1), zernike(0:Lrad(1),0:Mpol,0:1) + + real(wp) :: TT(0:Lrad(ivol),0:1) ! 
this is almost identical to cheby; 17 Dec 15; + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(bfield) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( bfield, ivol.lt.1 .or. ivol.gt.Mvol, invalid ivol ) + + if( ivol.lt.1 .or. ivol.gt.Mvol ) then + write(6,'("bfield : fatal : myid=",i3," ; ivol.lt.1 .or. ivol.gt.Mvol ; invalid ivol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "bfield : ivol.lt.1 .or. ivol.gt.Mvol : invalid ivol ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -124,7 +146,13 @@ subroutine bfield( zeta, st, Bst ) if( Lcoordinatesingularity ) then ! regularization factor depends on mi; 17 Dec 15; - FATAL( bfield, abs(sbar).lt.vsmall, need to avoid divide-by-zero ) + + if( abs(sbar).lt.vsmall ) then + write(6,'("bfield : fatal : myid=",i3," ; abs(sbar).lt.vsmall ; need to avoid divide-by-zero ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "bfield : abs(sbar).lt.vsmall : need to avoid divide-by-zero ;" + endif + do ll = 0, Lrad(lvol) ; TT(ll,0:1) = (/ zernike(ll,mi,0), zernike(ll,mi,1)*half /) enddo @@ -161,12 +189,18 @@ subroutine bfield( zeta, st, Bst ) if( abs(gBzeta).lt.vsmall ) then - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("bfield : ",f10.2," : lvol=",i3," ; zeta="es23.15" ; (s,t)=("es23.15" ,"es23.15" ) ; B^z="es23.15" ;")') & cput-cpus, lvol, zeta, st(1:2), dBu(3) - FATAL( bfield, abs(dBu(3)).lt.vsmall, field is not toroidal ) + + if( abs(dBu(3)).lt.vsmall ) then + write(6,'("bfield : fatal : myid=",i3," ; abs(dBu(3)).lt.vsmall ; field is not toroidal ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "bfield : abs(dBu(3)).lt.vsmall : field is not toroidal ;" + endif + endif @@ -176,7 +210,12 @@ subroutine bfield( zeta, st, Bst ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(bfield) + +9999 continue + cput = MPI_WTIME() + Tbfield = Tbfield + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -188,7 +227,7 @@ end subroutine bfield !> @param[in] st radial(s) and poloidal(theta) positions !> @param[out] Bst tangential magnetic field subroutine bfield_tangent( zeta, st, Bst ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, half, two @@ -210,25 +249,47 @@ subroutine bfield_tangent( zeta, st, Bst ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - REAL, intent(in) :: zeta, st(1:6) - REAL, intent(out) :: Bst(1:6) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + - INTEGER :: lvol, ii, ll, mi, ni, ideriv - REAL :: teta, lss, sbar, sbarhm(0:1), arg, carg, sarg, dBu(1:3,0:2) - REAL :: cheby(0:Lrad(ivol),0:2), zernike(0:Lrad(1),0:Mpol,0:2) + real(wp), intent(in) :: zeta, st(1:6) + real(wp), intent(out) :: Bst(1:6) - REAL :: M(2,2), deltax(2,2) + integer :: lvol, ii, ll, mi, ni, ideriv + real(wp) :: teta, lss, sbar, sbarhm(0:1), arg, carg, sarg, dBu(1:3,0:2) + real(wp) :: cheby(0:Lrad(ivol),0:2), zernike(0:Lrad(1),0:Mpol,0:2) - REAL :: TT(0:Lrad(ivol),0:2) ! this is almost identical to cheby; 17 Dec 15; + real(wp) :: M(2,2), deltax(2,2) + + real(wp) :: TT(0:Lrad(ivol),0:2) ! this is almost identical to cheby; 17 Dec 15; + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(bfield) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( bfield, ivol.lt.1 .or. ivol.gt.Mvol, invalid ivol ) + + if( ivol.lt.1 .or. ivol.gt.Mvol ) then + write(6,'("bfield : fatal : myid=",i3," ; ivol.lt.1 .or. ivol.gt.Mvol ; invalid ivol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "bfield : ivol.lt.1 .or. ivol.gt.Mvol : invalid ivol ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -269,7 +330,13 @@ subroutine bfield_tangent( zeta, st, Bst ) if( Lcoordinatesingularity ) then ! regularization factor depends on mi; 17 Dec 15; - FATAL( bfield, abs(sbar).lt.vsmall, need to avoid divide-by-zero ) + + if( abs(sbar).lt.vsmall ) then + write(6,'("bfield : fatal : myid=",i3," ; abs(sbar).lt.vsmall ; need to avoid divide-by-zero ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "bfield : abs(sbar).lt.vsmall : need to avoid divide-by-zero ;" + endif + do ll = 0, Lrad(lvol) ; TT(ll,0:2) = (/ zernike(ll,mi,0), zernike(ll,mi,1)*half , zernike(ll,mi,2)*half*half /) enddo @@ -324,12 +391,18 @@ subroutine bfield_tangent( zeta, st, Bst ) if( abs(gBzeta).lt.vsmall ) then - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("bfield : ",f10.2," : lvol=",i3," ; zeta="es23.15" ; (s,t)=("es23.15" ,"es23.15" ) ; B^z="es23.15" ;")') & cput-cpus, lvol, zeta, st(1:2), gBzeta - FATAL( bfield, abs(gBzeta).lt.vsmall, field is not toroidal ) + + if( abs(gBzeta).lt.vsmall ) then + write(6,'("bfield : fatal : myid=",i3," ; abs(gBzeta).lt.vsmall ; field is not toroidal ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "bfield : abs(gBzeta).lt.vsmall : field is not toroidal ;" + endif + endif @@ -351,7 +424,12 @@ subroutine bfield_tangent( zeta, st, Bst ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(bfield) + +9999 continue + cput = MPI_WTIME() + Tbfield = Tbfield + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
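The bfield.F90 hunks above illustrate the pattern that the .f90 -> .F90 rename applies across the tree: the old m4 macro layer (LOCALS, BEGIN, FATAL, GETTIME, RETURN) is expanded into plain preprocessor-guarded Fortran, with real(wp) kinds taken from mod_kinds, MPI_WTIME-based per-routine timing, and explicit write/MPI_ABORT/stop error handling. The following is a minimal standalone sketch of that pattern, not part of the patch; the routine name, the guard condition, and MPI_COMM_WORLD are placeholders (SPEC itself uses MPI_COMM_SPEC and per-routine timers such as Tbfield).

subroutine example( lvol )
  use mod_kinds, only: wp => dp      ! double-precision working kind, as introduced by this patch
#ifdef OPENMP
  use OMP_LIB
#endif
  use mpi
  implicit none
  integer, intent(in) :: lvol
  integer             :: ierr, nthreads
  real(wp)            :: cput, cpui, cpuo

  cpui = MPI_WTIME() ; cpuo = cpui   ! replaces BEGIN(example): start the per-routine timer
#ifdef OPENMP
  nthreads = omp_get_max_threads()
#else
  nthreads = 1
#endif

  if( lvol.le.0 ) then               ! replaces FATAL( example, lvol.le.0, error )
    write(6,'("example : fatal : lvol.le.0 ;")')
    call MPI_ABORT( MPI_COMM_WORLD, 1, ierr )
    stop "example : lvol.le.0 ;"
  endif

9999 continue                        ! replaces RETURN(example): stop the timer and exit
  cput = MPI_WTIME()
  return
end subroutine example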
diff --git a/src/bnorml.f90 b/src/bnorml.F90 similarity index 86% rename from src/bnorml.f90 rename to src/bnorml.F90 index 4a47e8fb..ccc20da9 100644 --- a/src/bnorml.f90 +++ b/src/bnorml.F90 @@ -76,7 +76,7 @@ !> @param[out] efmn even Fourier coefficients !> @param[out] ofmn odd Fouier coefficients subroutine bnorml( mn, Ntz, efmn, ofmn ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, two, pi, pi2, ten @@ -100,19 +100,35 @@ subroutine bnorml( mn, Ntz, efmn, ofmn ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: mn, Ntz - REAL , intent(out) :: efmn(1:mn), ofmn(1:mn) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: mn, Ntz + real(wp) , intent(out) :: efmn(1:mn), ofmn(1:mn) - INTEGER :: lvol, Lcurvature, Lparallel, ii, jj, kk, jk, ll, kkmodnp, jkmodnp, ifail, id01daf, nvccalls, icasing, ideriv - REAL :: lss, zeta, teta, cszeta(0:1), tetalow, tetaupp, absacc, gBn - REAL :: Jxyz(1:Ntz,1:3), Bxyz(1:Ntz,1:3), dAt(1:Ntz), dAz(1:Ntz), distance(1:Ntz) + integer :: lvol, Lcurvature, Lparallel, ii, jj, kk, jk, ll, kkmodnp, jkmodnp, ifail, id01daf, nvccalls, icasing, ideriv + real(wp) :: lss, zeta, teta, cszeta(0:1), tetalow, tetaupp, absacc, gBn + real(wp) :: Jxyz(1:Ntz,1:3), Bxyz(1:Ntz,1:3), dAt(1:Ntz), dAz(1:Ntz), distance(1:Ntz) !REAL :: vcintegrand, zetalow, zetaupp ! external :: vcintegrand, zetalow, zetaupp - BEGIN(bnorml) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -153,7 +169,13 @@ subroutine bnorml( mn, Ntz, efmn, ofmn ) case( 1 ) ! Lparallel = 1 ; 09 Mar 17; if( myid.ne.modulo(jk-1,ncpu) ) cycle ! 11 Oct 12; this is a weird parallelization, but perhaps better exploits all available cpus; case default ! Lparallel; 09 Mar 17; - FATAL( bnorml, .true., invalid Lparallel in parallelization loop ) + + if( .true. ) then + write(6,'("bnorml : fatal : myid=",i3," ; .true. ; invalid Lparallel in parallelization loop ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "bnorml : .true. : invalid Lparallel in parallelization loop ;" + endif + end select ! end of select case( Lparallel ) ; 09 Mar 17; tetazeta(1:2) = (/ teta, zeta /) ! this is global; passed through to zetalow & zetaupp; 14 Apr 17; @@ -168,7 +190,12 @@ subroutine bnorml( mn, Ntz, efmn, ofmn ) ! !#endif - WCALL( bnorml, casing, ( teta, zeta, gBn, icasing ) ) ! tetazeta is global; 26 Apr 17; + + cput = MPI_WTIME() + Tbnorml = Tbnorml + ( cput-cpuo ) + call casing( teta, zeta, gBn, icasing ) + cpuo = MPI_WTIME() + ! tetazeta is global; 26 Apr 17; ijreal(jk) = gBn @@ -205,7 +232,9 @@ subroutine bnorml( mn, Ntz, efmn, ofmn ) case( 0 ) ! Lparallel = 0 ; 09 Mar 17; - RlBCAST(ijreal(1+kk*Nt:Nt+kk*Nt),Nt,kkmodnp) ! plasma; 03 Apr 13; + + call MPI_BCAST(ijreal(1+kk*Nt:Nt+kk*Nt),Nt,MPI_DOUBLE_PRECISION,kkmodnp,MPI_COMM_SPEC,ierr) + ! 
plasma; 03 Apr 13; !RlBCAST(ijimag(1+kk*Nt:Nt+kk*Nt),Nt,kkmodnp) !RlBCAST(jireal(1+kk*Nt:Nt+kk*Nt),Nt,kkmodnp) @@ -219,7 +248,9 @@ subroutine bnorml( mn, Ntz, efmn, ofmn ) jkmodnp = modulo(jk-1,ncpu) - RlBCAST(ijreal(jk),1,jkmodnp) ! plasma; 03 Apr 13; + + call MPI_BCAST(ijreal(jk),1,MPI_DOUBLE_PRECISION,jkmodnp,MPI_COMM_SPEC,ierr) + ! plasma; 03 Apr 13; !RlBCAST(ijimag(jk),1,jkmodnp) !RlBCAST(jireal(jk),1,jkmodnp) @@ -229,7 +260,13 @@ subroutine bnorml( mn, Ntz, efmn, ofmn ) case default ! Lparallel; 09 Mar 17; - FATAL( bnorml, .true., invalid Lparallel for broadcasting ) + + if( .true. ) then + write(6,'("bnorml : fatal : myid=",i3," ; .true. ; invalid Lparallel for broadcasting ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "bnorml : .true. : invalid Lparallel for broadcasting ;" + endif + end select ! end of select case( Lparallel ) ; 09 Mar 17; @@ -245,7 +282,12 @@ subroutine bnorml( mn, Ntz, efmn, ofmn ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(bnorml) + +9999 continue + cput = MPI_WTIME() + Tbnorml = Tbnorml + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/brcast.f90 b/src/brcast.F90 similarity index 61% rename from src/brcast.f90 rename to src/brcast.F90 index 0fc94027..1e364f07 100644 --- a/src/brcast.f90 +++ b/src/brcast.F90 @@ -16,7 +16,7 @@ !> !> @param[in] lvol index of nested volume subroutine brcast( lvol ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero @@ -46,13 +46,29 @@ subroutine brcast( lvol ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lvol +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lvol + + integer :: llmodnp, io, iRZl, ii, ideriv, Nbc - INTEGER :: llmodnp, io, iRZl, ii, ideriv, Nbc - BEGIN(brcast) + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -60,7 +76,13 @@ subroutine brcast( lvol ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - FATAL( brcast, lvol.le.0 .or. lvol.gt.Mvol, error ) + + if( lvol.le.0 .or. lvol.gt.Mvol ) then + write(6,'("brcast : fatal : myid=",i3," ; lvol.le.0 .or. lvol.gt.Mvol ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "brcast : lvol.le.0 .or. lvol.gt.Mvol : error ;" + endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
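The next brcast.F90 hunk replaces the RlBCAST/LlBCAST broadcast macros with direct MPI_BCAST calls: same buffer and count, but with the MPI datatype, the root rank (llmodnp), and the communicator spelled out. A minimal, hypothetical illustration of that one-to-one mapping follows (not SPEC code; MPI_COMM_WORLD stands in for MPI_COMM_SPEC, and mu/ok are placeholder buffers).

program bcast_sketch
  use mpi
  implicit none
  integer          :: ierr, llmodnp
  double precision :: mu(1)
  logical          :: ok(1)

  call MPI_INIT( ierr )
  llmodnp = 0 ; mu = 1.0d0 ; ok = .true.
  call MPI_BCAST( mu, 1, MPI_DOUBLE_PRECISION, llmodnp, MPI_COMM_WORLD, ierr )   ! was RlBCAST( mu, 1, llmodnp )
  call MPI_BCAST( ok, 1, MPI_LOGICAL,          llmodnp, MPI_COMM_WORLD, ierr )   ! was LlBCAST( ok, 1, llmodnp )
  call MPI_FINALIZE( ierr )
end program bcast_sketch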
@@ -68,17 +90,35 @@ subroutine brcast( lvol ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RlBCAST( mu(lvol), 1, llmodnp ) - RlBCAST( dtflux(lvol), 1, llmodnp ) - RlBCAST( dpflux(lvol), 1, llmodnp ) - RlBCAST( helicity(lvol), 1, llmodnp) - RlBCAST( vvolume(lvol), 1, llmodnp ) - RlBCAST( lBBintegral(lvol), 1, llmodnp ) - RlBCAST( lABintegral(lvol), 1, llmodnp ) + call MPI_BCAST(mu(lvol),1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(dtflux(lvol),1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(dpflux(lvol),1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(helicity(lvol),1,MPI_DOUBLE_PRECISION,llmodnp,MPI_COMM_SPEC,ierr) + + + + call MPI_BCAST(vvolume(lvol),1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(lBBintegral(lvol),1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(lABintegral(lvol),1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + + call MPI_BCAST(diotadxup(0:1,-1:2,lvol),8,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(dItGpdxtp(0:1,-1:2,lvol),8,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) - RlBCAST( diotadxup(0:1,-1:2,lvol), 8, llmodnp ) - RlBCAST( dItGpdxtp(0:1,-1:2,lvol), 8, llmodnp ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -87,14 +127,20 @@ subroutine brcast( lvol ) if( LocalConstraint ) then Nbc = LGdof* 2* LGdof* 2 - RlBCAST( dFFdRZ(1:LGdof,0:1,1:LGdof,0:1,lvol), Nbc, llmodnp ) - + + call MPI_BCAST(dFFdRZ(1:LGdof,0:1,1:LGdof,0:1,lvol),Nbc,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + Nbc = LGdof* 2* 2 - RlBCAST( dBBdmp(1:LGdof,lvol,0:1,1:2), Nbc, llmodnp ) + + call MPI_BCAST(dBBdmp(1:LGdof,lvol,0:1,1:2),Nbc,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + Nbc = 2* LGdof* 2 - RlBCAST( dmupfdx(lvol,1:1 ,1:2,1:LGdof,0:1), Nbc, llmodnp ) ! why is this broadcast; 02 Sep 14; + + call MPI_BCAST(dmupfdx(lvol,1:1 ,1:2,1:LGdof,0:1),Nbc,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + ! why is this broadcast; 02 Sep 14; endif @@ -103,13 +149,19 @@ subroutine brcast( lvol ) if (Lhessian3Dallocated) then Nbc = LGdof* 2* LGdof* 2 - RlBCAST(denergydrr(1:LGdof,lvol,0:1,1:LGdof,0:1), Nbc, llmodnp ) - RlBCAST(denergydzr(1:LGdof,lvol,0:1,1:LGdof,0:1), Nbc, llmodnp ) + + call MPI_BCAST(denergydrr(1:LGdof,lvol,0:1,1:LGdof,0:1),Nbc,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(denergydzr(1:LGdof,lvol,0:1,1:LGdof,0:1),Nbc,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + endif - + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LlBCAST( ImagneticOK(lvol), 1, llmodnp ) + + call MPI_BCAST(ImagneticOK(lvol),1,MPI_LOGICAL,llmodnp ,MPI_COMM_SPEC,ierr) + ! Commented - broadcasted in dfp200 ! do ideriv = 0, 2 @@ -121,10 +173,18 @@ subroutine brcast( lvol ) ! enddo - RlBCAST( Bemn(1:mn,lvol,0:1), 2*mn, llmodnp ) ! perhaps all these should be re-ordered; 18 Jul 14; - RlBCAST( Iomn(1:mn,lvol ), mn, llmodnp ) - RlBCAST( Somn(1:mn,lvol,0:1), 2*mn, llmodnp ) - RlBCAST( Pomn(1:mn,lvol,0:2), 3*mn, llmodnp ) ! 15 Sep 15; + + call MPI_BCAST(Bemn(1:mn,lvol,0:1),2*mn,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + ! 
perhaps all these should be re-ordered; 18 Jul 14; + + call MPI_BCAST(Iomn(1:mn,lvol ),mn,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Somn(1:mn,lvol,0:1),2*mn,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Pomn(1:mn,lvol,0:2),3*mn,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + ! 15 Sep 15; if( NOTstellsym ) then ! do ideriv = 0, 2 @@ -134,10 +194,18 @@ subroutine brcast( lvol ) ! enddo ! enddo - RlBCAST( Bomn(1:mn,lvol,0:1), 2*mn, llmodnp ) - RlBCAST( Iemn(1:mn,lvol ), mn, llmodnp ) - RlBCAST( Semn(1:mn,lvol,0:1), 2*mn, llmodnp ) - RlBCAST( Pemn(1:mn,lvol,0:2), 3*mn, llmodnp ) + + call MPI_BCAST(Bomn(1:mn,lvol,0:1),2*mn,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Iemn(1:mn,lvol ),mn,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Semn(1:mn,lvol,0:1),2*mn,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Pemn(1:mn,lvol,0:2),3*mn,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + endif ! end of if( NOTstellsym) ; 11 Aug 14; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -145,12 +213,21 @@ subroutine brcast( lvol ) ! if( lvol.gt.Nvol .and. Lconstraint.eq.-1 .and. Wcurent ) then ! 27 Feb 17; if( lvol.gt.Nvol .and. Wcurent ) then ! 27 Feb 17; !write(ounit,'("brcast : " 10x " : myid="i3" ; broadcasting : curtor="es13.5" ; curpol="es13.5" ;")') myid, curtor, curpol - RlBCAST( curtor, 1, llmodnp ) - RlBCAST( curpol, 1, llmodnp ) + + call MPI_BCAST(curtor,1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(curpol,1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(brcast) + +9999 continue + cput = MPI_WTIME() + Tbrcast = Tbrcast + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/casing.f90 b/src/casing.F90 similarity index 89% rename from src/casing.f90 rename to src/casing.F90 index 99e00888..7af6a1a6 100644 --- a/src/casing.f90 +++ b/src/casing.F90 @@ -101,7 +101,7 @@ !> @param[out] gBn \f$ \sqrt g {\bf B} \cdot {\bf n}\f$ !> @param[out] icasing return flag from dcuhre() subroutine casing( teta, zeta, gBn, icasing ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, pi, pi2 @@ -118,22 +118,38 @@ subroutine casing( teta, zeta, gBn, icasing ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - REAL, intent(in) :: teta, zeta ! arbitrary location; Cartesian; - REAL, intent(out) :: gBn ! magnetic field; Cartesian; - INTEGER, intent(out) :: icasing +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + real(wp), intent(in) :: teta, zeta ! arbitrary location; Cartesian; + real(wp), intent(out) :: gBn ! 
magnetic field; Cartesian; + integer, intent(out) :: icasing - INTEGER, parameter :: Ndim = 2, Nfun = 1 + integer, parameter :: Ndim = 2, Nfun = 1 - INTEGER :: ldim, lfun, minpts, maxpts, Lrwk, idcuhre, jk, irestart, funcls, key, num, maxsub - REAL :: integrals(1:Nfun), low(1:Ndim), upp(1:Ndim), labs, lrel, absest(1:Nfun) - REAL, allocatable :: rwk(:) + integer :: ldim, lfun, minpts, maxpts, Lrwk, idcuhre, jk, irestart, funcls, key, num, maxsub + real(wp) :: integrals(1:Nfun), low(1:Ndim), upp(1:Ndim), labs, lrel, absest(1:Nfun) + real(wp), allocatable :: rwk(:) ! REAL :: dvcfield external :: dvcfield - BEGIN(casing) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -168,7 +184,10 @@ subroutine casing( teta, zeta, gBn, icasing ) Lrwk = maxsub * ( 2 * Ndim + 2 * Nfun + 2 ) + 17 * Nfun + 1 - SALLOCATE( rwk, (1:Lrwk), zero ) + + allocate( rwk(1:Lrwk), stat=astat ) + rwk(1:Lrwk) = zero + irestart = 0 ; funcls = 0 @@ -181,7 +200,7 @@ subroutine casing( teta, zeta, gBn, icasing ) gBn = integrals(1) - cput = GETTIME + cput = MPI_WTIME() select case( idcuhre ) ! "123456789012345678901234" case(0) ; ; ; exit @@ -219,9 +238,14 @@ subroutine casing( teta, zeta, gBn, icasing ) Lrwk = maxsub * ( 2 * Ndim + 2 * Nfun + 2 ) + 17 * Nfun + 1 - DALLOCATE(rwk) - SALLOCATE(rwk, (1:Lrwk), zero) + deallocate(rwk,stat=astat) + + + + allocate( rwk(1:Lrwk), stat=astat ) + rwk(1:Lrwk) = zero + enddo ! end of virtual casing accuracy infinite-do-loop; 10 Apr 13; @@ -239,11 +263,18 @@ subroutine casing( teta, zeta, gBn, icasing ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - DALLOCATE(rwk) + + deallocate(rwk,stat=astat) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(casing) + +9999 continue + cput = MPI_WTIME() + Tcasing = Tcasing + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -261,7 +292,7 @@ end subroutine casing !> @param[in] Nfun number of function values (==3) !> @param[out] vcintegrand cartesian components of magnetic field subroutine dvcfield( Ndim, tz, Nfun, vcintegrand ) ! differential virtual-casing field; format is fixed by NAG requirements; - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, three, four @@ -287,23 +318,43 @@ subroutine dvcfield( Ndim, tz, Nfun, vcintegrand ) ! differential virtual-casing !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER , intent(in) :: Ndim, Nfun - REAL , intent(in) :: tz(1:Ndim) - REAL , intent(out) :: vcintegrand(1:Nfun) ! integrand; components of magnetic field due to plasma currents in Cartesian coordinates; +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer , intent(in) :: Ndim, Nfun + real(wp) , intent(in) :: tz(1:Ndim) + real(wp) , intent(out) :: vcintegrand(1:Nfun) ! integrand; components of magnetic field due to plasma currents in Cartesian coordinates; - INTEGER :: ii, mi, ni, ll, ideriv, jk - REAL :: dR(0:3), dZ(0:3), gBut, gBuz, gtt, gtz, gzz, sqrtg, Blt, Blz, czeta, szeta, arg, carg, sarg, XX, YY, ZZ, teta, zeta - REAL :: jj(1:3), rr(1:3), distance(1:3), firstorderfactor + integer :: ii, mi, ni, ll, ideriv, jk + real(wp) :: dR(0:3), dZ(0:3), gBut, gBuz, gtt, gtz, gzz, sqrtg, Blt, Blz, czeta, szeta, arg, carg, sarg, XX, YY, ZZ, teta, zeta + real(wp) :: jj(1:3), rr(1:3), distance(1:3), firstorderfactor - REAL :: XXt, XXz, YYt, YYz, ZZt, ZZz, ds, Bxyz(1:3) + real(wp) :: XXt, XXz, YYt, YYz, ZZt, ZZz, ds, Bxyz(1:3) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( casing, Ndim.ne. 2, incorrect ) - FATAL( casing, Nfun.ne. 1, incorrect ) + + if( Ndim.ne. 2 ) then + write(6,'("casing : fatal : myid=",i3," ; Ndim.ne. 2 ; incorrect ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "casing : Ndim.ne. 2 : incorrect ;" + endif + + + if( Nfun.ne. 1 ) then + write(6,'("casing : fatal : myid=",i3," ; Nfun.ne. 1 ; incorrect ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "casing : Nfun.ne. 1 : incorrect ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -371,7 +422,13 @@ subroutine dvcfield( Ndim, tz, Nfun, vcintegrand ) ! differential virtual-casing case( 2 ) ! Igeometry = 2 ; 09 Mar 17; - FATAL( casing, .true., virtual casing under construction for cylindrical geometry ) + + if( .true. ) then + write(6,'("casing : fatal : myid=",i3," ; .true. ; virtual casing under construction for cylindrical geometry ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "casing : .true. : virtual casing under construction for cylindrical geometry ;" + endif + case( 3 ) ! Igeometry = 3 ; 09 Mar 17; @@ -452,7 +509,13 @@ subroutine dvcfield( Ndim, tz, Nfun, vcintegrand ) ! differential virtual-casing case( 2 ) ! Igeometry = 2 ; 09 Mar 17; - FATAL( casing, .true., virtual casing under construction for cylindrical geometry ) + + if( .true. ) then + write(6,'("casing : fatal : myid=",i3," ; .true. ; virtual casing under construction for cylindrical geometry ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "casing : .true. : virtual casing under construction for cylindrical geometry ;" + endif + case( 3 ) ! Igeometry = 3 ; 09 Mar 17; @@ -481,7 +544,13 @@ subroutine dvcfield( Ndim, tz, Nfun, vcintegrand ) ! differential virtual-casing case( 2 ) ! Igeometry = 2 ; 09 Mar 17; - FATAL( casing, .true., virtual casing under construction for cylindrical geometry ) + + if( .true. ) then + write(6,'("casing : fatal : myid=",i3," ; .true. ; virtual casing under construction for cylindrical geometry ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "casing : .true. : virtual casing under construction for cylindrical geometry ;" + endif + case( 3 ) ! 
Igeometry = 3 ; toroidal geometry; diff --git a/src/coords.f90 b/src/coords.F90 similarity index 84% rename from src/coords.f90 rename to src/coords.F90 index 651e0b20..c5b8b348 100644 --- a/src/coords.f90 +++ b/src/coords.F90 @@ -141,7 +141,7 @@ !> @param[in] Ntz number of points in \f$\theta\f$ and \f$\zeta\f$ !> @param[in] mn number of Fourier harmonics subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, two, pi2 @@ -167,24 +167,58 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lvol, Lcurvature, Ntz, mn - REAL , intent(in) :: lss +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lvol, Lcurvature, Ntz, mn + real(wp) , intent(in) :: lss - INTEGER :: ii, jj, kk, irz, innout, issym, signlss, mi, ni, imn - REAL :: Remn(1:mn,0:2), Zomn(1:mn,0:2), Romn(1:mn,0:2), Zemn(1:mn,0:2), alss, blss, sbar, sbarhim(1:mn), fj(1:mn,0:2) + integer :: ii, jj, kk, irz, innout, issym, signlss, mi, ni, imn + real(wp) :: Remn(1:mn,0:2), Zomn(1:mn,0:2), Romn(1:mn,0:2), Zemn(1:mn,0:2), alss, blss, sbar, sbarhim(1:mn), fj(1:mn,0:2) - REAL :: Dij(1:Ntz,0:3), dguvij(1:Ntz,1:3,1:3), DRxij(1:Ntz,0:3), DZxij(1:Ntz,0:3) + real(wp) :: Dij(1:Ntz,0:3), dguvij(1:Ntz,1:3,1:3), DRxij(1:Ntz,0:3), DZxij(1:Ntz,0:3) + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(coords) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( coords, lvol.lt.1 .or. lvol.gt.Mvol, invalid volume label ) - FATAL( coords, abs(lss).gt.one, invalid radial coordinate ) - FATAL( coords, Lcurvature.lt.0 .or. Lcurvature.gt.5, invalid input value for Lcurvature ) + + if( lvol.lt.1 .or. lvol.gt.Mvol ) then + write(6,'("coords : fatal : myid=",i3," ; lvol.lt.1 .or. lvol.gt.Mvol ; invalid volume label ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : lvol.lt.1 .or. lvol.gt.Mvol : invalid volume label ;" + endif + + + if( abs(lss).gt.one ) then + write(6,'("coords : fatal : myid=",i3," ; abs(lss).gt.one ; invalid radial coordinate ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : abs(lss).gt.one : invalid radial coordinate ;" + endif + + + if( Lcurvature.lt.0 .or. Lcurvature.gt.5 ) then + write(6,'("coords : fatal : myid=",i3," ; Lcurvature.lt.0 .or. Lcurvature.gt.5 ; invalid input value for Lcurvature ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : Lcurvature.lt.0 .or. Lcurvature.gt.5 : invalid input value for Lcurvature ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -206,14 +240,26 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) sbar = ( lss + one ) * half #ifdef DEBUG - FATAL( coords, sbar.lt.zero .or. sbar.gt.one, invalid sbar ) + + if( sbar.lt.zero .or. 
sbar.gt.one ) then + write(6,'("coords : fatal : myid=",i3," ; sbar.lt.zero .or. sbar.gt.one ; invalid sbar ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : sbar.lt.zero .or. sbar.gt.one : invalid sbar ;" + endif + #endif select case( Igeometry ) case( 2 ) ; fj( 1:Ntor+1,0) = sbar ! these are the mj.eq.0 harmonics; 11 Aug 14; switch to sbar=r; 29 Jun 19 ; ; fj(Ntor+2:mn ,0) = sbar**(im(Ntor+2:mn)+1) ! these are the me.ne.0 harmonics; 11 Aug 14; switch to sbar=r; 29 Jun 19 case( 3 ) ; fj( 1:Ntor+1,0) = sbar**2 ! switch to sbar=r; 29 Jun 19 ; ; fj(Ntor+2:mn ,0) = sbar**im(Ntor+2:mn) ! these are the me.ne.0 harmonics; 11 Aug 14; switch to sbar=r; 29 Jun 19 - case default ; FATAL( coords, .true., invalid Igeometry for Lcoordinatesingularity=T ) + case default ; + if( .true. ) then + write(6,'("coords : fatal : myid=",i3," ; .true. ; invalid Igeometry for Lcoordinatesingularity=T ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : .true. : invalid Igeometry for Lcoordinatesingularity=T ;" + endif + end select @@ -257,14 +303,26 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) if( Lcoordinatesingularity ) then #ifdef DEBUG - FATAL( coords, sbar.lt.small, small denominator ) + + if( sbar.lt.small ) then + write(6,'("coords : fatal : myid=",i3," ; sbar.lt.small ; small denominator ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : sbar.lt.small : small denominator ;" + endif + #endif select case( Igeometry ) case( 2 ) ; fj( 1:Ntor+1,1) = half ! these are the mj.eq.0 harmonics; 11 Aug 14; switch to sbar=r; 29 Jun 19 ; ; fj(Ntor+2:mn ,1) = half*(im(Ntor+2:mn)+one) * fj(Ntor+2:mn ,0) / sbar ! these are the me.ne.0 harmonics; 11 Aug 14; switch to sbar=r; 29 Jun 19 case( 3 ) ; fj( 1:Ntor+1,1) = sbar ! these are the mj.eq.0 harmonics; 11 Aug 14; switch to sbar=r; 29 Jun 19 ; ; fj(Ntor+2:mn ,1) = half * im(Ntor+2:mn) * fj(Ntor+2:mn ,0) / sbar ! these are the me.ne.0 harmonics; 11 Aug 14; switch to sbar=r; 29 Jun 19 - case default ; FATAL( coords, .true., invalid Igeometry for Lcoordinatesingularity=T and Lcurvature.ne.0 ) + case default ; + if( .true. ) then + write(6,'("coords : fatal : myid=",i3," ; .true. ; invalid Igeometry for Lcoordinatesingularity=T and Lcurvature.ne.0 ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : .true. : invalid Igeometry for Lcoordinatesingularity=T and Lcurvature.ne.0 ;" + endif + end select Remn(1:mn,1) = ( iRbc(1:mn,1) - iRbc(1:mn,0) ) * fj(1:mn,1) @@ -366,7 +424,13 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - FATAL( coords, .true., selected Igeometry not supported ) + + if( .true. ) then + write(6,'("coords : fatal : myid=",i3," ; .true. ; selected Igeometry not supported ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : .true. : selected Igeometry not supported ;" + endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
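A quick consistency check on the coordinate-singularity regularization factors visible in the surrounding coords.F90 hunks: with sbar = ( lss + one ) * half, d(sbar)/d(lss) = 1/2, so for Igeometry=3 and m /= 0 the factor fj(:,0) = sbar**im has first derivative (im/2)*sbar**(im-1) = half*im*fj(:,0)/sbar and second derivative half*(im-1)*fj(:,1)/sbar, which matches the fj(:,1) expression shown above and the fj(:,2) expression in the hunk that follows.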
@@ -392,7 +456,13 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) if( Lcoordinatesingularity ) then #ifdef DEBUG - FATAL( coords, sbar.lt.small, small denominator ) + + if( sbar.lt.small ) then + write(6,'("coords : fatal : myid=",i3," ; sbar.lt.small ; small denominator ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : sbar.lt.small : small denominator ;" + endif + #endif select case( Igeometry ) case( 2 ) ; fj( 1:Ntor+1,2) = zero ! these are the mj.eq.0 harmonics; 11 Aug 14; switch to sbar=r; 29 Jun 19 @@ -400,7 +470,13 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) case( 3 ) ; fj( 1:Ntor+1,2) = half ! these are the mj.eq.0 harmonics; 11 Aug 14; switch to sbar=r; 29 Jun 19 ; ; fj(Ntor+2:mn ,2) = half * ( im(Ntor+2:mn) - one ) * fj(Ntor+2:mn ,1) / sbar ! these are the me.ne.0 harmonics; 11 Aug 14; switch to sbar=r; 29 Jun 19 case default ; - ; ; FATAL( coords, .true., invalid Igeometry for Lcoordinatesingularity=T and Lcurvature=2 ) + ; ; + if( .true. ) then + write(6,'("coords : fatal : myid=",i3," ; .true. ; invalid Igeometry for Lcoordinatesingularity=T and Lcurvature=2 ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : .true. : invalid Igeometry for Lcoordinatesingularity=T and Lcurvature=2 ;" + endif + end select ; Remn(1:mn,2) = ( iRbc(1:mn,1) - iRbc(1:mn,0) ) * fj(1:mn,2) @@ -520,7 +596,13 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) case default - FATAL( coords, .true., selected Igeometry not supported for Lcurvature.eq.2 ) + + if( .true. ) then + write(6,'("coords : fatal : myid=",i3," ; .true. ; selected Igeometry not supported for Lcurvature.eq.2 ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : .true. : selected Igeometry not supported for Lcurvature.eq.2 ;" + endif + end select ! end of select case( Igeometry ) ; 15 Sep 16; @@ -538,11 +620,23 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) if( Lcoordinatesingularity ) then #ifdef DEBUG - FATAL( coords, innout.eq.0, cannot differentiate metric elements wrt coordinate singularity ) + + if( innout.eq.0 ) then + write(6,'("coords : fatal : myid=",i3," ; innout.eq.0 ; cannot differentiate metric elements wrt coordinate singularity ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : innout.eq.0 : cannot differentiate metric elements wrt coordinate singularity ;" + endif + #endif #ifdef DEBUG - FATAL( coords, Igeometry.eq.1, Cartesian does not need regularization factor ) + + if( Igeometry.eq.1 ) then + write(6,'("coords : fatal : myid=",i3," ; Igeometry.eq.1 ; Cartesian does not need regularization factor ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : Igeometry.eq.1 : Cartesian does not need regularization factor ;" + endif + #endif if( ( irz.eq.0 .and. issym.eq.0 ) .or. ( irz.eq.1 .and. issym.eq.1 ) ) then ! cosine harmonics; 13 Sep 13; @@ -616,7 +710,13 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) if( irz.eq.1 ) sg(1:Ntz,1) = ( Dij(1:Ntz,1 )*Rij(1:Ntz,2,0) - Rij(1:Ntz,1,0)*Dij(1:Ntz,2 ) ) else - FATAL( coords, Lcurvature.eq.5 .and. Igeometry.ne.3, Lcurvature.eq.5 can only be combined with Igeometry.ne.3 ) + + if( Lcurvature.eq.5 .and. Igeometry.ne.3 ) then + write(6,'("coords : fatal : myid=",i3," ; Lcurvature.eq.5 .and. Igeometry.ne.3 ; Lcurvature.eq.5 can only be combined with Igeometry.ne.3 ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : Lcurvature.eq.5 .and. Igeometry.ne.3 : Lcurvature.eq.5 can only be combined with Igeometry.ne.3 ;" + endif + end if ! 
if (Igeometry .eq. 3) ; 13 Jan 20 else ! we need more for Lcurvature=3,4 ; 13 Jan 20 @@ -626,7 +726,13 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) case( 1 ) ! Lcurvature=3,4 ; Igeometry=1 ; Cartesian; 04 Dec 14; #ifdef DEBUG - FATAL( coords, irz.eq.1, there is no dependence on Zbs or Zbc ) + + if( irz.eq.1 ) then + write(6,'("coords : fatal : myid=",i3," ; irz.eq.1 ; there is no dependence on Zbs or Zbc ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : irz.eq.1 : there is no dependence on Zbs or Zbc ;" + endif + #endif ! sg(1:Ntz,0) = Rij(1:Ntz,1,0)*rpol*rtor @@ -643,7 +749,13 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) case( 2 ) ! Lcurvature=3,4,5 ; Igeometry=2 ; cylindrical; #ifdef DEBUG - FATAL( coords, irz.eq.1, there is no dependence on Zbs or Zbc ) + + if( irz.eq.1 ) then + write(6,'("coords : fatal : myid=",i3," ; irz.eq.1 ; there is no dependence on Zbs or Zbc ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : irz.eq.1 : there is no dependence on Zbs or Zbc ;" + endif + #endif ! sg(1:Ntz,0) = Rij(1:Ntz,1,0) * Rij(1:Ntz,0,0) @@ -654,7 +766,13 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) do jj = ii, 3 if( irz.eq.0 ) dguvij(1:Ntz,ii,jj) = Dij(1:Ntz,ii) * Rij(1:Ntz,jj,0) + Rij(1:Ntz,ii,0) * Dij(1:Ntz,jj) if( irz.eq.1 ) then - FATAL(coords, .true., No Z-geometrical degree of freedom when Igeometry=2)!dguvij(1:Ntz,ii,jj) = Dij(1:Ntz,ii) * Zij(1:Ntz,jj,0) + Zij(1:Ntz,ii,0) * Dij(1:Ntz,jj) ! TODO REMOVE + + if( .true. ) then + write(6,'("coords : fatal : myid=",i3," ; .true. ; No Z-geometrical degree of freedom when Igeometry=2;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : .true. : No Z-geometrical degree of freedom when Igeometry=2 ;" + endif +!dguvij(1:Ntz,ii,jj) = Dij(1:Ntz,ii) * Zij(1:Ntz,jj,0) + Zij(1:Ntz,ii,0) * Dij(1:Ntz,jj) ! TODO REMOVE endif enddo enddo @@ -704,7 +822,13 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) case default - FATAL( coords, .true., supplied Igeometry is not yet supported for Lcurvature.eq.3 or Lcurvature.eq.4 ) + + if( .true. ) then + write(6,'("coords : fatal : myid=",i3," ; .true. ; supplied Igeometry is not yet supported for Lcurvature.eq.3 or Lcurvature.eq.4 ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "coords : .true. : supplied Igeometry is not yet supported for Lcurvature.eq.3 or Lcurvature.eq.4 ;" + endif + end select ! end of select case( Igeometry ); 7 Mar 13; @@ -737,7 +861,12 @@ subroutine coords( lvol, lss, Lcurvature, Ntz, mn ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(coords) + +9999 continue + cput = MPI_WTIME() + Tcoords = Tcoords + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/curent.f90 b/src/curent.F90 similarity index 89% rename from src/curent.f90 rename to src/curent.F90 index 87cd1255..7c828238 100644 --- a/src/curent.f90 +++ b/src/curent.F90 @@ -54,7 +54,7 @@ !> @param[in] iflag some integer flag !> @param[out] ldItGp plasma and linking current subroutine curent( lvol, mn, Nt, Nz, iflag, ldItGp ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
use constants, only : zero, one, two, pi2 @@ -78,21 +78,43 @@ subroutine curent( lvol, mn, Nt, Nz, iflag, ldItGp ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lvol, mn, Nt, Nz, iflag - REAL , intent(out) :: ldItGp(0:1,-1:2) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lvol, mn, Nt, Nz, iflag + real(wp) , intent(out) :: ldItGp(0:1,-1:2) + + integer :: innout, ideriv, ii, ll, Lcurvature, ifail + real(wp) :: lss + real(wp) :: Bsupt(1:Nt*Nz,-1:2), Bsupz(1:Nt*Nz,-1:2) + - INTEGER :: innout, ideriv, ii, ll, Lcurvature, ifail - REAL :: lss - REAL :: Bsupt(1:Nt*Nz,-1:2), Bsupz(1:Nt*Nz,-1:2) + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(curent) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( curent, lvol.ne.Mvol, this is only defined in the vacuum region ) + + if( lvol.ne.Mvol ) then + write(6,'("curent : fatal : myid=",i3," ; lvol.ne.Mvol ; this is only defined in the vacuum region ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "curent : lvol.ne.Mvol : this is only defined in the vacuum region ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -126,7 +148,12 @@ subroutine curent( lvol, mn, Nt, Nz, iflag, ldItGp ) case( 2 ) ; Lcurvature = 1 end select - WCALL( curent, coords,( lvol, lss, Lcurvature, Ntz, mn ) ) ! get "lower" metric elements evaluated on innout interface; + + cput = MPI_WTIME() + Tcurent = Tcurent + ( cput-cpuo ) + call coords( lvol, lss, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + ! get "lower" metric elements evaluated on innout interface; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -183,7 +210,12 @@ subroutine curent( lvol, mn, Nt, Nz, iflag, ldItGp ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(curent) + +9999 continue + cput = MPI_WTIME() + Tcurent = Tcurent + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/df00ab.f90 b/src/df00ab.F90 similarity index 72% rename from src/df00ab.f90 rename to src/df00ab.F90 index b36d5e23..38f5805b 100644 --- a/src/df00ab.f90 +++ b/src/df00ab.F90 @@ -11,7 +11,7 @@ !> @param[in] Ldfjac !> @param[in] iflag subroutine df00ab( pNN , xi , Fxi , DFxi , Ldfjac , iflag ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
use constants, only : zero, half, one, two @@ -28,23 +28,45 @@ subroutine df00ab( pNN , xi , Fxi , DFxi , Ldfjac , iflag ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: pNN, Ldfjac - INTEGER, intent(in) :: iflag - REAL , intent(in) :: xi(0:pNN-1) - REAL , intent(out) :: Fxi(0:pNN-1), DFxi(0:Ldfjac-1,0:pNN-1) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: pNN, Ldfjac + integer, intent(in) :: iflag + real(wp) , intent(in) :: xi(0:pNN-1) + real(wp) , intent(out) :: Fxi(0:pNN-1), DFxi(0:Ldfjac-1,0:pNN-1) + + integer :: NN + real(wp) :: lmu ! , dpsi(1:2) + - INTEGER :: NN - REAL :: lmu ! , dpsi(1:2) + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(df00ab) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL(df00ab, ivol.lt.1 .or. ivol.gt.Nvol, ivol invalid ) ! 26 Feb 13; + + if( ivol.lt.1 .or. ivol.gt.Nvol ) then + write(6,'("df00ab : fatal : myid=",i3," ; ivol.lt.1 .or. ivol.gt.Nvol ; ivol invalid ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "df00ab : ivol.lt.1 .or. ivol.gt.Nvol : ivol invalid ;" + endif + ! 26 Feb 13; #endif @@ -75,13 +97,24 @@ subroutine df00ab( pNN , xi , Fxi , DFxi , Ldfjac , iflag ) case default - FATAL(df00ab, .true., supplied value of iflag is not supported ) + + if( .true. ) then + write(6,'("df00ab : fatal : myid=",i3," ; .true. ; supplied value of iflag is not supported ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "df00ab : .true. : supplied value of iflag is not supported ;" + endif + end select !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(df00ab) + +9999 continue + cput = MPI_WTIME() + Tdf00ab = Tdf00ab + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/dforce.f90 b/src/dforce.F90 similarity index 79% rename from src/dforce.f90 rename to src/dforce.F90 index 05f59b2c..a5553232 100644 --- a/src/dforce.f90 +++ b/src/dforce.F90 @@ -91,7 +91,7 @@ !> @param[in] LComputeDerivatives indicates whether derivatives are to be calculated; 0: no derivatives, 1: !> @param[inout] LComputeAxis subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, pi, pi2 @@ -138,40 +138,53 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - - INTEGER, parameter :: NB = 3 ! optimal workspace block size for LAPACK:DSYSVX; - - INTEGER, intent(in) :: NGdof ! 
dimensions; - REAL, intent(in) :: position(0:NGdof) - REAL, intent(out) :: force(0:NGdof) ! force; - LOGICAL, intent(in) :: LComputeDerivatives ! - - INTEGER :: vvol, innout, ii, jj, irz, issym, iocons, tdoc, idoc, idof, tdof, jdof, ivol, imn, ll, ihybrd1, lwa, Ndofgl, llmodnp - INTEGER :: maxfev, ml, muhybr, mode, nprint, nfev, ldfjac, lr, Nbc, NN, cpu_id, ideriv - REAL :: epsfcn, factor - REAL :: Fdof(1:Mvol-1), Xdof(1:Mvol-1) - INTEGER :: ipiv(1:Mvol) - REAL, allocatable :: fjac(:, :), r(:), Fvec(:), dpfluxout(:) - - INTEGER :: status(MPI_STATUS_SIZE), request_recv, request_send, cpu_send - INTEGER :: id - INTEGER :: iflag, idgesv, Lwork - INTEGER :: idofr,idofz,tdofr,tdofz - CHARACTER :: packorunpack +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + integer, intent(in) :: NGdof ! dimensions; + real(wp), intent(in) :: position(0:NGdof) ! degrees-of-freedom = internal geometry; + real(wp), intent(out) :: force(0:NGdof) ! force; + LOGICAL, intent(in) :: LComputeDerivatives ! indicates whether derivatives are to be calculated; + + integer :: vvol, innout, ii, jj, irz, issym, iocons, tdoc, idoc, idof, tdof, jdof, ivol, imn, ll, ihybrd1, lwa, Ndofgl, llmodnp + integer :: maxfev, ml, muhybr, mode, nprint, nfev, ldfjac, lr, Nbc, NN, cpu_id, ideriv + real(wp) :: epsfcn, factor + real(wp) :: Fdof(1:Mvol-1), Xdof(1:Mvol-1) + integer :: ipiv(1:Mvol) + real(wp), allocatable :: fjac(:, :), r(:), Fvec(:), dpfluxout(:) + + integer :: status(MPI_STATUS_SIZE), request_recv, request_send, cpu_send + integer :: id + integer :: iflag, idgesv, Lwork + integer :: idofr,idofz,tdofr,tdofz + + character :: packorunpack EXTERNAL :: dfp100, dfp200 LOGICAL :: LComputeAxis, dfp100_logical #ifdef DEBUG - INTEGER :: isymdiff - REAL :: dvol(-1:+1), evolume, imupf(1:2,-2:2), lfactor - REAL, allocatable :: isolution(:,:) - REAL, allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:), iforce(:,:), iposition(:,:), finitediff_hessian(:,:) ! original geometry; + integer :: isymdiff + real(wp) :: dvol(-1:+1), evolume, imupf(1:2,-2:2), lfactor + real(wp), allocatable :: isolution(:,:) + real(wp), allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:), iforce(:,:), iposition(:,:), finitediff_hessian(:,:) ! original geometry; +#endif + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 #endif - BEGIN(dforce) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -184,14 +197,25 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) LComputeAxis = .true. #endif - WCALL( dforce, packxi,( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), & - iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), packorunpack, LcomputeDerivatives, LComputeAxis ) ) + + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call packxi( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), & + iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), packorunpack, LcomputeDerivatives, LComputeAxis ) + cpuo = MPI_WTIME() + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
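
The WCALL( caller, routine, ( args ) ) macro becomes the explicit timing bracket seen above: the caller's accumulated time (Tdforce here) is updated just before the call and the reference time cpuo is reset just after it, so that time spent inside the callee is not charged to the caller. A self-contained sketch of the bracket, assuming dummy names (wcall_demo, do_work, Tdforce_demo) in place of SPEC's profiling counters and real(8) in place of real(wp):

program wcall_demo
  use mpi
  implicit none
  integer :: ierr
  real(8) :: cput, cpuo, Tdforce_demo
  call MPI_INIT( ierr )
  Tdforce_demo = 0 ; cpuo = MPI_WTIME()
  ! ... the caller's own work is timed from cpuo ...
  cput = MPI_WTIME()
  Tdforce_demo = Tdforce_demo + ( cput-cpuo )   ! charge elapsed time to the caller
  call do_work()                                ! the wrapped call
  cpuo = MPI_WTIME()                            ! restart the caller's clock
  call MPI_FINALIZE( ierr )
contains
  subroutine do_work()
  end subroutine do_work
end program wcall_demo
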
if( LcomputeDerivatives ) then #ifdef DEBUG - FATAL( dforce, .not.allocated(dBBdmp), do not pass go ) + + if( .not.allocated(dBBdmp) ) then + write(6,'("dforce : fatal : myid=",i3," ; .not.allocated(dBBdmp) ; do not pass go ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : .not.allocated(dBBdmp) : do not pass go ;" + endif + #endif dBBdmp(1:LGdof,1:Mvol,0:1,1:2) = zero endif @@ -214,16 +238,26 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) if( LocalConstraint ) then - SALLOCATE( Fvec, (1:Mvol-1), zero) + + allocate( Fvec(1:Mvol-1), stat=astat ) + Fvec(1:Mvol-1) = zero + Ndofgl = 0; Fvec(1:Mvol-1) = 0; dfp100_logical = .FALSE.; Xdof(1:Mvol-1) = dpflux(2:Mvol) + xoffset ! Solve for field dBdX%L = LComputeDerivatives - WCALL(dforce, dfp100, (Ndofgl, Xdof, Fvec, dfp100_logical) ) - DALLOCATE( Fvec ) + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call dfp100(Ndofgl, Xdof, Fvec, dfp100_logical) + cpuo = MPI_WTIME() + + + + deallocate(Fvec ,stat=astat) + !do vvol=1,Mvol ! WCALL(dforce, brcast, (vvol) ) @@ -243,13 +277,24 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) Ndofgl = Mvol-1 endif - SALLOCATE( Fvec, (1:Ndofgl), zero ) + + allocate( Fvec(1:Ndofgl), stat=astat ) + Fvec(1:Ndofgl) = zero + dfp100_logical = .FALSE. - WCALL(dforce, dfp100, (Ndofgl, Xdof(1:Mvol-1), Fvec(1:Ndofgl), dfp100_logical)) - SALLOCATE(dpfluxout, (1:Ndofgl), zero ) + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call dfp100(Ndofgl, Xdof(1:Mvol-1), Fvec(1:Ndofgl), dfp100_logical) + cpuo = MPI_WTIME() + + + + allocate( dpfluxout(1:Ndofgl), stat=astat ) + dpfluxout(1:Ndofgl) = zero + if ( myid .eq. 0 ) then dpfluxout = Fvec @@ -263,34 +308,69 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) endif ! Broadcast the field and pflux - RlBCAST(dpfluxout(1:Ndofgl), Ndofgl, 0) - RlBCAST(dpflux(1:Mvol) , Mvol, 0) + + call MPI_BCAST(dpfluxout(1:Ndofgl),Ndofgl,MPI_DOUBLE_PRECISION,0,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(dpflux(1:Mvol) ,Mvol,MPI_DOUBLE_PRECISION,0,MPI_COMM_SPEC,ierr) + if( Lfreebound.eq.1 ) then - RlBCAST(dtflux(Mvol), 1, 0) + + call MPI_BCAST(dtflux(Mvol),1,MPI_DOUBLE_PRECISION,0,MPI_COMM_SPEC,ierr) + endif do vvol = 2, Mvol - WCALL(dforce, IsMyVolume, (vvol)) + + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call IsMyVolume(vvol) + cpuo = MPI_WTIME() + if( IsMyVolumeValue .EQ. 0 ) then cycle else if( IsMyVolumeValue .EQ. -1) then - FATAL(dforce, .true., Unassociated volume) + + if( .true. ) then + write(6,'("dforce : fatal : myid=",i3," ; .true. ; Unassociated volume;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : .true. : Unassociated volume ;" + endif + endif NN = NAdof(vvol) - SALLOCATE( solution, (1:NN, 0:2), zero) + + allocate( solution(1:NN, 0:2), stat=astat ) + solution(1:NN, 0:2) = zero + ! Pack field and its derivatives packorunpack = 'P' - WCALL( dforce, packab, ( packorunpack, vvol, NN, solution(1:NN,0), 0 ) ) ! packing; - WCALL( dforce, packab, ( packorunpack, vvol, NN, solution(1:NN,2), 2 ) ) ! packing; + + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call packab( packorunpack, vvol, NN, solution(1:NN,0), 0 ) + cpuo = MPI_WTIME() + ! packing; + + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call packab( packorunpack, vvol, NN, solution(1:NN,2), 2 ) + cpuo = MPI_WTIME() + ! packing; ! 
compute the field with renewed dpflux via single Newton method step if( Lfreebound.eq.1 .and.(vvol.eq.Mvol) ) then - WCALL( dforce, packab, ( packorunpack, vvol, NN, solution(1:NN,1), 1 ) ) ! packing; + + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call packab( packorunpack, vvol, NN, solution(1:NN,1), 1 ) + cpuo = MPI_WTIME() + ! packing; solution(1:NN, 0) = solution(1:NN, 0) - dpfluxout(vvol-1) * solution(1:NN, 2) & ! derivative w.r.t pflux - dpfluxout(vvol ) * solution(1:NN, 1) ! derivative w.r.t tflux else @@ -299,14 +379,25 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) ! Unpack field in vector potential Fourier harmonics packorunpack = 'U' - WCALL( dforce, packab, ( packorunpack, vvol, NN, solution(1:NN,0), 0 ) ) ! unpacking; - DALLOCATE( solution ) + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call packab( packorunpack, vvol, NN, solution(1:NN,0), 0 ) + cpuo = MPI_WTIME() + ! unpacking; + + + deallocate(solution ,stat=astat) + enddo ! end of do vvol = 1, Mvol - DALLOCATE(Fvec) - DALLOCATE(dpfluxout) + + deallocate(Fvec,stat=astat) + + + deallocate(dpfluxout,stat=astat) + ! #ifdef DEBUG ! select case( ihybrd1 ) @@ -332,13 +423,19 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) ! Broadcast all ImagneticOK !write(ounit,'("dforce : " 10x " : myid="i3"; vvol="i3"; ; ImagneticOK="999L2)') myid, vvol, ImagneticOK(1:Mvol) !write(ounit,'("dforce : " 10x " : cpu_id="i3"; vvol="i3"; ; ImagneticOK="999L2)') cpu_id, vvol, ImagneticOK(vvol) - LlBCAST( ImagneticOK(vvol) , 1, cpu_id) + + call MPI_BCAST(ImagneticOK(vvol) ,1,MPI_LOGICAL,cpu_id,MPI_COMM_SPEC,ierr) + do ideriv=0,2 if( (.not.LcomputeDerivatives) .and. (ideriv.ne.0) ) cycle do ii = 1, mn - RlBCAST( Ate(vvol,ideriv,ii)%s(0:Lrad(vvol)), Lrad(vvol)+1, cpu_id) - RlBCAST( Aze(vvol,ideriv,ii)%s(0:Lrad(vvol)), Lrad(vvol)+1, cpu_id) + + call MPI_BCAST(Ate(vvol,ideriv,ii)%s(0:Lrad(vvol)),Lrad(vvol)+1,MPI_DOUBLE_PRECISION,cpu_id,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Aze(vvol,ideriv,ii)%s(0:Lrad(vvol)),Lrad(vvol)+1,MPI_DOUBLE_PRECISION,cpu_id,MPI_COMM_SPEC,ierr) + enddo enddo @@ -347,8 +444,12 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) do ideriv=0,2 if( (.not.LcomputeDerivatives) .and. (ideriv.ne.0) ) cycle do ii = 1, mn - RlBCAST( Ato(vvol,ideriv,ii)%s(0:Lrad(vvol)), Lrad(vvol)+1, cpu_id) - RlBCAST( Azo(vvol,ideriv,ii)%s(0:Lrad(vvol)), Lrad(vvol)+1, cpu_id) + + call MPI_BCAST(Ato(vvol,ideriv,ii)%s(0:Lrad(vvol)),Lrad(vvol)+1,MPI_DOUBLE_PRECISION,cpu_id,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Azo(vvol,ideriv,ii)%s(0:Lrad(vvol)),Lrad(vvol)+1,MPI_DOUBLE_PRECISION,cpu_id,MPI_COMM_SPEC,ierr) + enddo enddo endif @@ -358,7 +459,12 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) ! Compute local force and derivatives !Compuatation of Hessian2d is cleared at this point. - WCALL(dforce, dfp200, ( LcomputeDerivatives, vvol) ) + + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call dfp200( LcomputeDerivatives, vvol) + cpuo = MPI_WTIME() + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -368,7 +474,13 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) stop "dforce : : myid= ; finished computing derivatives of rotational-transform wrt mu and dpflux ;" ! 
this will allow other cpus to finish; endif - FATAL( dforce, Lcheck.eq.2, finished computing derivatives of rotational-transform wrt mu and dpflux ) + + if( Lcheck.eq.2 ) then + write(6,'("dforce : fatal : myid=",i3," ; Lcheck.eq.2 ; finished computing derivatives of rotational-transform wrt mu and dpflux ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : Lcheck.eq.2 : finished computing derivatives of rotational-transform wrt mu and dpflux ;" + endif + if( Wdforce ) write(ounit,'("dforce : " 10x " : myid="i3" ; LComputeDerivatives="L2" ; ImagneticOK="999L2)') myid, LComputeDerivatives, ImagneticOK(1:Mvol) #endif @@ -378,8 +490,23 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) ! Broadcast information to all CPUs do vvol = 1, Mvol - LREGION( vvol ) - WCALL( dforce, brcast, ( vvol ) ) + + if( Igeometry.eq.1 .or. vvol .gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol .le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + + + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call brcast( vvol ) + cpuo = MPI_WTIME() + enddo @@ -405,7 +532,17 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) do vvol = 1, Mvol-1 - LREGION(vvol) + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + tdoc = (vvol-1) * LGdof @@ -414,7 +551,13 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) ; idoc = 0 ! degree-of-constraint counter; set; if( Lextrap.eq.1 .and. vvol.eq.1 ) then ! to be made redundant; - FATAL( dforce, 2.gt.Mvol, psifactor needs attention ) + + if( 2.gt.Mvol ) then + write(6,'("dforce : fatal : myid=",i3," ; 2.gt.Mvol ; psifactor needs attention ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : 2.gt.Mvol : psifactor needs attention ;" + endif + ;force(tdoc+idoc+1:tdoc+idoc+mn) = position(1:mn) - ( iRbc(1:mn,2) / psifactor(1:mn,2) ) else ;force(tdoc+idoc+1:tdoc+idoc+mn ) = ( Bemn(1:mn ,vvol+1,0) - Bemn(1:mn ,vvol+0,1) ) * BBweight(1:mn) ! pressure imbalance; @@ -467,7 +610,13 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) endif ! end of if( NOTstellsym ) ; #ifdef DEBUG - FATAL( dforce, idoc.ne.LGdof, counting error ) ! this has caught bugs; + + if( idoc.ne.LGdof ) then + write(6,'("dforce : fatal : myid=",i3," ; idoc.ne.LGdof ; counting error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : idoc.ne.LGdof : counting error ;" + endif + ! this has caught bugs; #endif else ! matches if( ImagneticOK(vvol) .and. ImagneticOK(vvol+1) ); @@ -496,7 +645,7 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) if( Wdforce .and. myid.eq.0 ) then - cput = GETTIME + cput = MPI_WTIME() ; ; write(ounit,4000) cput-cpus, ForceErr, cput-cpuo, "|BB|e", alog10(BBe(1:min(Mvol-1,28))) if( Igeometry.ge.3 ) write(ounit,4001) "|II|o", alog10(IIo(1:min(Mvol-1,28))) if( NOTstellsym ) then @@ -616,7 +765,11 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) else ! matches if( ImagneticOK(vvol) .and. ImagneticOK(vvol+1) ) ; - FATAL( dforce, .true., need to provide suitable values for hessian2D in case of field failure ) + if( .true. 
) then + write(6,'("dforce : fatal : myid=",i3," ; .true. ; need to provide suitable values for hessian2D in case of field failure ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : .true. : need to provide suitable values for hessian2D in case of field failure ;" + endif endif ! end of if( ImagneticOK(vvol) .and. ImagneticOK(vvol+1) ) ; @@ -707,8 +860,11 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) ! In the general case of global constraint, there are no zero element in the hessian. We thus loop again on all volumes - FATAL( dforce, .true., incorrect choice of Lconstraint in SPEC) - + if( .true. ) then + write(6,'("dforce : fatal : myid=",i3," ; .true. ; incorrect choice of Lconstraint in SPEC;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : .true. : incorrect choice of Lconstraint in SPEC ;" + endif endif ! matches if( LocalConstraint ); enddo ! matches do issym ; @@ -718,7 +874,12 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) else ! matches if( ImagneticOK(vvol) .and. ImagneticOK(vvol+1) ) ; - FATAL( dforce, .true., need to provide suitable values for hessian2D in case of field failure ) + + if( .true. ) then + write(6,'("dforce : fatal : myid=",i3," ; .true. ; need to provide suitable values for hessian2D in case of field failure ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : .true. : need to provide suitable values for hessian2D in case of field failure ;" + endif endif ! end of if( ImagneticOK(vvol) .and. ImagneticOK(vvol+1) ) ; @@ -738,7 +899,13 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) if( LcomputeDerivatives .and. Lhessianallocated) then ! construct Hessian; #ifdef DEBUG - FATAL( dforce, .not.Lhessianallocated, need to allocate hessian ) + + if( .not.Lhessianallocated ) then + write(6,'("dforce : fatal : myid=",i3," ; .not.Lhessianallocated ; need to allocate hessian ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : .not.Lhessianallocated : need to allocate hessian ;" + endif + #endif hessian(1:NGdof,1:NGdof) = zero @@ -783,7 +950,13 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) #ifdef DEBUG if( idof.gt.LGdof ) write(ounit,1000) myid, vvol, ii, irz, issym, idof, LGdof ! can be deleted; 1000 format("hforce : " 10x " : myid=",i3," ; vvol=",i3," ; ii= ",i3," ; irz="i3" ; issym="i3" ; idof="i3" ; LGdof="i3" ;") - FATAL( hforce, idof.gt.LGdof, illegal degree-of-freedom index constructing hessian ) ! can be deleted; + + if( idof.gt.LGdof ) then + write(6,'("hforce : fatal : myid=",i3," ; idof.gt.LGdof ; illegal degree-of-freedom index constructing hessian ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "hforce : idof.gt.LGdof : illegal degree-of-freedom index constructing hessian ;" + endif + ! can be deleted; #endif if( LocalConstraint ) then @@ -884,7 +1057,13 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) else ! matches if( ImagneticOK(vvol) .and. ImagneticOK(vvol+1) ) ; - FATAL( dforce, .true., need to provide suitable values for hessian in case of field failure ) + + if( .true. ) then + write(6,'("dforce : fatal : myid=",i3," ; .true. ; need to provide suitable values for hessian in case of field failure ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : .true. : need to provide suitable values for hessian in case of field failure ;" + endif + endif ! end of if( ImagneticOK(vvol) .and. 
ImagneticOK(vvol+1) ) ; enddo ! end of do vvol; @@ -892,7 +1071,12 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) ! Evaluate force gradient #ifdef DEBUG if( Lcheck.eq.6 ) then - WCALL(dforce, fndiff_dforce, ( NGdof ) ) + + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call fndiff_dforce( NGdof ) + cpuo = MPI_WTIME() + endif #endif @@ -902,7 +1086,12 @@ subroutine dforce( NGdof, position, force, LComputeDerivatives, LComputeAxis) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(dforce) + +9999 continue + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -910,7 +1099,7 @@ end subroutine dforce subroutine fndiff_dforce( NGdof ) - + use mod_kinds, only: wp => dp use constants, only: zero, one, half, two use fileunits, only: ounit @@ -928,36 +1117,73 @@ subroutine fndiff_dforce( NGdof ) YESstellsym, NOTstellsym, & hessian, ext -LOCALS -INTEGER, intent(in) :: NGdof +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + +integer, intent(in) :: NGdof -INTEGER :: vvol, idof, ii, irz, issym, isymdiff ! loop indices -INTEGER :: tdof ! hessian index +integer :: vvol, idof, ii, irz, issym, isymdiff ! loop indices +integer :: tdof ! hessian index -REAL :: lfactor -CHARACTER :: packorunpack +real(wp) :: lfactor +character :: packorunpack LOGICAL :: LComputeAxis -REAL, allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:) ! used to store original geometry; -REAL, allocatable :: iposition(:,:), iforce(:,:) ! perturbed interfaces position and force -REAL, allocatable :: finitediff_estimate(:,:) ! store finite differences +real(wp), allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:) ! used to store original geometry; +real(wp), allocatable :: iposition(:,:), iforce(:,:) ! perturbed interfaces position and force +real(wp), allocatable :: finitediff_estimate(:,:) ! store finite differences + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif -BEGIN(dforce) write(ounit, '("fndiff : Starting finite difference evaluation of hessian ...")') if( ncpu.eq.1) then - SALLOCATE( finitediff_estimate, (1:NGdof, 1:NGdof), zero ) + + allocate( finitediff_estimate(1:NGdof, 1:NGdof), stat=astat ) + finitediff_estimate(1:NGdof, 1:NGdof) = zero + dBdX%L = .false. 
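
The SALLOCATE( array, (bounds), value ) and DALLOCATE( array ) macros expand, as in the surrounding lines, to an allocate with a stat argument followed by an initialising assignment, and to the matching deallocate. A minimal sketch of the expanded form, with the array name Fvec_demo and size N chosen only for illustration:

program salloc_demo
  implicit none
  real(8), allocatable :: Fvec_demo(:)
  integer :: astat
  integer, parameter :: N = 8
  allocate( Fvec_demo(1:N), stat=astat )   ! SALLOCATE( Fvec, (1:N), zero ) expands to this ...
  Fvec_demo(1:N) = 0.0d0                   ! ... plus initialisation to the requested value
  deallocate( Fvec_demo, stat=astat )      ! DALLOCATE( Fvec ) expands to this
end program salloc_demo
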
- SALLOCATE( oRbc, (1:mn,0:Mvol), iRbc(1:mn,0:Mvol) ) !save unperturbed geometry - SALLOCATE( oZbs, (1:mn,0:Mvol), iZbs(1:mn,0:Mvol) ) - SALLOCATE( oRbs, (1:mn,0:Mvol), iRbs(1:mn,0:Mvol) ) - SALLOCATE( oZbc, (1:mn,0:Mvol), iZbc(1:mn,0:Mvol) ) - SALLOCATE( iforce, (-2:2, 0:NGdof), zero) - SALLOCATE( iposition, (-2:2, 0:NGdof), zero) + + allocate( oRbc(1:mn,0:Mvol), stat=astat ) + oRbc(1:mn,0:Mvol) = iRbc(1:mn,0:Mvol) + !save unperturbed geometry + + allocate( oZbs(1:mn,0:Mvol), stat=astat ) + oZbs(1:mn,0:Mvol) = iZbs(1:mn,0:Mvol) + + + allocate( oRbs(1:mn,0:Mvol), stat=astat ) + oRbs(1:mn,0:Mvol) = iRbs(1:mn,0:Mvol) + + + allocate( oZbc(1:mn,0:Mvol), stat=astat ) + oZbc(1:mn,0:Mvol) = iZbc(1:mn,0:Mvol) + + + allocate( iforce(-2:2, 0:NGdof), stat=astat ) + iforce(-2:2, 0:NGdof) = zero + + + allocate( iposition(-2:2, 0:NGdof), stat=astat ) + iposition(-2:2, 0:NGdof) = zero + do vvol = 1, Mvol-1 ! loop over interior surfaces; @@ -1005,9 +1231,19 @@ subroutine fndiff_dforce( NGdof ) !LComputeAxis = .false. ! keep axis fixed LComputeAxis = .true. - WCALL(dforce, packxi,( NGdof, iposition(isymdiff,0:NGdof), Mvol, mn,iRbc(1:mn,0:Mvol),iZbs(1:mn,0:Mvol),iRbs(1:mn,0:Mvol),& - iZbc(1:mn,0:Mvol),packorunpack, .false., LComputeAxis ) ) - WCALL(dforce, dforce,( NGdof, iposition(isymdiff,0:NGdof), iforce(isymdiff,0:NGdof), .false., LComputeAxis) ) + + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call packxi( NGdof, iposition(isymdiff,0:NGdof), Mvol, mn,iRbc(1:mn,0:Mvol),iZbs(1:mn,0:Mvol),iRbs(1:mn,0:Mvol),& + iZbc(1:mn,0:Mvol),packorunpack, .false., LComputeAxis ) + cpuo = MPI_WTIME() + + + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + call dforce( NGdof, iposition(isymdiff,0:NGdof), iforce(isymdiff,0:NGdof), .false., LComputeAxis) + cpuo = MPI_WTIME() + enddo ! Fourth order centered finite difference scheme @@ -1025,12 +1261,24 @@ subroutine fndiff_dforce( NGdof ) enddo !vvol - DALLOCATE(iforce) - DALLOCATE(iposition) - DALLOCATE(oZbc) - DALLOCATE(oRbs) - DALLOCATE(oZbs) - DALLOCATE(oRbc) + + deallocate(iforce,stat=astat) + + + deallocate(iposition,stat=astat) + + + deallocate(oZbc,stat=astat) + + + deallocate(oRbs,stat=astat) + + + deallocate(oZbs,stat=astat) + + + deallocate(oRbc,stat=astat) + ! Print in file for diagnostics if(myid.eq.0) then @@ -1061,12 +1309,25 @@ subroutine fndiff_dforce( NGdof ) endif - DALLOCATE(finitediff_estimate) + + deallocate(finitediff_estimate,stat=astat) + endif -FATAL(fndiff, .true., Finite differences have been evaluated. ) -RETURN(dforce) + if( .true. ) then + write(6,'("fndiff : fatal : myid=",i3," ; .true. ; Finite differences have been evaluated. ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "fndiff : .true. : Finite differences have been evaluated. 
;" + endif + + + +9999 continue + cput = MPI_WTIME() + Tdforce = Tdforce + ( cput-cpuo ) + return + end subroutine fndiff_dforce diff --git a/src/dfp100.f90 b/src/dfp100.F90 similarity index 64% rename from src/dfp100.f90 rename to src/dfp100.F90 index acb75e37..90a8ac69 100644 --- a/src/dfp100.f90 +++ b/src/dfp100.F90 @@ -37,7 +37,7 @@ !> @param Fvec !> @param LComputeDerivatives subroutine dfp100(Ndofgl, x, Fvec, LComputeDerivatives) - + use mod_kinds, only: wp => dp use constants, only : zero, half, one, two, pi2, pi, mu0 use fileunits, only : ounit @@ -65,7 +65,15 @@ subroutine dfp100(Ndofgl, x, Fvec, LComputeDerivatives) dMA, dMB, dMD, dMG, MBpsi, solution, & Nt, Nz, LILUprecond, Lsavedguvij, NOTMatrixFree, guvijsave, izbs - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + !------ ! vvol: loop index on volumes ! Ndofgl: Input parameter necessary for the use of hybrd1. Unused otherwise. @@ -75,22 +83,40 @@ subroutine dfp100(Ndofgl, x, Fvec, LComputeDerivatives) ! Fvec: Global constraint values ! x: Degrees of freedom of hybrd1. For now contains only the poloidal flux - INTEGER :: vvol, Ndofgl, iflag, cpu_send_one, cpu_send_two, ll, NN, ideriv, iocons - INTEGER :: status(MPI_STATUS_SIZE), request1, request2 - REAL :: Fvec(1:Ndofgl), x(1:Mvol-1), Bt00(1:Mvol, 0:1, -1:2), ldItGp(0:1, -1:2) + integer :: vvol, Ndofgl, iflag, cpu_send_one, cpu_send_two, ll, NN, ideriv, iocons + integer :: status(MPI_STATUS_SIZE), request1, request2 + real(wp) :: Fvec(1:Ndofgl), x(1:Mvol-1), Bt00(1:Mvol, 0:1, -1:2), ldItGp(0:1, -1:2) LOGICAL :: LComputeDerivatives - INTEGER :: deriv, Lcurvature + integer :: deriv, Lcurvature - BEGIN(dfp100) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + dpflux(2:Mvol) = x - xoffset ! Now each CPU perform the calculation in its volume(s) do vvol = 1, Mvol - LREGION(vvol) ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; ImagneticOK(vvol) = .false. !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -100,7 +126,13 @@ subroutine dfp100(Ndofgl, x, Fvec, LComputeDerivatives) if( IsMyVolumeValue .EQ. 0 ) then cycle else if( IsMyVolumeValue .EQ. -1) then - FATAL(dfp100, .true., Unassociated volume) + + if( .true. ) then + write(6,'("dfp100 : fatal : myid=",i3," ; .true. ; Unassociated volume;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp100 : .true. : Unassociated volume ;" + endif + endif NN = NAdof(vvol) @@ -113,25 +145,58 @@ subroutine dfp100(Ndofgl, x, Fvec, LComputeDerivatives) dBdX%L = .false. ! No need to take derivatives of matrices w.r.t geometry. ideriv = 0 ; Lcurvature = 1 - WCALL( dfp100, compute_guvijsave, (Iquad(vvol), vvol, ideriv, Lcurvature) ) + + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call compute_guvijsave(Iquad(vvol), vvol, ideriv, Lcurvature) + cpuo = MPI_WTIME() + Lsavedguvij = .true. ! 
we need to construct the preconditioner if needed if (LILUprecond) then - WCALL( dfp100, spsint, ( Iquad(vvol), mn, vvol, ll ) ) - WCALL( dfp100, spsmat, ( vvol, mn, ll) ) + + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call spsint( Iquad(vvol), mn, vvol, ll ) + cpuo = MPI_WTIME() + + + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call spsmat( vvol, mn, ll) + cpuo = MPI_WTIME() + endif if (NOTMatrixFree) then ! construct Beltrami matrix - WCALL( dfp100, ma00aa, ( Iquad(vvol), mn, vvol, ll ) ) ! compute volume integrals of metric elements; - WCALL( dfp100, matrix, ( vvol, mn, ll ) ) + + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call ma00aa( Iquad(vvol), mn, vvol, ll ) + cpuo = MPI_WTIME() + ! compute volume integrals of metric elements; + + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call matrix( vvol, mn, ll ) + cpuo = MPI_WTIME() + else ! matrix free, so construct something else ! we will still need to construct the dMB and dMG matrix - WCALL( dfp100, matrixBG, ( vvol, mn, ll ) ) + + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call matrixBG( vvol, mn, ll ) + cpuo = MPI_WTIME() + endif ! Call Beltrami solver to get the magnetic field in the current volume. - WCALL( dfp100, ma02aa, ( vvol, NN ) ) + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call ma02aa( vvol, NN ) + cpuo = MPI_WTIME() Lsavedguvij = .false. @@ -147,24 +212,50 @@ subroutine dfp100(Ndofgl, x, Fvec, LComputeDerivatives) if( ( Lcoordinatesingularity .and. iocons.eq.0 ) .or. ( Lvacuumregion .and. iocons.eq.1 ) ) cycle ideriv = 0 - WCALL( dfp100, lbpol, (vvol, Bt00(1:Mvol, 0:1, -1:2), ideriv, iocons) ) !Compute field at interface for global constraint + + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call lbpol(vvol, Bt00(1:Mvol, 0:1, -1:2), ideriv, iocons) + cpuo = MPI_WTIME() + !Compute field at interface for global constraint ideriv = 2 - WCALL( dfp100, lbpol, (vvol, Bt00(1:Mvol, 0:1, -1:2), ideriv, iocons) ) !Compute field at interface for global constraint, d w.r.t. pflux + + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call lbpol(vvol, Bt00(1:Mvol, 0:1, -1:2), ideriv, iocons) + cpuo = MPI_WTIME() + !Compute field at interface for global constraint, d w.r.t. pflux enddo if( Lvacuumregion ) then #ifdef DEBUG - FATAL( dfp100, vvol.ne.Mvol, Incorrect vvol in last volume) + + if( vvol.ne.Mvol ) then + write(6,'("dfp100 : fatal : myid=",i3," ; vvol.ne.Mvol ; Incorrect vvol in last volume;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp100 : vvol.ne.Mvol : Incorrect vvol in last volume ;" + endif + #endif ideriv=1 ! derivatives of Btheta w.r.t tflux iocons=0 ! Only need inner side of volume derivatives - WCALL( dfp100, lbpol, (Mvol, Bt00(1:Mvol, 0:1, -1:2), ideriv, iocons) ) + + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call lbpol(Mvol, Bt00(1:Mvol, 0:1, -1:2), ideriv, iocons) + cpuo = MPI_WTIME() + iflag=2 ! derivatives of poloidal linking current w.r.t geometry not required - WCALL( dfp100, curent, (Mvol, mn, Nt, Nz, iflag, ldItGp(0:1,-1:2) ) ) + + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + call curent(Mvol, mn, Nt, Nz, iflag, ldItGp(0:1,-1:2) ) + cpuo = MPI_WTIME() + endif endif enddo @@ -187,10 +278,18 @@ subroutine dfp100(Ndofgl, x, Fvec, LComputeDerivatives) call WhichCpuID(vvol+1, cpu_send_two) ! Broadcast magnetic field at the interface. 
- RlBCAST(Bt00(vvol , 1, 0), 1, cpu_send_one) - RlBCAST(Bt00(vvol+1, 0, 0), 1, cpu_send_two) - RlBCAST(Bt00(vvol , 1, 2), 1, cpu_send_one) - RlBCAST(Bt00(vvol+1, 0, 2), 1, cpu_send_two) + + call MPI_BCAST(Bt00(vvol , 1, 0),1,MPI_DOUBLE_PRECISION,cpu_send_one,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Bt00(vvol+1, 0, 0),1,MPI_DOUBLE_PRECISION,cpu_send_two,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Bt00(vvol , 1, 2),1,MPI_DOUBLE_PRECISION,cpu_send_one,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Bt00(vvol+1, 0, 2),1,MPI_DOUBLE_PRECISION,cpu_send_two,MPI_COMM_SPEC,ierr) + ! Evaluate surface current IPDt(vvol) = pi2 * (Bt00(vvol+1, 0, 0) - Bt00(vvol, 1, 0)) @@ -210,8 +309,12 @@ subroutine dfp100(Ndofgl, x, Fvec, LComputeDerivatives) ! Communicate additional derivatives call WhichCpuID(Mvol, cpu_send_one) - RlBCAST( ldItGp(0:1, -1:2), 8, cpu_send_one ) - RlBCAST( Bt00(Mvol, 0:1, 1), 2, cpu_send_one ) + + call MPI_BCAST(ldItGp(0:1, -1:2),8,MPI_DOUBLE_PRECISION,cpu_send_one ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Bt00(Mvol, 0:1, 1),2,MPI_DOUBLE_PRECISION,cpu_send_one ,MPI_COMM_SPEC,ierr) + ! Complete output: RHS Fvec(Mvol ) = ldItGp(1, 0) - curpol @@ -224,10 +327,21 @@ subroutine dfp100(Ndofgl, x, Fvec, LComputeDerivatives) case default - FATAL(dfp100, .true., Unaccepted value for Lconstraint) + + if( .true. ) then + write(6,'("dfp100 : fatal : myid=",i3," ; .true. ; Unaccepted value for Lconstraint;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp100 : .true. : Unaccepted value for Lconstraint ;" + endif + end select endif - RETURN(dfp100) + +9999 continue + cput = MPI_WTIME() + Tdfp100 = Tdfp100 + ( cput-cpuo ) + return + end subroutine dfp100 diff --git a/src/dfp200.f90 b/src/dfp200.F90 similarity index 77% rename from src/dfp200.f90 rename to src/dfp200.F90 index 2015b3b6..2a7dd719 100644 --- a/src/dfp200.f90 +++ b/src/dfp200.F90 @@ -43,7 +43,7 @@ !> @param LcomputeDerivatives !> @param vvol subroutine dfp200( LcomputeDerivatives, vvol) - + use mod_kinds, only: wp => dp use constants, only : zero, half, one, two use numerical, only : small @@ -97,28 +97,44 @@ subroutine dfp200( LcomputeDerivatives, vvol) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + LOGICAL, intent(in) :: LComputeDerivatives ! 
indicates whether derivatives are to be calculated; LOGICAL :: LInnerVolume - INTEGER :: NN, IA, ifail, if01adf, vflag, MM, idgetrf, idgetri, Lwork, lvol, pvol - INTEGER :: vvol, innout, ii, jj, irz, issym, iocons, idoc, idof, imn, ll - INTEGER :: Lcurvature, ideriv, id - INTEGER :: iflag, cpu_id, cpu_id1, even_or_odd, vol_parity - INTEGER :: stat(MPI_STATUS_SIZE), tag, tag2, req1, req2, req3, req4 + integer :: NN, IA, ifail, if01adf, vflag, MM, idgetrf, idgetri, Lwork, lvol, pvol + integer :: vvol, innout, ii, jj, irz, issym, iocons, idoc, idof, imn, ll + integer :: Lcurvature, ideriv, id + integer :: iflag, cpu_id, cpu_id1, even_or_odd, vol_parity + integer :: stat(MPI_STATUS_SIZE), tag, tag2, req1, req2, req3, req4 - REAL :: lastcpu, lss, lfactor, DDl, MMl - REAL :: det - REAL , allocatable :: XX(:), YY(:), dBB(:,:), dII(:), dLL(:), dPP(:), length(:), dRR(:,:), dZZ(:,:), constraint(:) - REAL , allocatable :: ddFcol1(:), ddFcol2(:), ddFcol3(:), ddFcol4(:) + real(wp) :: lastcpu, lss, lfactor, DDl, MMl + real(wp) :: det + real(wp) , allocatable :: XX(:), YY(:), dBB(:,:), dII(:), dLL(:), dPP(:), length(:), dRR(:,:), dZZ(:,:), constraint(:) + real(wp) , allocatable :: ddFcol1(:), ddFcol2(:), ddFcol3(:), ddFcol4(:) - CHARACTER :: packorunpack + character :: packorunpack type(MatrixLU) :: oBI(1:Mvol) - BEGIN(dfp200) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + #ifdef DEBUG @@ -127,38 +143,101 @@ subroutine dfp200( LcomputeDerivatives, vvol) endif #endif - SALLOCATE( dBB , (1:Ntz,-1:2), zero ) ! magnetic field strength (on interfaces) in real space and derivatives; - SALLOCATE( XX , (1:Ntz ), zero ) - SALLOCATE( YY , (1:Ntz ), zero ) - SALLOCATE( length , (1:Ntz ), zero ) ! this is calculated in lforce; + + allocate( dBB (1:Ntz,-1:2), stat=astat ) + dBB (1:Ntz,-1:2) = zero + ! magnetic field strength (on interfaces) in real space and derivatives; + + allocate( XX (1:Ntz ), stat=astat ) + XX (1:Ntz ) = zero + + + allocate( YY (1:Ntz ), stat=astat ) + YY (1:Ntz ) = zero + + + allocate( length (1:Ntz ), stat=astat ) + length (1:Ntz ) = zero + ! this is calculated in lforce; if( LComputeDerivatives ) then - SALLOCATE( dRR , (1:Ntz,-1:1), zero ) - SALLOCATE( dZZ , (1:Ntz,-1:1), zero ) - SALLOCATE( dII , (1:Ntz ), zero ) ! spectral constraint; - SALLOCATE( dLL , (1:Ntz ), zero ) ! length constraint; - SALLOCATE( dPP , (1:Ntz ), zero ) ! poloidal constraint; - SALLOCATE( constraint, (1:Ntz ), zero ) - SALLOCATE( ddFcol1 , (1:Ntz ), zero ) - SALLOCATE( ddFcol2 , (1:Ntz ), zero ) - SALLOCATE( ddFcol3 , (1:Ntz ), zero ) - SALLOCATE( ddFcol4 , (1:Ntz ), zero ) + + allocate( dRR (1:Ntz,-1:1), stat=astat ) + dRR (1:Ntz,-1:1) = zero + + + allocate( dZZ (1:Ntz,-1:1), stat=astat ) + dZZ (1:Ntz,-1:1) = zero + + + allocate( dII (1:Ntz ), stat=astat ) + dII (1:Ntz ) = zero + ! spectral constraint; + + allocate( dLL (1:Ntz ), stat=astat ) + dLL (1:Ntz ) = zero + ! length constraint; + + allocate( dPP (1:Ntz ), stat=astat ) + dPP (1:Ntz ) = zero + ! 
poloidal constraint; + + allocate( constraint(1:Ntz ), stat=astat ) + constraint(1:Ntz ) = zero + + + allocate( ddFcol1 (1:Ntz ), stat=astat ) + ddFcol1 (1:Ntz ) = zero + + + allocate( ddFcol2 (1:Ntz ), stat=astat ) + ddFcol2 (1:Ntz ) = zero + + + allocate( ddFcol3 (1:Ntz ), stat=astat ) + ddFcol3 (1:Ntz ) = zero + + + allocate( ddFcol4 (1:Ntz ), stat=astat ) + ddFcol4 (1:Ntz ) = zero + endif if( LocalConstraint ) then do vvol = 1, Mvol - WCALL(dfp200, IsMyVolume, (vvol)) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call IsMyVolume(vvol) + cpuo = MPI_WTIME() + if( IsMyVolumeValue .EQ. 0 ) then cycle else if( IsMyVolumeValue .EQ. -1) then - FATAL(dfp200, .true., Unassociated volume) + + if( .true. ) then + write(6,'("dfp200 : fatal : myid=",i3," ; .true. ; Unassociated volume;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : .true. : Unassociated volume ;" + endif + endif - LREGION(vvol) ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; dBdX%vol = vvol ! Label ll = Lrad(vvol) ! Shorthand @@ -167,7 +246,12 @@ subroutine dfp200( LcomputeDerivatives, vvol) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! vflag = 1 - WCALL( dfp200, volume, ( vvol, vflag ) ) ! compute volume; + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call volume( vvol, vflag ) + cpuo = MPI_WTIME() + ! compute volume; !!----Hessian2D cleared----- @@ -179,7 +263,12 @@ subroutine dfp200( LcomputeDerivatives, vvol) ideriv = 0 ; id = ideriv iflag = 0 ! XX & YY are returned by lforce; Bemn(1:mn,vvol,iocons), Iomn(1:mn,vvol) etc. are returned through global; - WCALL( dfp200, lforce, ( vvol, iocons, ideriv, Ntz, dBB(1:Ntz,id), XX(1:Ntz), YY(1:Ntz), length(1:Ntz), DDl, MMl, iflag ) ) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lforce( vvol, iocons, ideriv, Ntz, dBB(1:Ntz,id), XX(1:Ntz), YY(1:Ntz), length(1:Ntz), DDl, MMl, iflag ) + cpuo = MPI_WTIME() + enddo ! end of do iocons = 0, 1; @@ -187,8 +276,14 @@ subroutine dfp200( LcomputeDerivatives, vvol) ! Allocate some memory for storing the LU matrix and ipivot ! we need one extra dimension for helicity constraint, so dimension is NN+1 -> 0:NN - SALLOCATE( oBI(vvol)%mat, (0:NN,0:NN ), zero ) ! inverse of ``original'', i.e. unperturbed, Beltrami matrix; - SALLOCATE( oBI(vvol)%ipivot, (0:NN), zero) + + allocate( oBI(vvol)%mat(0:NN,0:NN ), stat=astat ) + oBI(vvol)%mat(0:NN,0:NN ) = zero + ! inverse of ``original'', i.e. unperturbed, Beltrami matrix; + + allocate( oBI(vvol)%ipivot(0:NN), stat=astat ) + oBI(vvol)%ipivot(0:NN) = zero + ! initialize matrices for Beltrami linear system call allocate_geometry_matrices(vvol, LcomputeDerivatives) @@ -196,7 +291,12 @@ subroutine dfp200( LcomputeDerivatives, vvol) call intghs_workspace_init(vvol) packorunpack = 'P' - WCALL( dfp200, packab, ( packorunpack, vvol, NN, solution(1:NN,0), 0 ) ) ! packing, put the solutions back to the solution matrix; + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call packab( packorunpack, vvol, NN, solution(1:NN,0), 0 ) + cpuo = MPI_WTIME() + ! 
packing, put the solutions back to the solution matrix; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -224,7 +324,13 @@ subroutine dfp200( LcomputeDerivatives, vvol) idof = idof + 1 ! this labels the degree-of-freedom that the derivative is taken with respect to; this is outside do innout; #ifdef DEBUG - FATAL( dfp200, idof.gt.LGdof, illegal degree-of-freedom index constructing derivatives ) ! this can be deleted; + + if( idof.gt.LGdof ) then + write(6,'("dfp200 : fatal : myid=",i3," ; idof.gt.LGdof ; illegal degree-of-freedom index constructing derivatives ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : idof.gt.LGdof : illegal degree-of-freedom index constructing derivatives ;" + endif + ! this can be deleted; #endif do innout = 0, 1 ! loop over deformations to inner and outer interface; inside do vvol; inside do ii; inside do irz; @@ -271,8 +377,12 @@ subroutine dfp200( LcomputeDerivatives, vvol) call deallocate_Beltrami_matrices(LcomputeDerivatives) call deallocate_geometry_matrices(LcomputeDerivatives) - DALLOCATE(oBI(vvol)%mat) - DALLOCATE(oBI(vvol)%ipivot) + + deallocate(oBI(vvol)%mat,stat=astat) + + + deallocate(oBI(vvol)%ipivot,stat=astat) + endif ! end of if( LComputeDerivatives ) ; @@ -283,23 +393,49 @@ subroutine dfp200( LcomputeDerivatives, vvol) else ! CASE SEMI GLOBAL CONSTRAINT do vvol = 1, Mvol - WCALL(dfp200, IsMyVolume, (vvol)) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call IsMyVolume(vvol) + cpuo = MPI_WTIME() + if( IsMyVolumeValue .EQ. 0 ) then cycle else if( IsMyVolumeValue .EQ. -1) then - FATAL(dfp200, .true., Unassociated volume) + + if( .true. ) then + write(6,'("dfp200 : fatal : myid=",i3," ; .true. ; Unassociated volume;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : .true. : Unassociated volume ;" + endif + endif - LREGION(vvol) ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; ll = Lrad(vvol) ! Shorthand NN = NAdof(vvol) ! shorthand; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! vflag = 1 - WCALL( dfp200, volume, ( vvol, vflag ) ) ! compute volume; + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call volume( vvol, vflag ) + cpuo = MPI_WTIME() + ! compute volume; do iocons = 0, 1 ! construct field magnitude on inner and outer interfaces; inside do vvol; @@ -308,7 +444,12 @@ subroutine dfp200( LcomputeDerivatives, vvol) ideriv = 0 ; id = ideriv iflag = 0 ! dAt, dAz, XX & YY are returned by lforce; Bemn(1:mn,vvol,iocons), Iomn(1:mn,vvol) etc. are returned through global; - WCALL( dfp200, lforce, ( vvol, iocons, ideriv, Ntz, dBB(1:Ntz,id), XX(1:Ntz), YY(1:Ntz), length(1:Ntz), DDl, MMl, iflag ) ) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lforce( vvol, iocons, ideriv, Ntz, dBB(1:Ntz,id), XX(1:Ntz), YY(1:Ntz), length(1:Ntz), DDl, MMl, iflag ) + cpuo = MPI_WTIME() + enddo ! 
end of do iocons = 0, 1; enddo @@ -327,8 +468,12 @@ subroutine dfp200( LcomputeDerivatives, vvol) call WhichCpuID(Mvol, cpu_id) - RlBCAST( Ate(Mvol,ideriv,ii)%s(0:Lrad(Mvol)), Lrad(Mvol)+1, cpu_id ) - RlBCAST( Aze(Mvol,ideriv,ii)%s(0:Lrad(Mvol)), Lrad(Mvol)+1, cpu_id ) + + call MPI_BCAST(Ate(Mvol,ideriv,ii)%s(0:Lrad(Mvol)),Lrad(Mvol)+1,MPI_DOUBLE_PRECISION,cpu_id ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Aze(Mvol,ideriv,ii)%s(0:Lrad(Mvol)),Lrad(Mvol)+1,MPI_DOUBLE_PRECISION,cpu_id ,MPI_COMM_SPEC,ierr) + enddo enddo endif @@ -348,19 +493,46 @@ subroutine dfp200( LcomputeDerivatives, vvol) ! First invert Beltrami matrices and store them in OBI do vvol = 1, Mvol - LREGION(vvol) ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; TODO: maybe not necessary, remove + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; TODO: maybe not necessary, remove NN = NAdof(vvol) ! shorthand; ll = Lrad(vvol) - SALLOCATE( oBI(vvol)%mat, (0:NN,0:NN ), zero ) ! inverse of ``original'', i.e. unperturbed, Beltrami matrix; - SALLOCATE( oBI(vvol)%ipivot, (0:NN), zero) + + allocate( oBI(vvol)%mat(0:NN,0:NN ), stat=astat ) + oBI(vvol)%mat(0:NN,0:NN ) = zero + ! inverse of ``original'', i.e. unperturbed, Beltrami matrix; + + allocate( oBI(vvol)%ipivot(0:NN), stat=astat ) + oBI(vvol)%ipivot(0:NN) = zero + ! Parallelization - WCALL(dfp200, IsMyVolume, (vvol)) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call IsMyVolume(vvol) + cpuo = MPI_WTIME() + if( IsMyVolumeValue .EQ. 0 ) then cycle else if( IsMyVolumeValue .EQ. -1) then - FATAL(dfp200, .true., Unassociated volume) + + if( .true. ) then + write(6,'("dfp200 : fatal : myid=",i3," ; .true. ; Unassociated volume;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : .true. : Unassociated volume ;" + endif + endif @@ -401,22 +573,44 @@ subroutine dfp200( LcomputeDerivatives, vvol) if( (vol_parity.eq.0 ) .and. (even_or_odd.eq.1) ) cycle ! even_or_odd=1 thus perturb only odd interfaces if( (vol_parity.eq.1 ) .and. (even_or_odd.eq.0) ) cycle ! even_or_odd=0 thus perturb only even interfaces - WCALL(dfp200, IsMyVolume, (vvol)) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call IsMyVolume(vvol) + cpuo = MPI_WTIME() + if( IsMyVolumeValue.EQ.0 ) then ! This CPU does not deal with interface's inner volume - WCALL(dfp200, IsMyVolume, (vvol+1)) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call IsMyVolume(vvol+1) + cpuo = MPI_WTIME() + if( IsMyVolumeValue.eq.0 ) then ! This CPU does not deal with interface's outer volume either - cycle cycle else if( IsMyVolumeValue.eq.-1 ) then - FATAL(dfp200, .true., Unassociated volume) + + if( .true. ) then + write(6,'("dfp200 : fatal : myid=",i3," ; .true. ; Unassociated volume;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : .true. : Unassociated volume ;" + endif + else LInnerVolume = .false. endif else if( IsMyVolumeValue.EQ.-1 ) then - FATAL(dfp200, .true., Unassociated volume) + + if( .true. ) then + write(6,'("dfp200 : fatal : myid=",i3," ; .true. ; Unassociated volume;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : .true. : Unassociated volume ;" + endif + else LinnerVolume = .true. 
endif @@ -440,7 +634,13 @@ subroutine dfp200( LcomputeDerivatives, vvol) idof = idof + 1 ! this labels the degree-of-freedom that the derivative is taken with respect to; this is outside do innout; #ifdef DEBUG - FATAL( dfp200, idof.gt.LGdof, illegal degree-of-freedom index constructing derivatives ) ! this can be deleted; + + if( idof.gt.LGdof ) then + write(6,'("dfp200 : fatal : myid=",i3," ; idof.gt.LGdof ; illegal degree-of-freedom index constructing derivatives ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : idof.gt.LGdof : illegal degree-of-freedom index constructing derivatives ;" + endif + ! this can be deleted; #endif @@ -460,7 +660,17 @@ subroutine dfp200( LcomputeDerivatives, vvol) dBdX%L = .true. ! Set up volume information - LREGION(lvol) ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; + + if( Igeometry.eq.1 .or. lvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( lvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; NN = NAdof(lvol) ! Allocate memory. This cannot be moved outside due to NN and ll dependence on volume. @@ -540,12 +750,23 @@ subroutine dfp200( LcomputeDerivatives, vvol) endif do lvol = vvol, vvol+1 - WCALL(dfp200, IsMyVolume, (lvol)) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call IsMyVolume(lvol) + cpuo = MPI_WTIME() + if( IsMyVolumeValue .EQ. 0 ) then cycle else if( IsMyVolumeValue .EQ. -1) then - FATAL(dfp200, .true., Unassociated volume) + + if( .true. ) then + write(6,'("dfp200 : fatal : myid=",i3," ; .true. ; Unassociated volume;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : .true. : Unassociated volume ;" + endif + endif if( lvol.eq.vvol ) then @@ -555,7 +776,17 @@ subroutine dfp200( LcomputeDerivatives, vvol) endif dBdX%innout = innout - LREGION(lvol) ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; + + if( Igeometry.eq.1 .or. lvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( lvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + ! assigns Lcoordinatesingularity, Lplasmaregion, etc. ; ! EVALUATE dBB call evaluate_dBB(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, dRR, dZZ, dII, dLL, dPP, Ntz, & @@ -575,8 +806,12 @@ subroutine dfp200( LcomputeDerivatives, vvol) ! Free memory do vvol = 1, Mvol - DALLOCATE(oBi(vvol)%mat) - DALLOCATE(oBI(vvol)%ipivot) + + deallocate(oBi(vvol)%mat,stat=astat) + + + deallocate(oBI(vvol)%ipivot,stat=astat) + enddo dBdX%L = .false. ! probably not needed, but included anyway; @@ -612,27 +847,60 @@ subroutine dfp200( LcomputeDerivatives, vvol) endif ! End of if( LocalConstraint ) if( LcomputeDerivatives ) then - DALLOCATE(constraint) - DALLOCATE(dPP) - DALLOCATE(dLL) - DALLOCATE(dII) - DALLOCATE(dZZ) - DALLOCATE(dRR) - DALLOCATE(ddFcol1) - DALLOCATE(ddFcol2) - DALLOCATE(ddFcol3) - DALLOCATE(ddFcol4) + + deallocate(constraint,stat=astat) + + + deallocate(dPP,stat=astat) + + + deallocate(dLL,stat=astat) + + + deallocate(dII,stat=astat) + + + deallocate(dZZ,stat=astat) + + + deallocate(dRR,stat=astat) + + + deallocate(ddFcol1,stat=astat) + + + deallocate(ddFcol2,stat=astat) + + + deallocate(ddFcol3,stat=astat) + + + deallocate(ddFcol4,stat=astat) + endif - DALLOCATE(dBB) - DALLOCATE( XX) ! 
spectral constraints; not used; - DALLOCATE( YY) - DALLOCATE(length) + + deallocate(dBB,stat=astat) + + + deallocate(XX,stat=astat) + ! spectral constraints; not used; + + deallocate(YY,stat=astat) + + + deallocate(length,stat=astat) + 2000 continue - RETURN(dfp200) + +9999 continue + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + return + end subroutine dfp200 @@ -648,7 +916,7 @@ end subroutine dfp200 !> @param oBI !> @param NN subroutine get_LU_beltrami_matrices(vvol, oBI, NN) - + use mod_kinds, only: wp => dp ! Evaluate the LU factorization of Beltrami matrices and store the original one in oBI. ! INPUT @@ -677,19 +945,27 @@ subroutine get_LU_beltrami_matrices(vvol, oBI, NN) use typedefns - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + ! ------ TYPE(MatrixLU), intent(inout) :: oBI - INTEGER, intent(in) :: vvol, NN + integer, intent(in) :: vvol, NN - INTEGER :: IA, MM, LDA, Lwork, ll - INTEGER :: idgetrf, idgetri - REAL :: lastcpu - REAL, allocatable :: work(:) + integer :: IA, MM, LDA, Lwork, ll + integer :: idgetrf, idgetri + real(wp) :: lastcpu + real(wp), allocatable :: work(:) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!- - lastcpu = GETTIME + lastcpu = MPI_WTIME() ll = Lrad(vvol) @@ -698,10 +974,20 @@ subroutine get_LU_beltrami_matrices(vvol, oBI, NN) Lsavedguvij = .false. - WCALL( dfp200, ma00aa, (Iquad(vvol), mn, vvol, ll) ) - WCALL( dfp200, matrix, (vvol, mn, ll) ) - lastcpu = GETTIME + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call ma00aa(Iquad(vvol), mn, vvol, ll) + cpuo = MPI_WTIME() + + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call matrix(vvol, mn, ll) + cpuo = MPI_WTIME() + + + lastcpu = MPI_WTIME() if(Lconstraint .eq. 2) then ! for helicity constraint @@ -716,12 +1002,18 @@ subroutine get_LU_beltrami_matrices(vvol, oBI, NN) MM = NN ; LDA = NN+1 ; Lwork = NN+1 idgetrf = 1 ; call DGETRF( MM+1, NN+1, dMA(0:LDA-1,0:NN), LDA, oBI%ipivot, idgetrf ) - cput = GETTIME + cput = MPI_WTIME() select case( idgetrf ) ! 0123456789012345678 case( :-1 ) ; write(ounit,1010) cput-cpus, myid, vvol, cput-lastcpu, idgetrf, "input error; " case( 0 ) ; if( Wdforce ) write(ounit,1010) cput-cpus, myid, vvol, cput-lastcpu, idgetrf, "success; " case( 1: ) ; write(ounit,1010) cput-cpus, myid, vvol, cput-lastcpu, idgetrf, "singular; " - case default ; FATAL( dforce, .true., illegal ifail returned from F07ADF ) + case default ; + if( .true. ) then + write(6,'("dforce : fatal : myid=",i3," ; .true. ; illegal ifail returned from F07ADF ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : .true. : illegal ifail returned from F07ADF ;" + endif + end select call DCOPY((1+NN)*(1+NN), dMA, 1, oBI%mat, 1) ! BLAS version; 24 Jul 2019 @@ -741,12 +1033,18 @@ subroutine get_LU_beltrami_matrices(vvol, oBI, NN) idgetrf = 1 ; call DGETRF( MM, NN, dMA(1,1), LDA, oBI%ipivot(1:NN), idgetrf ) - cput = GETTIME + cput = MPI_WTIME() select case( idgetrf ) ! 
0123456789012345678 case( :-1 ) ; write(ounit,1010) cput-cpus, myid, vvol, cput-lastcpu, idgetrf, "input error; " case( 0 ) ; if( Wdforce ) write(ounit,1010) cput-cpus, myid, vvol, cput-lastcpu, idgetrf, "success; " case( 1: ) ; write(ounit,1010) cput-cpus, myid, vvol, cput-lastcpu, idgetrf, "singular; " - case default ; FATAL( dforce, .true., illegal ifail returned from F07ADF ) + case default ; + if( .true. ) then + write(6,'("dforce : fatal : myid=",i3," ; .true. ; illegal ifail returned from F07ADF ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dforce : .true. : illegal ifail returned from F07ADF ;" + endif + end select oBI%mat(1:NN,1:NN) = dMA(1:NN,1:NN) @@ -767,7 +1065,7 @@ end subroutine get_LU_beltrami_matrices !> @param oBI !> @param NN subroutine get_perturbed_solution(vvol, oBI, NN) - + use mod_kinds, only: wp => dp ! This routine evaluates the value of the magnetic field once the interface is perturbed using matrix perturbation theory. ! Separated from the main dfp200 core to allow local and semi-global constraints to be prescribed ! @@ -788,15 +1086,23 @@ subroutine get_perturbed_solution(vvol, oBI, NN) use typedefns - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + !------ - INTEGER, intent(in) :: vvol, NN + integer, intent(in) :: vvol, NN TYPE(MatrixLU),intent(inout) :: oBI - INTEGER :: ideriv, ll, idgetrs - REAL :: dpsi(1:2), work(1:NN+1), rhs(0:NN), dVA(0:NN), dVD(0:NN) - CHARACTER :: packorunpack + integer :: ideriv, ll, idgetrs + real(wp) :: dpsi(1:2), work(1:NN+1), rhs(0:NN), dVA(0:NN), dVD(0:NN) + character :: packorunpack !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!- @@ -805,9 +1111,19 @@ subroutine get_perturbed_solution(vvol, oBI, NN) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - WCALL( dfp200, intghs, ( Iquad(vvol), mn, vvol, ll, 0 ) ) - WCALL( dfp200, mtrxhs, ( vvol, mn, ll, dVA, dVD, 0) ) + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call intghs( Iquad(vvol), mn, vvol, ll, 0 ) + cpuo = MPI_WTIME() + + + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call mtrxhs( vvol, mn, ll, dVA, dVD, 0) + cpuo = MPI_WTIME() + rhs(0) = zero rhs(1:NN) = -dVA(1:NN) @@ -825,7 +1141,12 @@ subroutine get_perturbed_solution(vvol, oBI, NN) ! Unpack derivatives of solution packorunpack = 'U' - WCALL( dfp200, packab,( packorunpack, vvol, NN, solution(1:NN,-1), -1 ) ) ! derivatives placed in Ate(lvol,ideriv,1:mn)%s(0:Lrad), + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call packab( packorunpack, vvol, NN, solution(1:NN,-1), -1 ) + cpuo = MPI_WTIME() + ! derivatives placed in Ate(lvol,ideriv,1:mn)%s(0:Lrad), end subroutine get_perturbed_solution @@ -843,7 +1164,7 @@ end subroutine get_perturbed_solution !> @param issym !> @param irz subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) - + use mod_kinds, only: wp => dp ! Evaluate mu and psip derivatives and store them in dmupfdx. ! If debug and Lcheck=4, compare with finite difference approximation ! @@ -889,28 +1210,34 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) dMA, dMB, dMG, dMD - LOCALS: -! 
------- - INTEGER :: vvol, innout, idof, iflag, ii, issym, irz, ll, NN, ifail - INTEGER :: vflag, N, iwork(1:Nvol-1), idgesvx, pvol, order, IDGESV - INTEGER :: iocons - INTEGER, allocatable:: IPIV(:) - REAL :: det, lfactor, Bt00(1:Mvol, 0:1, -1:2) - REAL :: R(1:Nvol-1), C(1:Nvol-1), work(1:4*Nvol-4), ferr, berr, rcond, tmp(2:Nvol) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + integer :: vvol, innout, idof, iflag, ii, issym, irz, ll, NN, ifail + integer :: vflag, N, iwork(1:Nvol-1), idgesvx, pvol, order, IDGESV + integer :: iocons + integer, allocatable:: IPIV(:) + real(wp) :: det, lfactor, Bt00(1:Mvol, 0:1, -1:2) + real(wp) :: R(1:Nvol-1), C(1:Nvol-1), work(1:4*Nvol-4), ferr, berr, rcond, tmp(2:Nvol) LOGICAL :: Lonlysolution, LcomputeDerivatives, dfp100_logical - REAL, allocatable :: dBdmpf(:,:), dBdx2(:) + real(wp), allocatable :: dBdmpf(:,:), dBdx2(:) #ifdef DEBUG - INTEGER :: isymdiff, lr, ml, mode - INTEGER :: jj, tdoc, idoc, tdof, jdof, imn, Ndofgl - REAL :: dvol(-1:+1), evolume, imupf_global(1:Mvol,1:2,-2:2), imupf_local(1:2,-2:2), factor, Btemn_debug(1:mn, 0:1, 1:Mvol, -1:2) - REAL :: position(0:NGdof), force(0:NGdof) - REAL :: Fdof(1:Mvol-1), Xdof(1:Mvol-1) - REAL, allocatable :: fjac(:, :), r_deb(:), Fvec(:), dpfluxout(:) - REAL, allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:) ! original geometry; + integer :: isymdiff, lr, ml, mode + integer :: jj, tdoc, idoc, tdof, jdof, imn, Ndofgl + real(wp) :: dvol(-1:+1), evolume, imupf_global(1:Mvol,1:2,-2:2), imupf_local(1:2,-2:2), factor, Btemn_debug(1:mn, 0:1, 1:Mvol, -1:2) + real(wp) :: position(0:NGdof), force(0:NGdof) + real(wp) :: Fdof(1:Mvol-1), Xdof(1:Mvol-1) + real(wp), allocatable :: fjac(:, :), r_deb(:), Fvec(:), dpfluxout(:) + real(wp), allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:) ! original geometry; - CHARACTER :: packorunpack + character :: packorunpack EXTERNAL :: dfp100 #endif @@ -924,7 +1251,13 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) NN = NAdof(vvol) ! shorthand; #ifdef DEBUG - FATAL( dfp200, vvol-1+innout.gt.Mvol, psifactor needs attention ) + + if( vvol-1+innout.gt.Mvol ) then + write(6,'("dfp200 : fatal : myid=",i3," ; vvol-1+innout.gt.Mvol ; psifactor needs attention ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : vvol-1+innout.gt.Mvol : psifactor needs attention ;" + endif + #endif @@ -933,10 +1266,22 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) ! Store initial arrays for debug purposes. if( Lcheck.eq.3 .or. Lcheck.eq.4 ) then ! will check volume derivatives; - SALLOCATE( oRbc, (1:mn,0:Mvol), iRbc(1:mn,0:Mvol) ) - SALLOCATE( oZbs, (1:mn,0:Mvol), iZbs(1:mn,0:Mvol) ) - SALLOCATE( oRbs, (1:mn,0:Mvol), iRbs(1:mn,0:Mvol) ) - SALLOCATE( oZbc, (1:mn,0:Mvol), iZbc(1:mn,0:Mvol) ) + + allocate( oRbc(1:mn,0:Mvol), stat=astat ) + oRbc(1:mn,0:Mvol) = iRbc(1:mn,0:Mvol) + + + allocate( oZbs(1:mn,0:Mvol), stat=astat ) + oZbs(1:mn,0:Mvol) = iZbs(1:mn,0:Mvol) + + + allocate( oRbs(1:mn,0:Mvol), stat=astat ) + oRbs(1:mn,0:Mvol) = iRbs(1:mn,0:Mvol) + + + allocate( oZbc(1:mn,0:Mvol), stat=astat ) + oZbc(1:mn,0:Mvol) = iZbc(1:mn,0:Mvol) + endif ! end of if( Lcheck.eq.3 .or. 
Lcheck.eq.4 ) ; #endif @@ -954,9 +1299,18 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) order = Mvol-1 endif - SALLOCATE(dBdmpf , ( 1:order, 1:order ), zero) - SALLOCATE(dBdx2 , ( 1:order ), zero) - SALLOCATE(IPIV , ( 1:order ), zero) + + allocate( dBdmpf ( 1:order, 1:order ), stat=astat ) + dBdmpf ( 1:order, 1:order ) = zero + + + allocate( dBdx2 ( 1:order ), stat=astat ) + dBdx2 ( 1:order ) = zero + + + allocate( IPIV ( 1:order ), stat=astat ) + IPIV ( 1:order ) = zero + ! Derivatives of helicity multipliers @@ -965,11 +1319,26 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) ! Derivatives of poloidal flux. Need to solve a linear system of 5 equations - by hand for now ! Matrix coefficients evaluation do pvol = 1, Mvol - LREGION(pvol) + + if( Igeometry.eq.1 .or. pvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( pvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + do iocons = 0, 1 if( ( Lcoordinatesingularity .and. iocons.eq.0 ) .or. ( Lvacuumregion .and. iocons.eq.1 ) ) cycle - WCALL(dfp200, lbpol, (pvol, Bt00(1:Mvol, 0:1, -1:2), 2, iocons)) ! Stores derivative in global variable Btemn + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lbpol(pvol, Bt00(1:Mvol, 0:1, -1:2), 2, iocons) + cpuo = MPI_WTIME() + ! Stores derivative in global variable Btemn enddo #ifdef DEBUG if( .false. ) then @@ -1001,7 +1370,17 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) ! RHS coefficients evaluation do pvol = vvol, vvol+1 - LREGION(pvol) + + if( Igeometry.eq.1 .or. pvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( pvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + if( pvol.eq.vvol ) then dBdX%innout = 1 ! take derivative w.r.t outer interface else !pvol.eq.vvol+1 @@ -1010,7 +1389,12 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) do iocons = 0, 1 if( ( Lcoordinatesingularity .and. iocons.eq.0 ) .or. ( Lvacuumregion .and. iocons.eq.1 ) ) cycle - WCALL(dfp200, lbpol, (pvol, Bt00(1:Mvol, 0:1, -1:2), -1, iocons)) ! derivate w.r.t geometry + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lbpol(pvol, Bt00(1:Mvol, 0:1, -1:2), -1, iocons) + cpuo = MPI_WTIME() + ! derivate w.r.t geometry enddo #ifdef DEBUG @@ -1038,12 +1422,22 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) ! Get derivatives of B_theta w.r.t the toroidal flux in vacuum region iocons = 0 - WCALL(dfp200, lbpol, (Mvol, Bt00(1:Mvol, 0:1, -1:2), 1, iocons)) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lbpol(Mvol, Bt00(1:Mvol, 0:1, -1:2), 1, iocons) + cpuo = MPI_WTIME() + ! compute d(Itor,Gpol)/dpsip and d(Itor,Gpol)/dpsit ! TODO: this should already be evaluated in mp00ac... ! TODO: THIS COULD BE MOVED OUTSIDE THE LOOPS - iflag = 2 ; WCALL( dfp200, curent, ( Mvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,Mvol) ) ) + iflag = 2 ; + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call curent( Mvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,Mvol) ) + cpuo = MPI_WTIME() + dBdmpf(Mvol-1, Mvol ) = Bt00(Mvol, 0, 1) !dBdpsit dBdmpf(Mvol , Mvol-1) = dItGpdxtp( 1, 2, Mvol) !dIpdpsip @@ -1051,7 +1445,12 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) ! compute d(Itor,Gpol)/dx if( vvol.eq.Mvol-1 ) then ! 
Plasma interface is perturbed - iflag = -1 ; WCALL( dfp200, curent, ( Mvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,Mvol) ) ) + iflag = -1 ; + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call curent( Mvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,Mvol) ) + cpuo = MPI_WTIME() + dBdx2( Mvol ) = -dItGpdxtp( 1,-1, Mvol) !-dIpdxj else ! Inner interface is perturbed dBdx2( Mvol ) = zero @@ -1078,19 +1477,35 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) endif ! Free memory - DALLOCATE( dBdmpf ) - DALLOCATE( dBdx2 ) - DALLOCATE( IPIV ) + + deallocate(dBdmpf ,stat=astat) + + + deallocate(dBdx2 ,stat=astat) + + + deallocate(IPIV ,stat=astat) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! else ! LocalConstraint if( Lconstraint.eq.1 ) then - iflag = -1 ; WCALL( dfp200, tr00ab, ( vvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,vvol) ) ) ! compute d(transform)/dx; + iflag = -1 ; + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call tr00ab( vvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,vvol) ) + cpuo = MPI_WTIME() + ! compute d(transform)/dx; endif if( Lvacuumregion .and. Lconstraint.ge.0 ) then - iflag = -1 ; WCALL( dfp200, curent, ( vvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,vvol) ) ) ! compute d(Itor,Gpol)/dx; + iflag = -1 ; + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call curent( vvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,vvol) ) + cpuo = MPI_WTIME() + ! compute d(Itor,Gpol)/dx; endif dmupfdx(vvol,1,1,idof,innout) = zero ! Prepare array @@ -1103,7 +1518,13 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) if( Lcoordinatesingularity ) then ! solution does not depend on dpflux, and only outer transform is a constraint; det = diotadxup(1,1,vvol) - FATAL( dfp200, abs(det).lt.small, error computing derivatives of mu wrt geometry at fixed transform ) + + if( abs(det).lt.small ) then + write(6,'("dfp200 : fatal : myid=",i3," ; abs(det).lt.small ; error computing derivatives of mu wrt geometry at fixed transform ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : abs(det).lt.small : error computing derivatives of mu wrt geometry at fixed transform ;" + endif + dmupfdx(vvol,1,1,idof,innout) = - lfactor * ( diotadxup(1,-1,vvol) ) / det dmupfdx(vvol,1,2,idof,innout) = zero @@ -1111,7 +1532,13 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) else ! if( .not.Lcoordinatesingularity ) ; det = diotadxup(0,1,vvol) * diotadxup(1,2,vvol) - diotadxup(0,2,vvol) * diotadxup(1,1,vvol) - FATAL( dfp200, abs(det).lt.small, error computing derivatives of mu & dpflux wrt geometry at fixed transform ) + + if( abs(det).lt.small ) then + write(6,'("dfp200 : fatal : myid=",i3," ; abs(det).lt.small ; error computing derivatives of mu & dpflux wrt geometry at fixed transform ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : abs(det).lt.small : error computing derivatives of mu & dpflux wrt geometry at fixed transform ;" + endif + dmupfdx(vvol,1,1,idof,innout) = - lfactor * ( + diotadxup(1, 2,vvol) * diotadxup(0,-1,vvol) - diotadxup(0, 2,vvol) * diotadxup(1,-1,vvol) ) / det dmupfdx(vvol,1,2,idof,innout) = - lfactor * ( - diotadxup(1, 1,vvol) * diotadxup(0,-1,vvol) + diotadxup(0, 1,vvol) * diotadxup(1,-1,vvol) ) / det @@ -1125,7 +1552,13 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) if ( Lconstraint.eq.0 ) then ! 
THIS NEEDS ATTENTION; det = dItGpdxtp(0,1,vvol) * dItGpdxtp(1,2,vvol) - dItGpdxtp(0,2,vvol) * dItGpdxtp(1,1,vvol) - FATAL( dfp200, abs(det).lt.small, error computing derivatives of dtflux & dpflux wrt geometry at fixed Itor and Gpol ) + + if( abs(det).lt.small ) then + write(6,'("dfp200 : fatal : myid=",i3," ; abs(det).lt.small ; error computing derivatives of dtflux & dpflux wrt geometry at fixed Itor and Gpol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : abs(det).lt.small : error computing derivatives of dtflux & dpflux wrt geometry at fixed Itor and Gpol ;" + endif + dmupfdx(vvol,1,1,idof,innout) = - lfactor * ( + dItGpdxtp(1, 2,vvol) * dItGpdxtp(0,-1,vvol) - dItGpdxtp(0, 2,vvol) * dItGpdxtp(1,-1,vvol) ) / det dmupfdx(vvol,1,2,idof,innout) = - lfactor * ( - dItGpdxtp(1, 1,vvol) * dItGpdxtp(0,-1,vvol) + dItGpdxtp(0, 1,vvol) * dItGpdxtp(1,-1,vvol) ) / det @@ -1133,7 +1566,13 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) else if( Lconstraint.eq.1 ) then det = diotadxup(0,1,vvol) * dItGpdxtp(1,2,vvol) - diotadxup(0,2,vvol) * dItGpdxtp(1,1,vvol) - FATAL( dfp200, abs(det).lt.small, error computing derivatives of dtflux & dpflux wrt geometry at fixed Itor and Gpol ) + + if( abs(det).lt.small ) then + write(6,'("dfp200 : fatal : myid=",i3," ; abs(det).lt.small ; error computing derivatives of dtflux & dpflux wrt geometry at fixed Itor and Gpol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : abs(det).lt.small : error computing derivatives of dtflux & dpflux wrt geometry at fixed Itor and Gpol ;" + endif + dmupfdx(vvol,1,1,idof,innout) = - lfactor * ( + dItGpdxtp(1, 2,vvol) * diotadxup(0,-1,vvol) - diotadxup(0, 2,vvol) * dItGpdxtp(1,-1,vvol) ) / det dmupfdx(vvol,1,2,idof,innout) = - lfactor * ( - dItGpdxtp(1, 1,vvol) * diotadxup(0,-1,vvol) + diotadxup(0, 1,vvol) * dItGpdxtp(1,-1,vvol) ) / det @@ -1158,9 +1597,24 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) dBdX%L = .false. if( LocalConstraint ) then - WCALL(dfp200, deallocate_geometry_matrices, (LcomputeDerivatives)) - WCALL(dfp200, deallocate_Beltrami_matrices, (LcomputeDerivatives)) - WCALL(dfp200, intghs_workspace_destroy, ())) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call deallocate_geometry_matrices(LcomputeDerivatives) + cpuo = MPI_WTIME() + + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call deallocate_Beltrami_matrices(LcomputeDerivatives) + cpuo = MPI_WTIME() + + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call intghs_workspace_destroy() + cpuo = MPI_WTIME() + endif do isymdiff = -2, 2 ! symmetric fourth-order, finite-difference used to approximate derivatives; @@ -1188,16 +1642,26 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) Xdof(1:Mvol-1) = zero; if( LocalConstraint ) then - SALLOCATE( Fvec, (1:Mvol-1), zero) + + allocate( Fvec(1:Mvol-1), stat=astat ) + Fvec(1:Mvol-1) = zero + Ndofgl = 0; Fvec(1:Mvol-1) = 0; dfp100_logical = .FALSE.; Xdof(1:Mvol-1) = dpflux(2:Mvol) + xoffset ! Solve for field dBdX%L = .false. ! No need for derivatives in this context - WCALL(dfp200, dfp100, (Ndofgl, Xdof, Fvec, dfp100_logical) ) - DALLOCATE( Fvec ) + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call dfp100(Ndofgl, Xdof, Fvec, dfp100_logical) + cpuo = MPI_WTIME() + + + + deallocate(Fvec ,stat=astat) + ! -------------------------------------------------------------------------------------------------- ! Global constraint - call the master thread calls hybrd1 on dfp100, others call dfp100_loop. 
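[Editor's note, not part of the patch itself] The hunks above and below repeatedly replace the former WCALL timing macro with explicit MPI_WTIME() bookkeeping around each call. A minimal, self-contained sketch of that pattern is given here, assuming it mirrors the expanded macro; the names Tdfp200, cput and cpuo follow the patch, while the program name and do_work are hypothetical stand-ins for the routine being timed (lbpol, lforce, dfp100, ...).

    program timing_pattern
      use mpi
      implicit none
      integer, parameter :: wp = kind(1.0d0)   ! stand-in for mod_kinds wp => dp
      integer  :: ierr
      real(wp) :: cput, cpuo, Tdfp200

      call MPI_INIT( ierr )
      Tdfp200 = 0.0_wp
      cpuo    = MPI_WTIME()

      ! charge the time spent in the caller since the last timer reset,
      ! invoke the worker (which does its own accounting), then restart the clock;
      cput    = MPI_WTIME()
      Tdfp200 = Tdfp200 + ( cput - cpuo )
      call do_work()
      cpuo    = MPI_WTIME()

      call MPI_FINALIZE( ierr )

    contains

      subroutine do_work()        ! hypothetical placeholder for lbpol, lforce, ...
        real(wp) :: x
        x = sqrt( 2.0_wp )        ! stand-in for real work
      end subroutine do_work

    end program timing_pattern

Compiled with an MPI wrapper (e.g. mpifort), this reproduces the time-accumulation behaviour that the macro previously hid; the same expansion appears before and after every former WCALL site in dfp200.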
@@ -1214,13 +1678,27 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) Ndofgl = Mvol-1 endif - SALLOCATE(dpfluxout, (1:Ndofgl), zero ) - SALLOCATE( Fvec, (1:Ndofgl), zero ) - SALLOCATE( IPIV, (1:Mvol-1), zero ) + + allocate( dpfluxout(1:Ndofgl), stat=astat ) + dpfluxout(1:Ndofgl) = zero + + + allocate( Fvec(1:Ndofgl), stat=astat ) + Fvec(1:Ndofgl) = zero + + + allocate( IPIV(1:Mvol-1), stat=astat ) + IPIV(1:Mvol-1) = zero + dfp100_logical = .TRUE. - WCALL(dfp200, dfp100, (Ndofgl, Xdof(1:Mvol-1), Fvec(1:Ndofgl), dfp100_logical)) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call dfp100(Ndofgl, Xdof(1:Mvol-1), Fvec(1:Ndofgl), dfp100_logical) + cpuo = MPI_WTIME() + ! Only one cpu with this test - thus no need for broadcast dpfluxout = Fvec @@ -1232,15 +1710,31 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) dtflux(Mvol) = dtflux(Mvol ) - dpfluxout(Mvol ) endif - DALLOCATE(IPIV) - DALLOCATE(Fvec) - DALLOCATE(dpfluxout) + + deallocate(IPIV,stat=astat) + + + deallocate(Fvec,stat=astat) + + + deallocate(dpfluxout,stat=astat) + endif if( LocalConstraint ) then - LREGION(vvol) + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + ll = Lrad(vvol) ! shorthand NN = NAdof(vvol) ! shorthand; @@ -1265,9 +1759,24 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) if( LocalConstraint ) then ! reallocate matrices for next iteration - WCALL(dfp200, intghs_workspace_init, (vvol)) - WCALL(dfp200, allocate_Beltrami_matrices, (vvol,LcomputeDerivatives)) - WCALL(dfp200, allocate_geometry_matrices, (vvol,LcomputeDerivatives)) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call intghs_workspace_init(vvol) + cpuo = MPI_WTIME() + + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call allocate_Beltrami_matrices(vvol,LcomputeDerivatives) + cpuo = MPI_WTIME() + + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call allocate_geometry_matrices(vvol,LcomputeDerivatives) + cpuo = MPI_WTIME() + endif 8294 continue @@ -1282,7 +1791,7 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) + 1 * imupf_global(1:Mvol,1:2,-2) ) / ( 12 * dRZ ) endif - cput = GETTIME + cput = MPI_WTIME() write(ounit,3003) if( LocalConstraint ) then @@ -1322,7 +1831,12 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! vflag = 1 ! this flag instructs volume to continue even if the volume is invalid; - WCALL( dfp200, volume, ( vvol, vflag ) ) ! compute derivative of volume; wrt to harmonic described by dBdX structure; + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call volume( vvol, vflag ) + cpuo = MPI_WTIME() + ! compute derivative of volume; wrt to harmonic described by dBdX structure; #ifdef DEBUG @@ -1330,7 +1844,7 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) dvol(0) = dvolume - cput = GETTIME + cput = MPI_WTIME() write(ounit,1001) cput-cpus, myid, vvol, im(ii), in(ii), irz, issym, innout, "analytic", dvolume 1001 format("dfp200 : ",f10.2," : myid=",i3," ; vvol=",i3," ; (",i3," ,",i3,") ; irz=",i2," ; issym=",i2," ; innout=",i2,& @@ -1349,7 +1863,12 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) endif vflag = 1 ! 
this flag instructs volume to continue even if the volume is invalid; - WCALL( dfp200, volume, ( vvol, vflag ) ) ! compute volume; this corrupts calculation of dvolume; + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call volume( vvol, vflag ) + cpuo = MPI_WTIME() + ! compute volume; this corrupts calculation of dvolume; dvol(isymdiff) = vvolume(vvol) @@ -1357,10 +1876,16 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) evolume = abs( ( dvol(+1)-dvol(-1) ) / dRZ - dvol(0) ) ! error in finite-difference calculation and analytic derivative; - cput = GETTIME + cput = MPI_WTIME() write(ounit,1001) cput-cpus, myid, vvol, im(ii), in(ii), irz, issym, innout, "finite-d", ( dvol(+1)-dvol(-1) ) / dRZ, evolume - FATAL( dfp200, evolume.gt.dRZ, unacceptable error in volume derivative ) + + if( evolume.gt.dRZ ) then + write(6,'("dfp200 : fatal : myid=",i3," ; evolume.gt.dRZ ; unacceptable error in volume derivative ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : evolume.gt.dRZ : unacceptable error in volume derivative ;" + endif + iRbc(1:mn,0:Mvol) = oRbc(1:mn,0:Mvol) iZbs(1:mn,0:Mvol) = oZbs(1:mn,0:Mvol) @@ -1378,10 +1903,18 @@ subroutine evaluate_dmupfdx(innout, idof, ii, issym, irz) #ifdef DEBUG if( Lcheck.eq.3 .or. Lcheck.eq.4 ) then - DALLOCATE(oRbc) - DALLOCATE(oZbs) - DALLOCATE(oRbs) - DALLOCATE(oZbc) + + deallocate(oRbc,stat=astat) + + + deallocate(oZbs,stat=astat) + + + deallocate(oRbs,stat=astat) + + + deallocate(oZbc,stat=astat) + endif #endif @@ -1410,7 +1943,7 @@ end subroutine evaluate_dmupfdx !> @param Ntz !> @param LcomputeDerivatives subroutine evaluate_dBB(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, dRR, dZZ, dII, dLL, dPP, Ntz, LcomputeDerivatives) - + use mod_kinds, only: wp => dp ! Evaluate the derivative of the square of the magnetic field modulus. Add spectral constraint derivatives if ! required. @@ -1460,20 +1993,28 @@ subroutine evaluate_dBB(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, dRodR, dRodZ, dZodR, dZodZ, dBdX, & xoffset - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + !------ LOGICAL, intent(in) :: LComputeDerivatives -INTEGER :: iocons, lvol, ideriv, id, iflag, Lcurvature, innout, issym, irz, ii, ifail, idoc, idof, Ntz -REAL :: lss, DDl, MMl -REAL :: dAt(1:Ntz,-1:2), dAz(1:Ntz,-1:2), XX(1:Ntz), YY(1:Ntz), dBB(1:Ntz,-1:2), dII(1:Ntz), dLL(1:Ntz) -REAL :: dPP(1:Ntz), length(1:Ntz), dRR(1:Ntz,-1:2), dZZ(1:Ntz,-1:2), constraint(1:Ntz) +integer :: iocons, lvol, ideriv, id, iflag, Lcurvature, innout, issym, irz, ii, ifail, idoc, idof, Ntz +real(wp) :: lss, DDl, MMl +real(wp) :: dAt(1:Ntz,-1:2), dAz(1:Ntz,-1:2), XX(1:Ntz), YY(1:Ntz), dBB(1:Ntz,-1:2), dII(1:Ntz), dLL(1:Ntz) +real(wp) :: dPP(1:Ntz), length(1:Ntz), dRR(1:Ntz,-1:2), dZZ(1:Ntz,-1:2), constraint(1:Ntz) #ifdef DEBUG -INTEGER :: isymdiff, ll, NN, ndofgl, pvol -REAL :: Fvec(1:Mvol-1), Xdof(1:Mvol-1) -REAL, allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:) ! original geometry; -REAL, allocatable :: idBB(:,:), iforce(:,:), iposition(:,:) -CHARACTER :: packorunpack +integer :: isymdiff, ll, NN, ndofgl, pvol +real(wp) :: Fvec(1:Mvol-1), Xdof(1:Mvol-1) +real(wp), allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:) ! 
original geometry; +real(wp), allocatable :: idBB(:,:), iforce(:,:), iposition(:,:) +character :: packorunpack #endif do iocons = 0, 1 @@ -1492,7 +2033,12 @@ subroutine evaluate_dBB(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, iflag = 1 do ideriv=1, 2 !call evaluate_Bsquare(iocons, lvol, dBB, dAt, dAz, XX, YY, length, DDl, MMl, ideriv)! In a subroutine; called somewhere else when semi global constraint - WCALL(dfp200, lforce, (lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) ) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lforce(lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) + cpuo = MPI_WTIME() + enddo call tfft( Nt, Nz, dBB(1:Ntz,1), dBB(1:Ntz,2), & ! derivatives of B^2 wrt mu and dpflux; @@ -1529,7 +2075,12 @@ subroutine evaluate_dBB(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, ! ----------------------------------- ideriv = 0; iflag=1 - WCALL( dfp200, lforce, (lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) ) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lforce(lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) + cpuo = MPI_WTIME() + ! dFFdRZ CONSTRUCTION @@ -1539,10 +2090,21 @@ subroutine evaluate_dBB(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, ! --------------------- ideriv = -1; iflag=0 - WCALL( dfp200, lforce, (lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) ) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lforce(lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) + cpuo = MPI_WTIME() + ! Add derivatives of pressure as well - FATAL( dfp200, vvolume(lvol).lt.small, shall divide by vvolume(lvol)**(gamma+one) ) + + if( vvolume(lvol).lt.small ) then + write(6,'("dfp200 : fatal : myid=",i3," ; vvolume(lvol).lt.small ; shall divide by vvolume(lvol)**(gamma+one) ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : vvolume(lvol).lt.small : shall divide by vvolume(lvol)**(gamma+one) ;" + endif + ! Derivatives of force wrt geometry; In real space. ijreal(1:Ntz) = - adiabatic(lvol) * pscale * gamma * dvolume / vvolume(lvol)**(gamma+one) + dBB(1:Ntz,-1) @@ -1560,7 +2122,13 @@ subroutine evaluate_dBB(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, if( innout.eq.1 .and. iocons.eq.1 ) then ! include derivatives of spectral constraints; #ifdef DEBUG - FATAL( dfp200, abs(DDl).lt.small, divide by zero on spectral constraint ) + + if( abs(DDl).lt.small ) then + write(6,'("dfp200 : fatal : myid=",i3," ; abs(DDl).lt.small ; divide by zero on spectral constraint ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : abs(DDl).lt.small : divide by zero on spectral constraint ;" + endif + #endif if( issym.eq.0 ) then ! take derivatives wrt Rbc and Zbs; @@ -1717,7 +2285,13 @@ subroutine evaluate_dBB(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, mn, im(1:mn), in(1:mn), evmn(1:mn), odmn(1:mn), comn(1:mn), simn(1:mn), ifail ) ! evmn and odmn are available as workspace; - FATAL( dfp200, lvol-1+innout.gt.Mvol, psifactor needs attention ) + + if( lvol-1+innout.gt.Mvol ) then + write(6,'("dfp200 : fatal : myid=",i3," ; lvol-1+innout.gt.Mvol ; psifactor needs attention ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : lvol-1+innout.gt.Mvol : psifactor needs attention ;" + endif + idoc = 0 @@ -1753,7 +2327,13 @@ subroutine evaluate_dBB(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, endif ! 
end of if( NOTstellsym) ; #ifdef DEBUG - FATAL( dfp200, idoc.ne.LGdof, counting error ) + + if( idoc.ne.LGdof ) then + write(6,'("dfp200 : fatal : myid=",i3," ; idoc.ne.LGdof ; counting error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : idoc.ne.LGdof : counting error ;" + endif + #endif enddo ! end of do iocons; @@ -1762,7 +2342,7 @@ end subroutine evaluate_dBB subroutine hessian_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, dRR, dZZ, dII, dLL, dPP, Ntz) - + use mod_kinds, only: wp => dp ! Evaluate the derivative of the square of the magnetic field modulus. Add spectral constraint derivatives if ! required. @@ -1813,19 +2393,27 @@ subroutine hessian_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, lengt dRodR, dRodZ, dZodR, dZodZ, dBdX, & xoffset - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + !------ -INTEGER :: iocons, lvol, ideriv, id, iflag, Lcurvature, innout, issym, irz, ii, ifail, idoc, idof, Ntz -REAL :: lss, DDl, MMl -REAL :: dAt(1:Ntz,-1:2), dAz(1:Ntz,-1:2), XX(1:Ntz), YY(1:Ntz), dBB(1:Ntz,-1:2), dII(1:Ntz), dLL(1:Ntz) -REAL :: dPP(1:Ntz), length(1:Ntz), dRR(1:Ntz,-1:2), dZZ(1:Ntz,-1:2), constraint(1:Ntz) +integer :: iocons, lvol, ideriv, id, iflag, Lcurvature, innout, issym, irz, ii, ifail, idoc, idof, Ntz +real(wp) :: lss, DDl, MMl +real(wp) :: dAt(1:Ntz,-1:2), dAz(1:Ntz,-1:2), XX(1:Ntz), YY(1:Ntz), dBB(1:Ntz,-1:2), dII(1:Ntz), dLL(1:Ntz) +real(wp) :: dPP(1:Ntz), length(1:Ntz), dRR(1:Ntz,-1:2), dZZ(1:Ntz,-1:2), constraint(1:Ntz) #ifdef DEBUG -INTEGER :: isymdiff, ll, NN, ndofgl, pvol -REAL :: Fvec(1:Mvol-1), Xdof(1:Mvol-1) -REAL, allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:) ! original geometry; -REAL, allocatable :: idBB(:,:), iforce(:,:), iposition(:,:) -CHARACTER :: packorunpack +integer :: isymdiff, ll, NN, ndofgl, pvol +real(wp) :: Fvec(1:Mvol-1), Xdof(1:Mvol-1) +real(wp), allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:) ! original geometry; +real(wp), allocatable :: idBB(:,:), iforce(:,:), iposition(:,:) +character :: packorunpack #endif @@ -1840,7 +2428,12 @@ subroutine hessian_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, lengt ! ----------------------------------- ideriv = 0; iflag=1 - WCALL( dfp200, lforce, (lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) ) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lforce(lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) + cpuo = MPI_WTIME() + ! hessian_dFFdRZ CONSTRUCTION ! =================== @@ -1849,10 +2442,21 @@ subroutine hessian_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, lengt ! --------------------- ideriv = -1; iflag=0 - WCALL( dfp200, lforce, (lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) ) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lforce(lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) + cpuo = MPI_WTIME() + ! Add derivatives of pressure as well - FATAL( dfp200, vvolume(lvol).lt.small, shall divide by vvolume(lvol)**(gamma+one) ) + + if( vvolume(lvol).lt.small ) then + write(6,'("dfp200 : fatal : myid=",i3," ; vvolume(lvol).lt.small ; shall divide by vvolume(lvol)**(gamma+one) ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : vvolume(lvol).lt.small : shall divide by vvolume(lvol)**(gamma+one) ;" + endif + ! 
Derivatives of force wrt geometry; In real space. ijreal(1:Ntz) = - adiabatic(lvol) * pscale * gamma * dvolume / vvolume(lvol)**(gamma+one) + dBB(1:Ntz,-1)*Rij(1:Ntz,0,0) @@ -1870,7 +2474,13 @@ subroutine hessian_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, lengt mn, im(1:mn), in(1:mn), efmn(1:mn), ofmn(1:mn), cfmn(1:mn), sfmn(1:mn), ifail ) - FATAL( dfp200, lvol-1+innout.gt.Mvol, psifactor needs attention ) + + if( lvol-1+innout.gt.Mvol ) then + write(6,'("dfp200 : fatal : myid=",i3," ; lvol-1+innout.gt.Mvol ; psifactor needs attention ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : lvol-1+innout.gt.Mvol : psifactor needs attention ;" + endif + idoc = 0 @@ -1904,7 +2514,13 @@ subroutine hessian_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, lengt endif ! end of if( NOTstellsym) ; #ifdef DEBUG - FATAL( dfp200, idoc.ne.LGdof, counting error ) + + if( idoc.ne.LGdof ) then + write(6,'("dfp200 : fatal : myid=",i3," ; idoc.ne.LGdof ; counting error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : idoc.ne.LGdof : counting error ;" + endif + #endif enddo ! end of do iocons; @@ -1912,7 +2528,7 @@ subroutine hessian_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, lengt end subroutine hessian_dFFdRZ subroutine hessian3D_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, length, dRR, dZZ, dII, dLL, dPP, Ntz) - + use mod_kinds, only: wp => dp ! Evaluate the derivative of the square of the magnetic field modulus. Add spectral constraint derivatives if ! required. @@ -1965,21 +2581,29 @@ subroutine hessian3D_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, len dRodR, dRodZ, dZodR, dZodZ, dBdX, & xoffset - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + !------ -INTEGER :: iocons, lvol, ideriv, id, iflag, Lcurvature, innout, issym, irz, ii, ifail, idoc, idof, Ntz -REAL :: lss, DDl, MMl -REAL :: dAt(1:Ntz,-1:2), dAz(1:Ntz,-1:2), XX(1:Ntz), YY(1:Ntz), dBB(1:Ntz,-1:2), dII(1:Ntz), dLL(1:Ntz) -REAL :: dPP(1:Ntz), length(1:Ntz), dRR(1:Ntz,-1:2), dZZ(1:Ntz,-1:2), constraint(1:Ntz) -REAL :: ddFcol1(1:Ntz), ddFcol2(1:Ntz),ddFcol3(1:Ntz), ddFcol4(1:Ntz) +integer :: iocons, lvol, ideriv, id, iflag, Lcurvature, innout, issym, irz, ii, ifail, idoc, idof, Ntz +real(wp) :: lss, DDl, MMl +real(wp) :: dAt(1:Ntz,-1:2), dAz(1:Ntz,-1:2), XX(1:Ntz), YY(1:Ntz), dBB(1:Ntz,-1:2), dII(1:Ntz), dLL(1:Ntz) +real(wp) :: dPP(1:Ntz), length(1:Ntz), dRR(1:Ntz,-1:2), dZZ(1:Ntz,-1:2), constraint(1:Ntz) +real(wp) :: ddFcol1(1:Ntz), ddFcol2(1:Ntz),ddFcol3(1:Ntz), ddFcol4(1:Ntz) #ifdef DEBUG -INTEGER :: isymdiff, ll, NN, ndofgl, pvol -REAL :: Fvec(1:Mvol-1), Xdof(1:Mvol-1) -REAL, allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:) ! original geometry; -REAL, allocatable :: idBB(:,:), iforce(:,:), iposition(:,:) -CHARACTER :: packorunpack +integer :: isymdiff, ll, NN, ndofgl, pvol +real(wp) :: Fvec(1:Mvol-1), Xdof(1:Mvol-1) +real(wp), allocatable :: oRbc(:,:), oZbs(:,:), oRbs(:,:), oZbc(:,:) ! original geometry; +real(wp), allocatable :: idBB(:,:), iforce(:,:), iposition(:,:) +character :: packorunpack #endif @@ -1994,7 +2618,12 @@ subroutine hessian3D_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, len ! 
----------------------------------- ideriv = 0; iflag=0 - WCALL( dfp200, lforce, (lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl,iflag) ) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lforce(lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl,iflag) + cpuo = MPI_WTIME() + ! hessian_dFFdRZ CONSTRUCTION @@ -2004,10 +2633,21 @@ subroutine hessian3D_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, len ! --------------------- ideriv = -1; iflag=1 - WCALL( dfp200, lforce, (lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) ) + + cput = MPI_WTIME() + Tdfp200 = Tdfp200 + ( cput-cpuo ) + call lforce(lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag) + cpuo = MPI_WTIME() + ! Add derivatives of pressure as well - FATAL( dfp200, vvolume(lvol).lt.small, shall divide by vvolume(lvol)**(gamma+one) ) + + if( vvolume(lvol).lt.small ) then + write(6,'("dfp200 : fatal : myid=",i3," ; vvolume(lvol).lt.small ; shall divide by vvolume(lvol)**(gamma+one) ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : vvolume(lvol).lt.small : shall divide by vvolume(lvol)**(gamma+one) ;" + endif + ! Derivatives of force wrt geometry; In real space. ijreal(1:Ntz) = - adiabatic(lvol) * pscale * gamma * dvolume / vvolume(lvol)**(gamma+one) + dBB(1:Ntz,-1) @@ -2129,7 +2769,13 @@ subroutine hessian3D_dFFdRZ(lvol, idof, innout, issym, irz, ii, dBB, XX, YY, len dII(1:Ntz) = zero ! no angle/spectral width constraint #ifdef DEBUG - FATAL( dfp200, idoc.ne.LGdof, counting error ) + + if( idoc.ne.LGdof ) then + write(6,'("dfp200 : fatal : myid=",i3," ; idoc.ne.LGdof ; counting error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "dfp200 : idoc.ne.LGdof : counting error ;" + endif + #endif enddo ! 
end of do iocons; diff --git a/src/global.f90 b/src/global.F90 similarity index 53% rename from src/global.f90 rename to src/global.F90 index e61be2a7..1dcc91ad 100644 --- a/src/global.f90 +++ b/src/global.F90 @@ -34,39 +34,39 @@ !> \ingroup grp_global !> \brief some constants used throughout the code module constants - + use mod_kinds, only: wp => dp implicit none - REAL, parameter :: zero = 0.0 !< 0 - REAL, parameter :: one = 1.0 !< 1 - REAL, parameter :: two = 2.0 !< 2 - REAL, parameter :: three = 3.0 !< 3 - REAL, parameter :: four = 4.0 !< 4 - REAL, parameter :: five = 5.0 !< 5 - REAL, parameter :: six = 6.0 !< 6 - REAL, parameter :: seven = 7.0 !< 7 - REAL, parameter :: eight = 8.0 !< 8 - REAL, parameter :: nine = 9.0 !< 9 - REAL, parameter :: ten = 10.0 !< 10 - - REAL, parameter :: eleven = 11.0 !< 11 - REAL, parameter :: twelve = 12.0 !< 12 - - REAL, parameter :: hundred = 100.0 !< 100 - REAL, parameter :: thousand = 1000.0 !< 1000 - - REAL, parameter :: half = one / two !< 1/2 - REAL, parameter :: third = one / three !< 1/3 - REAL, parameter :: quart = one / four !< 1/4 - REAL, parameter :: fifth = one / five !< 1/5 - REAL, parameter :: sixth = one / six !< 1/6 - - REAL, parameter :: pi2 = 6.28318530717958623 !< \f$2\pi\f$ - REAL, parameter :: pi = pi2 / two !< \f$\pi\f$ - REAL, parameter :: mu0 = 2.0E-07 * pi2 !< \f$4\pi\cdot10^{-7}\f$ - REAL, parameter :: goldenmean = 1.618033988749895 !< golden mean = \f$( 1 + \sqrt 5 ) / 2\f$ ; - - REAL, parameter :: version = 3.20 !< version of SPEC + real(wp), parameter :: zero = 0.0 !< 0 + real(wp), parameter :: one = 1.0 !< 1 + real(wp), parameter :: two = 2.0 !< 2 + real(wp), parameter :: three = 3.0 !< 3 + real(wp), parameter :: four = 4.0 !< 4 + real(wp), parameter :: five = 5.0 !< 5 + real(wp), parameter :: six = 6.0 !< 6 + real(wp), parameter :: seven = 7.0 !< 7 + real(wp), parameter :: eight = 8.0 !< 8 + real(wp), parameter :: nine = 9.0 !< 9 + real(wp), parameter :: ten = 10.0 !< 10 + + real(wp), parameter :: eleven = 11.0 !< 11 + real(wp), parameter :: twelve = 12.0 !< 12 + + real(wp), parameter :: hundred = 100.0 !< 100 + real(wp), parameter :: thousand = 1000.0 !< 1000 + + real(wp), parameter :: half = one / two !< 1/2 + real(wp), parameter :: third = one / three !< 1/3 + real(wp), parameter :: quart = one / four !< 1/4 + real(wp), parameter :: fifth = one / five !< 1/5 + real(wp), parameter :: sixth = one / six !< 1/6 + + real(wp), parameter :: pi2 = 6.28318530717958623 !< \f$2\pi\f$ + real(wp), parameter :: pi = pi2 / two !< \f$\pi\f$ + real(wp), parameter :: mu0 = 2.0E-07 * pi2 !< \f$4\pi\cdot10^{-7}\f$ + real(wp), parameter :: goldenmean = 1.618033988749895 !< golden mean = \f$( 1 + \sqrt 5 ) / 2\f$ ; + + real(wp), parameter :: version = 3.20 !< version of SPEC end module constants @@ -75,14 +75,14 @@ end module constants !> \brief platform-dependant numerical resolution !> \ingroup grp_global module numerical - + use mod_kinds, only: wp => dp implicit none - REAL, parameter :: machprec = 1.11e-16 !< machine precision: 0.5*epsilon(one) for 64 bit double precision - REAL, parameter :: vsmall = 100*machprec !< very small number - REAL, parameter :: small = 10000*machprec !< small number - REAL, parameter :: sqrtmachprec = sqrt(machprec) !< square root of machine precision - REAL, parameter :: logtolerance = 1.0e-32 !< this is used to avoid taking alog10(zero); see e.g. 
dforce; + real(wp), parameter :: machprec = 1.11e-16 !< machine precision: 0.5*epsilon(one) for 64 bit double precision + real(wp), parameter :: vsmall = 100*machprec !< very small number + real(wp), parameter :: small = 10000*machprec !< small number + real(wp), parameter :: sqrtmachprec = sqrt(machprec) !< square root of machine precision + real(wp), parameter :: logtolerance = 1.0e-32 !< this is used to avoid taking alog10(zero); see e.g. dforce; end module numerical @@ -91,27 +91,27 @@ end module numerical !> \brief central definition of file units to avoid conflicts !> \ingroup grp_global module fileunits - + use mod_kinds, only: wp => dp implicit none - INTEGER :: iunit = 10 !< input; used in global/readin:ext.sp, global/wrtend:ext.sp.end - INTEGER :: ounit = 6 !< screen output; - INTEGER :: gunit = 13 !< wall geometry; used in wa00aa + integer :: iunit = 10 !< input; used in global/readin:ext.sp, global/wrtend:ext.sp.end + integer :: ounit = 6 !< screen output; + integer :: gunit = 13 !< wall geometry; used in wa00aa - INTEGER :: aunit = 11 !< vector potential; used in ra00aa:.ext.AtAzmn; - INTEGER :: dunit = 12 !< derivative matrix; used in newton:.ext.GF; - INTEGER :: hunit = 14 !< eigenvalues of Hessian; under re-construction; - INTEGER :: munit = 14 !< matrix elements of Hessian; - INTEGER :: lunit = 20 !< local unit; used in lunit+myid: pp00aa:.ext.poincare,.ext.transform; - INTEGER :: vunit = 15 !< for examination of adaptive quadrature; used in casing:.ext.vcint; + integer :: aunit = 11 !< vector potential; used in ra00aa:.ext.AtAzmn; + integer :: dunit = 12 !< derivative matrix; used in newton:.ext.GF; + integer :: hunit = 14 !< eigenvalues of Hessian; under re-construction; + integer :: munit = 14 !< matrix elements of Hessian; + integer :: lunit = 20 !< local unit; used in lunit+myid: pp00aa:.ext.poincare,.ext.transform; + integer :: vunit = 15 !< for examination of adaptive quadrature; used in casing:.ext.vcint; contains subroutine mute(action) implicit none - INTEGER,intent(in) :: action - INTEGER, parameter :: iopen = 1, iclose = 0, null = 37 - INTEGER :: ios + integer,intent(in) :: action + integer, parameter :: iopen = 1, iclose = 0, null = 37 + integer :: ios character(len=*), parameter :: nullfile="/dev/null" ! 
open a tmp file for screen output @@ -131,61 +131,61 @@ end module fileunits !> \brief timing variables !> \ingroup grp_global module cputiming - - REAL :: Tmanual = 0.0, manualT = 0.0 - REAL :: Trzaxis = 0.0, rzaxisT = 0.0 - REAL :: Tpackxi = 0.0, packxiT = 0.0 - REAL :: Tvolume = 0.0, volumeT = 0.0 - REAL :: Tcoords = 0.0, coordsT = 0.0 - REAL :: Tbasefn = 0.0, basefnT = 0.0 - REAL :: Tmemory = 0.0, memoryT = 0.0 - REAL :: Tmetrix = 0.0, metrixT = 0.0 - REAL :: Tma00aa = 0.0, ma00aaT = 0.0 - REAL :: Tmatrix = 0.0, matrixT = 0.0 - REAL :: Tspsmat = 0.0, spsmatT = 0.0 - REAL :: Tspsint = 0.0, spsintT = 0.0 - REAL :: Tmp00ac = 0.0, mp00acT = 0.0 - REAL :: Tma02aa = 0.0, ma02aaT = 0.0 - REAL :: Tpackab = 0.0, packabT = 0.0 - REAL :: Ttr00ab = 0.0, tr00abT = 0.0 - REAL :: Tcurent = 0.0, curentT = 0.0 - REAL :: Tdf00ab = 0.0, df00abT = 0.0 - REAL :: Tlforce = 0.0, lforceT = 0.0 - REAL :: Tintghs = 0.0, intghsT = 0.0 - REAL :: Tmtrxhs = 0.0, mtrxhsT = 0.0 - REAL :: Tlbpol = 0.0, lbpolT = 0.0 - REAL :: Tbrcast = 0.0, brcastT = 0.0 - REAL :: Tdfp100 = 0.0, dfp100T = 0.0 - REAL :: Tdfp200 = 0.0, dfp200T = 0.0 - REAL :: Tdforce = 0.0, dforceT = 0.0 - REAL :: Tnewton = 0.0, newtonT = 0.0 - REAL :: Tcasing = 0.0, casingT = 0.0 - REAL :: Tbnorml = 0.0, bnormlT = 0.0 - REAL :: Tjo00aa = 0.0, jo00aaT = 0.0 - REAL :: Tpp00aa = 0.0, pp00aaT = 0.0 - REAL :: Tpp00ab = 0.0, pp00abT = 0.0 - REAL :: Tbfield = 0.0, bfieldT = 0.0 - REAL :: Tstzxyz = 0.0, stzxyzT = 0.0 - REAL :: Thesian = 0.0, hesianT = 0.0 - REAL :: Tra00aa = 0.0, ra00aaT = 0.0 - REAL :: Tnumrec = 0.0, numrecT = 0.0 - REAL :: Tdcuhre = 0.0, dcuhreT = 0.0 - REAL :: Tminpack = 0.0, minpackT = 0.0 - REAL :: Tiqpack = 0.0, iqpackT = 0.0 - REAL :: Trksuite = 0.0, rksuiteT = 0.0 - REAL :: Ti1mach = 0.0, i1machT = 0.0 - REAL :: Td1mach = 0.0, d1machT = 0.0 - REAL :: Tilut = 0.0, ilutT = 0.0 - REAL :: Titers = 0.0, itersT = 0.0 - REAL :: Tsphdf5 = 0.0, sphdf5T = 0.0 - REAL :: Tpreset = 0.0, presetT = 0.0 - REAL :: Tglobal = 0.0, globalT = 0.0 - REAL :: Txspech = 0.0, xspechT = 0.0 - REAL :: Tinputlist = 0.0, inputlistT = 0.0 - - REAL :: Treadin = 0.0 + use mod_kinds, only: wp => dp + real(wp) :: Tmanual = 0.0, manualT = 0.0 + real(wp) :: Trzaxis = 0.0, rzaxisT = 0.0 + real(wp) :: Tpackxi = 0.0, packxiT = 0.0 + real(wp) :: Tvolume = 0.0, volumeT = 0.0 + real(wp) :: Tcoords = 0.0, coordsT = 0.0 + real(wp) :: Tbasefn = 0.0, basefnT = 0.0 + real(wp) :: Tmemory = 0.0, memoryT = 0.0 + real(wp) :: Tmetrix = 0.0, metrixT = 0.0 + real(wp) :: Tma00aa = 0.0, ma00aaT = 0.0 + real(wp) :: Tmatrix = 0.0, matrixT = 0.0 + real(wp) :: Tspsmat = 0.0, spsmatT = 0.0 + real(wp) :: Tspsint = 0.0, spsintT = 0.0 + real(wp) :: Tmp00ac = 0.0, mp00acT = 0.0 + real(wp) :: Tma02aa = 0.0, ma02aaT = 0.0 + real(wp) :: Tpackab = 0.0, packabT = 0.0 + real(wp) :: Ttr00ab = 0.0, tr00abT = 0.0 + real(wp) :: Tcurent = 0.0, curentT = 0.0 + real(wp) :: Tdf00ab = 0.0, df00abT = 0.0 + real(wp) :: Tlforce = 0.0, lforceT = 0.0 + real(wp) :: Tintghs = 0.0, intghsT = 0.0 + real(wp) :: Tmtrxhs = 0.0, mtrxhsT = 0.0 + real(wp) :: Tlbpol = 0.0, lbpolT = 0.0 + real(wp) :: Tbrcast = 0.0, brcastT = 0.0 + real(wp) :: Tdfp100 = 0.0, dfp100T = 0.0 + real(wp) :: Tdfp200 = 0.0, dfp200T = 0.0 + real(wp) :: Tdforce = 0.0, dforceT = 0.0 + real(wp) :: Tnewton = 0.0, newtonT = 0.0 + real(wp) :: Tcasing = 0.0, casingT = 0.0 + real(wp) :: Tbnorml = 0.0, bnormlT = 0.0 + real(wp) :: Tjo00aa = 0.0, jo00aaT = 0.0 + real(wp) :: Tpp00aa = 0.0, pp00aaT = 0.0 + real(wp) :: Tpp00ab = 0.0, pp00abT = 0.0 + real(wp) :: Tbfield = 
0.0, bfieldT = 0.0 + real(wp) :: Tstzxyz = 0.0, stzxyzT = 0.0 + real(wp) :: Thesian = 0.0, hesianT = 0.0 + real(wp) :: Tra00aa = 0.0, ra00aaT = 0.0 + real(wp) :: Tnumrec = 0.0, numrecT = 0.0 + real(wp) :: Tdcuhre = 0.0, dcuhreT = 0.0 + real(wp) :: Tminpack = 0.0, minpackT = 0.0 + real(wp) :: Tiqpack = 0.0, iqpackT = 0.0 + real(wp) :: Trksuite = 0.0, rksuiteT = 0.0 + real(wp) :: Ti1mach = 0.0, i1machT = 0.0 + real(wp) :: Td1mach = 0.0, d1machT = 0.0 + real(wp) :: Tilut = 0.0, ilutT = 0.0 + real(wp) :: Titers = 0.0, itersT = 0.0 + real(wp) :: Tsphdf5 = 0.0, sphdf5T = 0.0 + real(wp) :: Tpreset = 0.0, presetT = 0.0 + real(wp) :: Tglobal = 0.0, globalT = 0.0 + real(wp) :: Txspech = 0.0, xspechT = 0.0 + real(wp) :: Tinputlist = 0.0, inputlistT = 0.0 + + real(wp) :: Treadin = 0.0 ! REAL :: Twritin = 0.0 ! redundant; - REAL :: Twrtend = 0.0 + real(wp) :: Twrtend = 0.0 end module cputiming @@ -194,26 +194,26 @@ end module cputiming !> \brief type definitions for custom datatypes !> \ingroup grp_global module typedefns - + use mod_kinds, only: wp => dp !> \brief used for quantities which have different resolutions in different volumes, e.g. the vector potential type subgrid - REAL, allocatable :: s(:) !< coefficients - INTEGER, allocatable :: i(:) !< indices + real(wp), allocatable :: s(:) !< coefficients + integer, allocatable :: i(:) !< indices end type subgrid type MatrixLU - REAL, allocatable :: mat(:,:) - INTEGER, allocatable :: ipivot(:) + real(wp), allocatable :: mat(:,:) + integer, allocatable :: ipivot(:) end type MatrixLU !> \brief \f${\rm d}\mathbf{B}/{\rm d}\mathbf{X}\f$ (?) type derivative LOGICAL :: L !< what is this? - INTEGER :: vol !< Used in coords(); required for global constraint force gradient evaluation - INTEGER :: innout !< what is this? - INTEGER :: ii !< what is this? - INTEGER :: irz !< what is this? - INTEGER :: issym !< what is this? + integer :: vol !< Used in coords(); required for global constraint force gradient evaluation + integer :: innout !< what is this? + integer :: ii !< what is this? + integer :: irz !< what is this? + integer :: issym !< what is this? end type derivative end module typedefns @@ -222,7 +222,7 @@ end module typedefns !> \brief global variable storage used as "workspace" throughout the code module allglobal - + use mod_kinds, only: wp => dp use constants use typedefns @@ -230,44 +230,44 @@ module allglobal !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!``-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - INTEGER :: myid !< MPI rank of current CPU - INTEGER :: ncpu !< number of MPI tasks - INTEGER :: IsMyVolumeValue !< flag to indicate if a CPU is operating on its assigned volume - REAL :: cpus !< initial time - INTEGER :: MPI_COMM_SPEC !< SPEC MPI communicator + integer :: myid !< MPI rank of current CPU + integer :: ncpu !< number of MPI tasks + integer :: IsMyVolumeValue !< flag to indicate if a CPU is operating on its assigned volume + real(wp) :: cpus !< initial time + integer :: MPI_COMM_SPEC !< SPEC MPI communicator LOGICAL :: skip_write = .false. ! flag to disable any HDF5-related calls - REAL :: pi2nfp ! pi2/nfp ; assigned in readin; - REAL :: pi2pi2nfp - REAL :: pi2pi2nfphalf - REAL :: pi2pi2nfpquart + real(wp) :: pi2nfp ! 
pi2/nfp ; assigned in readin; + real(wp) :: pi2pi2nfp + real(wp) :: pi2pi2nfphalf + real(wp) :: pi2pi2nfpquart !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - CHARACTER(LEN=1000) :: ext ! extension of input filename, i.e., "G3V01L1Fi.001" for an input file G3V01L1Fi.001.sp + character(len=255) :: ext ! extension of input filename, i.e., "G3V01L1Fi.001" for an input file G3V01L1Fi.001.sp - REAL :: ForceErr !< total force-imbalance - REAL :: Energy !< MHD energy + real(wp) :: ForceErr !< total force-imbalance + real(wp) :: Energy !< MHD energy - REAL , allocatable :: IPDt(:), IPDtDpf(:,:) !< Toroidal pressure-driven current + real(wp) , allocatable :: IPDt(:), IPDtDpf(:,:) !< Toroidal pressure-driven current - INTEGER :: Mvol + integer :: Mvol LOGICAL :: YESstellsym !< internal shorthand copies of Istellsym, which is an integer input; LOGICAL :: NOTstellsym !< internal shorthand copies of Istellsym, which is an integer input; LOGICAL :: YESMatrixFree, NOTMatrixFree !< to use matrix-free method or not - REAL , allocatable :: cheby(:,:) !< local workspace for evaluation of Chebychev polynomials - REAL , allocatable :: zernike(:,:,:) !< local workspace for evaluation of Zernike polynomials + real(wp) , allocatable :: cheby(:,:) !< local workspace for evaluation of Chebychev polynomials + real(wp) , allocatable :: zernike(:,:,:) !< local workspace for evaluation of Zernike polynomials - REAL , allocatable :: TT(:,:,:) !< derivatives of Chebyshev polynomials at the inner and outer interfaces; - REAL , allocatable :: RTT(:,:,:,:) !< derivatives of Zernike polynomials at the inner and outer interfaces; + real(wp) , allocatable :: TT(:,:,:) !< derivatives of Chebyshev polynomials at the inner and outer interfaces; + real(wp) , allocatable :: RTT(:,:,:,:) !< derivatives of Zernike polynomials at the inner and outer interfaces; - REAL , allocatable :: RTM(:,:) !< \f$r^m\f$ term of Zernike polynomials at the origin - REAL , allocatable :: ZernikeDof(:) !< Zernike degree of freedom for each \f$m\f$ + real(wp) , allocatable :: RTM(:,:) !< \f$r^m\f$ term of Zernike polynomials at the origin + real(wp) , allocatable :: ZernikeDof(:) !< Zernike degree of freedom for each \f$m\f$ !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -275,9 +275,9 @@ module allglobal !> Enhanced resolution is required for the metric elements, \f$g_{ij}/\sqrt g\f$, which is given by mne, ime, and ine. !> The Fourier resolution here is determined by \c lMpol=2*Mpol and \c lNtor=2*Ntor. !> @{ - INTEGER :: mne !< enhanced resolution for metric elements - INTEGER, allocatable :: ime(:) !< enhanced poloidal mode numbers for metric elements - INTEGER, allocatable :: ine(:) !< enhanced toroidal mode numbers for metric elements + integer :: mne !< enhanced resolution for metric elements + integer, allocatable :: ime(:) !< enhanced poloidal mode numbers for metric elements + integer, allocatable :: ine(:) !< enhanced toroidal mode numbers for metric elements !> @} !> \addtogroup grp_enh_res_sfl Enhanced resolution for transformation to straight-field line angle @@ -285,19 +285,19 @@ module allglobal !> which is given by mns, ims and ins. !> The Fourier resolution here is determined by \c iMpol and \c iNtor. 
!> @{ - INTEGER :: mns !< enhanced resolution for straight field line transformation - INTEGER, allocatable :: ims(:) !< enhanced poloidal mode numbers for straight field line transformation - INTEGER, allocatable :: ins(:) !< enhanced toroidal mode numbers for straight field line transformation + integer :: mns !< enhanced resolution for straight field line transformation + integer, allocatable :: ims(:) !< enhanced poloidal mode numbers for straight field line transformation + integer, allocatable :: ins(:) !< enhanced toroidal mode numbers for straight field line transformation !> @} - INTEGER :: lMpol !< what is this? - INTEGER :: lNtor !< what is this? - INTEGER :: sMpol !< what is this? - INTEGER :: sNtor !< what is this? + integer :: lMpol !< what is this? + integer :: lNtor !< what is this? + integer :: sMpol !< what is this? + integer :: sNtor !< what is this? !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - REAL :: xoffset = 1.0 !< used to normalize NAG routines (which ones exacly where?) + real(wp) :: xoffset = 1.0 !< used to normalize NAG routines (which ones exacly where?) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -305,7 +305,7 @@ module allglobal LOGICAL :: IconstraintOK !< Used to break iteration loops of slaves in the global constraint minimization. - REAL , allocatable :: beltramierror(:,:) !< to store the integral of |curlB-mu*B| computed by jo00aa; + real(wp) , allocatable :: beltramierror(:,:) !< to store the integral of |curlB-mu*B| computed by jo00aa; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -314,20 +314,20 @@ module allglobal !> !> \addtogroup grp_fourier_repr Fourier representation !> @{ - INTEGER :: mn !< total number of Fourier harmonics for coordinates/fields; calculated from Mpol, Ntor in readin() - INTEGER, allocatable :: im(:) !< poloidal mode numbers for Fourier representation - INTEGER, allocatable :: in(:) !< toroidal mode numbers for Fourier representation + integer :: mn !< total number of Fourier harmonics for coordinates/fields; calculated from Mpol, Ntor in readin() + integer, allocatable :: im(:) !< poloidal mode numbers for Fourier representation + integer, allocatable :: in(:) !< toroidal mode numbers for Fourier representation - REAL, allocatable :: halfmm(:) !< I saw this already somewhere... - REAL, allocatable :: regumm(:) !< I saw this already somewhere... + real(wp), allocatable :: halfmm(:) !< I saw this already somewhere... + real(wp), allocatable :: regumm(:) !< I saw this already somewhere... 
- REAL :: Rscale !< no idea - REAL, allocatable :: psifactor(:,:) !< no idea - REAL, allocatable :: inifactor(:,:) !< no idea + real(wp) :: Rscale !< no idea + real(wp), allocatable :: psifactor(:,:) !< no idea + real(wp), allocatable :: inifactor(:,:) !< no idea - REAL, allocatable :: BBweight(:) !< weight on force-imbalance harmonics; used in dforce() + real(wp), allocatable :: BBweight(:) !< weight on force-imbalance harmonics; used in dforce() - REAL, allocatable :: mmpp(:) !< spectral condensation factors + real(wp), allocatable :: mmpp(:) !< spectral condensation factors !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! !> @} @@ -336,37 +336,37 @@ module allglobal !> The Fourier harmonics of the interfaces are contained in \c iRbc(1:mn,0:Mvol) and \c iZbs(1:mn,0:Mvol), where !> \c iRbc(l,j), \c iZbs(l,j) contains the Fourier harmonics, \f$R_j\f$, \f$Z_j\f$, of the \f$l\f$-th interface. !> @{ - REAL, allocatable :: iRbc(:,:) !< cosine R harmonics of interface surface geometry; stellarator symmetric - REAL, allocatable :: iZbs(:,:) !< sine Z harmonics of interface surface geometry; stellarator symmetric - REAL, allocatable :: iRbs(:,:) !< sine R harmonics of interface surface geometry; non-stellarator symmetric - REAL, allocatable :: iZbc(:,:) !< cosine Z harmonics of interface surface geometry; non-stellarator symmetric - - REAL, allocatable :: dRbc(:,:) !< cosine R harmonics of interface surface geometry; stellarator symmetric; linear deformation - REAL, allocatable :: dZbs(:,:) !< sine Z harmonics of interface surface geometry; stellarator symmetric; linear deformation - REAL, allocatable :: dRbs(:,:) !< sine R harmonics of interface surface geometry; non-stellarator symmetric; linear deformation - REAL, allocatable :: dZbc(:,:) !< cosine Z harmonics of interface surface geometry; non-stellarator symmetric; linear deformation - - REAL, allocatable :: iRij(:,:) !< interface surface geometry; real space - REAL, allocatable :: iZij(:,:) !< interface surface geometry; real space - REAL, allocatable :: dRij(:,:) !< interface surface geometry; real space - REAL, allocatable :: dZij(:,:) !< interface surface geometry; real space - REAL, allocatable :: tRij(:,:) !< interface surface geometry; real space - REAL, allocatable :: tZij(:,:) !< interface surface geometry; real space - - REAL, allocatable :: iVns(:) !< sine harmonics of vacuum normal magnetic field on interfaces; stellarator symmetric - REAL, allocatable :: iBns(:) !< sine harmonics of plasma normal magnetic field on interfaces; stellarator symmetric - REAL, allocatable :: iVnc(:) !< cosine harmonics of vacuum normal magnetic field on interfaces; non-stellarator symmetric - REAL, allocatable :: iBnc(:) !< cosine harmonics of plasma normal magnetic field on interfaces; non-stellarator symmetric - - REAL, allocatable :: lRbc(:) !< local workspace - REAL, allocatable :: lZbs(:) !< local workspace - REAL, allocatable :: lRbs(:) !< local workspace - REAL, allocatable :: lZbc(:) !< local workspace + real(wp), allocatable :: iRbc(:,:) !< cosine R harmonics of interface surface geometry; stellarator symmetric + real(wp), allocatable :: iZbs(:,:) !< sine Z harmonics of interface surface geometry; stellarator symmetric + real(wp), allocatable :: iRbs(:,:) !< sine R harmonics of interface surface geometry; non-stellarator symmetric + real(wp), allocatable :: iZbc(:,:) !< cosine Z harmonics of interface surface geometry; 
non-stellarator symmetric + + real(wp), allocatable :: dRbc(:,:) !< cosine R harmonics of interface surface geometry; stellarator symmetric; linear deformation + real(wp), allocatable :: dZbs(:,:) !< sine Z harmonics of interface surface geometry; stellarator symmetric; linear deformation + real(wp), allocatable :: dRbs(:,:) !< sine R harmonics of interface surface geometry; non-stellarator symmetric; linear deformation + real(wp), allocatable :: dZbc(:,:) !< cosine Z harmonics of interface surface geometry; non-stellarator symmetric; linear deformation + + real(wp), allocatable :: iRij(:,:) !< interface surface geometry; real space + real(wp), allocatable :: iZij(:,:) !< interface surface geometry; real space + real(wp), allocatable :: dRij(:,:) !< interface surface geometry; real space + real(wp), allocatable :: dZij(:,:) !< interface surface geometry; real space + real(wp), allocatable :: tRij(:,:) !< interface surface geometry; real space + real(wp), allocatable :: tZij(:,:) !< interface surface geometry; real space + + real(wp), allocatable :: iVns(:) !< sine harmonics of vacuum normal magnetic field on interfaces; stellarator symmetric + real(wp), allocatable :: iBns(:) !< sine harmonics of plasma normal magnetic field on interfaces; stellarator symmetric + real(wp), allocatable :: iVnc(:) !< cosine harmonics of vacuum normal magnetic field on interfaces; non-stellarator symmetric + real(wp), allocatable :: iBnc(:) !< cosine harmonics of plasma normal magnetic field on interfaces; non-stellarator symmetric + + real(wp), allocatable :: lRbc(:) !< local workspace + real(wp), allocatable :: lZbs(:) !< local workspace + real(wp), allocatable :: lRbs(:) !< local workspace + real(wp), allocatable :: lZbc(:) !< local workspace ! local array used for reading interface Fourier harmonics from file; - INTEGER :: num_modes - INTEGER, allocatable :: mmRZRZ(:), nnRZRZ(:) - REAL, allocatable :: allRZRZ(:,:,:) + integer :: num_modes + integer, allocatable :: mmRZRZ(:), nnRZRZ(:) + real(wp), allocatable :: allRZRZ(:,:,:) !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -380,69 +380,69 @@ module allglobal !> \c sg(0:3,Ntz), which contains the Jacobian and its derivatives; !> and \c guv(0:6,0:3,1:Ntz), which contains the metric elements and their derivatives. !> @{ - INTEGER :: Nt !< discrete resolution along \f$\theta\f$ of grid in real space - INTEGER :: Nz !< discrete resolution along \f$\zeta\f$ of grid in real space - INTEGER :: Ntz !< discrete resolution; Ntz=Nt*Nz shorthand - INTEGER :: hNt !< discrete resolution; Ntz=Nt*Nz shorthand - INTEGER :: hNz !< discrete resolution; Ntz=Nt*Nz shorthand - REAL :: soNtz !< one / sqrt (one*Ntz); shorthand - - REAL , allocatable :: Rij(:,:,:) !< real-space grid; R - REAL , allocatable :: Zij(:,:,:) !< real-space grid; Z - REAL , allocatable :: Xij(:,:,:) !< what is this? - REAL , allocatable :: Yij(:,:,:) !< what is this? - REAL , allocatable :: sg(:,:) !< real-space grid; jacobian and its derivatives - REAL , allocatable :: guvij(:,:,:,:) !< real-space grid; metric elements - REAL , allocatable :: gvuij(:,:,:) !< real-space grid; metric elements (?); 10 Dec 15; - REAL , allocatable :: guvijsave(:,:,:,:) !< what is this? 
- - INTEGER, allocatable :: ki(:,:) !< identification of Fourier modes - INTEGER, allocatable :: kijs(:,:,:) !< identification of Fourier modes - INTEGER, allocatable :: kija(:,:,:) !< identification of Fourier modes - - INTEGER, allocatable :: iotakkii(:) !< identification of Fourier modes - INTEGER, allocatable :: iotaksub(:,:) !< identification of Fourier modes - INTEGER, allocatable :: iotakadd(:,:) !< identification of Fourier modes - INTEGER, allocatable :: iotaksgn(:,:) !< identification of Fourier modes - - REAL , allocatable :: efmn(:) !< Fourier harmonics; dummy workspace - REAL , allocatable :: ofmn(:) !< Fourier harmonics; dummy workspace - REAL , allocatable :: cfmn(:) !< Fourier harmonics; dummy workspace - REAL , allocatable :: sfmn(:) !< Fourier harmonics; dummy workspace - REAL , allocatable :: evmn(:) !< Fourier harmonics; dummy workspace - REAL , allocatable :: odmn(:) !< Fourier harmonics; dummy workspace - REAL , allocatable :: comn(:) !< Fourier harmonics; dummy workspace - REAL , allocatable :: simn(:) !< Fourier harmonics; dummy workspace - - REAL , allocatable :: ijreal(:) !< what is this ? - REAL , allocatable :: ijimag(:) !< what is this ? - REAL , allocatable :: jireal(:) !< what is this ? - REAL , allocatable :: jiimag(:) !< what is this ? - - REAL , allocatable :: jkreal(:) !< what is this ? - REAL , allocatable :: jkimag(:) !< what is this ? - REAL , allocatable :: kjreal(:) !< what is this ? - REAL , allocatable :: kjimag(:) !< what is this ? - - REAL , allocatable :: Bsupumn(:,:,:) !< tangential field on interfaces; \f$\theta\f$-component; required for virtual casing construction of field; 11 Oct 12 - REAL , allocatable :: Bsupvmn(:,:,:) !< tangential field on interfaces; \f$\zeta\f$ -component; required for virtual casing construction of field; 11 Oct 12 - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - - REAL , allocatable :: goomne(:,:) !< described in preset() - REAL , allocatable :: goomno(:,:) !< described in preset() - REAL , allocatable :: gssmne(:,:) !< described in preset() - REAL , allocatable :: gssmno(:,:) !< described in preset() - REAL , allocatable :: gstmne(:,:) !< described in preset() - REAL , allocatable :: gstmno(:,:) !< described in preset() - REAL , allocatable :: gszmne(:,:) !< described in preset() - REAL , allocatable :: gszmno(:,:) !< described in preset() - REAL , allocatable :: gttmne(:,:) !< described in preset() - REAL , allocatable :: gttmno(:,:) !< described in preset() - REAL , allocatable :: gtzmne(:,:) !< described in preset() - REAL , allocatable :: gtzmno(:,:) !< described in preset() - REAL , allocatable :: gzzmne(:,:) !< described in preset() - REAL , allocatable :: gzzmno(:,:) !< described in preset() + integer :: Nt !< discrete resolution along \f$\theta\f$ of grid in real space + integer :: Nz !< discrete resolution along \f$\zeta\f$ of grid in real space + integer :: Ntz !< discrete resolution; Ntz=Nt*Nz shorthand + integer :: hNt !< discrete resolution; Ntz=Nt*Nz shorthand + integer :: hNz !< discrete resolution; Ntz=Nt*Nz shorthand + real(wp) :: soNtz !< one / sqrt (one*Ntz); shorthand + + real(wp) , allocatable :: Rij(:,:,:) !< real-space grid; R + real(wp) , allocatable :: Zij(:,:,:) !< real-space grid; Z + real(wp) , allocatable :: Xij(:,:,:) !< what is this? + real(wp) , allocatable :: Yij(:,:,:) !< what is this? 
+ real(wp) , allocatable :: sg(:,:) !< real-space grid; jacobian and its derivatives + real(wp) , allocatable :: guvij(:,:,:,:) !< real-space grid; metric elements + real(wp) , allocatable :: gvuij(:,:,:) !< real-space grid; metric elements (?); 10 Dec 15; + real(wp) , allocatable :: guvijsave(:,:,:,:) !< what is this? + + integer, allocatable :: ki(:,:) !< identification of Fourier modes + integer, allocatable :: kijs(:,:,:) !< identification of Fourier modes + integer, allocatable :: kija(:,:,:) !< identification of Fourier modes + + integer, allocatable :: iotakkii(:) !< identification of Fourier modes + integer, allocatable :: iotaksub(:,:) !< identification of Fourier modes + integer, allocatable :: iotakadd(:,:) !< identification of Fourier modes + integer, allocatable :: iotaksgn(:,:) !< identification of Fourier modes + + real(wp) , allocatable :: efmn(:) !< Fourier harmonics; dummy workspace + real(wp) , allocatable :: ofmn(:) !< Fourier harmonics; dummy workspace + real(wp) , allocatable :: cfmn(:) !< Fourier harmonics; dummy workspace + real(wp) , allocatable :: sfmn(:) !< Fourier harmonics; dummy workspace + real(wp) , allocatable :: evmn(:) !< Fourier harmonics; dummy workspace + real(wp) , allocatable :: odmn(:) !< Fourier harmonics; dummy workspace + real(wp) , allocatable :: comn(:) !< Fourier harmonics; dummy workspace + real(wp) , allocatable :: simn(:) !< Fourier harmonics; dummy workspace + + real(wp) , allocatable :: ijreal(:) !< what is this ? + real(wp) , allocatable :: ijimag(:) !< what is this ? + real(wp) , allocatable :: jireal(:) !< what is this ? + real(wp) , allocatable :: jiimag(:) !< what is this ? + + real(wp) , allocatable :: jkreal(:) !< what is this ? + real(wp) , allocatable :: jkimag(:) !< what is this ? + real(wp) , allocatable :: kjreal(:) !< what is this ? + real(wp) , allocatable :: kjimag(:) !< what is this ? + + real(wp) , allocatable :: Bsupumn(:,:,:) !< tangential field on interfaces; \f$\theta\f$-component; required for virtual casing construction of field; 11 Oct 12 + real(wp) , allocatable :: Bsupvmn(:,:,:) !< tangential field on interfaces; \f$\zeta\f$ -component; required for virtual casing construction of field; 11 Oct 12 + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + + real(wp) , allocatable :: goomne(:,:) !< described in preset() + real(wp) , allocatable :: goomno(:,:) !< described in preset() + real(wp) , allocatable :: gssmne(:,:) !< described in preset() + real(wp) , allocatable :: gssmno(:,:) !< described in preset() + real(wp) , allocatable :: gstmne(:,:) !< described in preset() + real(wp) , allocatable :: gstmno(:,:) !< described in preset() + real(wp) , allocatable :: gszmne(:,:) !< described in preset() + real(wp) , allocatable :: gszmno(:,:) !< described in preset() + real(wp) , allocatable :: gttmne(:,:) !< described in preset() + real(wp) , allocatable :: gttmno(:,:) !< described in preset() + real(wp) , allocatable :: gtzmne(:,:) !< described in preset() + real(wp) , allocatable :: gtzmno(:,:) !< described in preset() + real(wp) , allocatable :: gzzmne(:,:) !< described in preset() + real(wp) , allocatable :: gzzmno(:,:) !< described in preset() !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
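The conversion above relies throughout on "use mod_kinds, only: wp => dp" to replace the old m4 REAL macro with an explicit working-precision kind. The kind definitions themselves are not shown in these hunks, so the following is only a minimal sketch of what such a module typically looks like; the actual src/mod_kinds.F90 may select its kinds differently (for instance via a preprocessor switch for single-precision builds), and the parameter name sp is an assumption — only dp is referenced in this diff.

! Sketch of a kind-definition module matching the "wp => dp" rename used above.
module mod_kinds
  use iso_fortran_env, only: real32, real64
  implicit none
  private
  integer, parameter, public :: sp = real32   ! single precision (assumed name, not used in this diff)
  integer, parameter, public :: dp = real64   ! double precision; renamed to wp at each use site
end module mod_kinds

Each consuming module then declares real(wp) instead of the former REAL macro, so a future change of working precision touches one definition rather than every declaration.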
@@ -450,56 +450,56 @@ module allglobal !> \addtogroup grp_chebychev_metric Volume-integrated Chebyshev-metrics !> These are allocated in dforce(), defined in ma00aa(), and are used in matrix() to construct the matrices. !> @{ - REAL, allocatable :: DToocc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DToocs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DToosc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DTooss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TTsscc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TTsscs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TTsssc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TTssss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TDstcc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TDstcs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TDstsc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TDstss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TDszcc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TDszcs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TDszsc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: TDszss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDttcc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDttcs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDttsc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDttss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDtzcc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDtzcs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDtzsc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDtzss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDzzcc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDzzcs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDzzsc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - REAL, allocatable :: DDzzss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - - REAL, allocatable :: Tsc(:,:) !< what is this? - REAL, allocatable :: Tss(:,:) !< what is this? - REAL, allocatable :: Dtc(:,:) !< what is this? - REAL, allocatable :: Dts(:,:) !< what is this? - REAL, allocatable :: Dzc(:,:) !< what is this? - REAL, allocatable :: Dzs(:,:) !< what is this? - REAL, allocatable :: Ttc(:,:) !< what is this? - REAL, allocatable :: Tzc(:,:) !< what is this? - REAL, allocatable :: Tts(:,:) !< what is this? - REAL, allocatable :: Tzs(:,:) !< what is this? 
- -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - - REAL, allocatable :: dtflux(:) !< \f$\delta \psi_{toroidal}\f$ in each annulus - REAL, allocatable :: dpflux(:) !< \f$\delta \psi_{poloidal}\f$ in each annulus - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - - REAL, allocatable :: sweight(:) !< minimum poloidal length constraint weight + real(wp), allocatable :: DToocc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DToocs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DToosc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DTooss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TTsscc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TTsscs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TTsssc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TTssss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TDstcc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TDstcs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TDstsc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TDstss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TDszcc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TDszcs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TDszsc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: TDszss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDttcc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDttcs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDttsc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDttss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDtzcc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDtzcs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDtzsc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDtzss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDzzcc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDzzcs(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDzzsc(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + real(wp), allocatable :: DDzzss(:,:,:,:) !< volume-integrated Chebychev-metrics; see matrix() + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + + real(wp), allocatable :: Tsc(:,:) !< what is this? + real(wp), allocatable :: Tss(:,:) !< what is this? 
+ real(wp), allocatable :: Dtc(:,:) !< what is this? + real(wp), allocatable :: Dts(:,:) !< what is this? + real(wp), allocatable :: Dzc(:,:) !< what is this? + real(wp), allocatable :: Dzs(:,:) !< what is this? + real(wp), allocatable :: Ttc(:,:) !< what is this? + real(wp), allocatable :: Tzc(:,:) !< what is this? + real(wp), allocatable :: Tts(:,:) !< what is this? + real(wp), allocatable :: Tzs(:,:) !< what is this? + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + + real(wp), allocatable :: dtflux(:) !< \f$\delta \psi_{toroidal}\f$ in each annulus + real(wp), allocatable :: dpflux(:) !< \f$\delta \psi_{poloidal}\f$ in each annulus + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + + real(wp), allocatable :: sweight(:) !< minimum poloidal length constraint weight !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -524,34 +524,34 @@ module allglobal !> \c dAzo(0,i)%%s(l) \f$\equiv {\color{Cerulean}A_{\zeta ,o,i,l}}\f$ !> !> @{ - INTEGER, allocatable :: NAdof(:) !< degrees of freedom in Beltrami fields in each annulus - INTEGER, allocatable :: Nfielddof(:) !< degrees of freedom in Beltrami fields in each annulus, field only, no Lagrange multipliers + integer, allocatable :: NAdof(:) !< degrees of freedom in Beltrami fields in each annulus + integer, allocatable :: Nfielddof(:) !< degrees of freedom in Beltrami fields in each annulus, field only, no Lagrange multipliers type(subgrid), allocatable :: Ate(:,:,:) !< magnetic vector potential cosine Fourier harmonics; stellarator-symmetric type(subgrid), allocatable :: Aze(:,:,:) !< magnetic vector potential cosine Fourier harmonics; stellarator-symmetric type(subgrid), allocatable :: Ato(:,:,:) !< magnetic vector potential sine Fourier harmonics; non-stellarator-symmetric type(subgrid), allocatable :: Azo(:,:,:) !< magnetic vector potential sine Fourier harmonics; non-stellarator-symmetric - INTEGER , allocatable :: Lma(:,:) !< Lagrange multipliers (?) - INTEGER , allocatable :: Lmb(:,:) !< Lagrange multipliers (?) - INTEGER , allocatable :: Lmc(:,:) !< Lagrange multipliers (?) - INTEGER , allocatable :: Lmd(:,:) !< Lagrange multipliers (?) - INTEGER , allocatable :: Lme(:,:) !< Lagrange multipliers (?) - INTEGER , allocatable :: Lmf(:,:) !< Lagrange multipliers (?) - INTEGER , allocatable :: Lmg(:,:) !< Lagrange multipliers (?) - INTEGER , allocatable :: Lmh(:,:) !< Lagrange multipliers (?) - - REAL , allocatable :: Lmavalue(:,:) !< what is this? - REAL , allocatable :: Lmbvalue(:,:) !< what is this? - REAL , allocatable :: Lmcvalue(:,:) !< what is this? - REAL , allocatable :: Lmdvalue(:,:) !< what is this? - REAL , allocatable :: Lmevalue(:,:) !< what is this? - REAL , allocatable :: Lmfvalue(:,:) !< what is this? - REAL , allocatable :: Lmgvalue(:,:) !< what is this? - REAL , allocatable :: Lmhvalue(:,:) !< what is this? - - INTEGER , allocatable :: Fso(:,:) !< what is this? - INTEGER , allocatable :: Fse(:,:) !< what is this? + integer , allocatable :: Lma(:,:) !< Lagrange multipliers (?) + integer , allocatable :: Lmb(:,:) !< Lagrange multipliers (?) + integer , allocatable :: Lmc(:,:) !< Lagrange multipliers (?) + integer , allocatable :: Lmd(:,:) !< Lagrange multipliers (?) 
+ integer , allocatable :: Lme(:,:) !< Lagrange multipliers (?) + integer , allocatable :: Lmf(:,:) !< Lagrange multipliers (?) + integer , allocatable :: Lmg(:,:) !< Lagrange multipliers (?) + integer , allocatable :: Lmh(:,:) !< Lagrange multipliers (?) + + real(wp) , allocatable :: Lmavalue(:,:) !< what is this? + real(wp) , allocatable :: Lmbvalue(:,:) !< what is this? + real(wp) , allocatable :: Lmcvalue(:,:) !< what is this? + real(wp) , allocatable :: Lmdvalue(:,:) !< what is this? + real(wp) , allocatable :: Lmevalue(:,:) !< what is this? + real(wp) , allocatable :: Lmfvalue(:,:) !< what is this? + real(wp) , allocatable :: Lmgvalue(:,:) !< what is this? + real(wp) , allocatable :: Lmhvalue(:,:) !< what is this? + + integer , allocatable :: Fso(:,:) !< what is this? + integer , allocatable :: Fse(:,:) !< what is this? LOGICAL :: Lcoordinatesingularity !< set by \c LREGION macro; true if inside the innermost volume LOGICAL :: Lplasmaregion !< set by \c LREGION macro; true if inside the plasma region @@ -573,45 +573,45 @@ module allglobal !>
!> These are allocated and deallocated in dforce(), assigned in matrix(), and used in mp00ac() and (?) df00aa().
  • !> !> @{ - REAL, allocatable :: dMA(:,:) !< energy and helicity matrices; quadratic forms - REAL, allocatable :: dMB(:,:) !< energy and helicity matrices; quadratic forms + real(wp), allocatable :: dMA(:,:) !< energy and helicity matrices; quadratic forms + real(wp), allocatable :: dMB(:,:) !< energy and helicity matrices; quadratic forms ! REAL, allocatable :: dMC(:,:) !< energy and helicity matrices; quadratic forms - REAL, allocatable :: dMD(:,:) !< energy and helicity matrices; quadratic forms + real(wp), allocatable :: dMD(:,:) !< energy and helicity matrices; quadratic forms ! REAL, allocatable :: dME(:,:) !< energy and helicity matrices; quadratic forms ! REAL, allocatable :: dMF(:,:) !< energy and helicity matrices; quadratic forms - REAL, allocatable :: dMAS(:) !< sparse version of dMA, data - REAL, allocatable :: dMDS(:) !< sparse version of dMD, data - INTEGER,allocatable :: idMAS(:) !< sparse version of dMA and dMD, indices - INTEGER,allocatable :: jdMAS(:) !< sparse version of dMA and dMD, indices - INTEGER,allocatable :: NdMASmax(:) !< number of elements for sparse matrices - INTEGER,allocatable :: NdMAS(:) !< number of elements for sparse matrices + real(wp), allocatable :: dMAS(:) !< sparse version of dMA, data + real(wp), allocatable :: dMDS(:) !< sparse version of dMD, data + integer,allocatable :: idMAS(:) !< sparse version of dMA and dMD, indices + integer,allocatable :: jdMAS(:) !< sparse version of dMA and dMD, indices + integer,allocatable :: NdMASmax(:) !< number of elements for sparse matrices + integer,allocatable :: NdMAS(:) !< number of elements for sparse matrices - REAL, allocatable :: dMG(: ) !< what is this? + real(wp), allocatable :: dMG(: ) !< what is this? - REAL, allocatable :: AdotX(:) !< the matrix-vector product - REAL, allocatable :: DdotX(:) !< the matrix-vector product + real(wp), allocatable :: AdotX(:) !< the matrix-vector product + real(wp), allocatable :: DdotX(:) !< the matrix-vector product - REAL, allocatable :: solution(:,:) !< this is allocated in dforce; used in mp00ac and ma02aa; and is passed to packab + real(wp), allocatable :: solution(:,:) !< this is allocated in dforce; used in mp00ac and ma02aa; and is passed to packab - REAL, allocatable :: GMRESlastsolution(:,:,:) !< used to store the last solution for restarting GMRES + real(wp), allocatable :: GMRESlastsolution(:,:,:) !< used to store the last solution for restarting GMRES - REAL, allocatable :: MBpsi(:) !< matrix vector products + real(wp), allocatable :: MBpsi(:) !< matrix vector products LOGICAL :: LILUprecond !< whether to use ILU preconditioner for GMRES - REAL, allocatable :: BeltramiInverse(:,:) !< Beltrami inverse matrix + real(wp), allocatable :: BeltramiInverse(:,:) !< Beltrami inverse matrix !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
- REAL , allocatable :: diotadxup(:,:,:) !< measured rotational transform on inner/outer interfaces for each volume; d(transform)/dx; (see dforce) - REAL , allocatable :: dItGpdxtp(:,:,:) !< measured toroidal and poloidal current on inner/outer interfaces for each volume; d(Itor,Gpol)/dx; (see dforce) + real(wp) , allocatable :: diotadxup(:,:,:) !< measured rotational transform on inner/outer interfaces for each volume; d(transform)/dx; (see dforce) + real(wp) , allocatable :: dItGpdxtp(:,:,:) !< measured toroidal and poloidal current on inner/outer interfaces for each volume; d(Itor,Gpol)/dx; (see dforce) - REAL , allocatable :: glambda(:,:,:,:) !< save initial guesses for iterative calculation of rotational-transform + real(wp) , allocatable :: glambda(:,:,:,:) !< save initial guesses for iterative calculation of rotational-transform - INTEGER :: lmns !< number of independent degrees of freedom in angle transformation; + integer :: lmns !< number of independent degrees of freedom in angle transformation; - REAL, allocatable :: dlambdaout(:,:,:) + real(wp), allocatable :: dlambdaout(:,:,:) !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -619,45 +619,45 @@ module allglobal !> \addtogroup grp_force_constr Construction of "force" !> The force vector is comprised of \c Bomn and \c Iomn. !> @{ - REAL, allocatable :: Bemn(:,:,:) !< force vector; stellarator-symmetric (?) - REAL, allocatable :: Iomn(:,:) !< force vector; stellarator-symmetric (?) - REAL, allocatable :: Somn(:,:,:) !< force vector; non-stellarator-symmetric (?) - REAL, allocatable :: Pomn(:,:,:) !< force vector; non-stellarator-symmetric (?) - - REAL, allocatable :: Bomn(:,:,:) !< force vector; stellarator-symmetric (?) - REAL, allocatable :: Iemn(:,:) !< force vector; stellarator-symmetric (?) - REAL, allocatable :: Semn(:,:,:) !< force vector; non-stellarator-symmetric (?) - REAL, allocatable :: Pemn(:,:,:) !< force vector; non-stellarator-symmetric (?) - - REAL, allocatable :: BBe(:) !< force vector (?); stellarator-symmetric (?) - REAL, allocatable :: IIo(:) !< force vector (?); stellarator-symmetric (?) - REAL, allocatable :: BBo(:) !< force vector (?); non-stellarator-symmetric (?) - REAL, allocatable :: IIe(:) !< force vector (?); non-stellarator-symmetric (?) + real(wp), allocatable :: Bemn(:,:,:) !< force vector; stellarator-symmetric (?) + real(wp), allocatable :: Iomn(:,:) !< force vector; stellarator-symmetric (?) + real(wp), allocatable :: Somn(:,:,:) !< force vector; non-stellarator-symmetric (?) + real(wp), allocatable :: Pomn(:,:,:) !< force vector; non-stellarator-symmetric (?) + + real(wp), allocatable :: Bomn(:,:,:) !< force vector; stellarator-symmetric (?) + real(wp), allocatable :: Iemn(:,:) !< force vector; stellarator-symmetric (?) + real(wp), allocatable :: Semn(:,:,:) !< force vector; non-stellarator-symmetric (?) + real(wp), allocatable :: Pemn(:,:,:) !< force vector; non-stellarator-symmetric (?) + + real(wp), allocatable :: BBe(:) !< force vector (?); stellarator-symmetric (?) + real(wp), allocatable :: IIo(:) !< force vector (?); stellarator-symmetric (?) + real(wp), allocatable :: BBo(:) !< force vector (?); non-stellarator-symmetric (?) + real(wp), allocatable :: IIe(:) !< force vector (?); non-stellarator-symmetric (?) 
!> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! !> \addtogroup grp_covar_field_ifaces Covariant field on interfaces: Btemn, Bzemn, Btomn, Bzomn !> The covariant field !> @{ - REAL, allocatable :: Btemn(:,:,:) !< covariant \f$\theta\f$ cosine component of the tangential field on interfaces; stellarator-symmetric - REAL, allocatable :: Bzemn(:,:,:) !< covariant \f$\zeta\f$ cosine component of the tangential field on interfaces; stellarator-symmetric - REAL, allocatable :: Btomn(:,:,:) !< covariant \f$\theta\f$ sine component of the tangential field on interfaces; non-stellarator-symmetric - REAL, allocatable :: Bzomn(:,:,:) !< covariant \f$\zeta\f$ sine component of the tangential field on interfaces; non-stellarator-symmetric + real(wp), allocatable :: Btemn(:,:,:) !< covariant \f$\theta\f$ cosine component of the tangential field on interfaces; stellarator-symmetric + real(wp), allocatable :: Bzemn(:,:,:) !< covariant \f$\zeta\f$ cosine component of the tangential field on interfaces; stellarator-symmetric + real(wp), allocatable :: Btomn(:,:,:) !< covariant \f$\theta\f$ sine component of the tangential field on interfaces; non-stellarator-symmetric + real(wp), allocatable :: Bzomn(:,:,:) !< covariant \f$\zeta\f$ sine component of the tangential field on interfaces; non-stellarator-symmetric !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! !> \addtogroup grp_covar_field_hessian covariant field for Hessian computation: Bloweremn, Bloweromn !> @{ - REAL, allocatable :: Bloweremn(:,:) !< covariant field for Hessian computation - REAL, allocatable :: Bloweromn(:,:) !< covariant field for Hessian computation + real(wp), allocatable :: Bloweremn(:,:) !< covariant field for Hessian computation + real(wp), allocatable :: Bloweromn(:,:) !< covariant field for Hessian computation !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! !> \addtogroup grp_geomdof Geometrical degrees-of-freedom: LGdof, NGdof !> The geometrical degrees-of-freedom !> @{ - INTEGER :: LGdof !< geometrical degrees of freedom associated with each interface - INTEGER :: NGdof !< total geometrical degrees of freedom + integer :: LGdof !< geometrical degrees of freedom associated with each interface + integer :: NGdof !< total geometrical degrees of freedom !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -668,18 +668,18 @@ module allglobal !>
!> force-balance across the \f$l\f$-th interface depends on the fields in the adjacent interfaces.
  • !> !> @{ - REAL, allocatable :: dBBdRZ(:,:,:) !< derivative of magnetic field w.r.t. geometry (?) - REAL, allocatable :: dIIdRZ(: ,:) !< derivative of spectral constraints w.r.t. geometry (?) + real(wp), allocatable :: dBBdRZ(:,:,:) !< derivative of magnetic field w.r.t. geometry (?) + real(wp), allocatable :: dIIdRZ(: ,:) !< derivative of spectral constraints w.r.t. geometry (?) - REAL, allocatable :: dFFdRZ(:,:,:,:,:) !< derivatives of B^2 at the interfaces wrt geometry - REAL, allocatable :: dBBdmp(:,:,:,: ) !< derivatives of B^2 at the interfaces wrt mu and dpflux + real(wp), allocatable :: dFFdRZ(:,:,:,:,:) !< derivatives of B^2 at the interfaces wrt geometry + real(wp), allocatable :: dBBdmp(:,:,:,: ) !< derivatives of B^2 at the interfaces wrt mu and dpflux - REAL, allocatable :: HdFFdRZ(:,:,:,:,:) !< derivatives of B^2 at the interfaces wrt geometry 2D Hessian; + real(wp), allocatable :: HdFFdRZ(:,:,:,:,:) !< derivatives of B^2 at the interfaces wrt geometry 2D Hessian; - REAL, allocatable :: denergydrr(:,:,:,:,:) !< derivatives of energy at the interfaces wrt geometry 3D Hessian; - REAL, allocatable :: denergydrz(:,:,:,:,:) !< derivatives of energy at the interfaces wrt geometry 3D Hessian; - REAL, allocatable :: denergydzr(:,:,:,:,:) !< derivatives of energy at the interfaces wrt geometry 3D Hessian; - REAL, allocatable :: denergydzz(:,:,:,:,:) !< derivatives of energy at the interfaces wrt geometry 3D Hessian; + real(wp), allocatable :: denergydrr(:,:,:,:,:) !< derivatives of energy at the interfaces wrt geometry 3D Hessian; + real(wp), allocatable :: denergydrz(:,:,:,:,:) !< derivatives of energy at the interfaces wrt geometry 3D Hessian; + real(wp), allocatable :: denergydzr(:,:,:,:,:) !< derivatives of energy at the interfaces wrt geometry 3D Hessian; + real(wp), allocatable :: denergydzz(:,:,:,:,:) !< derivatives of energy at the interfaces wrt geometry 3D Hessian; !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -722,21 +722,21 @@ module allglobal !>
!> A finite-difference estimate is computed if \c Lcheck==4.
  • !> !> @{ - REAL, allocatable :: dmupfdx(:,:,:,:,:) !< derivatives of mu and dpflux wrt geometry at constant interface transform + real(wp), allocatable :: dmupfdx(:,:,:,:,:) !< derivatives of mu and dpflux wrt geometry at constant interface transform !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! LOGICAL :: Lhessianallocated !< flag to indicate that force gradient matrix is allocated (?) - REAL, allocatable :: hessian(:,:) !< force gradient matrix (?) - REAL, allocatable :: dessian(:,:) !< derivative of force gradient matrix (?) + real(wp), allocatable :: hessian(:,:) !< force gradient matrix (?) + real(wp), allocatable :: dessian(:,:) !< derivative of force gradient matrix (?) LOGICAL :: Lhessian2Dallocated !< flag to indicate that 2D Hessian matrix is allocated (?) - REAL, allocatable :: hessian2D(:,:) !< Hessian 2D - REAL, allocatable :: dessian2D(:,:) !< derivative Hessian 2D + real(wp), allocatable :: hessian2D(:,:) !< Hessian 2D + real(wp), allocatable :: dessian2D(:,:) !< derivative Hessian 2D LOGICAL :: Lhessian3Dallocated !< flag to indicate that 2D Hessian matrix is allocated (?) - REAL, allocatable :: hessian3D(:,:) !< Hessian 3D - REAL, allocatable :: dessian3D(:,:) !< derivative Hessian 3D + real(wp), allocatable :: hessian3D(:,:) !< Hessian 3D + real(wp), allocatable :: dessian3D(:,:) !< derivative Hessian 3D !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -749,25 +749,25 @@ module allglobal !> \f} !> !> @{ - REAL , allocatable :: cosi(:,:) !< some precomputed cosines - REAL , allocatable :: sini(:,:) !< some precomputed sines - REAL , allocatable :: gteta(:) !< something related to \f$\sqrt g\f$ and \f$\theta\f$ ? - REAL , allocatable :: gzeta(:) !< something related to \f$\sqrt g\f$ and \f$\zeta\f$ ? + real(wp) , allocatable :: cosi(:,:) !< some precomputed cosines + real(wp) , allocatable :: sini(:,:) !< some precomputed sines + real(wp) , allocatable :: gteta(:) !< something related to \f$\sqrt g\f$ and \f$\theta\f$ ? + real(wp) , allocatable :: gzeta(:) !< something related to \f$\sqrt g\f$ and \f$\zeta\f$ ? 
- REAL , allocatable :: ajk(:) !< definition of coordinate axis + real(wp) , allocatable :: ajk(:) !< definition of coordinate axis - REAL , allocatable :: dRadR(:,:,:,:) !< derivatives of coordinate axis - REAL , allocatable :: dRadZ(:,:,:,:) !< derivatives of coordinate axis - REAL , allocatable :: dZadR(:,:,:,:) !< derivatives of coordinate axis - REAL , allocatable :: dZadZ(:,:,:,:) !< derivatives of coordinate axis + real(wp) , allocatable :: dRadR(:,:,:,:) !< derivatives of coordinate axis + real(wp) , allocatable :: dRadZ(:,:,:,:) !< derivatives of coordinate axis + real(wp) , allocatable :: dZadR(:,:,:,:) !< derivatives of coordinate axis + real(wp) , allocatable :: dZadZ(:,:,:,:) !< derivatives of coordinate axis - REAL , allocatable :: dRodR(:,:,:) !< derivatives of coordinate axis - REAL , allocatable :: dRodZ(:,:,:) !< derivatives of coordinate axis - REAL , allocatable :: dZodR(:,:,:) !< derivatives of coordinate axis - REAL , allocatable :: dZodZ(:,:,:) !< derivatives of coordinate axis + real(wp) , allocatable :: dRodR(:,:,:) !< derivatives of coordinate axis + real(wp) , allocatable :: dRodZ(:,:,:) !< derivatives of coordinate axis + real(wp) , allocatable :: dZodR(:,:,:) !< derivatives of coordinate axis + real(wp) , allocatable :: dZodZ(:,:,:) !< derivatives of coordinate axis - INTEGER, allocatable :: djkp(:,:) !< for calculating cylindrical volume - INTEGER, allocatable :: djkm(:,:) !< for calculating cylindrical volume + integer, allocatable :: djkp(:,:) !< for calculating cylindrical volume + integer, allocatable :: djkm(:,:) !< for calculating cylindrical volume !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -798,11 +798,11 @@ module allglobal !> are computed and saved in \c volume(0:2,1:Nvol). !> !> @{ - REAL , allocatable :: lBBintegral(:) !< B.B integral - REAL , allocatable :: lABintegral(:) !< A.B integral + real(wp) , allocatable :: lBBintegral(:) !< B.B integral + real(wp) , allocatable :: lABintegral(:) !< A.B integral - REAL , allocatable :: vvolume(:) !< volume integral of \f$\sqrt g\f$; computed in volume - REAL :: dvolume !< derivative of volume w.r.t. interface geometry + real(wp) , allocatable :: vvolume(:) !< volume integral of \f$\sqrt g\f$; computed in volume + real(wp) :: dvolume !< derivative of volume w.r.t. interface geometry !> @} !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
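A small caveat on the migrated initializers: a literal such as "= 0.0" keeps its default (single-precision) kind and is only converted to real(wp) on assignment. That is exact for values like 0.0 and 1.0 used above, but a fractional literal without the _wp suffix would silently lose precision. A minimal, self-contained illustration (the program and variable names are invented for the example; they do not appear in SPEC):

program literal_kinds
  use iso_fortran_env, only: real64
  implicit none
  integer, parameter :: wp = real64
  real(wp) :: a, b
  a = 0.1       ! default-real literal, rounded to ~7 digits before conversion to real(wp)
  b = 0.1_wp    ! kind-suffixed literal, carries the full double-precision value
  print '(2(es24.16,1x))', a, b  ! the two results differ in the low-order digits
end program literal_kinds

Named constants such as one and four (used below in virtualcasingfactor) presumably already carry the working-precision kind from the constants module, which avoids the issue entirely.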
@@ -810,20 +810,20 @@ module allglobal !> \addtogroup grp_int_global Internal global variables !> internal global variables; internal logical variables; default values are provided here; these may be changed according to input values !> @{ - INTEGER :: ivol !< labels volume; some subroutines (called by NAG) are fixed argument list but require the volume label + integer :: ivol !< labels volume; some subroutines (called by NAG) are fixed argument list but require the volume label - REAL :: gBzeta !< toroidal (contravariant) field; calculated in bfield; required to convert \f$\dot \theta\f$ to \f$B^\theta\f$, \f$\dot s\f$ to \f$B^s\f$ + real(wp) :: gBzeta !< toroidal (contravariant) field; calculated in bfield; required to convert \f$\dot \theta\f$ to \f$B^\theta\f$, \f$\dot s\f$ to \f$B^s\f$ - INTEGER, allocatable :: Iquad(:) !< internal copy of Nquad + integer, allocatable :: Iquad(:) !< internal copy of Nquad - REAL , allocatable :: gaussianweight(:,:) !< weights for Gaussian quadrature - REAL , allocatable :: gaussianabscissae(:,:) !< abscissae for Gaussian quadrature + real(wp) , allocatable :: gaussianweight(:,:) !< weights for Gaussian quadrature + real(wp) , allocatable :: gaussianabscissae(:,:) !< abscissae for Gaussian quadrature LOGICAL :: LBlinear !< controls selection of Beltrami field solver; depends on LBeltrami LOGICAL :: LBnewton !< controls selection of Beltrami field solver; depends on LBeltrami LOGICAL :: LBsequad !< controls selection of Beltrami field solver; depends on LBeltrami - REAL :: oRZp(1:3) !< used in mg00aa() to determine \f$(s,\theta,\zeta)\f$ given \f$(R,Z,\varphi)\f$ + real(wp) :: oRZp(1:3) !< used in mg00aa() to determine \f$(s,\theta,\zeta)\f$ given \f$(R,Z,\varphi)\f$ !> @} @@ -832,22 +832,22 @@ module allglobal !> \addtogroup grp_misc Miscellaneous !> The following are miscellaneous flags required for the virtual casing field, external (vacuum) field integration, ... !> @{ - INTEGER :: globaljk !< labels position - REAL, allocatable :: Dxyz(:,:) !< computational boundary; position - REAL, allocatable :: Nxyz(:,:) !< computational boundary; normal - REAL, allocatable :: Jxyz(:,:) !< plasma boundary; surface current + integer :: globaljk !< labels position + real(wp), allocatable :: Dxyz(:,:) !< computational boundary; position + real(wp), allocatable :: Nxyz(:,:) !< computational boundary; normal + real(wp), allocatable :: Jxyz(:,:) !< plasma boundary; surface current - REAL :: tetazeta(1:2) !< what is this? + real(wp) :: tetazeta(1:2) !< what is this? - REAL :: virtualcasingfactor = -one / (four*pi) !< this agrees with diagno + real(wp) :: virtualcasingfactor = -one / (four*pi) !< this agrees with diagno - INTEGER :: IBerror !< for computing error in magnetic field + integer :: IBerror !< for computing error in magnetic field - INTEGER :: nfreeboundaryiterations !< number of free-boundary iterations already performed + integer :: nfreeboundaryiterations !< number of free-boundary iterations already performed !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - INTEGER, parameter :: Node = 2 !< best to make this global for consistency between calling and called routines + integer, parameter :: Node = 2 !< best to make this global for consistency between calling and called routines !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
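The subroutine hunks that follow replace the remaining m4 macros by their hand-expanded equivalents: the LOCALS/BEGIN prologue becomes explicit declarations plus MPI_WTIME and omp_get_max_threads calls, and every FATAL check becomes a four-statement test/report/abort block. Since a macro call and its expansion end up far apart in the diff, here is the pairing for one representative check, taken verbatim from the hunks below:

! Before (m4 macro, removed):
!   FATAL( readin, Nfp.le.0, invalid Nfp )
!
! After (hand-expanded); the same pattern is repeated for every input check:
if( Nfp.le.0 ) then
  write(6,'("readin : fatal : myid=",i3," ; Nfp.le.0 ; invalid Nfp ;")') myid
  call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
  stop "readin : Nfp.le.0 : invalid Nfp ;"
endif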
@@ -861,7 +861,6 @@ module allglobal !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! subroutine build_vector_potential(lvol, iocons, aderiv, tderiv) - ! Builds the covariant component of the vector potential and store them in efmn, ofmn, sfmn, cfmn. use constants, only: zero, half @@ -874,24 +873,40 @@ subroutine build_vector_potential(lvol, iocons, aderiv, tderiv) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER :: aderiv ! Derivative of A. -1: w.r.t geometrical degree of freedom +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer :: aderiv ! Derivative of A. -1: w.r.t geometrical degree of freedom ! 0: no derivatives ! 1: w.r.t mu ! 2: w.r.t pflux - INTEGER :: tderiv ! Derivative of Chebyshev polynomialc. 0: no derivatives + integer :: tderiv ! Derivative of Chebyshev polynomialc. 0: no derivatives ! 1: w.r.t radial coordinate s - INTEGER :: ii, & ! Loop index on Fourier harmonics + integer :: ii, & ! Loop index on Fourier harmonics ll, & ! Loop index on radial resolution mi, & ! Poloidal mode number lvol,& ! Volume number iocons ! inner (0) or outer (1) side of the volume - REAL :: mfactor ! Regularization factor when LcoordinateSingularity + real(wp) :: mfactor ! Regularization factor when LcoordinateSingularity !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - BEGIN(build_vector_potential) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + efmn(1:mn) = zero ; sfmn(1:mn) = zero ; cfmn(1:mn) = zero ; ofmn(1:mn) = zero @@ -958,18 +973,32 @@ subroutine read_inputlists_from_file() use ifport ! for fseek, ftell with Intel compiler #endif - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + LOGICAL :: Lspexist integer :: filepos, seek_status, cpfile, instat, idx_mode character(len=1000) :: line - INTEGER :: mm, nn - REAL, allocatable :: RZRZ(:,:) ! local array used for reading interface Fourier harmonics from file; + integer :: mm, nn + real(wp), allocatable :: RZRZ(:,:) ! local array used for reading interface Fourier harmonics from file; inquire( file=trim(ext)//".sp", exist=Lspexist ) ! check if file exists; - FATAL( readin, .not.Lspexist, the input file does not exist ) ! if not, abort; + + if( .not.Lspexist ) then + write(6,'("readin : fatal : myid=",i3," ; .not.Lspexist ; the input file does not exist ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : .not.Lspexist : the input file does not exist ;" + endif + ! if not, abort; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -980,7 +1009,7 @@ subroutine read_inputlists_from_file() instat = 0 ! initially, no error ! 
read namelists one after another - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : reading physicslist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : reading physicslist from ext.sp ;")') cput-cpus endif read(iunit, physicslist, iostat=instat) @@ -992,10 +1021,10 @@ subroutine read_inputlists_from_file() write(*,'(A)') 'Invalid line in physicslist: '//trim(line) end if - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : read physicslist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : read physicslist from ext.sp ;")') cput-cpus endif - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : reading numericlist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : reading numericlist from ext.sp ;")') cput-cpus endif read(iunit, numericlist, iostat=instat) @@ -1007,10 +1036,10 @@ subroutine read_inputlists_from_file() write(*,'(A)') 'Invalid line in numericlist: '//trim(line) end if - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : read numericlist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : read numericlist from ext.sp ;")') cput-cpus endif - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : reading locallist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : reading locallist from ext.sp ;")') cput-cpus endif read(iunit, locallist, iostat=instat) @@ -1022,10 +1051,10 @@ subroutine read_inputlists_from_file() write(*,'(A)') 'Invalid line in locallist: '//trim(line) end if - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : read locallist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : read locallist from ext.sp ;")') cput-cpus endif - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : reading globallist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : reading globallist from ext.sp ;")') cput-cpus endif read(iunit, globallist, iostat=instat) @@ -1037,10 +1066,10 @@ subroutine read_inputlists_from_file() write(*,'(A)') 'Invalid line in globallist: '//trim(line) end if - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : read globallist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : read globallist from ext.sp ;")') cput-cpus endif - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : reading diagnosticslist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : reading diagnosticslist from ext.sp ;")') cput-cpus endif read(iunit, diagnosticslist, iostat=instat) @@ -1052,10 +1081,10 @@ subroutine read_inputlists_from_file() write(*,'(A)') 'Invalid line in diagnosticslist: '//trim(line) end if - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : read diagnosticslist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : read diagnosticslist from ext.sp ;")') cput-cpus endif - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : reading screenlist from ext.sp ;")') 
cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : reading screenlist from ext.sp ;")') cput-cpus endif read(iunit, screenlist, iostat=instat) @@ -1067,7 +1096,7 @@ subroutine read_inputlists_from_file() write(*,'(A)') 'Invalid line in screenlist: '//trim(line) end if - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : read screenlist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : read screenlist from ext.sp ;")') cput-cpus endif ! At this point, the input namelists are read. @@ -1081,9 +1110,18 @@ subroutine read_inputlists_from_file() if (Linitialize .le. 0) then ! duplicate of checks required for below code - FATAL( readin, Nvol.lt.1 .or. Nvol.gt.MNvol, invalid Nvol: may need to recompile with higher MNvol ) - SALLOCATE( RZRZ, (1:4,1:Nvol), zero ) ! temp array for reading input; + if( Nvol.lt.1 .or. Nvol.gt.MNvol ) then + write(6,'("readin : fatal : myid=",i3," ; Nvol.lt.1 .or. Nvol.gt.MNvol ; invalid Nvol: may need to recompile with higher MNvol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Nvol.lt.1 .or. Nvol.gt.MNvol : invalid Nvol: may need to recompile with higher MNvol ;" + endif + + + + allocate( RZRZ(1:4,1:Nvol), stat=astat ) + RZRZ(1:4,1:Nvol) = zero + ! temp array for reading input; ! determine how many modes are specified by reading them once #ifdef IFORT @@ -1107,7 +1145,13 @@ subroutine read_inputlists_from_file() #else call fseek(iunit, filepos, 0, seek_status) #endif - FATAL(inplst, seek_status.ne.0, failed to seek to end of input namelists ) + + if( seek_status.ne.0 ) then + write(6,'("inplst : fatal : myid=",i3," ; seek_status.ne.0 ; failed to seek to end of input namelists ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "inplst : seek_status.ne.0 : failed to seek to end of input namelists ;" + endif + ! now allocate arrays and read... ! Need to free memory, in case preset() called multiple times via python wrappers @@ -1119,7 +1163,9 @@ subroutine read_inputlists_from_file() enddo ! no need for temporary RZRZ anymore - DALLOCATE(RZRZ) + + deallocate(RZRZ,stat=astat) + end if ! Linitialize .le. 0 @@ -1131,31 +1177,37 @@ subroutine read_inputlists_from_file() !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! +!> \brief write all the namelists to example.sp subroutine write_spec_namelist() - ! write all the namelists to example.sp + use mpi use constants use fileunits use inputlist - LOCALS - - LOGICAL :: exist - CHARACTER(LEN=100), PARAMETER :: example = 'example.sp' - - if( myid == 0 ) then - inquire(file=trim(example), EXIST=exist) ! inquire if inputfile existed; - FATAL( global, exist, example input file example.sp already existed ) - open(iunit, file=trim(example), status='unknown', action='write') - write(iunit, physicslist) - write(iunit, numericlist) - write(iunit, locallist) - write(iunit, globallist) - write(iunit, diagnosticslist) - write(iunit, screenlist) - close(iunit) + integer :: ierr + logical :: exists + character(len=*), parameter :: example = 'example.sp' + + if (myid .eq. 0) then + ! inquire if inputfile existed; + inquire(file=trim(example), EXIST=exists) + if (exists) then + write(6,'("example input file example.sp already existed")') + call MPI_ABORT(MPI_COMM_SPEC, 1, ierr) + stop "example input file example.sp already existed" + end if + + ! 
write template input file + open(iunit, file=trim(example), status='unknown', action='write') + write(iunit, physicslist) + write(iunit, numericlist) + write(iunit, locallist) + write(iunit, globallist) + write(iunit, diagnosticslist) + write(iunit, screenlist) + close(iunit) endif - return end subroutine !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -1168,18 +1220,34 @@ subroutine check_inputs() use inputlist use cputiming, only: Treadin - LOCALS - INTEGER :: vvol - REAL :: xx, toroidalflux, toroidalcurrent +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer :: vvol + real(wp) :: xx, toroidalflux, toroidalcurrent + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(readin) Mvol = Nvol + Lfreebound ! this is just for screen output and initial check; true assignment of Mvol appears outside if( myid.eq.0 ) then ; write(ounit,'("readin : ", 10x ," : ")') - cput = GETTIME + cput = MPI_WTIME() write(ounit,1010) cput-cpus, Igeometry, Istellsym, Lreflect write(ounit,1011) Lfreebound, phiedge, curtor, curpol @@ -1207,17 +1275,83 @@ subroutine check_inputs() endif #endif - FATAL( readin, Igeometry.lt.1 .or. Igeometry.gt.3, invalid geometry ) - FATAL( readin, Nfp.le.0, invalid Nfp ) - FATAL( readin, Mpol.lt.0 .or. Mpol.gt.MMpol, invalid poloidal resolution: may need to recompile with higher MMpol ) - FATAL( readin, Ntor.lt.0 .or. Ntor.gt.MNtor, invalid toroidal resolution: may need to recompile with higher MNtor ) - FATAL( readin, Nvol.lt.1 .or. Nvol.gt.MNvol, invalid Nvol: may need to recompile with higher MNvol ) - FATAL( readin, mupftol.le.zero, mupftol is too small ) - FATAL( readin, abs(one+gamma).lt.vsmall, 1+gamma appears in denominator in dforce ) !< \todo Please check this; SRH: 27 Feb 18; - FATAL( readin, abs(one-gamma).lt.vsmall, 1-gamma appears in denominator in fu00aa ) !< \todo Please check this; SRH: 27 Feb 18; - FATAL( readin, Lconstraint.lt.-1 .or. Lconstraint.gt.3, illegal Lconstraint ) - FATAL( readin, Igeometry.eq.1 .and. rpol.lt.vsmall, poloidal extent of slab too small or negative ) - FATAL( readin, Igeometry.eq.1 .and. rtor.lt.vsmall, toroidal extent of slab too small or negative ) + + if( Igeometry.lt.1 .or. Igeometry.gt.3 ) then + write(6,'("readin : fatal : myid=",i3," ; Igeometry.lt.1 .or. Igeometry.gt.3 ; invalid geometry ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Igeometry.lt.1 .or. Igeometry.gt.3 : invalid geometry ;" + endif + + + if( Nfp.le.0 ) then + write(6,'("readin : fatal : myid=",i3," ; Nfp.le.0 ; invalid Nfp ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Nfp.le.0 : invalid Nfp ;" + endif + + + if( Mpol.lt.0 .or. Mpol.gt.MMpol ) then + write(6,'("readin : fatal : myid=",i3," ; Mpol.lt.0 .or. Mpol.gt.MMpol ; invalid poloidal resolution: may need to recompile with higher MMpol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Mpol.lt.0 .or. Mpol.gt.MMpol : invalid poloidal resolution: may need to recompile with higher MMpol ;" + endif + + + if( Ntor.lt.0 .or. Ntor.gt.MNtor ) then + write(6,'("readin : fatal : myid=",i3," ; Ntor.lt.0 .or. 
Ntor.gt.MNtor ; invalid toroidal resolution: may need to recompile with higher MNtor ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Ntor.lt.0 .or. Ntor.gt.MNtor : invalid toroidal resolution: may need to recompile with higher MNtor ;" + endif + + + if( Nvol.lt.1 .or. Nvol.gt.MNvol ) then + write(6,'("readin : fatal : myid=",i3," ; Nvol.lt.1 .or. Nvol.gt.MNvol ; invalid Nvol: may need to recompile with higher MNvol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Nvol.lt.1 .or. Nvol.gt.MNvol : invalid Nvol: may need to recompile with higher MNvol ;" + endif + + + if( mupftol.le.zero ) then + write(6,'("readin : fatal : myid=",i3," ; mupftol.le.zero ; mupftol is too small ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : mupftol.le.zero : mupftol is too small ;" + endif + + + if( abs(one+gamma).lt.vsmall ) then + write(6,'("readin : fatal : myid=",i3," ; abs(one+gamma).lt.vsmall ; 1+gamma appears in denominator in dforce ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : abs(one+gamma).lt.vsmall : 1+gamma appears in denominator in dforce ;" + endif + !< \todo Please check this; SRH: 27 Feb 18; + + if( abs(one-gamma).lt.vsmall ) then + write(6,'("readin : fatal : myid=",i3," ; abs(one-gamma).lt.vsmall ; 1-gamma appears in denominator in fu00aa ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : abs(one-gamma).lt.vsmall : 1-gamma appears in denominator in fu00aa ;" + endif + !< \todo Please check this; SRH: 27 Feb 18; + + if( Lconstraint.lt.-1 .or. Lconstraint.gt.3 ) then + write(6,'("readin : fatal : myid=",i3," ; Lconstraint.lt.-1 .or. Lconstraint.gt.3 ; illegal Lconstraint ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Lconstraint.lt.-1 .or. Lconstraint.gt.3 : illegal Lconstraint ;" + endif + + + if( Igeometry.eq.1 .and. rpol.lt.vsmall ) then + write(6,'("readin : fatal : myid=",i3," ; Igeometry.eq.1 .and. rpol.lt.vsmall ; poloidal extent of slab too small or negative ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Igeometry.eq.1 .and. rpol.lt.vsmall : poloidal extent of slab too small or negative ;" + endif + + + if( Igeometry.eq.1 .and. rtor.lt.vsmall ) then + write(6,'("readin : fatal : myid=",i3," ; Igeometry.eq.1 .and. rtor.lt.vsmall ; toroidal extent of slab too small or negative ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Igeometry.eq.1 .and. rtor.lt.vsmall : toroidal extent of slab too small or negative ;" + endif + if( Istellsym.eq.1 ) then Rbs(-MNtor:MNtor,-MMpol:MMpol) = zero @@ -1245,14 +1379,26 @@ subroutine check_inputs() !> \f$Isurf \rightarrow Isurf \cdot \frac{curtor}{\sum_i Isurf_i + Ivolume_i}\f$ !> - FATAL( readin, abs(tflux(Nvol)).lt. vsmall, enclosed toroidal flux cannot be zero ) + + if( abs(tflux(Nvol)).lt. vsmall ) then + write(6,'("readin : fatal : myid=",i3," ; abs(tflux(Nvol)).lt. vsmall ; enclosed toroidal flux cannot be zero ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : abs(tflux(Nvol)).lt. vsmall : enclosed toroidal flux cannot be zero ;" + endif + toroidalflux = tflux(Nvol) ! toroidal flux is a local variable; SRH: 27 Feb 18 tflux(1:Mvol) = tflux(1:Mvol) / toroidalflux ! normalize toroidal flux pflux(1:Mvol) = pflux(1:Mvol) / toroidalflux ! 
normalize poloidal flux - FATAL( readin, tflux(1).lt.zero, enclosed toroidal flux cannot be zero ) + + if( tflux(1).lt.zero ) then + write(6,'("readin : fatal : myid=",i3," ; tflux(1).lt.zero ; enclosed toroidal flux cannot be zero ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : tflux(1).lt.zero : enclosed toroidal flux cannot be zero ;" + endif + do vvol = 2, Mvol !FATAL( readin, tflux(vvol)-tflux(vvol-1).lt.small, toroidal flux is not monotonic ) enddo @@ -1277,13 +1423,25 @@ subroutine check_inputs() toroidalcurrent = Ivolume(Mvol) + sum(Isurf(1:Mvol-1)) if( curtor.NE.0 ) then - FATAL( readin, toroidalcurrent.EQ.0 , Incompatible current profiles and toroidal linking current) + + if( toroidalcurrent.EQ.0 ) then + write(6,'("readin : fatal : myid=",i3," ; toroidalcurrent.EQ.0 ; Incompatible current profiles and toroidal linking current;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : toroidalcurrent.EQ.0 : Incompatible current profiles and toroidal linking current ;" + endif + Ivolume(1:Mvol) = Ivolume(1:Mvol) * curtor / toroidalcurrent Isurf(1:Mvol-1) = Isurf(1:Mvol-1) * curtor / toroidalcurrent else - FATAL( readin, toroidalcurrent.NE.0, Incompatible current profiles and toroidal linking current) + + if( toroidalcurrent.NE.0 ) then + write(6,'("readin : fatal : myid=",i3," ; toroidalcurrent.NE.0 ; Incompatible current profiles and toroidal linking current;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : toroidalcurrent.NE.0 : Incompatible current profiles and toroidal linking current ;" + endif + ! No rescaling if profiles have an overall zero toroidal current endif @@ -1294,14 +1452,26 @@ subroutine check_inputs() do vvol = 1, Mvol - FATAL( readin, Lrad(vvol ).lt.2, require Chebyshev resolution Lrad > 2 so that Lagrange constraints can be satisfied ) + + if( Lrad(vvol ).lt.2 ) then + write(6,'("readin : fatal : myid=",i3," ; Lrad(vvol ).lt.2 ; require Chebyshev resolution Lrad > 2 so that Lagrange constraints can be satisfied ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Lrad(vvol ).lt.2 : require Chebyshev resolution Lrad > 2 so that Lagrange constraints can be satisfied ;" + endif + enddo if (Igeometry.ge.2 .and. Lrad(1).lt.Mpol) then write(ounit,'("readin : ",f10.2," : Minimum Lrad(1) is Mpol, automatically adjusted it to Mpol+4")') cput-cpus Lrad(1) = Mpol + 4 endif - FATAL( readin, mupfits.le.0, must give ma01aa:hybrj a postive integer value for the maximum iterations = mupfits given on input ) + + if( mupfits.le.0 ) then + write(6,'("readin : fatal : myid=",i3," ; mupfits.le.0 ; must give ma01aa:hybrj a postive integer value for the maximum iterations = mupfits given on input ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : mupfits.le.0 : must give ma01aa:hybrj a postive integer value for the maximum iterations = mupfits given on input ;" + endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
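Editorial note: throughout check_inputs the former FATAL( caller, condition, message ) macro is expanded in place into an if / write / MPI_ABORT / stop block. A minimal sketch of how that expanded pattern could be factored into a single helper is given below; the helper name fatal_check is hypothetical and not part of this patch, and it assumes myid and MPI_COMM_SPEC are available from allglobal as they are elsewhere in this file.

    !> Hedged sketch only: a hypothetical helper equivalent to one expanded FATAL block.
    subroutine fatal_check( caller, condition, message )
      use mpi
      use allglobal, only : myid, MPI_COMM_SPEC   ! assumption: same module variables as used above
      implicit none
      character(len=*), intent(in) :: caller    !< name of the calling routine, e.g. "readin"
      logical,          intent(in) :: condition !< abort when .true.
      character(len=*), intent(in) :: message   !< human-readable reason
      integer :: ierr
      if( condition ) then
        write(6,'(a," : fatal : myid=",i3," ; ",a," ;")') trim(caller), myid, trim(message)
        call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
        stop
      endif
    end subroutine fatal_check

    ! usage, mirroring one of the checks above:
    !   call fatal_check( 'readin', mupfits.le.0, 'mupfits must be a positive iteration limit' )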
@@ -1319,11 +1489,23 @@ subroutine check_inputs() 1022 format("readin : ", 10x ," : Lsparse="i2" ; Lsvdiota="i2" ; imethod="i2" ; iorder="i2" ; iprecon="i2" ; iotatol="es13.5" ;") 1023 format("readin : ", 10x ," : Lextrap="i2" ; Mregular="i3" ; Lrzaxis="i2" ; Ntoraxis="i2" ;") - FATAL( readin, Ndiscrete.le.0, error ) + + if( Ndiscrete.le.0 ) then + write(6,'("readin : fatal : myid=",i3," ; Ndiscrete.le.0 ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Ndiscrete.le.0 : error ;" + endif + !FATAL(readin, Lfreebound.eq.1 .and. Lconstraint.gt.0 .and. Lsparse.eq.0, have not implemented dense Fourier angle transformation in vacuum region ) - FATAL( readin, iotatol.gt.one, illegal value for sparse tolerance ) ! I think that the sparse iota solver is no longer implemented; SRH: 27 Feb 18; + + if( iotatol.gt.one ) then + write(6,'("readin : fatal : myid=",i3," ; iotatol.gt.one ; illegal value for sparse tolerance ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : iotatol.gt.one : illegal value for sparse tolerance ;" + endif + ! I think that the sparse iota solver is no longer implemented; SRH: 27 Feb 18; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -1341,12 +1523,48 @@ subroutine check_inputs() 1030 format("readin : ",f10.2," : LBeltrami="i2" ; Linitgues="i2" ; Lmatsolver="i2" ; LGMRESprec="i2" ; NiterGMRES="i4" ; epsGMRES="es13.5" ; epsILU="es13.5" ;" ) - FATAL( readin, LBeltrami.lt.0 .or. LBeltrami.gt.7, error ) - FATAL( readin, Lmatsolver.lt.0 .or. Lmatsolver.gt.3, error ) - FATAL( readin, LGMRESprec.lt.0 .or. LGMRESprec.gt.1, error ) - FATAL( readin, NiterGMRES.lt.0, error ) - FATAL( readin, abs(epsGMRES).le.machprec , error ) - FATAL( readin, abs(epsILU).le.machprec , error ) + + if( LBeltrami.lt.0 .or. LBeltrami.gt.7 ) then + write(6,'("readin : fatal : myid=",i3," ; LBeltrami.lt.0 .or. LBeltrami.gt.7 ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : LBeltrami.lt.0 .or. LBeltrami.gt.7 : error ;" + endif + + + if( Lmatsolver.lt.0 .or. Lmatsolver.gt.3 ) then + write(6,'("readin : fatal : myid=",i3," ; Lmatsolver.lt.0 .or. Lmatsolver.gt.3 ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Lmatsolver.lt.0 .or. Lmatsolver.gt.3 : error ;" + endif + + + if( LGMRESprec.lt.0 .or. LGMRESprec.gt.1 ) then + write(6,'("readin : fatal : myid=",i3," ; LGMRESprec.lt.0 .or. LGMRESprec.gt.1 ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : LGMRESprec.lt.0 .or. LGMRESprec.gt.1 : error ;" + endif + + + if( NiterGMRES.lt.0 ) then + write(6,'("readin : fatal : myid=",i3," ; NiterGMRES.lt.0 ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : NiterGMRES.lt.0 : error ;" + endif + + + if( abs(epsGMRES).le.machprec ) then + write(6,'("readin : fatal : myid=",i3," ; abs(epsGMRES).le.machprec ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : abs(epsGMRES).le.machprec : error ;" + endif + + + if( abs(epsILU).le.machprec ) then + write(6,'("readin : fatal : myid=",i3," ; abs(epsILU).le.machprec ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : abs(epsILU).le.machprec : error ;" + endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
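Editorial note: the epsGMRES and epsILU guards above compare user-supplied tolerances against machprec. A small self-contained sketch of that style of check follows, assuming machprec corresponds to the Fortran machine epsilon of the working real kind (an assumption; the actual value comes from the constants module).

    program tolerance_check   ! hedged, self-contained illustration only
      implicit none
      integer, parameter :: wp = kind(1.0d0)      ! assumed working precision
      real(wp) :: epsGMRES, machprec
      machprec = epsilon(1.0_wp)                  ! ~2.2e-16 in double precision
      epsGMRES = 1.0e-14_wp
      if( abs(epsGMRES) <= machprec ) then
        write(*,'("epsGMRES is at or below machine precision; choose a larger tolerance")')
      else
        write(*,'("epsGMRES = ",es13.5," is usable")') epsGMRES
      end if
    end program tolerance_check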
@@ -1366,13 +1584,43 @@ subroutine check_inputs() 1043 format("readin : ", 10x ," : mfreeits="i4" ; gBntol="es13.5" ; gBnbld="es13.5" ;") 1044 format("readin : ", 10x ," : vcasingeps="es13.5" ; vcasingtol="es13.5" ; vcasingits="i6" ; vcasingper="i6" ;") - FATAL( readin, escale .lt.zero , error ) - FATAL( readin, pcondense .lt.one , error ) - FATAL( readin, abs(c05xtol).le.machprec , error ) - FATAL( readin, c05factor .le.zero , error ) + + if( escale .lt.zero ) then + write(6,'("readin : fatal : myid=",i3," ; escale .lt.zero ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : escale .lt.zero : error ;" + endif + + + if( pcondense .lt.one ) then + write(6,'("readin : fatal : myid=",i3," ; pcondense .lt.one ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : pcondense .lt.one : error ;" + endif + + + if( abs(c05xtol).le.machprec ) then + write(6,'("readin : fatal : myid=",i3," ; abs(c05xtol).le.machprec ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : abs(c05xtol).le.machprec : error ;" + endif + + + if( c05factor .le.zero ) then + write(6,'("readin : fatal : myid=",i3," ; c05factor .le.zero ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : c05factor .le.zero : error ;" + endif + !FATAL( readin, mfreeits .lt.zero , error ) - FATAL( readin, Igeometry.eq.3 .and. pcondense.le.zero, pcondense must be positive ) + + if( Igeometry.eq.3 .and. pcondense.le.zero ) then + write(6,'("readin : fatal : myid=",i3," ; Igeometry.eq.3 .and. pcondense.le.zero ; pcondense must be positive ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Igeometry.eq.3 .and. pcondense.le.zero : pcondense must be positive ;" + endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -1386,7 +1634,13 @@ subroutine check_inputs() 1050 format("readin : ",f10.2," : odetol="es10.2" ; nPpts="i6" ;") 1051 format("readin : ", 10x ," : LHevalues="L2" ; LHevectors="L2" ; LHmatrix="L2" ; Lperturbed="i2" ; dpp="i3" ; dqq="i3" ; dRZ="es16.8" ; Lcheck="i3" ; Ltiming="L2" ;") - FATAL( readin, odetol.le.zero, input error ) + + if( odetol.le.zero ) then + write(6,'("readin : fatal : myid=",i3," ; odetol.le.zero ; input error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : odetol.le.zero : input error ;" + endif + !FATAL( readin, absreq.le.zero, input error ) !FATAL( readin, relreq.le.zero, input error ) !FATAL( readin, absacc.le.zero, input error ) @@ -1400,7 +1654,12 @@ subroutine check_inputs() write(ounit,'("readin : ", 10x ," : ")') - RETURN(readin) + +9999 continue + cput = MPI_WTIME() + Treadin = Treadin + ( cput-cpuo ) + return + end subroutine ! check_inputs @@ -1411,164 +1670,376 @@ subroutine broadcast_inputs use fileunits use inputlist - LOCALS - ClBCAST( ext , 100, 0 ) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + + call MPI_BCAST(ext ,100,MPI_CHARACTER,0 ,MPI_COMM_SPEC,ierr) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
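Editorial note: the BEGIN/RETURN timing macros are expanded here into explicit MPI_WTIME calls that accumulate the elapsed wall-clock time of each routine into its counter (Treadin, Twrtend, ...). A minimal sketch of that pattern is shown below; the counter name Texample is illustrative only.

    subroutine timed_routine( Texample )   ! hedged sketch; Texample stands in for Treadin etc.
      use mpi                              ! provides MPI_WTIME()
      implicit none
      real(kind(1.0d0)), intent(inout) :: Texample
      real(kind(1.0d0)) :: cpui, cput
      cpui = MPI_WTIME()                   ! was: BEGIN(routine)
      ! ... body of the routine ...
    9999 continue                          ! common exit point, as in the expanded RETURN(...)
      cput = MPI_WTIME()
      Texample = Texample + ( cput - cpui )
    end subroutine timed_routine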
!> **broadcast physicslist** - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : broadcasting physicslist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : broadcasting physicslist from ext.sp ;")') cput-cpus endif - IlBCAST( Igeometry , 1, 0 ) - IlBCAST( Istellsym , 1, 0 ) - IlBCAST( Lfreebound , 1, 0 ) - RlBCAST( phiedge , 1, 0 ) - RlBCAST( curtor , 1, 0 ) - RlBCAST( curpol , 1, 0 ) - RlBCAST( gamma , 1, 0 ) - IlBCAST( Nfp , 1, 0 ) - IlBCAST( Nvol , 1, 0 ) - IlBCAST( Mpol , 1, 0 ) - IlBCAST( Ntor , 1, 0 ) - IlBCAST( Lrad , MNvol+1, 0 ) - RlBCAST( tflux , MNvol+1, 0 ) - RlBCAST( pflux , MNvol+1, 0 ) - RlBCAST( helicity , MNvol , 0 ) - RlBCAST( pscale , 1, 0 ) - RlBCAST( pressure , MNvol+1, 0 ) - IlBCAST( Ladiabatic , 1, 0 ) - RlBCAST( adiabatic , MNvol+1, 0 ) - RlBCAST( mu , MNvol+1, 0 ) - RlBCAST( Ivolume , MNvol+1, 0 ) - RlBCAST( Isurf , MNvol+1, 0 ) - IlBCAST( Lconstraint, 1, 0 ) - IlBCAST( pl , MNvol , 0 ) - IlBCAST( ql , MNvol , 0 ) - IlBCAST( pr , MNvol , 0 ) - IlBCAST( qr , MNvol , 0 ) - RlBCAST( iota , MNvol , 0 ) - IlBCAST( lp , MNvol , 0 ) - IlBCAST( lq , MNvol , 0 ) - IlBCAST( rp , MNvol , 0 ) - IlBCAST( rq , MNvol , 0 ) - RlBCAST( oita , MNvol , 0 ) - RlBCAST( mupftol , 1, 0 ) - IlBCAST( mupfits , 1, 0 ) - IlBCAST( Lreflect , 1, 0 ) - RlBCAST( rpol , 1, 0 ) - RlBCAST( rtor , 1, 0 ) + + call MPI_BCAST( Igeometry , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Istellsym , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Lfreebound , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(phiedge ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(curtor ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(curpol ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(gamma ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( Nfp , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Nvol , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Mpol , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Ntor , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Lrad , MNvol+1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(tflux ,MNvol+1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(pflux ,MNvol+1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(helicity ,MNvol ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(pscale ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(pressure ,MNvol+1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( Ladiabatic , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(adiabatic ,MNvol+1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(mu ,MNvol+1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Ivolume ,MNvol+1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Isurf ,MNvol+1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( Lconstraint, 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( pl , MNvol , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( ql , MNvol , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( pr , MNvol , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( qr , MNvol , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(iota ,MNvol ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call 
MPI_BCAST( lp , MNvol , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( lq , MNvol , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( rp , MNvol , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( rq , MNvol , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(oita ,MNvol ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(mupftol ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( mupfits , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Lreflect , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(rpol ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(rtor ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! !> **broadcast numericlist** - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : broadcasting numericlist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : broadcasting numericlist from ext.sp ;")') cput-cpus endif - IlBCAST( Linitialize, 1, 0 ) - IlBCAST( LautoinitBn, 1, 0 ) - IlBCAST( Lzerovac , 1, 0 ) - IlBCAST( Ndiscrete , 1, 0 ) - IlBCAST( Nquad , 1, 0 ) - IlBCAST( iMpol , 1, 0 ) - IlBCAST( iNtor , 1, 0 ) - IlBCAST( Lsparse , 1, 0 ) - IlBCAST( Lsvdiota , 1, 0 ) - IlBCAST( imethod , 1, 0 ) - IlBCAST( iorder , 1, 0 ) - IlBCAST( iprecon , 1, 0 ) - RlBCAST( iotatol , 1, 0 ) - IlBCAST( Lextrap , 1, 0 ) - IlBCAST( Mregular , 1, 0 ) - IlBCAST( Lrzaxis , 1, 0 ) - IlBCAST( Ntoraxis , 1, 0 ) + + call MPI_BCAST( Linitialize, 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( LautoinitBn, 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Lzerovac , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Ndiscrete , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Nquad , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( iMpol , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( iNtor , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Lsparse , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Lsvdiota , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( imethod , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( iorder , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( iprecon , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(iotatol ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( Lextrap , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Mregular , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Lrzaxis , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Ntoraxis , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
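Editorial note: each IlBCAST / RlBCAST / LlBCAST / ClBCAST macro above becomes a plain MPI_BCAST with the matching MPI datatype (MPI_INTEGER, MPI_DOUBLE_PRECISION, MPI_LOGICAL, MPI_CHARACTER) and root 0. A runnable sketch of that mapping is given below; it uses MPI_COMM_WORLD for self-containment, whereas the code itself broadcasts over MPI_COMM_SPEC.

    program bcast_types   ! hedged illustration of the datatype mapping used above
      use mpi
      implicit none
      integer :: ierr, myid
      integer :: Nvol
      real(kind(1.0d0)) :: phiedge
      logical :: Wreadin
      character(len=100) :: ext
      call MPI_INIT( ierr )
      call MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
      if( myid == 0 ) then                ! values known only on the root rank
        Nvol = 8 ; phiedge = 1.0d0 ; Wreadin = .false. ; ext = 'example'
      endif
      call MPI_BCAST( Nvol   , 1  , MPI_INTEGER         , 0, MPI_COMM_WORLD, ierr )
      call MPI_BCAST( phiedge, 1  , MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr )
      call MPI_BCAST( Wreadin, 1  , MPI_LOGICAL         , 0, MPI_COMM_WORLD, ierr )
      call MPI_BCAST( ext    , 100, MPI_CHARACTER       , 0, MPI_COMM_WORLD, ierr )
      write(*,'("rank ",i3," : Nvol=",i3," phiedge=",f6.2)') myid, Nvol, phiedge
      call MPI_FINALIZE( ierr )
    end program bcast_types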
!> **broadcast globallist** - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : broadcasting globallist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : broadcasting globallist from ext.sp ;")') cput-cpus endif - IlBCAST( Lfindzero , 1 , 0 ) - RlBCAST( escale , 1 , 0 ) - RlBCAST( opsilon , 1 , 0 ) - RlBCAST( pcondense , 1 , 0 ) - RlBCAST( epsilon , 1 , 0 ) - RlBCAST( wpoloidal , 1 , 0 ) - RlBCAST( upsilon , 1 , 0 ) - RlBCAST( forcetol , 1 , 0 ) - RlBCAST( c05xmax , 1 , 0 ) - RlBCAST( c05xtol , 1 , 0 ) - RlBCAST( c05factor , 1 , 0 ) - LlBCAST( LreadGF , 1 , 0 ) - IlBCAST( mfreeits , 1 , 0 ) - RlBCAST( gBntol , 1 , 0 ) - RlBCAST( gBnbld , 1 , 0 ) - RlBCAST( vcasingeps, 1 , 0 ) - RlBCAST( vcasingtol, 1 , 0 ) - IlBCAST( vcasingits, 1 , 0 ) - IlBCAST( vcasingper, 1 , 0 ) + + call MPI_BCAST( Lfindzero , 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(escale ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(opsilon ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(pcondense ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(epsilon ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(wpoloidal ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(upsilon ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(forcetol ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(c05xmax ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(c05xtol ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(c05factor ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(LreadGF ,1 ,MPI_LOGICAL,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( mfreeits , 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(gBntol ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(gBnbld ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(vcasingeps,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(vcasingtol,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( vcasingits, 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( vcasingper, 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
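Editorial note: taken together, the flow in this file is that rank 0 reads each namelist from the input file and the members are then broadcast so that all ranks agree. A compact, hedged sketch of that flow follows, with an illustrative one-member namelist and file name (the real globallist has many more members and is read from ext.sp).

    program read_then_bcast   ! hedged end-to-end sketch; file name and member are illustrative
      use mpi
      implicit none
      integer, parameter :: iunit = 10
      integer :: ierr, myid, instat
      real(kind(1.0d0)) :: escale
      namelist /globallist/ escale
      escale = 0.0d0                                  ! default, as with the real input defaults
      call MPI_INIT( ierr )
      call MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
      if( myid == 0 ) then
        open( iunit, file='example.sp', status='old', action='read', iostat=instat )
        if( instat == 0 ) then
          read( iunit, nml=globallist, iostat=instat )
          close( iunit )
        endif
      endif
      call MPI_BCAST( escale, 1, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr )
      write(*,'("rank ",i3," : escale=",es13.5)') myid, escale
      call MPI_FINALIZE( ierr )
    end program read_then_bcast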
!> **broadcast locallist** - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : broadcasting locallist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : broadcasting locallist from ext.sp ;")') cput-cpus endif - IlBCAST( LBeltrami , 1, 0 ) - IlBCAST( Linitgues , 1, 0 ) - RlBCAST( maxrndgues , 1, 0) - IlBCAST( Lmatsolver , 1, 0 ) - IlBCAST( NiterGMRES , 1, 0 ) - RlBCAST( epsGMRES , 1, 0 ) - IlBCAST( LGMRESprec , 1, 0 ) - RlBCAST( epsILU , 1, 0 ) + + call MPI_BCAST( LBeltrami , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Linitgues , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(maxrndgues ,1,MPI_DOUBLE_PRECISION,0,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( Lmatsolver , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( NiterGMRES , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(epsGMRES ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( LGMRESprec , 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(epsILU ,1,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + ! IlBCAST( Lposdef , 1, 0 ) ! redundant; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! !> **broadcast diagnosticslist** - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : broadcasting diagnosticslist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : broadcasting diagnosticslist from ext.sp ;")') cput-cpus endif - RlBCAST( odetol , 1 , 0 ) + + call MPI_BCAST(odetol ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + !RlBCAST( absreq , 1 , 0 ) !RlBCAST( relreq , 1 , 0 ) !RlBCAST( absacc , 1 , 0 ) !RlBCAST( epsr , 1 , 0 ) - IlBCAST( nPpts , 1 , 0 ) - RlBCAST( Ppts , 1 , 0 ) - IlBCAST( nPtrj , MNvol+1, 0 ) - LlBCAST( LHevalues , 1 , 0 ) - LlBCAST( LHevectors, 1 , 0 ) - LlBCAST( Ltransform, 1 , 0 ) - LlBCAST( LHmatrix , 1 , 0 ) - IlBCAST( Lperturbed, 1 , 0 ) - IlBCAST( dpp , 1 , 0 ) - IlBCAST( dqq , 1 , 0 ) - IlBCAST( Lerrortype, 1 , 0 ) - IlBCAST( Ngrid , 1 , 0 ) - RlBCAST( dRZ , 1 , 0 ) - IlBCAST( Lcheck , 1 , 0 ) - LlBCAST( Ltiming , 1 , 0 ) + + call MPI_BCAST( nPpts , 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(Ppts ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( nPtrj , MNvol+1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(LHevalues ,1 ,MPI_LOGICAL,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(LHevectors,1 ,MPI_LOGICAL,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Ltransform,1 ,MPI_LOGICAL,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(LHmatrix ,1 ,MPI_LOGICAL,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( Lperturbed, 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( dpp , 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( dqq , 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Lerrortype, 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST( Ngrid , 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(dRZ ,1 ,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST( Lcheck , 1 , MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + + + call MPI_BCAST(Ltiming ,1 ,MPI_LOGICAL,0 ,MPI_COMM_SPEC,ierr) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
!> **broadcast screenlist** - if( Wreadin ) then ; cput = GETTIME ; write(ounit,'("readin : ",f10.2," : broadcasting screenlist from ext.sp ;")') cput-cpus + if( Wreadin ) then ; cput = MPI_WTIME() ; write(ounit,'("readin : ",f10.2," : broadcasting screenlist from ext.sp ;")') cput-cpus endif ! BSCREENLIST ! broadcast screenlist; this is expanded by Makefile; do not remove; - LlBCAST( Wreadin, 1, 0 ) - LlBCAST( Wwrtend, 1, 0 ) - LlBCAST( Wmacros, 1, 0 ) + + call MPI_BCAST(Wreadin,1,MPI_LOGICAL,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Wwrtend,1,MPI_LOGICAL,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Wmacros,1,MPI_LOGICAL,0 ,MPI_COMM_SPEC,ierr) + end subroutine ! broadcast_inputs @@ -1589,28 +2060,44 @@ subroutine wrtend !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - - INTEGER :: vvol !< iteration variable over all nested volumes - INTEGER :: imn !< iteration variable for all Fourier harmonics - INTEGER :: ii !< iteration variable for all Fourier harmonics - INTEGER :: jj !< iteration variable - INTEGER :: kk !< iteration variable - INTEGER :: jk !< iteration variable - INTEGER :: Lcurvature !< curvature flag (?) - INTEGER :: mm !< current poloidal mode number - INTEGER :: nn !< current toroidal mode number - REAL :: lss !< (?) - REAL :: teta !< (?) - REAL :: zeta !< (?) - REAL :: st(1:Node) !< (?) - REAL :: Bst(1:Node) !< (?) - REAL :: BR !< (?) - REAL :: BZ !< (?) - REAL :: BP !< (?) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer :: vvol !< iteration variable over all nested volumes + integer :: imn !< iteration variable for all Fourier harmonics + integer :: ii !< iteration variable for all Fourier harmonics + integer :: jj !< iteration variable + integer :: kk !< iteration variable + integer :: jk !< iteration variable + integer :: Lcurvature !< curvature flag (?) + integer :: mm !< current poloidal mode number + integer :: nn !< current toroidal mode number + + real(wp) :: lss !< (?) + real(wp) :: teta !< (?) + real(wp) :: zeta !< (?) + real(wp) :: st(1:Node) !< (?) + real(wp) :: Bst(1:Node) !< (?) + real(wp) :: BR !< (?) + real(wp) :: BZ !< (?) + real(wp) :: BP !< (?) + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(wrtend) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -1619,14 +2106,14 @@ subroutine wrtend !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - if( Wwrtend ) then ; cput = GETTIME ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; opening/writing ext.sp.end ;")') cput-cpus, myid + if( Wwrtend ) then ; cput = MPI_WTIME() ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; opening/writing ext.sp.end ;")') cput-cpus, myid endif #endif open(iunit,file=trim(ext)//".sp.end",status="unknown") ! 
restart input file; #ifdef DEBUG - if( Wwrtend ) then ; cput = GETTIME ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing physicslist ;")') cput-cpus, myid + if( Wwrtend ) then ; cput = MPI_WTIME() ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing physicslist ;")') cput-cpus, myid endif #endif @@ -1774,7 +2261,7 @@ subroutine wrtend write(iunit,'("/")') - if( Wwrtend ) then ; cput = GETTIME ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing numericlist ;")') cput-cpus, myid + if( Wwrtend ) then ; cput = MPI_WTIME() ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing numericlist ;")') cput-cpus, myid endif write(iunit,'("&numericlist")') @@ -1797,7 +2284,7 @@ subroutine wrtend write(iunit,'(" Ntoraxis = ",i9 )') Ntoraxis write(iunit,'("/")') - if( Wwrtend ) then ; cput = GETTIME ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing locallist ;")') cput-cpus, myid + if( Wwrtend ) then ; cput = MPI_WTIME() ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing locallist ;")') cput-cpus, myid endif write(iunit,'("&locallist")') @@ -1813,7 +2300,7 @@ subroutine wrtend !write(iunit,'(" Nmaxexp = ",i9 )') Nmaxexp write(iunit,'("/")') - if( Wwrtend ) then ; cput = GETTIME ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing globallist ;")') cput-cpus, myid + if( Wwrtend ) then ; cput = MPI_WTIME() ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing globallist ;")') cput-cpus, myid endif write(iunit,'("&globallist")') @@ -1838,7 +2325,7 @@ subroutine wrtend write(iunit,'(" vcasingper = ",i9 )') vcasingper write(iunit,'("/")') - if( Wwrtend ) then ; cput = GETTIME ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing diagnosticslist ;")') cput-cpus, myid + if( Wwrtend ) then ; cput = MPI_WTIME() ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing diagnosticslist ;")') cput-cpus, myid endif write(iunit,'("&diagnosticslist")') @@ -1861,7 +2348,7 @@ subroutine wrtend write(iunit,'(" Ltiming = ",L9 )') Ltiming write(iunit,'("/")') - if( Wwrtend ) then ; cput = GETTIME ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing screenlist ;")') cput-cpus, myid + if( Wwrtend ) then ; cput = MPI_WTIME() ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; writing screenlist ;")') cput-cpus, myid endif write(iunit,'("&screenlist")') @@ -1872,10 +2359,34 @@ subroutine wrtend write(iunit,'("/")') #ifdef DEBUG - FATAL( wrtend, .not.allocated(iRbc), error ) - FATAL( wrtend, .not.allocated(iZbs), error ) - FATAL( wrtend, .not.allocated(iRbs), error ) - FATAL( wrtend, .not.allocated(iZbc), error ) + + if( .not.allocated(iRbc) ) then + write(6,'("wrtend : fatal : myid=",i3," ; .not.allocated(iRbc) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wrtend : .not.allocated(iRbc) : error ;" + endif + + + if( .not.allocated(iZbs) ) then + write(6,'("wrtend : fatal : myid=",i3," ; .not.allocated(iZbs) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wrtend : .not.allocated(iZbs) : error ;" + endif + + + if( .not.allocated(iRbs) ) then + write(6,'("wrtend : fatal : myid=",i3," ; .not.allocated(iRbs) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wrtend : .not.allocated(iRbs) : error ;" + endif + + + if( .not.allocated(iZbc) ) then + write(6,'("wrtend : fatal : myid=",i3," ; .not.allocated(iZbc) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wrtend : .not.allocated(iZbc) : error ;" + endif + #endif ! 
write initial guess of interface geometry @@ -1885,13 +2396,18 @@ subroutine wrtend close(iunit) #ifdef DEBUG - if( Wwrtend ) then ; cput = GETTIME ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; wrote ext.sp.end ;")') cput-cpus, myid + if( Wwrtend ) then ; cput = MPI_WTIME() ; write(ounit,'("wrtend : ",f10.2," : myid=",i3," ; wrote ext.sp.end ;")') cput-cpus, myid endif #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(wrtend) + +9999 continue + cput = MPI_WTIME() + Twrtend = Twrtend + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -1907,9 +2423,17 @@ end subroutine wrtend !> @param vvol volume to check subroutine IsMyVolume(vvol) -LOCALS -INTEGER, intent(in) :: vvol +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + +integer, intent(in) :: vvol !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -1927,9 +2451,17 @@ end subroutine IsMyVolume !> \brief Returns which MPI node is associated to a given volume. subroutine WhichCpuID(vvol, cpu_id) -LOCALS -INTEGER :: vvol, cpu_id +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + +integer :: vvol, cpu_id !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -1945,7 +2477,7 @@ end module allglobal !> \brief Interface to FFTW library module fftw_interface ! JAB; 25 Jul 17 - + use mod_kinds, only: wp => dp use, intrinsic :: iso_c_binding implicit none diff --git a/src/h5utils.F90 b/src/h5utils.F90 new file mode 100644 index 00000000..a853a0a1 --- /dev/null +++ b/src/h5utils.F90 @@ -0,0 +1,1073 @@ +module h5utils +use hdf5 +use mod_kinds, only: wp => dp +use allglobal, only: MPI_COMM_SPEC, myid +implicit none + +logical, parameter :: hdfDebug = .false. !< global flag to enable verbal diarrhea commenting HDF5 operations +integer, parameter :: internalHdf5Msg = 0 !< 1: print internal HDF5 error messages; 0: only error messages from sphdf5 + +character(LEN=*), parameter :: aname = "description" !< Attribute name for descriptive info + +! private +! public internalHdf5Msg, & +! HDEFGRP, & +! HCLOSEGRP, & +! H5DESCR, & +! H5DESCR_CDSET, & +! HWRITELV, & +! HWRITELV_LO, & +! HWRITEIV, & +! HWRITEIV_LO, & +! HWRITERV, & +! HWRITERV_LO, & +! HWRITERA, & +! HWRITERA_LO, & +! HWRITERC, & +! HWRITERC_LO + +contains + +!> Define a HDF5 group or opens it if it already exists. +!> +!> @param file_id file in which to define the group +!> @param name name of the new group +!> @param group_id id of the newly-created group +subroutine HDEFGRP(file_id, name, group_id) + integer(hid_t), intent(in) :: file_id + character(len=*), intent(in) :: name + integer(hid_t), intent(out) :: group_id + + logical :: grp_exists !< flags used to signal if a group already exists + integer :: hdfier !< error flag for HDF5 library + + call h5lexists_f(file_id, name, grp_exists, hdfier) + + if (grp_exists) then + ! 
if the group already exists, open it + call h5gopen_f(file_id, name, group_id, hdfier) + if (hdfier .ne. 0) then + write(6, '("sphdf5 : "10x" : error calling h5gopen_f from hdefgrp")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5gopen_f from hdefgrp" + endif + else + ! if group does not exist, create it + call h5gcreate_f(file_id, name, group_id, hdfier) + if (hdfier .ne. 0) then + write(6, '("sphdf5 : "10x" : error calling h5gcreate_f from hdefgrp")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5gcreate_f from hdefgrp" + endif + endif +end subroutine ! HDEFGRP + +!> Close a HDF5 group. +!> +!> @param group_id HDF5 group to close +subroutine HCLOSEGRP(group_id) + integer(hid_t), intent(in) :: group_id + + integer :: hdfier !< error flag for HDF5 library + + call h5gclose_f(group_id, hdfier) + if (hdfier .ne. 0) then + write(6, '("sphdf5 : "10x" : error calling h5gclose_f from hclosegrp")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5gclose_f from hclosegrp" + endif +end subroutine ! HCLOSEGRP + +!> Describe an already-open HDF5 object identified by group_id +!> with text given in description and leave it open +!> +!> @param group_id ID of group to describe +!> @param description description for group +subroutine H5DESCR(group_id, description) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: description + + integer :: hdfier !< error flag for HDF5 library + + integer(HID_T) :: attr_id !< Attribute identifier + integer(HID_T) :: aspace_id !< Attribute Dataspace identifier + integer(HID_T) :: atype_id !< Attribute Datatype identifier + + integer, parameter :: arank = 1 !< Attribure rank + integer(HSIZE_T), dimension(arank) :: adims = (/1/) !< Attribute dimension + integer(SIZE_T) :: attr_len !< Length of the attribute string + + attr_len = len(description) + + !> Create scalar data space for the attribute. + call h5screate_simple_f(arank, adims, aspace_id, hdfier) + if (hdfier .ne. 0) then + write(6, '("sphdf5 : "10x" : error calling h5screate_simple_f from h5descr")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5screate_simple_f from h5descr" + endif + + !> Create datatype for the attribute. + call h5tcopy_f(H5T_NATIVE_CHARACTER, atype_id, hdfier) + if (hdfier .ne. 0) then + write(6,'("sphdf5 : "10x" : error calling h5tcopy_f from h5descr")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5tcopy_f from h5descr" + endif + + ! set size of datatype for attribute + call h5tset_size_f(atype_id, attr_len, hdfier) + if (hdfier .ne. 0) then + write(6,'("sphdf5 : "10x" : error calling h5tset_size_f from h5descr")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5tset_size_f from h5descr" + endif + + !> create descriptive attribute + call h5acreate_f(group_id, aname, atype_id, aspace_id, attr_id, hdfier) + if (hdfier .ne. 0) then + write(6,'("sphdf5 : "10x" : error calling h5acreate_f from h5descr")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5acreate_f from h5descr" + endif + + !> Write the attribute data. + call h5awrite_f(attr_id, atype_id, description, adims, hdfier) + if (hdfier .ne. 0) then + write(6,'("sphdf5 : "10x" : error calling h5awrite_f from h5descr")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5awrite_f from h5descr" + endif + + !> Close the attribute. + call h5aclose_f(attr_id, hdfier) + if (hdfier .ne. 
0) then + write(6,'("sphdf5 : "10x" : error calling h5aclose_f from h5descr")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5aclose_f from h5descr" + endif + + !> Close the attribute datatype. + call h5tclose_f(atype_id, hdfier) + if (hdfier .ne. 0) then + write(6,'("sphdf5 : "10x" : error calling h5tclose_f from h5descr")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5tclose_f from h5descr" + endif + + !> Terminate access to the data space. + call h5sclose_f(aspace_id, hdfier) + if (hdfier .ne. 0) then + write(6,'("sphdf5 : "10x" : error calling h5sclose_f from h5descr")') + call MPI_ABORT(MPI_COMM_SPEC, 1) + stop "sphdf5 : error calling h5sclose_f from h5descr" + endif +end subroutine ! H5DESCR + +!> Describe an already-open HDF5 dataset identified by dset_id +!> with text given in _2 and close it at the end +!> +!> @param dset_id ID of dataset to describe +!> @param description description for dataset +subroutine H5DESCR_CDSET(dset_id, description) + integer(hid_t), intent(in) :: dset_id + character(len=*), intent(in) :: description + + integer :: hdfier !< error flag for HDF5 library + + integer(HID_T) :: aspace_id !< Attribute Dataspace identifier + integer(HID_T) :: atype_id !< Attribute Datatype identifier + integer(HID_T) :: attr_id !< Attribute identifier + + integer(SIZE_T) :: attr_len !< Length of the attribute string + integer, parameter :: arank = 1 !< Attribure rank + integer(HSIZE_T), dimension(arank) :: adims = (/1/) !< Attribute dimension + + attr_len = len(description) + + ! Create scalar data space for the attribute. + call h5screate_simple_f(arank, adims, aspace_id, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from h5descr_cdset at $3:$4 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from h5descr_cdset at $3:$4 ;" + endif + + ! Create datatype for the attribute. + call h5tcopy_f(H5T_NATIVE_CHARACTER, atype_id, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5tcopy_f from h5descr_cdset at $3:$4 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5tcopy_f from h5descr_cdset at $3:$4 ;" + endif + + call h5tset_size_f(atype_id, attr_len, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5tset_size_f from h5descr_cdset at $3:$4 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5tset_size_f from h5descr_cdset at $3:$4 ;" + endif + + ! create descriptive attribute + call h5acreate_f(dset_id, aname, atype_id, aspace_id, attr_id, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5acreate_f from h5descr_cdset at $3:$4 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5acreate_f from h5descr_cdset at $3:$4 ;" + endif + + ! Write the attribute data. + call h5awrite_f(attr_id, atype_id, description, adims, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5awrite_f from h5descr_cdset at $3:$4 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5awrite_f from h5descr_cdset at $3:$4 ;" + endif + + ! Close the attribute. + call h5aclose_f(attr_id, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5aclose_f from h5descr_cdset at $3:$4 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5aclose_f from h5descr_cdset at $3:$4 ;" + endif + + ! Close the attribute datatype. 
+ call h5tclose_f(atype_id, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5tclose_f from h5descr_cdset at $3:$4 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5tclose_f from h5descr_cdset at $3:$4 ;" + endif + + ! Terminate access to the data space. + call h5sclose_f(aspace_id, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5sclose_f from h5descr_cdset at $3:$4 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5sclose_f from h5descr_cdset at $3:$4 ;" + endif + + call h5dclose_f(dset_id, hdfier) ! terminate dataset; + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dclose_f from h5descr_cdset at $3:$4 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dclose_f from h5descr_cdset at $3:$4 ;" + endif +end subroutine ! H5DESCR_CDSET + +!> write logical variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1 +!> example: hwritelv( grpInputGlobal, 1, LreadGF, (/ LreadGF /) ) ! scalar +!> example: hwritelv( grpInput, 5, success, success(1:5) ) ! rank-1 +subroutine HWRITELV(group_id, name, num_data, data) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: name + integer, intent(in) :: num_data + logical, dimension(:), intent(in) :: data + + integer :: hdfier !< error flag for HDF5 library + logical :: var_exists !< flags used to signal if a variable already exists + + integer(hsize_t) :: onedims(1) !< dimension specifier for one-dimensional data used in macros + integer(hid_t) :: space_id !< default dataspace ID used in macros + integer(hid_t) :: dset_id !< default dataset ID used in macros + + onedims(1) = num_data + + if (num_data .le. 0) then + write(6, '("sphdf5 : "10x" : error calling hwriteiv ; ",a," : ",i1," .le. 0")') name, num_data + else + + call h5screate_simple_f(1, onedims, space_id, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from hwritelv at $5:$6 ;" + endif + + ! temporarily disable error printing to not confuse users + call h5eset_auto_f(0, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwritelv at $5:$6 ;" + endif + + ! check if dataset can be opened + call h5dopen_f(group_id, name, dset_id, hdfier) + if (hdfier.lt.0) then + var_exists = .false. + if (hdfDebug .and. myid.eq.0) then + write(*,*) "dataset",name,"does not exist yet, creating it" + endif + else + var_exists = .true. + if (hdfDebug .and. myid.eq.0) then + write(*,*) "dataset",name,"exists already, opening it" + endif + endif + + ! re-establish previous state of error printing to be sensitive to "real" errors + call h5eset_auto_f(internalHdf5Msg, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwritelv at $5:$6 ;" + endif + + ! if the dataset does not exist already, create it. 
Otherwise, it should be open already + if (.not.var_exists) then + call h5dcreate_f(group_id, name, H5T_NATIVE_INTEGER, space_id, dset_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dcreate_f from hwritelv at $5:$6 ;" + endif + endif + + call h5dwrite_f( dset_id, H5T_NATIVE_INTEGER, merge(1, 0, data) , onedims, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dwrite_f from hwritelv at $5:$6 ;" + endif + + call h5dclose_f(dset_id, hdfier) ! close dataset; + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dclose_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dclose_f from hwritelv at $5:$6 ;" + endif + + endif +end subroutine ! HWRITELV + +! write logical variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1 and leave dataset open for e.g. adding an attribute; _5 and _6 should be __FILE__ and __LINE__ +! example: hwritelv_lo( grpInputGlobal, 1, LreadGF, (/ LreadGF /) ) ! scalar +! example: hwritelv_lo( grpInput, 5, success, success(1:5) ) ! rank-1 +! and close it using h5descr_cdset( /input/global/LreadGF, reading flag for GF ) +subroutine HWRITELV_LO(group_id, name, num_data, data, dset_id) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: name + integer, intent(in) :: num_data + logical, dimension(:), intent(in) :: data + integer(hid_t), intent(out) :: dset_id !< default dataset ID used in macros + + integer :: hdfier !< error flag for HDF5 library + logical :: var_exists !< flags used to signal if a variable already exists + + integer(hsize_t) :: onedims(1) !< dimension specifier for one-dimensional data used in macros + integer(hid_t) :: space_id !< default dataspace ID used in macros + + onedims(1) = num_data + + if (num_data .le. 0) then + write(6, '("sphdf5 : "10x" : error calling hwriteiv ; ",a," : ",i1," .le. 0")') name, num_data + else + + call h5screate_simple_f(1, onedims, space_id, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from hwritelv at $5:$6 ;" + endif + + ! temporarily disable error printing to not confuse users + call h5eset_auto_f(0, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwritelv at $5:$6 ;" + endif + + ! check if dataset can be opened + call h5dopen_f(group_id, name, dset_id, hdfier) + if (hdfier.lt.0) then + var_exists = .false. + if (hdfDebug .and. myid.eq.0) then + write(*,*) "dataset",name,"does not exist yet, creating it" + endif + else + var_exists = .true. + if (hdfDebug .and. myid.eq.0) then + write(*,*) "dataset",name,"exists already, opening it" + endif + endif + + ! 
re-establish previous state of error printing to be sensitive to "real" errors + call h5eset_auto_f(internalHdf5Msg, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwritelv at $5:$6 ;" + endif + + ! if the dataset does not exist already, create it. Otherwise, it should be open already + if (.not.var_exists) then + call h5dcreate_f(group_id, name, H5T_NATIVE_INTEGER, space_id, dset_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dcreate_f from hwritelv at $5:$6 ;" + endif + endif + + call h5dwrite_f( dset_id, H5T_NATIVE_INTEGER, merge(1, 0, data) , onedims, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwritelv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dwrite_f from hwritelv at $5:$6 ;" + endif + + endif +end subroutine ! HWRITELV_LO + +! write integer variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1; _5 and _6 should be __FILE__ and __LINE__ +! example: hwriteiv( grpInputPhysics, 1, Igeometry, (/ Igeometry /) ) ! scalar +! example: hwriteiv( grpInputPhysics, Mvol, Lrad, Lrad(1:Mvol) ) ! rank-1 +subroutine HWRITEIV(group_id, name, num_data, data) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: name + integer, intent(in) :: num_data + integer, dimension(:), intent(in) :: data + + integer :: hdfier !< error flag for HDF5 library + logical :: var_exists !< flags used to signal if a variable already exists + + integer(hsize_t) :: onedims(1) !< dimension specifier for one-dimensional data used in macros + integer(hid_t) :: space_id !< default dataspace ID used in macros + integer(hid_t) :: dset_id !< default dataset ID used in macros + + onedims(1) = num_data + + if (num_data .le. 0) then + write(6,'("sphdf5 : "10x" : error calling hwriteiv ; $3 : $2.le.0 at $5:$6 ;")') + else + + call h5screate_simple_f( 1, onedims, space_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from hwriteiv at $5:$6 ;" + endif + + ! temporarily disable error printing to not confuse users + call h5eset_auto_f(0, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriteiv at $5:$6 ;" + endif + + ! check if dataset can be opened + call h5dopen_f(group_id, name, dset_id, hdfier) + if (hdfier.lt.0) then + var_exists = .false. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 does not exist yet, creating it" + endif + else + var_exists = .true. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 exists already, opening it" + endif + endif + + ! re-establish previous state of error printing to be sensitive to "real" errors + call h5eset_auto_f(internalHdf5Msg, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriteiv at $5:$6 ;" + endif + + ! 
if the dataset does not exist already, create it. Otherwise, it should be open already + if (.not.var_exists) then + call h5dcreate_f(group_id, name, H5T_NATIVE_INTEGER, space_id, dset_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dcreate_f from hwriteiv at $5:$6 ;" + endif + endif + + call h5dwrite_f( dset_id, H5T_NATIVE_INTEGER, data, onedims, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dwrite_f from hwriteiv at $5:$6 ;" + endif + + call h5dclose_f(dset_id, hdfier) ! terminate dataset; + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dclose_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dclose_f from hwriteiv at $5:$6 ;" + endif + + endif +end subroutine ! HWRITEIV + +! write integer variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1 and leave the dataset open for e.g. adding and attribute +! example: hwriteiv( grpInputPhysics, 1, Igeometry, (/ Igeometry /) ) ! scalar +! example: hwriteiv( grpInputPhysics, Mvol, Lrad, Lrad(1:Mvol) ) ! rank-1 +! and close it using h5descr_cdset( /input/physics/Igeometry, geometry identifier ) +subroutine HWRITEIV_LO(group_id, name, num_data, data, dset_id) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: name + integer, intent(in) :: num_data + integer, dimension(:), intent(in) :: data + integer(hid_t), intent(out) :: dset_id !< default dataset ID used in macros + + integer :: hdfier !< error flag for HDF5 library + logical :: var_exists !< flags used to signal if a variable already exists + + integer(hsize_t) :: onedims(1) !< dimension specifier for one-dimensional data used in macros + integer(hid_t) :: space_id !< default dataspace ID used in macros + + onedims(1) = num_data + + if (num_data .le. 0) then + write(6,'("sphdf5 : "10x" : error calling hwriteiv ; $3 : $2.le.0 at $5:$6 ;")') + else + + call h5screate_simple_f( 1, onedims, space_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from hwriteiv at $5:$6 ;" + endif + + ! temporarily disable error printing to not confuse users + call h5eset_auto_f(0, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriteiv at $5:$6 ;" + endif + + ! check if dataset can be opened + call h5dopen_f(group_id, name, dset_id, hdfier) + if (hdfier.lt.0) then + var_exists = .false. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 does not exist yet, creating it" + endif + else + var_exists = .true. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 exists already, opening it" + endif + endif + + ! re-establish previous state of error printing to be sensitive to "real" errors + call h5eset_auto_f(internalHdf5Msg, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriteiv at $5:$6 ;" + endif + + ! 
if the dataset does not exist already, create it. Otherwise, it should be open already + if (.not.var_exists) then + call h5dcreate_f(group_id, name, H5T_NATIVE_INTEGER, space_id, dset_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dcreate_f from hwriteiv at $5:$6 ;" + endif + endif + + call h5dwrite_f( dset_id, H5T_NATIVE_INTEGER, data, onedims, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriteiv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dwrite_f from hwriteiv at $5:$6 ;" + endif + + endif +end subroutine ! HWRITEIV_LO + +! write real variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1; _5 and _6 should be __LINE__ and __FILE__ +! example: hwriterv( grpInputPhysics, 1, phiedge, (/ phiedge /) ) ! scalar +! example: hwriterv( grpInputPhysics, Mvol, tflux, tflux(1:Mvol) ) ! rank-1 +subroutine HWRITERV(group_id, name, num_data, data) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: name + integer, intent(in) :: num_data + real(wp), dimension(:), intent(in) :: data + + integer :: hdfier !< error flag for HDF5 library + logical :: var_exists !< flags used to signal if a variable already exists + + integer(hsize_t) :: onedims(1) !< dimension specifier for one-dimensional data used in macros + integer(hid_t) :: space_id !< default dataspace ID used in macros + integer(hid_t) :: dset_id !< default dataset ID used in macros + + onedims(1) = num_data + + if( num_data.le.0 ) then + write(6,'("sphdf5 : "10x" : error calling hwriterv ; $3 : $2.le.0 at $5:$6 ;")') + else + + call h5screate_simple_f( 1, onedims, space_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from hwriterv at $5:$6 ;" + endif + + ! temporarily disable error printing to not confuse users + call h5eset_auto_f(0, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriterv at $5:$6 ;" + endif + + ! check if dataset can be opened + call h5dopen_f(group_id, name, dset_id, hdfier) + if (hdfier.lt.0) then + var_exists = .false. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 does not exist yet, creating it" + endif + else + var_exists = .true. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 exists already, opening it" + endif + endif + + ! re-establish previous state of error printing to be sensitive to "real" errors + call h5eset_auto_f(internalHdf5Msg, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriterv at $5:$6 ;" + endif + + ! if the dataset does not exist already, create it. 
Otherwise, it should be open already + if (.not.var_exists) then + call h5dcreate_f(group_id, name, H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dcreate_f from hwriterv at $5:$6 ;" + endif + endif + + call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, data, onedims, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dwrite_f from hwriterv at $5:$6 ;" + endif + + call h5dclose_f(dset_id, hdfier) ! terminate dataset; + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dclose_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dclose_f from hwriterv at $5:$6 ;" + endif + + endif +end subroutine ! HWRITERV + +! write real variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1 and leave it open, e.g. for adding an attribute; _5 and _6 should be __FILE__ and __LINE__ +! example: hwriterv( grpInputPhysics, 1, phiedge, (/ phiedge /) ) ! scalar +! example: hwriterv( grpInputPhysics, Mvol, tflux, tflux(1:Mvol) ) ! rank-1 +! and close it with h5descr_cdset( /input/physics/phiedge, total enclosed toroidal flux ) +subroutine HWRITERV_LO(group_id, name, num_data, data, dset_id) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: name + integer, intent(in) :: num_data + real(wp), dimension(:), intent(in) :: data + integer(hid_t), intent(out) :: dset_id !< default dataset ID used in macros + + integer :: hdfier !< error flag for HDF5 library + logical :: var_exists !< flags used to signal if a variable already exists + + integer(hsize_t) :: onedims(1) !< dimension specifier for one-dimensional data used in macros + integer(hid_t) :: space_id !< default dataspace ID used in macros + + onedims(1) = num_data + + if( num_data.le.0 ) then + write(6,'("sphdf5 : "10x" : error calling hwriterv ; $3 : $2.le.0 at $5:$6 ;")') + else + + call h5screate_simple_f( 1, onedims, space_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from hwriterv at $5:$6 ;" + endif + + ! temporarily disable error printing to not confuse users + call h5eset_auto_f(0, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriterv at $5:$6 ;" + endif + + ! check if dataset can be opened + call h5dopen_f(group_id, name, dset_id, hdfier) + if (hdfier.lt.0) then + var_exists = .false. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 does not exist yet, creating it" + endif + else + var_exists = .true. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 exists already, opening it" + endif + endif + + ! re-establish previous state of error printing to be sensitive to "real" errors + call h5eset_auto_f(internalHdf5Msg, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriterv at $5:$6 ;" + endif + + ! 
if the dataset does not exist already, create it. Otherwise, it should be open already + if (.not.var_exists) then + call h5dcreate_f(group_id, name, H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dcreate_f from hwriterv at $5:$6 ;" + endif + endif + + call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, data, onedims, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriterv at $5:$6 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dwrite_f from hwriterv at $5:$6 ;" + endif + + endif +end subroutine ! HWRITERV_LO + +! write real array _5 (_2 rows, _3 columns) into a dataset named _4 into group _1; _6 and _7 should be __FILE__ and __LINE__ +! example: hwritera( grpInputPhysics, (2*Ntor+1), (2*Mpol+1), Rbc, Rbc(-Ntor:Ntor,-Mpol:Mpol) ) +subroutine HWRITERA(group_id, name, num_rows, num_cols, data) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: name + integer, intent(in) :: num_rows, num_cols + real(wp), dimension(:,:), intent(in) :: data + + integer :: hdfier !< error flag for HDF5 library + logical :: var_exists !< flags used to signal if a variable already exists + + integer(hsize_t) :: twodims(2) !< dimension specifier for two-dimensional data used in macros + integer(hid_t) :: space_id !< default dataspace ID used in macros + integer(hid_t) :: dset_id !< default dataset ID used in macros + + twodims(1:2) = (/ num_rows, num_cols /) + + if(num_rows .le. 0 .or. num_cols .le. 0) then + write(6,'("sphdf5 : "10x" : error calling hwritera ; $4 : $2.le.0 .or. $3.le.0 at $6:$7 ;")') + else + + call h5screate_simple_f( 2, twodims, space_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from hwritera at $6:$7 ;" + endif + + ! temporarily disable error printing to not confuse users + call h5eset_auto_f(0, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwritera at $6:$7 ;" + endif + + ! check if dataset can be opened + call h5dopen_f(group_id, name, dset_id, hdfier) + if (hdfier.lt.0) then + var_exists = .false. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $4 does not exist yet, creating it" + endif + else + var_exists = .true. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $4 exists already, opening it" + endif + endif + + ! re-establish previous state of error printing to be sensitive to "real" errors + call h5eset_auto_f(internalHdf5Msg, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwritera at $6:$7 ;" + endif + + ! if the dataset does not exist already, create it. 
Otherwise, it should be open already + if (.not.var_exists) then + call h5dcreate_f(group_id, name, H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dcreate_f from hwritera at $6:$7 ;" + endif + endif + + call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, data, twodims, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dwrite_f from hwritera at $6:$7 ;" + endif + + call h5dclose_f(dset_id, hdfier) ! terminate dataset; + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dclose_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dclose_f from hwritera at $6:$7 ;" + endif + + endif +end subroutine ! HWRITERA + +! write real array _5 (_2 rows, _3 columns) into a dataset named _4 into group _1 and leave it open, e.g. for adding an attribute; _6 and _7 should be __FILE__ and __LINE__ +! example: hwritera( grpInputPhysics, (2*Ntor+1), (2*Mpol+1), Rbc, Rbc(-Ntor:Ntor,-Mpol:Mpol) ) +! and close it then via h5descr_cdset( /input/physics/Rbc, boundary R cosine Fourier coefficients ) +subroutine HWRITERA_LO(group_id, name, num_rows, num_cols, data, dset_id) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: name + integer, intent(in) :: num_rows, num_cols + real(wp), dimension(:,:), intent(in) :: data + integer(hid_t), intent(out) :: dset_id !< default dataset ID used in macros + + integer :: hdfier !< error flag for HDF5 library + logical :: var_exists !< flags used to signal if a variable already exists + + integer(hsize_t) :: twodims(2) !< dimension specifier for two-dimensional data used in macros + integer(hid_t) :: space_id !< default dataspace ID used in macros + + twodims(1:2) = (/ num_rows, num_cols /) + + if(num_rows .le. 0 .or. num_cols .le. 0) then + write(6,'("sphdf5 : "10x" : error calling hwritera ; $4 : $2.le.0 .or. $3.le.0 at $6:$7 ;")') + else + + call h5screate_simple_f( 2, twodims, space_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from hwritera at $6:$7 ;" + endif + + ! temporarily disable error printing to not confuse users + call h5eset_auto_f(0, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwritera at $6:$7 ;" + endif + + ! check if dataset can be opened + call h5dopen_f(group_id, name, dset_id, hdfier) + if (hdfier.lt.0) then + var_exists = .false. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $4 does not exist yet, creating it" + endif + else + var_exists = .true. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $4 exists already, opening it" + endif + endif + + ! re-establish previous state of error printing to be sensitive to "real" errors + call h5eset_auto_f(internalHdf5Msg, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwritera at $6:$7 ;" + endif + + ! 
if the dataset does not exist already, create it. Otherwise, it should be open already + if (.not.var_exists) then + call h5dcreate_f(group_id, name, H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dcreate_f from hwritera at $6:$7 ;" + endif + endif + + call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, data, twodims, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwritera at $6:$7 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dwrite_f from hwritera at $6:$7 ;" + endif + + endif +end subroutine ! HWRITERA_LO + +! write real cube _6 (_2 rows, _3 columns, _4 pages) into a dataset named _5 into group _1; _7 and _8 should containt __FILE__ and __LINE__ +! example: hwriterc( grpOutput, (Mrad+1), 2, 2, TT, TT(0:Mrad,0:1,0:1) ) +subroutine HWRITERC(group_id, name, num_rows, num_cols, num_pages, data) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: name + integer, intent(in) :: num_rows, num_cols, num_pages + real(wp), dimension(:,:,:), intent(in) :: data + + integer :: hdfier !< error flag for HDF5 library + logical :: var_exists !< flags used to signal if a variable already exists + + integer(hsize_t) :: threedims(3) !< dimension specifier for three-dimensional data used in macros + integer(hid_t) :: space_id !< default dataspace ID used in macros + integer(hid_t) :: dset_id !< default dataset ID used in macros + + threedims(1:3) = (/ num_rows, num_cols, num_pages /) + + if(num_rows.le.0 .or. num_cols.le.0 .or. num_pages.le.0 ) then + + write(6,'("sphdf5 : "10x" : error calling hwriterc ; $5 : $2.le.0 .or. $3.le.0 .or. $4.le.0 at $7:$8 ;")') + + else + + call h5screate_simple_f( 3, threedims, space_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from hwriterc at $7:$8 ;" + endif + + ! temporarily disable error printing to not confuse users + call h5eset_auto_f(0, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriterc at $7:$8 ;" + endif + + ! check if dataset can be opened + call h5dopen_f(group_id, name, dset_id, hdfier) + if (hdfier.lt.0) then + var_exists = .false. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $5 does not exist yet, creating it" + endif + else + var_exists = .true. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $5 exists already, opening it" + endif + endif + + ! re-establish previous state of error printing to be sensitive to "real" errors + call h5eset_auto_f(internalHdf5Msg, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriterc at $7:$8 ;" + endif + + ! if the dataset does not exist already, create it. 
Otherwise, it should be open already + if (.not.var_exists) then + call h5dcreate_f(group_id, name, H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dcreate_f from hwriterc at $7:$8 ;" + endif + endif + + call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, data, threedims, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dwrite_f from hwriterc at $7:$8 ;" + endif + + call h5dclose_f(dset_id, hdfier) ! terminate dataset; + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dclose_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dclose_f from hwriterc at $7:$8 ;" + endif + + endif +end subroutine ! HWRITERC + +! write real cube _6 (_2 rows, _3 columns, _4 pages) into a dataset named _5 into group _1 and leave open for e.g. adding an attribute; _7 and _8 should be __FILE__ and __LINE__ +! example: hwriterc( grpOutput, (Mrad+1), 2, 2, TT, TT(0:Mrad,0:1,0:1) ) +! and close it with h5descr_cdset( /output/TT, something abbreviated by TT ) +subroutine HWRITERC_LO(group_id, name, num_rows, num_cols, num_pages, data, dset_id) + integer(hid_t), intent(in) :: group_id + character(len=*), intent(in) :: name + integer, intent(in) :: num_rows, num_cols, num_pages + real(wp), dimension(:,:,:), intent(in) :: data + integer(hid_t), intent(out) :: dset_id !< default dataset ID used in macros + + integer :: hdfier !< error flag for HDF5 library + logical :: var_exists !< flags used to signal if a variable already exists + + integer(hsize_t) :: threedims(3) !< dimension specifier for three-dimensional data used in macros + integer(hid_t) :: space_id !< default dataspace ID used in macros + + threedims(1:3) = (/ num_rows, num_cols, num_pages /) + + if(num_rows.le.0 .or. num_cols.le.0 .or. num_pages.le.0 ) then + + write(6,'("sphdf5 : "10x" : error calling hwriterc ; $5 : $2.le.0 .or. $3.le.0 .or. $4.le.0 at $7:$8 ;")') + + else + + call h5screate_simple_f( 3, threedims, space_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5screate_simple_f from hwriterc at $7:$8 ;" + endif + + ! temporarily disable error printing to not confuse users + call h5eset_auto_f(0, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriterc at $7:$8 ;" + endif + + ! check if dataset can be opened + call h5dopen_f(group_id, name, dset_id, hdfier) + if (hdfier.lt.0) then + var_exists = .false. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $5 does not exist yet, creating it" + endif + else + var_exists = .true. + if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $5 exists already, opening it" + endif + endif + + ! 
re-establish previous state of error printing to be sensitive to "real" errors + call h5eset_auto_f(internalHdf5Msg, hdfier) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5eset_auto_f from hwriterc at $7:$8 ;" + endif + + ! if the dataset does not exist already, create it. Otherwise, it should be open already + if (.not.var_exists) then + call h5dcreate_f(group_id, name, H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dcreate_f from hwriterc at $7:$8 ;" + endif + endif + + call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, data, threedims, hdfier ) + if( hdfier.ne.0 ) then + write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriterc at $7:$8 ;")') + call MPI_ABORT( MPI_COMM_SPEC, 1 ) + stop "sphdf5 : error calling h5dwrite_f from hwriterc at $7:$8 ;" + endif + + endif +end subroutine ! HWRITERC_LO + +end module ! h5utils diff --git a/src/hesian.f90 b/src/hesian.F90 similarity index 77% rename from src/hesian.f90 rename to src/hesian.F90 index d4fb9e33..2a1126b3 100644 --- a/src/hesian.f90 +++ b/src/hesian.F90 @@ -10,7 +10,7 @@ !> @param[in] mn number of Fourier harmonics !> @param[in] LGdof what is this? subroutine hesian( NGdof, position, Mvol, mn, LGdof ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, two, ten @@ -36,59 +36,75 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) hessian2D,dessian2D,Lhessian2Dallocated, & Lhessian3Dallocated,denergydrr, denergydrz,denergydzr,denergydzz, & LocalConstraint - + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: NGdof, Mvol, mn, LGdof - REAL :: position(0:NGdof) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: NGdof, Mvol, mn, LGdof + real(wp) :: position(0:NGdof) LOGICAL :: LComputeDerivatives, LComputeAxis - REAL :: force(0:NGdof), gradient(0:NGdof) + real(wp) :: force(0:NGdof), gradient(0:NGdof) - REAL :: xx(0:NGdof,-2:2), ff(0:NGdof,-2:2), df(1:NGdof)!, deriv + real(wp) :: xx(0:NGdof,-2:2), ff(0:NGdof,-2:2), df(1:NGdof)!, deriv - INTEGER :: vvol, idof, ii, mi, ni, irz, issym, isymdiff, lvol, ieval(1:1), igdof, ifd - REAL :: oldEnergy(-2:2), error, cpul + integer :: vvol, idof, ii, mi, ni, irz, issym, isymdiff, lvol, ieval(1:1), igdof, ifd + real(wp) :: oldEnergy(-2:2), error, cpul - REAL :: oldBB(1:Mvol,-2:2), oBBdRZ(1:Mvol,0:1,1:LGdof), ohessian(1:NGdof,1:NGdof) + real(wp) :: oldBB(1:Mvol,-2:2), oBBdRZ(1:Mvol,0:1,1:LGdof), ohessian(1:NGdof,1:NGdof) - REAL :: oRbc(1:mn,0:Mvol), oZbs(1:mn,0:Mvol), oRbs(1:mn,0:Mvol), oZbc(1:mn,0:Mvol), determinant + real(wp) :: oRbc(1:mn,0:Mvol), oZbs(1:mn,0:Mvol), oRbs(1:mn,0:Mvol), oZbc(1:mn,0:Mvol), determinant - CHARACTER :: pack + character :: pack - CHARACTER :: svol*3 + character :: svol*3 ! LOGICAL :: Lderiv ! for parallel / series construction of Hessian; ! 
INTEGER :: lvol, jvol, ivol, innout, imn, irz, jmn, jrz, tdoc, tdof, ilocaldof, jlocaldof ! for parallel / series construction of Hessian; - INTEGER :: tdof, tdoc, jvol, jj, jrz, jssym + integer :: tdof, tdoc, jvol, jj, jrz, jssym - INTEGER :: Lwork, LDA, Ldvi, Ldvr, if02ebf - REAL :: evalr(1:NGdof), evali(1:NGdof) + integer :: Lwork, LDA, Ldvi, Ldvr, if02ebf + real(wp) :: evalr(1:NGdof), evali(1:NGdof) ! REAL :: evecr(1:NGdof,1:NGdof), eveci(1:NGdof,1:NGdof) - REAL :: evecr(1:NGdof,1:NGdof), eveci(1:NGdof,1:NGdof),revecr(1:NGdof,1:2*NGdof), evecl(1:NGdof,1:NGdof) - REAL :: work(1:4*NGdof) ! for construction of evalues/evectors; - CHARACTER :: JOB + real(wp) :: evecr(1:NGdof,1:NGdof), eveci(1:NGdof,1:NGdof),revecr(1:NGdof,1:2*NGdof), evecl(1:NGdof,1:NGdof) + real(wp) :: work(1:4*NGdof) ! for construction of evalues/evectors; + character :: JOB - INTEGER :: iev, jev, M1, M2, irank(1:NGdof), im01daf ! for construction of evalues/evectors; - CHARACTER :: order + integer :: iev, jev, M1, M2, irank(1:NGdof), im01daf ! for construction of evalues/evectors; + character :: order - REAL :: dRZ != 1.0e-03 + real(wp) :: dRZ != 1.0e-03 - REAL :: lmu(1:Mvol), lpflux(1:Mvol), lhelicity(1:Mvol) ! original profiles; 20 Jun 14; + real(wp) :: lmu(1:Mvol), lpflux(1:Mvol), lhelicity(1:Mvol) ! original profiles; 20 Jun 14; - INTEGER :: IA - INTEGER :: idgesvx, idgetrf, ipiv(1:Ngdof), iwork4(1:NGdof) - CHARACTER :: equed - REAL :: perturbation(1:LGdof) - REAL :: rhs(1:NGdof), solution(0:NGdof) - REAL :: rworka(1:NGdof), rworkb(1:NGdof), AA(1:NGdof,1:NGdof) - REAL :: Rdgesvx(1:NGdof), Cdgesvx(1:NGdof), AF(1:NGdof,1:NGdof), work4(1:4*NGdof), rcond, ferr, berr, sgn + integer :: IA + integer :: idgesvx, idgetrf, ipiv(1:Ngdof), iwork4(1:NGdof) + character :: equed + real(wp) :: perturbation(1:LGdof) + real(wp) :: rhs(1:NGdof), solution(0:NGdof) + real(wp) :: rworka(1:NGdof), rworkb(1:NGdof), AA(1:NGdof,1:NGdof) + real(wp) :: Rdgesvx(1:NGdof), Cdgesvx(1:NGdof), AF(1:NGdof,1:NGdof), work4(1:4*NGdof), rcond, ferr, berr, sgn + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(hesian) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -105,7 +121,13 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) oldBB(1:Mvol,0) = lBBintegral(1:Mvol) - FATAL( hesian, .not.allocated(dBBdRZ), need to revise logic in preset where dBBdRZ is allocated ) + + if( .not.allocated(dBBdRZ) ) then + write(6,'("hesian : fatal : myid=",i3," ; .not.allocated(dBBdRZ) ; need to revise logic in preset where dBBdRZ is allocated ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "hesian : .not.allocated(dBBdRZ) : need to revise logic in preset where dBBdRZ is allocated ;" + endif + oBBdRZ(1:Mvol,0:1,1:LGdof) = dBBdRZ(1:Mvol,0:1,1:LGdof) @@ -114,7 +136,13 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) oRbs(1:mn,0:Mvol) = iRbs(1:mn,0:Mvol) oZbc(1:mn,0:Mvol) = iZbc(1:mn,0:Mvol) - FATAL( hesian, Lfreebound.eq.1, this routine needs attention ) + + if( Lfreebound.eq.1 ) then + write(6,'("hesian : fatal : myid=",i3," ; Lfreebound.eq.1 ; this routine needs attention ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "hesian : Lfreebound.eq.1 : this routine needs attention ;" + endif + do vvol = 1, Mvol-1 ! 
loop over volumes; 26 Feb 13; @@ -159,11 +187,21 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) pack = 'P' !; position(0) = zero ! this is not used; 11 Aug 14; LComputeAxis = .true. LComputeDerivatives = .false. !; position(0) = zero ! this is not used; 11 Aug 14; - WCALL( hesian, packxi, ( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), & - iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, LComputeDerivatives, LComputeAxis ) ) + + cput = MPI_WTIME() + Thesian = Thesian + ( cput-cpuo ) + call packxi( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), & + iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, LComputeDerivatives, LComputeAxis ) + cpuo = MPI_WTIME() - WCALL( hesian, dforce, ( NGdof, position(0:NGdof), gradient(0:NGdof), LComputeDerivatives, LComputeAxis ) ) ! re-calculate Beltrami fields; + + + cput = MPI_WTIME() + Thesian = Thesian + ( cput-cpuo ) + call dforce( NGdof, position(0:NGdof), gradient(0:NGdof), LComputeDerivatives, LComputeAxis ) + cpuo = MPI_WTIME() + ! re-calculate Beltrami fields; oldBB(1:Mvol,isymdiff) = lBBintegral(1:Mvol) @@ -175,14 +213,20 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) oldEnergy(0) = ( - 1 * oldEnergy(2) + 8 * oldEnergy(1) - 8 * oldEnergy(-1) + 1 * oldEnergy(-2) ) / ( 12 * dRZ ) - cput = GETTIME + cput = MPI_WTIME() write(ounit,1000) cput-cpus !12345678901234567 write(ounit,1000) cput-cpus, myid, vvol, irz, mi, ni, "finite-difference", oldBB(vvol:vvol+1,0) write(ounit,1000) cput-cpus, myid, vvol, irz, mi, ni, "analytic ", (/ oBBdRZ(vvol,1,idof), oBBdRZ(vvol+1,0,idof) /) / psifactor(ii,vvol) write(ounit,1001) cput-cpus, myid, vvol, irz, mi, ni, oldEnergy(0) write(ounit,1001) cput-cpus, myid, vvol, irz, mi, ni, ( oBBdRZ(vvol,1,idof) + oBBdRZ(vvol+1,0,idof) ) / psifactor(ii,vvol) ! ENERGY GRADIENT; - FATAL( hesian, Igeometry.eq.1, Cartesian geometry does not need regularization factor ) + + if( Igeometry.eq.1 ) then + write(6,'("hesian : fatal : myid=",i3," ; Igeometry.eq.1 ; Cartesian geometry does not need regularization factor ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "hesian : Igeometry.eq.1 : Cartesian geometry does not need regularization factor ;" + endif + 1000 format("hesian : ",f10.2," : ":"myid=",i3," ; ":"vvol=",i3," ; ":"irz="i2" ; (",i3," ,",i3," ) ; "a17" ["es15.7","es15.7" ]") 1001 format("hesian : ",f10.2," : ":"myid=",i3," ; ":"vvol=",i3," ; ":"irz="i2" ; (",i3," ,",i3," ) ; "es15.7" ; ") @@ -201,30 +245,59 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) iZbc(1:mn,0:Mvol) = oZbc(1:mn,0:Mvol) pack = 'P' !; position(0) = zero ! this is not used; 11 Aug 14; - WCALL( hesian, packxi,( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), & - iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, LComputeDerivatives, LComputeAxis ) ) + + cput = MPI_WTIME() + Thesian = Thesian + ( cput-cpuo ) + call packxi( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), & + iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, LComputeDerivatives, LComputeAxis ) + cpuo = MPI_WTIME() + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - if(LHmatrix .and. Igeometry.eq.2) then - SALLOCATE( HdFFdRZ , (1:LGdof,0:1,1:LGdof,0:1,1:Mvol), zero ) + if(LHmatrix .and. 
Igeometry.eq.2) then + + allocate( HdFFdRZ (1:LGdof,0:1,1:LGdof,0:1,1:Mvol), stat=astat ) + HdFFdRZ (1:LGdof,0:1,1:LGdof,0:1,1:Mvol) = zero + endif - SALLOCATE( dBBdmp , (1:LGdof,1:Mvol,0:1, 1:2), zero ) - - SALLOCATE( denergydrr, (1:LGdof,1:Mvol,0:1,1:LGdof,0:1), zero) + + allocate( dBBdmp (1:LGdof,1:Mvol,0:1, 1:2), stat=astat ) + dBBdmp (1:LGdof,1:Mvol,0:1, 1:2) = zero + + + + allocate( denergydrr(1:LGdof,1:Mvol,0:1,1:LGdof,0:1), stat=astat ) + denergydrr(1:LGdof,1:Mvol,0:1,1:LGdof,0:1) = zero + !SALLOCATE( denergydrz, (1:LGdof,1:Mvol,0:1,1:LGdof,0:1), zero) - SALLOCATE( denergydzr, (1:LGdof,1:Mvol,0:1,1:LGdof,0:1), zero) + + allocate( denergydzr(1:LGdof,1:Mvol,0:1,1:LGdof,0:1), stat=astat ) + denergydzr(1:LGdof,1:Mvol,0:1,1:LGdof,0:1) = zero + !SALLOCATE( denergydzz, (1:LGdof,1:Mvol,0:1,1:LGdof,0:1), zero) if( LocalConstraint ) then - SALLOCATE( dmupfdx, (1:Mvol, 1:1, 1:2, 1:LGdof, 0:1), zero ) + + allocate( dmupfdx(1:Mvol, 1:1, 1:2, 1:LGdof, 0:1), stat=astat ) + dmupfdx(1:Mvol, 1:1, 1:2, 1:LGdof, 0:1) = zero + else - SALLOCATE( dmupfdx, (1:Mvol, 1:Mvol-1, 1:2, 1:LGdof, 0:1), zero) + + allocate( dmupfdx(1:Mvol, 1:Mvol-1, 1:2, 1:LGdof, 0:1), stat=astat ) + dmupfdx(1:Mvol, 1:Mvol-1, 1:2, 1:LGdof, 0:1) = zero + endif - SALLOCATE( hessian2D, (1:NGdof,1:NGdof), zero ) - SALLOCATE( dessian2D, (1:NGdof,1:LGdof), zero ) ! part of hessian that depends on boundary variations; 18 Dec 14; + + allocate( hessian2D(1:NGdof,1:NGdof), stat=astat ) + hessian2D(1:NGdof,1:NGdof) = zero + + + allocate( dessian2D(1:NGdof,1:LGdof), stat=astat ) + dessian2D(1:NGdof,1:LGdof) = zero + ! part of hessian that depends on boundary variations; 18 Dec 14; !if (LHmatrix) then Lhessian3Dallocated = .true. @@ -235,8 +308,13 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) LComputeDerivatives = .true. !; position(0) = zero ! this is not used; 11 Aug 14; LComputeAxis = .false. - WCALL( hesian, dforce, ( NGdof, position(0:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis) ) ! calculate force-imbalance & hessian; - + + cput = MPI_WTIME() + Thesian = Thesian + ( cput-cpuo ) + call dforce( NGdof, position(0:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis) + cpuo = MPI_WTIME() + ! calculate force-imbalance & hessian; + ohessian(1:NGdof,1:NGdof) = hessian2D(1:NGdof,1:NGdof) ! internal copy; 22 Apr 15; @@ -268,7 +346,7 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) do vvol = 1, Mvol-1 ! loop over internal interfaces; - cput = GETTIME + cput = MPI_WTIME() ! write(lunit+myid,'("hesian : ", 10x ," : ")') ! write(lunit+myid,'("hesian : ",f10.2," : myid=",i3," ; vvol=",i3," ; dRZ=",es9.1," ;")') cput-cpus, myid, vvol, dRZ ! write(lunit+myid,'("hesian : ", 10x ," : ")') @@ -298,7 +376,12 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) LComputeDerivatives = .false. LComputeAxis = .true. - WCALL( hesian, dforce, ( NGdof, xx(0:NGdof,isymdiff), ff(0:NGdof,isymdiff), LComputeDerivatives, LComputeAxis) ) ! force-imbalance; + + cput = MPI_WTIME() + Thesian = Thesian + ( cput-cpuo ) + call dforce( NGdof, xx(0:NGdof,isymdiff), ff(0:NGdof,isymdiff), LComputeDerivatives, LComputeAxis) + cpuo = MPI_WTIME() + ! force-imbalance; enddo ! end of do isymdiff; 20 Jun 14; @@ -323,10 +406,22 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) tdoc = tdoc + 1 - FATAL( hesian, tdoc.lt. 1, needs attention ) - FATAL( hesian, tdoc.gt.NGdof, needs attention ) - cput = GETTIME + if( tdoc.lt. 1 ) then + write(6,'("hesian : fatal : myid=",i3," ; tdoc.lt. 
1 ; needs attention ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "hesian : tdoc.lt. 1 : needs attention ;" + endif + + + if( tdoc.gt.NGdof ) then + write(6,'("hesian : fatal : myid=",i3," ; tdoc.gt.NGdof ; needs attention ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "hesian : tdoc.gt.NGdof : needs attention ;" + endif + + + cput = MPI_WTIME() error = abs( df(tdoc)-hessian(tdoc,tdof) ) @@ -378,7 +473,12 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) enddo ! end of do vvol; pack = 'U' !; position(0) = zero ! this is not used; 11 Aug 14; - WCALL( hesian, packxi, ( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, .false., LComputeAxis ) ) + + cput = MPI_WTIME() + Thesian = Thesian + ( cput-cpuo ) + call packxi( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, .false., LComputeAxis ) + cpuo = MPI_WTIME() + mu(1:Nvol) = lmu(1:Nvol) ; pflux(1:Nvol) = lpflux(1:Nvol) ; helicity(1:Nvol) = lhelicity(1:Nvol) @@ -418,7 +518,7 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) if( LHmatrix ) then - if( myid.eq.0 ) then ; cput = GETTIME ; write(ounit,'("hesian : ",f10.2," : LHmatrix="L2" ;")')cput-cpus, LHmatrix ; + if( myid.eq.0 ) then ; cput = MPI_WTIME() ; write(ounit,'("hesian : ",f10.2," : LHmatrix="L2" ;")')cput-cpus, LHmatrix ; open(munit, file="."//trim(ext)//".GF.ma", status="unknown", form="unformatted") write(munit) NGdof write(munit) ohessian(1:NGdof,1:NGdof) @@ -431,7 +531,7 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) ! if( myid.eq.0 .and. ( LHevalues .or. LHevectors ) ) then ! the call to dforce below requires all cpus; 04 Dec 14; if( ( LHevalues .or. LHevectors ) ) then - if( myid.eq.0 ) then ; cput = GETTIME ; write(ounit,'("hesian : ",f10.2," : LHevalues="L2" , LHevectors="L2" ;")')cput-cpus, LHevalues, LHevectors + if( myid.eq.0 ) then ; cput = MPI_WTIME() ; write(ounit,'("hesian : ",f10.2," : LHevalues="L2" , LHevectors="L2" ;")')cput-cpus, LHevalues, LHevectors endif evalr(1:NGdof) = zero ; evecr(1:NGdof,1:NGdof) = zero @@ -441,10 +541,10 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) else ; JOB='N' ; Ldvr = 1 ; Ldvi = 1 ! provide dummy values when eigenvectors are not required; 04 Dec 14; endif - cpul = GETTIME + cpul = MPI_WTIME() if02ebf = 1 ; LDA = NGdof ; Lwork = 4*NGdof - + hessian2D(1:NGdof,1:NGdof) = ohessian(1:NGdof,1:NGdof) !#ifdef NAG18 @@ -459,7 +559,7 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) eveci(1:Ldvr,1:NGdof) = revecr(1:Ldvr,NGdof+1:2*NGdof) if( myid.eq.0 ) then - cput = GETTIME + cput = MPI_WTIME() if (if02ebf < 0) then write(ounit,'("hesian : ",f10.2," : DGEEV error the "i2" th argument had illegal value ; time="f10.2"s ;")') cput-cpus, -if02ebf, cput-cpul else if (if02ebf > 0) then @@ -593,7 +693,13 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) perturbation(1:LGdof) = zero - FATAL( hesian, Igeometry.gt.2 .or. NOTstellsym, only for stellarator-symmetric cylindrical ) + + if( Igeometry.gt.2 .or. NOTstellsym ) then + write(6,'("hesian : fatal : myid=",i3," ; Igeometry.gt.2 .or. NOTstellsym ; only for stellarator-symmetric cylindrical ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "hesian : Igeometry.gt.2 .or. NOTstellsym : only for stellarator-symmetric cylindrical ;" + endif + do ii = 1, mn if( im(ii).eq.dqq .and. in(ii).eq.dpp ) perturbation(ii) = one ! 
impose arbitrary perturbation; 18 Dec 14; @@ -608,11 +714,11 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) else rhs(1:NGdof) = - matmul( dessian(1:NGdof,1:LGdof), perturbation(1:LGdof) ) - + hessian2D(1:NGdof,1:NGdof) = ohessian(1:NGdof,1:NGdof) call dgesvx( 'N', 'N', NGdof, 1, hessian2D(1:NGdof,1:NGdof), NGdof, AF(1:NGdof,1:NGdof), & ! Linear solver; 09 Nov 17; - NGdof, ipiv(1:NGdof), equed, Rdgesvx(1:NGdof), Cdgesvx(1:NGdof), & + NGdof, ipiv(1:NGdof), equed, Rdgesvx(1:NGdof), Cdgesvx(1:NGdof), & rhs(1:NGdof), NGdof, solution(1:NGdof), NGdof, rcond, ferr, berr, & work4(1:4*NGdof), iwork4(1:NGdof), idgesvx ) @@ -620,11 +726,22 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) case( 0 ) ; write(ounit,'("hesian : " 10x " : myid="i3" ; linear perturbation ; idgesvx="i3" ;")') myid, idgesvx case( 1: ) ; write(ounit,'("hesian : " 10x " : myid="i3" ; singular matrix ; idgesvx="i3" ;")') myid, idgesvx case( :-1 ) ; write(ounit,'("hesian : " 10x " : myid="i3" ; input error ; idgesvx="i3" ;")') myid, idgesvx - case default ; FATAL( hesian, .true., illegal ifail returned from dgesvx ) + case default ; + if( .true. ) then + write(6,'("hesian : fatal : myid=",i3," ; .true. ; illegal ifail returned from dgesvx ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "hesian : .true. : illegal ifail returned from dgesvx ;" + endif + end select pack = 'U' ! unpack geometrical degrees-of-freedom; 13 Sep 13; - WCALL( hesian, packxi, ( NGdof, solution(0:NGdof), Mvol, mn, dRbc(1:mn,0:Mvol), dZbs(1:mn,0:Mvol), dRbs(1:mn,0:Mvol), dZbc(1:mn,0:Mvol), pack, .false., LComputeAxis ) ) + + cput = MPI_WTIME() + Thesian = Thesian + ( cput-cpuo ) + call packxi( NGdof, solution(0:NGdof), Mvol, mn, dRbc(1:mn,0:Mvol), dZbs(1:mn,0:Mvol), dRbs(1:mn,0:Mvol), dZbc(1:mn,0:Mvol), pack, .false., LComputeAxis ) + cpuo = MPI_WTIME() + dRbc(1:mn,Mvol) = perturbation(1:LGdof) @@ -650,11 +767,11 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! if( myid.eq.0 ) then - + hessian2D(1:NGdof,1:NGdof) = ohessian(1:NGdof,1:NGdof) - + call dgetrf( NGdof, NGdof, hessian2D(1:NGdof,1:NGdof), NGdof, ipiv(1:NGdof), idgetrf ) - + determinant = one do iev = 1,NGdof @@ -675,34 +792,59 @@ subroutine hesian( NGdof, position, Mvol, mn, LGdof ) case( 0 ) ; write(ounit,'("hesian : " 10x " : myid="i3" ; idgetrf="i3" ; ; determinant="es13.5" ;")') myid, idgetrf, determinant case( 1: ) ; write(ounit,'("hesian : " 10x " : myid="i3" ; idgetrf="i3" ; singular ; determinant="es13.5" ;")') myid, idgetrf, determinant case( :-1 ) ; write(ounit,'("hesian : " 10x " : myid="i3" ; idgetrf="i3" ; input error ; determinant="es13.5" ;")') myid, idgetrf, determinant - case default ; FATAL( hesian, .true., illegal ifail returned from dgetrf ) + case default ; + if( .true. ) then + write(6,'("hesian : fatal : myid=",i3," ; .true. ; illegal ifail returned from dgetrf ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "hesian : .true. : illegal ifail returned from dgetrf ;" + endif + end select endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - if(LHmatrix .and. Igeometry.eq.2) then - DALLOCATE(HdFFdRZ) + if(LHmatrix .and. 
Igeometry.eq.2) then + + deallocate(HdFFdRZ,stat=astat) + endif - - DALLOCATE(dBBdmp) - DALLOCATE(dmupfdx) - DALLOCATE(denergydrr) + + + deallocate(dBBdmp,stat=astat) + + + deallocate(dmupfdx,stat=astat) + + + deallocate(denergydrr,stat=astat) + !DALLOCATE(denergydrz) - DALLOCATE(denergydzr) + + deallocate(denergydzr,stat=astat) + !DALLOCATE(denergydzz) Lhessian3Dallocated=.false. - DALLOCATE(hessian2D) + + deallocate(hessian2D,stat=astat) + write(ounit,*) 5656 - DALLOCATE(dessian2D) + + deallocate(dessian2D,stat=astat) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(hesian) + +9999 continue + cput = MPI_WTIME() + Thesian = Thesian + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/inputlist.f90 b/src/inputlist.F90 similarity index 84% rename from src/inputlist.f90 rename to src/inputlist.F90 index 30c0b04b..d5b0d54a 100644 --- a/src/inputlist.f90 +++ b/src/inputlist.F90 @@ -5,42 +5,42 @@ !> \addtogroup grp_global !> @{ module inputlist - + use mod_kinds, only: wp => dp implicit none ! The following parameters set the maximum allowed resolution: - INTEGER, parameter :: MNvol = 256 !< The maximum value of \c Nvol is \c MNvol=256. - INTEGER, parameter :: MMpol = 128 !< The maximum value of \c Mpol is \c MNpol=64. - INTEGER, parameter :: MNtor = 128 !< The maximum value of \c Ntor is \c MNtor=64. + integer, parameter :: MNvol = 256 !< The maximum value of \c Nvol is \c MNvol=256. + integer, parameter :: MMpol = 128 !< The maximum value of \c Mpol is \c MNpol=64. + integer, parameter :: MNtor = 128 !< The maximum value of \c Ntor is \c MNtor=64. !> \addtogroup grp_global_physicslist physicslist !> \brief The namelist \c physicslist controls the geometry, profiles, and numerical resolution. !> @{ ! note that all variables in namelist need to be broadcasted in readin - INTEGER :: Igeometry = 3 !< selects Cartesian, cylindrical or toroidal geometry; + integer :: Igeometry = 3 !< selects Cartesian, cylindrical or toroidal geometry; !<
      !< - \c Igeometry=1 : Cartesian; geometry determined by \f$R\f$;
      !< - \c Igeometry=2 : cylindrical; geometry determined by \f$R\f$;
      !< - \c Igeometry=3 : toroidal; geometry determined by \f$R\f$ *and* \f$Z\f$;
    - INTEGER :: Istellsym = 1 !< stellarator symmetry is enforced if \c Istellsym==1 - INTEGER :: Lfreebound = 0 !< compute vacuum field surrounding plasma - REAL :: phiedge = 1.0 !< total enclosed toroidal magnetic flux; - REAL :: curtor = 0.0 !< total enclosed (toroidal) plasma current; - REAL :: curpol = 0.0 !< total enclosed (poloidal) linking current; - REAL :: gamma = 0.0 !< adiabatic index; cannot set \f$|\gamma| = 1\f$ - INTEGER :: Nfp = 1 !< field periodicity + integer :: Istellsym = 1 !< stellarator symmetry is enforced if \c Istellsym==1 + integer :: Lfreebound = 0 !< compute vacuum field surrounding plasma + real(wp) :: phiedge = 1.0 !< total enclosed toroidal magnetic flux; + real(wp) :: curtor = 0.0 !< total enclosed (toroidal) plasma current; + real(wp) :: curpol = 0.0 !< total enclosed (poloidal) linking current; + real(wp) :: gamma = 0.0 !< adiabatic index; cannot set \f$|\gamma| = 1\f$ + integer :: Nfp = 1 !< field periodicity !<
      !< - all Fourier representations are of the form \f$\cos(m\theta-n N \zeta)\f$, \f$\sin(m\theta-n N \zeta)\f$, where \f$N\equiv\f$\c Nfp
      !< - constraint: \c Nfp >= 1
    - INTEGER :: Nvol = 1 !< number of volumes + integer :: Nvol = 1 !< number of volumes !<
      !< - each volume \f${\cal V}_l\f$ is bounded by the \f${\cal I}_{l-1}\f$ and \f${\cal I}_{l}\f$ interfaces
      !< - note that in cylindrical or toroidal geometry, \f${\cal I}_{0}\f$ is the degenerate coordinate axis
      !< - constraint: \c Nvol<=MNvol
    - INTEGER :: Mpol = 0 !< number of poloidal Fourier harmonics + integer :: Mpol = 0 !< number of poloidal Fourier harmonics !<
      !< - all Fourier representations of doubly-periodic functions are of the form
      !<   \f{eqnarray}{ f(\theta,\zeta) & = & \sum_{n=0}^{\texttt{Ntor}} f_{0,n}\cos(-n \, \texttt{Nfp} \, \zeta)
@@ -49,7 +49,7 @@ module inputlist
      !<   Internally these "double" summations are written as a "single" summation,
      !<   e.g. \f$f(\theta,\zeta) = \sum_j f_j \cos(m_j\theta-n_j\zeta)\f$.
    - INTEGER :: Ntor = 0 !< number of toroidal Fourier harmonics + integer :: Ntor = 0 !< number of toroidal Fourier harmonics !<
      !< - all Fourier representations of doubly-periodic functions are of the form
      !<   \f{eqnarray}{ f(\theta,\zeta) & = & \sum_{n=0}^{\texttt{Ntor}} f_{0,n}\cos(-n \, \texttt{Nfp} \, \zeta)
@@ -58,11 +58,11 @@ module inputlist
      !<   Internally these "double" summations are written as a "single" summation,
      !<   e.g. \f$f(\theta,\zeta) = \sum_j f_j \cos(m_j\theta-n_j\zeta)\f$.
    - INTEGER :: Lrad(1:MNvol+1) = 4 !< Chebyshev resolution in each volume + integer :: Lrad(1:MNvol+1) = 4 !< Chebyshev resolution in each volume !<
      !< - constraint : \c Lrad(1:Mvol) >= 2
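! An illustrative (made-up, not from any shipped SPEC input file) fragment showing how the geometry and
! resolution parameters declared above are supplied through the &physicslist namelist of the input file:
!
!   &physicslist
!    Igeometry = 3       ! toroidal geometry
!    Nfp       = 5       ! five field periods
!    Nvol      = 2       ! two nested volumes
!    Mpol      = 6       ! poloidal Fourier resolution
!    Ntor      = 4       ! toroidal Fourier resolution
!    Lrad      = 8  8    ! Chebyshev resolution per volume, >= 2
!   /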
    - INTEGER :: Lconstraint = -1 !< selects constraints; primarily used in ma02aa() and mp00ac(). + integer :: Lconstraint = -1 !< selects constraints; primarily used in ma02aa() and mp00ac(). !<
      !< - if \c Lconstraint==-1, then in the plasma regions \f$\Delta\psi_t\f$, \f$\mu\f$ and \f$\Delta \psi_p\f$ are *not* varied
      !<   and in the vacuum region (only for free-boundary) \f$\Delta\psi_t\f$ and \f$\Delta \psi_p\f$ are *not* varied, and \f$\mu = 0\f$.
@@ -83,7 +83,7 @@ module inputlist
      !<   (except in the innermost volume, where the volume current is irrelevant).
      !<   Not implemented yet in free boundary.
    - REAL :: tflux(1:MNvol+1) = 0.0 !< toroidal flux, \f$\psi_t\f$, enclosed by each interface + real(wp) :: tflux(1:MNvol+1) = 0.0 !< toroidal flux, \f$\psi_t\f$, enclosed by each interface !<
      !< - For each of the plasma volumes, this is a constraint: \c tflux is *not* varied
      !< - For the vacuum region (only if \c Lfreebound==1), \c tflux may be allowed to vary to match constraints
@@ -91,16 +91,16 @@ module inputlist
      !<   so that \c tflux is arbitrary up to a scale factor
      !< - \sa phiedge
    - REAL :: pflux(1:MNvol+1) = 0.0 !< poloidal flux, \f$\psi_p\f$, enclosed by each interface - REAL :: helicity(1:MNvol) = 0.0 !< helicity, \f${\cal K}\f$, in each volume, \f${\cal V}_i\f$ + real(wp) :: pflux(1:MNvol+1) = 0.0 !< poloidal flux, \f$\psi_p\f$, enclosed by each interface + real(wp) :: helicity(1:MNvol) = 0.0 !< helicity, \f${\cal K}\f$, in each volume, \f${\cal V}_i\f$ !<
      !< - on exit, \c helicity is set to the computed values of \f${\cal K} \equiv \int {\bf A}\cdot{\bf B}\;dv\f$
    - REAL :: pscale = 0.0 !< pressure scale factor + real(wp) :: pscale = 0.0 !< pressure scale factor !<
      !< - the initial pressure profile is given by \c pscale \f$*\f$ \c pressure
    - REAL :: pressure(1:MNvol+1) = 0.0 !< pressure in each volume + real(wp) :: pressure(1:MNvol+1) = 0.0 !< pressure in each volume !<
      !< - The pressure is *not* held constant, but \f$p_l V_l^\gamma = P_l\f$ *is* held constant,
      !<   where \f$P_l\f$ is determined by the initial pressures and the initial volumes, \f$V_l\f$.
@@ -108,108 +108,108 @@ module inputlist
      !< - On output, the pressure is given by \f$p_l = P_l/V_l^\gamma\f$, where \f$V_l\f$ is the final volume.
      !< - \c pressure is only used in calculation of interface force-balance.
    - INTEGER :: Ladiabatic = 0 !< logical flag + integer :: Ladiabatic = 0 !< logical flag !<
      !< - If \c Ladiabatic==0, the adiabatic constants are determined by the initial pressure and volume.
      !< - If \c Ladiabatic==1, the adiabatic constants are determined by the given input \c adiabatic.
    - REAL :: adiabatic(1:MNvol+1) = 0.0 !< adiabatic constants in each volume + real(wp) :: adiabatic(1:MNvol+1) = 0.0 !< adiabatic constants in each volume !<
      !< - The pressure is *not* held constant, but \f$p_l V_l^\gamma = P_l \equiv\f$\c adiabatic is constant.
      !< - Note that if \c gamma==0.0, then \c pressure==adiabatic.
      !< - \c pressure is only used in calculation of interface force-balance.
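! A worked instance of the adiabatic constraint above (illustrative numbers): with \f$\gamma = 5/3\f$ and
! \f$P_l = p_l V_l^\gamma\f$ held fixed, a volume that relaxes to half its initial size ends up with its
! pressure increased by a factor \f$2^{5/3} \approx 3.17\f$, since on output \f$p_l = P_l / V_l^\gamma\f$;
! and, as noted above, with \c gamma==0.0 the output pressure simply equals \c adiabatic.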
    - REAL :: mu(1:MNvol+1) = 0.0 !< helicity-multiplier, \f$\mu\f$, in each volume - REAL :: Ivolume(1:MNvol+1) = 0.0 !< Toroidal current constraint normalized by \f$\mu_0\f$ (\f$I_{volume} = \mu_0\cdot [A]\f$), in each volume. + real(wp) :: mu(1:MNvol+1) = 0.0 !< helicity-multiplier, \f$\mu\f$, in each volume + real(wp) :: Ivolume(1:MNvol+1) = 0.0 !< Toroidal current constraint normalized by \f$\mu_0\f$ (\f$I_{volume} = \mu_0\cdot [A]\f$), in each volume. !< This is a cumulative quantity: \f$I_{\mathcal{V},i} = \int_0^{\psi_{t,i}} \mathbf{J}\cdot\mathbf{dS}\f$. !< Physically, it represents the sum of all non-pressure driven currents. - REAL :: Isurf(1:MNvol) = 0.0 !< Toroidal current normalized by \f$\mu_0\f$ at each interface (cumulative). This is the sum of all pressure driven currents. - INTEGER :: pl(0:MNvol) = 0 !< "inside" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, + real(wp) :: Isurf(1:MNvol) = 0.0 !< Toroidal current normalized by \f$\mu_0\f$ at each interface (cumulative). This is the sum of all pressure driven currents. + integer :: pl(0:MNvol) = 0 !< "inside" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, !< where \f$\gamma\f$ is the golden mean, \f$\gamma = (1 + \sqrt 5 ) / 2 \f$. !< !< If both \f$q_l = 0\f$ *and* \f$q_r = 0\f$, then the (inside) interface rotational-transform is defined by \c iota . - INTEGER :: ql(0:MNvol) = 0 !< "inside" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, + integer :: ql(0:MNvol) = 0 !< "inside" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, !< where \f$\gamma\f$ is the golden mean, \f$\gamma = (1 + \sqrt 5 ) / 2 \f$. !< !< If both \f$q_l = 0\f$ *and* \f$q_r = 0\f$, then the (inside) interface rotational-transform is defined by \c iota . - INTEGER :: pr(0:MNvol) = 0 !< "inside" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, + integer :: pr(0:MNvol) = 0 !< "inside" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, !< where \f$\gamma\f$ is the golden mean, \f$\gamma = (1 + \sqrt 5 ) / 2 \f$. !< !< If both \f$q_l = 0\f$ *and* \f$q_r = 0\f$, then the (inside) interface rotational-transform is defined by \c iota . - INTEGER :: qr(0:MNvol) = 0 !< "inside" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, + integer :: qr(0:MNvol) = 0 !< "inside" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, !< where \f$\gamma\f$ is the golden mean, \f$\gamma = (1 + \sqrt 5 ) / 2 \f$. !< !< If both \f$q_l = 0\f$ *and* \f$q_r = 0\f$, then the (inside) interface rotational-transform is defined by \c iota . - REAL :: iota(0:MNvol) = 0.0 !< rotational-transform, \f$\mbox{$\,\iota\!\!$-}\f$, on inner side of each interface + real(wp) :: iota(0:MNvol) = 0.0 !< rotational-transform, \f$\mbox{$\,\iota\!\!$-}\f$, on inner side of each interface !<
      !< - only relevant if illogical input for \c ql and \c qr is provided
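! A worked instance of the noble-transform prescription above (illustrative input, not a recommendation):
! with ( \c pl, \c ql ) = (1,2) and ( \c pr, \c qr ) = (1,3), the inside-interface target is
! \f$ (p_l+\gamma p_r)/(q_l+\gamma q_r) = (1+\gamma)/(2+3\gamma) \approx 0.381966 \f$, where
! \f$\gamma = (1+\sqrt 5)/2\f$; if instead both \c ql and \c qr are zero, the value given in \c iota is used.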
    - INTEGER :: lp(0:MNvol) = 0 !< "outer" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, + integer :: lp(0:MNvol) = 0 !< "outer" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, !< where \f$\gamma\f$ is the golden mean, \f$\gamma = (1 + \sqrt 5 ) / 2\f$. !< !< If both \f$q_l = 0\f$ *and* \f$q_r = 0\f$, then the (outer) interface rotational-transform is defined by \c oita . - INTEGER :: lq(0:MNvol) = 0 !< "outer" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, + integer :: lq(0:MNvol) = 0 !< "outer" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, !< where \f$\gamma\f$ is the golden mean, \f$\gamma = (1 + \sqrt 5 ) / 2\f$. !< !< If both \f$q_l = 0\f$ *and* \f$q_r = 0\f$, then the (outer) interface rotational-transform is defined by \c oita . - INTEGER :: rp(0:MNvol) = 0 !< "outer" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, + integer :: rp(0:MNvol) = 0 !< "outer" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, !< where \f$\gamma\f$ is the golden mean, \f$\gamma = (1 + \sqrt 5 ) / 2\f$. !< !< If both \f$q_l = 0\f$ *and* \f$q_r = 0\f$, then the (outer) interface rotational-transform is defined by \c oita . - INTEGER :: rq(0:MNvol) = 0 !< "outer" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, + integer :: rq(0:MNvol) = 0 !< "outer" interface rotational-transform is \f$\mbox{$\,\iota\!\!$-} = (p_l+\gamma p_r)/(q_l+\gamma q_r)\f$, !< where \f$\gamma\f$ is the golden mean, \f$\gamma = (1 + \sqrt 5 ) / 2\f$. !< !< If both \f$q_l = 0\f$ *and* \f$q_r = 0\f$, then the (outer) interface rotational-transform is defined by \c oita . - REAL :: oita(0:MNvol) = 0.0 !< rotational-transform, \f$\mbox{$\,\iota\!\!$-}\f$, on outer side of each interface + real(wp) :: oita(0:MNvol) = 0.0 !< rotational-transform, \f$\mbox{$\,\iota\!\!$-}\f$, on outer side of each interface !<
      !< - only relevant if illogical input for \c ql and \c qr is provided
    - REAL :: mupftol = 1.0e-14 !< accuracy to which \f$\mu\f$ and \f$\Delta\psi_p\f$ are required + real(wp) :: mupftol = 1.0e-14 !< accuracy to which \f$\mu\f$ and \f$\Delta\psi_p\f$ are required !<
      !< - only relevant if constraints on transform, enclosed currents etc. are to be satisfied iteratively, see \c Lconstraint
    - INTEGER :: mupfits = 8 !< an upper limit on the transform/helicity constraint iterations; + integer :: mupfits = 8 !< an upper limit on the transform/helicity constraint iterations; !<
      !< - only relevant if constraints on transform, enclosed currents etc. are to be satisfied iteratively, see \c Lconstraint
      !< - constraint: \c mupfits > 0
    - REAL :: rpol = 1.0 !< poloidal extent of slab (effective radius) + real(wp) :: rpol = 1.0 !< poloidal extent of slab (effective radius) !<
      !< - only relevant if \c Igeometry==1
      !< - poloidal size is \f$L = 2\pi*\f$\c rpol
    - REAL :: rtor = 1.0 !< toroidal extent of slab (effective radius) + real(wp) :: rtor = 1.0 !< toroidal extent of slab (effective radius) !<
      !< - only relevant if \c Igeometry==1
      !< - toroidal size is \f$L = 2\pi*\f$\c rtor
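! For example (illustrative values): in slab geometry, \c Igeometry==1, with \c rpol=1.0 and \c rtor=1.0,
! the slab measures \f$2\pi \approx 6.2832\f$ in both the poloidal and toroidal directions.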
- INTEGER :: Lreflect = 0 !< =1 reflect the upper and lower bound in slab, =0 do not reflect
-
- REAL :: Rac( 0:MNtor ) = 0.0 !< stellarator symmetric coordinate axis;
- REAL :: Zas( 0:MNtor ) = 0.0 !< stellarator symmetric coordinate axis;
- REAL :: Ras( 0:MNtor ) = 0.0 !< non-stellarator symmetric coordinate axis;
- REAL :: Zac( 0:MNtor ) = 0.0 !< non-stellarator symmetric coordinate axis;
-
- REAL :: Rbc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric boundary components;
- REAL :: Zbs(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric boundary components;
- REAL :: Rbs(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric boundary components;
- REAL :: Zbc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric boundary components;
-
- REAL :: Rwc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric boundary components of wall;
- REAL :: Zws(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric boundary components of wall;
- REAL :: Rws(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric boundary components of wall;
- REAL :: Zwc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric boundary components of wall;
-
- REAL :: Vns(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric normal field at boundary; vacuum component;
- REAL :: Bns(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric normal field at boundary; plasma component;
- REAL :: Vnc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric normal field at boundary; vacuum component;
- REAL :: Bnc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric normal field at boundary; plasma component;
+ integer :: Lreflect = 0 !< =1 reflect the upper and lower bound in slab, =0 do not reflect
+
+ real(wp) :: Rac( 0:MNtor ) = 0.0 !< stellarator symmetric coordinate axis;
+ real(wp) :: Zas( 0:MNtor ) = 0.0 !< stellarator symmetric coordinate axis;
+ real(wp) :: Ras( 0:MNtor ) = 0.0 !< non-stellarator symmetric coordinate axis;
+ real(wp) :: Zac( 0:MNtor ) = 0.0 !< non-stellarator symmetric coordinate axis;
+
+ real(wp) :: Rbc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric boundary components;
+ real(wp) :: Zbs(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric boundary components;
+ real(wp) :: Rbs(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric boundary components;
+ real(wp) :: Zbc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric boundary components;
+
+ real(wp) :: Rwc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric boundary components of wall;
+ real(wp) :: Zws(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric boundary components of wall;
+ real(wp) :: Rws(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric boundary components of wall;
+ real(wp) :: Zwc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric boundary components of wall;
+
+ real(wp) :: Vns(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric normal field at boundary; vacuum component;
+ real(wp) :: Bns(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< stellarator symmetric normal field at boundary; plasma component;
+ real(wp) :: Vnc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric normal field at boundary; vacuum component;
+ real(wp) :: Bnc(-MNtor:MNtor,-MMpol:MMpol) = 0.0 !< non-stellarator symmetric normal field at boundary; plasma component;
!> @}
!> \addtogroup grp_global_numerics numericlist
!> \brief The namelist \c numericlist controls internal resolution parameters that the user rarely needs to consider.
!> @{
- INTEGER :: Linitialize = 0 !< Used to initialize geometry using a regularization / extrapolation method
+ integer :: Linitialize = 0 !< Used to initialize geometry using a regularization / extrapolation method
  !<
      !<
    • if \c Linitialize = \f$-I\f$ , where \f$I\f$ is a positive integer, !< the geometry of the \f$i=1,N_V-I\f$ surfaces constructed by an extrapolation
    • @@ -228,39 +228,39 @@ module inputlist !< are *always* given by the \c Rwc and \c Zws given in \c physicslist. !<
    • if \c Linitialize = 1, 2, it is not required to provide the geometry of the interfaces after the namelists
    • !<
    - INTEGER :: LautoinitBn = 1 !< Used to initialize \f$B_{ns}\f$ using an initial fixed-boundary calculation + integer :: LautoinitBn = 1 !< Used to initialize \f$B_{ns}\f$ using an initial fixed-boundary calculation !<
      !<
    • only relevant if \c Lfreebound = 1
    • !<
    • user-supplied \c Bns will only be considered if \c LautoinitBn = 0
    • !<
    - INTEGER :: Lzerovac = 0 !< Used to adjust vacuum field to cancel plasma field on computational boundary + integer :: Lzerovac = 0 !< Used to adjust vacuum field to cancel plasma field on computational boundary !<
      !<
    • only relevant if \c Lfreebound = 1
    • !<
    - INTEGER :: Ndiscrete = 2 !< resolution of the real space grid on which fast Fourier transforms are performed is given by \c Ndiscrete*Mpol*4 + integer :: Ndiscrete = 2 !< resolution of the real space grid on which fast Fourier transforms are performed is given by \c Ndiscrete*Mpol*4 !<
      !<
    • constraint \c Ndiscrete>0
    • !<
    - INTEGER :: Nquad = -1 !< Resolution of the Gaussian quadrature + integer :: Nquad = -1 !< Resolution of the Gaussian quadrature !<
      !<
    • The resolution of the Gaussian quadrature, \f$\displaystyle \int \!\! f(s) ds = \sum_k \omega_k f(s_k)\f$, !< in each volume is given by \c Iquad\f$_v\f$,
    • !<
    • \c Iquad\f$_v\f$ is set in preset()
    • !<
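As a worked illustration of the quadrature described above, each radial integral is evaluated as the weighted sum \f$\sum_k \omega_k f(s_k)\f$ over the Gauss points of a volume. The standalone sketch below is not part of the diff; the two-point rule and the integrand \f$f(s)=s^2\f$ are chosen purely for illustration, and the result can be checked against the exact value \f$2/3\f$.

  ! standalone sketch: a two-point Gauss-Legendre rule evaluating int_{-1}^{+1} s**2 ds = 2/3,
  ! i.e. the sum_k omega_k f(s_k) used for the radial integrals in each volume.
  program quadrature_sketch
    implicit none
    integer, parameter :: wp = kind(1.0d0)
    real(wp) :: absc(2), wght(2), integral
    integer  :: k
    absc = (/ -1.0_wp/sqrt(3.0_wp), +1.0_wp/sqrt(3.0_wp) /) ! abscissae s_k
    wght = (/  1.0_wp,               1.0_wp              /) ! weights omega_k
    integral = 0.0_wp
    do k = 1, 2
      integral = integral + wght(k) * absc(k)**2            ! accumulate omega_k * f(s_k)
    enddo
    write(*,'(a,f10.6)') ' integral = ', integral           ! prints 0.666667
  end program quadrature_sketch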
    - INTEGER :: iMpol = -4 !< Fourier resolution of straight-fieldline angle on interfaces + integer :: iMpol = -4 !< Fourier resolution of straight-fieldline angle on interfaces !<
      !<
    • the rotational-transform on the interfaces is determined by a transformation to the straight-fieldline angle, !< with poloidal resolution given by \c iMpol
    • !<
    • if \c iMpol<=0, then \c iMpol = Mpol - iMpol
    • !<
    - INTEGER :: iNtor = -4 !< Fourier resolution of straight-fieldline angle on interfaces; + integer :: iNtor = -4 !< Fourier resolution of straight-fieldline angle on interfaces; !<
      !<
    • the rotational-transform on the interfaces is determined by a transformation to the straight-fieldline angle, !< with toroidal resolution given by \c iNtor
    • !<
    • if \c iNtor<=0 then \c iNtor = Ntor - iNtor
    • !<
    • if \c Ntor==0, then the toroidal resolution of the angle transformation is set \c lNtor = 0
    • !<
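To make the defaulting rules for \c iMpol and \c iNtor concrete, the standalone sketch below expands the negative defaults; the values Mpol=8 and Ntor=4 are hypothetical equilibrium resolutions, not values taken from this diff, and the special case Ntor=0 (which zeroes the toroidal resolution of the transformation) is omitted.

  ! standalone sketch of the iMpol/iNtor defaulting rules quoted above.
  program angle_resolution_sketch
    implicit none
    integer :: Mpol = 8, Ntor = 4      ! hypothetical Fourier resolution of the equilibrium
    integer :: iMpol = -4, iNtor = -4  ! the documented default values
    if( iMpol.le.0 ) iMpol = Mpol - iMpol ! -4 expands to 8 - (-4) = 12
    if( iNtor.le.0 ) iNtor = Ntor - iNtor ! -4 expands to 4 - (-4) = 8
    write(*,'(a,2i4)') ' straight-fieldline angle resolution (iMpol,iNtor) = ', iMpol, iNtor
  end program angle_resolution_sketch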
    - INTEGER :: Lsparse = 0 !< controls method used to solve for rotational-transform on interfaces + integer :: Lsparse = 0 !< controls method used to solve for rotational-transform on interfaces !<
      !<
    • if \c Lsparse = 0, the transformation to the straight-fieldline angle is computed in Fourier space !< using a dense matrix solver, \c F04AAF
    • @@ -270,13 +270,13 @@ module inputlist !< using a sparse matrix solver, \c F11DEF !<
    • if \c Lsparse = 3, the different methods for constructing the straight-fieldline angle are compared
    • !<
    - INTEGER :: Lsvdiota = 0 !< controls method used to solve for rotational-transform on interfaces; + integer :: Lsvdiota = 0 !< controls method used to solve for rotational-transform on interfaces; !< only relevant if \c Lsparse = 0 !<
      !<
    • if \c Lsvdiota = 0, use standard linear solver to construct straight fieldline angle transformation
    • !<
    • if \c Lsvdiota = 1, use SVD method to compute rotational-transform
    • !<
    - INTEGER :: imethod = 3 !< controls iterative solution to sparse matrix + integer :: imethod = 3 !< controls iterative solution to sparse matrix !< arising in real-space transformation to the straight-fieldline angle; !< only relevant if \c Lsparse.eq.2; \see tr00ab() for details !<
      @@ -284,7 +284,7 @@ module inputlist !<
    • if \c imethod = 2, the method is \c CGS
    • !<
    • if \c imethod = 3, the method is \c BICGSTAB
    • !<
    - INTEGER :: iorder = 2 !< controls real-space grid resolution for constructing the straight-fieldline angle; + integer :: iorder = 2 !< controls real-space grid resolution for constructing the straight-fieldline angle; !< only relevant if \c Lsparse>0 !< !< determines order of finite-difference approximation to the derivatives @@ -293,7 +293,7 @@ module inputlist !<
  • if \c iorder = 4,
  • !<
  • if \c iorder = 6,
  • !< - INTEGER :: iprecon = 0 !< controls iterative solution to sparse matrix arising in real-space transformation + integer :: iprecon = 0 !< controls iterative solution to sparse matrix arising in real-space transformation !< to the straight-fieldline angle; !< only relevant if \c Lsparse.eq.2; \see tr00ab() for details !<
      @@ -301,19 +301,19 @@ module inputlist !<
    • if \c iprecon = 1, the preconditioner is `J'
    • !<
    • if \c iprecon = 2, the preconditioner is `S'
    • !<
    - REAL :: iotatol = -1.0 !< tolerance required for iterative construction of straight-fieldline angle; + real(wp) :: iotatol = -1.0 !< tolerance required for iterative construction of straight-fieldline angle; !< only relevant if \c Lsparse.ge.2 - INTEGER :: Lextrap = 0 !< geometry of innermost interface is defined by extrapolation - INTEGER :: Mregular = -1 !< maximum regularization factor + integer :: Lextrap = 0 !< geometry of innermost interface is defined by extrapolation + integer :: Mregular = -1 !< maximum regularization factor !<
      !<
    • if \c Mregular.ge.2, then \c regumm \f$_i\f$ = \c Mregular \f$/ 2 \f$ where \c m \f$_i > \f$ \c Mregular
    • !<
    - INTEGER :: Lrzaxis = 1 !< controls the guess of geometry axis in the innermost volume or initialization of interfaces + integer :: Lrzaxis = 1 !< controls the guess of geometry axis in the innermost volume or initialization of interfaces !<
      !<
    • if \c Lrzaxis = 1, the centroid is used
    • !<
    • if \c Lrzaxis = 2, the Jacobian \f$m=1\f$ harmonic elimination method is used
    • !<
    - INTEGER :: Ntoraxis = 3 !< the number of \f$n\f$ harmonics used in the Jacobian \f$m=1\f$ harmonic elimination method; + integer :: Ntoraxis = 3 !< the number of \f$n\f$ harmonics used in the Jacobian \f$m=1\f$ harmonic elimination method; !< only relevant if \c Lrzaxis.ge.1 . !> @} @@ -323,7 +323,7 @@ module inputlist !> The transformation to straight-fieldline coordinates is singular when the rotational-transform of the interfaces is rational; !> however, the rotational-transform is still well defined. !> @{ - INTEGER :: LBeltrami = 4 !< Control flag for solution of Beltrami equation + integer :: LBeltrami = 4 !< Control flag for solution of Beltrami equation !<
      !<
    • if \c LBeltrami = 1,3,5 or 7, (SQP) then the Beltrami field in each volume is constructed !< by minimizing the magnetic energy with the constraint of fixed helicity; @@ -352,7 +352,7 @@ module inputlist !<
    • if \c LBeltrami = 7, all three methods will be employed; !<
    !< - INTEGER :: Linitgues = 1 !< controls how initial guess for Beltrami field is constructed + integer :: Linitgues = 1 !< controls how initial guess for Beltrami field is constructed !<
      !<
    • only relevant for routines that require an initial guess for the Beltrami fields, such as the SQP and Newton methods, !< or the sparse linear solver; @@ -361,13 +361,13 @@ module inputlist !<
    • if \c Linitgues = 2, the initial guess for the Beltrami field is read from file !<
    • if \c Linitgues = 3, the initial guess for the Beltrami field will be randomized with the maximum \c maxrndgues !<
    - INTEGER :: Lposdef = 0 !< redundant; - REAL :: maxrndgues = 1.0 !< the maximum random number of the Beltrami field if \c Linitgues = 3 - INTEGER :: Lmatsolver = 3 !< 1 for LU factorization, 2 for GMRES, 3 for GMRES matrix-free - INTEGER :: NiterGMRES = 200 !< number of max iteration for GMRES - REAL :: epsGMRES = 1e-14 !< the precision of GMRES - INTEGER :: LGMRESprec = 1 !< type of preconditioner for GMRES, 1 for ILU sparse matrix - REAL :: epsILU = 1e-12 !< the precision of incomplete LU factorization for preconditioning + integer :: Lposdef = 0 !< redundant; + real(wp) :: maxrndgues = 1.0 !< the maximum random number of the Beltrami field if \c Linitgues = 3 + integer :: Lmatsolver = 3 !< 1 for LU factorization, 2 for GMRES, 3 for GMRES matrix-free + integer :: NiterGMRES = 200 !< number of max iteration for GMRES + real(wp) :: epsGMRES = 1e-14 !< the precision of GMRES + integer :: LGMRESprec = 1 !< type of preconditioner for GMRES, 1 for ILU sparse matrix + real(wp) :: epsILU = 1e-12 !< the precision of incomplete LU factorization for preconditioning !> @} !> \addtogroup grp_global_global globallist @@ -385,39 +385,39 @@ module inputlist !> where \f$\psi_v\equiv\f$ normalized toroidal flux, \c tflux, and \f$\omega\equiv\f$ \c wpoloidal. !> !> @{ - INTEGER :: Lfindzero = 0 !< use Newton methods to find zero of force-balance, which is computed by dforce() + integer :: Lfindzero = 0 !< use Newton methods to find zero of force-balance, which is computed by dforce() !<
      !<
    • if \c Lfindzero = 0, then dforce() is called once !< to compute the Beltrami fields consistent with the given geometry and constraints
    • !<
    • if \c Lfindzero = 1, then call \c C05NDF (uses function values only), which iteratively calls dforce()
    • !<
    • if \c Lfindzero = 2, then call \c C05PDF (uses derivative information), which iteratively calls dforce()
    • !<
    - REAL :: escale = 0.0 !< controls the weight factor, \c BBweight, in the force-imbalance harmonics + real(wp) :: escale = 0.0 !< controls the weight factor, \c BBweight, in the force-imbalance harmonics !<
      !<
    • \c BBweight(i) \f$\displaystyle \equiv \texttt{opsilon} \times \exp\left[-\texttt{escale} \times (m_i^2+n_i^2) \right]\f$
    • !<
    • defined in preset() ; used in dforce()
    • !<
    • also see Eqn.\f$(\ref{eq:forcebalancemn_global})\f$
    • !<
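The weight formula above can be evaluated directly. In the standalone sketch below the harmonic count mn, the mode-number arrays im and in_, and the non-zero escale are illustrative stand-ins, not values taken from this diff.

  ! standalone sketch of BBweight(i) = opsilon * exp( -escale * (m_i**2 + n_i**2) ).
  program bbweight_sketch
    implicit none
    integer, parameter :: wp = kind(1.0d0)
    integer, parameter :: mn = 3
    integer  :: im(mn) = (/ 0, 1, 2 /), in_(mn) = (/ 0, 0, 1 /) ! hypothetical (m_i,n_i)
    real(wp) :: opsilon = 1.0_wp, escale = 0.5_wp               ! illustrative weights
    real(wp) :: BBweight(mn)
    BBweight = opsilon * exp( -escale * real( im**2 + in_**2, wp ) )
    write(*,'(a,3es12.4)') ' BBweight = ', BBweight
  end program bbweight_sketch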
    - REAL :: opsilon = 1.0 !< weighting of force-imbalance + real(wp) :: opsilon = 1.0 !< weighting of force-imbalance !<
      !<
    • used in dforce(); also see Eqn.\f$(\ref{eq:forcebalancemn_global})\f$
    • !<
    - REAL :: pcondense = 2.0 !< spectral condensation parameter + real(wp) :: pcondense = 2.0 !< spectral condensation parameter !<
      !<
    • used in preset() to define \c mmpp(i) \f$\equiv m_i^p\f$, where \f$p\equiv \f$ \c pcondense
    • !<
    • the angle freedom is exploited to minimize \f$\displaystyle \texttt{epsilon} \sum_{i} m_i^p (R_{i}^2+Z_{i}^2)\f$ !< with respect to tangential variations in the interface geometry
    • !<
    • also see Eqn.\f$(\ref{eq:spectralbalancemn_global})\f$
    • !<
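A standalone sketch of the quantity being minimized follows; the mode numbers im and the interface harmonics Rmn, Zmn are hypothetical stand-ins for the quantities assembled in preset() and dforce().

  ! standalone sketch of epsilon * sum_i m_i**p * ( R_i**2 + Z_i**2 ), with mmpp(i) = m_i**p.
  program spectral_width_sketch
    implicit none
    integer, parameter :: wp = kind(1.0d0)
    integer, parameter :: mn = 3
    integer  :: im(mn)  = (/ 0, 1, 2 /)                     ! hypothetical poloidal mode numbers
    real(wp) :: Rmn(mn) = (/ 1.0_wp, 0.3_wp, 0.05_wp /)     ! hypothetical interface harmonics
    real(wp) :: Zmn(mn) = (/ 0.0_wp, 0.3_wp, 0.05_wp /)
    real(wp) :: pcondense = 2.0_wp, epsilon_ = 1.0_wp
    real(wp) :: mmpp(mn), spectralwidth
    mmpp = real( im, wp )**pcondense                        ! mmpp(i) = m_i**p
    spectralwidth = epsilon_ * sum( mmpp * ( Rmn**2 + Zmn**2 ) )
    write(*,'(a,es12.4)') ' spectral-width measure = ', spectralwidth
  end program spectral_width_sketch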
    - REAL :: epsilon = 0.0 !< weighting of spectral-width constraint + real(wp) :: epsilon = 0.0 !< weighting of spectral-width constraint !<
      !<
    • used in dforce(); also see Eqn.\f$(\ref{eq:spectralbalancemn_global})\f$
    • !<
    - REAL :: wpoloidal = 1.0 !< "star-like" poloidal angle constraint radial exponential factor + real(wp) :: wpoloidal = 1.0 !< "star-like" poloidal angle constraint radial exponential factor !< used in preset() to construct \c sweight - REAL :: upsilon = 1.0 !< weighting of "star-like" poloidal angle constraint + real(wp) :: upsilon = 1.0 !< weighting of "star-like" poloidal angle constraint !< used in preset() to construct \c sweight - REAL :: forcetol = 1.0e-10 !< required tolerance in force-balance error; only used as an initial check + real(wp) :: forcetol = 1.0e-10 !< required tolerance in force-balance error; only used as an initial check !<
      !<
    • if the initially supplied interfaces are consistent with force-balance to within \c forcetol !< then the geometry of the interfaces is not altered
    • @@ -426,13 +426,13 @@ module inputlist !<
    • to force execution of either \c C05NDF or \c C05PDF, regardless of the initial force imbalance, !< set \c forcetol < 0
    • !<
    - REAL :: c05xmax = 1.0e-06 !< required tolerance in position, \f${\bf x} \equiv \{ R_{i,v}, Z_{i,v}\}\f$ - REAL :: c05xtol = 1.0e-12 !< required tolerance in position, \f${\bf x} \equiv \{ R_{i,v}, Z_{i,v}\}\f$ + real(wp) :: c05xmax = 1.0e-06 !< required tolerance in position, \f${\bf x} \equiv \{ R_{i,v}, Z_{i,v}\}\f$ + real(wp) :: c05xtol = 1.0e-12 !< required tolerance in position, \f${\bf x} \equiv \{ R_{i,v}, Z_{i,v}\}\f$ !<
      !<
    • used by both \c C05NDF and \c C05PDF; see the NAG documents for further details on how the error is defined
    • !<
    • constraint \c c05xtol > 0.0
    • !<
    - REAL :: c05factor = 1.0e-02 !< used to control initial step size in + real(wp) :: c05factor = 1.0e-02 !< used to control initial step size in !< \c C05NDF and \c C05PDF !<
      !<
    • constraint \c c05factor > 0.0
    • @@ -443,19 +443,19 @@ module inputlist !<
    • only used if \c Lfindzero = 2
    • !<
    • only used in newton()
    • !<
    - INTEGER :: mfreeits = 0 !< maximum allowed free-boundary iterations + integer :: mfreeits = 0 !< maximum allowed free-boundary iterations !<
      !<
    • only used if \c Lfreebound = 1
    • !<
    • only used in xspech()
    • !<
    - REAL :: bnstol = 1.0e-06 !< redundant; - REAL :: bnsblend = 0.666 !< redundant; - REAL :: gBntol = 1.0e-06 !< required tolerance in free-boundary iterations + real(wp) :: bnstol = 1.0e-06 !< redundant; + real(wp) :: bnsblend = 0.666 !< redundant; + real(wp) :: gBntol = 1.0e-06 !< required tolerance in free-boundary iterations !<
      !<
    • only used if \c Lfreebound = 1
    • !<
    • only used in xspech()
    • !<
    - REAL :: gBnbld = 0.666 !< normal blend + real(wp) :: gBnbld = 0.666 !< normal blend !<
      !<
    • The "new" magnetic field at the computational boundary produced by the plasma currents is updated using a Picard scheme: !< \f{eqnarray}{ ({\bf B}\cdot{\bf n})^{j+1} = \texttt{gBnbld} \times ({\bf B}\cdot{\bf n})^{j} @@ -465,27 +465,27 @@ module inputlist !<
    • only used if \c Lfreebound = 1
    • !<
    • only used in xspech()
    • !<
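The Picard update above is only partially visible because the eqnarray is cut by the hunk boundary; the standalone sketch below assumes the usual complementary weighting of the freshly computed normal field by (1 - gBnbld), and the values Bn_old and Bn_new are hypothetical stand-ins.

  ! standalone sketch of the Picard blend described above (the (1-gBnbld) term is an assumption).
  program picard_blend_sketch
    implicit none
    integer, parameter :: wp = kind(1.0d0)
    real(wp) :: gBnbld = 0.666_wp
    real(wp) :: Bn_old = 1.00_wp      ! (B.n)^j from the previous free-boundary iteration
    real(wp) :: Bn_new = 0.80_wp      ! (B.n) just computed by the virtual-casing integral
    real(wp) :: Bn_next
    Bn_next = gBnbld * Bn_old + ( 1.0_wp - gBnbld ) * Bn_new   ! (B.n)^{j+1}
    write(*,'(a,f8.4)') ' blended normal field = ', Bn_next
  end program picard_blend_sketch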
- REAL :: vcasingeps = 1.e-12 !< regularization of Biot-Savart; see bnorml(), casing()
- REAL :: vcasingtol = 1.e-08 !< accuracy on virtual casing integral; see bnorml(), casing()
- INTEGER :: vcasingits = 8 !< minimum number of calls to adaptive virtual casing routine; see casing()
- INTEGER :: vcasingper = 1 !< periods of integragion in adaptive virtual casing routine; see casing()
- INTEGER :: mcasingcal = 8 !< minimum number of calls to adaptive virtual casing routine; see casing(); redundant;
+ real(wp) :: vcasingeps = 1.e-12 !< regularization of Biot-Savart; see bnorml(), casing()
+ real(wp) :: vcasingtol = 1.e-08 !< accuracy on virtual casing integral; see bnorml(), casing()
+ integer :: vcasingits = 8 !< minimum number of calls to adaptive virtual casing routine; see casing()
+ integer :: vcasingper = 1 !< periods of integration in adaptive virtual casing routine; see casing()
+ integer :: mcasingcal = 8 !< minimum number of calls to adaptive virtual casing routine; see casing(); redundant;
!> @}
!> \addtogroup grp_global_diagnostics diagnosticslist
!> \brief The namelist \c diagnosticslist controls post-processor diagnostics, such as Poincaré plot resolution, etc.
!> @{
- REAL :: odetol = 1.0e-07 !< o.d.e. integration tolerance for all field line tracing routines
- REAL :: absreq = 1.0e-08 !< redundant
- REAL :: relreq = 1.0e-08 !< redundant
- REAL :: absacc = 1.0e-04 !< redundant
- REAL :: epsr = 1.0e-08 !< redundant
- INTEGER :: nPpts = 0 !< number of toroidal transits used (per trajectory) in following field lines
+ real(wp) :: odetol = 1.0e-07 !< o.d.e. integration tolerance for all field line tracing routines
+ real(wp) :: absreq = 1.0e-08 !< redundant
+ real(wp) :: relreq = 1.0e-08 !< redundant
+ real(wp) :: absacc = 1.0e-04 !< redundant
+ real(wp) :: epsr = 1.0e-08 !< redundant
+ integer :: nPpts = 0 !< number of toroidal transits used (per trajectory) in following field lines
  !< for constructing Poincaré plots;
  !< if \c nPpts<1, no Poincaré plot is constructed;
- REAL :: Ppts = 0.0 !< stands for Poincare plot theta start. Chose at which angle (normalized over \f$\pi\f$) the Poincare field-line tracing start.
- INTEGER :: nPtrj(1:MNvol+1) = -1 !< number of trajectories in each annulus to be followed in constructing Poincaré plot
+ real(wp) :: Ppts = 0.0 !< Poincaré plot theta start: the angle (normalized by \f$\pi\f$) at which the Poincaré field-line tracing starts.
+ integer :: nPtrj(1:MNvol+1) = -1 !< number of trajectories in each annulus to be followed in constructing Poincaré plot
  !<
      !<
    • if \c nPtrj(l)<0, then \c nPtrj(l) = Ni(l), !< where \c Ni(l) is the grid resolution used to construct the Beltrami field in volume \f$l\f$
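The fallback above amounts to one traced field line per radial grid point of the Beltrami solve. In the standalone sketch below, Mvol and the Ni values are hypothetical stand-ins for the per-volume grid resolutions.

  ! standalone sketch of the nPtrj default: -1 entries fall back to the grid resolution Ni(l).
  program poincare_default_sketch
    implicit none
    integer, parameter :: Mvol = 3
    integer :: nPtrj(Mvol+1) = -1                 ! the documented default
    integer :: Ni(Mvol+1)    = (/ 4, 6, 8, 8 /)   ! hypothetical grid resolutions
    where( nPtrj.lt.0 ) nPtrj = Ni                ! nPtrj(l) = Ni(l) wherever the user left -1
    write(*,'(a,4i3)') ' trajectories per annulus: ', nPtrj
  end program poincare_default_sketch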
    • @@ -493,13 +493,13 @@ module inputlist LOGICAL :: LHevalues = .false. !< to compute eigenvalues of \f$\nabla {\bf F}\f$ LOGICAL :: LHevectors = .false. !< to compute eigenvectors (and also eigenvalues) of \f$\nabla {\bf F}\f$ LOGICAL :: LHmatrix = .false. !< to compute and write to file the elements of \f$\nabla {\bf F}\f$ - INTEGER :: Lperturbed = 0 !< to compute linear, perturbed equilibrium - INTEGER :: dpp = -1 !< perturbed harmonic - INTEGER :: dqq = -1 !< perturbed harmonic - INTEGER :: Lerrortype = 0 !< the type of error output for Lcheck=1 - INTEGER :: Ngrid = -1 !< the number of points to output in the grid, -1 for Lrad(vvol) - REAL :: dRZ = 1E-5 !< difference in geometry for finite difference estimate (debug only) - INTEGER :: Lcheck = 0 !< implement various checks + integer :: Lperturbed = 0 !< to compute linear, perturbed equilibrium + integer :: dpp = -1 !< perturbed harmonic + integer :: dqq = -1 !< perturbed harmonic + integer :: Lerrortype = 0 !< the type of error output for Lcheck=1 + integer :: Ngrid = -1 !< the number of points to output in the grid, -1 for Lrad(vvol) + real(wp) :: dRZ = 1E-5 !< difference in geometry for finite difference estimate (debug only) + integer :: Lcheck = 0 !< implement various checks !<
        !<
      • if \c Lcheck = 0, no additional check on the calculation is performed
      • !<
      • if \c Lcheck = 1, the error in the current, i.e. \f$\nabla\times{\bf B}-\mu{\bf B}\f$ is computed as a post-diagnostic
      • @@ -538,8 +538,8 @@ module inputlist !<
      LOGICAL :: Ltiming = .false. !< to check timing LOGICAL :: Ltransform = .false. !< to evaluate iota and straight field line coordinates - REAL :: fudge = 1.0e-00 !< redundant - REAL :: scaling = 1.0e-00 !< redundant + real(wp) :: fudge = 1.0e-00 !< redundant + real(wp) :: scaling = 1.0e-00 !< redundant !> @} diff --git a/src/intghs.f90 b/src/intghs.F90 similarity index 79% rename from src/intghs.f90 rename to src/intghs.F90 index 66777f34..72befee9 100644 --- a/src/intghs.f90 +++ b/src/intghs.F90 @@ -71,25 +71,25 @@ !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! module intghs_module - + use mod_kinds, only: wp => dp !> \brief This calculates the integral of something related to matrix-vector-multiplication. !> \todo Zhisong might need to update the documentation of this type. type intghs_workspace - REAL, allocatable :: efmn(:,:) !< This is efmn. - REAL, allocatable :: ofmn(:,:) !< This is ofmn. - REAL, allocatable :: cfmn(:,:) !< - REAL, allocatable :: sfmn(:,:) !< - REAL, allocatable :: evmn(:,:) !< - REAL, allocatable :: odmn(:,:) !< - REAL, allocatable :: ijreal(:,:) !< - REAL, allocatable :: jireal(:,:) !< - REAL, allocatable :: jkreal(:,:) !< - REAL, allocatable :: kjreal(:,:) !< - REAL, allocatable :: Bloweremn(:,:,:) !< - REAL, allocatable :: Bloweromn(:,:,:) !< - REAL, allocatable :: gBupper(:,:,:) !< - REAL, allocatable :: Blower(:,:,:) !< - REAL, allocatable :: basis(:,:,:,:) !< + real(wp), allocatable :: efmn(:,:) !< This is efmn. + real(wp), allocatable :: ofmn(:,:) !< This is ofmn. + real(wp), allocatable :: cfmn(:,:) !< + real(wp), allocatable :: sfmn(:,:) !< + real(wp), allocatable :: evmn(:,:) !< + real(wp), allocatable :: odmn(:,:) !< + real(wp), allocatable :: ijreal(:,:) !< + real(wp), allocatable :: jireal(:,:) !< + real(wp), allocatable :: jkreal(:,:) !< + real(wp), allocatable :: kjreal(:,:) !< + real(wp), allocatable :: Bloweremn(:,:,:) !< + real(wp), allocatable :: Bloweromn(:,:,:) !< + real(wp), allocatable :: gBupper(:,:,:) !< + real(wp), allocatable :: Blower(:,:,:) !< + real(wp), allocatable :: basis(:,:,:,:) !< end type TYPE(intghs_workspace) :: wk !< This is an instance of the intghs_workspace type. @@ -106,7 +106,7 @@ end module intghs_module !> @param lrad !> @param idx subroutine intghs( lquad, mn, lvol, lrad, idx ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, two, pi, pi2 @@ -134,16 +134,32 @@ subroutine intghs( lquad, mn, lvol, lrad, idx ) use intghs_module !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lquad, mn, lvol, lrad, idx +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lquad, mn, lvol, lrad, idx + + integer :: jquad, ll, pp, ll1, pp1, uv, ii, jj, io, mn2, lp2, mn2_max, lp2_max, nele, ideriv, ifail, Lcurvature + integer :: mi, ni, bid + + real(wp) :: lss, jthweight, Tl, Dl, sbar, dfactor, ik, w(1:lquad) - INTEGER :: jquad, ll, pp, ll1, pp1, uv, ii, jj, io, mn2, lp2, mn2_max, lp2_max, nele, ideriv, ifail, Lcurvature - INTEGER :: mi, ni, bid - REAL :: lss, jthweight, Tl, Dl, sbar, dfactor, ik, w(1:lquad) + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN( intghs ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -172,7 +188,12 @@ subroutine intghs( lquad, mn, lvol, lrad, idx ) endif if (.not. Lsavedguvij) then - WCALL( intghs, compute_guvijsave, (lquad, lvol, ideriv, Lcurvature) ) + + cput = MPI_WTIME() + Tintghs = Tintghs + ( cput-cpuo ) + call compute_guvijsave(lquad, lvol, ideriv, Lcurvature) + cpuo = MPI_WTIME() + endif do jquad = 1, lquad @@ -333,7 +354,12 @@ subroutine intghs( lquad, mn, lvol, lrad, idx ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN( intghs ) + +9999 continue + cput = MPI_WTIME() + Tintghs = Tintghs + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -345,7 +371,7 @@ end subroutine intghs !> !> @param lvol subroutine intghs_workspace_init(lvol) - + use mod_kinds, only: wp => dp use constants, only : zero use inputlist, only : Mpol, Lrad, Wmacros, Wintghs use fileunits, only : ounit @@ -353,32 +379,98 @@ subroutine intghs_workspace_init(lvol) use allglobal, only : Ntz, mn, Iquad, myid, ncpu, cpus, MPI_COMM_SPEC use intghs_module - LOCALS - INTEGER, INTENT(IN) :: lvol - INTEGER :: lquad +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, INTENT(IN) :: lvol + integer :: lquad + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(intghs) lquad = Iquad(lvol) - SALLOCATE(wk%gBupper, (1:Ntz,3,lquad), zero) - SALLOCATE(wk%Blower, (1:Ntz,3,lquad), zero) - SALLOCATE(wk%Bloweremn, (1:mn,3,lquad), zero) - SALLOCATE(wk%Bloweromn, (1:mn,3,lquad), zero) - SALLOCATE(wk%efmn, (1:mn,lquad), zero) - SALLOCATE(wk%ofmn, (1:mn,lquad), zero) - SALLOCATE(wk%evmn, (1:mn,lquad), zero) - SALLOCATE(wk%odmn, (1:mn,lquad), zero) - SALLOCATE(wk%cfmn, (1:mn,lquad), zero) - SALLOCATE(wk%sfmn, (1:mn,lquad), zero) - SALLOCATE(wk%ijreal, (1:mn,lquad), zero) - SALLOCATE(wk%jkreal, (1:mn,lquad), zero) - SALLOCATE(wk%jireal, (1:mn,lquad), zero) - SALLOCATE(wk%kjreal, (1:mn,lquad), zero) - SALLOCATE(wk%basis, (0:Lrad(lvol),0:mpol,0:1, lquad), zero) - - RETURN(intghs) + + allocate( wk%gBupper(1:Ntz,3,lquad), stat=astat ) + wk%gBupper(1:Ntz,3,lquad) = zero + + + allocate( wk%Blower(1:Ntz,3,lquad), stat=astat ) + wk%Blower(1:Ntz,3,lquad) = zero + + + allocate( wk%Bloweremn(1:mn,3,lquad), stat=astat ) + wk%Bloweremn(1:mn,3,lquad) = zero + + + allocate( wk%Bloweromn(1:mn,3,lquad), stat=astat ) + wk%Bloweromn(1:mn,3,lquad) = zero + + + allocate( wk%efmn(1:mn,lquad), stat=astat ) + wk%efmn(1:mn,lquad) = zero + + + allocate( wk%ofmn(1:mn,lquad), stat=astat ) + wk%ofmn(1:mn,lquad) = zero + + + allocate( wk%evmn(1:mn,lquad), stat=astat ) + wk%evmn(1:mn,lquad) = zero + + + allocate( wk%odmn(1:mn,lquad), stat=astat ) + wk%odmn(1:mn,lquad) = zero + + + allocate( wk%cfmn(1:mn,lquad), stat=astat ) + wk%cfmn(1:mn,lquad) = zero + + + allocate( wk%sfmn(1:mn,lquad), stat=astat ) + wk%sfmn(1:mn,lquad) = zero + + + allocate( wk%ijreal(1:mn,lquad), stat=astat ) + wk%ijreal(1:mn,lquad) = zero + + + allocate( wk%jkreal(1:mn,lquad), stat=astat ) + wk%jkreal(1:mn,lquad) = zero + + + allocate( wk%jireal(1:mn,lquad), stat=astat ) + wk%jireal(1:mn,lquad) = zero + + + allocate( wk%kjreal(1:mn,lquad), stat=astat ) + wk%kjreal(1:mn,lquad) = zero + + + allocate( wk%basis(0:Lrad(lvol),0:mpol,0:1, lquad), stat=astat ) + wk%basis(0:Lrad(lvol),0:mpol,0:1, lquad) = zero + + + +9999 continue + cput = MPI_WTIME() + Tintghs = Tintghs + ( cput-cpuo ) + return + end subroutine intghs_workspace_init @@ -386,33 +478,84 @@ end subroutine intghs_workspace_init !> !> @param lvol subroutine intghs_workspace_destroy() - + use mod_kinds, only: wp => dp use inputlist, only : Wmacros, Wintghs use fileunits, only : ounit use cputiming, only : Tintghs use allglobal, only : myid, ncpu, cpus, MPI_COMM_SPEC use intghs_module - LOCALS - - BEGIN(intghs) - - DALLOCATE(wk%gBupper) - DALLOCATE(wk%Blower) - DALLOCATE(wk%Bloweremn) - DALLOCATE(wk%Bloweromn) - DALLOCATE(wk%efmn) - DALLOCATE(wk%ofmn) - DALLOCATE(wk%evmn) - DALLOCATE(wk%odmn) - DALLOCATE(wk%cfmn) - DALLOCATE(wk%sfmn) - DALLOCATE(wk%ijreal) - DALLOCATE(wk%jkreal) - DALLOCATE(wk%jireal) - DALLOCATE(wk%kjreal) - DALLOCATE(wk%basis) - - RETURN(intghs) + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + + deallocate(wk%gBupper,stat=astat) + + + deallocate(wk%Blower,stat=astat) + + + deallocate(wk%Bloweremn,stat=astat) + + + deallocate(wk%Bloweromn,stat=astat) + + + deallocate(wk%efmn,stat=astat) + + + deallocate(wk%ofmn,stat=astat) + + + deallocate(wk%evmn,stat=astat) + + + deallocate(wk%odmn,stat=astat) + + + deallocate(wk%cfmn,stat=astat) + + + deallocate(wk%sfmn,stat=astat) + + + deallocate(wk%ijreal,stat=astat) + + + deallocate(wk%jkreal,stat=astat) + + + deallocate(wk%jireal,stat=astat) + + + deallocate(wk%kjreal,stat=astat) + + + deallocate(wk%basis,stat=astat) + + + +9999 continue + cput = MPI_WTIME() + Tintghs = Tintghs + ( cput-cpuo ) + return + end subroutine intghs_workspace_destroy diff --git a/src/jo00aa.f90 b/src/jo00aa.F90 similarity index 89% rename from src/jo00aa.f90 rename to src/jo00aa.F90 index 8d594312..dfbd7df6 100644 --- a/src/jo00aa.f90 +++ b/src/jo00aa.F90 @@ -68,7 +68,7 @@ !> @param[in] lquad degree of Gaussian quadrature !> @param[in] mn number of Fourier harmonics subroutine jo00aa( lvol, Ntz, lquad, mn ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, two, pi2 @@ -93,30 +93,58 @@ subroutine jo00aa( lvol, Ntz, lquad, mn ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + ! these are really global, but are included in argument list to remove allocations - INTEGER, intent(in) :: lvol, Ntz, lquad, mn + integer, intent(in) :: lvol, Ntz, lquad, mn + + integer :: jquad, Lcurvature, ll, ii, jj, kk, uu, ideriv, twolquad, mm, jk - INTEGER :: jquad, Lcurvature, ll, ii, jj, kk, uu, ideriv, twolquad, mm, jk + real(wp) :: lss, sbar, sbarhim(0:2), gBu(1:Ntz,1:3,0:3), gJu(1:Ntz,1:3), jerror(1:3), jerrormax(1:3), intvol + real(wp) :: B_cartesian(1:Ntz,1:3), J_cartesian(1:Ntz,1:3) - REAL :: lss, sbar, sbarhim(0:2), gBu(1:Ntz,1:3,0:3), gJu(1:Ntz,1:3), jerror(1:3), jerrormax(1:3), intvol - REAL :: B_cartesian(1:Ntz,1:3), J_cartesian(1:Ntz,1:3) + real(wp) :: Atemn(1:mn,0:2), Azemn(1:mn,0:2), Atomn(1:mn,0:2), Azomn(1:mn,0:2) - REAL :: Atemn(1:mn,0:2), Azemn(1:mn,0:2), Atomn(1:mn,0:2), Azomn(1:mn,0:2) + integer :: itype, icdgqf + real(wp) :: aa, bb, cc, dd, weight(1:lquad+1), abscis(1:lquad), workfield(1:2*lquad) - INTEGER :: itype, icdgqf - REAL :: aa, bb, cc, dd, weight(1:lquad+1), abscis(1:lquad), workfield(1:2*lquad) + real(wp) :: zeta, teta, st(2), Bst(2) - REAL :: zeta, teta, st(2), Bst(2) - BEGIN(jo00aa) + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( jo00aa, lquad.lt.1, invalid lquad supplied to jo00aa ) - FATAL( jo00aa, lvol.lt.1 .or. 
lvol.gt.Mvol, invalid interface label ) + + if( lquad.lt.1 ) then + write(6,'("jo00aa : fatal : myid=",i3," ; lquad.lt.1 ; invalid lquad supplied to jo00aa ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "jo00aa : lquad.lt.1 : invalid lquad supplied to jo00aa ;" + endif + + + if( lvol.lt.1 .or. lvol.gt.Mvol ) then + write(6,'("jo00aa : fatal : myid=",i3," ; lvol.lt.1 .or. lvol.gt.Mvol ; invalid interface label ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "jo00aa : lvol.lt.1 .or. lvol.gt.Mvol : invalid interface label ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -134,7 +162,7 @@ subroutine jo00aa( lvol, Ntz, lquad, mn ) ! write(ounit,'("jo00aa : WARNING ! : THE ERROR FLAGS RETURNED BY CDGQF SEEM DIFFERENT TO NAG:D01BCF (may be trivial, but please revise); 2018/01/10;")') - cput= GETTIME + cput= MPI_WTIME() select case( icdgqf ) ! 123456789012345 case( 0 ) ; if( Wjo00aa ) write(ounit,1000) cput-cpus, myid, lvol, icdgqf, "success ", abscis(1:lquad) case( 1 ) ; write(ounit,1000) cput-cpus, myid, lvol, icdgqf, "failed ", abscis(1:lquad) @@ -151,7 +179,13 @@ subroutine jo00aa( lvol, Ntz, lquad, mn ) 1000 format("jo00aa : ",f10.2," : myid=",i3," ; lvol=",i3," ; icdgqf=",i3," ; "a15" ;":" abscissae ="99f10.6) 1001 format("jo00aa : ", 10x ," : "3x" "3x" "3x" "15x" ;":" weights ="99f10.6) - FATAL( jo00aa, icdgqf.ne.0, failed to construct Gaussian integration abscisae and weights ) + + if( icdgqf.ne.0 ) then + write(6,'("jo00aa : fatal : myid=",i3," ; icdgqf.ne.0 ; failed to construct Gaussian integration abscisae and weights ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "jo00aa : icdgqf.ne.0 : failed to construct Gaussian integration abscisae and weights ;" + endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -178,7 +212,12 @@ subroutine jo00aa( lvol, Ntz, lquad, mn ) Lcurvature = 2 - WCALL( jo00aa, coords, ( lvol, lss, Lcurvature, Ntz, mn ) ) ! returns coordinates, metrics, . . . + + cput = MPI_WTIME() + Tjo00aa = Tjo00aa + ( cput-cpuo ) + call coords( lvol, lss, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + ! returns coordinates, metrics, . . . if (Lcoordinatesingularity) then ! Zernike 1 Jul 2019 call get_zernike_d2(sbar, Lrad(lvol), mpol, zernike) @@ -388,10 +427,10 @@ subroutine jo00aa( lvol, Ntz, lquad, mn ) beltramierror(lvol,7:9) = jerrormax(1:3) ! the max error if (Lerrortype.eq.1 .and. Igeometry .eq. 3) then - cput = GETTIME ; write(ounit,1002) cput-cpus, myid, lvol, Lrad(lvol), jerror(1:3), cput-cpui ! write error to screen; + cput = MPI_WTIME() ; write(ounit,1002) cput-cpus, myid, lvol, Lrad(lvol), jerror(1:3), cput-cpui ! write error to screen; ; ; write(ounit,1003) cput-cpus, myid, lvol, Lrad(lvol), jerrormax(1:3), cput-cpui ! write error to screen; else - cput = GETTIME ; write(ounit,1004) cput-cpus, myid, lvol, Lrad(lvol), jerror(1:3), cput-cpui ! write error to screen; + cput = MPI_WTIME() ; write(ounit,1004) cput-cpus, myid, lvol, Lrad(lvol), jerror(1:3), cput-cpui ! write error to screen; ; ; write(ounit,1005) cput-cpus, myid, lvol, Lrad(lvol), jerrormax(1:3), cput-cpui ! 
write error to screen; endif @@ -411,7 +450,12 @@ subroutine jo00aa( lvol, Ntz, lquad, mn ) do kk = 0, Nz-1 ; zeta = kk * pi2nfp / Nz do jj = 0, Nt-1 ; teta = jj * pi2 / Nt ; jk = 1 + jj + kk*Nt ; st(1:2) = (/ one, teta /) - WCALL( jo00aa, bfield, ( zeta, st(1:Node), Bst(1:Node) ) ) + + cput = MPI_WTIME() + Tjo00aa = Tjo00aa + ( cput-cpuo ) + call bfield( zeta, st(1:Node), Bst(1:Node) ) + cpuo = MPI_WTIME() + jerror(2) = max(jerror(2), abs(Bst(1) * gBzeta)) @@ -422,14 +466,19 @@ subroutine jo00aa( lvol, Ntz, lquad, mn ) do kk = 0, Nz-1 ; zeta = kk * pi2nfp / Nz do jj = 0, Nt-1 ; teta = jj * pi2 / Nt ; jk = 1 + jj + kk*Nt ; st(1:2) = (/ -one, teta /) - WCALL( jo00aa, bfield, ( zeta, st(1:Node), Bst(1:Node) ) ) + + cput = MPI_WTIME() + Tjo00aa = Tjo00aa + ( cput-cpuo ) + call bfield( zeta, st(1:Node), Bst(1:Node) ) + cpuo = MPI_WTIME() + jerror(1) = max(jerror(1), abs(Bst(1) * gBzeta)) enddo enddo endif - cput = GETTIME ; write(ounit,1006) cput-cpus, myid, lvol, Lrad(lvol), jerror(1:2), cput-cpui ! write error to screen; + cput = MPI_WTIME() ; write(ounit,1006) cput-cpus, myid, lvol, Lrad(lvol), jerror(1:2), cput-cpui ! write error to screen; ! check fluxes Bst = zero @@ -450,14 +499,19 @@ subroutine jo00aa( lvol, Ntz, lquad, mn ) Bst(2) = abs(Bst(2) - dpflux(lvol)) endif - cput = GETTIME ; write(ounit,1007) cput-cpus, myid, lvol, Lrad(lvol), Bst(1:2), cput-cpui ! write error to screen; + cput = MPI_WTIME() ; write(ounit,1007) cput-cpus, myid, lvol, Lrad(lvol), Bst(1:2), cput-cpui ! write error to screen; 1006 format("jo00aa : ",f10.2," : myid=",i3," ; lvol =",i3," ; lrad =",i3," ; MAX gB^s(-1)="es23.15" , gB^s(+1) ="es23.15" ; time="f8.2"s ;") 1007 format("jo00aa : ",f10.2," : myid=",i3," ; lvol =",i3," ; lrad =",i3," ; dtfluxERR ="es23.15" , dpfluxERR="es23.15" ; time="f8.2"s ;") !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(jo00aa) + +9999 continue + cput = MPI_WTIME() + Tjo00aa = Tjo00aa + ( cput-cpuo ) + return + !>
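Every routine converted in this diff repeats the same mechanical expansions of the former m4 macros: the profiling prologue and epilogue in place of BEGIN/RETURN, a timed call in place of WCALL, an explicit write/MPI_ABORT/stop in place of FATAL, and allocate/deallocate with stat in place of SALLOCATE/DALLOCATE. The standalone sketch below condenses the timed-call pattern; the timer Tcaller and the contained routine callee are hypothetical stand-ins for the per-routine timers (Tjo00aa, Tlbpol, ...) and the wrapped calls (coords, bfield, ...).

  ! standalone sketch of the timing wrapper that replaces each WCALL macro in this diff.
  program wcall_expansion_sketch
    use mpi
    implicit none
    integer, parameter :: wp = kind(1.0d0)
    real(wp) :: cput, cpuo, Tcaller
    integer  :: ierr
    call MPI_INIT( ierr )
    cpuo = MPI_WTIME() ; Tcaller = 0.0_wp
    ! ... work done by the calling routine ...
    cput    = MPI_WTIME()
    Tcaller = Tcaller + ( cput - cpuo )   ! charge the interval spent in the caller so far
    call callee()                         ! the wrapped call itself
    cpuo    = MPI_WTIME()                 ! restart the caller's stopwatch
    write(*,'(a,f8.3,a)') ' caller time so far: ', Tcaller, ' s'
    call MPI_FINALIZE( ierr )
  contains
    subroutine callee()                   ! hypothetical stand-in for coords(), bfield(), etc.
    end subroutine callee
  end program wcall_expansion_sketch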
    diff --git a/src/lbpol.f90 b/src/lbpol.F90 similarity index 79% rename from src/lbpol.f90 rename to src/lbpol.F90 index c4958705..963d0603 100644 --- a/src/lbpol.f90 +++ b/src/lbpol.F90 @@ -6,19 +6,19 @@ !latex \calledby{\link{xspech} and !latex \link{dfp100}} -!latex \calls{\link{coords} and +!latex \calls{\link{coords} and !latex \link{numrec}} !latex \begin{enumerate} !latex \item Call \link{coords} to compute the metric coefficients and the jacobian. -!latex \item Build coefficients \inputvar{efmn}, \inputvar{ofmn}, \inputvar{cfmn}, \inputvar{sfmn} from the field vector potential \inputvar{Ate}, \inputvar{Ato}, +!latex \item Build coefficients \inputvar{efmn}, \inputvar{ofmn}, \inputvar{cfmn}, \inputvar{sfmn} from the field vector potential \inputvar{Ate}, \inputvar{Ato}, !latex \inputvar{Aze} and \inputvar{Azo}, and radial derivatives of the Chebyshev polynomials \inputvar{TT(ll,innout,1)}. These variables -!latex are the radial derivative of the Fourier coefficients of the magnetic field vector potential. -!latex \item Take the inverse Fourier transform of \inputvar{efmn}, \inputvar{ofmn}, \inputvar{cfmn}, \inputvar{sfmn}. These are the covariant components of $dA$, +!latex are the radial derivative of the Fourier coefficients of the magnetic field vector potential. +!latex \item Take the inverse Fourier transform of \inputvar{efmn}, \inputvar{ofmn}, \inputvar{cfmn}, \inputvar{sfmn}. These are the covariant components of $dA$, !latex \textit{i.e.} the contravariant components of $\mathbf{B}$. !latex \item Build covariant components of the field using the metric coefficients \inputvar{guvij} and the jacobian \inputvar{sg}. -!latex \item Fourier transform the covariant components of the field and store them in the variables \inputvar{Btemn}, \inputvar{Btomn}, \inputvar{Bzemn} and +!latex \item Fourier transform the covariant components of the field and store them in the variables \inputvar{Btemn}, \inputvar{Btomn}, \inputvar{Bzemn} and !latex \inputvar{Bzomn}. !latex \end{enumerate} @@ -26,7 +26,7 @@ !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! subroutine lbpol(lvol, Bt00, ideriv, iocons) - + use mod_kinds, only: wp => dp use constants, only : mu0, pi, pi2, two, one, half, zero use allglobal, only : Ate, Aze, Ato, Azo, TT, & @@ -48,15 +48,23 @@ subroutine lbpol(lvol, Bt00, ideriv, iocons) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + ! ------ - - INTEGER :: Lcurvature, ideriv, ii, ll, ifail, lvol, mi, ni, iocons - REAL :: lss, Bt00(1:Mvol, 0:1, -1:2) - REAL :: lAte(1:mn), lAze(1:mn), lAto(1:mn), lAzo(1:mn) - REAL :: dAt(1:Ntz), dAz(1:Ntz), Bt(1:Ntz), Bz(1:Ntz), dAt0(1:Ntz), dAz0(1:Ntz) - REAL :: dBtzero ! Value of first B_theta mode jump - REAL :: mfactor ! Regularisation factor + + integer :: Lcurvature, ideriv, ii, ll, ifail, lvol, mi, ni, iocons + real(wp) :: lss, Bt00(1:Mvol, 0:1, -1:2) + real(wp) :: lAte(1:mn), lAze(1:mn), lAto(1:mn), lAzo(1:mn) + real(wp) :: dAt(1:Ntz), dAz(1:Ntz), Bt(1:Ntz), Bz(1:Ntz), dAt0(1:Ntz), dAz0(1:Ntz) + real(wp) :: dBtzero ! Value of first B_theta mode jump + real(wp) :: mfactor ! 
Regularisation factor LOGICAL :: LGeometricDerivative ! Lcurvature: Controls what the routine coords computes. @@ -66,7 +74,15 @@ subroutine lbpol(lvol, Bt00, ideriv, iocons) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - BEGIN(lbpol) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + ! TODO: This subroutine is very similar to curent.f90 - maybe merge both in a single subroutine to simplify? @@ -84,8 +100,13 @@ subroutine lbpol(lvol, Bt00, ideriv, iocons) ! First get the metric component and jacobian Lcurvature = 1 - WCALL( lbpol, coords, (lvol, lss, Lcurvature, Ntz, mn ) ) ! get guvij and sg - + + cput = MPI_WTIME() + Tlbpol = Tlbpol + ( cput-cpuo ) + call coords(lvol, lss, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + ! get guvij and sg + ! Then compute the vector potential and its derivatives. call build_vector_potential(lvol, iocons, ideriv, 1) @@ -101,7 +122,12 @@ subroutine lbpol(lvol, Bt00, ideriv, iocons) ! Get derivatives of metric element Lcurvature = 3 - WCALL( lbpol, coords, (lvol, lss, Lcurvature, Ntz, mn ) ) ! get sg times d/dx (g_mu,nu / sg) + + cput = MPI_WTIME() + Tlbpol = Tlbpol + ( cput-cpuo ) + call coords(lvol, lss, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + ! get sg times d/dx (g_mu,nu / sg) ! Compute vector potential without taking derivatives call build_vector_potential(lvol, iocons, 0, 1) @@ -127,7 +153,12 @@ subroutine lbpol(lvol, Bt00, ideriv, iocons) ! Now Btemn(1, 0, vvol) and Btemn(1, 1, vvol) contain Bte00(s=-1) and Bte00(s=1) for each volume vvol. - RETURN(lbpol) + +9999 continue + cput = MPI_WTIME() + Tlbpol = Tlbpol + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/lforce.f90 b/src/lforce.F90 similarity index 89% rename from src/lforce.f90 rename to src/lforce.F90 index 1d9fd715..1d1098b8 100644 --- a/src/lforce.f90 +++ b/src/lforce.F90 @@ -142,7 +142,7 @@ !> @param MMl !> @param[in] iflag subroutine lforce( lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, iflag ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, two @@ -172,27 +172,67 @@ subroutine lforce( lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, ifl !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lvol, iocons, ideriv, Ntz, iflag - REAL :: dAt(1:Ntz, -1:2), dAz(1:Ntz, -1:2), XX(1:Ntz), YY(1:Ntz), dRR(1:Ntz,-1:1), dZZ(1:Ntz,-1:1), DDl, MMl +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lvol, iocons, ideriv, Ntz, iflag + real(wp) :: dAt(1:Ntz, -1:2), dAz(1:Ntz, -1:2), XX(1:Ntz), YY(1:Ntz), dRR(1:Ntz,-1:1), dZZ(1:Ntz,-1:1), DDl, MMl - REAL :: IIl(1:Ntz), length(1:Ntz), dLL(1:Ntz) - INTEGER :: Lcurvature, ii, jj, kk, ll, ifail, ivol, lnn, mi, id!, oicons - REAL :: dBB(1:Ntz, -1:2), lss, mfactor + real(wp) :: IIl(1:Ntz), length(1:Ntz), dLL(1:Ntz) + integer :: Lcurvature, ii, jj, kk, ll, ifail, ivol, lnn, mi, id!, oicons + real(wp) :: dBB(1:Ntz, -1:2), lss, mfactor - REAL :: dAs(1:Ntz)!, dRdt(-1:1,0:1), dZdt(-1:1,0:1) - REAL :: lgvuij(1:Ntz,1:3,1:3) ! local workspace; 13 Sep 13; + real(wp) :: dAs(1:Ntz)!, dRdt(-1:1,0:1), dZdt(-1:1,0:1) + real(wp) :: lgvuij(1:Ntz,1:3,1:3) ! local workspace; 13 Sep 13; + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(lforce) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( lforce, lvol.lt.1 .or. lvol.gt.Mvol, illegal lvol ) - FATAL( lforce, lvol.eq.1 .and. iocons.eq.0, illegal combination ) - FATAL( lforce, lvol.eq.Mvol .and. iocons.eq.1, illegal combination ) - FATAL( lforce, iflag.lt.0 .or. iflag.gt.1, illegal iflag ) + + if( lvol.lt.1 .or. lvol.gt.Mvol ) then + write(6,'("lforce : fatal : myid=",i3," ; lvol.lt.1 .or. lvol.gt.Mvol ; illegal lvol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "lforce : lvol.lt.1 .or. lvol.gt.Mvol : illegal lvol ;" + endif + + + if( lvol.eq.1 .and. iocons.eq.0 ) then + write(6,'("lforce : fatal : myid=",i3," ; lvol.eq.1 .and. iocons.eq.0 ; illegal combination ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "lforce : lvol.eq.1 .and. iocons.eq.0 : illegal combination ;" + endif + + + if( lvol.eq.Mvol .and. iocons.eq.1 ) then + write(6,'("lforce : fatal : myid=",i3," ; lvol.eq.Mvol .and. iocons.eq.1 ; illegal combination ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "lforce : lvol.eq.Mvol .and. iocons.eq.1 : illegal combination ;" + endif + + + if( iflag.lt.0 .or. iflag.gt.1 ) then + write(6,'("lforce : fatal : myid=",i3," ; iflag.lt.0 .or. iflag.gt.1 ; illegal iflag ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "lforce : iflag.lt.0 .or. iflag.gt.1 : illegal iflag ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -208,7 +248,12 @@ subroutine lforce( lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, ifl Lcurvature = 1 - WCALL( lforce, coords, ( lvol, lss, Lcurvature, Ntz, mn ) ) ! get coordinates and derivatives wrt Rj, Zj, at specific radial location; + + cput = MPI_WTIME() + Tlforce = Tlforce + ( cput-cpuo ) + call coords( lvol, lss, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + ! get coordinates and derivatives wrt Rj, Zj, at specific radial location; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -252,7 +297,12 @@ subroutine lforce( lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, ifl if( ideriv.eq.-1 ) then ! 
Get coordinate metrics and their derivatives wrt Rj, Zj on interface; lss = two * iocons - one ; Lcurvature = 4 - WCALL( lforce, coords, ( lvol, lss, Lcurvature, Ntz, mn ) ) + + cput = MPI_WTIME() + Tlforce = Tlforce + ( cput-cpuo ) + call coords( lvol, lss, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + dBB(1:Ntz,id) = dBB(1:Ntz, id) + & half * ( dAz(1:Ntz, 0)*dAz(1:Ntz, 0)*guvij(1:Ntz,2,2,1) & @@ -342,7 +392,13 @@ subroutine lforce( lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, ifl ! ijreal(1:Ntz) contains the pressure + magnetic energy term; #ifdef DEBUG - FATAL( lforce, iocons.lt.0 .or. iocons.gt.2, error ) + + if( iocons.lt.0 .or. iocons.gt.2 ) then + write(6,'("lforce : fatal : myid=",i3," ; iocons.lt.0 .or. iocons.gt.2 ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "lforce : iocons.lt.0 .or. iocons.gt.2 : error ;" + endif + #endif ;ifail = 0 @@ -367,7 +423,12 @@ subroutine lforce( lvol, iocons, ideriv, Ntz, dBB, XX, YY, length, DDl, MMl, ifl !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(lforce) + +9999 continue + cput = MPI_WTIME() + Tlforce = Tlforce + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/ma00aa.f90 b/src/ma00aa.F90 similarity index 90% rename from src/ma00aa.f90 rename to src/ma00aa.F90 index cb9b5390..25ae8cf4 100644 --- a/src/ma00aa.f90 +++ b/src/ma00aa.F90 @@ -65,7 +65,7 @@ !> @param[in] lvol index of nested volume !> @param[in] lrad order of Chebychev polynomials subroutine ma00aa( lquad, mn, lvol, lrad ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, two, pi, pi2 @@ -102,34 +102,56 @@ subroutine ma00aa( lquad, mn, lvol, lrad ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lquad, mn, lvol, lrad +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lquad, mn, lvol, lrad + + integer :: jquad, ll, pp, ll1, pp1, uv, ii, jj, io, mn2, lp2, mn2_max, lp2_max, nele - INTEGER :: jquad, ll, pp, ll1, pp1, uv, ii, jj, io, mn2, lp2, mn2_max, lp2_max, nele + integer :: kk, kd, kka, kks, kda, kds, Lcurvature, ideriv - INTEGER :: kk, kd, kka, kks, kda, kds, Lcurvature, ideriv + real(wp) :: lss, jthweight, fee, feo, foe, foo, Tl, Dl, Tp, Dp, TlTp, TlDp, DlTp, DlDp, ikda, ikds, imn2, ilrad, lssm - REAL :: lss, jthweight, fee, feo, foe, foo, Tl, Dl, Tp, Dp, TlTp, TlDp, DlTp, DlDp, ikda, ikds, imn2, ilrad, lssm + real(wp) :: foocc, foocs, foosc, fooss + real(wp) :: fsscc, fsscs, fsssc, fssss + real(wp) :: fstcc, fstcs, fstsc, fstss + real(wp) :: fszcc, fszcs, fszsc, fszss + real(wp) :: fttcc, fttcs, fttsc, fttss + real(wp) :: ftzcc, ftzcs, ftzsc, ftzss + real(wp) :: fzzcc, fzzcs, fzzsc, fzzss - REAL :: foocc, foocs, foosc, fooss - REAL :: fsscc, fsscs, fsssc, fssss - REAL :: fstcc, fstcs, fstsc, fstss - REAL :: fszcc, fszcs, fszsc, fszss - REAL :: fttcc, fttcs, fttsc, fttss - REAL :: ftzcc, ftzcs, ftzsc, ftzss - REAL :: fzzcc, fzzcs, fzzsc, fzzss + real(wp) :: sbar + real(wp), allocatable :: basis(:,:,:,:) - REAL :: sbar - REAL, allocatable :: basis(:,:,:,:) - BEGIN( ma00aa ) + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( ma00aa, lvol.lt.1 .or. lvol.gt.Mvol, illegal volume label ) + + if( lvol.lt.1 .or. lvol.gt.Mvol ) then + write(6,'("ma00aa : fatal : myid=",i3," ; lvol.lt.1 .or. lvol.gt.Mvol ; illegal volume label ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ma00aa : lvol.lt.1 .or. lvol.gt.Mvol : illegal volume label ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -177,16 +199,29 @@ subroutine ma00aa( lquad, mn, lvol, lrad ) DDzzss = zero endif !NOTstellsym - SALLOCATE(basis, (0:lrad,0:mpol,0:1,lquad), zero) + + allocate( basis(0:lrad,0:mpol,0:1,lquad), stat=astat ) + basis(0:lrad,0:mpol,0:1,lquad) = zero + if( dBdX%L ) then ; Lcurvature = 3 ; ideriv = 1 else ; Lcurvature = 1 ; ideriv = 0 endif if (.not. Lsavedguvij) then - WCALL( ma00aa, compute_guvijsave, (lquad, lvol, ideriv, Lcurvature) ) + + cput = MPI_WTIME() + Tma00aa = Tma00aa + ( cput-cpuo ) + call compute_guvijsave(lquad, lvol, ideriv, Lcurvature) + cpuo = MPI_WTIME() + endif - WCALL( ma00aa, metrix,( lquad, lvol ) ) ! compute metric elements; 16 Jan 13; + + cput = MPI_WTIME() + Tma00aa = Tma00aa + ( cput-cpuo ) + call metrix( lquad, lvol ) + cpuo = MPI_WTIME() + ! compute metric elements; 16 Jan 13; do jquad = 1, lquad lss = gaussianabscissae(jquad,lvol) ; jthweight = gaussianweight(jquad,lvol) @@ -342,7 +377,9 @@ subroutine ma00aa( lquad, mn, lvol, lrad ) !$OMP END PARALLEL DO !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
- DALLOCATE(basis) + + deallocate(basis,stat=astat) + DToocc = DToocc * pi2pi2nfphalf TTssss = TTssss * pi2pi2nfphalf @@ -385,7 +422,12 @@ subroutine ma00aa( lquad, mn, lvol, lrad ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN( ma00aa ) + +9999 continue + cput = MPI_WTIME() + Tma00aa = Tma00aa + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/ma02aa.f90 b/src/ma02aa.F90 similarity index 79% rename from src/ma02aa.f90 rename to src/ma02aa.F90 index 4a175dc3..6e3db768 100644 --- a/src/ma02aa.f90 +++ b/src/ma02aa.F90 @@ -9,7 +9,7 @@ !> @param[in] lvol index of nested volume for which to run this !> @param[in] NN number of degrees of freedom in the (packed format) vector potential; subroutine ma02aa( lvol, NN ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, ten @@ -40,53 +40,75 @@ subroutine ma02aa( lvol, NN ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lvol, NN +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lvol, NN - INTEGER :: ideriv - REAL :: tol, dpsi(1:2), lastcpu - CHARACTER :: packorunpack + integer :: ideriv + real(wp) :: tol, dpsi(1:2), lastcpu + character :: packorunpack - INTEGER :: Nxdof, Ndof, Ldfjac, iflag, maxfev, mode, LRR, nfev, njev, nprint, ihybrj - REAL :: Xdof(1:2), Fdof(1:2), Ddof(1:2,1:2), oDdof(1:2,1:2) - REAL :: factor, diag(1:2), RR(1:2*(2+1)/2), QTF(1:2), wk(1:2,1:4) + integer :: Nxdof, Ndof, Ldfjac, iflag, maxfev, mode, LRR, nfev, njev, nprint, ihybrj + real(wp) :: Xdof(1:2), Fdof(1:2), Ddof(1:2,1:2), oDdof(1:2,1:2) + real(wp) :: factor, diag(1:2), RR(1:2*(2+1)/2), QTF(1:2), wk(1:2,1:4) - INTEGER :: irevcm + integer :: irevcm - INTEGER :: pNN + integer :: pNN - REAL :: xi(0:NN), Fxi(0:NN), xo(0:NN), Mxi(1:NN) + real(wp) :: xi(0:NN), Fxi(0:NN), xo(0:NN), Mxi(1:NN) external :: mp00ac #ifdef DEBUG - INTEGER :: ixx, jxx, jfinite ! computing finite-difference derivatives of \iota wrt \mu and \Delta \psi_p; - REAL :: lfdiff, dFdof(-1:1,-1:1,1:2) - REAL, allocatable :: dsolution(:,:,:,:) + integer :: ixx, jxx, jfinite ! computing finite-difference derivatives of \iota wrt \mu and \Delta \psi_p; + real(wp) :: lfdiff, dFdof(-1:1,-1:1,1:2) + real(wp), allocatable :: dsolution(:,:,:,:) #endif !required for hybrj1; - INTEGER :: ihybrj1, Ldfmuaa, lengthwork - REAL :: NewtonError - REAL , allocatable :: DFxi(:,:), work(:) + integer :: ihybrj1, Ldfmuaa, lengthwork + real(wp) :: NewtonError + real(wp) , allocatable :: DFxi(:,:), work(:) external :: df00ab ! 
required for E04UFF; - INTEGER :: NLinearConstraints, NNonLinearConstraints, LDA, LDCJ, LDR, iterations, LIWk, LRWk, ie04uff - INTEGER, allocatable :: Istate(:), NEEDC(:), IWk(:) - REAL :: objectivefunction - REAL , allocatable :: LinearConstraintMatrix(:,:), LowerBound(:), UpperBound(:) - REAL , allocatable :: constraintfunction(:), constraintgradient(:,:), multipliers(:), objectivegradient(:), RS(:,:), RWk(:) - CHARACTER :: optionalparameter*33 + integer :: NLinearConstraints, NNonLinearConstraints, LDA, LDCJ, LDR, iterations, LIWk, LRWk, ie04uff + integer, allocatable :: Istate(:), NEEDC(:), IWk(:) + real(wp) :: objectivefunction + real(wp) , allocatable :: LinearConstraintMatrix(:,:), LowerBound(:), UpperBound(:) + real(wp) , allocatable :: constraintfunction(:), constraintgradient(:,:), multipliers(:), objectivegradient(:), RS(:,:), RWk(:) + character :: optionalparameter*33 + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(ma02aa) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( ma02aa, lvol.lt.1 .or. lvol.gt.Mvol, illegal lvol ) + + if( lvol.lt.1 .or. lvol.gt.Mvol ) then + write(6,'("ma02aa : fatal : myid=",i3," ; lvol.lt.1 .or. lvol.gt.Mvol ; illegal lvol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ma02aa : lvol.lt.1 .or. lvol.gt.Mvol : illegal lvol ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -102,7 +124,7 @@ subroutine ma02aa( lvol, NN ) !> if( LBsequad ) then ! sequential quadratic programming (SQP); construct minimum energy with constrained helicity; - lastcpu = GETTIME + lastcpu = MPI_WTIME() NLinearConstraints = 0 ! no linear constraints; @@ -114,10 +136,19 @@ subroutine ma02aa( lvol, NN ) LDR = NN - SALLOCATE( LinearConstraintMatrix, (1:LDA,1:1), zero ) ! linear constraint matrix; - SALLOCATE( LowerBound, (1:NN+NLinearConstraints+NNonLinearConstraints), zero ) ! lower bounds on variables, linear constraints and non-linear constraints; - SALLOCATE( UpperBound, (1:NN+NLinearConstraints+NNonLinearConstraints), zero ) ! upper bounds on variables, linear constraints and non-linear constraints; + allocate( LinearConstraintMatrix(1:LDA,1:1), stat=astat ) + LinearConstraintMatrix(1:LDA,1:1) = zero + ! linear constraint matrix; + + + allocate( LowerBound(1:NN+NLinearConstraints+NNonLinearConstraints), stat=astat ) + LowerBound(1:NN+NLinearConstraints+NNonLinearConstraints) = zero + ! lower bounds on variables, linear constraints and non-linear constraints; + + allocate( UpperBound(1:NN+NLinearConstraints+NNonLinearConstraints), stat=astat ) + UpperBound(1:NN+NLinearConstraints+NNonLinearConstraints) = zero + ! upper bounds on variables, linear constraints and non-linear constraints; LowerBound( 1 : NN ) = -1.0E+21 ! variable constraints; no constraint; UpperBound( 1 : NN ) = +1.0E+21 ! @@ -128,32 +159,64 @@ subroutine ma02aa( lvol, NN ) iterations = 0 ! iteration counter; - SALLOCATE( Istate, (1:NN+NLinearConstraints+NNonLinearConstraints), 0 ) - SALLOCATE( constraintfunction, (1:NNonLinearConstraints), zero ) ! 
constraint functions; + allocate( Istate(1:NN+NLinearConstraints+NNonLinearConstraints), stat=astat ) + Istate(1:NN+NLinearConstraints+NNonLinearConstraints) = 0 + - SALLOCATE( constraintgradient, (1:LDCJ,1:NN), zero ) ! derivatives of constraint functions; - SALLOCATE( multipliers, (1:NN+NLinearConstraints+NNonLinearConstraints), zero ) ! Lagrange multipliers ?; + allocate( constraintfunction(1:NNonLinearConstraints), stat=astat ) + constraintfunction(1:NNonLinearConstraints) = zero + ! constraint functions; + + + allocate( constraintgradient(1:LDCJ,1:NN), stat=astat ) + constraintgradient(1:LDCJ,1:NN) = zero + ! derivatives of constraint functions; + + + allocate( multipliers(1:NN+NLinearConstraints+NNonLinearConstraints), stat=astat ) + multipliers(1:NN+NLinearConstraints+NNonLinearConstraints) = zero + ! Lagrange multipliers ?; objectivefunction = zero ! objective function; - SALLOCATE( objectivegradient, (1:NN), zero ) ! derivatives of objective function; - SALLOCATE( RS, (1:LDR,1:NN), zero ) + allocate( objectivegradient(1:NN), stat=astat ) + objectivegradient(1:NN) = zero + ! derivatives of objective function; + + + allocate( RS(1:LDR,1:NN), stat=astat ) + RS(1:LDR,1:NN) = zero + ideriv = 0 ; dpsi(1:2) = (/ dtflux(lvol), dpflux(lvol) /) ! these are also used below; packorunpack = 'P' - CALL( ma02aa, packab, ( packorunpack, lvol, NN, xi(1:NN), ideriv ) ) - SALLOCATE( NEEDC, (1:NNonLinearConstraints), 0 ) + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + call packab( packorunpack, lvol, NN, xi(1:NN), ideriv ) + cpuo = MPI_WTIME() + + + + allocate( NEEDC(1:NNonLinearConstraints), stat=astat ) + NEEDC(1:NNonLinearConstraints) = 0 + LIWk = 3*NN + NLinearConstraints + 2*NNonLinearConstraints ! workspace; - SALLOCATE( IWk, (1:LIWk), 0 ) ! workspace; + + allocate( IWk(1:LIWk), stat=astat ) + IWk(1:LIWk) = 0 + ! workspace; LRWk = 2*NN**2 + NN * NLinearConstraints + 2 * NN * NNonLinearConstraints + 21 * NN + 11 * NLinearConstraints + 22 * NNonLinearConstraints + 1 ! workspace; - SALLOCATE( RWk, (1:LRWk), zero ) ! workspace; + + allocate( RWk(1:LRWk), stat=astat ) + RWk(1:LRWk) = zero + ! workspace; irevcm = 0 ; ie04uff = 1 ! reverse communication loop control; ifail error flag; @@ -242,22 +305,51 @@ subroutine ma02aa( lvol, NN ) ! enddo ! end of do ! reverse communication loop; - DALLOCATE(RWk) - DALLOCATE(IWk) - DALLOCATE(NEEDC) - DALLOCATE(RS) - DALLOCATE(objectivegradient) - DALLOCATE(multipliers) - DALLOCATE(constraintgradient) - DALLOCATE(constraintfunction) - DALLOCATE(Istate) - DALLOCATE(LowerBound) - DALLOCATE(UpperBound) - DALLOCATE(LinearConstraintMatrix) + + deallocate(RWk,stat=astat) + + + deallocate(IWk,stat=astat) + + + deallocate(NEEDC,stat=astat) + + + deallocate(RS,stat=astat) + + + deallocate(objectivegradient,stat=astat) + + + deallocate(multipliers,stat=astat) + + + deallocate(constraintgradient,stat=astat) + + + deallocate(constraintfunction,stat=astat) + + + deallocate(Istate,stat=astat) + + + deallocate(LowerBound,stat=astat) + + + deallocate(UpperBound,stat=astat) + + + deallocate(LinearConstraintMatrix,stat=astat) + packorunpack = 'U' - CALL( ma02aa, packab ( packorunpack, lvol, NN, xi(1:NN), ideriv ) ) + + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + call packab ( packorunpack, lvol, NN, xi(1:NN), ideriv ) + cpuo = MPI_WTIME() + lBBintegral(lvol) = half * sum( xi(1:NN) * matmul( dMA(1:NN,1:NN), xi(1:NN) ) ) + sum( xi(1:NN) * MBpsi(1:NN) ) ! + psiMCpsi lABintegral(lvol) = half * sum( xi(1:NN) * matmul( dMD(1:NN,1:NN), xi(1:NN) ) ) ! 
+ sum( xi(1:NN) * MEpsi(1:NN) ) ! + psiMFpsi @@ -276,10 +368,16 @@ subroutine ma02aa( lvol, NN ) if( LBnewton ) then - lastcpu = GETTIME + lastcpu = MPI_WTIME() + + + allocate( DFxi(0:NN,0:NN), stat=astat ) + DFxi(0:NN,0:NN) = zero + + + allocate( work(1:(1+NN)*(1+NN+13)/2), stat=astat ) + work(1:(1+NN)*(1+NN+13)/2) = zero - SALLOCATE(DFxi, (0:NN,0:NN), zero) - SALLOCATE(work, (1:(1+NN)*(1+NN+13)/2), zero) xi(0) = mu(lvol) ! initialize; helicity multiplier is treated as an independent degree-of-freedom; @@ -287,7 +385,12 @@ subroutine ma02aa( lvol, NN ) packorunpack = 'P' ! CALL( ma02aa, packab ( packorunpack, lvol, NN, xi(1:NN), dpsi(1:2), ideriv ) ) - CALL( ma02aa, packab ( packorunpack, lvol, NN, xi(1:NN), ideriv ) ) + + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + call packab ( packorunpack, lvol, NN, xi(1:NN), ideriv ) + cpuo = MPI_WTIME() + pNN = NN + 1 ; Ldfmuaa = pNN ; tol = mupftol ; lengthwork = pNN * ( pNN+13 ) / 2 @@ -306,9 +409,14 @@ subroutine ma02aa( lvol, NN ) packorunpack = 'U' ; ideriv = 0 - CALL( ma02aa, packab( packorunpack, lvol, NN, xi(1:NN), ideriv ) ) - cput = GETTIME + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + call packab( packorunpack, lvol, NN, xi(1:NN), ideriv ) + cpuo = MPI_WTIME() + + + cput = MPI_WTIME() select case( ihybrj1 ) case( :-1 ) @@ -328,14 +436,31 @@ subroutine ma02aa( lvol, NN ) ; write(ounit,1020) cput-cpus, myid, lvol, ihybrj1, helicity(lvol), mu(lvol), dpflux(lvol), cput-lastcpu, NewtonError, "bad progress ; " endif case default - FATAL( ma02aa, .true., illegal ifail returned by hybrj1 ) + + if( .true. ) then + write(6,'("ma02aa : fatal : myid=",i3," ; .true. ; illegal ifail returned by hybrj1 ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ma02aa : .true. : illegal ifail returned by hybrj1 ;" + endif + end select #ifdef DEBUG xo(1:NN) = xi(1:NN) ! save original for comparison; packorunpack = 'P' ; ideriv = 0 - CALL( ma02aa, packab( packorunpack, lvol, NN, xi(1:NN), ideriv ) ) - FATAL( ma02aa, sum(abs(xi(1:Nfielddof(lvol))-xo(1:Nfielddof(lvol))))/Nfielddof(lvol).gt.vsmall, un/packing routine is incorrect ) + + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + call packab( packorunpack, lvol, NN, xi(1:NN), ideriv ) + cpuo = MPI_WTIME() + + + if( sum(abs(xi(1:Nfielddof(lvol))-xo(1:Nfielddof(lvol))))/Nfielddof(lvol).gt.vsmall ) then + write(6,'("ma02aa : fatal : myid=",i3," ; sum(abs(xi(1:Nfielddof(lvol))-xo(1:Nfielddof(lvol))))/Nfielddof(lvol).gt.vsmall ; un/packing routine is incorrect ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ma02aa : sum(abs(xi(1:Nfielddof(lvol))-xo(1:Nfielddof(lvol))))/Nfielddof(lvol).gt.vsmall : un/packing routine is incorrect ;" + endif + #endif !if( NewtonError.lt.mupftol ) then @@ -347,8 +472,12 @@ subroutine ma02aa( lvol, NN ) solution(1:NN,0) = xi(1:NN) - DALLOCATE( DFxi ) - DALLOCATE( work ) + + deallocate(DFxi ,stat=astat) + + + deallocate(work ,stat=astat) + endif ! end of if( LBnewton ) then @@ -419,7 +548,7 @@ subroutine ma02aa( lvol, NN ) if( LBlinear ) then ! assume Beltrami field is parameterized by helicity multiplier (and poloidal flux); - lastcpu = GETTIME + lastcpu = MPI_WTIME() if( Lplasmaregion ) then @@ -461,7 +590,12 @@ subroutine ma02aa( lvol, NN ) ; ; Ndof = 1 ; Ldfjac = Ndof ; nfev = 1 ; njev = 0 ; ihybrj = 1; ! 
provide dummy values for consistency; - WCALL( ma02aa, mp00ac, ( Ndof, Xdof(1:Ndof), Fdof(1:Ndof), Ddof(1:Ldfjac,1:Ndof), Ldfjac, iflag ) ) + + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + call mp00ac( Ndof, Xdof(1:Ndof), Fdof(1:Ndof), Ddof(1:Ldfjac,1:Ndof), Ldfjac, iflag ) + cpuo = MPI_WTIME() + helicity(lvol) = lABintegral(lvol) ! this was computed in mp00ac; @@ -472,11 +606,22 @@ subroutine ma02aa( lvol, NN ) tol = mupftol ; LRR = Ndof * ( Ndof+1 ) / 2 ; mode = 0 ; diag(1:2) = zero ; factor = one ; maxfev = mupfits ; nprint = 0 - FATAL( ma02aa, Ndof.gt.2, illegal ) - WCALL( ma02aa, hybrj2, ( mp00ac, Ndof, Xdof(1:Ndof), Fdof(1:Ndof), Ddof(1:Ldfjac,1:Ndof), Ldfjac, tol, & + if( Ndof.gt.2 ) then + write(6,'("ma02aa : fatal : myid=",i3," ; Ndof.gt.2 ; illegal ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ma02aa : Ndof.gt.2 : illegal ;" + endif + + + + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + call hybrj2( mp00ac, Ndof, Xdof(1:Ndof), Fdof(1:Ndof), Ddof(1:Ldfjac,1:Ndof), Ldfjac, tol, & maxfev, diag(1:Ndof), mode, factor, nprint, ihybrj, nfev, njev, RR(1:LRR), LRR, QTF(1:Ndof), & - WK(1:Ndof,1), WK(1:Ndof,2), WK(1:Ndof,3), WK(1:Ndof,4) ) ) + WK(1:Ndof,1), WK(1:Ndof,2), WK(1:Ndof,3), WK(1:Ndof,4) ) + cpuo = MPI_WTIME() + if( Lplasmaregion ) then @@ -506,7 +651,12 @@ subroutine ma02aa( lvol, NN ) iflag = 2 ; Ldfjac = Ndof ! call mp00ac: tr00ab/curent to ensure the derivatives of B, transform, currents, wrt mu/dtflux & dpflux are calculated; - WCALL( ma02aa, mp00ac, ( Ndof, Xdof(1:Ndof), Fdof(1:Ndof), Ddof(1:Ldfjac,1:Ndof), Ldfjac, iflag ) ) + + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + call mp00ac( Ndof, Xdof(1:Ndof), Fdof(1:Ndof), Ddof(1:Ldfjac,1:Ndof), Ldfjac, iflag ) + cpuo = MPI_WTIME() + endif ! end of if( Lconstraint.eq.1 .or. ( Lvacuumregion .and. Lconstraint.eq.0 ) ) ; @@ -515,7 +665,7 @@ subroutine ma02aa( lvol, NN ) end select ! end of select case( Nxdof ) ; - cput = GETTIME + cput = MPI_WTIME() select case(ihybrj) ! this screen output may not be correct for Lvacuumregion; case( 1 ) @@ -534,7 +684,13 @@ subroutine ma02aa( lvol, NN ) ; write(ounit,1040) cput-cpus, myid, lvol, ihybrj, helicity(lvol), mu(lvol), dpflux(lvol), cput-lastcpu, "bad progress ", Fdof(1:Ndof) case default ; write(ounit,1040) cput-cpus, myid, lvol, ihybrj, helicity(lvol), mu(lvol), dpflux(lvol), cput-lastcpu, "illegal ifail ", Fdof(1:Ndof) - FATAL( ma02aa, .true., illegal ifail returned by hybrj ) + + if( .true. ) then + write(6,'("ma02aa : fatal : myid=",i3," ; .true. ; illegal ifail returned by hybrj ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ma02aa : .true. : illegal ifail returned by hybrj ;" + endif + end select endif ! end of if( LBlinear ) then; @@ -567,7 +723,10 @@ subroutine ma02aa( lvol, NN ) if( Lconstraint.eq.1 .or. Lvacuumregion ) then ! only in this case are the derivatives calculated; - SALLOCATE( dsolution, (1:NN,0:2,-1:1,-1:1), zero ) ! packed vector potential; + + allocate( dsolution(1:NN,0:2,-1:1,-1:1), stat=astat ) + dsolution(1:NN,0:2,-1:1,-1:1) = zero + ! packed vector potential; if( Lplasmaregion ) then Xdof(1:2) = xoffset + (/ mu(lvol), dpflux(lvol) /) ! initial guess for degrees of freedom; offset from zero so that relative error is small; @@ -589,13 +748,18 @@ subroutine ma02aa( lvol, NN ) iflag = 2 ! 
iflag controls derivative calculation in mp00ac; analytic derivatives of rotational-transform are required; - CALL( ma02aa, mp00ac( Ndof, Xdof(1:Ndof), dFdof(ixx,jxx,1:Ndof), oDdof(1:Ldfjac,1:Ndof), Ldfjac, iflag ) ) ! compute "exact" derivatives; + + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + call mp00ac( Ndof, Xdof(1:Ndof), dFdof(ixx,jxx,1:Ndof), oDdof(1:Ldfjac,1:Ndof), Ldfjac, iflag ) + cpuo = MPI_WTIME() + ! compute "exact" derivatives; dsolution(1:NN,1,0,0) = solution(1:NN,1) ! packed vector potential; derivative wrt mu ; dsolution(1:NN,2,0,0) = solution(1:NN,2) ! packed vector potential; derivative wrt dpflux; jfinite = 0 - cput = GETTIME + cput = MPI_WTIME() write(ounit,2000) cput-cpus, myid, lvol, jfinite, "derivative", oDdof(1:Ldfjac,1:Ndof) do jfinite = -4,-2,+1 ; lfdiff = ten**jfinite @@ -615,7 +779,12 @@ subroutine ma02aa( lvol, NN ) iflag = 1 ! analytic derivatives of rotational-transform are not required; - CALL( ma02aa, mp00ac( Ndof, Xdof(1:Ndof), dFdof(ixx,jxx,1:Ndof), Ddof(1:Ldfjac,1:Ndof), Ldfjac, iflag ) ) ! compute function values only; + + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + call mp00ac( Ndof, Xdof(1:Ndof), dFdof(ixx,jxx,1:Ndof), Ddof(1:Ldfjac,1:Ndof), Ldfjac, iflag ) + cpuo = MPI_WTIME() + ! compute function values only; dsolution(1:NN,0,ixx,jxx) = solution(1:NN,0) @@ -625,16 +794,18 @@ subroutine ma02aa( lvol, NN ) ; Ddof(1:Ndof,1) = ( dFdof( 1, 0, 1:Ndof) - dFdof(-1, 0, 1:Ndof) ) / lfdiff ! derivative wrt helicity multiplier ; if( Ndof.eq.2 ) Ddof(1:Ndof,2) = ( dFdof( 0, 1, 1:Ndof) - dFdof( 0,-1, 1:Ndof) ) / lfdiff ! derivative wrt enclosed poloidal flux; - cput = GETTIME + cput = MPI_WTIME() !write(ounit,2000) cput-cpus, myid, lvol, jfinite, " error ", Ddof(1:Ldfjac,1:Ndof) - oDdof(1:Ldfjac,1:Ndof) write(ounit,2000) cput-cpus, myid, lvol, jfinite, " estimate ", Ddof(1:Ldfjac,1:Ndof) enddo ! end of do jfinite; - cput = GETTIME + cput = MPI_WTIME() write(ounit,2000) cput-cpus - DALLOCATE(dsolution) + + deallocate(dsolution,stat=astat) + 2000 format("ma02aa : ":,f10.2," :":" myid=",i3," : lvol=",i3," ; jj=",i3," ; "a10" : DF=["es23.15" ,"es23.15" ,"es23.15" ,"es23.15" ] ;") @@ -646,7 +817,12 @@ subroutine ma02aa( lvol, NN ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(ma02aa) + +9999 continue + cput = MPI_WTIME() + Tma02aa = Tma02aa + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/macros b/src/macros deleted file mode 100644 index d534ec4d..00000000 --- a/src/macros +++ /dev/null @@ -1,1308 +0,0 @@ -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -! this file, ext_m.F90, has been constructed by macro expansion; see ext.f90 for source; - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
- -m4_changequote({,})m4_dnl ; can put comments here; -m4_changecom(!)m4_dnl ; can put comments here; -m4_define(INTEGER,integer)m4_dnl ; can put comments here; -m4_define(REAL,real(8))m4_dnl ; can put comments here; -!m4_define(CHARACTER,character)m4_dnl ; can put comments here; -m4_define(GETTIME,MPI_WTIME())m4_dnl ; can put comments here; -m4_define(MPISTART,{! macro expansion of mpistart; - - myid = 0 ; ncpu = 1 - call MPI_INIT( ierr ) - call MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr ) - call MPI_COMM_SIZE( MPI_COMM_WORLD, ncpu, ierr ) - - ! macro expansion of mpistart; end;})m4_dnl can put comments here -m4_define(MPIFINALIZE,{! macro expansion of mpifinalize; - - call MPI_FINALIZE(ierr) - - ! macro expansion of mpifinalize; end;})m4_dnl can put comments here -m4_define(LOCALS,{! macro expansion of locals; - -#ifdef OPENMP - USE OMP_LIB -#endif - use mpi - implicit none - INTEGER :: ierr, astat, ios, nthreads, ithread - REAL :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; - - ! macro expansion of locals; end;})m4_dnl ; can put comments here; - -m4_define(GETTHREAD,{! macro expansion of getthread; - -#ifdef OPENMP - ithread = omp_get_thread_num() + 1 -#else - ithread = 1 -#endif - - ! macro expansion of getthread; end;})m4_dnl ; can put comments here; - -m4_define(SALLOCATE,{ ! macro expansion of sallocate = set allocate; -! allocate a variable of name _1 in the range _2 and set the value to _3 - -#ifdef DEBUG - if( allocated( $1 ) ) then - cput = MPI_WTIME() - write(6,'("macros : ",f10.2," : myid=",i3," ; $1 already allocated ;")') cput-cpus, myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop 'macros : 0123456789 : myid= ; $1 already allocated ;' - endif - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; allocating $1 ;")') myid -#endif - - if( allocated( $1 ) ) deallocate( $1 ) - - allocate( $1$2, stat=astat ) - -#ifdef DEBUG - if( astat.ne.0 ) then - cput = MPI_WTIME() - write(6,'("macros : ",f10.2," : myid=",i3," ; error allocating $1 ;")') cput-cpus, myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop 'error allocating $1' - endif - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; allocated $1 ;")') myid -#endif - - $1$2 = $3 - - ! macro expansion of sallocate; end;})m4_dnl ; can put comments here; -m4_define(NALLOCATE,{! macro expansion of nallocate; -! allocate a variable of name _1 in the range _2 - -#ifdef DEBUG - - if( allocated($1) ) then - cput = MPI_WTIME() - write(6,'("macros : ",f10.2," : myid=",i3," ; $1 already allocated ;")') cput-cpus, myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop '$1 allocated' - endif - - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; allocating $1 ;")') myid - -#endif - - if( allocated( $1 ) ) deallocate( $1 ) - allocate($1$2,stat=astat) - -#ifdef DEBUG - - if( astat.ne.0 ) then - cput = MPI_WTIME() - write(6,'("macros : ",f10.2," : myid=",i3," ; error allocating $1 ;")') cput-cpus, myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop 'error allocating $1' - endif - - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; allocated $1 ;")') myid - -#endif - - ! macro expansion of nallocate; end;})m4_dnl ; can put comments here; -m4_define(DALLOCATE,{! macro expansion of dallocate; -! 
deallocate _1 -#ifdef DEBUG - - if( .not.allocated($1) ) then - cput = MPI_WTIME() - write(6,'("macros : ",f10.2," : myid=",i3," ; $1 not allocated ;")') cput-cpus, myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop '$1 allocated' - endif - - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; de-allocating $1 ;")') myid - -#endif - - deallocate($1,stat=astat) - -#ifdef DEBUG - - if( astat.ne.0 ) then - cput = MPI_WTIME() - write(6,'("macros : ",f10.2," : myid=",i3," ; error de-allocating $1 ;")') cput-cpus, myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop 'error allocating $1' - - endif - - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; de-allocated $1 ;")') myid - -#endif - - ! macro expansion of dallocate; end;})m4_dnl ; can put comments here; -m4_define(FATAL,{! macro expansion of fatal; -! called from within _1; check logical expression in _2 and exit with reason _3 if the expression _2 is true - if( $2 ) then - write(6,'("$1 : fatal : myid=",i3," ; $2 ; $3;")') myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "$1 : $2 : $3 ;" - endif - - ! macro expansion of fatal; end;})m4_dnl ; can put comments here; -m4_define(LlBCAST,{! macro expansion of llbcast; -! logical MPI broadcast of _1; length is _2; what is _3 ? - -#ifdef DEBUG - - if( $2.lt.0 ) then - write(6,'(" : ",10x," ; myid=",i3," ; error broadcasting $1 ;")') myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop - endif - - if( Wmacros ) write(6,'(" : ",10x," ; myid=",i3," ; lvol=",i3," ; broadcasting $1 ;")') myid, $3+1 - -#endif - - call MPI_BCAST($1,$2,MPI_LOGICAL,$3,MPI_COMM_SPEC,ierr) - -#ifdef DEBUG - - if( Wmacros ) write(6,'(" : ",10x," ; myid=",i3," ; lvol=",i3," ; broadcasted $1 ;")') myid, $3+1 - -#endif - - ! macro expansion of llbcast; end;})m4_dnl can put comments here -m4_define(IlBCAST,{! macro expansion of ilbcast; -! integer MPI broadcast of _1; length is _2; what is _3 ? - -#ifdef DEBUG - - if( $2.lt.0 ) then - write(6,'(" : "10x" ; myid=",i3," ; error broadcasting $1 ;")') myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop - endif - - if( Wmacros ) write(6,'(" : "10x" ; myid=",i3," ; lvol=",i3," ; broadcasting $1 ;")') myid, $3 - -#endif - - call MPI_BCAST( $1, $2, MPI_INTEGER, $3, MPI_COMM_SPEC, ierr ) - -#ifdef DEBUG - - if( Wmacros ) write(6,'(" : "10x" ; myid=",i3," ; lvol=",i3," ; broadcasted $1 ;")') myid, $3 - -#endif - - ! macro expansion of ilbcast; end;})m4_dnl can put comments here -m4_define(RlBCAST,{! macro expansion of rlbcast; -! real MPI broadcast of _1; length is _2; _3 is radial volume which is worked on - -#ifdef DEBUG - - if( $2.lt.0 ) then - write(6,'(" : "10x" ; myid=",i3," ; error broadcasting $1 ;")')myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop - endif - - if( Wmacros ) write(6,'(" : "10x" ; myid=",i3," ; lvol=",i3," ; broadcasting $1 ;")') myid, $3+1 - -#endif - - call MPI_BCAST($1,$2,MPI_DOUBLE_PRECISION,$3,MPI_COMM_SPEC,ierr) - -#ifdef DEBUG - - if( Wmacros ) write(6,'(" : "10x" ; myid=",i3," ; lvol=",i3," ; broadcasted $1 ;")') myid, $3+1 - -#endif - - ! macro expansion of rlbcast; end;})m4_dnl can put comments here -m4_define(ClBCAST,{! macro expansion of clbcast; -! 
character MPI broadcast of _1; length is _2; index is _3 - -#ifdef DEBUG - - if( $2.lt.0 ) then - write(6,'(" : "10x" ; myid=",i3," ; lvol=",i3," ; error broadcasting $1 ;")') myid, $3+1 - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop - endif - - if( Wmacros ) write(6,'(" : "10x" ; myid=",i3," ; lvol=",i3," ; broadcasting $1 ;")') myid, $3+1 - -#endif - - call MPI_BCAST($1,$2,MPI_CHARACTER,$3,MPI_COMM_SPEC,ierr) - -#ifdef DEBUG - - if( Wmacros ) write(6,'(" : "10x" ; myid=",i3," ; lvol=",i3," ; broadcasted $1 ;")') myid, $3+1 - -#endif - - ! macro expansion of clbcast; end;})m4_dnl can put comments here -m4_define(CALL,{! macro expansion of call; -! call _2 with arguments _3 from within _1 - - cput = MPI_WTIME() - T$1 = T$1 + ( cput-cpuo ) - - call $2$3 - - !cpuo = zero - cpuo = MPI_WTIME() - - ! macro expansion of call; end;})m4_dnl ; can put comments here; -m4_define(WCALL,{! macro expansion of wcall; -! call _2 with arguments _3 from within _1 and write wall time needed for call to screen - - cput = MPI_WTIME() - T$1 = T$1 + ( cput-cpuo ) - -#ifdef DEBUG - if( W$1 ) then - cput = MPI_WTIME() - write(ounit,'("$1 : ",f10.2," : myid=",i3," ; calling $2 ;")') cput-cpus, myid - endif -#endif - - call $2$3 - -#ifdef DEBUG - if( W$1 ) then - cput = MPI_WTIME() - write(ounit,'("$1 : ",f10.2," : myid=",i3," ; called $2 ;")') cput-cpus, myid - endif -#endif - - !cpuo = zero - cpuo = MPI_WTIME() - - ! macro expansion of wcall; end;})m4_dnl ; can put comments here; -m4_define(HDEFGRP,{! macro expansion of hdefgrp; -! define a HDF5 group in _1 with name _2 and save reference into hid_t _3; _4 and _5 should be __FILE__ and __LINE__ - - call h5lexists_f($1, "$2", grp_exists, hdfier) - - if (.not.grp_exists) then - ! if group does not exist, create it - call h5gcreate_f($1, "$2", $3, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5gcreate_f from hdefgrp at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5gcreate_f from hdefgrp at $4:$5 ;" - endif - else - ! if the group already exists, open it - call h5gopen_f($1, "$2", $3, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5gopen_f from hdefgrp at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5gopen_f from hdefgrp at $4:$5 ;" - endif - endif - - ! macro expansion of hdefgrp; end;})m4_dnl ; can put comments here; -m4_define(HCLOSEGRP,{! macro expansion of hclosegrp; -! close a HDF5 group given in _1 - - call h5gclose_f($1, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5gclose_f from hclosegrp at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5gclose_f from hclosegrp at $4:$5 ;" - endif - - ! macro expansion of hclosegrp; end;})m4_dnl ; can put comments here; -m4_define(H5DESCR,{! macro expansion of h5descr; -! describe an already-open HDF5 object given in _1 at location _2 with text given in _3 and leave it open; _4 and _5 should be __FILE__ and __LINE__ -! also write a LaTeX comment on that output object -!latex \item{\verb+$2+} $3 - attr_data = "$3" - attrlen=len(attr_data) - - call h5screate_simple_f(arank, adims, aspace_id, hdfier) ! Create scalar data space for the attribute. 
- if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from h5descr at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from h5descr at $4:$5 ;" - endif - - call h5tcopy_f(H5T_NATIVE_CHARACTER, atype_id, hdfier) ! Create datatype for the attribute. - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5tcopy_f from h5descr at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5tcopy_f from h5descr at $4:$5 ;" - endif - - call h5tset_size_f(atype_id, attrlen, hdfier) ! Create datatype for the attribute. - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5tset_size_f from h5descr at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5tset_size_f from h5descr at $4:$5 ;" - endif - - call h5acreate_f($1, aname, atype_id, aspace_id, attr_id, hdfier) ! create descriptive attribute - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5acreate_f from h5descr at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5acreate_f from h5descr at $4:$5 ;" - endif - - call h5awrite_f(attr_id, atype_id, attr_data, adims, hdfier) ! Write the attribute data. - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5awrite_f from h5descr at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5awrite_f from h5descr at $4:$5 ;" - endif - - call h5aclose_f(attr_id, hdfier) ! Close the attribute. - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5aclose_f from h5descr at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5aclose_f from h5descr at $4:$5 ;" - endif - - call h5tclose_f(atype_id, hdfier) ! Close the attribute datatype. - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5tclose_f from h5descr at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5tclose_f from h5descr at $4:$5 ;" - endif - - call h5sclose_f(aspace_id, hdfier) ! Terminate access to the data space. - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5sclose_f from h5descr at $4:$5 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5sclose_f from h5descr at $4:$5 ;" - endif - - ! macro expansion of h5descr; end;})m4_dnl ; can put comments here; - m4_define(H5DESCR_CDSET,{! macro expansion of h5descr_cdset; -! describe an already-open HDF5 dataset identified by dset_id at location _1 with text given in _2 and close it at the end; _3 and _4 should be __FILE__ and __LINE__ -! also write a LaTeX comment on that output object -!latex \item{\verb+$1+} $2 - attr_data = "$2" - attrlen=len(attr_data) - - ! Create scalar data space for the attribute. - call h5screate_simple_f(arank, adims, aspace_id, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from h5descr_cdset at $3:$4 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from h5descr_cdset at $3:$4 ;" - endif - - ! Create datatype for the attribute. 
- call h5tcopy_f(H5T_NATIVE_CHARACTER, atype_id, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5tcopy_f from h5descr_cdset at $3:$4 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5tcopy_f from h5descr_cdset at $3:$4 ;" - endif - - call h5tset_size_f(atype_id, attrlen, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5tset_size_f from h5descr_cdset at $3:$4 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5tset_size_f from h5descr_cdset at $3:$4 ;" - endif - - ! create descriptive attribute - call h5acreate_f(dset_id, aname, atype_id, aspace_id, attr_id, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5acreate_f from h5descr_cdset at $3:$4 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5acreate_f from h5descr_cdset at $3:$4 ;" - endif - - ! Write the attribute data. - call h5awrite_f(attr_id, atype_id, attr_data, adims, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5awrite_f from h5descr_cdset at $3:$4 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5awrite_f from h5descr_cdset at $3:$4 ;" - endif - - ! Close the attribute. - call h5aclose_f(attr_id, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5aclose_f from h5descr_cdset at $3:$4 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5aclose_f from h5descr_cdset at $3:$4 ;" - endif - - ! Close the attribute datatype. - call h5tclose_f(atype_id, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5tclose_f from h5descr_cdset at $3:$4 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5tclose_f from h5descr_cdset at $3:$4 ;" - endif - - ! Terminate access to the data space. - call h5sclose_f(aspace_id, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5sclose_f from h5descr_cdset at $3:$4 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5sclose_f from h5descr_cdset at $3:$4 ;" - endif - - call h5dclose_f(dset_id, hdfier) ! terminate dataset; - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dclose_f from h5descr_cdset at $3:$4 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dclose_f from h5descr_cdset at $3:$4 ;" - endif - - ! macro expansion of h5descr_cdset; end;})m4_dnl ; can put comments here; -m4_define(HWRITELV,{! macro expansion of hwritelv; -! write logical variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1; _5 and _6 should be __FILE__ and __LINE__ -! example: hwritelv( grpInputGlobal, 1, LreadGF, (/ LreadGF /) ) ! scalar -! example: hwritelv( grpInput, 5, success, success(1:5) ) ! rank-1 - -#ifdef DEBUG - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; writing $3 ;")') myid -#endif - - rank = 1 ; onedims(1) = $2 - - if( $2.le.0 ) then - - write(6,'("sphdf5 : "10x" : error calling hwriteiv ; $3 : $2.le.0 at $5:$6 ;")') - - else - - call h5screate_simple_f( rank, onedims, space_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwritelv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from hwritelv at $5:$6 ;" - endif - - ! 
temporarily disable error printing to not confuse users - call h5eset_auto_f(0, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritelv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwritelv at $5:$6 ;" - endif - - ! check if dataset can be opened - call h5dopen_f($1, "$3", dset_id, hdfier) - if (hdfier.lt.0) then - var_exists = .false. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 does not exist yet, creating it" - endif - else - var_exists = .true. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 exists already, opening it" - endif - endif - - ! re-establish previous state of error printing to be sensitive to "real" errors - call h5eset_auto_f(internalHdf5Msg, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritelv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwritelv at $5:$6 ;" - endif - - ! if the dataset does not exist already, create it. Otherwise, it should be open already - if (.not.var_exists) then - call h5dcreate_f( $1, "$3", H5T_NATIVE_INTEGER, space_id, dset_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwritelv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dcreate_f from hwritelv at $5:$6 ;" - endif - endif - - call h5dwrite_f( dset_id, H5T_NATIVE_INTEGER, merge(1,0,$4) , onedims, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwritelv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dwrite_f from hwritelv at $5:$6 ;" - endif - - call h5dclose_f(dset_id, hdfier) ! terminate dataset; - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dclose_f from hwritelv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dclose_f from hwritelv at $5:$6 ;" - endif - - endif - - ! macro expansion of hwritelv; end;})m4_dnl ; can put comments here; -m4_define(HWRITELV_LO,{! macro expansion of hwritelv_lo; -! write logical variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1 and leave dataset open for e.g. adding an attribute; _5 and _6 should be __FILE__ and __LINE__ -! example: hwritelv_lo( grpInputGlobal, 1, LreadGF, (/ LreadGF /) ) ! scalar -! example: hwritelv_lo( grpInput, 5, success, success(1:5) ) ! rank-1 -! and close it using h5descr_cdset( /input/global/LreadGF, reading flag for GF ) - -#ifdef DEBUG - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; writing $3 ;")') myid -#endif - - rank = 1 ; onedims(1) = $2 - - if( $2.le.0 ) then - - write(6,'("sphdf5 : "10x" : error calling hwriteiv ; $3 : $2.le.0 at $5:$6 ;")') - - else - - call h5screate_simple_f( rank, onedims, space_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwritelv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from hwritelv_lo at $5:$6 ;" - endif - - ! 
temporarily disable error printing to not confuse users - call h5eset_auto_f(0, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritelv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwritelv_lo at $5:$6 ;" - endif - - ! check if dataset can be opened - call h5dopen_f($1, "$3", dset_id, hdfier) - if (hdfier.lt.0) then - var_exists = .false. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 does not exist yet, creating it" - endif - else - var_exists = .true. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 exists already, opening it" - endif - endif - - ! re-establish previous state of error printing to be sensitive to "real" errors - call h5eset_auto_f(internalHdf5Msg, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritelv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwritelv_lo at $5:$6 ;" - endif - - ! if the dataset does not exist already, create it. Otherwise, it should be open already - if (.not.var_exists) then - call h5dcreate_f( $1, "$3", H5T_NATIVE_INTEGER, space_id, dset_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwritelv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dcreate_f from hwritelv_lo at $5:$6 ;" - endif - endif - - call h5dwrite_f( dset_id, H5T_NATIVE_INTEGER, merge(1,0,$4) , onedims, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwritelv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dwrite_f from hwritelv_lo at $5:$6 ;" - endif - - endif - - ! macro expansion of hwritelv_lo; end;})m4_dnl ; can put comments here; -m4_define(HWRITEIV,{! macro expansion of hwriteiv; -! write integer variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1; _5 and _6 should be __FILE__ and __LINE__ -! example: hwriteiv( grpInputPhysics, 1, Igeometry, (/ Igeometry /) ) ! scalar -! example: hwriteiv( grpInputPhysics, Mvol, Lrad, Lrad(1:Mvol) ) ! rank-1 - -#ifdef DEBUG - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; writing $3 ;")') myid -#endif - - rank = 1 ; onedims(1) = $2 - - if( $2.le.0 ) then - - write(6,'("sphdf5 : "10x" : error calling hwriteiv ; $3 : $2.le.0 at $5:$6 ;")') - - else - - call h5screate_simple_f( rank, onedims, space_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriteiv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from hwriteiv at $5:$6 ;" - endif - - ! temporarily disable error printing to not confuse users - call h5eset_auto_f(0, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriteiv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriteiv at $5:$6 ;" - endif - - ! check if dataset can be opened - call h5dopen_f($1, "$3", dset_id, hdfier) - if (hdfier.lt.0) then - var_exists = .false. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 does not exist yet, creating it" - endif - else - var_exists = .true. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 exists already, opening it" - endif - endif - - ! 
re-establish previous state of error printing to be sensitive to "real" errors - call h5eset_auto_f(internalHdf5Msg, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriteiv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriteiv at $5:$6 ;" - endif - - ! if the dataset does not exist already, create it. Otherwise, it should be open already - if (.not.var_exists) then - call h5dcreate_f( $1, "$3", H5T_NATIVE_INTEGER, space_id, dset_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriteiv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dcreate_f from hwriteiv at $5:$6 ;" - endif - endif - - call h5dwrite_f( dset_id, H5T_NATIVE_INTEGER, $4, onedims, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriteiv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dwrite_f from hwriteiv at $5:$6 ;" - endif - - call h5dclose_f(dset_id, hdfier) ! terminate dataset; - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dclose_f from hwriteiv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dclose_f from hwriteiv at $5:$6 ;" - endif - - endif - - ! macro expansion of hwriteiv; end;})m4_dnl ; can put comments here; -m4_define(HWRITEIV_LO,{! macro expansion of hwriteiv_lo; -! write integer variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1 and leave the dataset open for e.g. adding and attribute -! example: hwriteiv( grpInputPhysics, 1, Igeometry, (/ Igeometry /) ) ! scalar -! example: hwriteiv( grpInputPhysics, Mvol, Lrad, Lrad(1:Mvol) ) ! rank-1 -! and close it using h5descr_cdset( /input/physics/Igeometry, geometry identifier ) - -#ifdef DEBUG - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; writing $3 ;")') myid -#endif - - rank = 1 ; onedims(1) = $2 - - if( $2.le.0 ) then - - write(6,'("sphdf5 : "10x" : error calling hwriteiv_lo ; $3 : $2.le.0 at $5:$6 ;")') - - else - - call h5screate_simple_f( rank, onedims, space_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriteiv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from hwriteiv_lo at $5:$6 ;" - endif - - ! temporarily disable error printing to not confuse users - call h5eset_auto_f(0, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriteiv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriteiv_lo at $5:$6 ;" - endif - - ! check if dataset can be opened - call h5dopen_f($1, "$3", dset_id, hdfier) - if (hdfier.lt.0) then - var_exists = .false. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 does not exist yet, creating it" - endif - else - var_exists = .true. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 exists already, opening it" - endif - endif - - ! 
re-establish previous state of error printing to be sensitive to "real" errors - call h5eset_auto_f(internalHdf5Msg, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriteiv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriteiv_lo at $5:$6 ;" - endif - - ! if the dataset does not exist already, create it. Otherwise, it should be open already - if (.not.var_exists) then - call h5dcreate_f( $1, "$3", H5T_NATIVE_INTEGER, space_id, dset_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriteiv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dcreate_f from hwriteiv_lo at $5:$6 ;" - endif - endif - - call h5dwrite_f( dset_id, H5T_NATIVE_INTEGER, $4, onedims, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriteiv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dwrite_f from hwriteiv_lo at $5:$6 ;" - endif - - endif - - ! macro expansion of hwriteiv_lo; end;})m4_dnl ; can put comments here; -m4_define(HWRITERV,{! macro expansion of hwriterv; -! write real variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1; _5 and _6 should be __LINE__ and __FILE__ -! example: hwriterv( grpInputPhysics, 1, phiedge, (/ phiedge /) ) ! scalar -! example: hwriterv( grpInputPhysics, Mvol, tflux, tflux(1:Mvol) ) ! rank-1 - -#ifdef DEBUG - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; writing $3 ;")') myid -#endif - - rank = 1 ; onedims(1) = $2 - - if( $2.le.0 ) then - - write(6,'("sphdf5 : "10x" : error calling hwriterv ; $3 : $2.le.0 at $5:$6 ;")') - - else - - call h5screate_simple_f( rank, onedims, space_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriterv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from hwriterv at $5:$6 ;" - endif - - ! temporarily disable error printing to not confuse users - call h5eset_auto_f(0, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriterv at $5:$6 ;" - endif - - ! check if dataset can be opened - call h5dopen_f($1, "$3", dset_id, hdfier) - if (hdfier.lt.0) then - var_exists = .false. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 does not exist yet, creating it" - endif - else - var_exists = .true. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 exists already, opening it" - endif - endif - - ! re-establish previous state of error printing to be sensitive to "real" errors - call h5eset_auto_f(internalHdf5Msg, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriterv at $5:$6 ;" - endif - - ! if the dataset does not exist already, create it. 
Otherwise, it should be open already - if (.not.var_exists) then - call h5dcreate_f( $1, "$3", H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriterv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dcreate_f from hwriterv at $5:$6 ;" - endif - endif - - call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, $4, onedims, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriterv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dwrite_f from hwriterv at $5:$6 ;" - endif - - call h5dclose_f(dset_id, hdfier) ! terminate dataset; - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dclose_f from hwriterv at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dclose_f from hwriterv at $5:$6 ;" - endif - - endif - - ! macro expansion of hwriterv; end;})m4_dnl ; can put comments here; -m4_define(HWRITERV_LO,{! macro expansion of hwriterv_lo; -! write real variable _4 (scalar (_2=1) or rank-1 (_2=length)) into a dataset named _3 into group _1 and leave it open, e.g. for adding an attribute; _5 and _6 should be __FILE__ and __LINE__ -! example: hwriterv( grpInputPhysics, 1, phiedge, (/ phiedge /) ) ! scalar -! example: hwriterv( grpInputPhysics, Mvol, tflux, tflux(1:Mvol) ) ! rank-1 -! and close it with h5descr_cdset( /input/physics/phiedge, total enclosed toroidal flux ) - -#ifdef DEBUG - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; writing $3 ;")') myid -#endif - - rank = 1 ; onedims(1) = $2 - - if( $2.le.0 ) then - - write(6,'("sphdf5 : "10x" : error calling hwriterv ; $3 : $2.le.0 at $5:$6 ;")') - - else - - call h5screate_simple_f( rank, onedims, space_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriterv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from hwriterv_lo at $5:$6 ;" - endif - - ! temporarily disable error printing to not confuse users - call h5eset_auto_f(0, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriterv_lo at $5:$6 ;" - endif - - ! check if dataset can be opened - call h5dopen_f($1, "$3", dset_id, hdfier) - if (hdfier.lt.0) then - var_exists = .false. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 does not exist yet, creating it" - endif - else - var_exists = .true. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $3 exists already, opening it" - endif - endif - - ! re-establish previous state of error printing to be sensitive to "real" errors - call h5eset_auto_f(internalHdf5Msg, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriterv_lo at $5:$6 ;" - endif - - ! if the dataset does not exist already, create it. 
Otherwise, it should be open already - if (.not.var_exists) then - call h5dcreate_f( $1, "$3", H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriterv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dcreate_f from hwriterv_lo at $5:$6 ;" - endif - endif - - call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, $4, onedims, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriterv_lo at $5:$6 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dwrite_f from hwriterv_lo at $5:$6 ;" - endif - - endif - - ! macro expansion of hwriterv_lo; end;})m4_dnl ; can put comments here; -m4_define(HWRITERA,{! macro expansion of hwritea; -! write real array _5 (_2 rows, _3 columns) into a dataset named _4 into group _1; _6 and _7 should be __FILE__ and __LINE__ -! example: hwritera( grpInputPhysics, (2*Ntor+1), (2*Mpol+1), Rbc, Rbc(-Ntor:Ntor,-Mpol:Mpol) ) - -#ifdef DEBUG - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; writing $4 ;")') myid -#endif - - rank = 2 ; twodims(1:2) = (/ $2, $3 /) - - if( $2.le.0 .or. $3.le.0 ) then - - write(6,'("sphdf5 : "10x" : error calling hwritera ; $4 : $2.le.0 .or. $3.le.0 at $6:$7 ;")') - - else - - call h5screate_simple_f( rank, twodims, space_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwritera at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from hwritera at $6:$7 ;" - endif - - ! temporarily disable error printing to not confuse users - call h5eset_auto_f(0, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritera at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwritera at $6:$7 ;" - endif - - ! check if dataset can be opened - call h5dopen_f($1, "$4", dset_id, hdfier) - if (hdfier.lt.0) then - var_exists = .false. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $4 does not exist yet, creating it" - endif - else - var_exists = .true. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $4 exists already, opening it" - endif - endif - - ! re-establish previous state of error printing to be sensitive to "real" errors - call h5eset_auto_f(internalHdf5Msg, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritera at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwritera at $6:$7 ;" - endif - - ! if the dataset does not exist already, create it. Otherwise, it should be open already - if (.not.var_exists) then - call h5dcreate_f( $1, "$4", H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwritera at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dcreate_f from hwritera at $6:$7 ;" - endif - endif - - call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, $5, twodims, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwritera at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dwrite_f from hwritera at $6:$7 ;" - endif - - call h5dclose_f(dset_id, hdfier) ! 
terminate dataset; - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dclose_f from hwritera at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dclose_f from hwritera at $6:$7 ;" - endif - - endif - - ! macro expansion of hwritea; end;})m4_dnl ; can put comments here; -m4_define(HWRITERA_LO,{! macro expansion of hwritea_lo; -! write real array _5 (_2 rows, _3 columns) into a dataset named _4 into group _1 and leave it open, e.g. for adding an attribute; _6 and _7 should be __FILE__ and __LINE__ -! example: hwritera( grpInputPhysics, (2*Ntor+1), (2*Mpol+1), Rbc, Rbc(-Ntor:Ntor,-Mpol:Mpol) ) -! and close it then via h5descr_cdset( /input/physics/Rbc, boundary R cosine Fourier coefficients ) - -#ifdef DEBUG - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; writing $4 ;")') myid -#endif - - rank = 2 ; twodims(1:2) = (/ $2, $3 /) - - if( $2.le.0 .or. $3.le.0 ) then - - write(6,'("sphdf5 : "10x" : error calling hwritera ; $4 : $2.le.0 .or. $3.le.0 at $6:$7 ;")') - - else - - call h5screate_simple_f( rank, twodims, space_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwritera_lo at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from hwritera_lo at $6:$7 ;" - endif - - ! temporarily disable error printing to not confuse users - call h5eset_auto_f(0, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritera_lo at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwritera_lo at $6:$7 ;" - endif - - ! check if dataset can be opened - call h5dopen_f($1, "$4", dset_id, hdfier) - if (hdfier.lt.0) then - var_exists = .false. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $4 does not exist yet, creating it" - endif - else - var_exists = .true. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $4 exists already, opening it" - endif - endif - - ! re-establish previous state of error printing to be sensitive to "real" errors - call h5eset_auto_f(internalHdf5Msg, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwritera_lo at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwritera_lo at $6:$7 ;" - endif - - ! if the dataset does not exist already, create it. Otherwise, it should be open already - if (.not.var_exists) then - call h5dcreate_f( $1, "$4", H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwritera_lo at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dcreate_f from hwritera_lo at $6:$7 ;" - endif - endif - - call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, $5, twodims, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwritera_lo at $6:$7 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dwrite_f from hwritera_lo at $6:$7 ;" - endif - - endif - - ! macro expansion of hwritea_lo; end;})m4_dnl ; can put comments here; -m4_define(HWRITERC,{! macro expansion of hwriterc; -! write real cube _6 (_2 rows, _3 columns, _4 pages) into a dataset named _5 into group _1; _7 and _8 should containt __FILE__ and __LINE__ -! 
example: hwriterc( grpOutput, (Mrad+1), 2, 2, TT, TT(0:Mrad,0:1,0:1) ) - -#ifdef DEBUG - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; writing $5 ;")') myid -#endif - - rank = 3 ; threedims(1:3) = (/ $2, $3, $4 /) - - if( $2.le.0 .or. $3.le.0 .or. $4.le.0 ) then - - write(6,'("sphdf5 : "10x" : error calling hwriterc ; $5 : $2.le.0 .or. $3.le.0 .or. $4.le.0 at $7:$8 ;")') - - else - - call h5screate_simple_f( rank, threedims, space_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriterc at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from hwriterc at $7:$8 ;" - endif - - ! temporarily disable error printing to not confuse users - call h5eset_auto_f(0, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterc at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriterc at $7:$8 ;" - endif - - ! check if dataset can be opened - call h5dopen_f($1, "$5", dset_id, hdfier) - if (hdfier.lt.0) then - var_exists = .false. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $5 does not exist yet, creating it" - endif - else - var_exists = .true. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $5 exists already, opening it" - endif - endif - - ! re-establish previous state of error printing to be sensitive to "real" errors - call h5eset_auto_f(internalHdf5Msg, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterc at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriterc at $7:$8 ;" - endif - - ! if the dataset does not exist already, create it. Otherwise, it should be open already - if (.not.var_exists) then - call h5dcreate_f( $1, "$5", H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriterc at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dcreate_f from hwriterc at $7:$8 ;" - endif - endif - - call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, $6, threedims, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriterc at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dwrite_f from hwriterc at $7:$8 ;" - endif - - call h5dclose_f(dset_id, hdfier) ! terminate dataset; - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dclose_f from hwriterc at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dclose_f from hwriterc at $7:$8 ;" - endif - - endif - - ! macro expansion of hwriterc; end;})m4_dnl ; can put comments here; -m4_define(HWRITERC_LO,{! macro expansion of hwriterc_lo; -! write real cube _6 (_2 rows, _3 columns, _4 pages) into a dataset named _5 into group _1 and leave open for e.g. adding an attribute; _7 and _8 should be __FILE__ and __LINE__ -! example: hwriterc( grpOutput, (Mrad+1), 2, 2, TT, TT(0:Mrad,0:1,0:1) ) -! and close it with h5descr_cdset( /output/TT, something abbreviated by TT ) - -#ifdef DEBUG - if( Wmacros ) write(ounit,'("macros : ", 10x ," : myid=",i3," ; writing $5 ;")') myid -#endif - - rank = 3 ; threedims(1:3) = (/ $2, $3, $4 /) - - if( $2.le.0 .or. $3.le.0 .or. $4.le.0 ) then - - write(6,'("sphdf5 : "10x" : error calling hwriterc ; $5 : $2.le.0 .or. 
$3.le.0 .or. $4.le.0 at $7:$8 ;")') - - else - - call h5screate_simple_f( rank, threedims, space_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5screate_simple_f from hwriterc_lo at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5screate_simple_f from hwriterc_lo at $7:$8 ;" - endif - - ! temporarily disable error printing to not confuse users - call h5eset_auto_f(0, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterc_lo at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriterc_lo at $7:$8 ;" - endif - - ! check if dataset can be opened - call h5dopen_f($1, "$5", dset_id, hdfier) - if (hdfier.lt.0) then - var_exists = .false. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $5 does not exist yet, creating it" - endif - else - var_exists = .true. - if (hdfDebug .and. myid.eq.0) then ; write(*,*) "dataset $5 exists already, opening it" - endif - endif - - ! re-establish previous state of error printing to be sensitive to "real" errors - call h5eset_auto_f(internalHdf5Msg, hdfier) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5eset_auto_f from hwriterc_lo at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5eset_auto_f from hwriterc_lo at $7:$8 ;" - endif - - ! if the dataset does not exist already, create it. Otherwise, it should be open already - if (.not.var_exists) then - call h5dcreate_f( $1, "$5", H5T_NATIVE_DOUBLE, space_id, dset_id, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dcreate_f from hwriterc_lo at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dcreate_f from hwriterc_lo at $7:$8 ;" - endif - endif - - call h5dwrite_f( dset_id, H5T_NATIVE_DOUBLE, $6, threedims, hdfier ) - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling h5dwrite_f from hwriterc_lo at $7:$8 ;")') - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling h5dwrite_f from hwriterc_lo at $7:$8 ;" - endif - - endif - - ! macro expansion of hwriterc_lo; end;})m4_dnl ; can put comments here; -m4_define(H5CALL,{! macro expansion of h5call; -! wrapper for HDF5 API calls which checks the hdfier error flag after each call -! and interrupts execution of the program right away if anything goes wrong -! call _2 with arguments _3 from within _1 and write wall time needed for call to screen; _4 and _5 should be __FILE__ and __LINE__ - - call $2$3 - if( hdfier.ne.0 ) then - write(6,'("sphdf5 : "10x" : error calling $2 in process ",i3," from h5call at $4:$5;")') myid - call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) - stop "sphdf5 : error calling $2 from h5call at $4:$5 ;" - endif - - ! macro expansion of h5call; end;})m4_dnl ; can put comments here; -m4_define(BEGIN,{! macro expansion of begin; -! comment the start of a subroutine to screen output - - !cpui = zero - cpui = MPI_WTIME() - cpuo = cpui -#ifdef OPENMP - nthreads = omp_get_max_threads() -#else - nthreads = 1 -#endif - -#ifdef DEBUG - - if( W$1 ) write(ounit,'("$1 : ",f10.2," : myid=",i3," ; start ;")') cpui-cpus, myid - -#endif - - ! macro expansion of begin; end;})m4_dnl ; can put comments here; -m4_define(LREGION,{! macro expansion of lregion; -! given annulus number _1, set Lcoordinatesingularity, Lplasmaregion and Lvacuumregion accordingly - - if( Igeometry.eq.1 .or. 
$1.gt.1 ) then ; Lcoordinatesingularity = .false. - else ; Lcoordinatesingularity = .true. - endif - - if( $1.le.Nvol ) then ; Lplasmaregion = .true. - else ; Lplasmaregion = .false. - endif - - Lvacuumregion = .not.Lplasmaregion - - ! macro expansion of lregion; end;})m4_dnl ; can put comments here; -m4_define(RETURN,{! macro expansion of return; -! comment the end of a subroutine to screen output - -9999 continue - - cput = MPI_WTIME() - T$1 = T$1 + ( cput-cpuo ) - -#ifdef DEBUG - - if( W$1 ) write(ounit,'("$1 : ",f10.2," : myid=",i3," ; finish ; time=",f10.2," ;")') cput-cpus, myid, cput-cpui - -#endif - - return - - ! macro expansion of return; end;})m4_dnl ; can put comments here; -m4_define(SUMTIME,{! macro expansion of sumtime; - -#ifdef DEBUG - if( Wmacros ) write(ounit,'(" : ", 10x ," : calling mpi_reduce : $1 ;")') -#endif - - call MPI_REDUCE(T$1, $1T, 1, MPI_DOUBLE_PRECISION, MPI_SUM, 0, MPI_COMM_SPEC, ierr ) - -#ifdef DEBUG - if( Wmacros ) write(ounit,'(" : ", 10x ," : called mpi_reduce : $1 ;")') -#endif - - ! macro expansion of sumtime; end;})m4_dnl ; can put comments here; -m4_define(PRTTIME,{! macro expansion of prttime; - - write(ounit,'("finish : ",f10.2," : time spent in $1 =",f10.2," ;")') cput-cpus, $1T ; Ttotal = Ttotal + $1T - - ! macro expansion of prttime; end;})m4_dnl ; can put comments here; - -m4_define(TMPOUT,{ !Temperarily output a message to help debugging - if( myid .eq. 0) write(ounit,*) "### DEBUG : $1 = ", $1 -})m4_dnl - - -m4_define(MPIOUT,{ !Temperarily output a message to help debugging - write(ounit,*) "*** DEBUG : myid = ", myid, " ; $1 = ", $1 -})m4_dnl diff --git a/src/manual.f90 b/src/manual.F90 similarity index 94% rename from src/manual.f90 rename to src/manual.F90 index 22cc8271..f8aef8d6 100644 --- a/src/manual.f90 +++ b/src/manual.F90 @@ -13,23 +13,23 @@ !latex \subsection{poloidal flux and rotational transform} -!latex Given the canonical integrable form, -!latex ${\bf A} = \psi \nabla \t - \chi(\psi) \nabla \z$, -!latex we can derive +!latex Given the canonical integrable form, +!latex ${\bf A} = \psi \nabla \t - \chi(\psi) \nabla \z$, +!latex we can derive !latex ${\bf B} = \nabla \psi \times \nabla \t +\nabla \zeta \times \nabla \psi \;\chi^\prime$. -!latex The poloidal flux is given by +!latex The poloidal flux is given by !latex \begin{eqnarray} \Psi_p = \int \!\! \int {\bf B} \cdot {\bf e}_\zeta \times{\bf e}_\psi \; d\zeta d\psi = 2 \pi \int \chi^\prime d\psi. !latex \end{eqnarray} -!latex The rotational-transform is +!latex The rotational-transform is !latex \begin{eqnarray} \iotabar = \frac{ {\bf B} \cdot \nabla \t}{ {\bf B} \cdot \nabla \zeta} = \chi^\prime. !latex \end{eqnarray} !latex The rotational-transform is the same sign as the poloidal flux. -!latex +!latex !latex The SPEC representation for the magnetic vector potential is !latex \begin{eqnarray} {\bf A} = A_\t \nabla \t + A_\z \nabla \z, !latex \end{eqnarray} !latex where we can see that $A_\z = - \chi$. -!latex The poloidal flux is +!latex The poloidal flux is !latex \begin{eqnarray} \int {\bf B}\cdot d{\bf s} = \oint A_\z d\z. !latex \end{eqnarray} !latex It would seem that the rotational-transform has opposite sign to $A_\z$. @@ -37,15 +37,15 @@ !latex \subsection{Outline} -!latex This document is intended to organise the different potentially valuable improvements to the SPEC code, which could make it more robust, faster, and increase its capabilities. 
+!latex This document is intended to organise the different potentially valuable improvements to the SPEC code, which could make it more robust, faster, and increase its capabilities. !latex The document is divided in two categories. -!latex In \Sec{NumericalImprovements}, Numerical Improvements: independent improvements that are of numerical importance but have no added physics value \emph{per se}, although they may allow new or better physics investigations. +!latex In \Sec{NumericalImprovements}, Numerical Improvements: independent improvements that are of numerical importance but have no added physics value \emph{per se}, although they may allow new or better physics investigations. !latex In \Sec{PhysicsApplications}, research topics that could be addressed with the code, either in its present form or after the completion of one or more topics listed in \Sec{NumericalImprovements}. !latex \subsection{Numerical Improvements} \label{sec:NumericalImprovements} !latex \subsubsection{Downloading \SPEC to \type{Git}} -!latex This should be straight forward and easy. +!latex This should be straight forward and easy. !latex SRH will work on this. Estimated date of completion 06/2017. !latex \subsubsection{Compile code with \type{GCC} for error checking} \label{sec:recompile} @@ -75,18 +75,18 @@ !latex Potential speed improvement is considerable. !latex \subsubsection{Exploit symmetry of ``local'' Beltrami matrices} \label{sec:beltrami} -!latex This is easy. Take a look at \link{matrix}, which constructs the Beltrami matrices, and \link{mp00ac}, which performs the inversion. +!latex This is easy. Take a look at \link{matrix}, which constructs the Beltrami matrices, and \link{mp00ac}, which performs the inversion. !latex Potential speed improvement is considerable. -!latex \subsubsection{Exploit block tri-diagonal structure of ``global'' linearized force balance matrix} +!latex \subsubsection{Exploit block tri-diagonal structure of ``global'' linearized force balance matrix} !latex This requires an efficient subroutine. !latex SRH believes that Hirshman constructed such a routine !latex [\paper{S.P. Hirshman {\em et al.}}{S.P. Hirshman, K.S. Purumalla {\em et al.}}{10.1016/j.jcp.2010.04.049}{J. Comput. Phys.}{229}{6392}{2010}]. !latex The potential speed improvement is tremendous. !latex See \link{newton} for where the tri-diagonal, linearized force-balance matrix is inverted. -!latex \subsubsection{Enforce Helicity constraint} \label{sec:L2} -!latex This will allow investigation of different, arguably more-physical classes of equilibria. +!latex \subsubsection{Enforce Helicity constraint} \label{sec:L2} +!latex This will allow investigation of different, arguably more-physical classes of equilibria. !latex See \link{ma02aa} !latex \subsubsection{Establish test-cases} \label{sec:testcase} @@ -106,7 +106,7 @@ !latex \subsubsection{Interpret eigenvectors and eigenvalues of Hessian} \label{sec:stability} !latex This is already completed: see \link{hesian}. !latex For toroidal geometry there is a complication; namely that the hessian matrix includes the derivatives of the spectral constraints. -!latex For Cartesian geometry, it is ready to go. +!latex For Cartesian geometry, it is ready to go. !latex SRH will begin writing a paper on the stability of slab MRxMHD equilibria. 
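[Editor's note — illustrative addition, not part of the patch] The Hessian-based stability diagnostic referred to just above rests on the second variation of the energy functional: at an equilibrium the force vanishes, so
\be W(x_0+\delta x) \approx W(x_0) + \frac{1}{2}\, \delta x^{T} H \, \delta x,
\ee
and a negative eigenvalue of $H$ marks an energy-lowering perturbation (given by its eigenvector), i.e. a linear instability; $H$ is the matrix computed in \link{hesian}.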
!latex \subsection{Physics Applications} \label{sec:PhysicsApplications} @@ -119,7 +119,7 @@ !latex requires: \Sec{freeb} !latex \subsubsection{Evaluate stability of MRxMHD equilibria} perhaps starting from simplest system (slab tearing). !latex requires: \Sec{stability} - + !latex \subsection{Revision of coordinate singularity: axisymmetric; polar coordinates;} !latex \begin{enumerate} @@ -147,7 +147,7 @@ !latex \be A_x & = & \sum_m r^m [ a_{m,0} + a_{m,1} \; r^2 + a_{m,2} \; r^4 + \dots ] \sin(m\t), \\ !latex A_y & = & \sum_m r^m [ b_{m,0} + b_{m,1} \; r^2 + b_{m,2} \; r^4 + \dots ] \cos(m\t), \\ !latex A_z & = & \sum_m r^m [ c_{m,0} + c_{m,1} \; r^2 + c_{m,2} \; r^4 + \dots ] \cos(m\t), \label{eq:regularAz} \\ -!latex g & = & \sum_m r^m [ g_{m,0} + g_{m,1} \; r^2 + g_{m,2} \; r^4 + \dots ] \sin(m\t), +!latex g & = & \sum_m r^m [ g_{m,0} + g_{m,1} \; r^2 + g_{m,2} \; r^4 + \dots ] \sin(m\t), !latex \ee !latex where attention is restricted to stellarator symmetric geometry, but similar expressions hold for the non-stellarator symmetric terms. !latex \item Collecting these expressions, the vector potential can be expressed @@ -193,7 +193,7 @@ !latex \ee !latex where the $f_{m,n}(\rho)$ and $g_{m,n}(\rho)$ are arbitrary polynomials in $\rho$. !latex \item Additional gauge freedom can be exploited: including an additional gauge term $\nabla h$ where $h$ only depends on $\z$, e.g. -!latex \be h(\z) = h_{0,0} \, \z + \sum h_{0,n} \sin( - n\z),\ee +!latex \be h(\z) = h_{0,0} \, \z + \sum h_{0,n} \sin( - n\z),\ee !latex does not change the magnetic field and does not change any of the above discussion. !latex \item The representation for the $A_{\t,m,n}$ does not change, but we must clarify that \Eqn{Azmn} holds for only the $m\ne0$ harmonics: !latex \be A_{\z,m,n} & = & r^{m} \;\;\;\;g_{m,n}(\rho), \;\;\;\mbox{\rm for $m \ne 0$}. @@ -209,7 +209,7 @@ !latex \item To simplify the algorithmic implementation of these conditions, !latex we shall introduce a `regularization' factor, $\rho^{m/2} = r^m$. !latex \item Note that the representation for $A_{\t,m,n}$ given in \Eqn{Atmn}, -!latex with an arbitrary polynomial $f_{m,n}(\rho) = f_{m,n,0} + f_{m,n,1}\rho + f_{m,n,2}\rho^2 + \dots$, +!latex with an arbitrary polynomial $f_{m,n}(\rho) = f_{m,n,0} + f_{m,n,1}\rho + f_{m,n,2}\rho^2 + \dots$, !latex is equivalent to $A_{\t,m,n} = \rho^{m/2} \alpha_{m,n}(\rho)$ where $\alpha_{m,n}(\rho)$ is an arbitrary polynomial !latex with the constraint $\alpha_{m,n}(0)=0$. !latex \item We can write the vector potential as @@ -247,12 +247,12 @@ !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! subroutine manual - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - + use constants, only : zero - use numerical, only : + use numerical, only : use fileunits, only : ounit @@ -261,20 +261,41 @@ subroutine manual use cputiming, only : Tmanual use allglobal, only : myid, cpus - + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - - LOCALS - BEGIN(manual) + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! ! this "routine" is purely for documentation; 08 Feb 16; - + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(manual) + +9999 continue + cput = MPI_WTIME() + Tmanual = Tmanual + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/matrix.f90 b/src/matrix.F90 similarity index 95% rename from src/matrix.f90 rename to src/matrix.F90 index 73466c59..e0749f7f 100644 --- a/src/matrix.f90 +++ b/src/matrix.F90 @@ -344,7 +344,7 @@ !> @param[in] mn !> @param[in] lrad subroutine matrix( lvol, mn, lrad ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, two @@ -377,37 +377,77 @@ subroutine matrix( lvol, mn, lrad ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - INTEGER, intent(in) :: lvol, mn, lrad + integer, intent(in) :: lvol, mn, lrad !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - INTEGER :: NN, ii, jj, ll, kk, pp, ll1, pp1, mi, ni, mj, nj, mimj, minj, nimj, ninj, mjmi, mjni, njmi, njni, id, jd + integer :: NN, ii, jj, ll, kk, pp, ll1, pp1, mi, ni, mj, nj, mimj, minj, nimj, ninj, mjmi, mjni, njmi, njni, id, jd - REAL :: Wtete, Wteto, Wtote, Wtoto - REAL :: Wteze, Wtezo, Wtoze, Wtozo - REAL :: Wzete, Wzeto, Wzote, Wzoto - REAL :: Wzeze, Wzezo, Wzoze, Wzozo + real(wp) :: Wtete, Wteto, Wtote, Wtoto + real(wp) :: Wteze, Wtezo, Wtoze, Wtozo + real(wp) :: Wzete, Wzeto, Wzote, Wzoto + real(wp) :: Wzeze, Wzezo, Wzoze, Wzozo - REAL :: Htete, Hteto, Htote, Htoto - REAL :: Hteze, Htezo, Htoze, Htozo - REAL :: Hzete, Hzeto, Hzote, Hzoto - REAL :: Hzeze, Hzezo, Hzoze, Hzozo + real(wp) :: Htete, Hteto, Htote, Htoto + real(wp) :: Hteze, Htezo, Htoze, Htozo + real(wp) :: Hzete, Hzeto, Hzote, Hzoto + real(wp) :: Hzeze, Hzezo, Hzoze, Hzozo - REAL,allocatable :: TTdata(:,:,:), TTMdata(:,:) ! queues to construct sparse matrices + real(wp),allocatable :: TTdata(:,:,:), TTMdata(:,:) ! queues to construct sparse matrices + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(matrix) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
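! [Editor's sketch — not part of the patch] The hand-written prologue/epilogue above is what the
! old BEGIN()/RETURN() m4 macros used to expand to. A minimal, self-contained rendering of the
! same timing pattern for a hypothetical routine; the accumulator Tdemo and the use of
! iso_fortran_env in place of mod_kinds are assumptions made only so the example compiles alone.
module demo_timing
  use iso_fortran_env, only: real64
  implicit none
  integer, parameter :: wp = real64        ! stands in for mod_kinds' dp
  real(wp) :: Tdemo = 0.0_wp               ! wall time accumulated in subroutine demo
contains
  subroutine demo
    use mpi
    implicit none
    real(wp) :: cput, cpui, cpuo
    cpui = MPI_WTIME() ; cpuo = cpui       ! entry bookkeeping (formerly BEGIN(demo))
    ! ... body of the routine ...
9999 continue                              ! common exit point (formerly RETURN(demo))
    cput  = MPI_WTIME()
    Tdemo = Tdemo + ( cput - cpuo )        ! accumulate time spent in this routine
    return
  end subroutine demo
end module demo_timing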
#ifdef DEBUG - FATAL( matrix, .not.allocated(dMA), error ) - FATAL( matrix, .not.allocated(dMD), error ) - FATAL( matrix, .not.allocated(dMB), error ) - FATAL( matrix, .not.allocated(dMG), error ) + + if( .not.allocated(dMA) ) then + write(6,'("matrix : fatal : myid=",i3," ; .not.allocated(dMA) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "matrix : .not.allocated(dMA) : error ;" + endif + + + if( .not.allocated(dMD) ) then + write(6,'("matrix : fatal : myid=",i3," ; .not.allocated(dMD) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "matrix : .not.allocated(dMD) : error ;" + endif + + + if( .not.allocated(dMB) ) then + write(6,'("matrix : fatal : myid=",i3," ; .not.allocated(dMB) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "matrix : .not.allocated(dMB) : error ;" + endif + + + if( .not.allocated(dMG) ) then + write(6,'("matrix : fatal : myid=",i3," ; .not.allocated(dMG) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "matrix : .not.allocated(dMG) : error ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -417,8 +457,14 @@ subroutine matrix( lvol, mn, lrad ) dMA(0:NN,0:NN) = zero dMD(0:NN,0:NN) = zero - SALLOCATE( TTdata, (0:lrad, 0:mpol, 0:1), zero) - SALLOCATE( TTMdata, (0:lrad, 0:mpol), zero) + + allocate( TTdata(0:lrad, 0:mpol, 0:1), stat=astat ) + TTdata(0:lrad, 0:mpol, 0:1) = zero + + + allocate( TTMdata(0:lrad, 0:mpol), stat=astat ) + TTMdata(0:lrad, 0:mpol) = zero + ! fill in Zernike/Chebyshev polynomials depending on Lcooridnatesingularity if (Lcoordinatesingularity) then @@ -628,10 +674,19 @@ subroutine matrix( lvol, mn, lrad ) endif ! end of if( YESstellsym ) ; ! call subroutine matrixBG to construct dMB and dMG - WCALL( matrix, matrixBG, ( lvol, mn, lrad ) ) - DALLOCATE( TTdata ) - DALLOCATE( TTMdata ) + cput = MPI_WTIME() + Tmatrix = Tmatrix + ( cput-cpuo ) + call matrixBG( lvol, mn, lrad ) + cpuo = MPI_WTIME() + + + + deallocate(TTdata ,stat=astat) + + + deallocate(TTMdata ,stat=astat) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -663,7 +718,12 @@ subroutine matrix( lvol, mn, lrad ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(matrix) + +9999 continue + cput = MPI_WTIME() + Tmatrix = Tmatrix + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -672,6 +732,7 @@ end subroutine matrix !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! subroutine matrixBG( lvol, mn, lrad ) + use mod_kinds, only: wp => dp ! only compute the dMB and dMG matrix for matrix-free mode use constants, only : zero, one use allglobal, only : NAdof, im, in,& @@ -679,9 +740,9 @@ subroutine matrixBG( lvol, mn, lrad ) iVnc, iVns, iBnc, iBns, & Lme, Lmf, Lmg, Lmh implicit none - INTEGER, intent(in) :: lvol, mn, lrad + integer, intent(in) :: lvol, mn, lrad - INTEGER :: NN, ii, id, mi, ni + integer :: NN, ii, id, mi, ni NN = NAdof(lvol) ! 
shorthand; diff --git a/src/memory.F90 b/src/memory.F90 new file mode 100644 index 00000000..2f5425a1 --- /dev/null +++ b/src/memory.F90 @@ -0,0 +1,637 @@ +!> \file +!> \brief memory management module + +!> \brief allocate Beltrami matrices +!> +!> @param vvol +!> @param LcomputeDerivatives +subroutine allocate_Beltrami_matrices(vvol, LcomputeDerivatives) + use mod_kinds, only: wp => dp + use fileunits + + use inputlist, only: Wmemory, Wmacros + + use allglobal + + use cputiming + + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: vvol + LOGICAL, intent(in) :: LcomputeDerivatives + integer :: NN + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + NN = NAdof(vvol) ! shorthand; + + if (NOTMatrixFree .or. LcomputeDerivatives) then + + allocate( dMA(0:NN,0:NN), stat=astat ) + dMA(0:NN,0:NN) = zero + ! required for both plasma region and vacuum region; + + allocate( dMD(0:NN,0:NN), stat=astat ) + dMD(0:NN,0:NN) = zero + + else + + allocate( Adotx(0:NN), stat=astat ) + Adotx(0:NN) = zero + + + allocate( Ddotx(0:NN), stat=astat ) + Ddotx(0:NN) = zero + + endif + + ! we will need the rest even with or without matrix-free + + allocate( dMB(0:NN,0: 2), stat=astat ) + dMB(0:NN,0: 2) = zero + + + allocate( dMG(0:NN ), stat=astat ) + dMG(0:NN ) = zero + + + + allocate( solution(1:NN,-1:2), stat=astat ) + solution(1:NN,-1:2) = zero + ! this will contain the vector potential from the linear solver and its derivatives; + + + allocate( MBpsi(1:NN), stat=astat ) + MBpsi(1:NN) = zero + + + if (LILUprecond) then + + allocate( dMAS(1:NdMASmax(vvol)), stat=astat ) + dMAS(1:NdMASmax(vvol)) = zero + + + allocate( dMDS(1:NdMASmax(vvol)), stat=astat ) + dMDS(1:NdMASmax(vvol)) = zero + + + allocate( idMAS(1:NN+1), stat=astat ) + idMAS(1:NN+1) = 0 + + + allocate( jdMAS(1:NdMASmax(vvol)), stat=astat ) + jdMAS(1:NdMASmax(vvol)) = 0 + + endif ! if we use GMRES and ILU preconditioner + + +9999 continue + cput = MPI_WTIME() + Tmemory = Tmemory + ( cput-cpuo ) + return + + +end subroutine allocate_Beltrami_matrices + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!> \brief deallocate Beltrami matrices +!> +!> @param LcomputeDerivatives +subroutine deallocate_Beltrami_matrices(LcomputeDerivatives) + use mod_kinds, only: wp => dp + use fileunits + + use inputlist, only: Wmemory, Wmacros + + use allglobal + + use cputiming + + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + LOGICAL, intent(in) :: LcomputeDerivatives + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + if (NOTMatrixFree .or. 
LcomputeDerivatives) then + + deallocate(dMA,stat=astat) + + + deallocate(dMD,stat=astat) + + else + + deallocate(Adotx,stat=astat) + + + deallocate(Ddotx,stat=astat) + + endif + + + deallocate(dMB,stat=astat) + + + + deallocate(dMG,stat=astat) + + + + deallocate(solution,stat=astat) + + + + deallocate(MBpsi,stat=astat) + + + if (LILUprecond) then + + deallocate(dMAS,stat=astat) + + + deallocate(dMDS,stat=astat) + + + deallocate(idMAS,stat=astat) + + + deallocate(jdMAS,stat=astat) + + endif ! if we use GMRES and ILU preconditioner + + +9999 continue + cput = MPI_WTIME() + Tmemory = Tmemory + ( cput-cpuo ) + return + + +end subroutine deallocate_Beltrami_matrices + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!> \brief allocate geometry matrices +!> +!> @param vvol +!> @param LcomputeDerivatives +subroutine allocate_geometry_matrices(vvol, LcomputeDerivatives) + use mod_kinds, only: wp => dp +! Allocate all geometry dependent matrices for a given ll + + use constants, only: zero + + use fileunits + + use inputlist, only: Wmemory, Wmacros, Mpol, Lrad + + use allglobal + + use cputiming + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer :: vvol + + LOGICAL, intent(in) :: LcomputeDerivatives + + integer :: ll, lldof, jjdof, iidof + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + ll = Lrad(vvol) + + if (Lcoordinatesingularity) then ! different radial dof for Zernike; 02 Jul 19 + lldof = (Lrad(vvol) - mod(Lrad(vvol),2)) / 2 + if (YESMatrixFree .and. .not. LcomputeDerivatives) then + ! we only need a reduced number of terms to be computed for the preconditioner + iidof = Mpol + 1 + jjdof = 1 + else + ! we need full-size matrices + iidof = mn + jjdof = mn + endif + else + lldof = Lrad(vvol) + if (YESMatrixFree .and. .not. 
LcomputeDerivatives) then + iidof = 1 + jjdof = 1 + else + iidof = mn + jjdof = mn + endif + end if + + + allocate( guvijsave(1:Ntz,1:3,1:3,1:Iquad(vvol)), stat=astat ) + guvijsave(1:Ntz,1:3,1:3,1:Iquad(vvol)) = zero + + + + allocate( DToocc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DToocc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( TTssss(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TTssss(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( TDstsc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TDstsc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( TDszsc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TDszsc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DDttcc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDttcc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DDtzcc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDtzcc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DDzzcc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDzzcc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + + allocate( Tss(0:lldof,1:mn), stat=astat ) + Tss(0:lldof,1:mn) = zero + + + allocate( Dtc(0:lldof,1:mn), stat=astat ) + Dtc(0:lldof,1:mn) = zero + + + allocate( Dzc(0:lldof,1:mn), stat=astat ) + Dzc(0:lldof,1:mn) = zero + + + allocate( Ttc(0:lldof,1:mn), stat=astat ) + Ttc(0:lldof,1:mn) = zero + + + allocate( Tzc(0:lldof,1:mn), stat=astat ) + Tzc(0:lldof,1:mn) = zero + + + if (NOTstellsym) then + + + allocate( DToocs(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DToocs(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DToosc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DToosc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DTooss(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DTooss(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + + allocate( TTsscc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TTsscc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( TTsscs(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TTsscs(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( TTsssc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TTsssc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + + allocate( TDstcc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TDstcc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( TDstcs(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TDstcs(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( TDstss(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TDstss(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + + allocate( TDszcc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TDszcc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( TDszcs(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TDszcs(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( TDszss(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + TDszss(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + + allocate( DDttcs(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDttcs(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DDttsc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDttsc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DDttss(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDttss(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + + allocate( DDtzcs(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDtzcs(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DDtzsc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDtzsc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DDtzss(0:lldof,0:lldof,1:iidof,1:jjdof), 
stat=astat ) + DDtzss(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + + allocate( DDzzcs(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDzzcs(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DDzzsc(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDzzsc(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + allocate( DDzzss(0:lldof,0:lldof,1:iidof,1:jjdof), stat=astat ) + DDzzss(0:lldof,0:lldof,1:iidof,1:jjdof) = zero + + + + allocate( Tsc(0:lldof,1:mn), stat=astat ) + Tsc(0:lldof,1:mn) = zero + + + allocate( Dts(0:lldof,1:mn), stat=astat ) + Dts(0:lldof,1:mn) = zero + + + allocate( Dzs(0:lldof,1:mn), stat=astat ) + Dzs(0:lldof,1:mn) = zero + + + allocate( Tts(0:lldof,1:mn), stat=astat ) + Tts(0:lldof,1:mn) = zero + + + allocate( Tzs(0:lldof,1:mn), stat=astat ) + Tzs(0:lldof,1:mn) = zero + + + end if !NOTstellsym + + +9999 continue + cput = MPI_WTIME() + Tmemory = Tmemory + ( cput-cpuo ) + return + + +end subroutine allocate_geometry_matrices + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!> \brief deallocate geometry matrices +!> +!> @param LcomputeDerivatives +subroutine deallocate_geometry_matrices(LcomputeDerivatives) + use mod_kinds, only: wp => dp +! Deallocate all geometry dependent matrices + use constants, only: zero + + use fileunits + + use inputlist, only: Wmemory, Wmacros + + use allglobal + + use cputiming + + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + LOGICAL, intent(in) :: LcomputeDerivatives + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + Lsavedguvij = .false. 
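! [Editor's sketch — not part of the patch] The long allocate/deallocate blocks in this new file
! are the expansions of the former SALLOCATE()/DALLOCATE() macros: allocate with stat=, then
! zero-initialise. A self-contained miniature of the pattern follows; the explicit status check
! is added purely for illustration (the patch itself does not test astat).
program alloc_demo
  use iso_fortran_env, only: real64
  implicit none
  integer, parameter :: wp = real64
  real(wp), parameter :: zero = 0.0_wp
  real(wp), allocatable :: dMA(:,:)
  integer :: astat, NN
  NN = 8
  allocate( dMA(0:NN,0:NN), stat=astat )   ! was: SALLOCATE( dMA, (0:NN,0:NN), zero )
  if ( astat /= 0 ) stop "alloc_demo : allocation failed"
  dMA(0:NN,0:NN) = zero                    ! SALLOCATE also zero-initialises the array
  ! ... use the matrix ...
  deallocate( dMA, stat=astat )            ! was: DALLOCATE( dMA )
end program alloc_demo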
+ + deallocate(guvijsave,stat=astat) + + + + deallocate(DToocc,stat=astat) + + + deallocate(TTssss,stat=astat) + + + deallocate(TDstsc,stat=astat) + + + deallocate(TDszsc,stat=astat) + + + deallocate(DDttcc,stat=astat) + + + deallocate(DDtzcc,stat=astat) + + + deallocate(DDzzcc,stat=astat) + + + + deallocate(Tss,stat=astat) + + + deallocate(Dtc,stat=astat) + + + deallocate(Dzc,stat=astat) + + + deallocate(Ttc,stat=astat) + + + deallocate(Tzc,stat=astat) + + + if (NOTstellsym) then + + + deallocate(DToocs,stat=astat) + + + deallocate(DToosc,stat=astat) + + + deallocate(DTooss,stat=astat) + + + + deallocate(TTsscc,stat=astat) + + + deallocate(TTsscs,stat=astat) + + + deallocate(TTsssc,stat=astat) + + + + deallocate(TDstcc,stat=astat) + + + deallocate(TDstcs,stat=astat) + + + deallocate(TDstss,stat=astat) + + + + deallocate(TDszcc,stat=astat) + + + deallocate(TDszcs,stat=astat) + + + deallocate(TDszss,stat=astat) + + + + deallocate(DDttcs,stat=astat) + + + deallocate(DDttsc,stat=astat) + + + deallocate(DDttss,stat=astat) + + + + deallocate(DDtzcs,stat=astat) + + + deallocate(DDtzsc,stat=astat) + + + deallocate(DDtzss,stat=astat) + + + + deallocate(DDzzcs,stat=astat) + + + deallocate(DDzzsc,stat=astat) + + + deallocate(DDzzss,stat=astat) + + + + deallocate(Tsc,stat=astat) + + + deallocate(Dts,stat=astat) + + + deallocate(Dzs,stat=astat) + + + deallocate(Tts,stat=astat) + + + deallocate(Tzs,stat=astat) + + + endif + + +9999 continue + cput = MPI_WTIME() + Tmemory = Tmemory + ( cput-cpuo ) + return + + +end subroutine deallocate_geometry_matrices diff --git a/src/memory.f90 b/src/memory.f90 deleted file mode 100644 index 744cd16d..00000000 --- a/src/memory.f90 +++ /dev/null @@ -1,298 +0,0 @@ -!> \file -!> \brief memory management module - -!> \brief allocate Beltrami matrices -!> -!> @param vvol -!> @param LcomputeDerivatives -subroutine allocate_Beltrami_matrices(vvol, LcomputeDerivatives) - - use fileunits - - use inputlist, only: Wmemory, Wmacros - - use allglobal - - use cputiming - - LOCALS - - INTEGER, intent(in) :: vvol - LOGICAL, intent(in) :: LcomputeDerivatives - INTEGER :: NN - - BEGIN(memory) - - NN = NAdof(vvol) ! shorthand; - - if (NOTMatrixFree .or. LcomputeDerivatives) then - SALLOCATE( dMA, (0:NN,0:NN), zero ) ! required for both plasma region and vacuum region; - SALLOCATE( dMD, (0:NN,0:NN), zero ) - else - SALLOCATE( Adotx, (0:NN), zero) - SALLOCATE( Ddotx, (0:NN), zero) - endif - - ! we will need the rest even with or without matrix-free - SALLOCATE( dMB, (0:NN,0: 2), zero ) - SALLOCATE( dMG, (0:NN ), zero ) - - SALLOCATE( solution, (1:NN,-1:2), zero ) ! this will contain the vector potential from the linear solver and its derivatives; - - SALLOCATE( MBpsi, (1:NN), zero ) - - if (LILUprecond) then - SALLOCATE( dMAS, (1:NdMASmax(vvol)), zero) - SALLOCATE( dMDS, (1:NdMASmax(vvol)), zero) - SALLOCATE( idMAS, (1:NN+1), 0) - SALLOCATE( jdMAS, (1:NdMASmax(vvol)), 0) - endif ! if we use GMRES and ILU preconditioner - - RETURN(memory) - -end subroutine allocate_Beltrami_matrices - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!> \brief deallocate Beltrami matrices -!> -!> @param LcomputeDerivatives -subroutine deallocate_Beltrami_matrices(LcomputeDerivatives) - - use fileunits - - use inputlist, only: Wmemory, Wmacros - - use allglobal - - use cputiming - - LOCALS - - LOGICAL, intent(in) :: LcomputeDerivatives - - BEGIN(memory) - - if (NOTMatrixFree .or. 
LcomputeDerivatives) then - DALLOCATE(dMA) - DALLOCATE(dMD) - else - DALLOCATE(Adotx) - DALLOCATE(Ddotx) - endif - - DALLOCATE(dMB) - - DALLOCATE(dMG) - - DALLOCATE(solution) - - DALLOCATE(MBpsi) - - if (LILUprecond) then - DALLOCATE(dMAS) - DALLOCATE(dMDS) - DALLOCATE(idMAS) - DALLOCATE(jdMAS) - endif ! if we use GMRES and ILU preconditioner - - RETURN(memory) - -end subroutine deallocate_Beltrami_matrices - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!> \brief allocate geometry matrices -!> -!> @param vvol -!> @param LcomputeDerivatives -subroutine allocate_geometry_matrices(vvol, LcomputeDerivatives) - -! Allocate all geometry dependent matrices for a given ll - - use constants, only: zero - - use fileunits - - use inputlist, only: Wmemory, Wmacros, Mpol, Lrad - - use allglobal - - use cputiming - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - - LOCALS - - INTEGER :: vvol - - LOGICAL, intent(in) :: LcomputeDerivatives - - INTEGER :: ll, lldof, jjdof, iidof - - BEGIN(memory) - - ll = Lrad(vvol) - - if (Lcoordinatesingularity) then ! different radial dof for Zernike; 02 Jul 19 - lldof = (Lrad(vvol) - mod(Lrad(vvol),2)) / 2 - if (YESMatrixFree .and. .not. LcomputeDerivatives) then - ! we only need a reduced number of terms to be computed for the preconditioner - iidof = Mpol + 1 - jjdof = 1 - else - ! we need full-size matrices - iidof = mn - jjdof = mn - endif - else - lldof = Lrad(vvol) - if (YESMatrixFree .and. .not. LcomputeDerivatives) then - iidof = 1 - jjdof = 1 - else - iidof = mn - jjdof = mn - endif - end if - - SALLOCATE( guvijsave, (1:Ntz,1:3,1:3,1:Iquad(vvol)), zero) - - SALLOCATE( DToocc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( TTssss, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( TDstsc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( TDszsc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( DDttcc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( DDtzcc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( DDzzcc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - - SALLOCATE( Tss, (0:lldof,1:mn), zero ) - SALLOCATE( Dtc, (0:lldof,1:mn), zero ) - SALLOCATE( Dzc, (0:lldof,1:mn), zero ) - SALLOCATE( Ttc, (0:lldof,1:mn), zero ) - SALLOCATE( Tzc, (0:lldof,1:mn), zero ) - - if (NOTstellsym) then - - SALLOCATE( DToocs, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( DToosc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( DTooss, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - - SALLOCATE( TTsscc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( TTsscs, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( TTsssc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - - SALLOCATE( TDstcc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( TDstcs, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( TDstss, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - - SALLOCATE( TDszcc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( TDszcs, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( TDszss, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - - SALLOCATE( DDttcs, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( DDttsc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( DDttss, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - - SALLOCATE( DDtzcs, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( 
DDtzsc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( DDtzss, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - - SALLOCATE( DDzzcs, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( DDzzsc, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - SALLOCATE( DDzzss, (0:lldof,0:lldof,1:iidof,1:jjdof), zero ) - - SALLOCATE( Tsc, (0:lldof,1:mn), zero ) - SALLOCATE( Dts, (0:lldof,1:mn), zero ) - SALLOCATE( Dzs, (0:lldof,1:mn), zero ) - SALLOCATE( Tts, (0:lldof,1:mn), zero ) - SALLOCATE( Tzs, (0:lldof,1:mn), zero ) - - end if !NOTstellsym - - RETURN(memory) - -end subroutine allocate_geometry_matrices - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!> \brief deallocate geometry matrices -!> -!> @param LcomputeDerivatives -subroutine deallocate_geometry_matrices(LcomputeDerivatives) - -! Deallocate all geometry dependent matrices - use constants, only: zero - - use fileunits - - use inputlist, only: Wmemory, Wmacros - - use allglobal - - use cputiming - - LOCALS - - LOGICAL, intent(in) :: LcomputeDerivatives - - BEGIN(memory) - - Lsavedguvij = .false. - DALLOCATE(guvijsave) - - DALLOCATE(DToocc) - DALLOCATE(TTssss) - DALLOCATE(TDstsc) - DALLOCATE(TDszsc) - DALLOCATE(DDttcc) - DALLOCATE(DDtzcc) - DALLOCATE(DDzzcc) - - DALLOCATE(Tss) - DALLOCATE(Dtc) - DALLOCATE(Dzc) - DALLOCATE(Ttc) - DALLOCATE(Tzc) - - if (NOTstellsym) then - - DALLOCATE(DToocs) - DALLOCATE(DToosc) - DALLOCATE(DTooss) - - DALLOCATE(TTsscc) - DALLOCATE(TTsscs) - DALLOCATE(TTsssc) - - DALLOCATE(TDstcc) - DALLOCATE(TDstcs) - DALLOCATE(TDstss) - - DALLOCATE(TDszcc) - DALLOCATE(TDszcs) - DALLOCATE(TDszss) - - DALLOCATE(DDttcs) - DALLOCATE(DDttsc) - DALLOCATE(DDttss) - - DALLOCATE(DDtzcs) - DALLOCATE(DDtzsc) - DALLOCATE(DDtzss) - - DALLOCATE(DDzzcs) - DALLOCATE(DDzzsc) - DALLOCATE(DDzzss) - - DALLOCATE(Tsc) - DALLOCATE(Dts) - DALLOCATE(Dzs) - DALLOCATE(Tts) - DALLOCATE(Tzs) - - endif - - RETURN(memory) - -end subroutine deallocate_geometry_matrices diff --git a/src/metrix.f90 b/src/metrix.F90 similarity index 90% rename from src/metrix.f90 rename to src/metrix.F90 index 3f579541..57332f23 100644 --- a/src/metrix.f90 +++ b/src/metrix.F90 @@ -36,7 +36,7 @@ !> (The "extended" Fourier resolution is used.) !> subroutine metrix( lquad, lvol ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one @@ -67,13 +67,29 @@ subroutine metrix( lquad, lvol ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lvol, lquad +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lvol, lquad + + integer :: Lcurvature, ifail, ideriv, jquad - INTEGER :: Lcurvature, ifail, ideriv, jquad - BEGIN( metrix ) + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
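! [Editor's sketch — not part of the patch] Throughout this patch the macro types REAL/INTEGER
! become real(wp)/integer, with the working precision imported as "use mod_kinds, only: wp => dp"
! from the new module src/mod_kinds.F90 shown further below. A self-contained miniature of the
! pattern; the module body is reproduced inline here (as mod_kinds_demo) so the example compiles
! on its own.
module mod_kinds_demo                       ! same content as src/mod_kinds.F90
  use iso_fortran_env, only: real32, real64
  implicit none
  integer, parameter :: sp = real32
  integer, parameter :: dp = real64
end module mod_kinds_demo

program kind_demo
  use mod_kinds_demo, only: wp => dp        ! mirrors "use mod_kinds, only: wp => dp"
  implicit none
  real(wp) :: lss                           ! formerly declared as "REAL :: lss"
  lss = 1.0_wp / 3.0_wp
  print '(a,es23.16)', ' lss = ', lss
end program kind_demo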
@@ -114,7 +130,12 @@ subroutine metrix( lquad, lvol ) enddo !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN( metrix ) + +9999 continue + cput = MPI_WTIME() + Tmetrix = Tmetrix + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -129,15 +150,15 @@ end subroutine metrix !> @param ideriv !> @param Lcurvature subroutine compute_guvijsave(lquad, vvol, ideriv, Lcurvature) - + use mod_kinds, only: wp => dp use allglobal, only : gaussianabscissae, Ntz, mn, guvij, guvijsave, & sg implicit none - INTEGER, intent(in):: vvol, lquad, ideriv, Lcurvature - INTEGER :: jquad, ii, jj - REAL :: lss + integer, intent(in):: vvol, lquad, ideriv, Lcurvature + integer :: jquad, ii, jj + real(wp) :: lss ! we need to compute guvij and save it in guvijsave do jquad = 1, lquad diff --git a/src/mod_kinds.F90 b/src/mod_kinds.F90 new file mode 100644 index 00000000..39207684 --- /dev/null +++ b/src/mod_kinds.F90 @@ -0,0 +1,7 @@ +!> https://fortran-lang.discourse.group/t/best-way-to-declare-a-double-precision-in-fortran/69/2 +module mod_kinds +use iso_fortran_env, only: real32, real64 +implicit none +integer, parameter :: sp = real32 +integer, parameter :: dp = real64 +end module mod_kinds diff --git a/src/mp00ac.f90 b/src/mp00ac.F90 similarity index 83% rename from src/mp00ac.f90 rename to src/mp00ac.F90 index 2ecaf1ab..dba8dfae 100644 --- a/src/mp00ac.f90 +++ b/src/mp00ac.F90 @@ -110,7 +110,7 @@ !> @param[in] Ldfjac !> @param iflag indicates whether (i) iflag=1: "function" values are required; or (ii) iflag=2: "derivative" values are required subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fixed by NAG; ma02aa calls mp00ac through C05PCF; - + use mod_kinds, only: wp => dp ! if iflag.eq.0 : Xdof and Fdof are available for PRINTING ; Fdof MUST NOT BE CHANGED; Ddof MUST NOT BE CHANGED; ! if iflag.eq.1 : Fdof is to be UPDATED ; ; Ddof MUST NOT BE CHANGED; ! if iflag.eq.2 : Ddof is to be UPDATED ; Fdof MUST NOT BE CHANGED; @@ -152,51 +152,67 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: Ndof, Ldfjac - REAL , intent(in) :: Xdof(1:Ndof) - REAL :: Fdof(1:Ndof), Ddof(1:Ldfjac,1:Ndof) - INTEGER :: iflag +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + integer, intent(in) :: Ndof, Ldfjac + real(wp) , intent(in) :: Xdof(1:Ndof) + real(wp) :: Fdof(1:Ndof), Ddof(1:Ldfjac,1:Ndof) + integer :: iflag - INTEGER, parameter :: NB = 4 ! optimal workspace block size for LAPACK:DGECON; - INTEGER :: lvol, NN, MM, ideriv, lmns, ii, jj, nnz, Lwork + integer, parameter :: NB = 4 ! 
optimal workspace block size for LAPACK:DGECON; - INTEGER :: idgetrf(0:1), idgetrs(0:1), idgerfs(0:1), idgecon(0:1) + integer :: lvol, NN, MM, ideriv, lmns, ii, jj, nnz, Lwork - REAL :: lmu, dpf, dtf, dpsi(1:2), tpsi(1:2), ppsi(1:2), lcpu, test(2,2) + integer :: idgetrf(0:1), idgetrs(0:1), idgerfs(0:1), idgecon(0:1) - REAL :: anorm, rcond, ferr(2), berr(2), signfactor + real(wp) :: lmu, dpf, dtf, dpsi(1:2), tpsi(1:2), ppsi(1:2), lcpu, test(2,2) - CHARACTER :: packorunpack + real(wp) :: anorm, rcond, ferr(2), berr(2), signfactor + + character :: packorunpack ! For direct LU decompose - INTEGER, allocatable :: ipiv(:), Iwork(:) + integer, allocatable :: ipiv(:), Iwork(:) - REAL , allocatable :: matrix(:,:), rhs(:,:), LU(:,:) + real(wp) , allocatable :: matrix(:,:), rhs(:,:), LU(:,:) - REAL , allocatable :: RW(:), RD(:,:) + real(wp) , allocatable :: RW(:), RD(:,:) - REAL , allocatable :: matrixC(:,:) + real(wp) , allocatable :: matrixC(:,:) ! For GMRES + ILU - INTEGER, parameter :: nrestart = 5 ! do GMRES restart after nrestart iterations - INTEGER :: maxfil ! bandwidth for ILU subroutines, will be estimated + integer, parameter :: nrestart = 5 ! do GMRES restart after nrestart iterations + integer :: maxfil ! bandwidth for ILU subroutines, will be estimated - INTEGER :: NS, itercount, Nbilut + integer :: NS, itercount, Nbilut - REAL , allocatable :: matrixS(:), bilut(:) - INTEGER, allocatable :: ibilut(:), jbilut(:) + real(wp) , allocatable :: matrixS(:), bilut(:) + integer, allocatable :: ibilut(:), jbilut(:) - INTEGER, parameter :: ipar_SIZE = 128 - INTEGER :: ipar(ipar_SIZE), iluierr, RCI_REQUEST, nw, t1, t2, t3 - REAL :: fpar(ipar_SIZE), v1 - REAL, allocatable :: wk(:) - INTEGER,allocatable :: jw(:), iperm(:) + integer, parameter :: ipar_SIZE = 128 + integer :: ipar(ipar_SIZE), iluierr, RCI_REQUEST, nw, t1, t2, t3 + real(wp) :: fpar(ipar_SIZE), v1 + real(wp), allocatable :: wk(:) + integer,allocatable :: jw(:), iperm(:) + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(mp00ac) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -205,7 +221,13 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( mp00ac, iflag.ne.1 .and. iflag.ne.2, invalid iflag ) ! see nprint=0 in ma02aa and C05PCF; perhaps NAG:C05PCF is no longer used; + + if( iflag.ne.1 .and. iflag.ne.2 ) then + write(6,'("mp00ac : fatal : myid=",i3," ; iflag.ne.1 .and. iflag.ne.2 ; invalid iflag ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "mp00ac : iflag.ne.1 .and. iflag.ne.2 : invalid iflag ;" + endif + ! see nprint=0 in ma02aa and C05PCF; perhaps NAG:C05PCF is no longer used; #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -246,11 +268,20 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi NN = NAdof(lvol) ! shorthand; - SALLOCATE( rhs , (1:NN,0:2 ), zero ) + + allocate( rhs (1:NN,0:2 ), stat=astat ) + rhs (1:NN,0:2 ) = zero + if (NOTMatrixFree) then ! 
create the full size matrix - SALLOCATE( matrix, (1:NN,1:NN), zero ) + + allocate( matrix(1:NN,1:NN), stat=astat ) + matrix(1:NN,1:NN) = zero + else ! create a dummy variable - SALLOCATE( matrix, (1:1,1:1), zero ) + + allocate( matrix(1:1,1:1), stat=astat ) + matrix(1:1,1:1) = zero + endif solution(1:NN,-1:2) = zero ! this is a global array allocated in dforce; @@ -260,15 +291,33 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi case (1) ! direct matrix solver Lwork = NB*NN - SALLOCATE( RW, (1:Lwork ), zero ) - SALLOCATE( RD, (1:NN,0:2), zero ) - SALLOCATE( LU, (1:NN,1:NN), zero ) - SALLOCATE( ipiv, (1:NN), 0 ) - SALLOCATE( Iwork, (1:NN), 0 ) + + allocate( RW(1:Lwork ), stat=astat ) + RW(1:Lwork ) = zero + + + allocate( RD(1:NN,0:2), stat=astat ) + RD(1:NN,0:2) = zero + + + allocate( LU(1:NN,1:NN), stat=astat ) + LU(1:NN,1:NN) = zero + + + allocate( ipiv(1:NN), stat=astat ) + ipiv(1:NN) = 0 + + + allocate( Iwork(1:NN), stat=astat ) + Iwork(1:NN) = 0 + case (2:3) ! GMRES if (LILUprecond) then NS = NdMAS(lvol) ! shorthand - SALLOCATE( matrixS, (1:NS), zero ) + + allocate( matrixS(1:NS), stat=astat ) + matrixS(1:NS) = zero + ! estimate bandwidth if (Lcoordinatesingularity) then @@ -280,15 +329,33 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi end if Nbilut = (2*maxfil+2)*NN - SALLOCATE( bilut, (1:Nbilut), zero) - SALLOCATE( jbilut, (1:Nbilut), 0) - SALLOCATE( ibilut, (1:NN+1), 0) + + allocate( bilut(1:Nbilut), stat=astat ) + bilut(1:Nbilut) = zero + + + allocate( jbilut(1:Nbilut), stat=astat ) + jbilut(1:Nbilut) = 0 + + + allocate( ibilut(1:NN+1), stat=astat ) + ibilut(1:NN+1) = 0 + endif nw = (NN+3)*(nrestart+2) + (nrestart+1)*nrestart - SALLOCATE( wk, (1:nw), zero) - SALLOCATE( jw, (1:2*NN), 0) - SALLOCATE( iperm, (1:2*NN), 0) + + allocate( wk(1:nw), stat=astat ) + wk(1:nw) = zero + + + allocate( jw(1:2*NN), stat=astat ) + jw(1:2*NN) = 0 + + + allocate( iperm(1:2*NN), stat=astat ) + iperm(1:2*NN) = 0 + end select !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -352,7 +419,13 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi else ! Lvacuumregion ; #ifdef FORCEFREEVACUUM - FATAL( mp00ac, .true., need to revise Beltrami matrices in vacuum region for arbitrary force-free field ) + + if( .true. ) then + write(6,'("mp00ac : fatal : myid=",i3," ; .true. ; need to revise Beltrami matrices in vacuum region for arbitrary force-free field ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "mp00ac : .true. : need to revise Beltrami matrices in vacuum region for arbitrary force-free field ;" + endif + #else if (NOTMatrixFree) then @@ -400,7 +473,7 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi end select ! ideriv; - cput = GETTIME + cput = MPI_WTIME() if( idgetrf(ideriv) .eq. 0 .and. idgetrs(ideriv) .eq. 0 .and. idgerfs(ideriv) .eq. 0 .and. rcond .ge. machprec) then if( Wmp00ac ) write(ounit,1010) cput-cpus, myid, lvol, ideriv, "idgetrf idgetrs idgerfs", idgetrf(ideriv), idgetrs(ideriv), idgetrf(ideriv), "success ; ", cput-lcpu @@ -435,7 +508,13 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi if (LILUprecond) then ! 
ILU factorization call ilutp(NN,matrixS,jdMAS,idMAS,maxfil,epsILU,0.1,NN,bilut,jbilut,ibilut,Nbilut,wk,jw,iperm,iluierr) - FATAL(mp00ac, iluierr.ne.0, construction of preconditioner failed) + + if( iluierr.ne.0 ) then + write(6,'("mp00ac : fatal : myid=",i3," ; iluierr.ne.0 ; construction of preconditioner failed;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "mp00ac : iluierr.ne.0 : construction of preconditioner failed ;" + endif + endif call rungmres(NN,nrestart,lmu,lvol,rhs(1:NN,0),solution(1:NN,0),ipar,fpar,wk,nw,GMRESlastsolution(1:NN,0,lvol),matrix,bilut,jbilut,ibilut,iperm,ierr) @@ -469,7 +548,7 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi end do ! ii end select ! ideriv - cput = GETTIME + cput = MPI_WTIME() if (ierr.ge.0) then if( Wmp00ac ) write(ounit,1011) cput-cpus, myid, lvol, ideriv, ierr, " successful ; " @@ -495,7 +574,12 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi if( iflag.eq.1 .and. ideriv.gt.0 ) cycle packorunpack = 'U' - WCALL( mp00ac, packab, ( packorunpack, lvol, NN, solution(1:NN,ideriv), ideriv ) ) ! unpacking; this assigns oAt, oAz through common; + + cput = MPI_WTIME() + Tmp00ac = Tmp00ac + ( cput-cpuo ) + call packab( packorunpack, lvol, NN, solution(1:NN,ideriv), ideriv ) + cpuo = MPI_WTIME() + ! unpacking; this assigns oAt, oAz through common; if (ideriv .eq. 0 .and. .not. NOTMatrixFree) then call intghs(Iquad(lvol), mn, lvol, Lrad(lvol), 0) ! compute the integrals of B_lower @@ -525,26 +609,54 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - DALLOCATE( matrix ) - DALLOCATE( rhs ) + + deallocate(matrix ,stat=astat) + + + deallocate(rhs ,stat=astat) + select case (Lmatsolver) case (1) ! LU - DALLOCATE( RW ) - DALLOCATE( RD ) - DALLOCATE( LU ) - DALLOCATE( ipiv ) - DALLOCATE( Iwork ) + + deallocate(RW ,stat=astat) + + + deallocate(RD ,stat=astat) + + + deallocate(LU ,stat=astat) + + + deallocate(ipiv ,stat=astat) + + + deallocate(Iwork ,stat=astat) + case (2:3) ! GMRES if (LILUprecond) then - DALLOCATE( matrixS ) - DALLOCATE( bilut ) - DALLOCATE( jbilut ) - DALLOCATE( ibilut ) + + deallocate(matrixS ,stat=astat) + + + deallocate(bilut ,stat=astat) + + + deallocate(jbilut ,stat=astat) + + + deallocate(ibilut ,stat=astat) + endif - DALLOCATE( wk ) - DALLOCATE( jw ) - DALLOCATE( iperm ) + + deallocate(wk ,stat=astat) + + + deallocate(jw ,stat=astat) + + + deallocate(iperm ,stat=astat) + end select !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -590,7 +702,12 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi if( Lplasmaregion ) then if( Wtr00ab ) then ! compute rotational transform only for diagnostic purposes; - WCALL( mp00ac, tr00ab, ( lvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,lvol) ) ) + + cput = MPI_WTIME() + Tmp00ac = Tmp00ac + ( cput-cpuo ) + call tr00ab( lvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,lvol) ) + cpuo = MPI_WTIME() + endif Fdof(1:Ndof ) = zero ! provide dummy intent out; Lconstraint=-1 indicates no iterations over mu , dpflux are required; @@ -599,11 +716,21 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi else ! Lvacuumregion if( Wtr00ab ) then ! 
compute rotational transform only for diagnostic purposes; - WCALL( mp00ac, tr00ab, ( lvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,lvol) ) ) + + cput = MPI_WTIME() + Tmp00ac = Tmp00ac + ( cput-cpuo ) + call tr00ab( lvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,lvol) ) + cpuo = MPI_WTIME() + endif if( Wcurent ) then ! compute enclosed currents only for diagnostic purposes; - WCALL( mp00ac, curent,( lvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,lvol) ) ) + + cput = MPI_WTIME() + Tmp00ac = Tmp00ac + ( cput-cpuo ) + call curent( lvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,lvol) ) + cpuo = MPI_WTIME() + curtor = dItGpdxtp(0,0,lvol) ! icurrent(0) ! update input variables; curpol = dItGpdxtp(1,0,lvol) ! gcurrent(0) endif @@ -618,7 +745,12 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi if( Lplasmaregion ) then if( Wtr00ab ) then ! compute rotational transform only for diagnostic purposes; - WCALL( mp00ac, tr00ab, ( lvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,lvol) ) ) + + cput = MPI_WTIME() + Tmp00ac = Tmp00ac + ( cput-cpuo ) + call tr00ab( lvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,lvol) ) + cpuo = MPI_WTIME() + endif Fdof(1:Ndof ) = zero ! provide dummy intent out; Lconstraint= 0 indicates no iterations over mu, dpflux are required; @@ -626,7 +758,12 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi else ! Lvacuumregion - WCALL( mp00ac, curent,( lvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,lvol) ) ) + + cput = MPI_WTIME() + Tmp00ac = Tmp00ac + ( cput-cpuo ) + call curent( lvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,lvol) ) + cpuo = MPI_WTIME() + if( iflag.eq.1 ) Fdof(1:2 ) = (/ dItGpdxtp(0,0,lvol) - curtor, dItGpdxtp(1,0,lvol) - curpol /) if( iflag.eq.2 ) Ddof(1:2,1) = (/ dItGpdxtp(0,1,lvol) , dItGpdxtp(1,1,lvol) /) @@ -637,7 +774,12 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi case( 1 ) ! Lconstraint= 1; - WCALL( mp00ac, tr00ab,( lvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,lvol) ) ) ! required for both plasma and vacuum region; + + cput = MPI_WTIME() + Tmp00ac = Tmp00ac + ( cput-cpuo ) + call tr00ab( lvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,lvol) ) + cpuo = MPI_WTIME() + ! required for both plasma and vacuum region; if( Lplasmaregion ) then @@ -654,7 +796,12 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi else ! Lvacuumregion - WCALL( mp00ac, curent, ( lvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,lvol) ) ) + + cput = MPI_WTIME() + Tmp00ac = Tmp00ac + ( cput-cpuo ) + call curent( lvol, mn, Nt, Nz, iflag, dItGpdxtp(0:1,-1:2,lvol) ) + cpuo = MPI_WTIME() + curtor = dItGpdxtp(0,0,lvol) ! update input variables; 08 Jun 16; !curpol = dItGpdxtp(1,0,lvol) @@ -683,7 +830,12 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi if( Lplasmaregion ) then if( Wtr00ab ) then ! compute rotational transform only for diagnostic purposes; - WCALL( mp00ac, tr00ab, ( lvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,lvol) ) ) + + cput = MPI_WTIME() + Tmp00ac = Tmp00ac + ( cput-cpuo ) + call tr00ab( lvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2,lvol) ) + cpuo = MPI_WTIME() + endif Fdof(1:Ndof ) = zero ! provide dummy intent out; no iteration other mu and psip locally @@ -708,21 +860,33 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi if( Wmp00ac .or. Wma02aa ) then ! 
the following is screen output; - cput = GETTIME + cput = MPI_WTIME() if( Lplasmaregion ) then select case( iflag ) case( 0 ) ; write(ounit,3000) cput-cpus, myid, lvol, lmu, dpf, iflag ! this is impossible by above logic; case( 1 ) ; write(ounit,3000) cput-cpus, myid, lvol, lmu, dpf, iflag, Fdof(1:Ndof) case( 2 ) ; write(ounit,3010) cput-cpus, myid, lvol, lmu, dpf, iflag, Ddof(1:Ndof,1:Ndof) - case default ; FATAL( mp00ac, .true., illegal iflag on entry ) + case default ; + if( .true. ) then + write(6,'("mp00ac : fatal : myid=",i3," ; .true. ; illegal iflag on entry ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "mp00ac : .true. : illegal iflag on entry ;" + endif + end select else ! Lvacuumregion select case( iflag ) case( 0 ) ; write(ounit,3001) cput-cpus, myid, lvol, dtf, dpf, iflag ! this is impossible by above logic; case( 1 ) ; write(ounit,3001) cput-cpus, myid, lvol, dtf, dpf, iflag, Fdof(1:Ndof) case( 2 ) ; write(ounit,3011) cput-cpus, myid, lvol, dtf, dpf, iflag, Ddof(1:Ndof,1:Ndof) - case default ; FATAL( mp00ac, .true., illegal iflag on entry ) + case default ; + if( .true. ) then + write(6,'("mp00ac : fatal : myid=",i3," ; .true. ; illegal iflag on entry ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "mp00ac : .true. : illegal iflag on entry ;" + endif + end select endif @@ -755,7 +919,12 @@ subroutine mp00ac( Ndof, Xdof, Fdof, Ddof, Ldfjac, iflag ) ! argument list is fi !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(mp00ac) + +9999 continue + cput = MPI_WTIME() + Tmp00ac = Tmp00ac + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -783,6 +952,7 @@ end subroutine mp00ac !> @param iperm !> @param ierr subroutine rungmres(n,nrestart,mu,vvol,rhs,sol,ipar,fpar,wk,nw,guess,a,au,jau,ju,iperm,ierr) + use mod_kinds, only: wp => dp ! Driver subroutine for GMRES ! modified from riters.f from SPARSKIT v2.0 ! by ZSQ 02 Feb 2020 @@ -791,14 +961,14 @@ subroutine rungmres(n,nrestart,mu,vvol,rhs,sol,ipar,fpar,wk,nw,guess,a,au,jau,ju use allglobal, only : LILUprecond use fileunits implicit none - INTEGER :: n, nrestart, nw, vvol, ju(*), jau(*), iperm(*) - INTEGER :: ipar(16) - INTEGER :: ierr - REAL :: guess(n), au(*), mu - REAL :: fpar(16), rhs(1:n), sol(1:n), wk(1:nw), a(*) + integer :: n, nrestart, nw, vvol, ju(*), jau(*), iperm(*) + integer :: ipar(16) + integer :: ierr + real(wp) :: guess(n), au(*), mu + real(wp) :: fpar(16), rhs(1:n), sol(1:n), wk(1:nw), a(*) - INTEGER :: i, its - REAL :: res, tmprhs(1:n) + integer :: i, its + real(wp) :: res, tmprhs(1:n) its = 0 res = zero @@ -860,17 +1030,18 @@ end subroutine rungmres !> @param mu !> @param vvol subroutine matvec(n, x, ax, a, mu, vvol) + use mod_kinds, only: wp => dp ! compute a.x by either by coumputing it directly, ! or using a matrix free method use constants, only : zero, one use inputlist, only : Lrad use allglobal, only : NOTMatrixFree, Iquad, mn, dmd implicit none - INTEGER, intent(in) :: n, vvol - REAL :: ax(1:n), x(1:n), a(*), mu - INTEGER :: ideriv - REAL :: dax(0:n), ddx(0:n), cput, lastcpu - CHARACTER :: packorunpack + integer, intent(in) :: n, vvol + real(wp) :: ax(1:n), x(1:n), a(*), mu + integer :: ideriv + real(wp) :: dax(0:n), ddx(0:n), cput, lastcpu + character :: packorunpack if (NOTMatrixFree) then ! 
if we have the matrix, then just multiply it to x call DGEMV('N', n, n, one, dMD(1,1), n+1, x, 1, zero, ddx(1), 1) @@ -898,14 +1069,15 @@ end subroutine matvec !> @param ju !> @param iperm subroutine prec_solve(n,vecin,vecout,au,jau,ju,iperm) + use mod_kinds, only: wp => dp ! apply the preconditioner implicit none - INTEGER :: n, iperm(*), jau(*), ju(*) - REAL :: vecin(*), au(*) - REAL :: vecout(*) + integer :: n, iperm(*), jau(*), ju(*) + real(wp) :: vecin(*), au(*) + real(wp) :: vecout(*) - INTEGER :: ii - REAL :: tempv(n) + integer :: ii + real(wp) :: tempv(n) call lusol(n,vecin,tempv,au,jau,ju) ! sparse LU solve ! apply permutation diff --git a/src/mtrxhs.f90 b/src/mtrxhs.F90 similarity index 91% rename from src/mtrxhs.f90 rename to src/mtrxhs.F90 index 101dbd91..dc9fac3b 100644 --- a/src/mtrxhs.f90 +++ b/src/mtrxhs.F90 @@ -11,7 +11,7 @@ !> @param resultD !> @param idx subroutine mtrxhs( lvol, mn, lrad, resultA, resultD, idx ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, two, half @@ -40,24 +40,40 @@ subroutine mtrxhs( lvol, mn, lrad, resultA, resultD, idx ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - INTEGER, intent(in) :: lvol, mn, lrad, idx + integer, intent(in) :: lvol, mn, lrad, idx - REAL, intent(out) :: resultA(0:NAdof(lvol)), resultD(0:NAdof(lvol)) + real(wp), intent(out) :: resultA(0:NAdof(lvol)), resultD(0:NAdof(lvol)) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - INTEGER :: NN, ii, ll, jj, ll1, mi, ni, id, jd, kk + integer :: NN, ii, ll, jj, ll1, mi, ni, id, jd, kk + + real(wp) :: Wte, Wto, Wze, Wzo, Hte, Hto, Hze, Hzo - REAL :: Wte, Wto, Wze, Wzo, Hte, Hto, Hze, Hzo + real(wp), allocatable :: TTdata(:,:,:), TTMdata(:,:) - REAL, allocatable :: TTdata(:,:,:), TTMdata(:,:) - BEGIN(mtrxhs) + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -67,8 +83,14 @@ subroutine mtrxhs( lvol, mn, lrad, resultA, resultD, idx ) resultA(0:NN) = zero resultD(0:NN) = zero - SALLOCATE( TTdata, (0:lrad, 0:mpol, 0:1), zero) - SALLOCATE( TTMdata, (0:lrad, 0:mpol), zero) + + allocate( TTdata(0:lrad, 0:mpol, 0:1), stat=astat ) + TTdata(0:lrad, 0:mpol, 0:1) = zero + + + allocate( TTMdata(0:lrad, 0:mpol), stat=astat ) + TTMdata(0:lrad, 0:mpol) = zero + ! fill in Zernike/Chebyshev polynomials depending on Lcooridnatesingularity if (Lcoordinatesingularity) then @@ -207,12 +229,21 @@ subroutine mtrxhs( lvol, mn, lrad, resultA, resultD, idx ) !$OMP END PARALLEL DO endif ! 
end of if( YESstellsym ) ; - DALLOCATE( TTdata ) - DALLOCATE( TTMdata ) + + deallocate(TTdata ,stat=astat) + + + deallocate(TTMdata ,stat=astat) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(mtrxhs) + +9999 continue + cput = MPI_WTIME() + Tmtrxhs = Tmtrxhs + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/newton.f90 b/src/newton.F90 similarity index 69% rename from src/newton.f90 rename to src/newton.F90 index 7bd26216..93945e9f 100644 --- a/src/newton.f90 +++ b/src/newton.F90 @@ -5,10 +5,10 @@ !> \brief timing of Newton iterations module newtontime - - INTEGER :: nFcalls !< number of calls to get function values (?) - INTEGER :: nDcalls !< number of calls to get derivative values (?) - REAL :: lastcpu !< last CPU that called this (?) + use mod_kinds, only: wp => dp + integer :: nFcalls !< number of calls to get function values (?) + integer :: nDcalls !< number of calls to get derivative values (?) + real(wp) :: lastcpu !< last CPU that called this (?) end module newtontime @@ -47,7 +47,7 @@ end module newtontime !> @param[inout] position !> @param[out] ihybrd subroutine newton( NGdof, position, ihybrd ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, two, ten @@ -77,41 +77,57 @@ subroutine newton( NGdof, position, ihybrd ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: NGdof - REAL , intent(inout) :: position(0:NGdof) - INTEGER, intent(out) :: ihybrd +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: NGdof + real(wp) , intent(inout) :: position(0:NGdof) + integer, intent(out) :: ihybrd LOGICAL :: LComputeDerivatives - INTEGER :: wflag, iflag, idof, jdof, ijdof, ireadhessian, igdof, lvol, ii, imn, ierr2 - REAL :: rflag - CHARACTER :: pack + integer :: wflag, iflag, idof, jdof, ijdof, ireadhessian, igdof, lvol, ii, imn, ierr2 + real(wp) :: rflag + character :: pack - INTEGER :: irevcm, mode, Ldfjac, LR - REAL :: xtol, epsfcn, factor - REAL :: diag(1:NGdof), QTF(1:NGdof), workspace(1:NGdof,1:4) + integer :: irevcm, mode, Ldfjac, LR + real(wp) :: xtol, epsfcn, factor + real(wp) :: diag(1:NGdof), QTF(1:NGdof), workspace(1:NGdof,1:4) - REAL :: force(0:NGdof) - REAL, allocatable :: fjac(:,:), RR(:), work(:,:) + real(wp) :: force(0:NGdof) + real(wp), allocatable :: fjac(:,:), RR(:), work(:,:) - INTEGER :: ML, MU ! required for only Lc05ndf; + integer :: ML, MU ! required for only Lc05ndf; LOGICAL :: Lexit = .true. ! perhaps this could be made user input; LOGICAL :: LComputeAxis - INTEGER :: nprint = 1, nfev, njev + integer :: nprint = 1, nfev, njev - INTEGER, parameter :: maxfev = 5000 ! maximum calls per iteration; + integer, parameter :: maxfev = 5000 ! 
maximum calls per iteration; external :: fcn1, fcn2 - BEGIN(newton) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! if( Wnewton .and. myid.eq.0 ) then ! screen output; - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("newton : ", 10x ," : ")') write(ounit,'("newton : ",f10.2," : Lfindzero="i2" ; forcetol="es13.5" ; c05xtol="es13.5" ; c05factor="es13.5" ; LreadGF="L2" ; NGdof="i6" ;")')& cput-cpus, Lfindzero, forcetol, c05xtol, c05factor, LreadGF, NGdof @@ -141,16 +157,21 @@ subroutine newton( NGdof, position, ihybrd ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - lastcpu = GETTIME + lastcpu = MPI_WTIME() if( Lexit ) then ! will call initial force, and if ForceErr.lt.forcetol will immediately exit; LComputeDerivatives= .false. LComputeAxis = .true. - WCALL( newton, dforce, ( NGdof, position(0:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis) ) ! calculate the force-imbalance; + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call dforce( NGdof, position(0:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis) + cpuo = MPI_WTIME() + ! calculate the force-imbalance; if( myid.eq.0 ) then ! screen output; - cput = GETTIME + cput = MPI_WTIME() ; write(ounit,1000) cput-cpus, nFcalls, nDcalls, ForceErr, cput-lastcpu, "|BB|e", alog10(BBe(1:min(Mvol-1,28))) if( Igeometry.ge.3 ) then ! include spectral constraints; ;write(ounit,1001) "|II|o", alog10(IIo(1:min(Mvol-1,28))) @@ -179,20 +200,44 @@ subroutine newton( NGdof, position, ihybrd ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - SALLOCATE( fjac, (1:NGdof, 1:NGdof), zero) - SALLOCATE( RR, (1:NGdof*(NGdof+1)/2), zero) + + allocate( fjac(1:NGdof, 1:NGdof), stat=astat ) + fjac(1:NGdof, 1:NGdof) = zero + + + allocate( RR(1:NGdof*(NGdof+1)/2), stat=astat ) + RR(1:NGdof*(NGdof+1)/2) = zero + if( Lfindzero.eq.2 ) then - SALLOCATE( dFFdRZ, (1:LGdof,0:1,1:LGdof,0:1,1:Mvol), zero ) - SALLOCATE( dBBdmp, (1:LGdof,1:Mvol,0:1,1:2), zero ) + + allocate( dFFdRZ(1:LGdof,0:1,1:LGdof,0:1,1:Mvol), stat=astat ) + dFFdRZ(1:LGdof,0:1,1:LGdof,0:1,1:Mvol) = zero + + + allocate( dBBdmp(1:LGdof,1:Mvol,0:1,1:2), stat=astat ) + dBBdmp(1:LGdof,1:Mvol,0:1,1:2) = zero + if( LocalConstraint ) then - SALLOCATE( dmupfdx, (1:Mvol, 1:1,1:2,1:LGdof,0:1), zero ) + + allocate( dmupfdx(1:Mvol, 1:1,1:2,1:LGdof,0:1), stat=astat ) + dmupfdx(1:Mvol, 1:1,1:2,1:LGdof,0:1) = zero + else - SALLOCATE( dmupfdx, (1:Mvol, 1:Mvol-1,1:2,1:LGdof,1), zero ) ! TODO change the format to put vvol in last index position... + + allocate( dmupfdx(1:Mvol, 1:Mvol-1,1:2,1:LGdof,1), stat=astat ) + dmupfdx(1:Mvol, 1:Mvol-1,1:2,1:LGdof,1) = zero + ! TODO change the format to put vvol in last index position... endif - SALLOCATE( hessian, (1:NGdof,1:NGdof), zero ) - SALLOCATE( dessian, (1:NGdof,1:LGdof), zero ) + + allocate( hessian(1:NGdof,1:NGdof), stat=astat ) + hessian(1:NGdof,1:NGdof) = zero + + + allocate( dessian(1:NGdof,1:LGdof), stat=astat ) + dessian(1:NGdof,1:LGdof) = zero + Lhessianallocated = .true. else @@ -208,36 +253,52 @@ subroutine newton( NGdof, position, ihybrd ) case( 1 ) ! 
use function values to find x st f(x)=0, where x is the geometry of the interfaces, and f is the force; - WCALL( newton, hybrd, ( fcn1, NGdof, position(1:NGdof), force(1:NGdof), & + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call hybrd( fcn1, NGdof, position(1:NGdof), force(1:NGdof), & xtol, maxfev, ML, MU, epsfcn, diag(1:NGdof), mode, factor, nprint, ihybrd, nfev, fjac(1:Ldfjac,1:NGdof), Ldfjac, & - RR(1:LR), LR, QTF(1:NGdof), workspace(1:NGdof,1), workspace(1:NGdof,2), workspace(1:NGdof,3), workspace(1:NGdof,4) ) ) + RR(1:LR), LR, QTF(1:NGdof), workspace(1:NGdof,1), workspace(1:NGdof,2), workspace(1:NGdof,3), workspace(1:NGdof,4) ) + cpuo = MPI_WTIME() + case( 2 ) ! use function values and user-supplied derivatives to find x st f(x)=0, where x is the geometry of the interfaces, and f is the force; - WCALL( newton, hybrj, ( fcn2, NGdof, position(1:NGdof), force(1:NGdof), fjac(1:Ldfjac,1:NGdof), Ldfjac, & + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call hybrj( fcn2, NGdof, position(1:NGdof), force(1:NGdof), fjac(1:Ldfjac,1:NGdof), Ldfjac, & xtol, maxfev, diag(1:NGdof), mode, factor, nprint, ihybrd, nfev, njev, & - RR(1:LR), LR, QTF(1:NGdof), workspace(1:NGdof,1), workspace(1:NGdof,2), workspace(1:NGdof,3), workspace(1:NGdof,4) ) ) + RR(1:LR), LR, QTF(1:NGdof), workspace(1:NGdof,1), workspace(1:NGdof,2), workspace(1:NGdof,3), workspace(1:NGdof,4) ) + cpuo = MPI_WTIME() + case default - FATAL( newton, .true., value of Lfindzero not supported ) + + if( .true. ) then + write(6,'("newton : fatal : myid=",i3," ; .true. ; value of Lfindzero not supported ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : .true. : value of Lfindzero not supported ;" + endif + end select !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - if( myid.eq.0 ) then - cput = GETTIME - ; write(ounit,'("newton : ", 10x ," :")') - select case( ihybrd ) - case( 1 ) ; write(ounit,'("newton : ",f10.2," : finished ; success ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls - case( 0 ) ; write(ounit,'("newton : ",f10.2," : finished ; input error ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls - case( 2 ) ; write(ounit,'("newton : ",f10.2," : finished ; max. iter ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls - case( 3 ) ; write(ounit,'("newton : ",f10.2," : finished ; xtol too small ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls - case( 4:5 ) ; write(ounit,'("newton : ",f10.2," : finished ; bad progress ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls - case default ; write(ounit,'("newton : ",f10.2," : finished ; illegal ifail ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls - end select - endif ! end of if( myid.eq.0 ) then; + if( myid.eq.0 ) then + cput = MPI_WTIME() + ; write(ounit,'("newton : ", 10x ," :")') + select case( ihybrd ) + case( 1 ) ; write(ounit,'("newton : ",f10.2," : finished ; success ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls + case( 0 ) ; write(ounit,'("newton : ",f10.2," : finished ; input error ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls + case( 2 ) ; write(ounit,'("newton : ",f10.2," : finished ; max. 
iter ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls + case( 3 ) ; write(ounit,'("newton : ",f10.2," : finished ; xtol too small ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls + case( 4:5 ) ; write(ounit,'("newton : ",f10.2," : finished ; bad progress ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls + case default ; write(ounit,'("newton : ",f10.2," : finished ; illegal ifail ; ic05p*f="i2" ; its="i7" ,"i4" ;")') cput-cpus, ihybrd, nFcalls, nDcalls + end select + endif ! end of if( myid.eq.0 ) then; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -246,11 +307,20 @@ subroutine newton( NGdof, position, ihybrd ) if( Wnewton ) write(ounit,'("newton : ", 10x ," : saving derivative matrix to file ;")') #ifdef DEBUG - FATAL( newton, .not.Lhessianallocated, error ) + + if( .not.Lhessianallocated ) then + write(6,'("newton : fatal : myid=",i3," ; .not.Lhessianallocated ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : .not.Lhessianallocated : error ;" + endif + #endif !hessian(1:NGdof,1:NGdof) = zero - SALLOCATE(work, (1:NGdof,1:NGdof), zero)! BLAS version; 19 Jul 2019 + + allocate( work(1:NGdof,1:NGdof), stat=astat ) + work(1:NGdof,1:NGdof) = zero +! BLAS version; 19 Jul 2019 ijdof = 0 do idof = 1, NGdof !do jdof = idof, NGdof ; ijdof = ijdof + 1 ; hessian(idof,jdof) = RR(ijdof) ! un-pack R matrix; old version @@ -262,7 +332,9 @@ subroutine newton( NGdof, position, ihybrd ) !hessian(1:NGdof,1:NGdof) = matmul( fjac(1:NGdof,1:NGdof), hessian(1:NGdof,1:NGdof) ) call DGEMM('N','N',NGdof,NGdof,NGdof,one,fjac,NGdof,work,NGdof,zero,hessian,NGdof) ! BLAS version; 19 Jul 2019 - DALLOCATE(work)! BLAS version; 19 Jul 2019 + + deallocate(work,stat=astat) +! BLAS version; 19 Jul 2019 call writereadgf( 'W', NGdof, ireadhessian ) ! write derivative matrix to file; @@ -274,19 +346,38 @@ subroutine newton( NGdof, position, ihybrd ) call MPI_BARRIER( MPI_COMM_SPEC, ierr2) if( Lfindzero.eq.2 ) then - DALLOCATE( dFFdRZ ) - DALLOCATE( dBBdmp ) - DALLOCATE( dmupfdx ) - DALLOCATE( hessian ) - DALLOCATE( dessian ) + + deallocate(dFFdRZ ,stat=astat) + + + deallocate(dBBdmp ,stat=astat) + + + deallocate(dmupfdx ,stat=astat) + + + deallocate(hessian ,stat=astat) + + + deallocate(dessian ,stat=astat) + Lhessianallocated = .false. endif - DALLOCATE( fjac ) - DALLOCATE( RR ) + + deallocate(fjac ,stat=astat) + + + deallocate(RR ,stat=astat) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(newton) + +9999 continue + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
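Every routine touched by this patch now takes its working-precision kind from mod_kinds via "use mod_kinds, only: wp => dp", and the former preprocessor REAL/INTEGER declarations become real(wp)/integer. The module itself is not part of these hunks; the sketch below only illustrates the minimal shape a kinds module needs to satisfy those use statements (the module name and the use of iso_fortran_env here are assumptions; the actual src module may define dp differently, e.g. via selected_real_kind).

module mod_kinds_sketch                ! illustrative stand-in, not the repository's mod_kinds
  use iso_fortran_env, only : real64
  implicit none
  private
  public :: dp
  integer, parameter :: dp = real64    ! double-precision kind, renamed wp at use sites
end module mod_kinds_sketch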
@@ -302,7 +393,7 @@ end subroutine newton !> @param[in] NGdof !> @param[out] ireadhessian subroutine writereadgf( readorwrite, NGdof , ireadhessian ) - + use mod_kinds, only: wp => dp use constants, only : zero use numerical, only : @@ -316,16 +407,30 @@ subroutine writereadgf( readorwrite, NGdof , ireadhessian ) use allglobal, only : myid, cpus, MPI_COMM_SPEC, ext, & mn, im, in, hessian, Lhessianallocated - LOCALS - CHARACTER, intent(in) :: readorwrite +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + character, intent(in) :: readorwrite LOGICAL :: exist - INTEGER, intent(in) :: NGdof - INTEGER, intent(out) :: ireadhessian + integer, intent(in) :: NGdof + integer, intent(out) :: ireadhessian - INTEGER :: lIgeometry, lIstellsym, lLfreebound, lNvol, lMpol, lNtor, lNGdof + integer :: lIgeometry, lIstellsym, lLfreebound, lNvol, lMpol, lNtor, lNGdof + + + if( .not.Lhessianallocated ) then + write(6,'("newton : fatal : myid=",i3," ; .not.Lhessianallocated ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : .not.Lhessianallocated : error ;" + endif - FATAL( newton, .not.Lhessianallocated, error ) ireadhessian = 0 ! set default intent out; @@ -337,20 +442,44 @@ subroutine writereadgf( readorwrite, NGdof , ireadhessian ) ios = 0 open( dunit, file="."//trim(ext)//".sp.DF", status="replace", form="unformatted", iostat=ios ) ! save derivative matrix to file; - FATAL( newton, ios.ne.0, error opening derivative matrix file ) + + if( ios.ne.0 ) then + write(6,'("newton : fatal : myid=",i3," ; ios.ne.0 ; error opening derivative matrix file ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : ios.ne.0 : error opening derivative matrix file ;" + endif + write( dunit, iostat=ios ) Igeometry, Istellsym, Lfreebound, Nvol, Mpol, Ntor, NGdof ! enable resolution consistency check; - FATAL( newton, ios.ne.0, error writing Nvol, Mpol, Ntor, NGdof ) + + if( ios.ne.0 ) then + write(6,'("newton : fatal : myid=",i3," ; ios.ne.0 ; error writing Nvol;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : ios.ne.0 : error writing Nvol ;" + endif + write( dunit, iostat=ios ) hessian(1:NGdof,1:NGdof) - FATAL( newton, ios.ne.0, error writing hessian to file ) + + if( ios.ne.0 ) then + write(6,'("newton : fatal : myid=",i3," ; ios.ne.0 ; error writing hessian to file ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : ios.ne.0 : error writing hessian to file ;" + endif + close( dunit, iostat=ios ) - FATAL( newton, ios.ne.0, error closing derivative matrix file ) + + if( ios.ne.0 ) then + write(6,'("newton : fatal : myid=",i3," ; ios.ne.0 ; error closing derivative matrix file ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : ios.ne.0 : error closing derivative matrix file ;" + endif + case( 'R' ) - cput = GETTIME + cput = MPI_WTIME() inquire( file="."//trim(ext)//".sp.DF", exist=exist ) ! the derivative matrix; @@ -404,7 +533,13 @@ subroutine writereadgf( readorwrite, NGdof , ireadhessian ) case default - FATAL( newton, .true., invalid readorwrite ) + + if( .true. ) then + write(6,'("newton : fatal : myid=",i3," ; .true. ; invalid readorwrite ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : .true. 
: invalid readorwrite ;" + endif + end select @@ -425,7 +560,7 @@ end subroutine writereadgf !> @param[out] fvec !> @param[in] irevcm subroutine fcn1( NGdof, xx, fvec, irevcm ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, two, ten @@ -456,19 +591,35 @@ subroutine fcn1( NGdof, xx, fvec, irevcm ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: NGdof, irevcm - REAL , intent(in) :: xx(1:NGdof) - REAL , intent(out) :: fvec(1:NGdof) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; - REAL :: position(0:NGdof), force(0:NGdof) + + integer, intent(in) :: NGdof, irevcm + real(wp) , intent(in) :: xx(1:NGdof) + real(wp) , intent(out) :: fvec(1:NGdof) + + real(wp) :: position(0:NGdof), force(0:NGdof) LOGICAL :: LComputeDerivatives, Lonlysolution, LComputeAxis - INTEGER :: idof, jdof, ijdof, ireadhessian, igdof, lvol, ii, imn - CHARACTER :: pack + integer :: idof, jdof, ijdof, ireadhessian, igdof, lvol, ii, imn + character :: pack + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(newton) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -485,12 +636,16 @@ subroutine fcn1( NGdof, xx, fvec, irevcm ) pack = 'U' ! unpack geometrical degrees of freedom; LComputeAxis = .true. LComputeDerivatives = .false. ! function value only solver --> no need to compute derivatives - WCALL( newton, packxi, ( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), & - iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, LComputeDerivatives, LComputeAxis ) ) + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call packxi( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), & + iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, LComputeDerivatives, LComputeAxis ) + cpuo = MPI_WTIME() if( myid.eq.0 ) then - cput = GETTIME + cput = MPI_WTIME() ; write(ounit,1000) cput-cpus, nFcalls, nDcalls, ForceErr, cput-lastcpu, "|BB|e", alog10(BBe(1:min(Mvol-1,28))) if( Igeometry.ge.3 ) then ! include spectral constraints; @@ -502,13 +657,23 @@ subroutine fcn1( NGdof, xx, fvec, irevcm ) write(ounit,1001) "|II|e", alog10(IIe(1:min(Mvol-1,28))) endif endif - lastcpu = GETTIME + lastcpu = MPI_WTIME() - WCALL( newton, wrtend ) ! write restart file; save geometry to ext.end; + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call wrtend + cpuo = MPI_WTIME() + ! write restart file; save geometry to ext.end; endif ! end of if( myid.eq.0 ); - WCALL( newton, write_convergence_output, ( nDcalls, ForceErr ) ) ! save iRbc, iZbs consistent with position; + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call write_convergence_output( nDcalls, ForceErr ) + cpuo = MPI_WTIME() + ! save iRbc, iZbs consistent with position; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
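The expanded WCALL blocks in these hunks all follow one bookkeeping idiom: before calling a child routine the caller closes its own timing interval by adding (cput-cpuo) to its accumulator (Tnewton here), and immediately after the call it restarts its clock with cpuo = MPI_WTIME(); the 9999 exit section adds the final interval. Each T counter therefore measures only time spent in that routine's own body, not inside timed children. A minimal sketch of the idiom, with placeholder names that are not from the repository:

subroutine timed_work( Tself )                 ! placeholder sketch only
  use mpi
  implicit none
  double precision, intent(inout) :: Tself     ! per-routine accumulator, analogous to Tnewton
  double precision                :: cput, cpuo
  cpuo = MPI_WTIME()
  ! ... own work; its cost is charged to Tself ...
  cput = MPI_WTIME() ; Tself = Tself + ( cput - cpuo )   ! close interval before a child call
  ! call child()                               ! a timed child charges its own accumulator
  cpuo = MPI_WTIME()                           ! restart own clock once the child returns
9999 continue
  cput = MPI_WTIME() ; Tself = Tself + ( cput - cpuo )   ! final interval on exit
end subroutine timed_work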
@@ -518,7 +683,12 @@ subroutine fcn1( NGdof, xx, fvec, irevcm ) LComputeDerivatives = .false. LComputeAxis = .true. - WCALL( newton, dforce, ( NGdof, position(0:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis ) ) ! calculate the force-imbalance; + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call dforce( NGdof, position(0:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis ) + cpuo = MPI_WTIME() + ! calculate the force-imbalance; fvec(1:NGdof) = force(1:NGdof) @@ -526,7 +696,13 @@ subroutine fcn1( NGdof, xx, fvec, irevcm ) case default - FATAL( fcn1 , .true., illegal irevcm : C05P*F error ) + + if( .true. ) then + write(6,'("fcn1 : fatal : myid=",i3," ; .true. ; illegal irevcm : C05P*F error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "fcn1 : .true. : illegal irevcm : C05P*F error ;" + endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -539,7 +715,12 @@ subroutine fcn1( NGdof, xx, fvec, irevcm ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(newton) + +9999 continue + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -558,7 +739,7 @@ end subroutine fcn1 !> @param[in] Ldfjac !> @param[in] irevcm indicator for reverse communication; provided by solver to tell this method what to compute subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, two, ten @@ -589,19 +770,35 @@ subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: NGdof, Ldfjac, irevcm - REAL , intent(in) :: xx(1:NGdof) - REAL , intent(out) :: fvec(1:NGdof), fjac(1:Ldfjac,1:NGdof) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + - REAL :: position(0:NGdof), force(0:NGdof) + integer, intent(in) :: NGdof, Ldfjac, irevcm + real(wp) , intent(in) :: xx(1:NGdof) + real(wp) , intent(out) :: fvec(1:NGdof), fjac(1:Ldfjac,1:NGdof) + + real(wp) :: position(0:NGdof), force(0:NGdof) LOGICAL :: LComputeDerivatives, Lonlysolution, LComputeAxis - INTEGER :: idof, jdof, ijdof, ireadhessian, igdof, lvol, ii, imn - CHARACTER :: pack + integer :: idof, jdof, ijdof, ireadhessian, igdof, lvol, ii, imn + character :: pack + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(newton) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -618,12 +815,17 @@ subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) pack = 'U' ! unpack geometrical degrees of freedom; LComputeAxis = .true. LComputeDerivatives = .false. 
- WCALL( newton, packxi, ( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), & - iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, LComputeDerivatives, LComputeAxis ) ) + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call packxi( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), & + iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, LComputeDerivatives, LComputeAxis ) + cpuo = MPI_WTIME() + if( myid.eq.0 ) then - cput = GETTIME + cput = MPI_WTIME() ; write(ounit,1000) cput-cpus, nFcalls, nDcalls, ForceErr, cput-lastcpu, "|BB|e", alog10(BBe(1:min(Mvol-1,28))) if( Igeometry.ge.3 ) then ! include spectral constraints; @@ -635,13 +837,23 @@ subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) write(ounit,1001) "|II|e", alog10(IIe(1:min(Mvol-1,28))) endif endif - lastcpu = GETTIME + lastcpu = MPI_WTIME() - WCALL( newton, wrtend ) ! write restart file; save geometry to ext.end; + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call wrtend + cpuo = MPI_WTIME() + ! write restart file; save geometry to ext.end; endif ! end of if( myid.eq.0 ); - WCALL( newton, write_convergence_output, ( nDcalls, ForceErr ) ) ! save iRbc, iZbs consistent with position; + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call write_convergence_output( nDcalls, ForceErr ) + cpuo = MPI_WTIME() + ! save iRbc, iZbs consistent with position; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -651,7 +863,12 @@ subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) LComputeDerivatives = .false. LComputeAxis = .true. - WCALL( newton, dforce, ( NGdof, position(0:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis ) ) ! calculate the force-imbalance; + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call dforce( NGdof, position(0:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis ) + cpuo = MPI_WTIME() + ! calculate the force-imbalance; fvec(1:NGdof) = force(1:NGdof) @@ -660,7 +877,13 @@ subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) case( 2 ) ! before re-entry to C05PDF, fjac must contain the derivatives; #ifdef DEBUG - FATAL( newton, .not.Lhessianallocated, need to allocate hessian ) + + if( .not.Lhessianallocated ) then + write(6,'("newton : fatal : myid=",i3," ; .not.Lhessianallocated ; need to allocate hessian ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : .not.Lhessianallocated : need to allocate hessian ;" + endif + #endif nDcalls = nDcalls + 1 @@ -669,10 +892,14 @@ subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) if( myid.eq.0 ) call writereadgf( 'R', NGdof, ireadhessian ) ! reads derivatives matrix from file; - IlBCAST( ireadhessian, 1, 0 ) + + call MPI_BCAST( ireadhessian, 1, MPI_INTEGER, 0 , MPI_COMM_SPEC, ierr ) + if( ireadhessian.eq.1 ) then ! derivative matrix has been read from file; - RlBCAST( hessian(1:NGdof,1:NGdof), NGdof*NGdof, 0 ) + + call MPI_BCAST(hessian(1:NGdof,1:NGdof),NGdof*NGdof,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + endif else ! matches if( LreadGF .and. nDcalls.eq.1 ) then; @@ -685,10 +912,21 @@ subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) LComputeDerivatives = .true. LComputeAxis = .true. - WCALL( newton, dforce, ( NGdof, position(0:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis ) ) ! 
calculate the force-imbalance; + + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + call dforce( NGdof, position(0:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis ) + cpuo = MPI_WTIME() + ! calculate the force-imbalance; #ifdef DEBUG - FATAL( newton, Lcheck.eq.4, derivatives of Beltrami field have been computed ) + + if( Lcheck.eq.4 ) then + write(6,'("newton : fatal : myid=",i3," ; Lcheck.eq.4 ; derivatives of Beltrami field have been computed ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : Lcheck.eq.4 : derivatives of Beltrami field have been computed ;" + endif + #endif endif @@ -704,14 +942,26 @@ subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) stop "newton : : myid= ; volume derivatives have been compared ;" endif - FATAL( newton, Lcheck.eq.3, volume derivatives have been compared ) ! the first process will terminate all processes; + + if( Lcheck.eq.3 ) then + write(6,'("newton : fatal : myid=",i3," ; Lcheck.eq.3 ; volume derivatives have been compared ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : Lcheck.eq.3 : volume derivatives have been compared ;" + endif + ! the first process will terminate all processes; if( (Lcheck.eq.4) .and. (nDcalls.ne.1) ) then write(ounit,'("newton : ", 10x ," : myid=",i3," ; field derivatives have been compared ;")') myid stop "newton : : myid= ; field derivatives have been compared ;" endif - FATAL( newton, (Lcheck.eq.4) .and. (nDcalls.ne.1), field derivatives have been compared ) ! the first process will terminate all processes; + + if( (Lcheck.eq.4) .and. (nDcalls.ne.1) ) then + write(6,'("newton : fatal : myid=",i3," ; (Lcheck.eq.4) .and. (nDcalls.ne.1) ; field derivatives have been compared ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "newton : (Lcheck.eq.4) .and. (nDcalls.ne.1) : field derivatives have been compared ;" + endif + ! the first process will terminate all processes; #endif @@ -719,7 +969,13 @@ subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) case default - FATAL( fcn2 , .true., illegal irevcm : hybrj error ) + + if( .true. ) then + write(6,'("fcn2 : fatal : myid=",i3," ; .true. ; illegal irevcm : hybrj error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "fcn2 : .true. : illegal irevcm : hybrj error ;" + endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -732,7 +988,12 @@ subroutine fcn2( NGdof, xx, fvec, fjac, Ldfjac, irevcm ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(newton) + +9999 continue + cput = MPI_WTIME() + Tnewton = Tnewton + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/numrec.F90 b/src/numrec.F90 new file mode 100644 index 00000000..dde3f4bb --- /dev/null +++ b/src/numrec.F90 @@ -0,0 +1,331 @@ +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!title (numerics) ! Some miscellaneous numerical routines. 
+ +!latex \briefly{miscellaneous ``numerical'' routines} + +!l tex \calledby{\link{}} +!l tex \calls{\link{}} + +!latex \tableofcontents + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!latex \subsection{Outline} + +!latex This file contains various miscellaneous ``numerical'' routines as described below. + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!l tex \begin{itemize} + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!l tex \item \type{gi00aa} + +!subroutine gi00aa( ii, jj, ig ) ! not used; SRH: 27 Feb 18; +! +! implicit none +! +! INTEGER, intent(in) :: ii,jj +! INTEGER, intent(out) :: ig +! +! if( ( ii.eq.1 .and. jj.eq.1 ) ) ig = 1 +! if( ( ii.eq.1 .and. jj.eq.2 ) .or. ( ii.eq.2 .and. jj.eq.1 ) ) ig = 2 +! if( ( ii.eq.1 .and. jj.eq.3 ) .or. ( ii.eq.3 .and. jj.eq.1 ) ) ig = 3 +! if( ( ii.eq.2 .and. jj.eq.2 ) ) ig = 4 +! if( ( ii.eq.2 .and. jj.eq.3 ) .or. ( ii.eq.3 .and. jj.eq.2 ) ) ig = 5 +! if( ( ii.eq.3 .and. jj.eq.3 ) ) ig = 6 +! +! return +! +!end subroutine gi00aa + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!latex \subsection{\type{gi00ab}} + +!latex \begin{enumerate} + +!latex \item This routine assigns the Fourier mode labels that converts a double-sum into a single sum; i.e., the $m_j$ and $n_j$ are assigned where +!latex \be f(\t,\z) & = & \sum_{n=0}^{N} f_{0,n}\cos(-n \, N_P \, \z) +!latex + \sum_{m=1}^{M} \sum_{n=-N}^{N} f_{m,n}\cos(m\t-n \, N_P \, \z) \\ +!latex & = & \sum_j f_j \cos(m_j\t-n_j\z), \label{eq:condensedFourierrepresentation} +!latex \ee +!latex where $N\equiv $ \type{Ntor} and $M\equiv $ \type{Mpol} are given on input, and $N_P \equiv $ \type{Nfp} is the field periodicity. + +!latex \end{enumerate} + +subroutine gi00ab( Mpol, Ntor, Nfp, mn, im, in ) + use mod_kinds, only: wp => dp + implicit none + + integer, intent(in) :: Mpol, Ntor, Nfp, mn + integer, intent(out) :: im(mn), in(mn) + + integer :: imn, mm, nn + + imn = 0 + + ; mm = 0 + ;do nn = 0, Ntor + ; imn = imn+1 ; im(imn) = mm ; in(imn) = nn*Nfp + ;enddo + ; + + do mm = 1, Mpol + do nn = -Ntor, Ntor + imn = imn+1 ; im(imn) = mm ; in(imn) = nn*Nfp + enddo + enddo + + return + +end subroutine gi00ab + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +subroutine getimn(Mpol, Ntor, Nfp, mi, ni, idx) + use mod_kinds, only: wp => dp + ! convert m and n to index + implicit none + integer, intent(in) :: Mpol, Ntor, Nfp, mi, ni + integer, intent(out) :: idx + + if (mi.gt.Mpol .or. mi.lt.0 .or. ni.gt.Ntor*Nfp .or. ni.lt.-Ntor*Nfp ) then + idx = 0 + elseif (mi .eq. 0) then + idx = 1 + ni / Nfp + else + idx = 1 + Ntor + (2 * Ntor + 1) * (mi - 1) + (ni / Nfp + Ntor + 1) + end if + +end subroutine getimn + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!latex \subsection{\type{tfft}} + +!latex \begin{enumerate} + +!latex \item This constructs the ``forward'' Fourier transform. 
+ +!latex \item Given a set of data, $(f_{i},g_{i})$ for $i = 1, \dots N_\theta N_\zeta$, on a regular two-dimensional angle grid, +!latex where $\theta_j = 2 \pi j / N_\theta$ for $j = 0, N_\theta-1$, and +!latex $\zeta_k = 2 \pi k / N_\zeta $ for $k = 0, N_\zeta -1$. +!latex The ``packing'' is governed by $i = 1 + j + k N_\theta$. +!latex The ``discrete'' resolution is $N_\theta \equiv $ \type{Nt}, $N_\zeta \equiv $ \type{Nz} and \type{Ntz} $=$ \type{Nt} $\times$ \type{Nz}, +!latex which are set in \link{preset}. +!latex \item The Fourier harmonics consistent with \Eqn{condensedFourierrepresentation} are constructed. +!latex The mode identification labels appearing in \Eqn{condensedFourierrepresentation} are $m_j \equiv $ \type{im(j)} and $n_j \equiv $ \type{in(j)}, +!latex which are set in \link{global} via a call to \type{gi00ab}. + +!latex \end{enumerate} + +subroutine tfft( Nt, Nz, ijreal, ijimag, mn, im, in, efmn, ofmn, cfmn, sfmn, ifail ) + use mod_kinds, only: wp => dp + use constants, only : half, zero, pi2 + + use fileunits, only : ounit + + use inputlist, only : Nfp + use allglobal, only : pi2nfp + + use fftw_interface +#ifdef OPENMP + use OMP_LIB +#endif + implicit none + + intrinsic aimag + + integer :: Nt, Nz, mn, im(1:mn), in(1:mn), Ntz, imn, ifail, mm, nn + real(wp) :: ijreal(1:Nt*Nz), ijimag(1:Nt*Nz), efmn(1:mn), ofmn(1:mn), cfmn(1:mn), sfmn(1:mn) + + LOGICAL :: Lcheck = .false. + integer :: jj, kk, ithread + !REAL :: jireal(1:Nt*Nz), jiimag(1:Nt*Nz), arg, ca, sa + real(wp) :: arg, ca, sa + COMPLEX(C_DOUBLE_COMPLEX) :: z1, z2, z3 + + +#ifdef OPENMP + ithread = omp_get_thread_num() + 1 +#else + ithread = 1 +#endif + + !if( Lcheck ) then ; jireal = ijreal ; jiimag = ijimag + !endif + + do jj = 1, Nz ; cplxin(:,jj,ithread) = CMPLX( ijreal((jj-1)*Nt+1:jj*Nt), ijimag((jj-1)*Nt+1:jj*Nt), KIND=C_DOUBLE_COMPLEX ) + enddo + + call fftw_execute_dft( planf, cplxin(:,:,ithread), cplxout(:,:,ithread) ) !Forward transform + Ntz = Nt * Nz + cplxout(:,:,ithread) = cplxout(:,:,ithread) / Ntz + cplxout(1,1,ithread) = half*cplxout(1,1,ithread) + + do imn = 1, mn + mm = im(imn); nn = in(imn) / Nfp + + z1 = cplxout(1 + MOD(Nt - mm, Nt), 1 + MOD(Nz + nn, Nz),ithread) + z2 = cplxout(1 + mm, 1 + MOD(Nz - nn, Nz),ithread) + + z3 = z1 + z2 + efmn(imn) = real(z3); cfmn(imn) = aimag(z3) + + z3 = z1 - z2 + ofmn(imn) = aimag(z3); sfmn(imn) = -real(z3) + enddo + + if( .not.Lcheck ) return + + ijreal(1:Ntz) = zero ; ijimag(1:Ntz) = zero + + do jj = 0, Nt-1 + + do kk = 0, Nz-1 + + do imn = 1, mn ; arg = im(imn) * jj * pi2 / Nt - in(imn) * kk * pi2nfp / Nz ; ca = cos(arg) ; sa = sin(arg) + + ijreal(1+jj+kk*Nt) = ijreal(1+jj+kk*Nt) + efmn(imn) * ca + ofmn(imn) * sa + ijimag(1+jj+kk*Nt) = ijimag(1+jj+kk*Nt) + cfmn(imn) * ca + sfmn(imn) * sa + + enddo + enddo + enddo + + !write(ounit,'("tfft : ",10x," : Fourier reconstruction error =",2es15.5," ;")') sqrt(sum((ijreal-jireal)**2)/Ntz), sqrt(sum((ijimag-jiimag)**2)/Ntz) + + return + +end subroutine tfft + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!latex \subsection{\type{invfft}} + +!latex \begin{enumerate} + +!latex \item Given the Fourier harmonics, the data on a regular angular grid are constructed. + +!latex \item This is the inverse routine to \type{tfft}. 
+ +!latex \end{enumerate} + +subroutine invfft( mn, im, in, efmn, ofmn, cfmn, sfmn, Nt, Nz, ijreal, ijimag ) + use mod_kinds, only: wp => dp + use constants, only : zero, two, half + use inputlist, only : Nfp + use fftw_interface +#ifdef OPENMP + use OMP_LIB +#endif + + implicit none + + integer, intent(in) :: mn, im(mn), in(mn) + real(wp) , intent(in) :: efmn(mn), ofmn(mn), cfmn(mn), sfmn(mn) + integer, intent(in) :: Nt, Nz + real(wp) , intent(out) :: ijreal(Nt*Nz), ijimag(Nt*Nz) ! output real space; + + integer :: imn, jj, mm, nn, ithread + + +#ifdef OPENMP + ithread = omp_get_thread_num() + 1 +#else + ithread = 1 +#endif + + + cplxin(:,:,ithread) = zero + + !Copy real arrays to complex + do imn = 1,mn ; mm = im(imn) ; nn = in(imn) / Nfp + cplxin(1 + MOD(Nt - mm, Nt), 1 + MOD(Nz + nn, Nz),ithread) = & + half * CMPLX(efmn(imn) - sfmn(imn), cfmn(imn) + ofmn(imn), KIND=C_DOUBLE_COMPLEX) + cplxin(1 + mm, 1 + MOD(Nz - nn, Nz),ithread) = & + half * CMPLX(efmn(imn) + sfmn(imn), cfmn(imn) - ofmn(imn), KIND=C_DOUBLE_COMPLEX) + enddo + cplxin(1,1,ithread) = two*cplxin(1,1,ithread) + + call fftw_execute_dft(planb, cplxin(:,:,ithread), cplxout(:,:,ithread)) !Inverse transform + + !Copy complex result back to real arrays + do jj=1,Nz + ijreal((jj-1)*Nt+1:jj*Nt) = real(cplxout(:,jj,ithread)) + ijimag((jj-1)*Nt+1:jj*Nt) = aimag(cplxout(:,jj,ithread)) + enddo + + return + +end subroutine invfft + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +!latex \subsection{\type{gauleg}} + +!latex \begin{enumerate} + +!latex \item Compute Gaussian integration weights and abscissae. + +!latex \item From Numerical Recipes. + +!latex \end{enumerate} + +subroutine gauleg( n, weight, abscis, ifail ) + use mod_kinds, only: wp => dp + use constants, only : zero, one, two, pi + + implicit none + + intrinsic abs, cos, epsilon + + integer, intent(in) :: n + real(wp), dimension(n), intent(out) :: weight, abscis + integer, intent(out) :: ifail + + integer, parameter :: maxiter=16 + integer :: m, j, i, irefl, iter + real(wp) :: z1,z,pp,p3,p2,p1 + real(wp), parameter :: eps = epsilon(z) + + !Error checking + if( n < 1 ) then ; ifail = 2 ; return + endif + + m = (n + 1)/2 !Roots are symmetric in interval, so we only need half + do i=1,m !Loop over desired roots + irefl = n + 1 - i + if (i .ne. irefl) then + z = cos(pi*(i - 0.25)/(n + 0.5)) ! Approximate ith root + else !For an odd number of abscissae, the center must be at zero by symmetry. + z = 0.0 + endif + + !Refine by Newton method + do iter=1,maxiter + p1 = one; p2 = zero ! Initialize recurrence relation + + do j=1,n !Recurrence relation to get P(x) + p3 = p2; p2 = p1 + p1 = ((two*j - one)*z*p2 - (j - one)*p3)/j + enddo !j + + pp = n*(z*p1 - p2)/(z*z - one) !Derivative of P(x) + z1 = z; z = z1 - p1/pp !Newton iteration + if (abs(z - z1) .le. eps) exit !Convergence test + enddo !iter + if (iter > maxiter) then + ifail = 1; return + endif + + abscis(i) = -z; abscis(irefl) = z + weight(i) = two/((one - z*z)*pp*pp) + weight(irefl) = weight(i) + enddo !i + + ifail = 0 +end subroutine gauleg diff --git a/src/numrec.f90 b/src/numrec.f90 deleted file mode 100644 index b1640e33..00000000 --- a/src/numrec.f90 +++ /dev/null @@ -1,720 +0,0 @@ -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!title (numerics) ! Some miscellaneous numerical routines. 
- -!latex \briefly{miscellaneous ``numerical'' routines} - -!l tex \calledby{\link{}} -!l tex \calls{\link{}} - -!latex \tableofcontents - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!latex \subsection{Outline} - -!latex This file contains various miscellaneous ``numerical'' routines as described below. - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!l tex \begin{itemize} - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!l tex \item \type{gi00aa} - -!subroutine gi00aa( ii, jj, ig ) ! not used; SRH: 27 Feb 18; -! -! implicit none -! -! INTEGER, intent(in) :: ii,jj -! INTEGER, intent(out) :: ig -! -! if( ( ii.eq.1 .and. jj.eq.1 ) ) ig = 1 -! if( ( ii.eq.1 .and. jj.eq.2 ) .or. ( ii.eq.2 .and. jj.eq.1 ) ) ig = 2 -! if( ( ii.eq.1 .and. jj.eq.3 ) .or. ( ii.eq.3 .and. jj.eq.1 ) ) ig = 3 -! if( ( ii.eq.2 .and. jj.eq.2 ) ) ig = 4 -! if( ( ii.eq.2 .and. jj.eq.3 ) .or. ( ii.eq.3 .and. jj.eq.2 ) ) ig = 5 -! if( ( ii.eq.3 .and. jj.eq.3 ) ) ig = 6 -! -! return -! -!end subroutine gi00aa - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!latex \subsection{\type{gi00ab}} - -!latex \begin{enumerate} - -!latex \item This routine assigns the Fourier mode labels that converts a double-sum into a single sum; i.e., the $m_j$ and $n_j$ are assigned where -!latex \be f(\t,\z) & = & \sum_{n=0}^{N} f_{0,n}\cos(-n \, N_P \, \z) -!latex + \sum_{m=1}^{M} \sum_{n=-N}^{N} f_{m,n}\cos(m\t-n \, N_P \, \z) \\ -!latex & = & \sum_j f_j \cos(m_j\t-n_j\z), \label{eq:condensedFourierrepresentation} -!latex \ee -!latex where $N\equiv $ \type{Ntor} and $M\equiv $ \type{Mpol} are given on input, and $N_P \equiv $ \type{Nfp} is the field periodicity. - -!latex \end{enumerate} - -subroutine gi00ab( Mpol, Ntor, Nfp, mn, im, in ) - - implicit none - - INTEGER, intent(in) :: Mpol, Ntor, Nfp, mn - INTEGER, intent(out) :: im(mn), in(mn) - - INTEGER :: imn, mm, nn - - imn = 0 - - ; mm = 0 - ;do nn = 0, Ntor - ; imn = imn+1 ; im(imn) = mm ; in(imn) = nn*Nfp - ;enddo - ; - - do mm = 1, Mpol - do nn = -Ntor, Ntor - imn = imn+1 ; im(imn) = mm ; in(imn) = nn*Nfp - enddo - enddo - - return - -end subroutine gi00ab - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -subroutine getimn(Mpol, Ntor, Nfp, mi, ni, idx) - ! convert m and n to index - implicit none - integer, intent(in) :: Mpol, Ntor, Nfp, mi, ni - integer, intent(out) :: idx - - if (mi.gt.Mpol .or. mi.lt.0 .or. ni.gt.Ntor*Nfp .or. ni.lt.-Ntor*Nfp ) then - idx = 0 - elseif (mi .eq. 0) then - idx = 1 + ni / Nfp - else - idx = 1 + Ntor + (2 * Ntor + 1) * (mi - 1) + (ni / Nfp + Ntor + 1) - end if - -end subroutine getimn - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!latex \subsection{\type{tfft}} - -!latex \begin{enumerate} - -!latex \item This constructs the ``forward'' Fourier transform. 
- -!latex \item Given a set of data, $(f_{i},g_{i})$ for $i = 1, \dots N_\theta N_\zeta$, on a regular two-dimensional angle grid, -!latex where $\theta_j = 2 \pi j / N_\theta$ for $j = 0, N_\theta-1$, and -!latex $\zeta_k = 2 \pi k / N_\zeta $ for $k = 0, N_\zeta -1$. -!latex The ``packing'' is governed by $i = 1 + j + k N_\theta$. -!latex The ``discrete'' resolution is $N_\theta \equiv $ \type{Nt}, $N_\zeta \equiv $ \type{Nz} and \type{Ntz} $=$ \type{Nt} $\times$ \type{Nz}, -!latex which are set in \link{preset}. -!latex \item The Fourier harmonics consistent with \Eqn{condensedFourierrepresentation} are constructed. -!latex The mode identification labels appearing in \Eqn{condensedFourierrepresentation} are $m_j \equiv $ \type{im(j)} and $n_j \equiv $ \type{in(j)}, -!latex which are set in \link{global} via a call to \type{gi00ab}. - -!latex \end{enumerate} - -subroutine tfft( Nt, Nz, ijreal, ijimag, mn, im, in, efmn, ofmn, cfmn, sfmn, ifail ) - - use constants, only : half, zero, pi2 - - use fileunits, only : ounit - - use inputlist, only : Nfp - use allglobal, only : pi2nfp - - use fftw_interface -#ifdef OPENMP - use OMP_LIB -#endif - implicit none - - intrinsic aimag - - INTEGER :: Nt, Nz, mn, im(1:mn), in(1:mn), Ntz, imn, ifail, mm, nn - REAL :: ijreal(1:Nt*Nz), ijimag(1:Nt*Nz), efmn(1:mn), ofmn(1:mn), cfmn(1:mn), sfmn(1:mn) - - LOGICAL :: Lcheck = .false. - INTEGER :: jj, kk, ithread - !REAL :: jireal(1:Nt*Nz), jiimag(1:Nt*Nz), arg, ca, sa - REAL :: arg, ca, sa - COMPLEX(C_DOUBLE_COMPLEX) :: z1, z2, z3 - - GETTHREAD - !if( Lcheck ) then ; jireal = ijreal ; jiimag = ijimag - !endif - - do jj = 1, Nz ; cplxin(:,jj,ithread) = CMPLX( ijreal((jj-1)*Nt+1:jj*Nt), ijimag((jj-1)*Nt+1:jj*Nt), KIND=C_DOUBLE_COMPLEX ) - enddo - - call fftw_execute_dft( planf, cplxin(:,:,ithread), cplxout(:,:,ithread) ) !Forward transform - Ntz = Nt * Nz - cplxout(:,:,ithread) = cplxout(:,:,ithread) / Ntz - cplxout(1,1,ithread) = half*cplxout(1,1,ithread) - - do imn = 1, mn - mm = im(imn); nn = in(imn) / Nfp - - z1 = cplxout(1 + MOD(Nt - mm, Nt), 1 + MOD(Nz + nn, Nz),ithread) - z2 = cplxout(1 + mm, 1 + MOD(Nz - nn, Nz),ithread) - - z3 = z1 + z2 - efmn(imn) = real(z3); cfmn(imn) = aimag(z3) - - z3 = z1 - z2 - ofmn(imn) = aimag(z3); sfmn(imn) = -real(z3) - enddo - - if( .not.Lcheck ) return - - ijreal(1:Ntz) = zero ; ijimag(1:Ntz) = zero - - do jj = 0, Nt-1 - - do kk = 0, Nz-1 - - do imn = 1, mn ; arg = im(imn) * jj * pi2 / Nt - in(imn) * kk * pi2nfp / Nz ; ca = cos(arg) ; sa = sin(arg) - - ijreal(1+jj+kk*Nt) = ijreal(1+jj+kk*Nt) + efmn(imn) * ca + ofmn(imn) * sa - ijimag(1+jj+kk*Nt) = ijimag(1+jj+kk*Nt) + cfmn(imn) * ca + sfmn(imn) * sa - - enddo - enddo - enddo - - !write(ounit,'("tfft : ",10x," : Fourier reconstruction error =",2es15.5," ;")') sqrt(sum((ijreal-jireal)**2)/Ntz), sqrt(sum((ijimag-jiimag)**2)/Ntz) - - return - -end subroutine tfft - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!latex \subsection{\type{invfft}} - -!latex \begin{enumerate} - -!latex \item Given the Fourier harmonics, the data on a regular angular grid are constructed. - -!latex \item This is the inverse routine to \type{tfft}. 
- -!latex \end{enumerate} - -subroutine invfft( mn, im, in, efmn, ofmn, cfmn, sfmn, Nt, Nz, ijreal, ijimag ) - - use constants, only : zero, two, half - use inputlist, only : Nfp - use fftw_interface -#ifdef OPENMP - use OMP_LIB -#endif - - implicit none - - INTEGER, intent(in) :: mn, im(mn), in(mn) - REAL , intent(in) :: efmn(mn), ofmn(mn), cfmn(mn), sfmn(mn) - INTEGER, intent(in) :: Nt, Nz - REAL , intent(out) :: ijreal(Nt*Nz), ijimag(Nt*Nz) ! output real space; - - INTEGER :: imn, jj, mm, nn, ithread - - GETTHREAD - - cplxin(:,:,ithread) = zero - - !Copy real arrays to complex - do imn = 1,mn ; mm = im(imn) ; nn = in(imn) / Nfp - cplxin(1 + MOD(Nt - mm, Nt), 1 + MOD(Nz + nn, Nz),ithread) = & - half * CMPLX(efmn(imn) - sfmn(imn), cfmn(imn) + ofmn(imn), KIND=C_DOUBLE_COMPLEX) - cplxin(1 + mm, 1 + MOD(Nz - nn, Nz),ithread) = & - half * CMPLX(efmn(imn) + sfmn(imn), cfmn(imn) - ofmn(imn), KIND=C_DOUBLE_COMPLEX) - enddo - cplxin(1,1,ithread) = two*cplxin(1,1,ithread) - - call fftw_execute_dft(planb, cplxin(:,:,ithread), cplxout(:,:,ithread)) !Inverse transform - - !Copy complex result back to real arrays - do jj=1,Nz - ijreal((jj-1)*Nt+1:jj*Nt) = real(cplxout(:,jj,ithread)) - ijimag((jj-1)*Nt+1:jj*Nt) = aimag(cplxout(:,jj,ithread)) - enddo - - return - -end subroutine invfft - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!latex \subsection{\type{gauleg}} - -!latex \begin{enumerate} - -!latex \item Compute Gaussian integration weights and abscissae. - -!latex \item From Numerical Recipes. - -!latex \end{enumerate} - -subroutine gauleg( n, weight, abscis, ifail ) - - use constants, only : zero, one, two, pi - - implicit none - - intrinsic abs, cos, epsilon - - INTEGER, intent(in) :: n - REAL, dimension(n), intent(out) :: weight, abscis - INTEGER, intent(out) :: ifail - - INTEGER, parameter :: maxiter=16 - INTEGER :: m, j, i, irefl, iter - REAL :: z1,z,pp,p3,p2,p1 - REAL, parameter :: eps = epsilon(z) - - !Error checking - if( n < 1 ) then ; ifail = 2 ; return - endif - - m = (n + 1)/2 !Roots are symmetric in interval, so we only need half - do i=1,m !Loop over desired roots - irefl = n + 1 - i - if (i .ne. irefl) then - z = cos(pi*(i - 0.25)/(n + 0.5)) ! Approximate ith root - else !For an odd number of abscissae, the center must be at zero by symmetry. - z = 0.0 - endif - - !Refine by Newton method - do iter=1,maxiter - p1 = one; p2 = zero ! Initialize recurrence relation - - do j=1,n !Recurrence relation to get P(x) - p3 = p2; p2 = p1 - p1 = ((two*j - one)*z*p2 - (j - one)*p3)/j - enddo !j - - pp = n*(z*p1 - p2)/(z*z - one) !Derivative of P(x) - z1 = z; z = z1 - p1/pp !Newton iteration - if (abs(z - z1) .le. eps) exit !Convergence test - enddo !iter - if (iter > maxiter) then - ifail = 1; return - endif - - abscis(i) = -z; abscis(irefl) = z - weight(i) = two/((one - z*z)*pp*pp) - weight(irefl) = weight(i) - enddo !i - - ifail = 0 -end subroutine gauleg - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -#ifdef DELETETHIS - -!l tex \subsection{\type{svdcmp}} ! 
not used; SRH: 27 Feb 18; - -subroutine svdcmp(a,m,n,mp,np,w,v) - use constants,only:zero,one - - implicit none - integer nMaX,M,n,MP,nP,i,j,jj,k,l,its,nM - parameter (nMaX=500) - REAL :: a(MP,nP),w(nP),v(nP,nP),rv1(nMaX) - REAL :: c,F,g,H,s,X,Y,Z,scale,anorM,pythag,oone - - !stop "svdcmp : to be deleted?" - - g=zero - scale=zero - anorm=zero - do 25 i=1,n - l=i+1 - rv1(i)=scale*g - g=zero - s=zero - scale=zero - if(i.le.m)then - do 11 k=i,m - scale=scale+abs(a(k,i)) -11 continue - if(scale.ne.zero)then - do 12 k=i,m - a(k,i)=a(k,i)/scale - s=s+a(k,i)*a(k,i) -12 continue - f=a(i,i) - g=-sign(sqrt(s),f) - h=f*g-s - a(i,i)=f-g - do 15 j=l,n - s=zero - do 13 k=i,m - s=s+a(k,i)*a(k,j) -13 continue - f=s/h - do 14 k=i,m - a(k,j)=a(k,j)+f*a(k,i) -14 continue -15 continue - do 16 k=i,m - a(k,i)=scale*a(k,i) -16 continue - endif - endif - w(i)=scale *g - g=zero - s=zero - scale=zero - if((i.le.m).and.(i.ne.n))then - do 17 k=l,n - scale=scale+abs(a(i,k)) -17 continue - if(scale.ne.zero)then - do 18 k=l,n - a(i,k)=a(i,k)/scale - s=s+a(i,k)*a(i,k) -18 continue - f=a(i,l) - g=-sign(sqrt(s),f) - h=f*g-s - a(i,l)=f-g - do 19 k=l,n - rv1(k)=a(i,k)/h -19 continue - do 23 j=l,m - s=zero - do 21 k=l,n - s=s+a(j,k)*a(i,k) -21 continue - do 22 k=l,n - a(j,k)=a(j,k)+s*rv1(k) -22 continue -23 continue - do 24 k=l,n - a(i,k)=scale*a(i,k) -24 continue - endif - endif - anorm=max(anorm,(abs(w(i))+abs(rv1(i)))) -25 continue - do 32 i=n,1,-1 - if(i.lt.n)then - if(g.ne.zero)then - do 26 j=l,n - v(j,i)=(a(i,j)/a(i,l))/g -26 continue - do 29 j=l,n - s=zero - do 27 k=l,n - s=s+a(i,k)*v(k,j) -27 continue - do 28 k=l,n - v(k,j)=v(k,j)+s*v(k,i) -28 continue -29 continue - endif - do 31 j=l,n - v(i,j)=zero - v(j,i)=zero -31 continue - endif - v(i,i)=one - g=rv1(i) - l=i -32 continue - do 39 i=min(m,n),1,-1 - l=i+1 - g=w(i) - do 33 j=l,n - a(i,j)=zero -33 continue - if(g.ne.zero)then - g=one/g - do 36 j=l,n - s=zero - do 34 k=l,m - s=s+a(k,i)*a(k,j) -34 continue - f=(s/a(i,i))*g - do 35 k=i,m - a(k,j)=a(k,j)+f*a(k,i) -35 continue -36 continue - do 37 j=i,m - a(j,i)=a(j,i)*g -37 continue - else - do 38 j= i,m - a(j,i)=zero -38 continue - endif - a(i,i)=a(i,i)+one -39 continue - do 49 k=n,1,-1 - do 48 its=1,30 - do 41 l=k,1,-1 - nm=l-1 - if((abs(rv1(l))+anorm).eq.anorm) goto 2 - if((abs(w(nm))+anorm).eq.anorm) goto 1 -41 continue -1 c=zero - s=one - do 43 i=l,k - f=s*rv1(i) - rv1(i)=c*rv1(i) - if((abs(f)+anorm).eq.anorm) goto 2 - g=w(i) - h=pythag(f,g) - w(i)=h - h=one/h - c= (g*h) - s=-(f*h) - do 42 j=1,m - y=a(j,nm) - z=a(j,i) - a(j,nm)=(y*c)+(z*s) - a(j,i)=-(y*s)+(z*c) -42 continue -43 continue -2 z=w(k) - if(l.eq.k)then - if(z.lt.zero)then - w(k)=-z - do 44 j=1,n - v(j,k)=-v(j,k) -44 continue - endif - goto 3 - endif - if(its.eq.30) stop "svdcmp : no convergence" - x=w(l) - nm=k-1 - y=w(nm) - g=rv1(nm) - h=rv1(k) - f=((y-z)*(y+z)+(g-h)*(g+h))/(2.0*h*y) - oone=one - g=pythag(f,oone) - f=((x-z)*(x+z)+h*((y/(f+sign(g,f)))-h))/x - c=one - s=one - do 47 j=l,nm - i=j+1 - g=rv1(i) - y=w(i) - h=s*g - g=c*g - z=pythag(f,h) - rv1(j)=z - c=f/z - s=h/z - f= (x*c)+(g*s) - g=-(x*s)+(g*c) - h=y*s - y=y*c - do 45 jj=1,n - x=v(jj,j) - z=v(jj,i) - v(jj,j)= (x*c)+(z*s) - v(jj,i)=-(x*s)+(z*c) -45 continue - z=pythag(f,h) - w(j)=z - if(z.ne.zero)then - z=one/z - c=f*z - s=h*z - endif - f= (c*g)+(s*y) - x=-(s*g)+(c*y) - do 46 jj=1,m - y=a(jj,j) - z=a(jj,i) - a(jj,j)= (y*c)+(z*s) - a(jj,i)=-(y*s)+(z*c) -46 continue -47 continue - rv1(l)=zero - rv1(k)=f - w(k)=x -48 continue -3 continue -49 continue - return - end - -#endif - 
-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -#ifdef DELETETHIS - -!l tex \subsection{\type{pythag}} ! not used; SRH: 27 Feb 18; - -REAL function pythag(a,b) - implicit none - REAL :: a,b - REAL :: absa,absb - - !stop "pythag : to be deleted?" - - absa=abs(a) - absb=abs(b) - if(absa.gt.absb) then - pythag=absa*sqrt(1.+(absb/absa)**2) - else - if(absb.eq.0.) then - pythag=0. - else - pythag=absb*sqrt(1.+(absa/absb)**2) - endif - endif - return -end function pythag - -#endif - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -#ifdef DELETETHIS - -!l tex \subsection{\type{svbksb}} ! not used; SRH: 27 Feb 18; - -subroutine svbksb(u,w,v,M,n,MP,nP,b,X) - - implicit none - integer nMaX,M,n,MP,nP,i,j,jj - parameter (nMaX=10000) - REAL, intent(in) :: b(MP) - REAL :: u(MP,nP),w(nP),v(nP,nP),X(nP),tMP(nMaX) - REAL :: s - - !stop "svbksb : to be deleted?" - - do 12 j=1,n - s=0. - if(w(j).ne.0.)then - do 11 i=1,M - s=s+u(i,j)*b(i) -11 continue - s=s/w(j) - endif - tMP(j)=s -12 continue - do 14 j=1,n - s=0. - do 13 jj=1,n - s=s+v(j,jj)*tMP(jj) -13 continue - X(j)=s -14 continue - return - end - -#endif - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -#ifdef DELETETHIS - -!l tex \subsection{\type{sort}} ! not used; SRH: 27 Feb 18; - - subroutine sort(n,ra) - - implicit none - integer n,l,ir,i,j - REAL :: ra(n),rra - - !stop "sort : to be deleted?" - - if(n.eq.1) return - l=n/2+1 - ir=n -10 continue - if(l.gt.1)then - l=l-1 - rra=ra(l) - else - rra=ra(ir) - ra(ir)=ra(1) - ir=ir-1 - if(ir.eq.1)then - ra(1)=rra - return - endif - endif - i=l - j=l+l -20 if(j.le.ir)then - if(j.lt.ir)then - if(ra(j).lt.ra(j+1))j=j+1 - endif - if(rra.lt.ra(j))then - ra(i)=ra(j) - i=j - j=j+j - else - j=ir+1 - endif - goto 20 - endif - ra(i)=rra - goto 10 - end - -#endif - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -#ifdef DELETETHIS - -!l tex \subsection{\type{singvalues}} ! not used; SRH: 27 Feb 18; - - subroutine singvalues(nrow,ncol,Mat,b,sx,cutoff,wsvd) ! nrow = nconstraints ; ncol = nfreedom - implicit none - INTEGER, intent(in) :: nrow,ncol - REAL,intent(in) :: Mat(nrow,ncol),b(nrow) - integer i,nev - - REAL :: Mato(nrow,ncol) - REAL :: sx(ncol) - REAL :: vsvd(ncol,ncol),wsvd(ncol),wsvdc(ncol) - REAL :: cutoff,wmax,wmin - - sx=0.0;wsvd=0.0;vsvd=0.0;wsvdc=0.0;wmax=0.0;Mato=Mat - - call svdcmp(Mato,nrow,ncol,nrow,ncol,wsvd,vsvd) - wsvdc=wsvd - call sort(ncol,wsvdc) - wmax=wsvdc(ncol) - wmin=abs(wmax)*cutoff - wsvdc=0.0;nev=0 - do i=1,ncol - if(abs(wsvd(i)).ge.wmin) then;wsvdc(i)=wsvd(i);nev=nev+1 - endif - enddo - call svbksb(Mato,wsvdc,vsvd,nrow,ncol,nrow,ncol,b,sx) - !Mat=Mato - call sort(ncol,wsvd) - return - end subroutine singvalues - -#endif - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - -!l tex \end{itemize} - -!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
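The per-file hunks that follow (packab, packxi, pc00aa, pc00ab, pp00aa, pp00ab, preset) all apply the same transformation: the preprocessor macros of the old *.f90 sources (LOCALS, BEGIN/RETURN, GETTIME, FATAL, SALLOCATE/DALLOCATE/NALLOCATE, WCALL/CALL, LREGION, RlBCAST) are written out as plain Fortran in the renamed *.F90 files, and REAL declarations become real(wp) with wp taken from mod_kinds. Below is a minimal, self-contained sketch of that expansion pattern, not SPEC code: the program and worker names and the timer Tdemo are hypothetical, wp is declared locally rather than imported from mod_kinds, and MPI_COMM_WORLD stands in for MPI_COMM_SPEC.

! demo.F90 : a hypothetical sketch of the macro expansion used in the renamed *.F90 files below;
!            build with an MPI wrapper, e.g. mpif90 demo.F90 -o demo (keep the .F90 suffix so the
!            preprocessor handles the OPENMP guard). Names Tdemo, demo_expansion, dwork are invented.
program demo_expansion
#ifdef OPENMP
  use OMP_LIB
#endif
  use mpi
  implicit none
  integer, parameter :: wp = kind(1.0d0)   ! stands in for: use mod_kinds, only: wp => dp
  integer  :: ierr, astat, nthreads, myid, n
  real(wp) :: cput, cpui, cpuo, Tdemo
  real(wp), allocatable :: work(:)

  call MPI_INIT( ierr )
  call MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
  Tdemo = 0.0_wp ; n = 8

  cpui = MPI_WTIME() ; cpuo = cpui         ! expansion of BEGIN(demo): start the local timer
#ifdef OPENMP
  nthreads = omp_get_max_threads()         ! expansion of LOCALS: thread count (often unused)
#else
  nthreads = 1
#endif

  if( n.lt.1 ) then                        ! expansion of FATAL( demo, n.lt.1, invalid size )
   write(6,'("demo   : fatal : myid=",i3," ; n.lt.1 ; invalid size ;")') myid
   call MPI_ABORT( MPI_COMM_WORLD, 1, ierr )
   stop "demo   : n.lt.1 : invalid size ;"
  endif

  allocate( work(1:n), stat=astat )        ! expansion of SALLOCATE( work, (1:n), zero )
  work(1:n) = 0.0_wp

  cput = MPI_WTIME() ; Tdemo = Tdemo + ( cput-cpuo )   ! expansion of WCALL( demo, dwork, (...) ):
  call dwork( n, work(1:n) )                           ! pause this routine's timer around the call
  cpuo = MPI_WTIME()

  deallocate( work, stat=astat )           ! expansion of DALLOCATE( work )

  cput = MPI_WTIME() ; Tdemo = Tdemo + ( cput-cpuo )   ! expansion of RETURN(demo): accumulate time
  if( myid.eq.0 ) write(6,'("demo   : ",f10.6," : seconds accumulated in Tdemo ;")') Tdemo

  call MPI_FINALIZE( ierr )

contains

  subroutine dwork( n, x )                 ! hypothetical worker, for illustration only
    integer , intent(in)    :: n
    real(wp), intent(inout) :: x(1:n)
    x(1:n) = x(1:n) + 1.0_wp
  end subroutine dwork

end program demo_expansion

Compiled as above, the sketch also shows why every expanded routine in the diffs below keeps its own cpui/cpuo bookkeeping: the WCALL expansion deliberately stops the caller's timer around the nested call, so each per-routine timer (Tpackab, Tpackxi, Tpc00aa, Tpc00ab, Tpp00aa, Tpp00ab, Tpreset) accumulates only the time spent in that routine itself.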
diff --git a/src/packab.f90 b/src/packab.F90 similarity index 72% rename from src/packab.f90 rename to src/packab.F90 index b2b5ccc2..652147c8 100644 --- a/src/packab.f90 +++ b/src/packab.F90 @@ -31,7 +31,7 @@ !> @param solution !> @param ideriv subroutine packab( packorunpack, lvol, NN, solution, ideriv ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero @@ -52,15 +52,31 @@ subroutine packab( packorunpack, lvol, NN, solution, ideriv ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - CHARACTER, intent(in) :: packorunpack - INTEGER , intent(in) :: lvol, NN, ideriv - REAL :: solution(1:NN) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + character, intent(in) :: packorunpack + integer , intent(in) :: lvol, NN, ideriv + real(wp) :: solution(1:NN) - INTEGER :: ii, ll, id, llrad + integer :: ii, ll, id, llrad + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(packab) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -79,26 +95,86 @@ subroutine packab( packorunpack, lvol, NN, solution, ideriv ) if( YESstellsym ) then ; ii = 1 - do ll = 0, llrad ; id = Ate(lvol,0,ii)%i(ll) ; FATAL( packab, id.lt.1 .or. id.gt.NN, unpacking illegal subscript ) - ; ; id = Aze(lvol,0,ii)%i(ll) ; FATAL( packab, id.lt.1 .or. id.gt.NN, unpacking illegal subscript ) + do ll = 0, llrad ; id = Ate(lvol,0,ii)%i(ll) ; + if( id.lt.1 .or. id.gt.NN ) then + write(6,'("packab : fatal : myid=",i3," ; id.lt.1 .or. id.gt.NN ; unpacking illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packab : id.lt.1 .or. id.gt.NN : unpacking illegal subscript ;" + endif + + ; ; id = Aze(lvol,0,ii)%i(ll) ; + if( id.lt.1 .or. id.gt.NN ) then + write(6,'("packab : fatal : myid=",i3," ; id.lt.1 .or. id.gt.NN ; unpacking illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packab : id.lt.1 .or. id.gt.NN : unpacking illegal subscript ;" + endif + enddo ! end of do ll; do ii = 2, mn - do ll = 0, llrad ; id = Ate(lvol,0,ii)%i(ll) ; FATAL( packab, id.lt.1 .or. id.gt.NN, unpacking illegal subscript ) - ; ; id = Aze(lvol,0,ii)%i(ll) ; FATAL( packab, id.lt.1 .or. id.gt.NN, unpacking illegal subscript ) + do ll = 0, llrad ; id = Ate(lvol,0,ii)%i(ll) ; + if( id.lt.1 .or. id.gt.NN ) then + write(6,'("packab : fatal : myid=",i3," ; id.lt.1 .or. id.gt.NN ; unpacking illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packab : id.lt.1 .or. id.gt.NN : unpacking illegal subscript ;" + endif + + ; ; id = Aze(lvol,0,ii)%i(ll) ; + if( id.lt.1 .or. id.gt.NN ) then + write(6,'("packab : fatal : myid=",i3," ; id.lt.1 .or. id.gt.NN ; unpacking illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packab : id.lt.1 .or. id.gt.NN : unpacking illegal subscript ;" + endif + enddo ! end of do ll; enddo ! end of do ii; else ! NOTstellsym; ; ii = 1 - do ll = 0, llrad ; id = Ate(lvol,0,ii)%i(ll) ; FATAL( packab, id.lt.1 .or. 
id.gt.NN, unpacking illegal subscript ) - ; ; id = Aze(lvol,0,ii)%i(ll) ; FATAL( packab, id.lt.1 .or. id.gt.NN, unpacking illegal subscript ) + do ll = 0, llrad ; id = Ate(lvol,0,ii)%i(ll) ; + if( id.lt.1 .or. id.gt.NN ) then + write(6,'("packab : fatal : myid=",i3," ; id.lt.1 .or. id.gt.NN ; unpacking illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packab : id.lt.1 .or. id.gt.NN : unpacking illegal subscript ;" + endif + + ; ; id = Aze(lvol,0,ii)%i(ll) ; + if( id.lt.1 .or. id.gt.NN ) then + write(6,'("packab : fatal : myid=",i3," ; id.lt.1 .or. id.gt.NN ; unpacking illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packab : id.lt.1 .or. id.gt.NN : unpacking illegal subscript ;" + endif + enddo do ii = 2, mn - do ll = 0, llrad ; id = Ate(lvol,0,ii)%i(ll) ; FATAL( packab, id.lt.1 .or. id.gt.NN, unpacking illegal subscript ) - ; ; id = Aze(lvol,0,ii)%i(ll) ; FATAL( packab, id.lt.1 .or. id.gt.NN, unpacking illegal subscript ) - ; ; id = Ato(lvol,0,ii)%i(ll) ; FATAL( packab, id.lt.1 .or. id.gt.NN, unpacking illegal subscript ) - ; ; id = Azo(lvol,0,ii)%i(ll) ; FATAL( packab, id.lt.1 .or. id.gt.NN, unpacking illegal subscript ) + do ll = 0, llrad ; id = Ate(lvol,0,ii)%i(ll) ; + if( id.lt.1 .or. id.gt.NN ) then + write(6,'("packab : fatal : myid=",i3," ; id.lt.1 .or. id.gt.NN ; unpacking illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packab : id.lt.1 .or. id.gt.NN : unpacking illegal subscript ;" + endif + + ; ; id = Aze(lvol,0,ii)%i(ll) ; + if( id.lt.1 .or. id.gt.NN ) then + write(6,'("packab : fatal : myid=",i3," ; id.lt.1 .or. id.gt.NN ; unpacking illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packab : id.lt.1 .or. id.gt.NN : unpacking illegal subscript ;" + endif + + ; ; id = Ato(lvol,0,ii)%i(ll) ; + if( id.lt.1 .or. id.gt.NN ) then + write(6,'("packab : fatal : myid=",i3," ; id.lt.1 .or. id.gt.NN ; unpacking illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packab : id.lt.1 .or. id.gt.NN : unpacking illegal subscript ;" + endif + + ; ; id = Azo(lvol,0,ii)%i(ll) ; + if( id.lt.1 .or. id.gt.NN ) then + write(6,'("packab : fatal : myid=",i3," ; id.lt.1 .or. id.gt.NN ; unpacking illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packab : id.lt.1 .or. id.gt.NN : unpacking illegal subscript ;" + endif + enddo ! end of do ll; enddo ! end of do ii; @@ -282,7 +358,12 @@ subroutine packab( packorunpack, lvol, NN, solution, ideriv ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(packab) + +9999 continue + cput = MPI_WTIME() + Tpackab = Tpackab + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/packxi.f90 b/src/packxi.F90 similarity index 84% rename from src/packxi.f90 rename to src/packxi.F90 index b70d01ee..8b663abd 100644 --- a/src/packxi.f90 +++ b/src/packxi.F90 @@ -55,7 +55,7 @@ !> @param[in] LComputeDerivatives !> @param[in] LComputeAxis subroutine packxi( NGdof, position, Mvol, mn, iRbc, iZbs, iRbs, iZbc, packorunpack, LComputeDerivatives, LComputeAxis ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
use constants, only : zero @@ -76,18 +76,34 @@ subroutine packxi( NGdof, position, Mvol, mn, iRbc, iZbs, iRbs, iZbc, packorunpa !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + LOGICAL, intent(in) :: LComputeDerivatives ! indicates whether derivatives are to be calculated; LOGICAL, intent(in) :: LComputeAxis ! if to recompute the axis - INTEGER, intent(in) :: NGdof, Mvol, mn - REAL :: position(0:NGdof), iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol) - CHARACTER :: packorunpack + integer, intent(in) :: NGdof, Mvol, mn + real(wp) :: position(0:NGdof), iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol) + character :: packorunpack - INTEGER :: lvol, jj, kk, irz, issym, idof, ifail, ivol + integer :: lvol, jj, kk, irz, issym, idof, ifail, ivol + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(packxi) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -113,7 +129,13 @@ subroutine packxi( NGdof, position, Mvol, mn, iRbc, iZbs, iRbs, iZbc, packorunpa idof = idof + 1 #ifdef DEBUG - FATAL( packxi, idof.le.0 .or. idof.gt.NGdof, out of bounds ) + + if( idof.le.0 .or. idof.gt.NGdof ) then + write(6,'("packxi : fatal : myid=",i3," ; idof.le.0 .or. idof.gt.NGdof ; out of bounds ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packxi : idof.le.0 .or. idof.gt.NGdof : out of bounds ;" + endif + #endif select case( packorunpack ) @@ -145,7 +167,13 @@ subroutine packxi( NGdof, position, Mvol, mn, iRbc, iZbs, iRbs, iZbc, packorunpa !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( packxi, idof.ne.NGdof, counting error ) + + if( idof.ne.NGdof ) then + write(6,'("packxi : fatal : myid=",i3," ; idof.ne.NGdof ; counting error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "packxi : idof.ne.NGdof : counting error ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -171,14 +199,24 @@ subroutine packxi( NGdof, position, Mvol, mn, iRbc, iZbs, iRbs, iZbc, packorunpa if( (Mvol .ne. 1) .and. (Lfindzero .ne. 0) ) then if (LComputeAxis) then - WCALL( packxi, rzaxis, ( Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), ivol, LComputeDerivatives ) ) ! set coordinate axis; 19 Jul 16; + + cput = MPI_WTIME() + Tpackxi = Tpackxi + ( cput-cpuo ) + call rzaxis( Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), ivol, LComputeDerivatives ) + cpuo = MPI_WTIME() + ! set coordinate axis; 19 Jul 16; endif endif end select !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
- RETURN(packxi) + +9999 continue + cput = MPI_WTIME() + Tpackxi = Tpackxi + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/pc00aa.f90 b/src/pc00aa.F90 similarity index 83% rename from src/pc00aa.f90 rename to src/pc00aa.F90 index c79d1c27..fabcccc3 100644 --- a/src/pc00aa.f90 +++ b/src/pc00aa.F90 @@ -39,7 +39,7 @@ !> @param[in] mn !> @param ie04dgf subroutine pc00aa( NGdof, position, Nvol, mn, ie04dgf ) ! argument list is optional; - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, ten @@ -56,25 +56,41 @@ subroutine pc00aa( NGdof, position, Nvol, mn, ie04dgf ) ! argument list is optio !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: Nvol, mn, NGdof - REAL , intent(inout) :: position(0:NGdof) - INTEGER :: ie04dgf +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: Nvol, mn, NGdof + real(wp) , intent(inout) :: position(0:NGdof) + integer :: ie04dgf LOGICAL :: LComputeDerivatives!, Lexit = .true. - INTEGER :: niterations, Iwork(1:NGdof+1), iuser(1:2) - REAL :: lEnergy, Gradient(0:NGdof), work(1:13*NGdof), ruser(1:1) - CHARACTER :: smaxstep*34 + integer :: niterations, Iwork(1:NGdof+1), iuser(1:2) + real(wp) :: lEnergy, Gradient(0:NGdof), work(1:13*NGdof), ruser(1:1) + character :: smaxstep*34 external :: pc00ab - BEGIN(pc00aa) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! if( myid.eq.0 ) then - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("pc00aa : ", 10x ," : ")') write(ounit,1000) cput-cpus, myid, NGdof, maxstep, maxiter, verify endif @@ -120,7 +136,13 @@ subroutine pc00aa( NGdof, position, Nvol, mn, ie04dgf ) ! argument list is optio case( 1 ) ! extensive test; call E04DKF('Verify = 1') ! extensive test; case default - FATAL(pc00aa, .true., invalid verify supplied on input) + + if( .true. ) then + write(6,'("pc00aa : fatal : myid=",i3," ; .true. ; invalid verify supplied on input;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "pc00aa : .true. : invalid verify supplied on input ;" + endif + end select call E04DKF('Iteration Limit = 99999999') @@ -134,7 +156,7 @@ subroutine pc00aa( NGdof, position, Nvol, mn, ie04dgf ) ! argument list is optio call E04DGF( NGdof, pc00ab, niterations, lEnergy, Gradient(1:NGdof), position(1:NGdof), & Iwork(1:NGdof+1), work(1:13*NGdof), iuser(1:2), ruser(1:1), ie04dgf ) - cput = GETTIME + cput = MPI_WTIME() select case( ie04dgf ) case(:-1) ; if( myid.eq.0 ) write(ounit,'("pc00aa : ",f10.2," : user requested termination ; ie04dgf=",i3," ;")')cput-cpus,ie04dgf @@ -146,12 +168,23 @@ subroutine pc00aa( NGdof, position, Nvol, mn, ie04dgf ) ! 
argument list is optio case( 8) ; if( myid.eq.0 ) write(ounit,'("pc00aa : ",f10.2," : initial gradient too small ; ie04dgf=",i3," ;")')cput-cpus,ie04dgf case( 9) ; if( myid.eq.0 ) write(ounit,'("pc00aa : ",f10.2," : input error ; ie04dgf=",i3," ;")')cput-cpus,ie04dgf case default - FATAL(pc00aa, .true., E04DGF ifail error) + + if( .true. ) then + write(6,'("pc00aa : fatal : myid=",i3," ; .true. ; E04DGF ifail error;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "pc00aa : .true. : E04DGF ifail error ;" + endif + end select !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(pc00aa) + +9999 continue + cput = MPI_WTIME() + Tpc00aa = Tpc00aa + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/pc00ab.f90 b/src/pc00ab.F90 similarity index 86% rename from src/pc00ab.f90 rename to src/pc00ab.F90 index b47907c0..5d526658 100644 --- a/src/pc00ab.f90 +++ b/src/pc00ab.F90 @@ -79,7 +79,7 @@ !> !> subroutine pc00ab( mode, NGdof, Position, Energy, Gradient, nstate, iuser, ruser ) ! argument fixed by NAG; see pc00aa; - + use mod_kinds, only: wp => dp use constants, only : zero, half, one use numerical, only : @@ -94,16 +94,32 @@ subroutine pc00ab( mode, NGdof, Position, Energy, Gradient, nstate, iuser, ruser !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER :: mode, NGdof, nstate, iuser(1:2) - REAL :: Position(1:NGdof), Energy, Gradient(1:NGdof), ruser(1:1) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer :: mode, NGdof, nstate, iuser(1:2) + real(wp) :: Position(1:NGdof), Energy, Gradient(1:NGdof), ruser(1:1) LOGICAL :: LComputeDerivatives, LComputeAxis - INTEGER :: ii, vvol, irz, issym, totaldof, localdof, wflag, iflag!, mi, ni !idof, imn, irz, totaldof, localdof, jj, kk, ll, mi, ni, mj, nj, mk, nk, ml, nl, mjmk - REAL :: force(0:NGdof), gradienterror, rflag + integer :: ii, vvol, irz, issym, totaldof, localdof, wflag, iflag!, mi, ni !idof, imn, irz, totaldof, localdof, jj, kk, ll, mi, ni, mj, nj, mk, nk, ml, nl, mjmk + real(wp) :: force(0:NGdof), gradienterror, rflag + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(pc00ab) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -113,7 +129,12 @@ subroutine pc00ab( mode, NGdof, Position, Energy, Gradient, nstate, iuser, ruser LComputeDerivatives = .false. LComputeAxis = .true. - WCALL(pc00ab,dforce,( NGdof, Position(1:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis )) + + cput = MPI_WTIME() + Tpc00ab = Tpc00ab + ( cput-cpuo ) + call dforce( NGdof, Position(1:NGdof), force(0:NGdof), LComputeDerivatives, LComputeAxis ) + cpuo = MPI_WTIME() + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -174,24 +195,47 @@ subroutine pc00ab( mode, NGdof, Position, Energy, Gradient, nstate, iuser, ruser enddo ! 
end of do vvol; 26 Feb 13; - FATAL(pc00ab, totaldof.ne.NGdof, counting error ) + + if( totaldof.ne.NGdof ) then + write(6,'("pc00ab : fatal : myid=",i3," ; totaldof.ne.NGdof ; counting error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "pc00ab : totaldof.ne.NGdof : counting error ;" + endif + gradienterror = sum( abs( Gradient(1:NGdof) ) ) / NGdof ! only used for screen output; 26 Feb 13; wflag = 1 ; iflag = 0 ; rflag = gradienterror - WCALL(pc00ab,writin,( wflag, iflag, rflag)) ! write restart file etc.; + + cput = MPI_WTIME() + Tpc00ab = Tpc00ab + ( cput-cpuo ) + call writin( wflag, iflag, rflag) + cpuo = MPI_WTIME() + ! write restart file etc.; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! case( 3 ) ! second derivatives; required for E04LYF, which is called by pc02aa; - FATAL(pc00ab, .true., have not yet computed second derivatives ) + + if( .true. ) then + write(6,'("pc00ab : fatal : myid=",i3," ; .true. ; have not yet computed second derivatives ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "pc00ab : .true. : have not yet computed second derivatives ;" + endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! case default - FATAL(pc00ab, .true., invalid mode provided to pc00ab ) + + if( .true. ) then + write(6,'("pc00ab : fatal : myid=",i3," ; .true. ; invalid mode provided to pc00ab ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "pc00ab : .true. : invalid mode provided to pc00ab ;" + endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -199,7 +243,7 @@ subroutine pc00ab( mode, NGdof, Position, Energy, Gradient, nstate, iuser, ruser !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - cput = GETTIME + cput = MPI_WTIME() if( myid.eq.0 ) write(ounit,1000) cput-cpus, iuser(1:2), mode, Energy, gradienterror, ForceErr !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -214,7 +258,12 @@ subroutine pc00ab( mode, NGdof, Position, Energy, Gradient, nstate, iuser, ruser !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(pc00ab) + +9999 continue + cput = MPI_WTIME() + Tpc00ab = Tpc00ab + ( cput-cpuo ) + return + 1000 format("pc00ab : ",f10.2," : iterations="2i8" ; mode=",i3," ; Energy="es23.15" ; |DF|="es13.5" ; ForceErr="es23.15" ;") diff --git a/src/pp00aa.f90 b/src/pp00aa.F90 similarity index 85% rename from src/pp00aa.f90 rename to src/pp00aa.F90 index dc91ff31..36472577 100644 --- a/src/pp00aa.f90 +++ b/src/pp00aa.F90 @@ -64,7 +64,7 @@ !> !> subroutine pp00aa - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
use constants, only : zero, half, one, two, pi @@ -87,22 +87,48 @@ subroutine pp00aa !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER :: lnPtrj, ioff, vvol, itrj, lvol - INTEGER, allocatable :: utflag(:), numTrajs(:) - REAL :: sti(1:2), ltransform(1:2) - REAL, allocatable :: data(:,:,:,:), fiota(:,:) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer :: lnPtrj, ioff, vvol, itrj, lvol + integer, allocatable :: utflag(:), numTrajs(:) + real(wp) :: sti(1:2), ltransform(1:2) + real(wp), allocatable :: data(:,:,:,:), fiota(:,:) integer :: id, numTraj, recvId integer :: status(MPI_STATUS_SIZE) - BEGIN(pp00aa) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + ! count how many Poincare trajectories should be computed in total ; executed on each CPU allocate(numTrajs(1:Mvol)) do vvol = 1, Mvol - LREGION(vvol) ! sets e.g. Lcoordinatesingularity + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + ! sets e.g. Lcoordinatesingularity if( Lcoordinatesingularity ) then ; ioff = 1 ! keep away from coordinate axis; else ; ioff = 0 endif @@ -129,7 +155,17 @@ subroutine pp00aa if( myid.eq.modulo(vvol-1,ncpu) .and. myid.lt.Mvol) then ! the following is in parallel; 20 Jun 14; ! lower bound for radial indices - LREGION(vvol) ! sets e.g. Lcoordinatesingularity + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + ! sets e.g. Lcoordinatesingularity if( Lcoordinatesingularity ) then ; ioff = 1 ! keep away from coordinate axis; else ; ioff = 0 endif @@ -141,9 +177,18 @@ subroutine pp00aa !write(*,'(ai2ai2ai2ai2)') "CPU ",myid," works on trajectories ",ioff," to ",lnPtrj," in volume ",vvol - SALLOCATE( data, (ioff:lnPtrj, 1:4,0:Nz-1,1:nPpts), zero ) ! for block writing to file (allows faster reading of output data files for post-processing plotting routines); - SALLOCATE( utflag, (ioff:lnPtrj ), 0 ) ! error flag that indicates if fieldlines successfully followed; 22 Apr 13; - SALLOCATE( fiota, (ioff:lnPtrj, 1:2 ), zero ) ! will always need fiota(0,1:2); + + allocate( data(ioff:lnPtrj, 1:4,0:Nz-1,1:nPpts), stat=astat ) + data(ioff:lnPtrj, 1:4,0:Nz-1,1:nPpts) = zero + ! for block writing to file (allows faster reading of output data files for post-processing plotting routines); + + allocate( utflag(ioff:lnPtrj ), stat=astat ) + utflag(ioff:lnPtrj ) = 0 + ! error flag that indicates if fieldlines successfully followed; 22 Apr 13; + + allocate( fiota(ioff:lnPtrj, 1:2 ), stat=astat ) + fiota(ioff:lnPtrj, 1:2 ) = zero + ! will always need fiota(0,1:2); !$OMP PARALLEL DO SHARED(lnPtrj,ioff,Wpp00aa,Nz,data,fiota,utflag,iota,oita,myid,vvol,cpus,Lconstraint,nPpts,ppts) PRIVATE(itrj,sti) do itrj = ioff, lnPtrj ! 
initialize Poincare plot with trajectories regularly spaced between interfaces along \t=0; @@ -154,10 +199,15 @@ subroutine pp00aa if( itrj.eq.lnPtrj ) sti(1) = one ! avoid machine precision errors; 08 Feb 16; ! call actual field line integration subroutine - CALL( pp00aa, pp00ab, ( vvol, sti(1:2), Nz, nPpts, data(itrj,1:4,0:Nz-1,1:nPpts), fiota(itrj,1:2), utflag(itrj) ) ) + + cput = MPI_WTIME() + Tpp00aa = Tpp00aa + ( cput-cpuo ) + call pp00ab( vvol, sti(1:2), Nz, nPpts, data(itrj,1:4,0:Nz-1,1:nPpts), fiota(itrj,1:2), utflag(itrj) ) + cpuo = MPI_WTIME() + if( Wpp00aa ) then - cput = GETTIME + cput = MPI_WTIME() if( Lconstraint.eq.1 ) then if( itrj.eq.0 ) write(ounit,1002) cput-cpus, myid, vvol, itrj, sti(1:2), utflag(itrj), fiota(itrj,2), fiota(itrj,2)-oita(vvol-1) if( itrj.gt.0 .and. itrj.lt.lnPtrj ) write(ounit,1002) cput-cpus, myid, vvol, itrj, sti(1:2), utflag(itrj), fiota(itrj,2) @@ -258,9 +308,15 @@ subroutine pp00aa !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - DALLOCATE(data) - DALLOCATE(utflag) - DALLOCATE(fiota) + + deallocate(data,stat=astat) + + + deallocate(utflag,stat=astat) + + + deallocate(fiota,stat=astat) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! endif ! myid.eq.modulo(vvol-1,ncpu) @@ -273,7 +329,12 @@ subroutine pp00aa call finalize_flt_output endif - RETURN(pp00aa) + +9999 continue + cput = MPI_WTIME() + Tpp00aa = Tpp00aa + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/pp00ab.f90 b/src/pp00ab.F90 similarity index 76% rename from src/pp00ab.f90 rename to src/pp00ab.F90 index a4963797..74614874 100644 --- a/src/pp00ab.f90 +++ b/src/pp00ab.F90 @@ -30,7 +30,7 @@ !> @param fittedtransform !> @param[out] utflag subroutine pp00ab( lvol, sti, Nz, nPpts, poincaredata, fittedtransform, utflag ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, two, pi2 @@ -47,38 +47,66 @@ subroutine pp00ab( lvol, sti, Nz, nPpts, poincaredata, fittedtransform, utflag ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lvol, Nz, nPpts - INTEGER, intent(out) :: utflag - REAL :: sti(1:2), poincaredata(1:4,0:Nz-1,1:nPpts), fittedtransform(1:2), dzeta +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lvol, Nz, nPpts + integer, intent(out) :: utflag + real(wp) :: sti(1:2), poincaredata(1:4,0:Nz-1,1:nPpts), fittedtransform(1:2), dzeta - INTEGER :: jj, kk - REAL :: ppt(1:4) + integer :: jj, kk + real(wp) :: ppt(1:4) - INTEGER, parameter :: Lrwork = 20*Node - REAL :: zst, zend, st(1:Node), rwork(1:Lrwork), tol, stz(1:3), RpZ(1:3), leastfit(1:5) - CHARACTER :: RA + integer, parameter :: Lrwork = 20*Node + real(wp) :: zst, zend, st(1:Node), rwork(1:Lrwork), tol, stz(1:3), RpZ(1:3), leastfit(1:5) + character :: RA - INTEGER, parameter :: Lenwrk = 32*Node - INTEGER :: rkmethod, outch - REAL :: hstart, thres(1:Node), rkwork(1:Lenwrk), mchpes, dwarf - REAL :: zgot, ygot(1:Node), ypgot(1:Node), ymax(1:Node) - CHARACTER :: rktask + integer, parameter :: Lenwrk = 32*Node + integer :: rkmethod, outch + real(wp) :: hstart, thres(1:Node), rkwork(1:Lenwrk), mchpes, dwarf + real(wp) :: zgot, ygot(1:Node), ypgot(1:Node), ymax(1:Node) + character :: rktask LOGICAL :: errass, mesage external :: bfield external :: SETUP, UT, ENVIRN - BEGIN(pp00ab) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! ivol = lvol ! required to pass through to bfield; #ifdef DEBUG - FATAL(pp00ab, lvol.lt.1.or.lvol.gt.Mvol, invalid volume ) - FATAL(pp00ab, abs(sti(1)).gt.one, illegal radial coordinate ) + + if( lvol.lt.1.or.lvol.gt.Mvol ) then + write(6,'("pp00ab : fatal : myid=",i3," ; lvol.lt.1.or.lvol.gt.Mvol ; invalid volume ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "pp00ab : lvol.lt.1.or.lvol.gt.Mvol : invalid volume ;" + endif + + + if( abs(sti(1)).gt.one ) then + write(6,'("pp00ab : fatal : myid=",i3," ; abs(sti(1)).gt.one ; illegal radial coordinate ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "pp00ab : abs(sti(1)).gt.one : illegal radial coordinate ;" + endif + #endif dzeta = pi2nfp @@ -127,7 +155,12 @@ subroutine pp00ab( lvol, sti, Nz, nPpts, poincaredata, fittedtransform, utflag ) 1002 format("pp00ab : ", 10x ," : myid=",i3," ; lvol=",i3," ; "3x" : (s,t)=("f21.17" ,"f21.17" ) ; "3x" ; outside domain ;") - CALL( pp00ab, stzxyz, ( lvol, stz(1:3), RpZ(1:3) ) ) ! map to cylindrical; + + cput = MPI_WTIME() + Tpp00ab = Tpp00ab + ( cput-cpuo ) + call stzxyz( lvol, stz(1:3), RpZ(1:3) ) + cpuo = MPI_WTIME() + ! map to cylindrical; ppt(3:4)=(/ RpZ(1), RpZ(3) /) ! cylindrical coordinates; @@ -139,13 +172,18 @@ subroutine pp00ab( lvol, sti, Nz, nPpts, poincaredata, fittedtransform, utflag ) call SETUP(Node, zst, st(1:Node), zend, tol, thres(1:Node), rkmethod, rktask, errass, hstart, rkwork(1:Lenwrk), Lenwrk, mesage) - CALL( pp00ab, UT, (bfield, zend, zgot, ygot(1:Node), ypgot(1:Node), ymax(1:Node), rkwork(1:Lenwrk), utflag) ) ! integrate to next plane; + + cput = MPI_WTIME() + Tpp00ab = Tpp00ab + ( cput-cpuo ) + call UT(bfield, zend, zgot, ygot(1:Node), ypgot(1:Node), ymax(1:Node), rkwork(1:Lenwrk), utflag) + cpuo = MPI_WTIME() + ! integrate to next plane; zst = zend st(1:Node) = ygot(1:Node) - cput = GETTIME + cput = MPI_WTIME() select case( utflag ) ! 1 2 3 4 5 6 case( 1 ) ; ! 
give screen output if error is encountered; !123456789012345678901234567890123456789012345678901234567890123 case( 2 ) ; write(ounit,2001) cput-cpus, myid, lvol, jj, kk, utflag, "step size too small (try RK method 2) " @@ -154,7 +192,13 @@ subroutine pp00ab( lvol, sti, Nz, nPpts, poincaredata, fittedtransform, utflag ) case( 5 ) ; write(ounit,2001) cput-cpus, myid, lvol, jj, kk, utflag, "odetol/thres too small or RK method too low " case( 6 ) ; write(ounit,2001) cput-cpus, myid, lvol, jj, kk, utflag, "integration interrupted (error assessment not possible " case default - FATAL(pp00ab,.true.,illegal value of ifail returned from UT) + + if( .true. ) then + write(6,'("pp00ab : fatal : myid=",i3," ; .true. ; illegal value of ifail returned from UT;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "pp00ab : .true. : illegal value of ifail returned from UT ;" + endif + end select 2001 format("pp00ab : ",f10.2," : myid=",i3," ; lvol=",i3," ; (jj,kk)=("i4" ,"i4" ); ifail="i2" ; "a63) @@ -185,7 +229,12 @@ subroutine pp00ab( lvol, sti, Nz, nPpts, poincaredata, fittedtransform, utflag ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(pp00ab) + +9999 continue + cput = MPI_WTIME() + Tpp00ab = Tpp00ab + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/preset.f90 b/src/preset.F90 similarity index 74% rename from src/preset.f90 rename to src/preset.F90 index 2ee5a16a..3a2493dd 100644 --- a/src/preset.f90 +++ b/src/preset.F90 @@ -12,7 +12,7 @@ !> \ingroup grp_initialization !> subroutine preset - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, mu0 @@ -31,21 +31,37 @@ subroutine preset !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER :: innout, idof, jk, ll, ii, ifail, ideriv, vvol, mi, ni, mj, nj, mk, nk, mimj, ninj, mkmj, nknj, jj, kk, lvol, mm, nn, imn - INTEGER :: lquad, igauleg, maxIquad, Mrad, jquad, Lcurvature, zerdof, iret, work1, work2 - REAL :: teta, zeta, arg, lss, cszeta(0:1), error +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer :: innout, idof, jk, ll, ii, ifail, ideriv, vvol, mi, ni, mj, nj, mk, nk, mimj, ninj, mkmj, nknj, jj, kk, lvol, mm, nn, imn + integer :: lquad, igauleg, maxIquad, Mrad, jquad, Lcurvature, zerdof, iret, work1, work2 + real(wp) :: teta, zeta, arg, lss, cszeta(0:1), error LOGICAL :: LComputeAxis LOGICAL :: Lchangeangle - INTEGER :: nb, ix, ij, ip, idx_mode - REAL :: xx + integer :: nb, ix, ij, ip, idx_mode + real(wp) :: xx !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
- BEGIN(preset) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -57,7 +73,13 @@ subroutine preset case( 0 ) ; YESstellsym = .false. ; NOTstellsym = .true. case( 1 ) ; YESstellsym = .true. ; NOTstellsym = .false. case default ; - FATAL( readin, .true., illegal Istellsym ) + + if( .true. ) then + write(6,'("readin : fatal : myid=",i3," ; .true. ; illegal Istellsym ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : .true. : illegal Istellsym ;" + endif + end select !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -67,13 +89,22 @@ subroutine preset !latex \item The number of plasma volumes is \internal{Mvol}=\inputvar{Nvol}+\inputvar{Lfreebound}; !latex \end{enumerate} - FATAL( readin, Lfreebound.lt.0 .or. Lfreebound.gt.1, illegal Lfreebound ) + + if( Lfreebound.lt.0 .or. Lfreebound.gt.1 ) then + write(6,'("readin : fatal : myid=",i3," ; Lfreebound.lt.0 .or. Lfreebound.gt.1 ; illegal Lfreebound ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : Lfreebound.lt.0 .or. Lfreebound.gt.1 : illegal Lfreebound ;" + endif + Mvol = Nvol + Lfreebound !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - SALLOCATE( beltramierror,(1:Mvol,1:9), zero) + + allocate( beltramierror(1:Mvol,1:9), stat=astat ) + beltramierror(1:Mvol,1:9) = zero + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -93,8 +124,14 @@ subroutine preset mn = 1 + Ntor + Mpol * ( 2 * Ntor + 1 ) ! Fourier resolution of interface geometry & vector potential; - SALLOCATE( im, (1:mn), 0 ) - SALLOCATE( in, (1:mn), 0 ) + + allocate( im(1:mn), stat=astat ) + im(1:mn) = 0 + + + allocate( in(1:mn), stat=astat ) + in(1:mn) = 0 + call gi00ab( Mpol, Ntor, Nfp, mn, im(1:mn), in(1:mn) ) ! this sets the im and in mode identification arrays; @@ -106,8 +143,14 @@ subroutine preset !latex \item This is used in \link{lforce}, \link{bfield}, \link{stzxyz}, \link{coords}, \link{jo00aa}, \link{ma00aa}, \link{sc00aa} and \link{tr00ab}. !latex \end{enumerate} - SALLOCATE( halfmm, (1:mn), im(1:mn) * half ) - SALLOCATE( regumm, (1:mn), im(1:mn) * half ) + + allocate( halfmm(1:mn), stat=astat ) + halfmm(1:mn) = im(1:mn) * half + + + allocate( regumm(1:mn), stat=astat ) + regumm(1:mn) = im(1:mn) * half + if( Mregular.ge.2 ) then @@ -130,8 +173,14 @@ subroutine preset mne = 1 + lNtor + lMpol * ( 2 * lNtor + 1 ) ! resolution of metrics; enhanced resolution; see metrix; - SALLOCATE( ime, (1:mne), 0 ) - SALLOCATE( ine, (1:mne), 0 ) + + allocate( ime(1:mne), stat=astat ) + ime(1:mne) = 0 + + + allocate( ine(1:mne), stat=astat ) + ine(1:mne) = 0 + call gi00ab( lMpol, lNtor, Nfp, mne, ime(1:mne), ine(1:mne) ) @@ -147,8 +196,14 @@ subroutine preset mns = 1 + sNtor + sMpol * ( 2 * sNtor + 1 ) ! 
resolution of straight-field line transformation on interfaces; see tr00ab; soon to be redundant; - SALLOCATE( ims, (1:mns), 0 ) - SALLOCATE( ins, (1:mns), 0 ) + + allocate( ims(1:mns), stat=astat ) + ims(1:mns) = 0 + + + allocate( ins(1:mns), stat=astat ) + ins(1:mns) = 0 + call gi00ab( sMpol, sNtor, Nfp, mns, ims(1:mns), ins(1:mns) ) ! note that the field periodicity factor is included in ins; @@ -180,24 +235,60 @@ subroutine preset !latex \item \type{iVns}, \type{iVnc}, \type{iBns} and \type{iBns} : Fourier harmonics of normal field at computational boundary; !latex \end{enumerate} - SALLOCATE( iRbc, (1:mn,0:Mvol), zero ) ! interface Fourier harmonics; - SALLOCATE( iZbs, (1:mn,0:Mvol), zero ) - SALLOCATE( iRbs, (1:mn,0:Mvol), zero ) - SALLOCATE( iZbc, (1:mn,0:Mvol), zero ) + + allocate( iRbc(1:mn,0:Mvol), stat=astat ) + iRbc(1:mn,0:Mvol) = zero + ! interface Fourier harmonics; + + allocate( iZbs(1:mn,0:Mvol), stat=astat ) + iZbs(1:mn,0:Mvol) = zero + + + allocate( iRbs(1:mn,0:Mvol), stat=astat ) + iRbs(1:mn,0:Mvol) = zero + + + allocate( iZbc(1:mn,0:Mvol), stat=astat ) + iZbc(1:mn,0:Mvol) = zero + if( Lperturbed.eq.1 ) then - SALLOCATE( dRbc, (1:mn,0:Mvol), zero ) ! interface Fourier harmonics; - SALLOCATE( dZbs, (1:mn,0:Mvol), zero ) - SALLOCATE( dRbs, (1:mn,0:Mvol), zero ) - SALLOCATE( dZbc, (1:mn,0:Mvol), zero ) + + allocate( dRbc(1:mn,0:Mvol), stat=astat ) + dRbc(1:mn,0:Mvol) = zero + ! interface Fourier harmonics; + + allocate( dZbs(1:mn,0:Mvol), stat=astat ) + dZbs(1:mn,0:Mvol) = zero + + + allocate( dRbs(1:mn,0:Mvol), stat=astat ) + dRbs(1:mn,0:Mvol) = zero + + + allocate( dZbc(1:mn,0:Mvol), stat=astat ) + dZbc(1:mn,0:Mvol) = zero + endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - SALLOCATE( iVns, (1:mn), zero ) - SALLOCATE( iBns, (1:mn), zero ) - SALLOCATE( iVnc, (1:mn), zero ) - SALLOCATE( iBnc, (1:mn), zero ) + + allocate( iVns(1:mn), stat=astat ) + iVns(1:mn) = zero + + + allocate( iBns(1:mn), stat=astat ) + iBns(1:mn) = zero + + + allocate( iVnc(1:mn), stat=astat ) + iVnc(1:mn) = zero + + + allocate( iBnc(1:mn), stat=astat ) + iBnc(1:mn) = zero + !SALLOCATE( lRbc, (1:mn), zero ) ! not used; SRH: 27 Feb 18; !SALLOCATE( lZbs, (1:mn), zero ) @@ -214,7 +305,10 @@ subroutine preset !latex \internal{ajk[i]} $\equiv 0 $ if $m_i \ne 0$. !latex \end{enumerate} - SALLOCATE( ajk, (1:mn), zero ) ! this must be allocated & assigned now, as it is used in readin; primarily used in packxi; 02 Jan 15; + + allocate( ajk(1:mn), stat=astat ) + ajk(1:mn) = zero + ! this must be allocated & assigned now, as it is used in readin; primarily used in packxi; 02 Jan 15; do kk = 1, mn ; mk = im(kk) ; nk = in(kk) @@ -369,23 +463,39 @@ subroutine preset !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - ; RlBCAST( iRbc(1:mn,0:Mvol), (Mvol+1)*mn, 0 ) + ; + call MPI_BCAST(iRbc(1:mn,0:Mvol),(Mvol+1)*mn,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + if( Igeometry.eq.3 ) then - ;RlBCAST( iZbs(1:mn,0:Mvol), (Mvol+1)*mn, 0 ) ! only required for ii > 1 ; + ; + call MPI_BCAST(iZbs(1:mn,0:Mvol),(Mvol+1)*mn,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + ! only required for ii > 1 ; endif if( NOTstellsym ) then - ;RlBCAST( iRbs(1:mn,0:Mvol), (Mvol+1)*mn, 0 ) ! only required for ii > 1 ; + ; + call MPI_BCAST(iRbs(1:mn,0:Mvol),(Mvol+1)*mn,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + ! 
only required for ii > 1 ; if( Igeometry.eq.3 ) then - RlBCAST( iZbc(1:mn,0:Mvol), (Mvol+1)*mn, 0 ) + + call MPI_BCAST(iZbc(1:mn,0:Mvol),(Mvol+1)*mn,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + endif endif if( Lfreebound.eq.1 ) then - ;RlBCAST( iVns(1:mn), mn, 0 ) ! only required for ii > 1 ; - ;RlBCAST( iBns(1:mn), mn, 0 ) ! only required for ii > 1 ; + ; + call MPI_BCAST(iVns(1:mn),mn,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + ! only required for ii > 1 ; + ; + call MPI_BCAST(iBns(1:mn),mn,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + ! only required for ii > 1 ; if( NOTstellsym ) then - RlBCAST( iVnc(1:mn), mn, 0 ) - RlBCAST( iBnc(1:mn), mn, 0 ) + + call MPI_BCAST(iVnc(1:mn),mn,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(iBnc(1:mn),mn,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr) + endif endif @@ -457,7 +567,7 @@ subroutine preset NGdof = ( Mvol-1 ) * LGdof - if( Wpreset ) then ; cput = GETTIME ; write(ounit,'("preset : ",f10.2," : myid=",i3," ; NGdof=",i9," ;")') cput-cpus, myid, NGdof + if( Wpreset ) then ; cput = MPI_WTIME() ; write(ounit,'("preset : ",f10.2," : myid=",i3," ; NGdof=",i9," ;")') cput-cpus, myid, NGdof endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -505,8 +615,14 @@ subroutine preset !> \f$\psi_{pol,i} \rightarrow \psi_{pol,i} / \psi_{0}\f$, where \f$\psi_{0} \equiv \psi_{tor,N}\f$ on input. !> - SALLOCATE( dtflux, (1:Mvol), zero ) - SALLOCATE( dpflux, (1:Mvol), zero ) + + allocate( dtflux(1:Mvol), stat=astat ) + dtflux(1:Mvol) = zero + + + allocate( dpflux(1:Mvol), stat=astat ) + dpflux(1:Mvol) = zero + select case( Igeometry ) case( 1 ) ; dtflux(1) = tflux(1) ; dpflux(1) = pflux(1) ! Cartesian ; this is the "inverse" operation defined in xspech; 09 Mar 17; @@ -561,7 +677,10 @@ subroutine preset !> and \f$w \equiv\,\f$\c wpoloidal. !> - SALLOCATE( sweight, (1:Mvol), zero ) + + allocate( sweight(1:Mvol), stat=astat ) + sweight(1:Mvol) = zero + !sweight(1:Mvol) = upsilon * tflux(1:Mvol)**wpoloidal ! toroidal flux in vacuum region is not constant; 11 July 18; do vvol = 1, Mvol ; sweight(vvol) = upsilon * (vvol*one/Nvol)**wpoloidal ! 11 July 18; enddo @@ -605,9 +724,18 @@ subroutine preset !> !> - SALLOCATE( TT, (0:Mrad,0:1,0:1), zero ) - SALLOCATE(RTT, (0:Lrad(1),0:Mpol,0:1,0:1), zero ) - SALLOCATE(RTM, (0:Lrad(1),0:Mpol), zero ) + + allocate( TT(0:Mrad,0:1,0:1), stat=astat ) + TT(0:Mrad,0:1,0:1) = zero + + + allocate( RTT(0:Lrad(1),0:Mpol,0:1,0:1), stat=astat ) + RTT(0:Lrad(1),0:Mpol,0:1,0:1) = zero + + + allocate( RTM(0:Lrad(1),0:Mpol), stat=astat ) + RTM(0:Lrad(1),0:Mpol) = zero + call get_cheby( -one, Mrad, TT(:,0,:)) call get_cheby( one , Mrad, TT(:,1,:)) @@ -628,7 +756,10 @@ subroutine preset !> then \c ImagneticOK is set to \c .true. . !> - SALLOCATE( ImagneticOK, (1:Mvol), .false. ) + + allocate( ImagneticOK(1:Mvol), stat=astat ) + ImagneticOK(1:Mvol) = .false. + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -688,9 +819,18 @@ subroutine preset !> Also, take care that the sign of the sine harmonics in the above expressions will change for these cases. 
!> - SALLOCATE( ki, (1:mn,0:1), 0 ) - SALLOCATE( kija, (1:mn,1:mn,0:1), 0 ) - SALLOCATE( kijs, (1:mn,1:mn,0:1), 0 ) + + allocate( ki(1:mn,0:1), stat=astat ) + ki(1:mn,0:1) = 0 + + + allocate( kija(1:mn,1:mn,0:1), stat=astat ) + kija(1:mn,1:mn,0:1) = 0 + + + allocate( kijs(1:mn,1:mn,0:1), stat=astat ) + kijs(1:mn,1:mn,0:1) = 0 + do ii = 1, mn ; mi = im(ii) ; ni = in(ii) @@ -734,8 +874,14 @@ subroutine preset if( Igeometry.eq.2 ) then ! standard cylindrical; 04 Dec 14; - SALLOCATE( djkp, (1:mn,1:mn), 0 ) ! only used in volume; trigonometric identities; 04 Dec 14; - SALLOCATE( djkm, (1:mn,1:mn), 0 ) ! only used in volume; trigonometric identities; 04 Dec 14; + + allocate( djkp(1:mn,1:mn), stat=astat ) + djkp(1:mn,1:mn) = 0 + ! only used in volume; trigonometric identities; 04 Dec 14; + + allocate( djkm(1:mn,1:mn), stat=astat ) + djkm(1:mn,1:mn) = 0 + ! only used in volume; trigonometric identities; 04 Dec 14; do ii = 1, mn ; mi = im(ii) ; ni = in(ii) do jj = 1, mn ; mj = im(jj) ; nj = in(jj) @@ -750,11 +896,23 @@ subroutine preset !> **iotakki** - SALLOCATE( iotakkii, (1:mn ), 0 ) ! used to identify matrix elements in straight-field-line angle transformation; - SALLOCATE( iotaksub, (1:mn,1:mns), 0 ) - SALLOCATE( iotaksgn, (1:mn,1:mns), 0 ) - SALLOCATE( iotakadd, (1:mn,1:mns), 0 ) + allocate( iotakkii(1:mn ), stat=astat ) + iotakkii(1:mn ) = 0 + ! used to identify matrix elements in straight-field-line angle transformation; + + + allocate( iotaksub(1:mn,1:mns), stat=astat ) + iotaksub(1:mn,1:mns) = 0 + + + allocate( iotaksgn(1:mn,1:mns), stat=astat ) + iotaksgn(1:mn,1:mns) = 0 + + + allocate( iotakadd(1:mn,1:mns), stat=astat ) + iotakadd(1:mn,1:mns) = 0 + do kk = 1, mn ; mk = im(kk) ; nk = in(kk) @@ -803,15 +961,30 @@ subroutine preset ! Allocate space for the toroidal current array in each interface - SALLOCATE( IPDt, (1:Mvol), zero) + + allocate( IPDt(1:Mvol), stat=astat ) + IPDt(1:Mvol) = zero + if( Lfreebound.eq.1 ) then - SALLOCATE( IPDtDpf, (1:Mvol , 1:Mvol ), zero) + + allocate( IPDtDpf(1:Mvol , 1:Mvol ), stat=astat ) + IPDtDpf(1:Mvol , 1:Mvol ) = zero + else - SALLOCATE( IPDtDpf, (1:Mvol-1, 1:Mvol-1), zero) + + allocate( IPDtDpf(1:Mvol-1, 1:Mvol-1), stat=astat ) + IPDtDpf(1:Mvol-1, 1:Mvol-1) = zero + endif - SALLOCATE( cheby, (0:Mrad,0:2), zero ) - SALLOCATE( zernike, (0:Lrad(1), 0:Mpol, 0:2), zero ) + + allocate( cheby(0:Mrad,0:2), stat=astat ) + cheby(0:Mrad,0:2) = zero + + + allocate( zernike(0:Lrad(1), 0:Mpol, 0:2), stat=astat ) + zernike(0:Lrad(1), 0:Mpol, 0:2) = zero + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -832,11 +1005,24 @@ subroutine preset !> also see jo00aa(), where \c Iquad\f$_v\f$ is used to compute the volume integrals of \f$||\nabla\times{\bf B} - \mu {\bf B}||\f$. !> - SALLOCATE( Iquad, (1:Mvol), 0 ) ! 16 Jan 13; + + allocate( Iquad(1:Mvol), stat=astat ) + Iquad(1:Mvol) = 0 + ! 16 Jan 13; do vvol = 1, Mvol - LREGION(vvol) + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + if( Nquad.gt.0 ) then ; Iquad(vvol) = Nquad else @@ -848,8 +1034,14 @@ subroutine preset maxIquad = maxval(Iquad(1:Mvol)) - SALLOCATE( gaussianweight , (1:maxIquad,1:Mvol), zero ) ! 
perhaps it would be neater to make this a structure; 26 Jan 16; - SALLOCATE( gaussianabscissae, (1:maxIquad,1:Mvol), zero ) + + allocate( gaussianweight (1:maxIquad,1:Mvol), stat=astat ) + gaussianweight (1:maxIquad,1:Mvol) = zero + ! perhaps it would be neater to make this a structure; 26 Jan 16; + + allocate( gaussianabscissae(1:maxIquad,1:Mvol), stat=astat ) + gaussianabscissae(1:maxIquad,1:Mvol) = zero + do vvol = 1, Mvol @@ -858,13 +1050,19 @@ subroutine preset call gauleg( lquad, gaussianweight(1:lquad,vvol), gaussianabscissae(1:lquad,vvol), igauleg ) ! JAB; 28 Jul 17 if( myid.eq.0 ) then - cput= GETTIME + cput= MPI_WTIME() select case( igauleg ) ! 123456789012345 case( 0 ) ; if( Wpreset ) write(ounit,1000) cput-cpus, vvol, igauleg, "success ", gaussianabscissae(1:lquad,vvol) case( 1 ) ; write(ounit,1000) cput-cpus, vvol, igauleg, "failed ", gaussianabscissae(1:lquad,vvol) case( 2 ) ; write(ounit,1000) cput-cpus, vvol, igauleg, "input error ", gaussianabscissae(1:lquad,vvol) case default ; write(ounit,1000) cput-cpus, vvol, igauleg, "weird ", gaussianabscissae(1:lquad,vvol) - FATAL( preset, .true., weird ifail returned by gauleg ) + + if( .true. ) then + write(6,'("preset : fatal : myid=",i3," ; .true. ; weird ifail returned by gauleg ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : .true. : weird ifail returned by gauleg ;" + endif + end select ; ; if( Wpreset ) write(ounit,1001) gaussianweight(1:lquad,vvol) endif @@ -893,7 +1091,13 @@ subroutine preset if (LBnewton .or. LBsequad) Lconstraint = 2 if (Lconstraint .eq. 2) then - FATAL( preset, Lfreebound.eq.1, The combination of helicity constraint and free boundary is under construction ) + + if( Lfreebound.eq.1 ) then + write(6,'("preset : fatal : myid=",i3," ; Lfreebound.eq.1 ; The combination of helicity constraint and free boundary is under construction ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : Lfreebound.eq.1 : The combination of helicity constraint and free boundary is under construction ;" + endif + if (Igeometry .eq. 3 .and. myid.eq.0) then write(ounit, *) 'WARNING: The Hessian matrix needs further review for Igeometry = 3' write(ounit, *) ' However, it can still serve the purpose of Lfindzero = 2' @@ -901,7 +1105,7 @@ subroutine preset endif if( myid.eq.0 ) then - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("preset : ",f10.2," : LBsequad="L2" , LBnewton="L2" , LBlinear="L2" ;")')cput-cpus, LBsequad, LBnewton, LBlinear endif @@ -916,7 +1120,10 @@ subroutine preset !>
!> - this is only used in dforce() in constructing the force-imbalance vector
  • !> - SALLOCATE( BBweight, (1:mn), opsilon * exp( - escale * ( im(1:mn)**2 + (in(1:mn)/Nfp)**2 ) ) ) + + allocate( BBweight(1:mn), stat=astat ) + BBweight(1:mn) = opsilon * exp( - escale * ( im(1:mn)**2 + (in(1:mn)/Nfp)**2 ) ) + if( myid.eq.0 .and. escale.gt.small ) then do ii = 1, mn ; write(ounit,'("preset : " 10x " : myid="i3" ; ("i3","i3") : BBweight="es13.5" ;")') myid, im(ii), in(ii)/Nfp, BBweight(ii) @@ -934,7 +1141,10 @@ subroutine preset !> where \f$p \equiv\,\f$\c pcondense . !> - SALLOCATE( mmpp, (1:mn), zero ) + + allocate( mmpp(1:mn), stat=astat ) + mmpp(1:mn) = zero + do ii = 1, mn ; mi = im(ii) @@ -974,40 +1184,124 @@ subroutine preset !> An initial state is required for iterative solvers of the Beltrami fields, see \c LBeltrami . !> - SALLOCATE( NAdof, (1:Mvol ), 0 ) ! Beltrami degrees-of-freedom in each annulus; - SALLOCATE( Nfielddof,(1:Mvol ), 0 ) ! Beltrami degrees-of-freedom in each annulus, field only; - SALLOCATE( NdMASmax, (1:Mvol ), 0 ) ! The maximum size of sparse matrix for GMRES preconditioning; - SALLOCATE( NdMAS , (1:Mvol ), 0 ) ! The actual size of sparse matrix for GMRES preconditioning; - - NALLOCATE( Ate , (1:Mvol,-2:2,1:mn) ) ! recall that this is type:sub-grid; 31 Jan 13; - NALLOCATE( Aze , (1:Mvol,-2:2,1:mn) ) ! -2 : for use of matrix-free solver ; -1 : for use of force gradient - NALLOCATE( Ato , (1:Mvol,-2:2,1:mn) ) ! 0 : normal data - NALLOCATE( Azo , (1:Mvol,-2:2,1:mn) ) ! 1:2: use to compute derivative w.r.t. fluxes - - SALLOCATE( Fso , (1:Mvol, 1:mn), 0 ) ! these will become redundant if/when Lagrange multipliers are used to enforce bounday constraints; 26 Jan 16; - SALLOCATE( Fse , (1:Mvol, 1:mn), 0 ) - - SALLOCATE( Lma , (1:Mvol, 1:mn), 0 ) ! degree of freedom index; for Lagrange multiplier; 08 Feb 16; - SALLOCATE( Lmb , (1:Mvol, 1:mn), 0 ) - SALLOCATE( Lmc , (1:Mvol, 1:mn), 0 ) ! only need Lmc(2:mn) ; only for NOTstellsym; 08 Feb 16; - SALLOCATE( Lmd , (1:Mvol, 1:mn), 0 ) ! only need Lmd(2:mn) ; only for NOTstellsym; 08 Feb 16; - SALLOCATE( Lme , (1:Mvol, 1:mn), 0 ) ! only need Lme(2:mn) ; - SALLOCATE( Lmf , (1:Mvol, 1:mn), 0 ) ! only need Lmf(2:mn) ; only for NOTstellsym; 08 Feb 16; - SALLOCATE( Lmg , (1:Mvol, 1:mn), 0 ) ! only need Lmg(1 ) ; - SALLOCATE( Lmh , (1:Mvol, 1:mn), 0 ) ! only need Lmh(1 ) ; - - SALLOCATE( Lmavalue, (1:Mvol, 1:mn), zero ) - SALLOCATE( Lmbvalue, (1:Mvol, 1:mn), zero ) - SALLOCATE( Lmcvalue, (1:Mvol, 1:mn), zero ) - SALLOCATE( Lmdvalue, (1:Mvol, 1:mn), zero ) - SALLOCATE( Lmevalue, (1:Mvol, 1:mn), zero ) - SALLOCATE( Lmfvalue, (1:Mvol, 1:mn), zero ) - SALLOCATE( Lmgvalue, (1:Mvol, 1:mn), zero ) - SALLOCATE( Lmhvalue, (1:Mvol, 1:mn), zero ) + + allocate( NAdof(1:Mvol ), stat=astat ) + NAdof(1:Mvol ) = 0 + ! Beltrami degrees-of-freedom in each annulus; + + allocate( Nfielddof(1:Mvol ), stat=astat ) + Nfielddof(1:Mvol ) = 0 + ! Beltrami degrees-of-freedom in each annulus, field only; + + allocate( NdMASmax(1:Mvol ), stat=astat ) + NdMASmax(1:Mvol ) = 0 + ! The maximum size of sparse matrix for GMRES preconditioning; + + allocate( NdMAS (1:Mvol ), stat=astat ) + NdMAS (1:Mvol ) = 0 + ! The actual size of sparse matrix for GMRES preconditioning; + + + allocate(Ate (1:Mvol,-2:2,1:mn) ,stat=astat) + ! recall that this is type:sub-grid; 31 Jan 13; + + allocate(Aze (1:Mvol,-2:2,1:mn) ,stat=astat) + ! -2 : for use of matrix-free solver ; -1 : for use of force gradient + + allocate(Ato (1:Mvol,-2:2,1:mn) ,stat=astat) + ! 0 : normal data + + allocate(Azo (1:Mvol,-2:2,1:mn) ,stat=astat) + ! 1:2: use to compute derivative w.r.t. 
fluxes + + + allocate( Fso (1:Mvol, 1:mn), stat=astat ) + Fso (1:Mvol, 1:mn) = 0 + ! these will become redundant if/when Lagrange multipliers are used to enforce bounday constraints; 26 Jan 16; + + allocate( Fse (1:Mvol, 1:mn), stat=astat ) + Fse (1:Mvol, 1:mn) = 0 + + + + allocate( Lma (1:Mvol, 1:mn), stat=astat ) + Lma (1:Mvol, 1:mn) = 0 + ! degree of freedom index; for Lagrange multiplier; 08 Feb 16; + + allocate( Lmb (1:Mvol, 1:mn), stat=astat ) + Lmb (1:Mvol, 1:mn) = 0 + + + allocate( Lmc (1:Mvol, 1:mn), stat=astat ) + Lmc (1:Mvol, 1:mn) = 0 + ! only need Lmc(2:mn) ; only for NOTstellsym; 08 Feb 16; + + allocate( Lmd (1:Mvol, 1:mn), stat=astat ) + Lmd (1:Mvol, 1:mn) = 0 + ! only need Lmd(2:mn) ; only for NOTstellsym; 08 Feb 16; + + allocate( Lme (1:Mvol, 1:mn), stat=astat ) + Lme (1:Mvol, 1:mn) = 0 + ! only need Lme(2:mn) ; + + allocate( Lmf (1:Mvol, 1:mn), stat=astat ) + Lmf (1:Mvol, 1:mn) = 0 + ! only need Lmf(2:mn) ; only for NOTstellsym; 08 Feb 16; + + allocate( Lmg (1:Mvol, 1:mn), stat=astat ) + Lmg (1:Mvol, 1:mn) = 0 + ! only need Lmg(1 ) ; + + allocate( Lmh (1:Mvol, 1:mn), stat=astat ) + Lmh (1:Mvol, 1:mn) = 0 + ! only need Lmh(1 ) ; + + + allocate( Lmavalue(1:Mvol, 1:mn), stat=astat ) + Lmavalue(1:Mvol, 1:mn) = zero + + + allocate( Lmbvalue(1:Mvol, 1:mn), stat=astat ) + Lmbvalue(1:Mvol, 1:mn) = zero + + + allocate( Lmcvalue(1:Mvol, 1:mn), stat=astat ) + Lmcvalue(1:Mvol, 1:mn) = zero + + + allocate( Lmdvalue(1:Mvol, 1:mn), stat=astat ) + Lmdvalue(1:Mvol, 1:mn) = zero + + + allocate( Lmevalue(1:Mvol, 1:mn), stat=astat ) + Lmevalue(1:Mvol, 1:mn) = zero + + + allocate( Lmfvalue(1:Mvol, 1:mn), stat=astat ) + Lmfvalue(1:Mvol, 1:mn) = zero + + + allocate( Lmgvalue(1:Mvol, 1:mn), stat=astat ) + Lmgvalue(1:Mvol, 1:mn) = zero + + + allocate( Lmhvalue(1:Mvol, 1:mn), stat=astat ) + Lmhvalue(1:Mvol, 1:mn) = zero + do vvol = 1, Mvol - LREGION(vvol) + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false. + else ; Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then ; Lplasmaregion = .true. + else ; Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + if( Lcoordinatesingularity ) then zerdof = 0 ! count Zernike degree of freedom 30 Jun 19 @@ -1084,19 +1378,43 @@ subroutine preset do ideriv = -2, 2 ! loop over derivatives; 14 Jan 13; - SALLOCATE( Ate(vvol,ideriv,ii)%s, (0:Lrad(vvol)), zero ) - SALLOCATE( Aze(vvol,ideriv,ii)%s, (0:Lrad(vvol)), zero ) - SALLOCATE( Ato(vvol,ideriv,ii)%s, (0:Lrad(vvol)), zero ) - SALLOCATE( Azo(vvol,ideriv,ii)%s, (0:Lrad(vvol)), zero ) + + allocate( Ate(vvol,ideriv,ii)%s(0:Lrad(vvol)), stat=astat ) + Ate(vvol,ideriv,ii)%s(0:Lrad(vvol)) = zero + + + allocate( Aze(vvol,ideriv,ii)%s(0:Lrad(vvol)), stat=astat ) + Aze(vvol,ideriv,ii)%s(0:Lrad(vvol)) = zero + + + allocate( Ato(vvol,ideriv,ii)%s(0:Lrad(vvol)), stat=astat ) + Ato(vvol,ideriv,ii)%s(0:Lrad(vvol)) = zero + + + allocate( Azo(vvol,ideriv,ii)%s(0:Lrad(vvol)), stat=astat ) + Azo(vvol,ideriv,ii)%s(0:Lrad(vvol)) = zero + enddo ! end of do ideriv; ; ideriv = 0 - SALLOCATE( Ate(vvol,ideriv,ii)%i, (0:Lrad(vvol)), 0 ) ! degree of freedom index; 17 Jan 13; - SALLOCATE( Aze(vvol,ideriv,ii)%i, (0:Lrad(vvol)), 0 ) - SALLOCATE( Ato(vvol,ideriv,ii)%i, (0:Lrad(vvol)), 0 ) - SALLOCATE( Azo(vvol,ideriv,ii)%i, (0:Lrad(vvol)), 0 ) + + allocate( Ate(vvol,ideriv,ii)%i(0:Lrad(vvol)), stat=astat ) + Ate(vvol,ideriv,ii)%i(0:Lrad(vvol)) = 0 + ! 
degree of freedom index; 17 Jan 13; + + allocate( Aze(vvol,ideriv,ii)%i(0:Lrad(vvol)), stat=astat ) + Aze(vvol,ideriv,ii)%i(0:Lrad(vvol)) = 0 + + + allocate( Ato(vvol,ideriv,ii)%i(0:Lrad(vvol)), stat=astat ) + Ato(vvol,ideriv,ii)%i(0:Lrad(vvol)) = 0 + + + allocate( Azo(vvol,ideriv,ii)%i(0:Lrad(vvol)), stat=astat ) + Azo(vvol,ideriv,ii)%i(0:Lrad(vvol)) = 0 + enddo ! end of do ii; @@ -1184,8 +1502,20 @@ subroutine preset enddo ! end of do ii; 25 Jan 13; - FATAL( preset, idof.ne.NAdof(vvol), need to count Beltrami degrees-of-freedom more carefully for coordinate singularity ) - FATAL( preset, (idof+1)**2.ge.HUGE(idof)), NAdof too big, should be smaller than maximum of int32 type ) + + if( idof.ne.NAdof(vvol) ) then + write(6,'("preset : fatal : myid=",i3," ; idof.ne.NAdof(vvol) ; need to count Beltrami degrees-of-freedom more carefully for coordinate singularity ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : idof.ne.NAdof(vvol) : need to count Beltrami degrees-of-freedom more carefully for coordinate singularity ;" + endif + + + if( (idof+1)**2.ge.HUGE(idof) ) then + write(6,'("preset : fatal : myid=",i3," ; (idof+1)**2.ge.HUGE(idof) ; ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : (idof+1)**2.ge.HUGE(idof) : ;" + endif + ! NAdof too big, should be smaller than maximum of int32 type ) else ! .not.Lcoordinatesingularity; @@ -1236,12 +1566,30 @@ subroutine preset ! enddo !endif - FATAL( preset, idof.ne.NAdof(vvol), need to count degrees-of-freedom more carefully for new matrix ) - FATAL( preset, (idof+1)**2.ge.HUGE(idof)), NAdof too big, should be smaller than maximum of int32 type ) + + if( idof.ne.NAdof(vvol) ) then + write(6,'("preset : fatal : myid=",i3," ; idof.ne.NAdof(vvol) ; need to count degrees-of-freedom more carefully for new matrix ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : idof.ne.NAdof(vvol) : need to count degrees-of-freedom more carefully for new matrix ;" + endif + + + if( (idof+1)**2.ge.HUGE(idof) ) then + write(6,'("preset : fatal : myid=",i3," ; (idof+1)**2.ge.HUGE(idof) ; ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : (idof+1)**2.ge.HUGE(idof) : ;" + endif + !, NAdof too big, should be smaller than maximum of int32 type ) endif ! end of if( Lcoordinatesingularity ) ; - FATAL( preset, idof.ne.NAdof(vvol), impossible logic ) + + if( idof.ne.NAdof(vvol) ) then + write(6,'("preset : fatal : myid=",i3," ; idof.ne.NAdof(vvol) ; impossible logic ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : idof.ne.NAdof(vvol) : impossible logic ;" + endif + do ii = 1, mn do jj = 0, Lrad(vvol) @@ -1259,13 +1607,18 @@ subroutine preset !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - if( Linitgues.eq.2 ) then ; WCALL( preset, ra00aa, ('R') ) ! read initial guess for Beltrami field from file; 02 Jan 15; + if( Linitgues.eq.2 ) then ; + cput = MPI_WTIME() + Tpreset = Tpreset + ( cput-cpuo ) + call ra00aa('R') + cpuo = MPI_WTIME() + ! read initial guess for Beltrami field from file; 02 Jan 15; endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! if( myid.eq.0 ) then ! 
17 Oct 12; - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("preset : ", 10x ," : ")') write(ounit,'("preset : ",f10.2," : Nquad="i4" ; mn="i5" ; NGdof="i6" ; NAdof="16(i6",")" ...")') cput-cpus, Nquad, mn, NGdof, NAdof(1:min(Mvol,16)) endif @@ -1284,34 +1637,91 @@ subroutine preset endif if( myid.eq.0 ) then ! 17 Oct 12; - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("preset : ", 10x ," : ")') write(ounit,'("preset : ",f10.2," : Nt="i6" ; Nz="i6" ; Ntz="i9" ;")') cput-cpus, Nt, Nz, Ntz endif - SALLOCATE( iRij, (1:Ntz,0:Mvol), zero ) ! interface geometry in real space; ! 18 Jul 14; - SALLOCATE( iZij, (1:Ntz,0:Mvol), zero ) ! - SALLOCATE( dRij, (1:Ntz,1:Mvol), zero ) ! interface geometry in real space; poloidal derivative; ! 18 Jul 14; - SALLOCATE( dZij, (1:Ntz,1:Mvol), zero ) - SALLOCATE( tRij, (1:Ntz,0:Mvol), zero ) ! interface geometry in real space; poloidal derivative; ! 18 Jul 14; - SALLOCATE( tZij, (1:Ntz,0:Mvol), zero ) - SALLOCATE( Rij, (1:Ntz,0:3,0:3 ), zero ) ! these are used for inverse fft to reconstruct real space geometry from interpolated Fourier harmonics; - SALLOCATE( Zij, (1:Ntz,0:3,0:3 ), zero ) - SALLOCATE( sg , (1:Ntz,0:3 ), zero ) - SALLOCATE( guvij, (1:Ntz,0:3,0:3,-1:3), zero ) ! need this on higher resolution grid for accurate Fourier decomposition; - SALLOCATE( gvuij, (1:Ntz,0:3,0:3 ), zero ) ! need this on higher resolution grid for accurate Fourier decomposition; 10 Dec 15; + allocate( iRij(1:Ntz,0:Mvol), stat=astat ) + iRij(1:Ntz,0:Mvol) = zero + ! interface geometry in real space; ! 18 Jul 14; + + allocate( iZij(1:Ntz,0:Mvol), stat=astat ) + iZij(1:Ntz,0:Mvol) = zero + ! + + allocate( dRij(1:Ntz,1:Mvol), stat=astat ) + dRij(1:Ntz,1:Mvol) = zero + ! interface geometry in real space; poloidal derivative; ! 18 Jul 14; + + allocate( dZij(1:Ntz,1:Mvol), stat=astat ) + dZij(1:Ntz,1:Mvol) = zero + + + allocate( tRij(1:Ntz,0:Mvol), stat=astat ) + tRij(1:Ntz,0:Mvol) = zero + ! interface geometry in real space; poloidal derivative; ! 18 Jul 14; + + allocate( tZij(1:Ntz,0:Mvol), stat=astat ) + tZij(1:Ntz,0:Mvol) = zero + + + + allocate( Rij(1:Ntz,0:3,0:3 ), stat=astat ) + Rij(1:Ntz,0:3,0:3 ) = zero + ! these are used for inverse fft to reconstruct real space geometry from interpolated Fourier harmonics; + + allocate( Zij(1:Ntz,0:3,0:3 ), stat=astat ) + Zij(1:Ntz,0:3,0:3 ) = zero + + + allocate( sg (1:Ntz,0:3 ), stat=astat ) + sg (1:Ntz,0:3 ) = zero + + + allocate( guvij(1:Ntz,0:3,0:3,-1:3), stat=astat ) + guvij(1:Ntz,0:3,0:3,-1:3) = zero + ! need this on higher resolution grid for accurate Fourier decomposition; + + allocate( gvuij(1:Ntz,0:3,0:3 ), stat=astat ) + gvuij(1:Ntz,0:3,0:3 ) = zero + ! need this on higher resolution grid for accurate Fourier decomposition; 10 Dec 15; if ((Lfindzero .eq. 2) .or. (Lcheck.eq.5 .or. LHevalues .or. LHevectors .or. LHmatrix .or. Lperturbed.eq.1)) then - SALLOCATE( dRadR, (1:mn,0:1,0:1,1:mn), zero ) ! calculated in rzaxis; 19 Sep 16; - SALLOCATE( dRadZ, (1:mn,0:1,0:1,1:mn), zero ) - SALLOCATE( dZadR, (1:mn,0:1,0:1,1:mn), zero ) - SALLOCATE( dZadZ, (1:mn,0:1,0:1,1:mn), zero ) - - SALLOCATE( dRodR, (1:Ntz,0:3,1:mn), zero ) ! calculated in rzaxis; 19 Sep 16; - SALLOCATE( dRodZ, (1:Ntz,0:3,1:mn), zero ) - SALLOCATE( dZodR, (1:Ntz,0:3,1:mn), zero ) - SALLOCATE( dZodZ, (1:Ntz,0:3,1:mn), zero ) + + allocate( dRadR(1:mn,0:1,0:1,1:mn), stat=astat ) + dRadR(1:mn,0:1,0:1,1:mn) = zero + ! 
calculated in rzaxis; 19 Sep 16; + + allocate( dRadZ(1:mn,0:1,0:1,1:mn), stat=astat ) + dRadZ(1:mn,0:1,0:1,1:mn) = zero + + + allocate( dZadR(1:mn,0:1,0:1,1:mn), stat=astat ) + dZadR(1:mn,0:1,0:1,1:mn) = zero + + + allocate( dZadZ(1:mn,0:1,0:1,1:mn), stat=astat ) + dZadZ(1:mn,0:1,0:1,1:mn) = zero + + + + allocate( dRodR(1:Ntz,0:3,1:mn), stat=astat ) + dRodR(1:Ntz,0:3,1:mn) = zero + ! calculated in rzaxis; 19 Sep 16; + + allocate( dRodZ(1:Ntz,0:3,1:mn), stat=astat ) + dRodZ(1:Ntz,0:3,1:mn) = zero + + + allocate( dZodR(1:Ntz,0:3,1:mn), stat=astat ) + dZodR(1:Ntz,0:3,1:mn) = zero + + + allocate( dZodZ(1:Ntz,0:3,1:mn), stat=astat ) + dZodZ(1:Ntz,0:3,1:mn) = zero + endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -1337,48 +1747,144 @@ subroutine preset !> **gzzmne, gzzmno: metric information** !> These are defined in metrix() , and used in ma00aa(). !> - SALLOCATE( goomne, (0:mne, maxIquad), zero ) ! workspace for Fourier decomposition of metric terms; - SALLOCATE( goomno, (0:mne, maxIquad), zero ) - SALLOCATE( gssmne, (0:mne, maxIquad), zero ) ! workspace for Fourier decomposition of metric terms; - SALLOCATE( gssmno, (0:mne, maxIquad), zero ) - SALLOCATE( gstmne, (0:mne, maxIquad), zero ) ! workspace for Fourier decomposition of metric terms; - SALLOCATE( gstmno, (0:mne, maxIquad), zero ) - SALLOCATE( gszmne, (0:mne, maxIquad), zero ) ! workspace for Fourier decomposition of metric terms; - SALLOCATE( gszmno, (0:mne, maxIquad), zero ) - SALLOCATE( gttmne, (0:mne, maxIquad), zero ) ! workspace for Fourier decomposition of metric terms; - SALLOCATE( gttmno, (0:mne, maxIquad), zero ) - SALLOCATE( gtzmne, (0:mne, maxIquad), zero ) ! workspace for Fourier decomposition of metric terms; - SALLOCATE( gtzmno, (0:mne, maxIquad), zero ) - SALLOCATE( gzzmne, (0:mne, maxIquad), zero ) ! workspace for Fourier decomposition of metric terms; - SALLOCATE( gzzmno, (0:mne, maxIquad), zero ) + + allocate( goomne(0:mne, maxIquad), stat=astat ) + goomne(0:mne, maxIquad) = zero + ! workspace for Fourier decomposition of metric terms; + + allocate( goomno(0:mne, maxIquad), stat=astat ) + goomno(0:mne, maxIquad) = zero + + + allocate( gssmne(0:mne, maxIquad), stat=astat ) + gssmne(0:mne, maxIquad) = zero + ! workspace for Fourier decomposition of metric terms; + + allocate( gssmno(0:mne, maxIquad), stat=astat ) + gssmno(0:mne, maxIquad) = zero + + + allocate( gstmne(0:mne, maxIquad), stat=astat ) + gstmne(0:mne, maxIquad) = zero + ! workspace for Fourier decomposition of metric terms; + + allocate( gstmno(0:mne, maxIquad), stat=astat ) + gstmno(0:mne, maxIquad) = zero + + + allocate( gszmne(0:mne, maxIquad), stat=astat ) + gszmne(0:mne, maxIquad) = zero + ! workspace for Fourier decomposition of metric terms; + + allocate( gszmno(0:mne, maxIquad), stat=astat ) + gszmno(0:mne, maxIquad) = zero + + + allocate( gttmne(0:mne, maxIquad), stat=astat ) + gttmne(0:mne, maxIquad) = zero + ! workspace for Fourier decomposition of metric terms; + + allocate( gttmno(0:mne, maxIquad), stat=astat ) + gttmno(0:mne, maxIquad) = zero + + + allocate( gtzmne(0:mne, maxIquad), stat=astat ) + gtzmne(0:mne, maxIquad) = zero + ! workspace for Fourier decomposition of metric terms; + + allocate( gtzmno(0:mne, maxIquad), stat=astat ) + gtzmno(0:mne, maxIquad) = zero + + + allocate( gzzmne(0:mne, maxIquad), stat=astat ) + gzzmne(0:mne, maxIquad) = zero + ! 
workspace for Fourier decomposition of metric terms; + + allocate( gzzmno(0:mne, maxIquad), stat=astat ) + gzzmno(0:mne, maxIquad) = zero + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - SALLOCATE( ijreal, (1:Ntz), zero ) ! real space grid; - SALLOCATE( ijimag, (1:Ntz), zero ) - SALLOCATE( jireal, (1:Ntz), zero ) - SALLOCATE( jiimag, (1:Ntz), zero ) - SALLOCATE( jkreal, (1:Ntz), zero ) - SALLOCATE( jkimag, (1:Ntz), zero ) - SALLOCATE( kjreal, (1:Ntz), zero ) - SALLOCATE( kjimag, (1:Ntz), zero ) + allocate( ijreal(1:Ntz), stat=astat ) + ijreal(1:Ntz) = zero + ! real space grid; + + allocate( ijimag(1:Ntz), stat=astat ) + ijimag(1:Ntz) = zero + + + allocate( jireal(1:Ntz), stat=astat ) + jireal(1:Ntz) = zero + + + allocate( jiimag(1:Ntz), stat=astat ) + jiimag(1:Ntz) = zero + + + + allocate( jkreal(1:Ntz), stat=astat ) + jkreal(1:Ntz) = zero + + + allocate( jkimag(1:Ntz), stat=astat ) + jkimag(1:Ntz) = zero + + + allocate( kjreal(1:Ntz), stat=astat ) + kjreal(1:Ntz) = zero + + + allocate( kjimag(1:Ntz), stat=astat ) + kjimag(1:Ntz) = zero + + + + allocate( cplxin(1:Nt,1:Nz,nthreads), stat=astat ) + cplxin(1:Nt,1:Nz,nthreads) = zero + + + allocate( cplxout(1:Nt,1:Nz,nthreads), stat=astat ) + cplxout(1:Nt,1:Nz,nthreads) = zero - SALLOCATE( cplxin, (1:Nt,1:Nz,nthreads), zero ) - SALLOCATE( cplxout, (1:Nt,1:Nz,nthreads), zero ) ! Create and save optimal plans for forward and inverse 2D fast Fourier transforms with FFTW. -JAB; 25 Jul 2017 planf = fftw_plan_dft_2d( Nz, Nt, cplxin(:,:,1), cplxout(:,:,1), FFTW_FORWARD, FFTW_MEASURE + FFTW_DESTROY_INPUT ) planb = fftw_plan_dft_2d( Nz, Nt, cplxin(:,:,1), cplxout(:,:,1), FFTW_BACKWARD, FFTW_MEASURE + FFTW_DESTROY_INPUT ) - SALLOCATE( efmn, (1:mne), zero ) ! Fourier harmonics workspace; 24 Apr 13; - SALLOCATE( ofmn, (1:mne), zero ) - SALLOCATE( cfmn, (1:mne), zero ) - SALLOCATE( sfmn, (1:mne), zero ) - SALLOCATE( evmn, (1:mne), zero ) - SALLOCATE( odmn, (1:mne), zero ) - SALLOCATE( comn, (1:mne), zero ) - SALLOCATE( simn, (1:mne), zero ) + + allocate( efmn(1:mne), stat=astat ) + efmn(1:mne) = zero + ! Fourier harmonics workspace; 24 Apr 13; + + allocate( ofmn(1:mne), stat=astat ) + ofmn(1:mne) = zero + + + allocate( cfmn(1:mne), stat=astat ) + cfmn(1:mne) = zero + + + allocate( sfmn(1:mne), stat=astat ) + sfmn(1:mne) = zero + + + allocate( evmn(1:mne), stat=astat ) + evmn(1:mne) = zero + + + allocate( odmn(1:mne), stat=astat ) + odmn(1:mne) = zero + + + allocate( comn(1:mne), stat=astat ) + comn(1:mne) = zero + + + allocate( simn(1:mne), stat=astat ) + simn(1:mne) = zero + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
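Throughout this hunk the former SALLOCATE macro is written out as a plain allocate followed by a whole-array initialization. A minimal sketch of that pattern, reusing the efmn workspace and the variables already declared in preset; the check on astat is an illustrative addition only and is not performed by the expanded code in this patch, which leaves astat unused after the allocate:

    allocate( efmn(1:mne), stat=astat )
    if( astat.ne.0 ) then                      ! hypothetical status check, not part of the patch
      write(ounit,'("preset : fatal : myid=",i3," ; failed to allocate efmn ;")') myid
      call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
    endif
    efmn(1:mne) = zero                         ! initialize to zero, as SALLOCATE did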
@@ -1391,14 +1897,38 @@ subroutine preset !> \f} !> - SALLOCATE( gteta, (1:Ntz), zero ) - SALLOCATE( gzeta, (1:Ntz), zero ) - SALLOCATE( cosi, (1:Ntz,1:mn), zero ) - SALLOCATE( sini, (1:Ntz,1:mn), zero ) + allocate( gteta(1:Ntz), stat=astat ) + gteta(1:Ntz) = zero + + + allocate( gzeta(1:Ntz), stat=astat ) + gzeta(1:Ntz) = zero + + + + allocate( cosi(1:Ntz,1:mn), stat=astat ) + cosi(1:Ntz,1:mn) = zero + + + allocate( sini(1:Ntz,1:mn), stat=astat ) + sini(1:Ntz,1:mn) = zero + + + + if( Nz.eq.0 ) then + write(6,'("preset : fatal : myid=",i3," ; Nz.eq.0 ; illegal division ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : Nz.eq.0 : illegal division ;" + endif + + + if( Nt.eq.0 ) then + write(6,'("preset : fatal : myid=",i3," ; Nt.eq.0 ; illegal division ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : Nt.eq.0 : illegal division ;" + endif - FATAL( preset, Nz.eq.0, illegal division ) - FATAL( preset, Nt.eq.0, illegal division ) do ii = 1, mn ; mi = im(ii) ; ni = in(ii) ! loop over Fourier harmonics; @@ -1471,7 +2001,12 @@ subroutine preset case( 2 ) ; vvol = Mvol end select - WCALL( preset, rzaxis, ( Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), vvol, .false. ) ) ! set coordinate axis; 19 Jul 16; + + cput = MPI_WTIME() + Tpreset = Tpreset + ( cput-cpuo ) + call rzaxis( Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), vvol, .false. ) + cpuo = MPI_WTIME() + ! set coordinate axis; 19 Jul 16; endif ! end of if( Igeometry.eq.3 ) then ; 19 Jul 16; @@ -1497,8 +2032,14 @@ subroutine preset !> and used only for the initialization of the surfaces taking into account axis information if provided. !> - SALLOCATE( psifactor, (1:mn,1:Mvol), zero ) - SALLOCATE( inifactor, (1:mn,1:Mvol), zero ) + + allocate( psifactor(1:mn,1:Mvol), stat=astat ) + psifactor(1:mn,1:Mvol) = zero + + + allocate( inifactor(1:mn,1:Mvol), stat=astat ) + inifactor(1:mn,1:Mvol) = zero + psifactor(1:mn,1:Mvol) = one inifactor(1:mn,1:Mvol) = one @@ -1533,7 +2074,13 @@ subroutine preset case default - FATAL( readin, .true., invalid Igeometry for construction of psifactor ) + + if( .true. ) then + write(6,'("readin : fatal : myid=",i3," ; .true. ; invalid Igeometry for construction of psifactor ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "readin : .true. : invalid Igeometry for construction of psifactor ;" + endif + end select @@ -1556,7 +2103,13 @@ subroutine preset case( 2 ) ! cylindrical - standard; 20 Apr 13; - FATAL( preset, Linitialize.ne.1, geometrical initialization under construction for cylindrical ) + + if( Linitialize.ne.1 ) then + write(6,'("preset : fatal : myid=",i3," ; Linitialize.ne.1 ; geometrical initialization under construction for cylindrical ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : Linitialize.ne.1 : geometrical initialization under construction for cylindrical ;" + endif + do vvol = 1, Nvol-1 ;iRbc(1:mn,vvol) = iRbc(1:mn,Nvol) * psifactor(1:mn,vvol) @@ -1567,11 +2120,23 @@ subroutine preset case( 3 ) ! toroidal; 20 Apr 13; - FATAL( preset, Linitialize.lt.0, geometrical initialization under construction for toroidal ) ! 
see commented-out source below; 19 Jul 16; + + if( Linitialize.lt.0 ) then + write(6,'("preset : fatal : myid=",i3," ; Linitialize.lt.0 ; geometrical initialization under construction for toroidal ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : Linitialize.lt.0 : geometrical initialization under construction for toroidal ;" + endif + ! see commented-out source below; 19 Jul 16; lvol = Nvol-1 + Linitialize - FATAL( preset, lvol.gt.Mvol, perhaps illegal combination of Linitialize and Lfreebound ) + + if( lvol.gt.Mvol ) then + write(6,'("preset : fatal : myid=",i3," ; lvol.gt.Mvol ; perhaps illegal combination of Linitialize and Lfreebound ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "preset : lvol.gt.Mvol : perhaps illegal combination of Linitialize and Lfreebound ;" + endif + ! do vvol = 1, Nvol-1 ! 19 Jul 16; ! ;iRbc(1:mn,vvol) = iRbc(1:mn,0) + ( iRbc(1:mn,Nvol) - iRbc(1:mn,0) ) * psifactor(1:mn,vvol) ! 19 Jul 16; @@ -1608,8 +2173,14 @@ subroutine preset !> **Bsupumn and Bsupvmn** - SALLOCATE( Bsupumn, (1:Nvol,0:1,1:mn), zero ) ! Fourier components of {\bf B}\cdot\nabla \theta on boundary; required for virtual casing; - SALLOCATE( Bsupvmn, (1:Nvol,0:1,1:mn), zero ) ! Fourier components of {\bf B}\cdot\nabla \zeta on boundary; + + allocate( Bsupumn(1:Nvol,0:1,1:mn), stat=astat ) + Bsupumn(1:Nvol,0:1,1:mn) = zero + ! Fourier components of {\bf B}\cdot\nabla \theta on boundary; required for virtual casing; + + allocate( Bsupvmn(1:Nvol,0:1,1:mn), stat=astat ) + Bsupvmn(1:Nvol,0:1,1:mn) = zero + ! Fourier components of {\bf B}\cdot\nabla \zeta on boundary; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -1649,38 +2220,101 @@ subroutine preset !>
!> The values of \c diotadxup are assigned in mp00aa() after calling tr00ab().
  • !> - SALLOCATE( diotadxup, (0:1,-1:2,1:Mvol), zero ) ! measured rotational transform on inner/outer interfaces in each annulus; - SALLOCATE( dItGpdxtp, (0:1,-1:2,1:Mvol), zero ) ! measured plasma and linking currents ; - SALLOCATE( glambda, (1:Ntz+1,0:2,0:1,1:Mvol), zero ) ! save initial guesses for iterative calculation of rotational-transform; 21 Apr 13; + allocate( diotadxup(0:1,-1:2,1:Mvol), stat=astat ) + diotadxup(0:1,-1:2,1:Mvol) = zero + ! measured rotational transform on inner/outer interfaces in each annulus; + + allocate( dItGpdxtp(0:1,-1:2,1:Mvol), stat=astat ) + dItGpdxtp(0:1,-1:2,1:Mvol) = zero + ! measured plasma and linking currents ; + + + allocate( glambda(1:Ntz+1,0:2,0:1,1:Mvol), stat=astat ) + glambda(1:Ntz+1,0:2,0:1,1:Mvol) = zero + ! save initial guesses for iterative calculation of rotational-transform; 21 Apr 13; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! ! Construction of ``force''; - SALLOCATE( Bemn, (1:mn,1:Mvol,0:1), zero ) - SALLOCATE( Bomn, (1:mn,1:Mvol,0:1), zero ) - SALLOCATE( Iomn, (1:mn,1:Mvol ), zero ) - SALLOCATE( Iemn, (1:mn,1:Mvol ), zero ) - SALLOCATE( Somn, (1:mn,1:Mvol,0:1), zero ) - SALLOCATE( Semn, (1:mn,1:Mvol,0:1), zero ) - SALLOCATE( Pomn, (1:mn,1:Mvol,0:2), zero ) - SALLOCATE( Pemn, (1:mn,1:Mvol,0:2), zero ) - SALLOCATE( BBe , (1:Mvol-1), zero ) - SALLOCATE( IIo , (1:Mvol-1), zero ) - SALLOCATE( BBo , (1:Mvol-1), zero ) - SALLOCATE( IIe , (1:Mvol-1), zero ) + allocate( Bemn(1:mn,1:Mvol,0:1), stat=astat ) + Bemn(1:mn,1:Mvol,0:1) = zero + + + allocate( Bomn(1:mn,1:Mvol,0:1), stat=astat ) + Bomn(1:mn,1:Mvol,0:1) = zero + + + allocate( Iomn(1:mn,1:Mvol ), stat=astat ) + Iomn(1:mn,1:Mvol ) = zero + + + allocate( Iemn(1:mn,1:Mvol ), stat=astat ) + Iemn(1:mn,1:Mvol ) = zero + + + allocate( Somn(1:mn,1:Mvol,0:1), stat=astat ) + Somn(1:mn,1:Mvol,0:1) = zero + + + allocate( Semn(1:mn,1:Mvol,0:1), stat=astat ) + Semn(1:mn,1:Mvol,0:1) = zero + + + allocate( Pomn(1:mn,1:Mvol,0:2), stat=astat ) + Pomn(1:mn,1:Mvol,0:2) = zero + + + allocate( Pemn(1:mn,1:Mvol,0:2), stat=astat ) + Pemn(1:mn,1:Mvol,0:2) = zero + + + + allocate( BBe (1:Mvol-1), stat=astat ) + BBe (1:Mvol-1) = zero + + + allocate( IIo (1:Mvol-1), stat=astat ) + IIo (1:Mvol-1) = zero + + + allocate( BBo (1:Mvol-1), stat=astat ) + BBo (1:Mvol-1) = zero + + + allocate( IIe (1:Mvol-1), stat=astat ) + IIe (1:Mvol-1) = zero + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - SALLOCATE( Btemn, (1:mn,0:1,1:Mvol), zero ) ! these are declared in global, calculated in sc00aa, broadcast in xspech, and written to file in hdfint; - SALLOCATE( Bzemn, (1:mn,0:1,1:Mvol), zero ) - SALLOCATE( Btomn, (1:mn,0:1,1:Mvol), zero ) - SALLOCATE( Bzomn, (1:mn,0:1,1:Mvol), zero ) - SALLOCATE( Bloweremn, (1:mn, 3), zero) ! these are declared in global, calculated in getbco, used in mtrxhs - SALLOCATE( Bloweromn, (1:mn, 3), zero) + allocate( Btemn(1:mn,0:1,1:Mvol), stat=astat ) + Btemn(1:mn,0:1,1:Mvol) = zero + ! 
these are declared in global, calculated in sc00aa, broadcast in xspech, and written to file in hdfint; + + allocate( Bzemn(1:mn,0:1,1:Mvol), stat=astat ) + Bzemn(1:mn,0:1,1:Mvol) = zero + + + allocate( Btomn(1:mn,0:1,1:Mvol), stat=astat ) + Btomn(1:mn,0:1,1:Mvol) = zero + + + allocate( Bzomn(1:mn,0:1,1:Mvol), stat=astat ) + Bzomn(1:mn,0:1,1:Mvol) = zero + + + + allocate( Bloweremn(1:mn, 3), stat=astat ) + Bloweremn(1:mn, 3) = zero + ! these are declared in global, calculated in getbco, used in mtrxhs + + allocate( Bloweromn(1:mn, 3), stat=astat ) + Bloweromn(1:mn, 3) = zero + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -1697,7 +2331,10 @@ subroutine preset ! Allocate matrix to store the last solution of GMRES as initialization LILUprecond = .false. if (Lmatsolver.eq.2 .or. Lmatsolver.eq.3) then ! use GMRES - SALLOCATE(GMRESlastsolution, (MAXVAL(NAdof),0:2,1:Mvol), zero ) + + allocate( GMRESlastsolution(MAXVAL(NAdof),0:2,1:Mvol), stat=astat ) + GMRESlastsolution(MAXVAL(NAdof),0:2,1:Mvol) = zero + GMRESlastsolution = zero if (LGMRESprec .eq. 1) LILUprecond = .true. endif @@ -1710,29 +2347,55 @@ subroutine preset NOTMatrixFree = .true. endif - SALLOCATE( vvolume , (1:Mvol), zero ) ! volume integral of \sqrt g; - SALLOCATE( lBBintegral, (1:Mvol), zero ) ! volume integral of B.B ; - SALLOCATE( lABintegral, (1:Mvol), zero ) ! volume integral of A.B ; + + allocate( vvolume (1:Mvol), stat=astat ) + vvolume (1:Mvol) = zero + ! volume integral of \sqrt g; + + allocate( lBBintegral(1:Mvol), stat=astat ) + lBBintegral(1:Mvol) = zero + ! volume integral of B.B ; + + allocate( lABintegral(1:Mvol), stat=astat ) + lABintegral(1:Mvol) = zero + ! volume integral of A.B ; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! if( YESstellsym ) lmns = 1 + (mns-1) ! number of independent degrees-of-freedom in angle transformation; 30 Jan 13; if( NOTstellsym ) lmns = 1 + (mns-1) + (mns-1) ! number of independent degrees-of-freedom in angle transformation; 30 Jan 13; - SALLOCATE( dlambdaout, (1:lmns,1:Mvol,0:1), zero ) + + allocate( dlambdaout(1:lmns,1:Mvol,0:1), stat=astat ) + dlambdaout(1:lmns,1:Mvol,0:1) = zero + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! if (Lfreebound > 0) then ! Only do for free-boundary; 7 Nov 18; - SALLOCATE( Dxyz, (1:3,1:Ntz), zero ) ! Cartesian components of computational boundary; position; 14 Apr 17; - SALLOCATE( Nxyz, (1:3,1:Ntz), zero ) ! Cartesian components of computational boundary; normal ; 14 Apr 17; - SALLOCATE( Jxyz, (1:Ntz,1:3), zero ) ! Cartesian components of virtual casing surface current; needs to be recalculated at each iteration; + allocate( Dxyz(1:3,1:Ntz), stat=astat ) + Dxyz(1:3,1:Ntz) = zero + ! Cartesian components of computational boundary; position; 14 Apr 17; + + allocate( Nxyz(1:3,1:Ntz), stat=astat ) + Nxyz(1:3,1:Ntz) = zero + ! Cartesian components of computational boundary; normal ; 14 Apr 17; + + + allocate( Jxyz(1:Ntz,1:3), stat=astat ) + Jxyz(1:Ntz,1:3) = zero + ! Cartesian components of virtual casing surface current; needs to be recalculated at each iteration; lvol = Mvol ; lss = one ; Lcurvature = 1 ; Lcoordinatesingularity = .false. ! 
will only require normal field on outer interface = computational boundary; - WCALL( preset, coords,( lvol, lss, Lcurvature, Ntz, mn ) ) ! will need Rij, Zij; THE COMPUTATIONAL BOUNDARY DOES NOT CHANGE; + + cput = MPI_WTIME() + Tpreset = Tpreset + ( cput-cpuo ) + call coords( lvol, lss, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + ! will need Rij, Zij; THE COMPUTATIONAL BOUNDARY DOES NOT CHANGE; do kk = 0, Nz-1 ; zeta = kk * pi2nfp / Nz @@ -1746,7 +2409,13 @@ subroutine preset Dxyz(1:3,jk) = (/ teta , zeta , Rij(jk,0,0) /) Nxyz(1:3,jk) = (/ - Rij(jk,2,0), -Rij(jk,3,0), one /) case( 2 ) ! Igeometry = 2 ; - FATAL( bnorml, .true., free-boundary calculations not yet implemented in cylindrical geometry ) + + if( .true. ) then + write(6,'("bnorml : fatal : myid=",i3," ; .true. ; free-boundary calculations not yet implemented in cylindrical geometry ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "bnorml : .true. : free-boundary calculations not yet implemented in cylindrical geometry ;" + endif + case( 3 ) ! Igeometry = 3 ; Dxyz(1:3,jk) = (/ Rij(jk,0,0) * cszeta(0), Rij(jk,0,0) * cszeta(1), Zij(jk,0,0) /) Nxyz(1:3,jk) = (/ Rij(jk,2,0) * cszeta(1) * Zij(jk,3,0) - Zij(jk,2,0) * ( Rij(jk,3,0) * cszeta(1) + Rij(jk,0,0) * cszeta(0) ), & @@ -1768,7 +2437,12 @@ subroutine preset Localconstraint = .true. endif - RETURN(preset) + +9999 continue + cput = MPI_WTIME() + Tpreset = Tpreset + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/ra00aa.f90 b/src/ra00aa.F90 similarity index 69% rename from src/ra00aa.f90 rename to src/ra00aa.F90 index 81553e76..1cf56b30 100644 --- a/src/ra00aa.f90 +++ b/src/ra00aa.F90 @@ -44,7 +44,7 @@ !> !> @param[in] writeorread 'W' to write the vector potential; 'R' to read it subroutine ra00aa( writeorread ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero @@ -63,19 +63,35 @@ subroutine ra00aa( writeorread ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - CHARACTER, intent(in) :: writeorread +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + character, intent(in) :: writeorread LOGICAL :: exist - INTEGER :: vvol, oldMvol, oldMpol, oldNtor, oldmn, oldNfp, oldLrad, ii, jj, minLrad, llmodnp, ideriv, sumLrad - INTEGER, allocatable :: oldim(:), oldin(:) - REAL , allocatable :: oldAte(:), oldAze(:), oldAto(:), oldAzo(:) - REAL , allocatable :: allAte(:,:), allAze(:,:), allAto(:,:), allAzo(:,:) + integer :: vvol, oldMvol, oldMpol, oldNtor, oldmn, oldNfp, oldLrad, ii, jj, minLrad, llmodnp, ideriv, sumLrad + integer, allocatable :: oldim(:), oldin(:) + real(wp) , allocatable :: oldAte(:), oldAze(:), oldAto(:), oldAzo(:) + real(wp) , allocatable :: allAte(:,:), allAze(:,:), allAto(:,:), allAzo(:,:) - BEGIN(ra00aa) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
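The WCALL and RETURN macros are likewise expanded into explicit MPI_WTIME bookkeeping around each profiled call and before the final return (the 9999 label above is the common exit point that RETURN used to jump to). The pattern, as it appears for the coords call in preset; all names are taken from the hunk:

    cput = MPI_WTIME()                  ! stop accumulating time for preset
    Tpreset = Tpreset + ( cput-cpuo )   ! charge the elapsed interval to preset
    call coords( lvol, lss, Lcurvature, Ntz, mn )
    cpuo = MPI_WTIME()                  ! restart the preset timer once coords returns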
@@ -94,11 +110,41 @@ subroutine ra00aa( writeorread ) if( myid.eq.0 ) then do vvol = 1, Mvol do ii = 1, mn ! loop over Fourier harmonics; - FATAL( ra00aa, .not.allocated(Ate(vvol,ideriv,ii)%s), error ) - FATAL( ra00aa, .not.allocated(Aze(vvol,ideriv,ii)%s), error ) - FATAL( ra00aa, .not.allocated(Ato(vvol,ideriv,ii)%s), error ) - FATAL( ra00aa, .not.allocated(Azo(vvol,ideriv,ii)%s), error ) - FATAL( ra00aa, Lrad(vvol).le.0, error ) ! TODO: probably not needed, since a lot other things do not work as well if Lrad is messed up + + if( .not.allocated(Ate(vvol,ideriv,ii)%s) ) then + write(6,'("ra00aa : fatal : myid=",i3," ; .not.allocated(Ate(vvol,ideriv,ii)%s) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ra00aa : .not.allocated(Ate(vvol,ideriv,ii)%s) : error ;" + endif + + + if( .not.allocated(Aze(vvol,ideriv,ii)%s) ) then + write(6,'("ra00aa : fatal : myid=",i3," ; .not.allocated(Aze(vvol,ideriv,ii)%s) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ra00aa : .not.allocated(Aze(vvol,ideriv,ii)%s) : error ;" + endif + + + if( .not.allocated(Ato(vvol,ideriv,ii)%s) ) then + write(6,'("ra00aa : fatal : myid=",i3," ; .not.allocated(Ato(vvol,ideriv,ii)%s) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ra00aa : .not.allocated(Ato(vvol,ideriv,ii)%s) : error ;" + endif + + + if( .not.allocated(Azo(vvol,ideriv,ii)%s) ) then + write(6,'("ra00aa : fatal : myid=",i3," ; .not.allocated(Azo(vvol,ideriv,ii)%s) ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ra00aa : .not.allocated(Azo(vvol,ideriv,ii)%s) : error ;" + endif + + + if( Lrad(vvol).le.0 ) then + write(6,'("ra00aa : fatal : myid=",i3," ; Lrad(vvol).le.0 ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ra00aa : Lrad(vvol).le.0 : error ;" + endif + ! TODO: probably not needed, since a lot other things do not work as well if Lrad is messed up enddo ! end of do ii; 6 Feb 13; enddo ! end of do vvol; 6 Feb 13; endif ! end of if( myid.eq.0 ) ; 6 Feb 13; @@ -132,7 +178,12 @@ subroutine ra00aa( writeorread ) sumLrad = sum(Lrad(1:Mvol)+1) ! HDF5 calls have to be done from all CPUs when using the collective API - WCALL( ra00aa, write_vector_potential, (sumLrad, allAte, allAze, allAto, allAzo) ) + + cput = MPI_WTIME() + Tra00aa = Tra00aa + ( cput-cpuo ) + call write_vector_potential(sumLrad, allAte, allAze, allAto, allAzo) + cpuo = MPI_WTIME() + ! 
clean up after yourself deallocate(allAte) @@ -170,8 +221,14 @@ subroutine ra00aa( writeorread ) if( oldMvol.ne.Mvol ) then ; write(ounit,'("ra00aa : ",f10.2," : myid=",i3," ; error ; inconsistent Mvol ;")') cput-cpus, myid ; goto 9997 endif - SALLOCATE( oldim, (1:oldmn), 0 ) - SALLOCATE( oldin, (1:oldmn), 0 ) + + allocate( oldim(1:oldmn), stat=astat ) + oldim(1:oldmn) = 0 + + + allocate( oldin(1:oldmn), stat=astat ) + oldin(1:oldmn) = 0 + read(aunit,iostat=ios) oldim(1:oldmn) read(aunit,iostat=ios) oldin(1:oldmn) @@ -182,10 +239,22 @@ subroutine ra00aa( writeorread ) minLrad = min(oldLrad,Lrad(vvol)) - SALLOCATE( oldAte, (0:oldLrad), zero ) - SALLOCATE( oldAze, (0:oldLrad), zero ) - SALLOCATE( oldAto, (0:oldLrad), zero ) - SALLOCATE( oldAzo, (0:oldLrad), zero ) + + allocate( oldAte(0:oldLrad), stat=astat ) + oldAte(0:oldLrad) = zero + + + allocate( oldAze(0:oldLrad), stat=astat ) + oldAze(0:oldLrad) = zero + + + allocate( oldAto(0:oldLrad), stat=astat ) + oldAto(0:oldLrad) = zero + + + allocate( oldAzo(0:oldLrad), stat=astat ) + oldAzo(0:oldLrad) = zero + do jj = 1, oldmn @@ -204,15 +273,27 @@ subroutine ra00aa( writeorread ) enddo ! end of do jj; 26 Feb 13; - DALLOCATE(oldAte) - DALLOCATE(oldAze) - DALLOCATE(oldAto) - DALLOCATE(oldAzo) + + deallocate(oldAte,stat=astat) + + + deallocate(oldAze,stat=astat) + + + deallocate(oldAto,stat=astat) + + + deallocate(oldAzo,stat=astat) + enddo ! end of do vvol; 26 Feb 13; - DALLOCATE(oldim) - DALLOCATE(oldin) + + deallocate(oldim,stat=astat) + + + deallocate(oldin,stat=astat) + 9997 continue @@ -227,13 +308,21 @@ subroutine ra00aa( writeorread ) llmodnp = 0 ! this node contains the information that is to be broadcast; 26 Feb 13; do ii = 1, mn - RlBCAST( Ate(vvol,ideriv,ii)%s(0:Lrad(vvol)), Lrad(vvol)+1, llmodnp ) - RlBCAST( Aze(vvol,ideriv,ii)%s(0:Lrad(vvol)), Lrad(vvol)+1, llmodnp ) + + call MPI_BCAST(Ate(vvol,ideriv,ii)%s(0:Lrad(vvol)),Lrad(vvol)+1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Aze(vvol,ideriv,ii)%s(0:Lrad(vvol)),Lrad(vvol)+1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + enddo !if( NOTstellsym ) then do ii = 1, mn - RlBCAST( Ato(vvol,ideriv,ii)%s(0:Lrad(vvol)), Lrad(vvol)+1, llmodnp ) - RlBCAST( Azo(vvol,ideriv,ii)%s(0:Lrad(vvol)), Lrad(vvol)+1, llmodnp ) + + call MPI_BCAST(Ato(vvol,ideriv,ii)%s(0:Lrad(vvol)),Lrad(vvol)+1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + + + call MPI_BCAST(Azo(vvol,ideriv,ii)%s(0:Lrad(vvol)),Lrad(vvol)+1,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr) + enddo !endif @@ -243,13 +332,24 @@ subroutine ra00aa( writeorread ) case default - FATAL(ra00aa, .true., invalid writeorread flag supplied on input ) + + if( .true. ) then + write(6,'("ra00aa : fatal : myid=",i3," ; .true. ; invalid writeorread flag supplied on input ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "ra00aa : .true. : invalid writeorread flag supplied on input ;" + endif + end select !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(ra00aa) + +9999 continue + cput = MPI_WTIME() + Tra00aa = Tra00aa + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
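In ra00aa the RlBCAST macro is replaced by direct MPI_BCAST calls on double-precision buffers, with llmodnp as the root rank; as elsewhere in this patch, the returned ierr is not inspected. A sketch of the pattern, copied from the hunk above:

    call MPI_BCAST( Ate(vvol,ideriv,ii)%s(0:Lrad(vvol)), Lrad(vvol)+1, &
                    MPI_DOUBLE_PRECISION, llmodnp, MPI_COMM_SPEC, ierr )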
diff --git a/src/rzaxis.f90 b/src/rzaxis.F90 similarity index 90% rename from src/rzaxis.f90 rename to src/rzaxis.F90 index 0156e62b..28ecc58e 100644 --- a/src/rzaxis.f90 +++ b/src/rzaxis.F90 @@ -72,11 +72,8 @@ !> @param inZbc !> @param[in] ivol !> @param LcomputeDerivatives -!#ifdef DEBUG -!recursive subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivatives ) -!#else subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivatives ) -!#endif + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -103,39 +100,61 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + LOGICAL, intent(in) :: LComputeDerivatives ! indicates whether derivatives are to be calculated; - INTEGER, intent(in) :: Mvol, mn, ivol - REAL :: inRbc(1:mn,0:Mvol), inZbs(1:mn,0:Mvol), inRbs(1:mn,0:Mvol), inZbc(1:mn,0:Mvol) - REAL :: jRbc(1:mn,0:Mvol), jZbs(1:mn,0:Mvol), jRbs(1:mn,0:Mvol), jZbc(1:mn,0:Mvol) - REAL :: tmpRbc(1:mn,0:Mvol), tmpZbs(1:mn,0:Mvol), tmpRbs(1:mn,0:Mvol), tmpZbc(1:mn,0:Mvol) ! use as temp matrices to store iRbc etc + integer, intent(in) :: Mvol, mn, ivol + real(wp) :: inRbc(1:mn,0:Mvol), inZbs(1:mn,0:Mvol), inRbs(1:mn,0:Mvol), inZbc(1:mn,0:Mvol) + real(wp) :: jRbc(1:mn,0:Mvol), jZbs(1:mn,0:Mvol), jRbs(1:mn,0:Mvol), jZbc(1:mn,0:Mvol) + real(wp) :: tmpRbc(1:mn,0:Mvol), tmpZbs(1:mn,0:Mvol), tmpRbs(1:mn,0:Mvol), tmpZbc(1:mn,0:Mvol) ! use as temp matrices to store iRbc etc - REAL :: jacbase(1:Ntz), jacbasec(1:mn), jacbases(1:mn) ! the 2D Jacobian and its Fourier - REAL :: junkc(1:mn), junks(1:mn) ! these are junk matrices used for fft + real(wp) :: jacbase(1:Ntz), jacbasec(1:mn), jacbases(1:mn) ! the 2D Jacobian and its Fourier + real(wp) :: junkc(1:mn), junks(1:mn) ! these are junk matrices used for fft - INTEGER :: jvol, ii, ifail, jj, id, issym, irz, imn - INTEGER :: idJc, idJs, idRc, idRs, idZc, idZs + integer :: jvol, ii, ifail, jj, id, issym, irz, imn + integer :: idJc, idJs, idRc, idRs, idZc, idZs - INTEGER :: Lcurvature + integer :: Lcurvature - INTEGER :: Njac, idgetrf, idgetrs ! internal variables used in Jacobian method - REAL, allocatable :: jacrhs(:), djacrhs(:), jacmat(:,:), djacmat(:,:), solution(:), LU(:,:) ! internal matrices used in Jacobian method - INTEGER, allocatable :: ipiv(:) ! internal matrices used in Jacobian method + integer :: Njac, idgetrf, idgetrs ! internal variables used in Jacobian method + real(wp), allocatable :: jacrhs(:), djacrhs(:), jacmat(:,:), djacmat(:,:), solution(:), LU(:,:) ! internal matrices used in Jacobian method + integer, allocatable :: ipiv(:) ! internal matrices used in Jacobian method #ifdef DEBUG ! Debug variables - REAL :: dx, threshold ! used to check result with finite difference. - REAL :: newRbc(1:mn,0:Mvol), newZbs(1:mn,0:Mvol), newRbs(1:mn,0:Mvol), newZbc(1:mn,0:Mvol) + real(wp) :: dx, threshold ! used to check result with finite difference. 
+ real(wp) :: newRbc(1:mn,0:Mvol), newZbs(1:mn,0:Mvol), newRbs(1:mn,0:Mvol), newZbc(1:mn,0:Mvol) +#endif + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 #endif - BEGIN(rzaxis) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( rzaxis, ivol.gt.Mvol, perhaps illegal combination Linitialize=2 and Lfreebound=0 ) + + if( ivol.gt.Mvol ) then + write(6,'("rzaxis : fatal : myid=",i3," ; ivol.gt.Mvol ; perhaps illegal combination Linitialize=2 and Lfreebound=0 ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "rzaxis : ivol.gt.Mvol : perhaps illegal combination Linitialize=2 and Lfreebound=0 ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -219,7 +238,7 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ #ifdef DEBUG if( Wrzaxis ) then - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("rzaxis : ", 10x ," : ")') write(ounit,'("rzaxis : ",f10.2," : myid=",i3," ; inner : Rbc=[", 999(es23.15," ,"))') cput-cpus, myid, inRbc(1:Ntor+1,ivol) write(ounit,'("rzaxis : ",f10.2," : myid=",i3," ; axis : Rbc=[", 999(es23.15," ,"))') cput-cpus, myid, inRbc(1:Ntor+1,jvol) @@ -242,8 +261,20 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ #ifdef DEBUG if (LComputeDerivatives) then - FATAL( rzaxis, .not.allocated(cosi), fatal ) - FATAL( rzaxis, .not.allocated(sini), fatal ) + + if( .not.allocated(cosi) ) then + write(6,'("rzaxis : fatal : myid=",i3," ; .not.allocated(cosi) ; fatal ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "rzaxis : .not.allocated(cosi) : fatal ;" + endif + + + if( .not.allocated(sini) ) then + write(6,'("rzaxis : fatal : myid=",i3," ; .not.allocated(sini) ; fatal ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "rzaxis : .not.allocated(sini) : fatal ;" + endif + endif #endif @@ -376,11 +407,26 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ Njac = 2 * (2 * Ntoraxis + 1) end if - SALLOCATE( jacrhs, (1:Njac), zero ) - SALLOCATE( jacmat, (1:Njac, 1:Njac), zero ) - SALLOCATE( LU, (1:Njac, 1:Njac), zero ) - SALLOCATE( solution, (1:Njac), zero ) - SALLOCATE( ipiv, (1:Njac), 0) + + allocate( jacrhs(1:Njac), stat=astat ) + jacrhs(1:Njac) = zero + + + allocate( jacmat(1:Njac, 1:Njac), stat=astat ) + jacmat(1:Njac, 1:Njac) = zero + + + allocate( LU(1:Njac, 1:Njac), stat=astat ) + LU(1:Njac, 1:Njac) = zero + + + allocate( solution(1:Njac), stat=astat ) + solution(1:Njac) = zero + + + allocate( ipiv(1:Njac), stat=astat ) + ipiv(1:Njac) = 0 + ! replace iRbc to use subroutine coords iRbc(1:mn,1) = jRbc(1:mn, ivol) @@ -410,7 +456,12 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ idRs = 2 * Ntoraxis + 1 idZc = 3 * Ntoraxis + 2 - WCALL( rzaxis, coords, (1, one, Lcurvature, Ntz, mn )) + + cput = MPI_WTIME() + Trzaxis = Trzaxis + ( cput-cpuo ) + call coords(1, one, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + jacbase = sg(1:Ntz,0) / Rij(1:Ntz,0,0) ! extract the baseline 2D jacobian, note the definition here does not have the R factor @@ -563,8 +614,14 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ dZadZ = zero ! 
allocate the temp matrices - SALLOCATE( djacrhs, (1:Njac), zero ) - SALLOCATE( djacmat, (1:Njac, 1:Njac), zero ) + + allocate( djacrhs(1:Njac), stat=astat ) + djacrhs(1:Njac) = zero + + + allocate( djacmat(1:Njac, 1:Njac), stat=astat ) + djacmat(1:Njac, 1:Njac) = zero + dBdX%L = .true. ! will need derivatives; @@ -587,7 +644,12 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ if (im(imn).eq.0) then ! the jacobian on the RHS does not depend on m=0 terms jacbase = zero else - WCALL( rzaxis, coords, (1, one, Lcurvature, Ntz, mn )) ! the derivative of Jabobian w.r.t. geometry is computed by coords + + cput = MPI_WTIME() + Trzaxis = Trzaxis + ( cput-cpuo ) + call coords(1, one, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + ! the derivative of Jabobian w.r.t. geometry is computed by coords jacbase = sg(1:Ntz,1) end if @@ -772,8 +834,12 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ end do ! imn = 1, mn ! deallocate the matrices - DALLOCATE( djacrhs ) - DALLOCATE( djacmat ) + + deallocate(djacrhs ,stat=astat) + + + deallocate(djacmat ,stat=astat) + dBdX%L = .FALSE. @@ -793,7 +859,12 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ iZbc(1:Ntoraxis+1,0) = -solution(3*Ntoraxis+2:4*Ntoraxis+2) + iZbc(1:Ntoraxis+1,0) endif - WCALL( rzaxis, coords, (1, one, Lcurvature, Ntz, mn )) + + cput = MPI_WTIME() + Trzaxis = Trzaxis + ( cput-cpuo ) + call coords(1, one, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + jacbase = sg(1:Ntz,0) / Rij(1:Ntz,0,0) ! extract the baseline 2D jacobian call tfft( Nt, Nz, jacbase, Rij, & @@ -839,11 +910,21 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ endif ! YESstellsym ! Deallocate - DALLOCATE( jacrhs ) - DALLOCATE( jacmat ) - DALLOCATE( LU ) - DALLOCATE( solution ) - DALLOCATE( ipiv ) + + deallocate(jacrhs ,stat=astat) + + + deallocate(jacmat ,stat=astat) + + + deallocate(LU ,stat=astat) + + + deallocate(solution ,stat=astat) + + + deallocate(ipiv ,stat=astat) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -855,14 +936,19 @@ subroutine rzaxis( Mvol, mn, inRbc, inZbs, inRbs, inZbc, ivol, LcomputeDerivativ !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(rzaxis) + +9999 continue + cput = MPI_WTIME() + Trzaxis = Trzaxis + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! end subroutine rzaxis subroutine fndiff_rzaxis( Mvol, mn, ivol, jRbc, jRbs, jZbc, JZbs, imn, irz, issym ) - + use mod_kinds, only: wp => dp use constants, only : zero, one, half, two use numerical, only : vsmall @@ -878,16 +964,32 @@ subroutine fndiff_rzaxis( Mvol, mn, ivol, jRbc, jRbs, jZbc, JZbs, imn, irz, issy dRadR, dRadZ, dZadR, dZadZ, & NOTstellsym -LOCALS - INTEGER, intent(in) :: Mvol, mn, ivol, imn, irz, issym - REAL, intent(in) :: jRbc(1:mn,0:Mvol), jZbs(1:mn,0:Mvol), jRbs(1:mn,0:Mvol), jZbc(1:mn,0:Mvol) +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: Mvol, mn, ivol, imn, irz, issym + real(wp), intent(in) :: jRbc(1:mn,0:Mvol), jZbs(1:mn,0:Mvol), jRbs(1:mn,0:Mvol), jZbc(1:mn,0:Mvol) - INTEGER :: jvol, ii - REAL :: dx, threshold ! used to check result with finite difference. - REAL :: newRbc(1:mn,0:Mvol), newZbs(1:mn,0:Mvol), newRbs(1:mn,0:Mvol), newZbc(1:mn,0:Mvol) + integer :: jvol, ii + real(wp) :: dx, threshold ! used to check result with finite difference. + real(wp) :: newRbc(1:mn,0:Mvol), newZbs(1:mn,0:Mvol), newRbs(1:mn,0:Mvol), newZbc(1:mn,0:Mvol) + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif -BEGIN( rzaxis ) threshold = 1e-8 ! print with difference between FD and analytical more than this threshold dx = 1e-8 * jRbc(1,ivol) @@ -946,7 +1048,12 @@ subroutine fndiff_rzaxis( Mvol, mn, ivol, jRbc, jRbs, jZbc, JZbs, imn, irz, issy enddo -RETURN( rzaxis ) + +9999 continue + cput = MPI_WTIME() + Trzaxis = Trzaxis + ( cput-cpuo ) + return + end subroutine fndiff_rzaxis diff --git a/src/sphdf5.F90 b/src/sphdf5.F90 new file mode 100644 index 00000000..14c08572 --- /dev/null +++ b/src/sphdf5.F90 @@ -0,0 +1,1409 @@ +!> \file +!> \brief Writes all the output information to \c ext.sp.h5. +!> +!> If the output file already exists, it will be deleted and replaced +!> by an empty one, which gets filled in with the updated data. +!> All calls to the HDF5 API are filtered to only happen from MPI rank-0 +!> to be able to use the serial HDF5 library. +!> Parallel HDF5 was considered in the past, but abandoned due to very +!> subtle and irreproducible errors. + +!> \brief writing the HDF5 output file +!> \ingroup grp_output +module sphdf5 + use mod_kinds, only: wp => dp + use fileunits, only: ounit + use allglobal, only: myid, cpus, MPI_COMM_SPEC, ext, skip_write + use constants, only: version + use hdf5 + use h5utils + + implicit none + +! integer :: hdfier !< error flag for HDF5 library +! integer :: rank !< rank of data to write using macros + integer(hid_t) :: file_id !< default file ID used in macros +! integer(hid_t) :: space_id !< default dataspace ID used in macros +! integer(hid_t) :: dset_id !< default dataset ID used in macros +! integer(hsize_t) :: onedims(1:1) !< dimension specifier for one-dimensional data used in macros +! integer(hsize_t) :: twodims(1:2) !< dimension specifier for two-dimensional data used in macros +! integer(hsize_t) :: threedims(1:3) !< dimension specifier for three-dimensional data used in macros +! logical :: grp_exists !< flags used to signal if a group already exists +! 
logical :: var_exists !< flags used to signal if a variable already exists + + integer(hid_t) :: iteration_dset_id !< Dataset identifier for "iteration" + integer(hid_t) :: dataspace !< dataspace for extension by 1 iteration object + integer(hid_t) :: memspace !< memspace for extension by 1 iteration object + integer(hsize_t), dimension(1) :: old_data_dims !< current dimensions of "iterations" dataset + integer(hsize_t), dimension(1) :: data_dims !< new dimensions for "iterations" dataset + integer(hsize_t), dimension(1) :: max_dims !< maximum dimensions for "iterations" dataset + integer(hid_t) :: plist_id !< Property list identifier used to activate dataset transfer property + integer(hid_t) :: dt_nDcalls_id !< Memory datatype identifier (for "nDcalls" dataset in "/grid") + integer(hid_t) :: dt_Energy_id !< Memory datatype identifier (for "Energy" dataset in "/grid") + integer(hid_t) :: dt_ForceErr_id !< Memory datatype identifier (for "ForceErr" dataset in "/grid") + integer(hid_t) :: dt_iRbc_id !< Memory datatype identifier (for "iRbc" dataset in "/grid") + integer(hid_t) :: dt_iZbs_id !< Memory datatype identifier (for "iZbs" dataset in "/grid") + integer(hid_t) :: dt_iRbs_id !< Memory datatype identifier (for "iRbs" dataset in "/grid") + integer(hid_t) :: dt_iZbc_id !< Memory datatype identifier (for "iZbc" dataset in "/grid") + + integer, parameter :: rankP=3 !< rank of Poincare data + integer, parameter :: rankT=2 !< rank of rotational transform data + + integer(hid_t) :: grpPoincare !< group for Poincare data + integer(HID_T) :: dset_id_t !< Dataset identifier for \f$\theta\f$ coordinate of field line following + integer(HID_T) :: dset_id_s !< Dataset identifier for \f$s\f$ coordinate of field line following + integer(HID_T) :: dset_id_R !< Dataset identifier for \f$R\f$ coordinate of field line following + integer(HID_T) :: dset_id_Z !< Dataset identifier for \f$Z\f$ coordinate of field line following + integer(HID_T) :: dset_id_success !< Dataset identifier for success flag of trajectories to follow + integer(HID_T) :: filespace_t !< Dataspace identifier in file for \f$\theta\f$ coordinate of field line following + integer(HID_T) :: filespace_s !< Dataspace identifier in file for \f$s\f$ coordinate of field line following + integer(HID_T) :: filespace_R !< Dataspace identifier in file for \f$R\f$ coordinate of field line following + integer(HID_T) :: filespace_Z !< Dataspace identifier in file for \f$Z\f$ coordinate of field line following + integer(HID_T) :: filespace_success !< Dataspace identifier in file for success flag of trajectories to follow + integer(HID_T) :: memspace_t !< Dataspace identifier in memory for \f$\theta\f$ coordinate of field line following + integer(HID_T) :: memspace_s !< Dataspace identifier in memory for \f$s\f$ coordinate of field line following + integer(HID_T) :: memspace_R !< Dataspace identifier in memory for \f$R\f$ coordinate of field line following + integer(HID_T) :: memspace_Z !< Dataspace identifier in memory for \f$Z\f$ coordinate of field line following + integer(HID_T) :: memspace_success !< Dataspace identifier in memory for success flag of trajectories to follow + + integer(hid_t) :: grpTransform !< group for rotational transform data + integer(HID_T) :: dset_id_diotadxup !< Dataset identifier for diotadxup (derivative of rotational transform ?) + integer(HID_T) :: dset_id_fiota !< Dataset identifier for fiota ( rotational transform ?) 
+ integer(HID_T) :: filespace_diotadxup !< Dataspace identifier in file for diotadxup + integer(HID_T) :: filespace_fiota !< Dataspace identifier in file for fiota + integer(HID_T) :: memspace_diotadxup !< Dataspace identifier in memory for diotadxup + integer(HID_T) :: memspace_fiota !< Dataspace identifier in memory for fiota + +private +public init_outfile, & + mirror_input_to_outfile, & + init_convergence_output, & + write_convergence_output, & + write_grid, & + init_flt_output, & + write_poincare, & + write_transform, & + finalize_flt_output, & + write_vector_potential, & + hdfint, & + finish_outfile + +contains + +!> \brief Initialize the interface to the HDF5 library and open the output file. +!> \ingroup grp_output +!> +subroutine init_outfile() + integer :: hdfier !< error flag for HDF5 library + integer(hid_t) :: dset_id + + if (myid .eq. 0 .and. .not. skip_write) then + + ! initialize Fortran interface to the HDF5 library + call h5open_f(hdfier) + + ! (en/dis)able HDF5 internal error messages; + ! sphdf5 has its own error messages coming from the macros + call h5eset_auto_f(internalHdf5Msg, hdfier) + + ! Create the file + call h5fcreate_f(trim(ext)//".sp.h5", H5F_ACC_TRUNC_F, file_id, hdfier ) + + ! write version number + call HWRITERV_LO(file_id, "version", 1, (/ version /), dset_id) + call H5DESCR_CDSET(dset_id, "version of SPEC") + + endif ! myid.eq.0 +end subroutine ! init_outfile + +!> \brief Mirror input variables into output file. +!> \ingroup grp_output +!> +!> The goal of this routine is to have an exact copy of the input file contents +!> that were used to parameterize a given SPEC run. +!> This also serves to check after the run if SPEC correctly understood the text-based input file. +subroutine mirror_input_to_outfile + + use inputlist + use allglobal , only : myid, Mvol, skip_write + use h5utils + + integer(hid_t) :: dset_id + integer(hid_t) :: grpInput + integer(hid_t) :: grpInputPhysics, grpInputNumerics, grpInputLocal, grpInputGlobal, grpInputDiagnostics + + if (myid.eq.0 .and. .not.skip_write) then + + call HDEFGRP( file_id, "input", grpInput ) + call H5DESCR( grpInput, "group for mirrored input data") + +! the following variables constitute the namelist/physicslist/; note that all variables in namelist need to be broadcasted in readin; +! they go into ext.h5/input/physics + + ! the calls used here work as follows: + ! step 1. HWRITEIV_LO e.g. write(s an) i(nteger) v(ariable) and l(eaves) o(pen) the dataset, so that in + ! step 2a. an attribute with descr(iptive) information can be attached to the dataset and finally, in + ! step 2b. the attribute is closed and also we c(lose the) d(ata)set. 
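A concrete instance of the three-step pattern described in the comment above, taken from the calls that follow (HWRITEIV_LO and H5DESCR_CDSET are the helpers provided by the new h5utils module):

    call HWRITEIV_LO( grpInputPhysics, "Igeometry", 1, (/ Igeometry /), dset_id )  ! step 1: write the integer, leave the dataset open
    call H5DESCR_CDSET( dset_id, "geometry identifier" )                           ! steps 2a/2b: attach the description attribute, then close the dataset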
+ + call HDEFGRP( grpInput, "physics", grpInputPhysics) + call H5DESCR( grpInputPhysics, "physics inputs") + + call HWRITEIV_LO( grpInputPhysics, "Igeometry" , 1, (/ Igeometry /), dset_id) + call H5DESCR_CDSET(dset_id, "geometry identifier") + call HWRITEIV_LO( grpInputPhysics, "Istellsym" , 1, (/ Istellsym /), dset_id) + call H5DESCR_CDSET( dset_id, "stellarator symmetry flag") + call HWRITEIV_LO( grpInputPhysics, "Lfreebound" , 1, (/ Lfreebound /),dset_id) + call H5DESCR_CDSET( dset_id, "free boundary flag") + call HWRITERV_LO( grpInputPhysics, "phiedge" , 1, (/ phiedge /),dset_id) + call H5DESCR_CDSET( dset_id, "total enclosed toroidal magnetic flux") + call HWRITERV_LO( grpInputPhysics, "curtor" , 1, (/ curtor /),dset_id) + call H5DESCR_CDSET( dset_id, "total enclosed toroidal current") + call HWRITERV_LO( grpInputPhysics, "curpol" , 1,(/ curpol /),dset_id) + call H5DESCR_CDSET( dset_id, "total enclosed poloidal current") + call HWRITERV_LO( grpInputPhysics, "gamma" , 1, (/ gamma /),dset_id) + call H5DESCR_CDSET( dset_id, "adiabatic index") + call HWRITEIV_LO( grpInputPhysics, "Nfp" , 1, (/ Nfp /),dset_id) + call H5DESCR_CDSET( dset_id, "number of stellarator field periods") + call HWRITEIV_LO( grpInputPhysics, "Nvol" , 1, (/ Nvol /),dset_id) + call H5DESCR_CDSET( dset_id, "number of volumes") + call HWRITEIV_LO( grpInputPhysics, "Mpol" , 1, (/ Mpol /),dset_id) + call H5DESCR_CDSET( dset_id, "maximum poloidal mode number") + call HWRITEIV_LO( grpInputPhysics, "Ntor" , 1, (/ Ntor /),dset_id) + call H5DESCR_CDSET( dset_id, "maximum toroidal mode number") + call HWRITEIV_LO( grpInputPhysics, "Lrad" , Mvol, Lrad(1:Mvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "degree of radial Chebychev polynomials") + call HWRITEIV_LO( grpInputPhysics, "Lconstraint", 1, (/ Lconstraint /),dset_id) + call H5DESCR_CDSET( dset_id, "type of constraint to enforce") + call HWRITEIV_LO( grpInputPhysics, "Lreflect", 1, (/ Lreflect /),dset_id) + call H5DESCR_CDSET( dset_id, "whether to reflect the perturbation on both boundaries for slab geometry") + call HWRITERV_LO( grpInputPhysics, "tflux" , Mvol, tflux(1:Mvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "toroidal magnetic flux in volumes") + call HWRITERV_LO( grpInputPhysics, "pflux" , Mvol, pflux(1:Mvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "poloidal magnetic flux in volumes") + call HWRITERV_LO( grpInputPhysics, "helicity" , Nvol, helicity(1:Nvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "helicity profile") + call HWRITERV_LO( grpInputPhysics, "pscale" , 1, (/ pscale /),dset_id) + call H5DESCR_CDSET( dset_id, "scaling factor for pressure") + call HWRITERV_LO( grpInputPhysics, "pressure" , Nvol, pressure(1:Nvol), dset_id) + call H5DESCR_CDSET( dset_id, "pressure profile") + call HWRITEIV_LO( grpInputPhysics, "Ladiabatic" , 1, (/ Ladiabatic /),dset_id) + call H5DESCR_CDSET( dset_id, "adiabatic flag") + call HWRITERV_LO( grpInputPhysics, "adiabatic" , Mvol, adiabatic(1:Nvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "adiabatic profile (?)") + call HWRITERV_LO( grpInputPhysics, "mu" , (1+Nvol), mu(1:Mvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "Beltrami parameter, parallel current profile") + call HWRITERV_LO( grpInputPhysics, "Ivolume" ,(1+Nvol), Ivolume(1:Mvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "Volume current, externally driven, parallel current profile") + call HWRITERV_LO( grpInputPhysics, "Isurf" , (Mvol ), Isurf(1:Mvol ) ,dset_id) + call H5DESCR_CDSET( dset_id, "Surface current, currents that are not volume currents (pressure driven, shielding 
currents)") + call HWRITEIV_LO( grpInputPhysics, "pl" ,(1+Mvol), pl(0:Nvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "pl ?") + call HWRITEIV_LO( grpInputPhysics, "ql" , (1+Mvol), ql(0:Nvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "ql ?") + call HWRITEIV_LO( grpInputPhysics, "pr" , (1+Mvol), pr(0:Nvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "pr ?") + call HWRITEIV_LO( grpInputPhysics, "qr" , (1+Mvol), qr(0:Nvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "qr ?") + call HWRITERV_LO( grpInputPhysics, "iota" , (1+Nvol), iota(0:Nvol) ,dset_id) + call H5DESCR_CDSET( dset_id, "rotational transform profile on inside of ideal interfaces") + call HWRITEIV_LO( grpInputPhysics, "lp" , (1+Mvol), lp(0:Nvol) , dset_id) + call H5DESCR_CDSET( dset_id, "lp ?") + call HWRITEIV_LO( grpInputPhysics, "lq" , (1+Mvol), lq(0:Nvol) , dset_id) + call H5DESCR_CDSET( dset_id, "lq ?") + call HWRITEIV_LO( grpInputPhysics, "rp" , (1+Mvol), rp(0:Nvol) , dset_id) + call H5DESCR_CDSET( dset_id, "rp ?") + call HWRITEIV_LO( grpInputPhysics, "rq" , (1+Mvol), rq(0:Nvol) , dset_id) + call H5DESCR_CDSET( dset_id, "rq ?") + call HWRITERV_LO( grpInputPhysics, "oita" , (1+Nvol), oita(0:Nvol) , dset_id) + call H5DESCR_CDSET( dset_id, "rotational transform profile on outside of ideal interfaces") + call HWRITERV_LO( grpInputPhysics, "rtor" , 1, (/ rtor /), dset_id) + call H5DESCR_CDSET( dset_id, "for aspect ratio in slab") + call HWRITERV_LO( grpInputPhysics, "rpol" , 1, (/ rpol /), dset_id) + call H5DESCR_CDSET( dset_id, "for aspect ratio in slab") + + call HWRITERV_LO( grpInputPhysics, "Rac" , (1+Ntor), Rac(0:Ntor) , dset_id) + call H5DESCR_CDSET( dset_id, "stellarator symmetric coordinate axis R cosine Fourier coefficients") + call HWRITERV_LO( grpInputPhysics, "Zas" , (1+Ntor), Zas(0:Ntor) , dset_id) + call H5DESCR_CDSET( dset_id, "stellarator symmetric coordinate axis Z sine Fourier coefficients") + call HWRITERV_LO( grpInputPhysics, "Ras" , (1+Ntor), Ras(0:Ntor) , dset_id) + call H5DESCR_CDSET( dset_id, "non-stellarator symmetric coordinate axis R sine Fourier coefficients") + call HWRITERV_LO( grpInputPhysics, "Zac" , (1+Ntor), Zac(0:Ntor) , dset_id) + call H5DESCR_CDSET( dset_id, "non-stellarator symmetric coordinate axis Z cosine Fourier coefficients") + + call HWRITERA_LO( grpInputPhysics, "Rbc", (2*Ntor+1), (2*Mpol+1), Rbc(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "stellarator symmetric boundary R cosine Fourier coefficients") + call HWRITERA_LO( grpInputPhysics, "Zbs", (2*Ntor+1), (2*Mpol+1), Zbs(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "stellarator symmetric boundary Z sine Fourier coefficients") + call HWRITERA_LO( grpInputPhysics, "Rbs", (2*Ntor+1), (2*Mpol+1), Rbs(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "non-stellarator symmetric boundary R sine Fourier coefficients") + call HWRITERA_LO( grpInputPhysics, "Zbc", (2*Ntor+1), (2*Mpol+1), Zbc(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "non-stellarator symmetric boundary Z cosine Fourier coefficients") + + call HWRITERA_LO( grpInputPhysics, "Rwc", (2*Ntor+1), (2*Mpol+1), Rwc(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "stellarator symmetric boundary R cosine Fourier coefficients of wall") + call HWRITERA_LO( grpInputPhysics, "Zws", (2*Ntor+1), (2*Mpol+1), Zws(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "stellarator symmetric boundary Z sine Fourier coefficients of wall") + call HWRITERA_LO( grpInputPhysics, "Rws", (2*Ntor+1), (2*Mpol+1), 
Rws(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "non-stellarator symmetric boundary R sine Fourier coefficients of wall") + call HWRITERA_LO( grpInputPhysics, "Zwc", (2*Ntor+1), (2*Mpol+1), Zwc(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "non-stellarator symmetric boundary Z cosine Fourier coefficients of wall") + + call HWRITERA_LO( grpInputPhysics, "Vns", (2*Ntor+1), (2*Mpol+1), Vns(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "stellarator symmetric normal field sine Fourier coefficients at boundary; vacuum component") + call HWRITERA_LO( grpInputPhysics, "Bns", (2*Ntor+1), (2*Mpol+1), Bns(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "stellarator symmetric normal field sine Fourier coefficients at boundary; plasma component") + call HWRITERA_LO( grpInputPhysics, "Vnc", (2*Ntor+1), (2*Mpol+1), Vnc(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "non-stellarator symmetric normal field cosine Fourier coefficients at boundary; vacuum component") + call HWRITERA_LO( grpInputPhysics, "Bnc", (2*Ntor+1), (2*Mpol+1), Bnc(-Ntor:Ntor,-Mpol:Mpol), dset_id) + call H5DESCR_CDSET( dset_id, "non-stellarator symmetric normal field cosine Fourier coefficients at boundary; plasma component") + + call HWRITERV_LO( grpInputPhysics, "mupftol", 1, (/ mupftol /), dset_id) + call H5DESCR_CDSET( dset_id, "mupftol") + call HWRITEIV_LO( grpInputPhysics, "mupfits", 1, (/ mupfits /), dset_id) + call H5DESCR_CDSET( dset_id, "mupfits") + + call HCLOSEGRP( grpInputPhysics) + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +! the following variables constitute the namelist/numericlist/; note that all variables in namelist need to be broadcasted in readin; +! they go into ext.h5/input/numerics + + call HDEFGRP( grpInput, "numerics", grpInputNumerics) + + call HWRITEIV( grpInputNumerics, "Linitialize" , 1, (/ Linitialize /)) + call HWRITEIV( grpInputNumerics, "Lzerovac" , 1, (/ Lzerovac /)) + call HWRITEIV( grpInputNumerics, "Ndiscrete" , 1, (/ Ndiscrete /)) + call HWRITEIV( grpInputNumerics, "Nquad" , 1, (/ Nquad /)) + call HWRITEIV( grpInputNumerics, "iMpol" , 1, (/ iMpol /)) + call HWRITEIV( grpInputNumerics, "iNtor" , 1, (/ iNtor /)) + call HWRITEIV( grpInputNumerics, "Lsparse" , 1, (/ Lsparse /)) + call HWRITEIV( grpInputNumerics, "Lsvdiota" , 1, (/ Lsvdiota /)) + call HWRITEIV( grpInputNumerics, "imethod" , 1, (/ imethod /)) + call HWRITEIV( grpInputNumerics, "iorder" , 1, (/ iorder /)) + call HWRITEIV( grpInputNumerics, "iprecon" , 1, (/ iprecon /)) + call HWRITERV( grpInputNumerics, "iotatol" , 1, (/ iotatol /)) + call HWRITEIV( grpInputNumerics, "Lextrap" , 1, (/ Lextrap /)) + call HWRITEIV( grpInputNumerics, "Mregular" , 1, (/ Mregular /)) + call HWRITEIV( grpInputNumerics, "Lrzaxis" , 1, (/ Lrzaxis /)) + call HWRITEIV( grpInputNumerics, "Ntoraxis" , 1, (/ Ntoraxis /)) + + call HCLOSEGRP( grpInputNumerics) + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +! the following variables constitute the namelist/locallist/; note that all variables in namelist need to be broadcasted in readin; +! 
they go into ext.h5/input/local + + call HDEFGRP( grpInput, "local", grpInputLocal ) + + call HWRITEIV( grpInputLocal, "LBeltrami" , 1, (/ LBeltrami /)) + call HWRITEIV( grpInputLocal, "Linitgues" , 1, (/ Linitgues /)) + call HWRITEIV( grpInputLocal, "Lposdef" , 1, (/ Lposdef /)) ! redundant; + call HWRITERV( grpInputLocal, "maxrndgues" , 1, (/ maxrndgues /)) + call HWRITEIV( grpInputLocal, "Lmatsolver" , 1, (/ Lmatsolver /)) + call HWRITEIV( grpInputLocal, "LGMRESprec" , 1, (/ LGMRESprec /)) + call HWRITERV( grpInputLocal, "epsGMRES" , 1, (/ epsGMRES /)) + call HWRITERV( grpInputLocal, "epsILU" , 1, (/ epsILU /)) + + call HCLOSEGRP( grpInputLocal ) + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +! the following variables constitute the namelist/globallist/; note that all variables in namelist need to be broadcasted in readin; +! they go into ext.h5/input/global + + call HDEFGRP( grpInput, "global", grpInputGlobal ) + + call HWRITEIV( grpInputGlobal, "Lfindzero" , 1, (/ Lfindzero /)) + call HWRITERV( grpInputGlobal, "escale" , 1, (/ escale /)) + call HWRITERV( grpInputGlobal, "opsilon" , 1, (/ opsilon /)) + call HWRITERV( grpInputGlobal, "pcondense" , 1, (/ pcondense /)) + call HWRITERV( grpInputGlobal, "epsilon" , 1, (/ epsilon /)) + call HWRITERV( grpInputGlobal, "wpoloidal" , 1, (/ wpoloidal /)) + call HWRITERV( grpInputGlobal, "upsilon" , 1, (/ upsilon /)) + call HWRITERV( grpInputGlobal, "forcetol" , 1, (/ forcetol /)) + call HWRITERV( grpInputGlobal, "c05xmax" , 1, (/ c05xmax /)) + call HWRITERV( grpInputGlobal, "c05xtol" , 1, (/ c05xtol /)) + call HWRITERV( grpInputGlobal, "c05factor" , 1, (/ c05factor /)) + call HWRITELV( grpInputGlobal, "LreadGF" , 1, (/ LreadGF /)) + call HWRITEIV( grpInputGlobal, "mfreeits" , 1, (/ mfreeits /)) + call HWRITERV( grpInputGlobal, "bnstol" , 1, (/ bnstol /)) ! redundant; + call HWRITERV( grpInputGlobal, "bnsblend" , 1, (/ bnsblend /)) ! redundant; + call HWRITERV( grpInputGlobal, "gBntol" , 1, (/ gBntol /)) + call HWRITERV( grpInputGlobal, "gBnbld" , 1, (/ gBnbld /)) + call HWRITERV( grpInputGlobal, "vcasingeps" , 1, (/ vcasingeps /)) + call HWRITERV( grpInputGlobal, "vcasingtol" , 1, (/ vcasingtol /)) + call HWRITEIV( grpInputGlobal, "vcasingits" , 1, (/ vcasingits /)) + call HWRITEIV( grpInputGlobal, "vcasingper" , 1, (/ vcasingper /)) + call HWRITEIV( grpInputGlobal, "mcasingcal" , 1, (/ mcasingcal /)) ! redundant; + + call HCLOSEGRP( grpInputGlobal ) + +!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! + +! the following variables constitute the namelist/diagnosticslist/; note that all variables in namelist need to be broadcasted in readin; +! they go into ext.h5/input/diagnostics + + call HDEFGRP( grpInput, "diagnostics", grpInputDiagnostics ) + + call HWRITERV( grpInputDiagnostics, "odetol" , 1, (/ odetol /)) + call HWRITERV( grpInputDiagnostics, "absreq" , 1, (/ absreq /)) ! redundant; + call HWRITERV( grpInputDiagnostics, "relreq" , 1, (/ relreq /)) ! redundant; + call HWRITERV( grpInputDiagnostics, "absacc" , 1, (/ absacc /)) ! redundant; + call HWRITERV( grpInputDiagnostics, "epsr" , 1, (/ epsr /)) ! 
redundant; + call HWRITEIV( grpInputDiagnostics, "nPpts" , 1, (/ nPpts /)) + call HWRITERV( grpInputDiagnostics, "Ppts" , 1, (/ Ppts /)) + call HWRITEIV( grpInputDiagnostics, "nPtrj" , Mvol, nPtrj(1:Mvol) ) + call HWRITELV( grpInputDiagnostics, "LHevalues" , 1, (/ LHevalues /)) + call HWRITELV( grpInputDiagnostics, "LHevectors" , 1, (/ LHevectors /)) + call HWRITELV( grpInputDiagnostics, "LHmatrix" , 1, (/ LHmatrix /)) + call HWRITEIV( grpInputDiagnostics, "Lperturbed" , 1, (/ Lperturbed /)) + call HWRITEIV( grpInputDiagnostics, "dpp" , 1, (/ dpp /)) + call HWRITEIV( grpInputDiagnostics, "dqq" , 1, (/ dqq /)) + call HWRITEIV( grpInputDiagnostics, "Lcheck" , 1, (/ Lcheck /)) + call HWRITELV( grpInputDiagnostics, "Ltiming" , 1, (/ Ltiming /)) + call HWRITEIV( grpInputDiagnostics, "Lerrortype" , 1, (/ Lerrortype /)) + call HWRITEIV( grpInputDiagnostics, "Ngrid" , 1, (/ Ngrid /)) + call HWRITERV( grpInputDiagnostics, "fudge" , 1, (/ fudge /)) ! redundant; + call HWRITERV( grpInputDiagnostics, "scaling" , 1, (/ scaling /)) ! redundant; + + call HCLOSEGRP( grpInputDiagnostics ) + + call HCLOSEGRP( grpInput ) + + endif ! myid.eq.0 + +end subroutine mirror_input_to_outfile + +!> \brief Prepare convergence evolution output. +!> \ingroup grp_output +!> +!>
      +!>
    • The group \c iterations is created in the output file. +!> This group contains the interface geometry at each iteration, which is useful for constructing movies illustrating the convergence. +!> The data structure in use is an unlimited array of the following compound datatype: +!> ```C +!> DATATYPE H5T_COMPOUND { +!> H5T_NATIVE_INTEGER "nDcalls"; +!> H5T_NATIVE_DOUBLE "Energy"; +!> H5T_NATIVE_DOUBLE "ForceErr"; +!> H5T_ARRAY { [Mvol+1][mn] H5T_NATIVE_DOUBLE } "iRbc"; +!> H5T_ARRAY { [Mvol+1][mn] H5T_NATIVE_DOUBLE } "iZbs"; +!> H5T_ARRAY { [Mvol+1][mn] H5T_NATIVE_DOUBLE } "iRbs"; +!> H5T_ARRAY { [Mvol+1][mn] H5T_NATIVE_DOUBLE } "iZbc"; +!> } +!> ``` +!>
+!>
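For the reading side, a stand-alone sketch along the following lines could report how many entries were appended to this unlimited dataset (not part of this change; the file name `example.sp.h5` is a placeholder):

```fortran
program count_iterations
  use hdf5
  implicit none
  integer(hid_t)   :: file_id, dset_id, space_id
  integer(hsize_t) :: dims(1), maxdims(1)
  integer          :: hdfier

  call h5open_f(hdfier)                                               ! initialize the HDF5 Fortran interface
  call h5fopen_f("example.sp.h5", H5F_ACC_RDONLY_F, file_id, hdfier)  ! open an existing SPEC output file read-only
  call h5dopen_f(file_id, "iterations", dset_id, hdfier)              ! the unlimited compound dataset
  call h5dget_space_f(dset_id, space_id, hdfier)
  call h5sget_simple_extent_dims_f(space_id, dims, maxdims, hdfier)   ! on success hdfier holds the rank (here 1)
  write(*,'("number of logged iterations : ",i0)') dims(1)
  call h5sclose_f(space_id, hdfier)
  call h5dclose_f(dset_id, hdfier)
  call h5fclose_f(file_id, hdfier)
  call h5close_f(hdfier)
end program count_iterations
```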
    +subroutine init_convergence_output + + use allglobal, only : mn, Mvol + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + integer(hid_t) :: iteration_dspace_id !< dataspace for "iteration" + integer(hid_t) :: iteration_dtype_id !< Compound datatype for "iteration" + integer(hid_t) :: iRZbscArray_id !< Memory datatype identifier + integer(size_t) :: iteration_dtype_size !< Size of the "iteration" datatype + integer(size_t) :: type_size_i !< Size of the integer datatype + integer(size_t) :: type_size_d !< Size of the double precision datatype + integer(size_t) :: offset !< Member's offset + integer(hid_t) :: crp_list !< Dataset creation property identifier + integer, parameter :: rank = 1 !< logging rank: convergence logging is one-dimensional + integer(hsize_t), dimension(rank) :: maxdims !< convergence logging maximum dimensions => will be unlimited + integer(hsize_t), dimension(rank) :: dims = (/ 0 /) !< current convergence logging dimensions + integer(hsize_t), dimension(rank) :: dimsc = (/ 1 /) !< chunking length ??? + integer(size_t) :: irbc_size_template !< size ofiRbc array in iterations logging + integer(size_t) :: irbc_size !< size ofiRbc array in iterations logging + + integer :: hdfier !< error flag for HDF5 library + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + if (myid.eq.0 .and. .not.skip_write) then + + ! Set dataset transfer property to preserve partially initialized fields + ! during write/read to/from dataset with compound datatype. + call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdfier) + + call h5pset_preserve_f(plist_id, .TRUE., hdfier) + + maxdims = (/ H5S_UNLIMITED_F /) ! unlimited array size: "converge" until you get bored + call h5screate_simple_f(rank, dims, iteration_dspace_id, hdfier, maxdims) ! Create the dataspace with zero initial size and allow it to grow + + call h5pcreate_f(H5P_DATASET_CREATE_F, crp_list, hdfier) ! dataset creation property list with chunking + + call h5pset_chunk_f(crp_list, rank, dimsc, hdfier) + + ! declare "iteration" compound datatype + ! declare array parts + call h5tarray_create_f(H5T_NATIVE_DOUBLE, 2, int((/mn, Mvol+1/),hsize_t), iRZbscArray_id, hdfier) ! create array datatypes for i{R,Z}b{c,s} + call h5tget_size_f(iRZbscArray_id, irbc_size, hdfier) + call h5tget_size_f(H5T_NATIVE_INTEGER, type_size_i, hdfier) ! size of an integer field + call h5tget_size_f(H5T_NATIVE_DOUBLE, type_size_d, hdfier) ! size of a double field + iteration_dtype_size = 2*type_size_i + 2*type_size_d + 4*irbc_size ! wflag, nDcalls, Energy, ForceErr, i{R,Z}b{c,s} + + call h5tcreate_f(H5T_COMPOUND_F, iteration_dtype_size, iteration_dtype_id, hdfier) ! create compound datatype + + offset = 0 ! offset for first field starts at 0 + + call h5tinsert_f(iteration_dtype_id, "nDcalls", offset, H5T_NATIVE_INTEGER, hdfier) ! insert "nDcalls" field in datatype + offset = offset + type_size_i ! increment offset by size of field + + call h5tinsert_f(iteration_dtype_id, "Energy", offset, H5T_NATIVE_DOUBLE, hdfier) ! insert "Energy" field in datatype + offset = offset + type_size_d ! increment offset by size of field + + call h5tinsert_f(iteration_dtype_id, "ForceErr", offset, H5T_NATIVE_DOUBLE, hdfier) ! insert "ForceErr" field in datatype + offset = offset + type_size_d ! 
increment offset by size of field + + call h5tinsert_f(iteration_dtype_id, "iRbc", offset, iRZbscArray_id, hdfier) ! insert "iRbc" field in datatype + offset = offset + irbc_size ! increment offset by size of field + + call h5tinsert_f(iteration_dtype_id, "iZbs", offset, iRZbscArray_id, hdfier) ! insert "iZbs" field in datatype + offset = offset + irbc_size ! increment offset by size of field + + call h5tinsert_f(iteration_dtype_id, "iRbs", offset, iRZbscArray_id, hdfier) ! insert "iRbs" field in datatype + offset = offset + irbc_size ! increment offset by size of field + + call h5tinsert_f(iteration_dtype_id, "iZbc", offset, iRZbscArray_id, hdfier) ! insert "iZbc" field in datatype + offset = offset + irbc_size ! increment offset by size of field + + call h5dcreate_f(file_id, "iterations", iteration_dtype_id, iteration_dspace_id, & ! create dataset with compound type + & iteration_dset_id, hdfier, crp_list) + + call h5sclose_f(iteration_dspace_id, hdfier) ! Terminate access to the data space (does not show up in obj_count below) + ! --> only needed for creation of dataset + + ! Create memory types. We have to create a compound datatype + ! for each member we want to write. + call h5tcreate_f(H5T_COMPOUND_F, type_size_i, dt_nDcalls_id, hdfier) + call h5tcreate_f(H5T_COMPOUND_F, type_size_d, dt_Energy_id, hdfier) + call h5tcreate_f(H5T_COMPOUND_F, type_size_d, dt_ForceErr_id, hdfier) + call h5tcreate_f(H5T_COMPOUND_F, irbc_size, dt_iRbc_id, hdfier) + call h5tcreate_f(H5T_COMPOUND_F, irbc_size, dt_iZbs_id, hdfier) + call h5tcreate_f(H5T_COMPOUND_F, irbc_size, dt_iRbs_id, hdfier) + call h5tcreate_f(H5T_COMPOUND_F, irbc_size, dt_iZbc_id, hdfier) + + offset = 0 + call h5tinsert_f(dt_nDcalls_id, "nDcalls", offset, H5T_NATIVE_INTEGER, hdfier) + call h5tinsert_f(dt_Energy_id, "Energy", offset, H5T_NATIVE_DOUBLE, hdfier) + call h5tinsert_f(dt_ForceErr_id, "ForceErr", offset, H5T_NATIVE_DOUBLE, hdfier) + call h5tinsert_f(dt_iRbc_id, "iRbc", offset, iRZbscArray_id, hdfier) + call h5tinsert_f(dt_iZbs_id, "iZbs", offset, iRZbscArray_id, hdfier) + call h5tinsert_f(dt_iRbs_id, "iRbs", offset, iRZbscArray_id, hdfier) + call h5tinsert_f(dt_iZbc_id, "iZbc", offset, iRZbscArray_id, hdfier) + + ! create memspace with size of compound object to append + dims(1) = 1 ! only append one iteration at a time + call h5screate_simple_f(rank, dims, memspace, hdfier) + + call h5pclose_f(crp_list, hdfier) + call h5tclose_f(iteration_dtype_id, hdfier) ! Terminate access to the datatype + call h5tclose_f(iRZbscArray_id, hdfier) ! Terminate access to the datatype + + endif ! myid.eq.0 + +end subroutine init_convergence_output + + +!> \brief Write convergence output (evolution of interface geometry, force, etc). +!> \ingroup grp_output +!> +subroutine write_convergence_output( nDcalls, ForceErr ) + + use allglobal, only : myid, mn, Mvol, Energy, iRbc, iZbs, iRbs, iZbc, MPI_COMM_SPEC + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + integer :: hdfier !< error flag for HDF5 library + + integer, intent(in) :: nDcalls + real(wp) , intent(in) :: ForceErr + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + if (myid.eq.0 .and. .not.skip_write) then + + ! append updated values to "iterations" dataset + + ! open dataspace to get current state of dataset + call h5dget_space_f(iteration_dset_id, dataspace, hdfier) + + ! 
get current size of dataset + call h5sget_simple_extent_dims_f(dataspace, old_data_dims, max_dims, hdfier) + + if( hdfier.ne.1 ) then + write(6,'("sphdf5 : fatal : myid=",i3," ; hdfier.ne.1 ; rank of convergence dataspace is not 1 ;")') myid +! call MPI_ABORT(MPI_COMM_SPEC, 1) ! TODO: get this to compile again... + stop "sphdf5 : hdfier.ne.1 : rank of convergence dataspace is not 1 ;" + endif + + + ! blow up dataset to new size + data_dims = old_data_dims+1 + call h5dset_extent_f(iteration_dset_id, data_dims, hdfier) + + ! get dataspace slab corresponding to region which the iterations dataset was extended by + call h5dget_space_f(iteration_dset_id, dataspace, hdfier) ! re-select dataspace to update size info in HDF5 lib + call h5sselect_hyperslab_f(dataspace, H5S_SELECT_SET_F, old_data_dims, (/ INT(1, HSIZE_T) /), hdfier) ! newly appended slab is at old size and 1 long + + ! write next iteration object + call h5dwrite_f(iteration_dset_id, dt_nDcalls_id, nDcalls, INT((/1/), HSIZE_T), hdfier, & + mem_space_id=memspace, file_space_id=dataspace, xfer_prp=plist_id) + call h5dwrite_f(iteration_dset_id, dt_Energy_id, Energy, INT((/1/), HSIZE_T), hdfier, & + mem_space_id=memspace, file_space_id=dataspace, xfer_prp=plist_id) + call h5dwrite_f(iteration_dset_id, dt_ForceErr_id, ForceErr, INT((/1/), HSIZE_T), hdfier, & + mem_space_id=memspace, file_space_id=dataspace, xfer_prp=plist_id) + call h5dwrite_f(iteration_dset_id, dt_iRbc_id, iRbc, INT((/mn,Mvol+1/), HSIZE_T), hdfier, & + mem_space_id=memspace, file_space_id=dataspace, xfer_prp=plist_id) + call h5dwrite_f(iteration_dset_id, dt_iZbs_id, iZbs, INT((/mn,Mvol+1/), HSIZE_T), hdfier, & + mem_space_id=memspace, file_space_id=dataspace, xfer_prp=plist_id) + call h5dwrite_f(iteration_dset_id, dt_iRbs_id, iRbs, INT((/mn,Mvol+1/), HSIZE_T), hdfier, & + mem_space_id=memspace, file_space_id=dataspace, xfer_prp=plist_id) + call h5dwrite_f(iteration_dset_id, dt_iZbc_id, iZbc, INT((/mn,Mvol+1/), HSIZE_T), hdfier, & + mem_space_id=memspace, file_space_id=dataspace, xfer_prp=plist_id) + + ! dataspace to appended object should be closed now + ! MAYBE we otherwise keep all the iterations in memory? + call h5sclose_f(dataspace, hdfier) + + endif ! myid.eq.0 + +end subroutine write_convergence_output + +!> \brief Write the magnetic field on a grid. +!> \ingroup grp_output +!> +!> The magnetic field is evaluated on a regular grid in \f$(s, \theta, \zeta)\f$ +!> and the corresponding cylindrical coordinates \f$(R,Z)\f$ +!> as well as the cylindrical components of the magnetic field \f$(B^R, B^\varphi, B^Z)\f$ +!> are written out. +subroutine write_grid + + use constants + use allglobal, only : myid, ijreal, ijimag, jireal, & + & Nt, Nz, Ntz, Mvol, pi2nfp, ivol, mn, Node, gBzeta, & + & Lcoordinatesingularity, Lplasmaregion, Lvacuumregion, & + & Rij, Zij, sg + use inputlist, only : Lrad, Igeometry, Nvol, Ngrid, rtor, rpol + use cputiming, only : Tsphdf5 + + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + integer(hid_t) :: grpGrid + integer :: sumLrad, alongLrad, Ngrid_local, Ngrid_sum + integer :: vvol, ii, jj, kk, jk, Lcurvature + real(wp) :: lss, teta, zeta, st(1:Node), Bst(1:Node) + real(wp) , allocatable :: Rij_grid(:,:), Zij_grid(:,:), sg_grid(:,:), ijreal_grid(:,:), ijimag_grid(:,:), jireal_grid(:,:) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + if (myid.eq.0 .and. .not.skip_write) then + + ijreal(1:Ntz) = zero + ijimag(1:Ntz) = zero + jireal(1:Ntz) = zero + + call HDEFGRP( file_id, "grid", grpGrid ) + + ! Igeometry already is in input, Mvol already is in output + call HWRITEIV( grpGrid, "Nt" , 1, (/ Nt /)) + call HWRITEIV( grpGrid, "Nz" , 1, (/ Nz /)) + call HWRITEIV( grpGrid, "Ntz" , 1, (/ Ntz /)) + call HWRITERV( grpGrid, "pi2nfp" , 1, (/ pi2nfp /)) + + ! combine all radial parts into one dimension as Lrad values can be different for different volumes + if (Ngrid .lt. 0) then + sumLrad = sum(Lrad(1:Mvol)+1) + else + sumLrad = (Ngrid + 1) * Mvol + endif + + + allocate( Rij_grid(1:sumLrad, 1:Ntz), stat=astat ) + Rij_grid(1:sumLrad, 1:Ntz) = zero + + allocate( Zij_grid(1:sumLrad, 1:Ntz), stat=astat ) + Zij_grid(1:sumLrad, 1:Ntz) = zero + + allocate( sg_grid(1:sumLrad, 1:Ntz), stat=astat ) + sg_grid(1:sumLrad, 1:Ntz) = zero + + allocate( ijreal_grid(1:sumLrad, 1:Ntz), stat=astat ) + ijreal_grid(1:sumLrad, 1:Ntz) = zero + + allocate( ijimag_grid(1:sumLrad, 1:Ntz), stat=astat ) + ijimag_grid(1:sumLrad, 1:Ntz) = zero + + allocate( jireal_grid(1:sumLrad, 1:Ntz), stat=astat ) + jireal_grid(1:sumLrad, 1:Ntz) = zero + + Ngrid_sum = 0 + + do vvol = 1, Mvol + ivol = vvol + + if( Igeometry.eq.1 .or. vvol.gt.1 ) then + Lcoordinatesingularity = .false. + else + Lcoordinatesingularity = .true. + endif + + if( vvol.le.Nvol ) then + Lplasmaregion = .true. + else + Lplasmaregion = .false. + endif + + Lvacuumregion = .not.Lplasmaregion + ! sets Lcoordinatesingularity and Lplasmaregion ; + + if (Ngrid .lt. 0) then + Ngrid_local = Lrad(vvol) ! default + else + Ngrid_local = Ngrid + endif + if (Ngrid_local .eq. 0) cycle ! nothing to output + + do ii = 0, Ngrid_local ! sub-grid; + lss = ii * two / Ngrid_local - one + if( Lcoordinatesingularity .and. ii.eq.0 ) then + Lcurvature = 0 ! Jacobian is not defined; + else + Lcurvature = 1 ! compute Jacobian ; + endif + + cput = MPI_WTIME() + Tsphdf5 = Tsphdf5 + ( cput-cpuo ) + call coords( vvol, lss, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + ! only Rij(0,:) and Zij(0,:) are required; Rmn & Zmn are available; + + alongLrad = Ngrid_sum+ii+1 + + Rij_grid(alongLrad,1:Ntz) = Rij(1:Ntz,0,0) + Zij_grid(alongLrad,1:Ntz) = Zij(1:Ntz,0,0) + sg_grid (alongLrad,1:Ntz) = sg(1:Ntz,0) + + if( Lcurvature.eq.1 ) then + + select case (Igeometry) + + case (3) + do kk = 0, Nz-1 ; zeta = kk * pi2nfp / Nz + do jj = 0, Nt-1 ; teta = jj * pi2 / Nt ; jk = 1 + jj + kk*Nt ; st(1:2) = (/ lss, teta /) + + cput = MPI_WTIME() + Tsphdf5 = Tsphdf5 + ( cput-cpuo ) + call bfield( zeta, st(1:Node), Bst(1:Node) ) + cpuo = MPI_WTIME() + + ijreal(jk) = ( Rij(jk,1,0) * Bst(1) + Rij(jk,2,0) * Bst(2) + Rij(jk,3,0) * one ) * gBzeta / sg(jk,0) ! BR; + ijimag(jk) = ( one ) * gBzeta / sg(jk,0) ! Bp; + jireal(jk) = ( Zij(jk,1,0) * Bst(1) + Zij(jk,2,0) * Bst(2) + Zij(jk,3,0) * one ) * gBzeta / sg(jk,0) ! 
BZ; + enddo + enddo + + case (1) + do kk = 0, Nz-1 ; zeta = kk * pi2nfp / Nz + do jj = 0, Nt-1 ; teta = jj * pi2 / Nt ; jk = 1 + jj + kk*Nt ; st(1:2) = (/ lss, teta /) + + cput = MPI_WTIME() + Tsphdf5 = Tsphdf5 + ( cput-cpuo ) + call bfield( zeta, st(1:Node), Bst(1:Node) ) + cpuo = MPI_WTIME() + + ijreal(jk) = ( Rij(jk,1,0) * Bst(1) + Rij(jk,2,0) * Bst(2) + Rij(jk,3,0) * one ) * gBzeta / sg(jk,0) ! BR; + ijimag(jk) = ( rpol ) * gBzeta / sg(jk,0) ! Bzeta; + jireal(jk) = ( + rtor * Bst(2) ) * gBzeta / sg(jk,0) ! Btheta; + enddo + enddo + + case (2) + do kk = 0, Nz-1 ; zeta = kk * pi2nfp / Nz + do jj = 0, Nt-1 ; teta = jj * pi2 / Nt ; jk = 1 + jj + kk*Nt ; st(1:2) = (/ lss, teta /) + + cput = MPI_WTIME() + Tsphdf5 = Tsphdf5 + ( cput-cpuo ) + call bfield( zeta, st(1:Node), Bst(1:Node) ) + cpuo = MPI_WTIME() + + ijreal(jk) = ( Rij(jk,1,0) * Bst(1) + Rij(jk,2,0) * Bst(2) + Rij(jk,3,0) * one ) * gBzeta / sg(jk,0) ! BR; + ijimag(jk) = ( one ) * gBzeta / sg(jk,0) ! Bp; + jireal(jk) = ( Bst(2) ) * gBzeta / sg(jk,0) ! BZ; + enddo + enddo + + end select !Igeometry + endif ! end of if( Lcurvature.eq.1 ) ; + + ijreal_grid(alongLrad,1:Ntz) = ijreal(1:Ntz) + ijimag_grid(alongLrad,1:Ntz) = ijimag(1:Ntz) + jireal_grid(alongLrad,1:Ntz) = jireal(1:Ntz) + + enddo ! end of do ii; + + Ngrid_sum = Ngrid_sum + Ngrid_local + 1 ! offset for storing data + + enddo ! end of do vvol; + + call HWRITERA( grpGrid, "Rij", sumLrad, Ntz, Rij_grid ) + call HWRITERA( grpGrid, "Zij", sumLrad, Ntz, Zij_grid ) + call HWRITERA( grpGrid, "sg", sumLrad, Ntz, sg_grid ) + call HWRITERA( grpGrid, "BR", sumLrad, Ntz, ijreal_grid ) + call HWRITERA( grpGrid, "Bp", sumLrad, Ntz, ijimag_grid ) + call HWRITERA( grpGrid, "BZ", sumLrad, Ntz, jireal_grid ) + + deallocate(Rij_grid ,stat=astat) + deallocate(Zij_grid ,stat=astat) + deallocate(sg_grid ,stat=astat) + deallocate(ijreal_grid ,stat=astat) + deallocate(ijimag_grid ,stat=astat) + deallocate(jireal_grid ,stat=astat) + + call HCLOSEGRP( grpGrid ) + + endif ! myid.eq.0 +end subroutine ! write_grid + +!> \brief Initialize field line tracing output group and create array datasets. +!> \ingroup grp_output +!> +!> The field-line tracing diagnostic is parallelized over volumes, +!> where all threads/ranks produce individual output. +!> This is gathered in the output file, stacked over the radial dimension. +!> The \c success flag signals if the integrator was successful in following +!> the fieldline for the derired number of toroidal periods. +!> +!> @param[in] numTrajTotal total number of Poincare trajectories +subroutine init_flt_output( numTrajTotal ) + + use allglobal, only : Nz, Mvol, lmns + use inputlist, only : nPpts + + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + integer, intent(in) :: numTrajTotal ! total number of trajectories + integer(HSIZE_T), dimension(rankP) :: dims_traj ! Dataset dimensions. + integer(HSIZE_T), dimension(rankP) :: length ! Dataset dimensions. + + integer :: hdfier !< error flag for HDF5 library + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + if (myid.eq.0 .and. .not.skip_write) then + + ! create Poincare group in HDF5 file + call HDEFGRP( file_id, "poincare", grpPoincare ) + + dims_traj = (/ Nz, nPpts, numTrajTotal /) ! dimensions for whole Poincare dataset + length = (/ Nz, nPpts, 1 /) ! which is written in these slice lengths + + ! 
Create the data space for the dataset. + call h5screate_simple_f(rankP, dims_traj, filespace_t, hdfier) + call h5screate_simple_f(rankP, dims_traj, filespace_s, hdfier) + call h5screate_simple_f(rankP, dims_traj, filespace_R, hdfier) + call h5screate_simple_f(rankP, dims_traj, filespace_Z, hdfier) + call h5screate_simple_f(1, int((/ numTrajTotal /),HSIZE_T), filespace_success, hdfier) + + ! Create the dataset with default properties. + call h5dcreate_f(grpPoincare, "t", H5T_NATIVE_DOUBLE, filespace_t, dset_id_t, hdfier) + call h5dcreate_f(grpPoincare, "s", H5T_NATIVE_DOUBLE, filespace_s, dset_id_s, hdfier) + call h5dcreate_f(grpPoincare, "R", H5T_NATIVE_DOUBLE, filespace_R, dset_id_R, hdfier) + call h5dcreate_f(grpPoincare, "Z", H5T_NATIVE_DOUBLE, filespace_Z, dset_id_Z, hdfier) + call h5dcreate_f(grpPoincare, "success", H5T_NATIVE_INTEGER, filespace_success, dset_id_success, hdfier) + + ! filespaces can be closed as soon as datasets are created + call h5sclose_f(filespace_t, hdfier) + call h5sclose_f(filespace_s, hdfier) + call h5sclose_f(filespace_R, hdfier) + call h5sclose_f(filespace_Z, hdfier) + call h5sclose_f(filespace_success, hdfier) + + ! Select hyperslab in the file. + call h5dget_space_f(dset_id_t, filespace_t, hdfier) + call h5dget_space_f(dset_id_s, filespace_s, hdfier) + call h5dget_space_f(dset_id_R, filespace_R, hdfier) + call h5dget_space_f(dset_id_Z, filespace_Z, hdfier) + call h5dget_space_f(dset_id_success, filespace_success, hdfier) + + ! Each process defines dataset in memory and writes it to the hyperslab in the file. + call h5screate_simple_f(rankP, length, memspace_t, hdfier) + call h5screate_simple_f(rankP, length, memspace_s, hdfier) + call h5screate_simple_f(rankP, length, memspace_R, hdfier) + call h5screate_simple_f(rankP, length, memspace_Z, hdfier) + call h5screate_simple_f(1, int((/ 1 /),HSIZE_T), memspace_success, hdfier) + + ! create rotational transform group in HDF5 file + call HDEFGRP( file_id, "transform", grpTransform ) + + ! Create the data space for the dataset. + call h5screate_simple_f(rankT, int((/ 2,Mvol/),HSIZE_T), filespace_diotadxup, hdfier) + call h5screate_simple_f(rankT, int((/numTrajTotal, 2/),HSIZE_T), filespace_fiota , hdfier) + + ! Create the dataset with default properties. + call h5dcreate_f(grpTransform, "diotadxup", H5T_NATIVE_DOUBLE, filespace_diotadxup, dset_id_diotadxup, hdfier) + call h5dcreate_f(grpTransform, "fiota", H5T_NATIVE_DOUBLE, filespace_fiota , dset_id_fiota , hdfier) + + ! filespaces can be closed as soon as datasets are created + call h5sclose_f(filespace_diotadxup, hdfier) + call h5sclose_f(filespace_fiota , hdfier) + + ! Select hyperslab in the file. + call h5dget_space_f(dset_id_diotadxup, filespace_diotadxup, hdfier) + call h5dget_space_f(dset_id_fiota , filespace_fiota , hdfier) + + ! Each process defines dataset in memory and writes it to the hyperslab in the file. + call h5screate_simple_f(rankT, int((/2,1/),HSIZE_T), memspace_diotadxup, hdfier) + + endif ! myid.eq.0 + +end subroutine init_flt_output + +!> \brief Write a hyperslab of Poincare data corresponding to the output of one parallel worker. 
+!> \ingroup grp_output +!> +!> @param offset radial offset at which the data belongs +!> @param data output from field-line tracing +!> @param success flags to indicate if integrator was successful +subroutine write_poincare( offset, data, success ) + + use allglobal, only : Nz + use inputlist, only : nPpts + + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + integer :: hdfier !< error flag for HDF5 library + + integer, intent(in) :: offset, success(:) + real(wp), intent(in) :: data(:,:,:) + integer(hsize_t), dimension(3) :: length + integer(HSIZE_T), dimension(2) :: dims_singleTraj ! dimensions of single trajectory data + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + if (myid.eq.0 .and. .not.skip_write) then + + dims_singleTraj = (/ Nz, nPpts /) + length = (/ Nz, nPpts, 1 /) + + ! On entry, Fortran does not know that indexing in data is from 0 to Nz-1. + ! Hence, use default indices 1:Nz in this routine + call h5sselect_hyperslab_f(filespace_t, H5S_SELECT_SET_F, int((/0,0,offset/),HSSIZE_T), length, hdfier) + call h5dwrite_f(dset_id_t, H5T_NATIVE_DOUBLE, data(1,1:Nz,1:nPpts), dims_singleTraj, hdfier, & + file_space_id=filespace_t, mem_space_id=memspace_t ) + + call h5sselect_hyperslab_f(filespace_s, H5S_SELECT_SET_F, int((/0,0,offset/),HSSIZE_T), length, hdfier) + call h5dwrite_f(dset_id_s, H5T_NATIVE_DOUBLE, data(2,1:Nz,1:nPpts), dims_singleTraj, hdfier, & + file_space_id=filespace_s, mem_space_id=memspace_s ) + + call h5sselect_hyperslab_f(filespace_R, H5S_SELECT_SET_F, int((/0,0,offset/),HSSIZE_T), length, hdfier) + call h5dwrite_f(dset_id_R, H5T_NATIVE_DOUBLE, data(3,1:Nz,1:nPpts), dims_singleTraj, hdfier, & + file_space_id=filespace_R, mem_space_id=memspace_R ) + + call h5sselect_hyperslab_f(filespace_Z, H5S_SELECT_SET_F, int((/0,0,offset/),HSSIZE_T), length, hdfier) + call h5dwrite_f(dset_id_Z, H5T_NATIVE_DOUBLE, data(4,1:Nz,1:nPpts), dims_singleTraj, hdfier, & + file_space_id=filespace_Z, mem_space_id=memspace_Z ) + + call h5sselect_hyperslab_f(filespace_success, H5S_SELECT_SET_F, int((/offset/),HSSIZE_T), int((/1/), HSIZE_T), hdfier) + call h5dwrite_f(dset_id_success, H5T_NATIVE_INTEGER, success, int((/1/), HSIZE_T), hdfier, & + file_space_id=filespace_success, mem_space_id=memspace_success ) + + endif ! myid.eq.0 + +end subroutine write_poincare + +!> \brief Write the rotational transform output from field line following. +!> \ingroup grp_output +!> +!> @param offset radial offset at which the data belongs +!> @param length length of dataset to write +!> @param lvol nested volume index +!> @param diotadxup derivative of rotational transform (?) +!> @param fiota rotational transform +subroutine write_transform( offset, length, lvol, diotadxup, fiota ) + + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + integer, intent(in) :: offset, length, lvol + real(wp), intent(in) :: diotadxup(:), fiota(:,:) + integer :: hdfier !< error flag for HDF5 library + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + if (myid.eq.0 .and. 
.not.skip_write) then + + call h5sselect_hyperslab_f(filespace_diotadxup, H5S_SELECT_SET_F, int((/0,lvol-1/),HSSIZE_T), int((/2,1/),HSSIZE_T), hdfier) + call h5dwrite_f(dset_id_diotadxup, H5T_NATIVE_DOUBLE, diotadxup, int((/2,1/),HSSIZE_T), hdfier, & + file_space_id=filespace_diotadxup, mem_space_id=memspace_diotadxup ) + + ! length of fiota piece to write here may change, so open and close memspace each time a new hyperslab is written + call h5screate_simple_f(rankT, int((/length,2/),HSIZE_T), memspace_fiota , hdfier) + + call h5sselect_hyperslab_f(filespace_fiota, H5S_SELECT_SET_F, int((/offset,0/),HSSIZE_T), int((/length,2/),HSSIZE_T), hdfier) + call h5dwrite_f(dset_id_fiota, H5T_NATIVE_DOUBLE, fiota(1:length,1:2), int((/length,2/),HSSIZE_T), hdfier, & + file_space_id=filespace_fiota, mem_space_id=memspace_fiota ) + + call h5sclose_f(memspace_fiota, hdfier) + + endif ! myid.eq.0 + +end subroutine write_transform + +!> \brief Finalize Poincare output. +!> \ingroup grp_output +!> +!> This closes the still-open datasets related to field-line tracing, +!> which had to be kept open during the tracing to be able to write +!> the outputs directly when a given worker thread is finished. +subroutine finalize_flt_output + use allglobal, only: myid, skip_write + integer :: hdfier !< error flag for HDF5 library + + if (myid.eq.0 .and. .not.skip_write) then + + ! close filespaces + call h5sclose_f(filespace_t, hdfier) + call h5sclose_f(filespace_s, hdfier) + call h5sclose_f(filespace_R, hdfier) + call h5sclose_f(filespace_Z, hdfier) + call h5sclose_f(filespace_success, hdfier) + call h5sclose_f(filespace_diotadxup, hdfier) + call h5sclose_f(filespace_fiota, hdfier) + + ! close dataspaces + call h5sclose_f(memspace_t, hdfier) + call h5sclose_f(memspace_s, hdfier) + call h5sclose_f(memspace_R, hdfier) + call h5sclose_f(memspace_Z, hdfier) + call h5sclose_f(memspace_success, hdfier) + call h5sclose_f(memspace_diotadxup, hdfier) + ! memspace_fiota is re-opened/closed in each iteration (see write_transform) + + ! close datasets + call h5dclose_f(dset_id_t, hdfier) + call h5dclose_f(dset_id_s, hdfier) + call h5dclose_f(dset_id_R, hdfier) + call h5dclose_f(dset_id_Z, hdfier) + call h5dclose_f(dset_id_success, hdfier) + call h5dclose_f(dset_id_diotadxup, hdfier) + call h5dclose_f(dset_id_fiota, hdfier) + + ! close groups + call HCLOSEGRP( grpPoincare ) + call HCLOSEGRP( grpTransform ) + + endif ! myid.eq.0 + +end subroutine finalize_flt_output + +!> \brief Write the magnetic vector potential Fourier harmonics to the output file group \c /vector_potential . +!> \ingroup grp_output +!> +!> The data is stacked in the radial direction over \c Lrad , +!> since \c Lrad can be different in each volume, but HDF5 only supports +!> rectangular arrays. So, one needs to split the \c sumLrad dimension +!> into chunks given by the input \c Lrad array. 
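For instance, assuming each volume contributes `Lrad(vvol)+1` radial rows (the same convention as the `sum(Lrad(1:Mvol)+1)` bookkeeping used in write_grid above), the per-volume slices of the stacked arrays can be recovered as in this sketch:

```fortran
! illustrative index bookkeeping only; Mvol, Lrad and the allA* arrays come from the surrounding module
integer :: vvol, offset, lo, hi
offset = 0
do vvol = 1, Mvol
  lo = offset + 1                 ! first radial row belonging to volume vvol
  hi = offset + Lrad(vvol) + 1    ! last radial row: Lrad(vvol)+1 rows per volume
  ! e.g. allAte(lo:hi, 1:mn) holds the A^theta (even) coefficients of volume vvol
  offset = hi
enddo
```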
+!> +!> @param sumLrad total sum over \c Lrad in all nested volumes +!> @param allAte \f$A^{\theta}_\mathrm{even}\f$ for all nested volumes +!> @param allAze \f$A^{\zeta}_\mathrm{even}\f$ for all nested volumes +!> @param allAto \f$A^{\theta}_\mathrm{odd}\f$ for all nested volumes +!> @param allAzo \f$A^{\zeta}_\mathrm{odd}\f$ for all nested volumes +subroutine write_vector_potential(sumLrad, allAte, allAze, allAto, allAzo) + use allglobal, only : mn, myid, skip_write + + integer, intent(in) :: sumLrad + real(wp), dimension(:,:), intent(in) :: allAte + real(wp), dimension(:,:), intent(in) :: allAze + real(wp), dimension(:,:), intent(in) :: allAto + real(wp), dimension(:,:), intent(in) :: allAzo + + integer(hid_t) :: grpVectorPotential + + if (myid.eq.0 .and. .not.skip_write) then + + call HDEFGRP( file_id, "vector_potential", grpVectorPotential ) + + call HWRITERA( grpVectorPotential, "Ate", sumLrad, mn, allAte(1:sumLrad,1:mn) ) + call HWRITERA( grpVectorPotential, "Aze", sumLrad, mn, allAze(1:sumLrad,1:mn) ) + call HWRITERA( grpVectorPotential, "Ato", sumLrad, mn, allAto(1:sumLrad,1:mn) ) + call HWRITERA( grpVectorPotential, "Azo", sumLrad, mn, allAzo(1:sumLrad,1:mn) ) + + call HCLOSEGRP( grpVectorPotential ) + + endif ! myid.eq.0 + +end subroutine write_vector_potential + +!> \brief Write the final state of the equilibrium to the output file. +!> \ingroup grp_output +!> +subroutine hdfint + + use fileunits, only : ounit + use inputlist + use allglobal, only : ncpu, cpus, & + Mvol, ForceErr, & + mn, im, in, iRbc, iZbs, iRbs, iZbc, & + mns, ims, ins, & + dRbc, dZbs, dRbs, dZbc, & + vvolume, dvolume, & + Bsupumn, Bsupvmn, & + Btemn, Bzemn, Btomn, Bzomn, & + iVns, iBns, iVnc, iBnc, & + lmns, & + TT, & + beltramierror, & + IPDt, dlambdaout, lmns + + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer :: Mrad + real(wp) :: tvolume + + integer(hid_t) :: grpOutput + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + if (myid.eq.0 .and. .not.skip_write) then + + call HDEFGRP( file_id, "output", grpOutput ) + + call HWRITERV( grpOutput, "Vns", mn, iVns(1:mn) ) ! stellarator symmetric normal field at boundary; vacuum component; + call HWRITERV( grpOutput, "Bns", mn, iBns(1:mn) ) ! stellarator symmetric normal field at boundary; plasma component; + call HWRITERV( grpOutput, "Vnc", mn, iVnc(1:mn) ) ! non-stellarator symmetric normal field at boundary; vacuum component; + call HWRITERV( grpOutput, "Bnc", mn, iBnc(1:mn) ) ! non-stellarator symmetric normal field at boundary; plasma component; + +!>
      +!>
    • In addition to the input variables, which are described in global(), the following quantities are written to \c ext.sp.h5 : +!latex +!latex \begin{tabular}{|l|l|l|} \hline + +!latex \type{variable} & type & \pb{description} \hline + +!latex \type{mn} & integer & \pb{number of Fourier modes} + call HWRITEIV( grpOutput, "mn", 1, (/ mn /) ) +!latex \type{im(1:mn)} & integer & \pb{poloidal mode numbers} + call HWRITEIV( grpOutput, "im", mn, im(1:mn) ) +!latex \type{in(1:mn)} & integer & \pb{toroidal mode numbers} + call HWRITEIV( grpOutput, "in", mn, in(1:mn) ) +!latex \type{mns} & integer & \pb{number of Fourier modes} + call HWRITEIV( grpOutput, "mns", 1, (/ mns /) ) +!latex \type{ims(1:mns)} & integer & \pb{poloidal mode numbers} + call HWRITEIV( grpOutput, "ims", mns, ims(1:mns) ) +!latex \type{ins(1:mns)} & integer & \pb{toroidal mode numbers} + call HWRITEIV( grpOutput, "ins", mns, ins(1:mns) ) +!latex \type{Mvol} & integer & \pb{number of interfaces = number of volumes} + call HWRITEIV( grpOutput, "Mvol", 1, (/ Mvol /)) +!latex \type{iRbc(1:mn,0:Mvol)} & real & \pb{Fourier harmonics, $R_{m,n}$, of interfaces} + call HWRITERA( grpOutput, "Rbc", mn, (Mvol+1), iRbc(1:mn,0:Mvol) ) +!latex \type{iZbs(1:mn,0:Mvol)} & real & \pb{Fourier harmonics, $Z_{m,n}$, of interfaces} + call HWRITERA( grpOutput, "Zbs", mn, (Mvol+1), iZbs(1:mn,0:Mvol) ) +!latex \type{iRbs(1:mn,0:Mvol)} & real & \pb{Fourier harmonics, $R_{m,n}$, of interfaces} + call HWRITERA( grpOutput, "Rbs", mn, (Mvol+1), iRbs(1:mn,0:Mvol) ) +!latex \type{iZbc(1:mn,0:Mvol)} & real & \pb{Fourier harmonics, $Z_{m,n}$, of interfaces} + call HWRITERA( grpOutput, "Zbc", mn, (Mvol+1), iZbc(1:mn,0:Mvol) ) +!l tex \type{forcetol} & real & \pb{force-balance error across interfaces} +! HWRITERV( grpOutput, 1, forcetol, (/ forcetol /)) ! already in /input/global +!latex \type{ForceErr} & real & \pb{force-balance error across interfaces} + call HWRITERV( grpOutput, "ForceErr", 1, (/ ForceErr /)) +!latex \type{Ivolume} & real & \pb{Volume current at output (parallel, externally induced)} + call HWRITERV( grpOutput, "Ivolume", Mvol, Ivolume(1:Mvol)) +!latex \type{IPDt} & real & \pb{Surface current at output} + call HWRITERV( grpOutput, "IPDt", Mvol, IPDt(1:Mvol)) + + ! the following quantites can be different from input value + call HWRITERV( grpOutput, "adiabatic" , Mvol, adiabatic(1:Nvol) ) + call HWRITERV( grpOutput, "helicity" , Nvol, helicity(1:Nvol) ) + call HWRITERV( grpOutput, "mu" , Mvol, mu(1:Mvol) ) + call HWRITERV( grpOutput, "tflux" , Mvol, tflux(1:Mvol) ) + call HWRITERV( grpOutput, "pflux" , Mvol, pflux(1:Mvol) ) + + if( Lcheck.eq.1 ) then +!latex \type{beltramierror} & real & \pb{error in beltrami field (volume integral)} + call HWRITERA( grpOutput, "beltramierror", Mvol, 3, beltramierror(1:Mvol,1:3) ) + endif + + if( allocated(vvolume) ) then ! why is it required to confirm that vvolume has been allocated ; 24 Nov 16; + + tvolume = sum(vvolume(1:Nvol) ) +!latex \type{volume} & real & \pb{total volume = $\sum V_v$} + call HWRITERV( grpOutput, "volume", 1, (/ tvolume /)) + + else + + if (Wsphdf5) write(ounit,'("hdfint : ", 10x ," : myid=",i3," ; vvolume is not allocated ;")') myid + + endif ! 
end of if( allocated(vvolume) ) ; 11 Aug 14; + + Mrad = maxval( Lrad(1:Mvol) ) +!latex \type{Mrad} & integer & \pb{the maximum radial (Chebyshev) resolution} + call HWRITEIV( grpOutput, "Mrad", 1, (/ Mrad /)) +!latex \type{TT(0:Mrad,0:1,0:1)} & real & \pb{the Chebyshev polynomials, $T_l$, and their derivatives, evaluated at $s=\pm 1$} + call HWRITERC( grpOutput, "TT", (Mrad+1), 2, 2, TT(0:Mrad,0:1,0:1) ) +!latex \type{Btemn(1:mn,0:1,1:Mvol)} & real & \pb{the cosine harmonics of the covariant poloidal field, +!latex i.e. $[[B_{\t,j}]]$ evaluated on the inner and outer interface in each volume} + call HWRITERC( grpOutput, "Btemn", mn, 2, Mvol, Btemn(1:mn,0:1,1:Mvol) ) +!latex \type{Bzemn(1:mn,0:1,1:Mvol)} & real & \pb{the cosine harmonics of the covariant toroidal field, +!latex i.e. $[[B_{\z,j}]]$ evaluated on the inner and outer interface in each volume} + call HWRITERC( grpOutput, "Bzemn", mn, 2, Mvol, Bzemn(1:mn,0:1,1:Mvol) ) +!latex \type{Btomn(1:mn,0:1,1:Mvol)} & real & \pb{the sine harmonics of the covariant poloidal field, +!latex i.e. $[[B_{\t,j}]]$ evaluated on the inner and outer interface in each volume} + call HWRITERC( grpOutput, "Btomn", mn, 2, Mvol, Btomn(1:mn,0:1,1:Mvol) ) +!latex \type{Bzomn(1:mn,0:1,1:Mvol)} & real & \pb{the sine harmonics of the covariant toroidal field, +!latex i.e. $[[B_{\z,j}]]$ evaluated on the inner and outer interface in each volume} + call HWRITERC( grpOutput, "Bzomn", mn, 2, Mvol, Bzomn(1:mn,0:1,1:Mvol) ) + +! Write lambda_mn, Fourier harmonics or transformation to straight field line coordinates. + call HWRITERC( grpOutput, "lambdamn", lmns, Mvol, 2, dlambdaout(1:lmns,1:Mvol,0:1) ) + + if( Lperturbed.eq.1 ) then + +!latex \type{dRbc(1:mn,0:Nvol)} & real & \pb{Fourier harmonics, $R_{j}$, of interfaces; linearly perturbed solution} + call HWRITERA( grpOutput, "dRbc", mn, (Nvol+1), dRbc(1:mn,0:Nvol) ) +!latex \type{dZbs(1:mn,0:Nvol)} & real & \pb{Fourier harmonics, $Z_{j}$, of interfaces; linearly perturbed solution} + call HWRITERA( grpOutput, "dZbs", mn, (Nvol+1), dZbs(1:mn,0:Nvol) ) +!latex \type{dRbs(1:mn,0:Nvol)} & real & \pb{Fourier harmonics, $R_{j}$, of interfaces; linearly perturbed solution} + call HWRITERA( grpOutput, "dRbs", mn, (Nvol+1), dRbs(1:mn,0:Nvol) ) +!latex \type{dZbc(1:mn,0:Nvol)} & real & \pb{Fourier harmonics, $Z_{j}$, of interfaces; linearly perturbed solution} + call HWRITERA( grpOutput, "dZbc", mn, (Nvol+1), dZbc(1:mn,0:Nvol) ) + + endif + +!latex \type{lmns} & integer & \pb{resolution of straight fieldline transformation} + call HWRITEIV( grpOutput, "lmns", 1, (/ lmns /)) + +!latex \hline \end{tabular} +!>
+!>
+!> All quantities marked as real should be treated as double precision.
+!>
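To illustrate how one of these quantities can be read back later, here is a stand-alone sketch (not part of this change; the file name and the `output/Rbc` path, inferred from the group and dataset names used above, are assumptions):

```fortran
program read_rbc
  use hdf5
  implicit none
  integer(hid_t)   :: file_id, dset_id, space_id
  integer(hsize_t) :: dims(2), maxdims(2)
  integer          :: hdfier
  double precision, allocatable :: Rbc(:,:)

  call h5open_f(hdfier)
  call h5fopen_f("example.sp.h5", H5F_ACC_RDONLY_F, file_id, hdfier)
  call h5dopen_f(file_id, "output/Rbc", dset_id, hdfier)            ! assumed location: group "output", dataset "Rbc"
  call h5dget_space_f(dset_id, space_id, hdfier)
  call h5sget_simple_extent_dims_f(space_id, dims, maxdims, hdfier)  ! query the on-file shape
  allocate( Rbc(dims(1), dims(2)) )                                  ! size the buffer from the file
  call h5dread_f(dset_id, H5T_NATIVE_DOUBLE, Rbc, dims, hdfier)
  write(*,'("Rbc read with shape ",i0," x ",i0)') dims(1), dims(2)
  call h5sclose_f(space_id, hdfier)
  call h5dclose_f(dset_id, hdfier)
  call h5fclose_f(file_id, hdfier)
  call h5close_f(hdfier)
end program read_rbc
```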
    + + call HCLOSEGRP( grpOutput ) + + endif ! myid.eq.0 + +end subroutine hdfint + +!> \brief Close all open HDF5 objects (we know of) and list any remaining still-open objects. +!> \ingroup grp_output +!> +subroutine finish_outfile +! Close all open HDF5 objects (we know of) and list any remaining still-open objects +! The goal should be to close all objects specifically! + + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + integer(size_t) :: obj_count ! number of open HDF5 objects + integer(size_t) :: num_objs ! number of still-open objects + integer(hid_t),dimension(:),allocatable :: obj_ids ! still-open objects + integer :: iObj + integer(size_t) :: openLength + character(len=:),allocatable :: openName + integer(size_t),parameter :: dummySize=1 + character(len=dummySize+1) :: dummyName + integer :: typeClass + integer :: hdfier !< error flag for HDF5 library + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + + + if (myid.eq.0 .and. .not.skip_write) then + + ! close objects related to convergence output + call h5tclose_f(dt_nDcalls_id, hdfier) + call h5tclose_f(dt_Energy_id, hdfier) + call h5tclose_f(dt_ForceErr_id, hdfier) + call h5tclose_f(dt_iRbc_id, hdfier) + call h5tclose_f(dt_iZbs_id, hdfier) + call h5tclose_f(dt_iRbs_id, hdfier) + call h5tclose_f(dt_iZbc_id, hdfier) + call h5dclose_f(iteration_dset_id, hdfier) ! End access to the dataset and release resources used by it. + call h5pclose_f(plist_id, hdfier) ! close plist used for 'preserve' flag (does not show up in obj_count below) + + ! check whether we forgot to close some resources; only check for group, dataset and datatype (there is only one file and that should be still open...) + call h5fget_obj_count_f(file_id, ior(H5F_OBJ_GROUP_F, ior(H5F_OBJ_DATASET_F, H5F_OBJ_DATATYPE_F)), obj_count, hdfier) + + if (obj_count.gt.0) then + write(*,'("There are still ",i3," hdf5 objects open")') obj_count + allocate(obj_ids(1:obj_count)) + + ! groups + call h5fget_obj_ids_f(file_id, H5F_OBJ_GROUP_F, obj_count, obj_ids, hdfier, num_objs) ! get for open objects + if (num_objs.gt.0) then + write(*,'("There are still ",i3," HDF5 groups open:")') num_objs + do iObj=1,num_objs + openLength=0 + call h5iget_name_f(obj_ids(iObj), dummyName, dummySize, openLength, hdfier) + allocate(character(len=openLength+1) :: openName) + call h5iget_name_f(obj_ids(iObj), openName, openLength, openLength, hdfier) + write(*,*) openName + deallocate(openName) + + call h5gclose_f(obj_ids(iObj), hdfier) + enddo + endif + + ! datasets + call h5fget_obj_ids_f(file_id, H5F_OBJ_DATASET_F, obj_count, obj_ids, hdfier, num_objs) ! get for open objects + if (num_objs.gt.0) then + write(*,'("There are still ",i3," HDF5 datasets open:")') num_objs + do iObj=1,num_objs + openLength=0 + call h5iget_name_f(obj_ids(iObj), dummyName, dummySize, openLength, hdfier) + allocate(character(len=openLength+1) :: openName) + call h5iget_name_f(obj_ids(iObj), openName, openLength, openLength, hdfier) + write(*,*) openName(1:openLength) + deallocate(openName) + + call h5dclose_f(obj_ids(iObj), hdfier) + enddo + endif + + ! datatypes + call h5fget_obj_ids_f(file_id, H5F_OBJ_DATATYPE_F, obj_count, obj_ids, hdfier, num_objs) ! 
get for open objects + if (num_objs.gt.0) then + write(*,'("There are still ",i3," HDF5 datatypes open:")') num_objs + do iObj=1,num_objs + call h5tget_class_f(obj_ids(iObj), typeClass, hdfier) ! determine class of open datatype + if (typeClass.eq.H5T_NO_CLASS_F ) then ; write(*,*) "H5T_NO_CLASS_F" + else if (typeClass.eq.H5T_INTEGER_F ) then ; write(*,*) "H5T_INTEGER_F" + else if (typeClass.eq.H5T_FLOAT_F ) then ; write(*,*) "H5T_FLOAT_F" + else if (typeClass.eq.H5T_STRING_F ) then ; write(*,*) "H5T_STRING_F" + else if (typeClass.eq.H5T_BITFIELD_F ) then ; write(*,*) "H5T_BITFIELD_F" + else if (typeClass.eq.H5T_OPAQUE_F ) then ; write(*,*) "H5T_OPAQUE_F" + else if (typeClass.eq.H5T_COMPOUND_F ) then ; write(*,*) "H5T_COMPOUND_F" + else if (typeClass.eq.H5T_REFERENCE_F) then ; write(*,*) "H5T_REFERENCE_F" + else if (typeClass.eq.H5T_ENUM_F ) then ; write(*,*) "H5T_ENUM_F" + else if (typeClass.eq.H5T_VLEN_F ) then ; write(*,*) "H5T_VLEN_F" + else if (typeClass.eq.H5T_ARRAY_F ) then ; write(*,*) "H5T_ARRAY_F" + else ; write(*,*) "UNKNOWN TYPE!" + endif + + call h5tclose_f(obj_ids(iObj), hdfier) + enddo + endif + + deallocate(obj_ids) + endif ! (obj_count.gt.0) + + call h5fclose_f( file_id, hdfier ) ! terminate access on output file; + call h5close_f( hdfier ) ! close Fortran interface to the HDF5 library; + + endif ! myid.eq.0 + +end subroutine finish_outfile + +end module sphdf5 diff --git a/src/spsint.f90 b/src/spsint.F90 similarity index 86% rename from src/spsint.f90 rename to src/spsint.F90 index 1c423eda..95c7998e 100644 --- a/src/spsint.f90 +++ b/src/spsint.F90 @@ -11,7 +11,7 @@ !> @param lvol !> @param lrad subroutine spsint( lquad, mn, lvol, lrad ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, two, pi, pi2 @@ -43,31 +43,47 @@ subroutine spsint( lquad, mn, lvol, lrad ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lquad, mn, lvol, lrad +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lquad, mn, lvol, lrad - INTEGER :: jquad, ll, pp, ll1, pp1, uv, ii, jj, io, mn2, lp2, mn2_max, lp2_max, nele, mi + integer :: jquad, ll, pp, ll1, pp1, uv, ii, jj, io, mn2, lp2, mn2_max, lp2_max, nele, mi - INTEGER :: kk, kd, kka, kks, kda, kds + integer :: kk, kd, kka, kks, kda, kds - REAL :: lss, jthweight, fee, feo, foe, foo, Tl, Dl, Tp, Dp, TlTp, TlDp, DlTp, DlDp, ikda, ikds, imn2, ilrad, lssm + real(wp) :: lss, jthweight, fee, feo, foe, foo, Tl, Dl, Tp, Dp, TlTp, TlDp, DlTp, DlDp, ikda, ikds, imn2, ilrad, lssm - REAL :: foocc, fooss - REAL :: fsscc, fssss - REAL :: fstcc, fstss - REAL :: fszcc, fszss - REAL :: fttcc, fttss - REAL :: ftzcc, ftzss - REAL :: fzzcc, fzzss + real(wp) :: foocc, fooss + real(wp) :: fsscc, fssss + real(wp) :: fstcc, fstss + real(wp) :: fszcc, fszss + real(wp) :: fttcc, fttss + real(wp) :: ftzcc, ftzss + real(wp) :: fzzcc, fzzss - REAL :: goomne, gssmne, gstmne, gszmne, gttmne, gtzmne, gzzmne + real(wp) :: goomne, gssmne, gstmne, gszmne, gttmne, gtzmne, gzzmne - REAL :: sbar + real(wp) :: sbar - REAL, allocatable :: basis(:,:,:,:) + real(wp), allocatable :: basis(:,:,:,:) + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN( spsint ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! mn2_max = mn*mn @@ -93,7 +109,10 @@ subroutine spsint( lquad, mn, lvol, lrad ) DDzzss = zero endif !NOTstellsym - SALLOCATE(basis, (0:lrad,0:mpol,0:1,lquad), zero) + + allocate( basis(0:lrad,0:mpol,0:1,lquad), stat=astat ) + basis(0:lrad,0:mpol,0:1,lquad) = zero + do jquad = 1, lquad lss = gaussianabscissae(jquad,lvol) ; jthweight = gaussianweight(jquad,lvol) sbar = (lss + one) * half @@ -241,10 +260,17 @@ subroutine spsint( lquad, mn, lvol, lrad ) end if !NOTstellsym - DALLOCATE(basis) + + deallocate(basis,stat=astat) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN( spsint ) + +9999 continue + cput = MPI_WTIME() + Tspsint = Tspsint + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/spsmat.f90 b/src/spsmat.F90 similarity index 90% rename from src/spsmat.f90 rename to src/spsmat.F90 index ff7fbd4e..d27bcf6f 100644 --- a/src/spsmat.f90 +++ b/src/spsmat.F90 @@ -39,7 +39,7 @@ !> @param mn !> @param lrad subroutine spsmat( lvol, mn, lrad ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, two @@ -72,32 +72,48 @@ subroutine spsmat( lvol, mn, lrad ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
- INTEGER, intent(in) :: lvol, mn, lrad + integer, intent(in) :: lvol, mn, lrad !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - INTEGER :: NN, ii, jj, ll, kk, pp, ll1, pp1, mi, ni, mj, nj, mimj, minj, nimj, ninj, mjmi, mjni, njmi, njni, id, jd, idx + integer :: NN, ii, jj, ll, kk, pp, ll1, pp1, mi, ni, mj, nj, mimj, minj, nimj, ninj, mjmi, mjni, njmi, njni, id, jd, idx - REAL :: Wtete, Wteto, Wtote, Wtoto - REAL :: Wteze, Wtezo, Wtoze, Wtozo - REAL :: Wzete, Wzeto, Wzote, Wzoto - REAL :: Wzeze, Wzezo, Wzoze, Wzozo + real(wp) :: Wtete, Wteto, Wtote, Wtoto + real(wp) :: Wteze, Wtezo, Wtoze, Wtozo + real(wp) :: Wzete, Wzeto, Wzote, Wzoto + real(wp) :: Wzeze, Wzezo, Wzoze, Wzozo - REAL :: Htete, Hteto, Htote, Htoto - REAL :: Hteze, Htezo, Htoze, Htozo - REAL :: Hzete, Hzeto, Hzote, Hzoto - REAL :: Hzeze, Hzezo, Hzoze, Hzozo - REAL :: adata, ddata, factorcc, factorss + real(wp) :: Htete, Hteto, Htote, Htoto + real(wp) :: Hteze, Htezo, Htoze, Htozo + real(wp) :: Hzete, Hzeto, Hzote, Hzoto + real(wp) :: Hzeze, Hzezo, Hzoze, Hzozo + real(wp) :: adata, ddata, factorcc, factorss - REAL,allocatable :: dMASqueue(:,:), dMDSqueue(:,:), TTdata(:,:,:), TTMdata(:,:) ! queues to construct sparse matrices - INTEGER,allocatable :: jdMASqueue(:,:) ! indices - INTEGER :: nqueue(4), nrow, ns, nmaxqueue + real(wp),allocatable :: dMASqueue(:,:), dMDSqueue(:,:), TTdata(:,:,:), TTMdata(:,:) ! queues to construct sparse matrices + integer,allocatable :: jdMASqueue(:,:) ! indices + integer :: nqueue(4), nrow, ns, nmaxqueue + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(spsmat) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -112,12 +128,27 @@ subroutine spsmat( lvol, mn, lrad ) nmaxqueue = 4 * lrad + 10 ! estimate the size of the queue - SALLOCATE( dMASqueue, (1:nmaxqueue, 4), zero) - SALLOCATE( dMDSqueue, (1:nmaxqueue, 4), zero) - SALLOCATE( jdMASqueue, (1:nmaxqueue, 4), zero) - SALLOCATE( TTdata, (0:lrad, 0:mpol, 0:1), zero) - SALLOCATE( TTMdata, (0:lrad, 0:mpol), zero) + allocate( dMASqueue(1:nmaxqueue, 4), stat=astat ) + dMASqueue(1:nmaxqueue, 4) = zero + + + allocate( dMDSqueue(1:nmaxqueue, 4), stat=astat ) + dMDSqueue(1:nmaxqueue, 4) = zero + + + allocate( jdMASqueue(1:nmaxqueue, 4), stat=astat ) + jdMASqueue(1:nmaxqueue, 4) = zero + + + + allocate( TTdata(0:lrad, 0:mpol, 0:1), stat=astat ) + TTdata(0:lrad, 0:mpol, 0:1) = zero + + + allocate( TTMdata(0:lrad, 0:mpol), stat=astat ) + TTMdata(0:lrad, 0:mpol) = zero + ! fill in Zernike/Chebyshev polynomials depending on Lcooridnatesingularity if (Lcoordinatesingularity) then @@ -453,15 +484,30 @@ subroutine spsmat( lvol, mn, lrad ) ! dMB and dMG are constructed elsewhere - DALLOCATE( dMASqueue ) - DALLOCATE( dMDSqueue ) - DALLOCATE( jdMASqueue ) - DALLOCATE( TTdata ) - DALLOCATE( TTMdata ) + deallocate(dMASqueue ,stat=astat) + + + deallocate(dMDSqueue ,stat=astat) + + + deallocate(jdMASqueue ,stat=astat) + + + + deallocate(TTdata ,stat=astat) + + + deallocate(TTMdata ,stat=astat) + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
- RETURN(spsmat) + +9999 continue + cput = MPI_WTIME() + Tspsmat = Tspsmat + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -483,6 +529,7 @@ end subroutine spsmat !> @param qD !> @param qjA subroutine push_back(iq, nq, NN, vA, vD, vjA, qA, qD, qjA) + use mod_kinds, only: wp => dp ! push a new element at the back of the queue ! INPUTS: ! iq - INTEGER, which queue (1-4) @@ -494,10 +541,10 @@ subroutine push_back(iq, nq, NN, vA, vD, vjA, qA, qD, qjA) use constants, only : zero implicit none - REAL, INTENT(IN) :: vA, vD - INTEGER, INTENT(IN) :: vjA, iq, NN - REAL, INTENT(INOUT) :: qA(NN,4), qD(NN,4) - INTEGER, INTENT(INOUT) :: qjA(NN,4), nq(4) + real(wp), INTENT(IN) :: vA, vD + integer, INTENT(IN) :: vjA, iq, NN + real(wp), INTENT(INOUT) :: qA(NN,4), qD(NN,4) + integer, INTENT(INOUT) :: qjA(NN,4), nq(4) if (abs(vA).gt.zero .or. abs(vD).gt.zero) then @@ -518,13 +565,14 @@ end subroutine push_back !> @param qD !> @param qjA subroutine clean_queue(nq, NN, qA, qD, qjA) + use mod_kinds, only: wp => dp ! clean the queue use constants, only : zero implicit none - INTEGER, INTENT(IN) :: NN - REAL, INTENT(INOUT) :: qA(NN,4), qD(NN,4) - INTEGER, INTENT(INOUT) :: qjA(NN,4), nq(4) + integer, INTENT(IN) :: NN + real(wp), INTENT(INOUT) :: qA(NN,4), qD(NN,4) + integer, INTENT(INOUT) :: qjA(NN,4), nq(4) nq = 0 qA = zero @@ -548,16 +596,17 @@ end subroutine clean_queue !> @param jdMAS !> @param idMAS subroutine addline(nq, NN, qA, qD, qjA, ns, nrow, dMAS, dMDS, jdMAS, idMAS) + use mod_kinds, only: wp => dp ! add the content from the queue to the real matrices implicit none - INTEGER, INTENT(INOUT) :: NN, ns, nrow - REAL, INTENT(INOUT) :: qA(NN,4), qD(NN,4) - INTEGER, INTENT(INOUT) :: qjA(NN,4), nq(4) - REAL :: dMAS(*), dMDS(*) - INTEGER :: jdMAS(*), idMAS(*) + integer, INTENT(INOUT) :: NN, ns, nrow + real(wp), INTENT(INOUT) :: qA(NN,4), qD(NN,4) + integer, INTENT(INOUT) :: qjA(NN,4), nq(4) + real(wp) :: dMAS(*), dMDS(*) + integer :: jdMAS(*), idMAS(*) - INTEGER :: pp + integer :: pp do pp = 1, 4 if (nq(pp) .eq. 0) cycle diff --git a/src/stzxyz.f90 b/src/stzxyz.F90 similarity index 80% rename from src/stzxyz.f90 rename to src/stzxyz.F90 index 9aa5a171..5722e874 100644 --- a/src/stzxyz.f90 +++ b/src/stzxyz.F90 @@ -17,7 +17,7 @@ !> @param[in] stz !> @param[out] RpZ subroutine stzxyz( lvol , stz , RpZ ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, one, half @@ -33,23 +33,51 @@ subroutine stzxyz( lvol , stz , RpZ ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lvol +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lvol - REAL, intent(in) :: stz(1:3) - REAL, intent(out) :: RpZ(1:3) + real(wp), intent(in) :: stz(1:3) + real(wp), intent(out) :: RpZ(1:3) - INTEGER :: ii, mi, ni - REAL :: Remn, Zomn, Romn, Zemn, RR, phi, ZZ, arg, carg, sarg, lss, alss, blss, sbar, sbarhim, fj + integer :: ii, mi, ni + real(wp) :: Remn, Zomn, Romn, Zemn, RR, phi, ZZ, arg, carg, sarg, lss, alss, blss, sbar, sbarhim, fj + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(stzxyz) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL(stzxyz, lvol.lt.1 .or. lvol.gt.Mvol, invalid interface label ) - FATAL(stzxyz, abs(stz(1)).gt.one, invalid radial coordinate ) + + if( lvol.lt.1 .or. lvol.gt.Mvol ) then + write(6,'("stzxyz : fatal : myid=",i3," ; lvol.lt.1 .or. lvol.gt.Mvol ; invalid interface label ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "stzxyz : lvol.lt.1 .or. lvol.gt.Mvol : invalid interface label ;" + endif + + + if( abs(stz(1)).gt.one ) then + write(6,'("stzxyz : fatal : myid=",i3," ; abs(stz(1)).gt.one ; invalid radial coordinate ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "stzxyz : abs(stz(1)).gt.one : invalid radial coordinate ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -129,7 +157,12 @@ subroutine stzxyz( lvol , stz , RpZ ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(stzxyz) + +9999 continue + cput = MPI_WTIME() + Tstzxyz = Tstzxyz + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/tr00ab.f90 b/src/tr00ab.F90 similarity index 75% rename from src/tr00ab.f90 rename to src/tr00ab.F90 index 8a6b973b..1e6ab67a 100644 --- a/src/tr00ab.f90 +++ b/src/tr00ab.F90 @@ -68,7 +68,7 @@ !> @param iflag !> @param ldiota subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-field line magnetic coordinates; - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, third, half, one, two, pi2, goldenmean @@ -94,17 +94,25 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lvol, mn, NN, Nt, Nz, iflag +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! 
cpu time; cpu initial; cpu old; 31 Jan 13; + - REAL, intent(inout) :: ldiota(0:1,-1:2) + integer, intent(in) :: lvol, mn, NN, Nt, Nz, iflag - INTEGER :: innout, ll, ii, jj, kk, jb, kb, mj, nj, ideriv, jderiv, id, MM, ielement, nelements, Lcurvature, idof, icon, mi, ni, imupf + real(wp), intent(inout) :: ldiota(0:1,-1:2) - REAL :: lcpu, mfactor, lss, Dteta, Dzeta, rfac, tol, rnorm, omega, diotaerror!, sparsedenseerror + integer :: innout, ll, ii, jj, kk, jb, kb, mj, nj, ideriv, jderiv, id, MM, ielement, nelements, Lcurvature, idof, icon, mi, ni, imupf - REAL :: lAte(0:mn,-1:2), lAze(0:mn,-1:2), lAto(0:mn,-1:2), lAzo(0:mn,-1:2) + real(wp) :: lcpu, mfactor, lss, Dteta, Dzeta, rfac, tol, rnorm, omega, diotaerror!, sparsedenseerror + + real(wp) :: lAte(0:mn,-1:2), lAze(0:mn,-1:2), lAto(0:mn,-1:2), lAzo(0:mn,-1:2) ! REAL :: lBso(1:mn,-1:2), lBte(1:mn,-1:2), lBze(1:mn,-1:2) ! REAL :: lBse(1:mn,-1:2), lBto(1:mn,-1:2), lBzo(1:mn,-1:2) @@ -112,42 +120,68 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi ! REAL :: gvu(1:Nt*Nz,1:3,1:3) ! local workspace; 13 Sep 13; ! required for Fourier routines; - INTEGER :: IA, if04aaf, idgesvx, ipiv(1:NN), iwork4(1:NN) - REAL , allocatable :: dmatrix(:,:,:), omatrix(:,:), FAA(:,:) - REAL :: drhs(1:NN,-1:2), dlambda(1:NN,-1:2) - REAL :: Rdgesvx(1:NN), Cdgesvx(1:NN), work4(1:4*NN), rcond, ferr(1), berr(1), ferr2(1:2), berr2(1:2) - CHARACTER :: equed + integer :: IA, if04aaf, idgesvx, ipiv(1:NN), iwork4(1:NN) + real(wp) , allocatable :: dmatrix(:,:,:), omatrix(:,:), FAA(:,:) + real(wp) :: drhs(1:NN,-1:2), dlambda(1:NN,-1:2) + real(wp) :: Rdgesvx(1:NN), Cdgesvx(1:NN), work4(1:4*NN), rcond, ferr(1), berr(1), ferr2(1:2), berr2(1:2) + character :: equed ! required for real-space routines; - INTEGER :: maxitn, reqdits, extralength, lrwork, integerwork(1:2*Nt*Nz+2+1), if11def, if11zaf, if11xaf - INTEGER :: IAA, if04atf, if04arf - INTEGER :: Ndof, label(-3:Nt+2,-3:Nz+2), isym + integer :: maxitn, reqdits, extralength, lrwork, integerwork(1:2*Nt*Nz+2+1), if11def, if11zaf, if11xaf + integer :: IAA, if04atf, if04arf + integer :: Ndof, label(-3:Nt+2,-3:Nz+2), isym !required for SVD routines; - INTEGER :: idgelsd, Lwork, Liwork, Irank, nlvl - REAL :: sval(1:NN) - REAL , allocatable :: work(:) + integer :: idgelsd, Lwork, Liwork, Irank, nlvl + real(wp) :: sval(1:NN) + real(wp) , allocatable :: work(:) + + real(wp) :: Bsupt(1:Nt*Nz,-1:2), Bsupz(1:Nt*Nz,-1:2), tdot(1:Nt*Nz) + real(wp) :: Bsubs(1:Nt*Nz,-1:2), Bsubt(1:Nt*Nz,-1:2), Bsubz(1:Nt*Nz,-1:2) - REAL :: Bsupt(1:Nt*Nz,-1:2), Bsupz(1:Nt*Nz,-1:2), tdot(1:Nt*Nz) - REAL :: Bsubs(1:Nt*Nz,-1:2), Bsubt(1:Nt*Nz,-1:2), Bsubz(1:Nt*Nz,-1:2) + real(wp) :: dotteta, dotzeta - REAL :: dotteta, dotzeta + real(wp) , allocatable :: rmatrix(:,:,:), rrhs(:,:), rlambda(:,:), wks1(:), wks2(:), AA(:,:) - REAL , allocatable :: rmatrix(:,:,:), rrhs(:,:), rlambda(:,:), wks1(:), wks2(:), AA(:,:) + integer :: inz(-1:2), lnz + integer, allocatable :: irow(:,:), jcol(:,:), istr(:), iwork(:) + real(wp) , allocatable :: smatrix(:,:), srhs(:,:), slambda(:,:), swork(:) + character :: duplicate*1, zeros*1, method*8, precon*1, trans*1, check*1 ! logical control of sparse routines; 20 Apr 13; - INTEGER :: inz(-1:2), lnz - INTEGER, allocatable :: irow(:,:), jcol(:,:), istr(:), iwork(:) - REAL , allocatable :: smatrix(:,:), srhs(:,:), slambda(:,:), swork(:) - CHARACTER :: duplicate*1, zeros*1, method*8, precon*1, trans*1, check*1 ! 
logical control of sparse routines; 20 Apr 13; - BEGIN(tr00ab) + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( tr00ab, mns.le.0, no degrees of freedom in angle transformation ) ! this is only for Fourier; 20 Apr 13; - FATAL( tr00ab, lvol.lt.1 .or. lvol.gt.Mvol, illegal lvol ) - FATAL( tr00ab, iflag.lt.-1 .or. iflag.gt.2, illegal iflag ) + + if( mns.le.0 ) then + write(6,'("tr00ab : fatal : myid=",i3," ; mns.le.0 ; no degrees of freedom in angle transformation ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : mns.le.0 : no degrees of freedom in angle transformation ;" + endif + ! this is only for Fourier; 20 Apr 13; + + if( lvol.lt.1 .or. lvol.gt.Mvol ) then + write(6,'("tr00ab : fatal : myid=",i3," ; lvol.lt.1 .or. lvol.gt.Mvol ; illegal lvol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : lvol.lt.1 .or. lvol.gt.Mvol : illegal lvol ;" + endif + + + if( iflag.lt.-1 .or. iflag.gt.2 ) then + write(6,'("tr00ab : fatal : myid=",i3," ; iflag.lt.-1 .or. iflag.gt.2 ; illegal iflag ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : iflag.lt.-1 .or. iflag.gt.2 : illegal iflag ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -215,9 +249,18 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi ! construct real-space, real-space transformation matrix; 20 Apr 13; if( Lsparse.eq.0 .or. Lsparse.eq.3 ) then - SALLOCATE( dmatrix, (1:NN,1:NN,-1:2), zero ) - SALLOCATE( omatrix, (1:NN,1:NN), zero ) - SALLOCATE( FAA, (1:NN,1:NN), zero ) + + allocate( dmatrix(1:NN,1:NN,-1:2), stat=astat ) + dmatrix(1:NN,1:NN,-1:2) = zero + + + allocate( omatrix(1:NN,1:NN), stat=astat ) + omatrix(1:NN,1:NN) = zero + + + allocate( FAA(1:NN,1:NN), stat=astat ) + FAA(1:NN,1:NN) = zero + endif @@ -225,15 +268,33 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi if( Lsparse.gt.0 ) then - FATAL( tr00ab, NOTstellsym, under construction ) - FATAL( tr00ab, Ntor.ne.0 , under construction ) + + if( NOTstellsym ) then + write(6,'("tr00ab : fatal : myid=",i3," ; NOTstellsym ; under construction ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : NOTstellsym : under construction ;" + endif + + + if( Ntor.ne.0 ) then + write(6,'("tr00ab : fatal : myid=",i3," ; Ntor.ne.0 ; under construction ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : Ntor.ne.0 : under construction ;" + endif + select case( iorder ) case( 2 ) ; Dteta = 2 * pi2 / Nt ; Dzeta = pi2nfp / Nz ! real-space grid resolution; 20 Apr 13; case( 4 ) ; Dteta = 12 * pi2 / Nt ; Dzeta = pi2nfp / Nz ! real-space grid resolution; 20 Apr 13; case( 6 ) ; Dteta = 60 * pi2 / Nt ; Dzeta = pi2nfp / Nz ! real-space grid resolution; 20 Apr 13; case default - FATAL( tr00ab, .true., iorder not supported ) + + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; iorder not supported ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : iorder not supported ;" + endif + end select tdot(1:Ntz) = Bsupt(1:Ntz,0) / Bsupz(1:Ntz,0) ! 
shorthand; 24 Apr 13; @@ -265,33 +326,84 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi endif - FATAL( tr00ab, ii.ne.Ndof, counting error ) + + if( ii.ne.Ndof ) then + write(6,'("tr00ab : fatal : myid=",i3," ; ii.ne.Ndof ; counting error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : ii.ne.Ndof : counting error ;" + endif + Ndof = Ndof + 1 ! include rotational-transform as a degree-of-freedom; 23 Apr 13; ! dense arrays; 24 Apr 13; ! these will eventually be redundant; 24 Apr 13; if( Lsparse.eq.1 ) then ! dense transformation; 24 Apr 13; - SALLOCATE( rmatrix, (1:Ndof,1:Ndof,-1:2), zero ) ! real-space angle transformation matrix; dense; 23 Apr 13; - SALLOCATE( rrhs , (1:Ndof, -1:2), zero ) - SALLOCATE( rlambda, (1:Ndof, -1:2), zero ) - SALLOCATE( wks1 , (1:Ndof ), zero ) - SALLOCATE( wks2 , (1:Ndof ), zero ) - SALLOCATE( AA , (1:Ndof,1:Ndof ), zero ) + + allocate( rmatrix(1:Ndof,1:Ndof,-1:2), stat=astat ) + rmatrix(1:Ndof,1:Ndof,-1:2) = zero + ! real-space angle transformation matrix; dense; 23 Apr 13; + + allocate( rrhs (1:Ndof, -1:2), stat=astat ) + rrhs (1:Ndof, -1:2) = zero + + + allocate( rlambda(1:Ndof, -1:2), stat=astat ) + rlambda(1:Ndof, -1:2) = zero + + + allocate( wks1 (1:Ndof ), stat=astat ) + wks1 (1:Ndof ) = zero + + + allocate( wks2 (1:Ndof ), stat=astat ) + wks2 (1:Ndof ) = zero + + + allocate( AA (1:Ndof,1:Ndof ), stat=astat ) + AA (1:Ndof,1:Ndof ) = zero + endif ! end of if( Lsparse.eq.1 ) ; 24 Apr 13; ! sparse arrays; ! all of these can be simply defined (1:Ntz) etc. . . . ; 24 Apr 13; if( Lsparse.ge.2 ) then ! sparse transformation; 24 Apr 13; - SALLOCATE( srhs , (1: Ndof ,-1:2), zero ) - SALLOCATE( istr , (1: Ndof+1 ), 0 ) ! for re-ordering; 24 Apr 13; - SALLOCATE( iwork , (1:2*Ndof+1 ), 0 ) ! for re-ordering & iterative solver; 24 Apr 13; - SALLOCATE( slambda, (1: Ndof ,-1:2), zero ) + + allocate( srhs (1: Ndof ,-1:2), stat=astat ) + srhs (1: Ndof ,-1:2) = zero + + + allocate( istr (1: Ndof+1 ), stat=astat ) + istr (1: Ndof+1 ) = 0 + ! for re-ordering; 24 Apr 13; + + allocate( iwork (1:2*Ndof+1 ), stat=astat ) + iwork (1:2*Ndof+1 ) = 0 + ! for re-ordering & iterative solver; 24 Apr 13; + + allocate( slambda(1: Ndof ,-1:2), stat=astat ) + slambda(1: Ndof ,-1:2) = zero + select case( iorder ) case( 2 ) - SALLOCATE( smatrix, (1:Ndof*5,-1:2), zero) ! real-space angle transformation; sparse; 24 Apr 13; - SALLOCATE( irow , (1:Ndof*5,-1:2), 0 ) - SALLOCATE( jcol , (1:Ndof*5,-1:2), 0 ) + + allocate( smatrix(1:Ndof*5,-1:2), stat=astat ) + smatrix(1:Ndof*5,-1:2) = zero + ! real-space angle transformation; sparse; 24 Apr 13; + + allocate( irow (1:Ndof*5,-1:2), stat=astat ) + irow (1:Ndof*5,-1:2) = 0 + + + allocate( jcol (1:Ndof*5,-1:2), stat=astat ) + jcol (1:Ndof*5,-1:2) = 0 + case default - FATAL( tr00ab, .true., need to estimate length of smatrix irow and jcol ) + + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; need to estimate length of smatrix irow and jcol ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : need to estimate length of smatrix irow and jcol ;" + endif + end select endif ! end of if( Lsparse.gt.2 ) ; 24 Apr 13; @@ -350,7 +462,13 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi case( 6 ) ; jb = jj-3 ; kb = kk ; rfac = - 1 * dotteta / Dteta ; isym = +1 end select case default - FATAL( tr00ab, .true., selected value of iorder not supported ) + + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. 
; selected value of iorder not supported ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : selected value of iorder not supported ;" + endif + end select ! end of select case( iorder ) ; 24 Apr 13; if ( jb.eq.-3 ) then ; jb = 3 ; isym = -1 @@ -427,7 +545,13 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi endif case default - FATAL( tr00ab, .true., iorder not supported ) + + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; iorder not supported ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : iorder not supported ;" + endif + end select rfac= (- 1)*dotzeta @@ -439,21 +563,27 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi inz(id) = lnz ! lnz is short-hand; 24 Apr 13; - lcpu = GETTIME ! record time taken in F11DEF; 20 Apr 13; + lcpu = MPI_WTIME() ! record time taken in F11DEF; 20 Apr 13; duplicate = 'S' ! duplicate = 'R' = remove, 'S' = sum or 'F' = fatal ; 20 Apr 13; zeros = 'K' ! zeros = 'R' = remove, 'K' = keep or 'F' = fatal ; 20 Apr 13; if11zaf = 1 call F11ZAF( Ndof, inz(id), smatrix(1:inz(id),id), irow(1:inz(id),id), jcol(1:inz(id),id), duplicate, zeros, istr(1:Ndof+1), iwork(1:Ndof), if11zaf ) - cput = GETTIME + cput = MPI_WTIME() select case( if11zaf ) !1234567890123456789012 case( 0 ) ; if( Wtr00ab ) write(ounit,1000) myid, lvol, innout, id, if11zaf, cput-lcpu, "success ; " case( 1 ) ; write(ounit,1000) myid, lvol, innout, id, if11zaf, cput-lcpu, "input error ; " case( 2 ) ; write(ounit,1000) myid, lvol, innout, id, if11zaf, cput-lcpu, "row or column error ; " case( 3 ) ; write(ounit,1000) myid, lvol, innout, id, if11zaf, cput-lcpu, "duplicate eq F error ;" case( 4 ) ; write(ounit,1000) myid, lvol, innout, id, if11zaf, cput-lcpu, "zeros eq F error ; " - case default ; FATAL( tr00ab, .true., illegal ifail returned by F11ZAF ) + case default ; + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; illegal ifail returned by F11ZAF ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : illegal ifail returned by F11ZAF ;" + endif + end select 1000 format("tr00ab : ", 10x ," : myid=",i3," ; lvol=",i3," ; innout="i2" ; ideriv="i2" ; if11zaf="i2" ; time="f10.4" ; "a22) @@ -473,17 +603,32 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi case( 0 ) ; precon='N' ; extralength = 0 case( 1 ) ; precon='J' ; extralength = Ndof case( 2 ) ; precon='S' ; extralength = Ndof - case default ; FATAL( tr00ab, .true., illegal iprecon ) + case default ; + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; illegal iprecon ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : illegal iprecon ;" + endif + end select select case( imethod ) case( 1 ) ; method='RGMRES' ; MM = min( Ndof, 50) ; lrwork = 4 * Ndof + MM * ( MM + Ndof + 4 ) + extralength + 1 case( 2 ) ; method='CGS' ; ; lrwork = 8 * Ndof + extralength case( 3 ) ; method='BICGSTAB' ; MM = min( Ndof, 10) ; lrwork = 2 * Ndof * ( MM + 2) + MM * ( MM + 2 ) + Ndof + 2 * extralength - case default ; FATAL( tr00ab, .true., illegal imethod ) + case default ; + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; illegal imethod ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. 
: illegal imethod ;" + endif + end select - SALLOCATE( swork, (1:lrwork), zero ) + + allocate( swork(1:lrwork), stat=astat ) + swork(1:lrwork) = zero + tol = max( iotatol, machprec ) ; maxitn = Ndof**3 ; omega = one @@ -518,24 +663,36 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi case( 1 ) ; write(ounit,1015) myid, lvol, innout, id, if11xaf, "input error ;" case( 2 ) ; write(ounit,1015) myid, lvol, innout, id, if11xaf, "input error ;" case( 3 ) ; write(ounit,1015) myid, lvol, innout, id, if11xaf, "input error ;" - case default ; FATAL( tr00ab, .true., illegal ifail returned from F11XAF ) + case default ; + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; illegal ifail returned from F11XAF ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : illegal ifail returned from F11XAF ;" + endif + end select 1015 format("tr00ab : ", 10x ," : myid=",i3," ; lvol=",i3," ; innout="i2" ; ideriv="i2" ; if11xaf="i2" ; "10x" ; "a13) srhs(1:Ndof,id) = srhs(1:Ndof,id) - srhs(1:Ndof,-1) endif ! end of if( Lsparse.ge.2. ) ; 24 Apr 13; case default - FATAL( tr00ab, .true., invalid ideriv ) + + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; invalid ideriv ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : invalid ideriv ;" + endif + end select ! end of select case( ideriv ) ; 21 Apr 13; if( Lsparse.ge.2 ) then ! use sparse solver; 24 Apr 13; slambda(1:Ndof,ideriv) = glambda(1:Ndof,ideriv,innout,lvol) ! initial guess provided ; 24 Apr 13; - lcpu = GETTIME ! record time taken in F11DEF; 20 Apr 13; + lcpu = MPI_WTIME() ! record time taken in F11DEF; 20 Apr 13; reqdits = 0 ; rnorm = -one ! sometimes, F11DEF can exit without these being set; 21 Apr 13; if11def = 1 call F11DEF( method, precon, Ndof, inz( 0), smatrix(1:inz( 0), 0), irow(1:inz( 0), 0), jcol(1:inz( 0), 0), omega, srhs(1:Ndof,id), MM, tol, maxitn, & slambda(1:Ndof,id), rnorm, reqdits, swork(1:lrwork), lrwork, iwork(1:2*Ndof+1), if11def) - cput = GETTIME + cput = MPI_WTIME() select case( if11def ) ! !12345678901234567 case( 0 ) ; if( Wtr00ab ) write(ounit,1020) cput-cpus, myid, lvol, innout, id, if11def, cput-lcpu, "solved sparse ; ", slambda(Ndof,id), reqdits, rnorm case( 1 ) ; write(ounit,1020) cput-cpus, myid, lvol, innout, id, if11def, cput-lcpu, "input error ; ", zero , reqdits, rnorm @@ -544,7 +701,13 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi case( 4 ) ; write(ounit,1020) cput-cpus, myid, lvol, innout, id, if11def, cput-lcpu, "reqd acc. fail ; ", zero , reqdits, rnorm case( 5 ) ; write(ounit,1020) cput-cpus, myid, lvol, innout, id, if11def, cput-lcpu, "reqd acc. fail ; ", zero , reqdits, rnorm case( 6 ) ; write(ounit,1020) cput-cpus, myid, lvol, innout, id, if11def, cput-lcpu, "serious error ; ", zero , reqdits, rnorm - case default ; FATAL( tr00ab, .true., illegal ifail returned by f11def ) + case default ; + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; illegal ifail returned by f11def ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : illegal ifail returned by f11def ;" + endif + end select 1020 format("tr00ab : ",f10.2," ; myid=",i3," ; lvol=",i3," ; innout="i2" ; ideriv="i2" ; if11def="i2" ; time="f10.4" ; "a17,:" [d]iota="es17.09& " ; its="i6" rnorm="es13.5" ;") @@ -555,16 +718,22 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi if( Lsparse.eq.1 ) then ! 
use dense-solver; 24 Apr 13; - lcpu = GETTIME + lcpu = MPI_WTIME() if04atf = 1 ; IA = Ndof ; IAA = Ndof call F04ATF( rmatrix(1:Ndof,1:Ndof, 0), IA, rrhs(1:Ndof,id), Ndof, rlambda(1:Ndof,id), AA(1:IAA,1:Ndof), IAA, wks1(1:Ndof), wks2(1:Ndof), if04atf ) - cput = GETTIME + cput = MPI_WTIME() select case( if04atf ) ! !12345678901234567 case( 0 ) ; if( Wtr00ab ) write(ounit,1025) cput-cpus, myid, lvol, innout, id, if04atf, cput-lcpu, "solved real ; ", rlambda(Ndof,id) case( 1 ) ; write(ounit,1025) cput-cpus, myid, lvol, innout, id, if04atf, cput-lcpu, "singular ; " case( 2 ) ; write(ounit,1025) cput-cpus, myid, lvol, innout, id, if04atf, cput-lcpu, "ill-conditioned ;" case( 3 ) ; write(ounit,1025) cput-cpus, myid, lvol, innout, id, if04atf, cput-lcpu, "input error ; " - case default ; FATAL( tr00ab, .true., illegal ifail returned by f04atf ) + case default ; + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; illegal ifail returned by f04atf ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : illegal ifail returned by f04atf ;" + endif + end select 1025 format("tr00ab : ",f10.2," ; myid=",i3," ; lvol=",i3," ; innout="i2" ; ideriv="i2" ; if04atf="i2" ; time="f10.4" ; "a17,:" [d]iota="es17.09" ;") ldiota(innout,ideriv) = rlambda(Ndof,id) ! return intent out; 23 Apr 13; @@ -573,7 +742,9 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi enddo ! end of do ideriv; 23 Apr 13; - DALLOCATE(swork) + + deallocate(swork,stat=astat) + endif ! end of if( Lsparse.gt.0 ); @@ -622,7 +793,13 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi !$OMP ATOMIC UPDATE dmatrix(ii ,jj ,ideriv) = dmatrix(ii ,jj ,ideriv) + ( - mj * lAze(kk,ideriv) + nj * lAte(kk,ideriv) ) * half if( NOTstellsym) then - FATAL( tr00ab,ii+mns-1.lt.1 .or. ii+mns-1.gt.NN .or. jj+mns-1.lt.1 .or. jj+mns-1.gt.NN, illegal subscript ) ! THIS CAN BE DELETED EVENTUALLY; + + if( ii+mns-1.lt.1 .or. ii+mns-1.gt.NN .or. jj+mns-1.lt.1 .or. jj+mns-1.gt.NN ) then + write(6,'("tr00ab : fatal : myid=",i3," ; ii+mns-1.lt.1 .or. ii+mns-1.gt.NN .or. jj+mns-1.lt.1 .or. jj+mns-1.gt.NN ; illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : ii+mns-1.lt.1 .or. ii+mns-1.gt.NN .or. jj+mns-1.lt.1 .or. jj+mns-1.gt.NN : illegal subscript ;" + endif + ! THIS CAN BE DELETED EVENTUALLY; !$OMP ATOMIC UPDATE dmatrix(ii+mns-1,jj ,ideriv) = dmatrix(ii+mns-1,jj ,ideriv) + ( - mj * lAzo(kk,ideriv) + nj * lAto(kk,ideriv) ) * half !$OMP ATOMIC UPDATE @@ -640,11 +817,23 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi if( ii.lt.1 ) cycle - FATAL( tr00ab,ii.gt.NN .or. jj.gt.NN, illegal subscript ) ! THIS CAN BE DELETED EVENTUALLY; 02 Sep 14; + + if( ii.gt.NN .or. jj.gt.NN ) then + write(6,'("tr00ab : fatal : myid=",i3," ; ii.gt.NN .or. jj.gt.NN ; illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : ii.gt.NN .or. jj.gt.NN : illegal subscript ;" + endif + ! THIS CAN BE DELETED EVENTUALLY; 02 Sep 14; !$OMP ATOMIC UPDATE dmatrix(ii ,jj ,ideriv) = dmatrix(ii ,jj ,ideriv) + ( - mj * lAze(kk,ideriv) + nj * lAte(kk,ideriv) ) * half if( NOTstellsym) then - FATAL( tr00ab,ii+mns-1.lt.1 .or. ii+mns-1.gt.NN .or. jj+mns-1.lt.1 .or. jj+mns-1.gt.NN, illegal subscript ) ! THIS CAN BE DELETED EVENTUALLY; + + if( ii+mns-1.lt.1 .or. ii+mns-1.gt.NN .or. jj+mns-1.lt.1 .or. jj+mns-1.gt.NN ) then + write(6,'("tr00ab : fatal : myid=",i3," ; ii+mns-1.lt.1 .or. ii+mns-1.gt.NN .or. 
jj+mns-1.lt.1 .or. jj+mns-1.gt.NN ; illegal subscript ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : ii+mns-1.lt.1 .or. ii+mns-1.gt.NN .or. jj+mns-1.lt.1 .or. jj+mns-1.gt.NN : illegal subscript ;" + endif + ! THIS CAN BE DELETED EVENTUALLY; !$OMP ATOMIC UPDATE dmatrix(ii+mns-1,jj ,ideriv) = dmatrix(ii+mns-1,jj ,ideriv) + ( - mj * lAzo(kk,ideriv) + nj * lAto(kk,ideriv) ) * half * iotaksgn(kk,jj) !$OMP ATOMIC UPDATE @@ -683,10 +872,16 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi if( iflag.eq.-1) then ; call DGEMV('N',NN,NN,-one,dmatrix(1,1,-1),NN,dlambda(1,0),1,one,drhs(1,-1),1) ! BLAS version 21 Jul 19 endif case default - FATAL( tr00ab, .true., invalid jderiv ) + + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; invalid jderiv ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : invalid jderiv ;" + endif + end select - lcpu = GETTIME ! record time taken in dgesvx; 09 Nov 17; + lcpu = MPI_WTIME() ! record time taken in dgesvx; 09 Nov 17; select case( Lsvdiota ) @@ -705,7 +900,7 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi NN, rcond, ferr, berr, work4(1:4*NN), iwork4(1:NN), idgesvx ) ; ldiota(innout, 0) = dlambda(1, 0) ! return intent out; 21 Apr 13; - ; dlambdaout(1:NN, lvol, innout) = dlambda(1:NN,0) + ; dlambdaout(1:NN, lvol, innout) = dlambda(1:NN,0) case( 1 ) ! Lsvdiota = 0; jderiv = 1; 02 Sep 14; @@ -726,20 +921,38 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi case default - FATAL( tr00ab, .true., invalid jderiv ) + + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; invalid jderiv ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : invalid jderiv ;" + endif + end select ! end of select case jderiv; 02 Sep 14; - cput = GETTIME + cput = MPI_WTIME() select case( idgesvx ) !12345678901234567 case( 0 ) ; if( Wtr00ab ) write(ounit,1030) cput-cpus, myid, lvol, innout, id, "idgesvx", idgesvx, cput-lcpu, "solved Fourier ; ", dlambda(1,0) case( 1: ) ; write(ounit,1030) cput-cpus, myid, lvol, innout, id, "idgesvx", idgesvx, cput-lcpu, "singular ; " case( :-1 ) ; write(ounit,1030) cput-cpus, myid, lvol, innout, id, "idgesvx", idgesvx, cput-lcpu, "input error ; " - case default ; FATAL( tr00ab, .true., illegal ifail returned by dgesvx ) + case default ; + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; illegal ifail returned by dgesvx ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : illegal ifail returned by dgesvx ;" + endif + end select - FATAL( tr00ab, idgesvx.ne.0, failed to construct straight-fieldline angle using dgesvx ) + + if( idgesvx.ne.0 ) then + write(6,'("tr00ab : fatal : myid=",i3," ; idgesvx.ne.0 ; failed to construct straight-fieldline angle using dgesvx ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : idgesvx.ne.0 : failed to construct straight-fieldline angle using dgesvx ;" + endif + case( 1 ) ! Lsvdiota = 1; use least-squares to invert linear equations that define the straight fieldline angle; 01 Jul 14; @@ -751,11 +964,19 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! 
construct straight-fi Lwork = (63+8*nlvl)*NN+676 Liwork = max(1,11*NN+3*nlvl*NN) - SALLOCATE( work, (1:Lwork), zero ) + + allocate( work(1:Lwork), stat=astat ) + work(1:Lwork) = zero + if (allocated(iwork)) then - DALLOCATE(iwork) + + deallocate(iwork,stat=astat) + endif - SALLOCATE( iwork, (1:Liwork), zero ) + + allocate( iwork(1:Liwork), stat=astat ) + iwork(1:Liwork) = zero + select case( jderiv ) @@ -789,33 +1010,65 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi ldiota(innout,imupf) = dlambda(1,imupf) enddo else - FATAL( tr00ab, .true., invalid iflag ) + + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; invalid iflag ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : invalid iflag ;" + endif + endif case default - FATAL( tr00ab, .true., invalid jderiv ) + + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; invalid jderiv ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : invalid jderiv ;" + endif + end select ! end of select case( jderiv) ; 02 Sep 14; - DALLOCATE(work) - cput = GETTIME + deallocate(work,stat=astat) + + + cput = MPI_WTIME() select case( idgelsd ) !12345678901234567 case( 0 ) ; if( Wtr00ab) write(ounit,1030) cput-cpus, myid, lvol, innout, id, "idgelsd", idgelsd, cput-lcpu, "solved Fourier ; ", dlambda(1,0) case( :-1 ) ; write(ounit,1030) cput-cpus, myid, lvol, innout, id, "idgelsd", idgelsd, cput-lcpu, "input error ; " case( 1: ) ; write(ounit,1030) cput-cpus, myid, lvol, innout, id, "idgelsd", idgelsd, cput-lcpu, "QR failed ; " - case default ; FATAL( tr00ab, .true., illegal ifail returned by f04arf ) + case default ; + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; illegal ifail returned by f04arf ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : illegal ifail returned by f04arf ;" + endif + end select - FATAL( tr00ab, idgelsd.ne.0, failed to construct straight-fieldline angle using dgelsd ) + + if( idgelsd.ne.0 ) then + write(6,'("tr00ab : fatal : myid=",i3," ; idgelsd.ne.0 ; failed to construct straight-fieldline angle using dgelsd ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : idgelsd.ne.0 : failed to construct straight-fieldline angle using dgelsd ;" + endif + dmatrix(1:NN,1:NN, 0) = omatrix(1:NN,1:NN) ! original "unperturbed" matrix; 30 Jan 13; case default - FATAL( tr00ab, .true., illegal Lsvdiota ) + + if( .true. ) then + write(6,'("tr00ab : fatal : myid=",i3," ; .true. ; illegal Lsvdiota ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "tr00ab : .true. : illegal Lsvdiota ;" + endif + end select ! end of select case( Lsvdiota ) ; 02 Sep 14; @@ -833,7 +1086,7 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi if( Lsparse.eq.3 ) then ! compare estimates for rotational-transform provided by Fourier method and real-space method; - cput = GETTIME + cput = MPI_WTIME() do ideriv = -1, 2 ; id = ideriv @@ -859,30 +1112,62 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
if( Lsparse.eq.1 ) then - DALLOCATE(rmatrix) - DALLOCATE(rrhs) - DALLOCATE(rlambda) - DALLOCATE(wks1) - DALLOCATE(wks2) - DALLOCATE(AA) + + deallocate(rmatrix,stat=astat) + + + deallocate(rrhs,stat=astat) + + + deallocate(rlambda,stat=astat) + + + deallocate(wks1,stat=astat) + + + deallocate(wks2,stat=astat) + + + deallocate(AA,stat=astat) + endif if( Lsparse.ge.2 ) then - DALLOCATE(smatrix) - DALLOCATE(srhs) - DALLOCATE(irow) - DALLOCATE(jcol) - DALLOCATE(slambda) - DALLOCATE(istr) - DALLOCATE(iwork) + + deallocate(smatrix,stat=astat) + + + deallocate(srhs,stat=astat) + + + deallocate(irow,stat=astat) + + + deallocate(jcol,stat=astat) + + + deallocate(slambda,stat=astat) + + + deallocate(istr,stat=astat) + + + deallocate(iwork,stat=astat) + endif #endif if( Lsparse.eq.0 .or. Lsparse.eq.3 ) then - DALLOCATE( dmatrix ) - DALLOCATE( omatrix ) - DALLOCATE( FAA ) + + deallocate(dmatrix ,stat=astat) + + + deallocate(omatrix ,stat=astat) + + + deallocate(FAA ,stat=astat) + endif @@ -892,7 +1177,12 @@ subroutine tr00ab( lvol, mn, NN, Nt, Nz, iflag, ldiota ) ! construct straight-fi !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(tr00ab) + +9999 continue + cput = MPI_WTIME() + Ttr00ab = Ttr00ab + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/volume.f90 b/src/volume.F90 similarity index 85% rename from src/volume.f90 rename to src/volume.F90 index 76581dd1..b3439f5b 100644 --- a/src/volume.f90 +++ b/src/volume.F90 @@ -32,7 +32,7 @@ !> subroutine volume( lvol, vflag ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, two, four, third, quart, pi2 @@ -56,26 +56,48 @@ subroutine volume( lvol, vflag ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, intent(in) :: lvol +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, intent(in) :: lvol - INTEGER :: vflag, Lcurvature + integer :: vflag, Lcurvature - INTEGER :: jvol, ii, jj, kk, mi, ni, mj, nj, mk, nk, innout + integer :: jvol, ii, jj, kk, mi, ni, mj, nj, mk, nk, innout - REAL :: vol(0:1), vint(1:Ntz) + real(wp) :: vol(0:1), vint(1:Ntz) - REAL :: Rei, Roi, Zei, Zoi, Rej, Roj, Zej, Zoj, Rek, Rok, Zek, Zok + real(wp) :: Rei, Roi, Zei, Zoi, Rej, Roj, Zej, Zoj, Rek, Rok, Zek, Zok - REAL :: AA, BB, CC, DD, lss + real(wp) :: AA, BB, CC, DD, lss + + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif - BEGIN(volume) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! #ifdef DEBUG - FATAL( volume, lvol.lt.1 .or. lvol.gt.Mvol, invalid volume ) ! 15 Jan 13; + + if( lvol.lt.1 .or. lvol.gt.Mvol ) then + write(6,'("volume : fatal : myid=",i3," ; lvol.lt.1 .or. 
lvol.gt.Mvol ; invalid volume ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "volume : lvol.lt.1 .or. lvol.gt.Mvol : invalid volume ;" + endif + ! 15 Jan 13; #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -113,7 +135,13 @@ subroutine volume( lvol, vflag ) vol(innout) = iRbc(1,jvol) ! 20 Jun 14; #ifdef DEBUG - FATAL( volume, dBdX%L .and. dBdX%irz.eq.1, volume does not depend on Z ) + + if( dBdX%L .and. dBdX%irz.eq.1 ) then + write(6,'("volume : fatal : myid=",i3," ; dBdX%L .and. dBdX%irz.eq.1 ; volume does not depend on Z ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "volume : dBdX%L .and. dBdX%irz.eq.1 : volume does not depend on Z ;" + endif + #endif if( dBdX%L .and. dBdX%innout.eq.innout .and. dBdX%ii.eq.1 ) then ! compute derivative of volume; @@ -132,7 +160,13 @@ subroutine volume( lvol, vflag ) !> \f} #ifdef DEBUG - FATAL( volume, dBdX%L .and. dBdX%irz.eq.1, volume does not depend on Z for cylindrical geometry ) + + if( dBdX%L .and. dBdX%irz.eq.1 ) then + write(6,'("volume : fatal : myid=",i3," ; dBdX%L .and. dBdX%irz.eq.1 ; volume does not depend on Z for cylindrical geometry ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "volume : dBdX%L .and. dBdX%irz.eq.1 : volume does not depend on Z for cylindrical geometry ;" + endif + #endif if( YESstellsym ) then @@ -169,7 +203,13 @@ subroutine volume( lvol, vflag ) if( dBdX%issym.eq.0 ) then ! stellarator-symmetric harmonic; dV/dRei ; 13 Sep 13; dvolume = dvolume + iRbc(jj,jvol) * ( djkp(jj,ii) + djkm(jj,ii) + djkp(ii,jj) + djkm(ii,jj) ) else - FATAL( volume, .true., derivatives of volume under construction ) + + if( .true. ) then + write(6,'("volume : fatal : myid=",i3," ; .true. ; derivatives of volume under construction ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "volume : .true. : derivatives of volume under construction ;" + endif + dvolume = dvolume + iRbs(jj,jvol) * ( djkp(jj,ii) - djkm(jj,ii) + djkp(ii,jj) - djkm(ii,jj) ) ! needs to be checked; 02 Sep 14; endif endif @@ -246,7 +286,12 @@ subroutine volume( lvol, vflag ) Lcurvature = 1 lss = innout * two - one - WCALL( volume, coords, ( lvol, lss, Lcurvature, Ntz, mn ) ) + + cput = MPI_WTIME() + Tvolume = Tvolume + ( cput-cpuo ) + call coords( lvol, lss, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + vint = Rij(1:Ntz,0,0) * (Zij(1:Ntz,0,0)*Rij(1:Ntz,2,0) - Zij(1:Ntz,2,0)*Rij(1:Ntz,0,0)) vol(innout) = four * sum(vint) / float(Ntz) @@ -307,7 +352,7 @@ subroutine volume( lvol, vflag ) #ifdef DEBUG if( Wvolume ) then - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("volume : ",f10.2," : myid=",i3," ; lvol=",i3," ; vol=",2f15.10," ;")') cput-cpus, myid, lvol, vol(0:1) endif #endif @@ -317,9 +362,21 @@ subroutine volume( lvol, vflag ) case( 2 ) ; vvolume(lvol) = ( vol(1) - vol(0) ) * pi2pi2nfpquart ; dvolume = dvolume * pi2pi2nfpquart case( 3 ) ; vvolume(lvol) = ( vol(1) - vol(0) ) * pi2pi2nfpquart * third ; dvolume = dvolume * pi2pi2nfpquart * third case( 4 ) ; vvolume(lvol) = one ; dvolume = zero ! 
this is under construction; 04 Dec 14; - FATAL( volume, abs(pscale).gt.vsmall,need to compute volume ) + + if( abs(pscale).gt.vsmall ) then + write(6,'("volume : fatal : myid=",i3," ; abs(pscale).gt.vsmall ; need to compute volume ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "volume : abs(pscale).gt.vsmall : need to compute volume ;" + endif + case default - FATAL( volume, .true., invalid Igeometry ) + + if( .true. ) then + write(6,'("volume : fatal : myid=",i3," ; .true. ; invalid Igeometry ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "volume : .true. : invalid Igeometry ;" + endif + end select if( dBdX%innout.eq.0 ) dvolume = - dvolume @@ -327,13 +384,19 @@ subroutine volume( lvol, vflag ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! if( Wvolume ) then - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("volume : ",f10.2," : myid=",i3," ; Igeometry=",i2," ; vvolume(",i3," ) =",es23.15" ;")') cput-cpus, myid, Igeometry, lvol, vvolume(lvol) endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - FATAL( volume, vflag.eq.0 .and. vvolume(lvol).lt.small, volume cannot be zero or negative ) ! 15 Jan 13; + + if( vflag.eq.0 .and. vvolume(lvol).lt.small ) then + write(6,'("volume : fatal : myid=",i3," ; vflag.eq.0 .and. vvolume(lvol).lt.small ; volume cannot be zero or negative ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "volume : vflag.eq.0 .and. vvolume(lvol).lt.small : volume cannot be zero or negative ;" + endif + ! 15 Jan 13; if( vvolume(lvol).lt.small ) then write(ounit,'("volume : ", 10x ," : myid=",i3," ; lvol=",i3," ; vvolume=",es13.5," ; volume cannot be zero or negative ;")') myid, lvol, vvolume(lvol) @@ -345,7 +408,12 @@ subroutine volume( lvol, vflag ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - RETURN(volume) + +9999 continue + cput = MPI_WTIME() + Tvolume = Tvolume + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! diff --git a/src/wa00aa.f90 b/src/wa00aa.F90 similarity index 71% rename from src/wa00aa.f90 rename to src/wa00aa.F90 index d76edf0a..715c34e5 100644 --- a/src/wa00aa.f90 +++ b/src/wa00aa.F90 @@ -5,34 +5,34 @@ !> \brief ...todo... module laplaces - + use mod_kinds, only: wp => dp LOGICAL :: stage1 !< what is this ? LOGICAL :: exterior !< what is this ? LOGICAL :: dorm !< what is this ? - INTEGER :: Nintervals !< what is this ? - INTEGER :: Nsegments !< what is this ? - INTEGER :: IC !< what is this ? - INTEGER :: NP4 !< what is this ? - INTEGER :: NP1 !< what is this ? - INTEGER, allocatable :: icint(:) !< what is this ? - REAL :: originalalpha !< what is this ? - REAL, allocatable :: xpoly(:) !< what is this ? - REAL, allocatable :: ypoly(:) !< what is this ? - REAL, allocatable :: phi(:) !< what is this ? - REAL, allocatable :: phid(:) !< what is this ? - REAL, allocatable :: CC(:,:) !< what is this ? + integer :: Nintervals !< what is this ? + integer :: Nsegments !< what is this ? + integer :: IC !< what is this ? + integer :: NP4 !< what is this ? + integer :: NP1 !< what is this ? + integer, allocatable :: icint(:) !< what is this ? 
+ real(wp) :: originalalpha !< what is this ? + real(wp), allocatable :: xpoly(:) !< what is this ? + real(wp), allocatable :: ypoly(:) !< what is this ? + real(wp), allocatable :: phi(:) !< what is this ? + real(wp), allocatable :: phid(:) !< what is this ? + real(wp), allocatable :: CC(:,:) !< what is this ? - INTEGER :: ilength !< what is this ? - REAL :: totallength !< what is this ? + integer :: ilength !< what is this ? + real(wp) :: totallength !< what is this ? - INTEGER :: niterations !< counter; eventually redundant; 24 Oct 12; + integer :: niterations !< counter; eventually redundant; 24 Oct 12; - INTEGER :: iangle !< angle ; eventually redundant; 24 Oct 12; + integer :: iangle !< angle ; eventually redundant; 24 Oct 12; - REAL :: Rmid !< used to define local polar coordinate; eventually redundant; 24 Oct 12; - REAL :: Zmid !< used to define local polar coordinate; eventually redundant; 24 Oct 12; + real(wp) :: Rmid !< used to define local polar coordinate; eventually redundant; 24 Oct 12; + real(wp) :: Zmid !< used to define local polar coordinate; eventually redundant; 24 Oct 12; - REAL :: alpha !< eventually redundant; 24 Oct 12; + real(wp) :: alpha !< eventually redundant; 24 Oct 12; end module laplaces @@ -47,7 +47,7 @@ end module laplaces !> !> subroutine wa00aa( iwa00aa ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, ten, pi2 @@ -71,40 +71,79 @@ subroutine wa00aa( iwa00aa ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER, parameter :: Nconstraints = 1, lengthwork = Nconstraints * ( 3*Nconstraints + 13 ) / 2 ! required for C05NBF; +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer, parameter :: Nconstraints = 1, lengthwork = Nconstraints * ( 3*Nconstraints + 13 ) / 2 ! required for C05NBF; - INTEGER :: iwa00aa, Lcurvature, Nwall, iwall, ii, ifail + integer :: iwa00aa, Lcurvature, Nwall, iwall, ii, ifail - REAL :: lss, lRZ(1:2), px, py, Rmin, Rmax, Zmin, Zmax, xtol - REAL :: rho(1:Nconstraints), fvec(1:Nconstraints), realwork(1:lengthwork) + real(wp) :: lss, lRZ(1:2), px, py, Rmin, Rmax, Zmin, Zmax, xtol + real(wp) :: rho(1:Nconstraints), fvec(1:Nconstraints), realwork(1:lengthwork) - REAL, allocatable :: RZwall(:,:) + real(wp), allocatable :: RZwall(:,:) !REAL :: phiwall external :: VacuumPhi - BEGIN(wa00aa) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
#ifdef DEBUG - FATAL( wa00aa, myid.ne.0, error ) - FATAL( wa00aa, Ntor.gt.0, presently axisymmetry is assumed but this can easily be generalized ) + + if( myid.ne.0 ) then + write(6,'("wa00aa : fatal : myid=",i3," ; myid.ne.0 ; error ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wa00aa : myid.ne.0 : error ;" + endif + + + if( Ntor.gt.0 ) then + write(6,'("wa00aa : fatal : myid=",i3," ; Ntor.gt.0 ; presently axisymmetry is assumed but this can easily be generalized ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wa00aa : Ntor.gt.0 : presently axisymmetry is assumed but this can easily be generalized ;" + endif + #endif !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! lss = one ; Lcurvature = 0 ; Lcoordinatesingularity = .false. - WCALL( wa00aa, co01aa, ( Nvol, lss, Lcurvature, Ntz, mn ) ) ! get plasma boundary, which serves as inner boundary; 10 Apr 13; + + cput = MPI_WTIME() + Twa00aa = Twa00aa + ( cput-cpuo ) + call co01aa( Nvol, lss, Lcurvature, Ntz, mn ) + cpuo = MPI_WTIME() + ! get plasma boundary, which serves as inner boundary; 10 Apr 13; !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! open(gunit, file="wall.dat", status='old', action='read', iostat=ios ) ! read polygon, which serves as outer boundary; 10 Apr 13; - FATAL( wa00aa, ios.ne.0, error opening wall.dat ) + + if( ios.ne.0 ) then + write(6,'("wa00aa : fatal : myid=",i3," ; ios.ne.0 ; error opening wall.dat ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wa00aa : ios.ne.0 : error opening wall.dat ;" + endif + Nwall = 0 do @@ -114,13 +153,28 @@ subroutine wa00aa( iwa00aa ) enddo close(gunit) - SALLOCATE( RZwall, (1:2,1:Nwall), zero ) + + allocate( RZwall(1:2,1:Nwall), stat=astat ) + RZwall(1:2,1:Nwall) = zero + open(gunit,file="wall.dat",status='old',action='read',iostat=ios) - FATAL( wa00aa, ios.ne.0,error opening wall.dat ) + + if( ios.ne.0 ) then + write(6,'("wa00aa : fatal : myid=",i3," ; ios.ne.0 ; error opening wall.dat ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wa00aa : ios.ne.0 : error opening wall.dat ;" + endif + read(gunit,*,iostat=ios) RZwall(1:2,1:Nwall) ! MUST GO ANTI-CLOCKWISE; LAST-POINT = FIRST POINT; - FATAL( wa00aa, ios.ne.0, error reading RZwall from wall.dat ) + + if( ios.ne.0 ) then + write(6,'("wa00aa : fatal : myid=",i3," ; ios.ne.0 ; error reading RZwall from wall.dat ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wa00aa : ios.ne.0 : error reading RZwall from wall.dat ;" + endif + close(gunit) @@ -140,19 +194,37 @@ subroutine wa00aa( iwa00aa ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - SALLOCATE( xpoly, (1:Nsegments), zero ) - SALLOCATE( ypoly, (1:Nsegments), zero ) - SALLOCATE( phi , (1:Nintervals), zero ) ! must set to boundary value of phi; - SALLOCATE( phid, (1:Nintervals), zero ) ! can leave this as zero; + allocate( xpoly(1:Nsegments), stat=astat ) + xpoly(1:Nsegments) = zero + + + allocate( ypoly(1:Nsegments), stat=astat ) + ypoly(1:Nsegments) = zero + + + + allocate( phi (1:Nintervals), stat=astat ) + phi (1:Nintervals) = zero + ! must set to boundary value of phi; + + allocate( phid(1:Nintervals), stat=astat ) + phid(1:Nintervals) = zero + ! 
can leave this as zero; IC = Nintervals + 1 NP4 = Nintervals + 4 NP1 = Nintervals + 1 - SALLOCATE( CC , (1:IC,1:NP4), zero ) - SALLOCATE( ICINT, ( 1:NP1), zero ) + + allocate( CC (1:IC,1:NP4), stat=astat ) + CC (1:IC,1:NP4) = zero + + + allocate( ICINT( 1:NP1), stat=astat ) + ICINT( 1:NP1) = zero + iwall = 0 @@ -185,7 +257,7 @@ subroutine wa00aa( iwa00aa ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! if( Wwa00aa ) then - cput = GETTIME + cput = MPI_WTIME() do ii = 1, iwall ; write(ounit,'("wa00aa : ",f10.2," : Nsegments="i9" ; ii="i9" ; R="f15.9" ; Z="f15.9" ;")') cput-cpus, Nsegments, ii, xpoly(ii), ypoly(ii) enddo endif @@ -206,14 +278,20 @@ subroutine wa00aa( iwa00aa ) call D03EAF( stage1, exterior, dorm, Nintervals, px, py, xpoly, ypoly, Nsegments, phi, phid, alpha, CC, IC, NP4, ICINT, NP1, ifail ) - cput = GETTIME + cput = MPI_WTIME() ; ; write(ounit,'("wa00aa : ", 10x ," : ")') select case( ifail ) case( 0 ) ; write(ounit,'("wa00aa : ",f10.2," : prepared vacuum calculation; stage1="L2" ; ifail=",i3," ; success ; ")') cput-cpus, stage1, ifail case( 1 ) ; write(ounit,'("wa00aa : ",f10.2," : prepared vacuum calculation; stage1="L2" ; ifail=",i3," ; invalid tolerance ;")') cput-cpus, stage1, ifail case( 2 ) ; write(ounit,'("wa00aa : ",f10.2," : prepared vacuum calculation; stage1="L2" ; ifail=",i3," ; incorrect rank ; ")') cput-cpus, stage1, ifail case default - FATAL( wa00aa, .true., invalid ifail returned by D03EAF ) + + if( .true. ) then + write(6,'("wa00aa : fatal : myid=",i3," ; .true. ; invalid ifail returned by D03EAF ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wa00aa : .true. : invalid ifail returned by D03EAF ;" + endif + end select stage1 = .false. ; originalalpha = alpha @@ -238,7 +316,7 @@ subroutine wa00aa( iwa00aa ) call C05NBF( VacuumPhi, Nconstraints, rho(1:Nconstraints), fvec(1:Nconstraints), xtol, realwork(1:lengthwork), lengthwork, ifail ) - cput = GETTIME + cput = MPI_WTIME() select case( ifail ) case( :-1 ) ; write(ounit,'("wa00aa : ",f10.2," : iangle="i6" ; ifail=",i3," ; outside domain ; FAIL ; ")') cput-cpus, iangle, ifail case( 0 ) ; if( Wwa00aa ) write(ounit,'("wa00aa : ",f10.2," : iangle="i6" ; ifail=",i3," ; success ; ")') cput-cpus, iangle, ifail @@ -247,7 +325,13 @@ subroutine wa00aa( iwa00aa ) case( 3 ) ; write(ounit,'("wa00aa : ",f10.2," : iangle="i6" ; ifail=",i3," ; xtol is too small ; FAIL ;")') cput-cpus, iangle, ifail case( 4 ) ; write(ounit,'("wa00aa : ",f10.2," : iangle="i6" ; ifail=",i3," ; bad progress ; FAIL ; ")') cput-cpus, iangle, ifail case default - FATAL( wa00aa, .true., invalid ifail returned by C05NBF ) + + if( .true. ) then + write(6,'("wa00aa : fatal : myid=",i3," ; .true. ; invalid ifail returned by C05NBF ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wa00aa : .true. : invalid ifail returned by C05NBF ;" + endif + end select if( ifail.ne.0 ) exit @@ -267,7 +351,7 @@ subroutine wa00aa( iwa00aa ) iwa00aa = ifail - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("wa00aa : ",f10.2," : constructed outer boundary; iwa00aa=",i3," ;")') cput-cpus, iwa00aa ! 24 Oct 12; if( ifail.ne.0 ) goto 9999 @@ -282,7 +366,12 @@ subroutine wa00aa( iwa00aa ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
- RETURN(wa00aa) + +9999 continue + cput = MPI_WTIME() + Twa00aa = Twa00aa + ( cput-cpuo ) + return + !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -301,7 +390,7 @@ end subroutine wa00aa !> @param fvec !> @param iflag subroutine VacuumPhi( Nconstraints, rho, fvec, iflag ) - + use mod_kinds, only: wp => dp !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! use constants, only : zero, half, one, pi2 @@ -316,14 +405,22 @@ subroutine VacuumPhi( Nconstraints, rho, fvec, iflag ) !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! - LOCALS - INTEGER :: Nconstraints, iflag - REAL :: rho(1:Nconstraints), fvec(1:Nconstraints), angle, px, py +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + integer :: Nconstraints, iflag + real(wp) :: rho(1:Nconstraints), fvec(1:Nconstraints), angle, px, py !REAL :: phiwall - INTEGER :: ifail + integer :: ifail !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! @@ -341,14 +438,20 @@ subroutine VacuumPhi( Nconstraints, rho, fvec, iflag ) call D03EAF( stage1, exterior, dorm, Nintervals, px, py, xpoly, ypoly, Nsegments, phi, phid, alpha, CC, IC, NP4, ICINT, NP1, ifail ) - cput = GETTIME + cput = MPI_WTIME() select case( ifail ) case( 0 ) ; if( Wwa00aa ) write(ounit,'("wa00aa : ",f10.2," : stage1="L2" ; ifail=",i3," ; success ; ")') cput-cpus, stage1, ifail case( 1 ) ; write(ounit,'("wa00aa : ",f10.2," : stage1="L2" ; ifail=",i3," ; invalid tolerance ;")') cput-cpus, stage1, ifail case( 2 ) ; write(ounit,'("wa00aa : ",f10.2," : stage1="L2" ; ifail=",i3," ; incorrect rank ; ")') cput-cpus, stage1, ifail case default - FATAL( wa00aa, .true., invalid ifail returned by D03EAF ) + + if( .true. ) then + write(6,'("wa00aa : fatal : myid=",i3," ; .true. ; invalid ifail returned by D03EAF ;")') myid + call MPI_ABORT( MPI_COMM_SPEC, 1, ierr ) + stop "wa00aa : .true. : invalid ifail returned by D03EAF ;" + endif + end select if( rho(1).lt.zero ) iflag = -1 ! could also check that R, Z are within domain; diff --git a/src/xspech.f90 b/src/xspech.F90 similarity index 78% rename from src/xspech.f90 rename to src/xspech.F90 index 05fbcc0a..588a88fd 100644 --- a/src/xspech.f90 +++ b/src/xspech.F90 @@ -20,7 +20,7 @@ end program spec_main !>
  • write the output file(s)
  • !> subroutine xspech - + use mod_kinds, only: wp => dp use numerical use allglobal, only: set_mpi_comm, myid, ncpu, cpus, version, MPI_COMM_SPEC, & wrtend, read_inputlists_from_file, check_inputs, broadcast_inputs, skip_write, & @@ -33,32 +33,48 @@ subroutine xspech hdfint, finish_outfile, write_grid use cputiming, only: Txspech - LOCALS - CHARACTER :: ldate*8, ltime*10, arg*100 +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + + + character :: ldate*8, ltime*10, arg*100 #ifdef DEBUG character(len=255) :: hostname integer :: iwait, pid, status - INTEGER, external :: getpid, hostnm + integer, external :: getpid, hostnm #endif call MPI_INIT( ierr ) - BEGIN(xspech) + + cpui = MPI_WTIME() + cpuo = cpui +#ifdef OPENMP + nthreads = omp_get_max_threads() +#else + nthreads = 1 +#endif + ! set default communicator to MPI_COMM_WORLD call set_mpi_comm(MPI_COMM_WORLD) ! set initial time - cpus = GETTIME + cpus = MPI_WTIME() cpuo = cpus ! explicitly enable writing of HDF5 output file skip_write = .false. ! print header: version of SPEC, compilation info, current date and time, machine precision - cput = GETTIME + cput = MPI_WTIME() if( myid.eq.0 ) then ! screen output header @@ -175,12 +191,14 @@ subroutine xspech call MPI_Barrier(MPI_COMM_SPEC, ierr) if (myid.eq.0) then - cput = GETTIME + cput = MPI_WTIME() write(ounit,'("xspech : ", 10x ," :")') write(ounit,'("xspech : ",f10.2," : myid=",i3," : time="f8.2"m = "f6.2"h = "f5.2"d ;")') cput-cpus, myid, (cput-cpus) / (/ 60, 60*60, 24*60*60 /) endif - MPIFINALIZE + + call MPI_FINALIZE(ierr) + stop @@ -201,21 +219,29 @@ end subroutine xspech !> !> subroutine read_command_args - + use mod_kinds, only: wp => dp use fileunits, only: ounit use inputlist, only: Wreadin use allglobal, only: cpus, myid, ext, MPI_COMM_SPEC, write_spec_namelist - LOCALS + +#ifdef OPENMP + USE OMP_LIB +#endif + use mpi + implicit none + integer :: ierr, astat, ios, nthreads, ithread + real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13; + LOGICAL :: Lspexist - INTEGER :: iargc, iarg, numargs, extlen, sppos + integer :: iargc, iarg, numargs, extlen, sppos - CHARACTER(len=100) :: arg + character(len=255) :: arg if (myid.eq.0) then - cput = GETTIME + cput = MPI_WTIME() ! first command-line argument is likely ext or ext.sp call getarg( 1, arg ) @@ -231,10 +257,12 @@ subroutine read_command_args write(ounit,'("rdcmdl : ", 10x ," : Additional arguments:")') write(ounit,'("rdcmdl : ", 10x ," : -readin : print debugging information during reading inputs")') call MPI_ABORT( MPI_COMM_SPEC, 0, ierr ) + stop case ("-i", "--init") write(ounit,'("rdcmdl : ", 10x ," : write a template input file in example.sp")') call write_spec_namelist() call MPI_ABORT( MPI_COMM_SPEC, 0, ierr ) + stop case default extlen = len_trim(arg) sppos = index(arg, ".sp", .true.) ! search for ".sp" from the back of ext @@ -275,7 +303,7 @@ end subroutine read_command_args !> and within each Picard iteration, the fixed-boundary problem !> is solved (also iteratively). subroutine spec - + use mod_kinds, only: wp => dp use constants, only : zero, one, pi2, mu0 use numerical, only : vsmall, logtolerance @@ -321,25 +349,42 @@ subroutine spec !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-! 
-  LOCALS
+
+#ifdef OPENMP
+  USE OMP_LIB
+#endif
+  use mpi
+  implicit none
+  integer :: ierr, astat, ios, nthreads, ithread
+  real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13;
+
  LOGICAL              :: LComputeDerivatives, LContinueFreeboundaryIterations, exist, LupdateBn, LComputeAxis
-  INTEGER              :: imn, lmn, lNfp, lim, lin, ii, ideriv, stat
-  INTEGER              :: vvol, ifail, wflag, iflag, vflag
-  REAL                 :: rflag, lastcpu, bnserr, lRwc, lRws, lZwc, lZws, lItor, lGpol, lgBc, lgBs
-  REAL, allocatable    :: position(:), gradient(:)
-  CHARACTER            :: pack
-  INTEGER              :: Lfindzero_old, mfreeits_old
-  REAL                 :: gBnbld_old
-  INTEGER              :: lnPtrj, numTrajTotal
+  integer              :: imn, lmn, lNfp, lim, lin, ii, ideriv, stat
+  integer              :: vvol, ifail, wflag, iflag, vflag
+  real(wp)             :: rflag, lastcpu, bnserr, lRwc, lRws, lZwc, lZws, lItor, lGpol, lgBc, lgBs
+  real(wp), allocatable :: position(:), gradient(:)
+  character            :: pack
+  integer              :: Lfindzero_old, mfreeits_old
+  real(wp)             :: gBnbld_old
+  integer              :: lnPtrj, numTrajTotal
 
 !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!
 
-  cpuo = GETTIME
+  cpuo = MPI_WTIME()
+
+
+  if( NGdof.lt.0 ) then
+    write(6,'("xspech : fatal : myid=",i3," ; NGdof.lt.0 ; counting error ;")') myid
+    call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+    stop "xspech : NGdof.lt.0 : counting error ;"
+  endif
 
-  FATAL( xspech, NGdof.lt.0, counting error )
-  SALLOCATE( position, (0:NGdof), zero ) ! position ; NGdof = #geometrical degrees-of-freedom was computed in preset;
+
+  allocate( position(0:NGdof), stat=astat )
+  position(0:NGdof) = zero
+  ! position ; NGdof = #geometrical degrees-of-freedom was computed in preset;
 
 !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!
 
@@ -382,8 +427,13 @@ subroutine spec
 
   pack = 'P'
   LComputeAxis = .true.
-   WCALL( xspech, packxi, ( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), &
-                            iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, .false., LComputeAxis ) )
+
+   cput = MPI_WTIME()
+   Txspech = Txspech + ( cput-cpuo )
+   call packxi( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), &
+                iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, .false., LComputeAxis )
+   cpuo = MPI_WTIME()
+
  endif
 
@@ -401,9 +451,24 @@ subroutine spec
 
  do vvol = 1, Mvol
 
-    LREGION(vvol)
+
+    if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false.
+    else                                     ; Lcoordinatesingularity = .true.
+    endif
+
+    if( vvol.le.Nvol ) then ; Lplasmaregion = .true.
+    else                    ; Lplasmaregion = .false.
+    endif
+
+    Lvacuumregion = .not.Lplasmaregion
+
    vflag = 0
-    WCALL( xspech, volume, ( vvol, vflag ) ) ! compute volume;
+
+    cput = MPI_WTIME()
+    Txspech = Txspech + ( cput-cpuo )
+    call volume( vvol, vflag )
+    cpuo = MPI_WTIME()
+    ! compute volume;
 
    if( Ladiabatic.eq.0 ) adiabatic(vvol) = pressure(vvol) * vvolume(vvol)**gamma ! initialize adiabatic constants using supplied pressure profile;
 
@@ -413,7 +478,7 @@ subroutine spec
    endif
 
    if( Wxspech .and. myid.eq.0 ) then
-      cput = GETTIME
+      cput = MPI_WTIME()
      write(ounit,'("xspech : ",f10.2," : myid=",i3," ; adiabatic constants = "999es13.5)') cput-cpus, myid, adiabatic(1:Mvol)
    endif
 
@@ -446,14 +511,24 @@ subroutine spec
 
  ! This is the call to do one fixed-boundary iteration (by a Newton method).
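The LREGION(vvol) macro is expanded in place wherever it occurred, so the same three assignments to Lcoordinatesingularity, Lplasmaregion and Lvacuumregion now appear verbatim at several call sites (above, and again in final_diagnostics further down). A hypothetical helper that would centralise that logic is sketched here; it is not part of this patch, and it assumes the flags live in allglobal and Igeometry/Nvol in inputlist, as elsewhere in SPEC:

      subroutine set_region( vvol )
        use allglobal, only : Lcoordinatesingularity, Lplasmaregion, Lvacuumregion  ! assumed location
        use inputlist, only : Igeometry, Nvol                                       ! assumed location
        implicit none
        integer, intent(in) :: vvol

        ! coordinate singularity only in the innermost volume of non-slab geometry
        Lcoordinatesingularity = .not.( Igeometry.eq.1 .or. vvol.gt.1 )
        Lplasmaregion          = ( vvol.le.Nvol )      ! volumes beyond Nvol form the vacuum region
        Lvacuumregion          = .not.Lplasmaregion
      end subroutine set_region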
   ifail = 1
-   WCALL( xspech, newton, ( NGdof, position(0:NGdof), ifail ) )
+
+   cput = MPI_WTIME()
+   Txspech = Txspech + ( cput-cpuo )
+   call newton( NGdof, position(0:NGdof), ifail )
+   cpuo = MPI_WTIME()
+
  endif
 
  pack = 'U' ! unpack geometrical degrees of freedom; 13 Sep 13;
  LComputeAxis = .true.
-  WCALL( xspech, packxi, ( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), &
-                           iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, .false., LComputeAxis ) )
+
+  cput = MPI_WTIME()
+  Txspech = Txspech + ( cput-cpuo )
+  call packxi( NGdof, position(0:NGdof), Mvol, mn, iRbc(1:mn,0:Mvol), iZbs(1:mn,0:Mvol), &
+               iRbs(1:mn,0:Mvol), iZbc(1:mn,0:Mvol), pack, .false., LComputeAxis )
+  cpuo = MPI_WTIME()
+
  endif
 
@@ -475,7 +550,13 @@ subroutine spec
 
 #ifdef DEBUG
  do vvol = 1, Mvol
-   FATAL( xspech, vvolume(vvol).lt.vsmall, error dividing adiabatic by volume )
+
+   if( vvolume(vvol).lt.vsmall ) then
+     write(6,'("xspech : fatal : myid=",i3," ; vvolume(vvol).lt.vsmall ; error dividing adiabatic by volume ;")') myid
+     call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+     stop "xspech : vvolume(vvol).lt.vsmall : error dividing adiabatic by volume ;"
+   endif
+
  enddo
 #endif
 
@@ -483,36 +564,70 @@ subroutine spec
 
 !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!
 
-  SALLOCATE( gradient, (0:NGdof), zero )
-  lastcpu = GETTIME
+
+  allocate( gradient(0:NGdof), stat=astat )
+  gradient(0:NGdof) = zero
+
+
+  lastcpu = MPI_WTIME()
 
  LComputeDerivatives = .false.
  LComputeAxis = .true.
 
! vvol = Mvol ; ideriv = 0 ; ii = 1
! write(ounit,'("xspech : ", 10x ," : sum(Ate(",i3,",",i2,",",i2,")%s) =",99es23.15)') vvol, ideriv, ii, sum(Ate(vvol,ideriv,ii)%s(0:Lrad(vvol)))
 
-  WCALL( xspech, dforce, ( NGdof, position(0:NGdof), gradient(0:NGdof), LComputeDerivatives, LComputeAxis) ) ! (re-)calculate Beltrami fields;
-  DALLOCATE(gradient)
+
+  cput = MPI_WTIME()
+  Txspech = Txspech + ( cput-cpuo )
+  call dforce( NGdof, position(0:NGdof), gradient(0:NGdof), LComputeDerivatives, LComputeAxis)
+  cpuo = MPI_WTIME()
+  ! (re-)calculate Beltrami fields;
+
+
+  deallocate(gradient,stat=astat)
+
 
 #ifdef DEBUG
  do vvol = 1, Mvol-1
-   ; FATAL( xspech, BBe(vvol).lt.logtolerance, underflow )
+   ;
+   if( BBe(vvol).lt.logtolerance ) then
+     write(6,'("xspech : fatal : myid=",i3," ; BBe(vvol).lt.logtolerance ; underflow ;")') myid
+     call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+     stop "xspech : BBe(vvol).lt.logtolerance : underflow ;"
+   endif
+
   if( Igeometry.eq.3 ) then ! include spectral constraints; 04 Dec 14;
-    ;FATAL( xspech, IIo(vvol).lt.logtolerance, underflow )
+    ;
+    if( IIo(vvol).lt.logtolerance ) then
+      write(6,'("xspech : fatal : myid=",i3," ; IIo(vvol).lt.logtolerance ; underflow ;")') myid
+      call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+      stop "xspech : IIo(vvol).lt.logtolerance : underflow ;"
+    endif
+
   endif
   if( NOTstellsym ) then
-    ;FATAL( xspech, BBo(vvol).lt.logtolerance, underflow )
+    ;
+    if( BBo(vvol).lt.logtolerance ) then
+      write(6,'("xspech : fatal : myid=",i3," ; BBo(vvol).lt.logtolerance ; underflow ;")') myid
+      call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+      stop "xspech : BBo(vvol).lt.logtolerance : underflow ;"
+    endif
+
    if( Igeometry.eq.3 ) then ! include spectral constraints; 04 Dec 14;
-     FATAL( xspech, IIe(vvol).lt.logtolerance, underflow )
+
+     if( IIe(vvol).lt.logtolerance ) then
+       write(6,'("xspech : fatal : myid=",i3," ; IIe(vvol).lt.logtolerance ; underflow ;")') myid
+       call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+       stop "xspech : IIe(vvol).lt.logtolerance : underflow ;"
+     endif
+
    endif
  endif
  enddo
 #endif
 
  if( myid.eq.0 ) then
-   cput = GETTIME
+   cput = MPI_WTIME()
   write(ounit,1000) cput-cpus, nfreeboundaryiterations, ForceErr, cput-lastcpu, "|BB|e", alog10(BBe(1:min(Mvol-1,28)))
   if( Igeometry.ge.3 ) then ! include spectral constraints; 04 Dec 14;
    write(ounit,1001) "|II|o", alog10(IIo(1:min(Mvol-1,28)))
@@ -533,12 +648,17 @@ subroutine spec
 
  if( Lcheck.eq.5 .or. LHevalues .or. LHevectors .or. LHmatrix .or. Lperturbed.eq.1 ) then ! check construction of Hessian; 01 Jul 14;
 
   if( myid.eq.0 ) then
-    cput = GETTIME
+    cput = MPI_WTIME()
    write(ounit,'("xspech : ", 10x ," : -------------------Stability Evaluations------------------ ")')
    write(ounit,'("xspech : ",f10.2," : myid=",i3," ; calling hessian; see .ext.hessian.myid ;")') cput-cpus, myid
   endif
 
-   WCALL( xspech, hesian, ( NGdof, position(0:NGdof), Mvol, mn, LGdof ) )
+
+   cput = MPI_WTIME()
+   Txspech = Txspech + ( cput-cpuo )
+   call hesian( NGdof, position(0:NGdof), Mvol, mn, LGdof )
+   cpuo = MPI_WTIME()
+
 
  endif ! end of if( Lcheck.eq.5 ) ; 01 Jul 14;
 
@@ -610,9 +730,14 @@ subroutine spec
 
   Mvol = Nvol + Lfreebound
 
-   lastcpu = GETTIME
+   lastcpu = MPI_WTIME()
+
-   WCALL( xspech, bnorml, ( mn, Ntz, efmn(1:mn), ofmn(1:mn) ) ) ! compute normal field etc. on computational boundary;
+   cput = MPI_WTIME()
+   Txspech = Txspech + ( cput-cpuo )
+   call bnorml( mn, Ntz, efmn(1:mn), ofmn(1:mn) )
+   cpuo = MPI_WTIME()
+   ! compute normal field etc. on computational boundary;
 
  !FATAL( xspech, mn-1.le.0, divide by zero )
 
@@ -635,7 +760,13 @@ subroutine spec
 
   case( -2 ) ! mfreeits = -2 ; shall set plasma normal field at computational boundary ; 24 Nov 16;
 
    inquire( file=trim(ext)//".Vn", exist=exist )
 
-    FATAL( xspech, .not.exist, ext.Vn does not exist : cannot set vacuum field)
+
+    if( .not.exist ) then
+      write(6,'("xspech : fatal : myid=",i3," ; .not.exist ; ext.Vn does not exist : cannot set vacuum field;")') myid
+      call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+      stop "xspech : .not.exist : ext.Vn does not exist : cannot set vacuum field ;"
+    endif
+
 
    if( myid.eq.0 ) then ! only myid = 0 reads in the vacuum field; 04 Jan 17;
 
@@ -659,9 +790,13 @@ subroutine spec
 
    endif ! end of if( myid.eq.0 ) ; 07 Dec 16;
 
-    ;RlBCAST( iVns(1:mn), mn, 0 ) ! only required for ii > 1 ;
+    ;
+    call MPI_BCAST(iVns(1:mn),mn,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr)
+    ! only required for ii > 1 ;
    if( NOTstellsym ) then
-     RlBCAST( iVnc(1:mn), mn, 0 )
+
+     call MPI_BCAST(iVnc(1:mn),mn,MPI_DOUBLE_PRECISION,0 ,MPI_COMM_SPEC,ierr)
+
    endif
 
   ;iBns(2:mn) = - iBns(2:mn) - iVns(2:mn) ! updated vacuum field ; 24 Nov 16;
 
@@ -687,7 +822,13 @@ subroutine spec
 
   case( 0 ) ! mfreeits = 0 ; 09 Mar 17;
 
-    FATAL( xspech, .true., illegal mfreeits logic )
+
+    if( .true. ) then
+      write(6,'("xspech : fatal : myid=",i3," ; .true. ; illegal mfreeits logic ;")') myid
+      call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+      stop "xspech : .true. : illegal mfreeits logic ;"
+    endif
+
 
   case( 1: ) ! mfreeits > 0 ; 09 Mar 17;
 
@@ -708,7 +849,13 @@ subroutine spec
 
    case default ! Lzerovac; 09 Mar 17;
 
-     FATAL( xspech, .true., invalid Lzerovac )
+
+     if( .true. ) then
+       write(6,'("xspech : fatal : myid=",i3," ; .true. ; invalid Lzerovac ;")') myid
+       call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+       stop "xspech : .true. : invalid Lzerovac ;"
+     endif
+
 
    end select ! end select case( Lzerovac ) ; 27 Feb 17;
 
@@ -720,7 +867,7 @@ subroutine spec
 
  endif ! end of if( bnserr.gt.gBntol ) ; 24 Nov 16;
 
-  cput = GETTIME
+  cput = MPI_WTIME()
 
  if( myid.eq.0 ) then ; write(ounit,1003)
  ;                    ; write(ounit,1004) cput-cpus, nfreeboundaryiterations, mfreeits, gBntol, bnserr, cput-lastcpu
@@ -740,10 +887,20 @@ subroutine spec
 !>
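Every WCALL( xspech, sub, (args) ) in this file becomes the same four-line wrapper: the time accumulated since cpuo is charged to Txspech, the callee runs (keeping its own timer), and cpuo is reset. A compact, self-contained illustration of the pattern; the worker routine is a placeholder for packxi, volume, newton, hesian, bnorml and so on:

      subroutine timed_call_demo
        use mpi
        use cputiming, only : Txspech        ! per-routine timer, as used in this patch
        implicit none
        real(8) :: cput, cpuo

        cpuo = MPI_WTIME()
        ! ... work that belongs to the caller ...

        cput    = MPI_WTIME()
        Txspech = Txspech + ( cput-cpuo )    ! charge elapsed time to the caller
        call worker()                        ! was: WCALL( xspech, worker, (...) )
        cpuo    = MPI_WTIME()                ! restart the caller's clock

      contains
        subroutine worker                    ! placeholder callee
        end subroutine worker
      end subroutine timed_call_demo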
 !> - The vector potential is written to file using ra00aa() .
 !>
-  WCALL( xspech, ra00aa, ('W') ) ! this writes vector potential to file;
+
+  cput = MPI_WTIME()
+  Txspech = Txspech + ( cput-cpuo )
+  call ra00aa('W')
+  cpuo = MPI_WTIME()
+  ! this writes vector potential to file;
 
  if( myid.eq.0 ) then ! write restart file; note that this is inside free-boundary iteration loop; 11 Aug 14;
-   WCALL( xspech, wrtend ) ! write restart file; save initial input;
+
+   cput = MPI_WTIME()
+   Txspech = Txspech + ( cput-cpuo )
+   call wrtend
+   cpuo = MPI_WTIME()
+   ! write restart file; save initial input;
  endif
 
 !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!
 
@@ -768,7 +925,7 @@ end subroutine spec
 !>
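SALLOCATE and DALLOCATE are likewise expanded into plain allocate/deallocate with stat=astat plus an explicit initialisation; the status code is captured but not tested in the expanded code. A self-contained sketch of the pattern, in which the check on astat is an optional hardening rather than something this patch adds:

      subroutine salloc_demo( NGdof )
        implicit none
        integer, parameter    :: wp   = kind(1.0d0)              ! stands in for mod_kinds' dp
        real(wp), parameter   :: zero = 0.0_wp
        integer, intent(in)   :: NGdof
        integer               :: astat
        real(wp), allocatable :: position(:)

        allocate( position(0:NGdof), stat=astat )                ! was: SALLOCATE( position, (0:NGdof), zero )
        if( astat.ne.0 ) stop "salloc_demo : allocation failed"  ! optional check, not in the patch
        position(0:NGdof) = zero

        ! ... work with position(0:NGdof) ...

        deallocate( position, stat=astat )                       ! was: DALLOCATE( position )
      end subroutine salloc_demo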
 !> - pp00aa() is called to construct the Poincare plot by field-line following.
 !>
 subroutine final_diagnostics
-
+  use mod_kinds, only: wp => dp
  use inputlist, only: nPtrj, nPpts, Igeometry, Lcheck, Nvol, odetol, &
                       Isurf, Ivolume, mu, Wmacros, Ltransform
  use fileunits, only: ounit
  use allglobal, only: dlambdaout, diotadxup
 
-  LOCALS
+
+#ifdef OPENMP
+  USE OMP_LIB
+#endif
+  use mpi
+  implicit none
+  integer :: ierr, astat, ios, nthreads, ithread
+  real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13;
+
  integer              :: iocons, llmodnp, vvol, iflag, cpu_id
  real                 :: sumI
-  REAL, allocatable    :: Bt00(:,:,:)
-  REAL                 :: work(0:1,-1:2)
+  real(wp), allocatable :: Bt00(:,:,:)
+  real(wp)             :: work(0:1,-1:2)
 
@@ -848,10 +1013,26 @@ subroutine final_diagnostics
 
     if (IsMyVolumeValue.eq.0) then
       cycle
     elseif (IsMyVolumeValue.eq.-1) then
-      FATAL( xspech, .true., Unassociated volume )
+
+      if( .true. ) then
+        write(6,'("xspech : fatal : myid=",i3," ; .true. ; Unassociated volume ;")') myid
+        call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+        stop "xspech : .true. : Unassociated volume ;"
+      endif
+
     endif
 
-     LREGION( vvol )
+
+     if( Igeometry.eq.1 .or. vvol .gt.1 ) then ; Lcoordinatesingularity = .false.
+     else                                      ; Lcoordinatesingularity = .true.
+     endif
+
+     if( vvol .le.Nvol ) then ; Lplasmaregion = .true.
+     else                     ; Lplasmaregion = .false.
+     endif
+
+     Lvacuumregion = .not.Lplasmaregion
+
     call tr00ab( vvol, mn, lmns, Nt, Nz, iflag, diotadxup(0:1,-1:2, vvol) ) ! stores lambda in a global variable.
   enddo
 
@@ -859,8 +1040,12 @@ subroutine final_diagnostics
   ! Broadcast
   do vvol = 1, Mvol
     call WhichCpuID( vvol, cpu_id )
-    RlBCAST( diotadxup(0:1,-1:2,vvol), 8, cpu_id )
-    RlBCAST( dlambdaout(1:lmns, vvol, 0:1), 2*lmns, cpu_id )
+
+    call MPI_BCAST(diotadxup(0:1,-1:2,vvol),8,MPI_DOUBLE_PRECISION,cpu_id ,MPI_COMM_SPEC,ierr)
+
+
+    call MPI_BCAST(dlambdaout(1:lmns, vvol, 0:1),2*lmns,MPI_DOUBLE_PRECISION,cpu_id ,MPI_COMM_SPEC,ierr)
+
   enddo
 endif
 
@@ -869,11 +1054,24 @@ subroutine final_diagnostics
 
 !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!
 
  ! Computes the surface current at each interface for output
-  SALLOCATE( Bt00, (1:Mvol, 0:1, -1:2) , zero)
+
+  allocate( Bt00(1:Mvol, 0:1, -1:2) , stat=astat )
+  Bt00(1:Mvol, 0:1, -1:2) = zero
+
  do vvol = 1, Mvol
 
-    LREGION(vvol)
+
+    if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false.
+    else                                     ; Lcoordinatesingularity = .true.
+    endif
+
+    if( vvol.le.Nvol ) then ; Lplasmaregion = .true.
+    else                    ; Lplasmaregion = .false.
+    endif
+
+    Lvacuumregion = .not.Lplasmaregion
+
   do iocons = 0, 1
    if( ( Lcoordinatesingularity .and. iocons.eq.0 ) .or. ( Lvacuumregion .and. iocons.eq.1 ) ) cycle
 
@@ -893,7 +1091,9 @@ subroutine final_diagnostics
   IPDt(vvol) = pi2 * (Bt00(vvol+1, 0, 0) - Bt00(vvol, 1, 0))
  enddo
 
-  DALLOCATE( Bt00 )
+
+  deallocate(Bt00 ,stat=astat)
+
 
  ! Evaluate volume current
  sumI = 0
 
@@ -904,7 +1104,7 @@ subroutine final_diagnostics
 
 ! screen info about diagnostics; 20 Jun 14;
 
 if (myid.eq.0) then
-   cput = GETTIME
+   cput = MPI_WTIME()
 
  if( nPpts.gt.0 ) then
   write(ounit,'("xspech : ", 10x ," :")')
 
@@ -920,11 +1120,21 @@ subroutine final_diagnostics
 
  do vvol = 1, Mvol
 
-   LREGION(vvol)
+
+   if( Igeometry.eq.1 .or. vvol.gt.1 ) then ; Lcoordinatesingularity = .false.
+   else                                     ; Lcoordinatesingularity = .true.
+   endif
+
+   if( vvol.le.Nvol ) then ; Lplasmaregion = .true.
+   else                    ; Lplasmaregion = .false.
+   endif
+
+   Lvacuumregion = .not.Lplasmaregion
+
   if( myid.eq.modulo(vvol-1,ncpu) .and. myid.lt.Mvol) then ! the following is in parallel; 20 Jun 14;
 
-   if( .not.ImagneticOK(vvol) ) then ; cput = GETTIME     ; write(ounit,1002) cput-cpus ; write(ounit,1002) cput-cpus, myid, vvol, ImagneticOK(vvol) ; cycle
+   if( .not.ImagneticOK(vvol) ) then ; cput = MPI_WTIME() ; write(ounit,1002) cput-cpus ; write(ounit,1002) cput-cpus, myid, vvol, ImagneticOK(vvol) ; cycle
   endif
 
   ! No need for sc00aa anymore - this is done in lbpol
 
@@ -948,18 +1158,52 @@ subroutine final_diagnostics
 
  do vvol = 1, Mvol ; llmodnp = modulo(vvol-1,ncpu)
 
 #ifdef DEBUG
-   FATAL( xspech, .not.allocated(Btemn), error )
-   FATAL( xspech, .not.allocated(Bzemn), error )
-   FATAL( xspech, .not.allocated(Btomn), error )
-   FATAL( xspech, .not.allocated(Bzomn), error )
+
+   if( .not.allocated(Btemn) ) then
+     write(6,'("xspech : fatal : myid=",i3," ; .not.allocated(Btemn) ; error ;")') myid
+     call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+     stop "xspech : .not.allocated(Btemn) : error ;"
+   endif
+
+
+   if( .not.allocated(Bzemn) ) then
+     write(6,'("xspech : fatal : myid=",i3," ; .not.allocated(Bzemn) ; error ;")') myid
+     call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+     stop "xspech : .not.allocated(Bzemn) : error ;"
+   endif
+
+
+   if( .not.allocated(Btomn) ) then
+     write(6,'("xspech : fatal : myid=",i3," ; .not.allocated(Btomn) ; error ;")') myid
+     call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+     stop "xspech : .not.allocated(Btomn) : error ;"
+   endif
+
+
+   if( .not.allocated(Bzomn) ) then
+     write(6,'("xspech : fatal : myid=",i3," ; .not.allocated(Bzomn) ; error ;")') myid
+     call MPI_ABORT( MPI_COMM_SPEC, 1, ierr )
+     stop "xspech : .not.allocated(Bzomn) : error ;"
+   endif
+
 #endif
 
-   RlBCAST( Btemn(1:mn,0:1,vvol), mn*2, llmodnp ) ! this is computed in lbpol; 07 Dec 16;
-   RlBCAST( Bzemn(1:mn,0:1,vvol), mn*2, llmodnp )
-   RlBCAST( Btomn(1:mn,0:1,vvol), mn*2, llmodnp )
-   RlBCAST( Bzomn(1:mn,0:1,vvol), mn*2, llmodnp )
-   RlBCAST( beltramierror(vvol,1:9), 9, llmodnp ) ! this is computed in jo00aa; 21 Aug 18;
+
+   call MPI_BCAST(Btemn(1:mn,0:1,vvol),mn*2,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr)
+   ! this is computed in lbpol; 07 Dec 16;
+
+   call MPI_BCAST(Bzemn(1:mn,0:1,vvol),mn*2,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr)
+
+
+   call MPI_BCAST(Btomn(1:mn,0:1,vvol),mn*2,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr)
+
+
+   call MPI_BCAST(Bzomn(1:mn,0:1,vvol),mn*2,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr)
+
+
+
+   call MPI_BCAST(beltramierror(vvol,1:9),9,MPI_DOUBLE_PRECISION,llmodnp ,MPI_COMM_SPEC,ierr)
+   ! this is computed in jo00aa; 21 Aug 18;
 
  enddo ! end of do vvol = 1, Mvol; 01 Jul 14;
 
@@ -968,7 +1212,7 @@ end subroutine final_diagnostics
 
 !> \brief Closes output files, writes screen summary.
 !>
 subroutine ending
-
+  use mod_kinds, only: wp => dp
  use constants, only : zero
 
  use fileunits, only : ounit
 
@@ -980,24 +1224,30 @@
 
  use allglobal, only : myid, cpus, mn, MPI_COMM_SPEC, ext
 
 !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!
 
-  LOCALS
 
-  REAL      :: Ttotal, dcpu, ecpu
-  CHARACTER :: date*8, time*10
+#ifdef OPENMP
+  USE OMP_LIB
+#endif
+  use mpi
+  implicit none
+  integer :: ierr, astat, ios, nthreads, ithread
+  real(wp) :: cput, cpui, cpuo=0 ! cpu time; cpu initial; cpu old; 31 Jan 13;
+
+
+  real(wp)  :: Ttotal, dcpu, ecpu
+  character :: date*8, time*10
 
 !-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!-!
 
-  cpui = GETTIME     ; cpuo = cpui ! see macro expansion for begin; 11 Aug 14;
+  cpui = MPI_WTIME() ; cpuo = cpui ! see macro expansion for begin; 11 Aug 14;
 
 #ifdef DEBUG
  if( Wxspech ) write(ounit,'("ending : ",f10.2," : myid=",i3," ; start ;")') cpui-cpus, myid
 #endif
 
-  cput = GETTIME
-
-! SUMTIME ! this is expanded by Makefile, and then again by macros; do not remove;
+  cput = MPI_WTIME()
 
-  cput = GETTIME     ; dcpu = cput-cpus
+  cput = MPI_WTIME() ; dcpu = cput-cpus
 
  if( Ltiming .and. myid.eq.0 ) then
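The RlBCAST calls throughout final_diagnostics become direct MPI_BCAST calls of MPI_DOUBLE_PRECISION buffers on MPI_COMM_SPEC (taken from allglobal, as shown in the use lists above). A minimal stand-alone equivalent of what one such macro call expanded to; the wrapper subroutine itself is hypothetical:

      subroutine rlbcast_demo( buf, n, root )
        use mpi
        use allglobal, only : MPI_COMM_SPEC            ! communicator used throughout this patch
        implicit none
        integer, intent(in)    :: n, root
        real(8), intent(inout) :: buf(n)
        integer                :: ierr

        ! was: RlBCAST( buf(1:n), n, root )
        call MPI_BCAST( buf, n, MPI_DOUBLE_PRECISION, root, MPI_COMM_SPEC, ierr )
      end subroutine rlbcast_demo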