diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3ab301a..511c0d8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -7,6 +7,11 @@ on: branches: [ "master" ] workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: linux: @@ -33,6 +38,8 @@ jobs: PYNUCLEUS_BUILD_PARALLELISM: 2 OMPI_CC: ${{ matrix.c-compiler }} OMPI_CXX: ${{ matrix.cxx-compiler }} + BUILD_PRETTY_IDENTIFIER: "Linux ${{ matrix.c-compiler }} Python ${{ matrix.py-version }}" + BUILD_IDENTIFIER: "Linux-${{ matrix.c-compiler }}-${{ matrix.py-version }}" steps: - name: Check out repo @@ -45,7 +52,7 @@ jobs: uses: actions/cache/restore@v3 with: path: /home/runner/.cache/ccache - key: ccache-${{ runner.os }}-${{ matrix.c-compiler }}-${{ matrix.py-version }} + key: ccache-${{ env.BUILD_IDENTIFIER }} - uses: actions/setup-python@v4 if: always() @@ -77,7 +84,7 @@ jobs: GH_TOKEN: ${{ github.token }} run: | gh extension install actions/gh-actions-cache - gh actions-cache delete ccache-${{ runner.os }}-${{ matrix.c-compiler}}-${{ matrix.py-version }} --confirm + gh actions-cache delete ccache-${{ env.BUILD_IDENTIFIER }} --confirm continue-on-error: true - name: Push ccache cache @@ -85,7 +92,7 @@ jobs: uses: actions/cache/save@v3 with: path: /home/runner/.cache/ccache - key: ccache-${{ runner.os }}-${{ matrix.c-compiler }}-${{ matrix.py-version }} + key: ccache-${{ env.BUILD_IDENTIFIER }} - name: Ccache report if: always() @@ -93,36 +100,37 @@ jobs: - name: Run tests if: always() - run: python3 -m pytest --junit-xml=test-results-${{ runner.os }}-${{ matrix.c-compiler }}-${{ matrix.py-version }}.xml tests/ + run: python3 -m pytest --junit-xml=test-results-${{ env.BUILD_IDENTIFIER }}.xml tests/ - name: Run flake8 if: always() run: | make flake8 - mv flake8.xml flake8-${{ runner.os }}-${{ matrix.c-compiler }}-${{ matrix.py-version }}.xml + mv flake8.xml flake8-${{ env.BUILD_IDENTIFIER }}.xml - - name: 
Archive test results - uses: actions/upload-artifact@v3 + - name: Run cython-lint if: always() - with: - name: Test results - path: test-results-${{ runner.os }}-${{ matrix.c-compiler }}-${{ matrix.py-version }}.xml + run: | + make cython-lint + mv cython-lint.xml cython-lint-${{ env.BUILD_IDENTIFIER }}.xml - - name: Report test results - uses: dorny/test-reporter@v1 + - name: Archive results + uses: actions/upload-artifact@v3 if: always() with: - name: Test report (${{ runner.os }}, ${{ matrix.c-compiler }}, Python ${{ matrix.py-version }}) - path: test-results-${{ runner.os }}-${{ matrix.c-compiler }}-${{ matrix.py-version }}.xml - reporter: java-junit - fail-on-error: true + name: Results (${{ env.BUILD_PRETTY_IDENTIFIER }}) + path: | + test-results-${{ env.BUILD_IDENTIFIER }}.xml + flake8-${{ env.BUILD_IDENTIFIER }}.xml + cython-lint-${{ env.BUILD_IDENTIFIER }}.xml - - name: Report flake8 results + - name: Report results uses: dorny/test-reporter@v1 if: always() with: - name: Flake8 report (${{ runner.os }}, ${{ matrix.c-compiler }}, Python ${{ matrix.py-version }}) - path: flake8-${{ runner.os }}-${{ matrix.c-compiler }}-${{ matrix.py-version }}.xml + name: Report (${{ env.BUILD_PRETTY_IDENTIFIER }}) + path: | + *-${{ env.BUILD_IDENTIFIER }}.xml reporter: java-junit fail-on-error: false @@ -157,7 +165,7 @@ jobs: key: ccache-${{ runner.os }}-${{ matrix.py-version }} - name: Setup GNU Fortran - uses: modflowpy/install-gfortran-action@v1 + uses: fortran-lang/setup-fortran@v1 - uses: actions/setup-python@v4 with: @@ -173,6 +181,10 @@ jobs: if: always() run: ccache --show-config + - name: Augment path + run: | + echo "$HOME/.local/bin" >> $GITHUB_PATH + - name: Install Python dependencies run: make prereq && make prereq-extra @@ -204,18 +216,34 @@ jobs: if: always() run: python3 -m pytest --junit-xml=test-results-${{ runner.os }}-${{ matrix.py-version }}.xml tests/ - - name: Archive test results + - name: Run flake8 + if: always() + run: | + make flake8 + mv 
flake8.xml flake8-${{ runner.os }}-${{ matrix.py-version }}.xml + + - name: Run cython-lint + if: always() + run: | + make cython-lint + mv cython-lint.xml cython-lint-${{ runner.os }}-${{ matrix.py-version }}.xml + + - name: Archive results uses: actions/upload-artifact@v3 if: always() with: - name: Test results - path: test-results-${{ runner.os }}-${{ matrix.py-version }}.xml + name: Results ${{ github.job }} + path: | + test-results-${{ runner.os }}-${{ matrix.py-version }}.xml + flake8-${{ runner.os }}-${{ matrix.py-version }}.xml + cython-lint-${{ runner.os }}-${{ matrix.py-version }}.xml - - name: Report test results + - name: Report results uses: dorny/test-reporter@v1 if: always() with: - name: Test report (${{ runner.os }}, Python ${{ matrix.py-version }}) - path: test-results-${{ runner.os }}-${{ matrix.py-version }}.xml + name: Report (${{ github.job }}) + path: | + *-${{ runner.os }}-${{ matrix.py-version }}.xml reporter: java-junit - fail-on-error: true + fail-on-error: false diff --git a/Dockerfile b/Dockerfile index 8ff6d0e..0b33aff 100644 --- a/Dockerfile +++ b/Dockerfile @@ -33,8 +33,8 @@ ENV VIRTUAL_ENV=/pynucleus/venv RUN python3 -m venv $VIRTUAL_ENV ENV PATH="$VIRTUAL_ENV/bin:$PATH" WORKDIR /pynucleus -RUN make prereq && \ - make prereq-extra && \ +RUN make prereq PIP_FLAGS=--no-cache-dir && \ + make prereq-extra PIP_FLAGS=--no-cache-dir && \ make install && \ python -m pip install --no-cache-dir ipykernel && \ rm -rf build packageTools/build base/build metisCy/build fem/build multilevelSolver/build nl/build @@ -45,6 +45,8 @@ RUN echo "alias ls='ls --color=auto -FN'" >> /root/.bashrc \ # allow running MPI as root in the container # bind MPI ranks to hwthreads ENV OMPI_MCA_hwloc_base_binding_policy=hwthread \ - MPIEXEC_FLAGS=--allow-run-as-root + MPIEXEC_FLAGS=--allow-run-as-root \ + OMPI_ALLOW_RUN_AS_ROOT=1 \ + OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 RUN python -m ipykernel install --name=PyNucleus diff --git a/Makefile b/Makefile index 
11be666..3a73925 100644 --- a/Makefile +++ b/Makefile @@ -169,8 +169,16 @@ prereq: $(PYTHON) -m pip install $(PIP_FLAGS) $(PIP_INSTALL_FLAGS) scikit-sparse prereq-extra: - $(PYTHON) -m pip install $(PIP_FLAGS) pytest pytest-html pytest-xdist Sphinx sphinxcontrib-programoutput flake8 flake8-junit-report + $(PYTHON) -m pip install $(PIP_FLAGS) pytest pytest-html pytest-xdist Sphinx sphinxcontrib-programoutput flake8 flake8-junit-report cython-lint flake8: - $(PYTHON) -m flake8 --output-file=flake8.txt --exit-zero drivers packageTools base metisCy fem multilevelSolver nl tests + $(PYTHON) -m flake8 --output-file=flake8.txt --exit-zero drivers examples packageTools base metisCy fem multilevelSolver nl tests flake8_junit flake8.txt flake8.xml + rm flake8.txt + +cython-lint: + - cython-lint --max-line-length=160 drivers examples packageTools base metisCy fem multilevelSolver nl tests > cython-lint.txt 2>&1 + flake8_junit cython-lint.txt cython-lint.xml + rm cython-lint.txt + sed 's/name="flake8"/name="cython-lint"/g' cython-lint.xml > cython-lint2.xml + mv cython-lint2.xml cython-lint.xml diff --git a/README.rst b/README.rst index 8f8daec..94ff2aa 100644 --- a/README.rst +++ b/README.rst @@ -99,7 +99,7 @@ For example, on Ubuntu podman can be installed with Instructions for other platforms can be found here: https://podman.io/docs/installation -Once podman is installed, we can download a copy of https://github.com/sandialabs/PyNucleus/compose.yaml and save it to an empty directory. +Once podman is installed, we can download a copy of https://github.com/sandialabs/PyNucleus/blob/master/compose.yaml and save it to an empty directory. In that directory we then run .. 
code-block:: shell @@ -121,7 +121,7 @@ For development using PyNucleus it can be useful to launch a Jupyter notebook se podman compose up pynucleus-jupyter -and then open the access the Jupyter notebook interface at https://localhost:8889 +and then open the Jupyter notebook interface at https://localhost:8889 Spack install diff --git a/base/PyNucleus_base/LinearOperator_{SCALAR}.pxi b/base/PyNucleus_base/LinearOperator_{SCALAR}.pxi index 42708d8..c32204c 100644 --- a/base/PyNucleus_base/LinearOperator_{SCALAR}.pxi +++ b/base/PyNucleus_base/LinearOperator_{SCALAR}.pxi @@ -651,14 +651,12 @@ cdef class {SCALAR_label}Transpose_Linear_Operator({SCALAR_label}LinearOperator) cdef INDEX_t matvec(self, {SCALAR}_t[::1] x, {SCALAR}_t[::1] y) except -1: - self.A.matvecTrans(x, y) - return 0 + return self.A.matvecTrans(x, y) cdef INDEX_t matvec_no_overwrite(self, {SCALAR}_t[::1] x, {SCALAR}_t[::1] y) except -1: - self.A.matvecTrans_no_overwrite(x, y) - return 0 + return self.A.matvecTrans_no_overwrite(x, y) def isSparse(self): return self.A.isSparse() @@ -677,7 +675,10 @@ cdef class {SCALAR_label}Transpose_Linear_Operator({SCALAR_label}LinearOperator) return Bcsr def toarray(self): - return self.A.transpose().toarray() + try: + return self.A.transpose().toarray() + except AttributeError: + return np.ascontiguousarray(self.A.toarray().T) def get_diagonal(self): return np.array(self.A.diagonal, copy=False) diff --git a/base/PyNucleus_base/linear_operators.pyx b/base/PyNucleus_base/linear_operators.pyx index f19d071..163c7e1 100644 --- a/base/PyNucleus_base/linear_operators.pyx +++ b/base/PyNucleus_base/linear_operators.pyx @@ -1213,26 +1213,57 @@ cdef class sumMultiplyOperator(LinearOperator): cdef: INDEX_t i LinearOperator op + int ret op = self.ops[0] - op.matvec(x, y) + ret = op.matvec(x, y) scaleScalar(y, self.coeffs[0]) for i in range(1, self.coeffs.shape[0]): op = self.ops[i] - op.matvec(x, self.z) + ret = min(ret, op.matvec(x, self.z)) assign3(y, y, 1.0, self.z, 
self.coeffs[i]) - return 0 + return ret cdef INDEX_t matvec_no_overwrite(self, REAL_t[::1] x, REAL_t[::1] y) except -1: + cdef: + INDEX_t i + LinearOperator op + int ret = 0 + for i in range(self.coeffs.shape[0]): + op = self.ops[i] + ret = min(op.matvec(x, self.z), ret) + assign3(y, y, 1.0, self.z, self.coeffs[i]) + return ret + + cdef INDEX_t matvecTrans(self, + REAL_t[::1] x, + REAL_t[::1] y) except -1: cdef: INDEX_t i LinearOperator op + int ret + op = self.ops[0] + ret = op.matvecTrans(x, y) + scaleScalar(y, self.coeffs[0]) + for i in range(1, self.coeffs.shape[0]): + op = self.ops[i] + ret = min(ret, op.matvecTrans(x, self.z)) + assign3(y, y, 1.0, self.z, self.coeffs[i]) + return ret + + cdef INDEX_t matvecTrans_no_overwrite(self, + REAL_t[::1] x, + REAL_t[::1] y) except -1: + cdef: + INDEX_t i + LinearOperator op + int ret = 0 for i in range(self.coeffs.shape[0]): op = self.ops[i] - op.matvec(x, self.z) + ret = min(ret, op.matvecTrans(x, self.z)) assign3(y, y, 1.0, self.z, self.coeffs[i]) - return 0 + return ret def toarray(self): return sum([c*op.toarray() for c, op in zip(self.coeffs, self.ops)]) @@ -1436,8 +1467,34 @@ cdef class multiIntervalInterpolationOperator(LinearOperator): interpolationOperator op assert self.selected != -1 op = self.ops[self.selected] - op.matvec(x, y) - return 0 + return op.matvec(x, y) + + cdef INDEX_t matvec_no_overwrite(self, + REAL_t[::1] x, + REAL_t[::1] y) except -1: + cdef: + interpolationOperator op + assert self.selected != -1 + op = self.ops[self.selected] + return op.matvec_no_overwrite(x, y) + + cdef INDEX_t matvecTrans(self, + REAL_t[::1] x, + REAL_t[::1] y) except -1: + cdef: + interpolationOperator op + assert self.selected != -1 + op = self.ops[self.selected] + return op.matvecTrans(x, y) + + cdef INDEX_t matvecTrans_no_overwrite(self, + REAL_t[::1] x, + REAL_t[::1] y) except -1: + cdef: + interpolationOperator op + assert self.selected != -1 + op = self.ops[self.selected] + return 
op.matvecTrans_no_overwrite(x, y) def toarray(self): assert self.selected != -1 @@ -1521,8 +1578,25 @@ cdef class delayedConstructionOperator(LinearOperator): REAL_t[::1] x, REAL_t[::1] y) except -1: self.assure_constructed() - self.A.matvec(x, y) - return 0 + return self.A.matvec(x, y) + + cdef INDEX_t matvec_no_overwrite(self, + REAL_t[::1] x, + REAL_t[::1] y) except -1: + self.assure_constructed() + return self.A.matvec_no_overwrite(x, y) + + cdef INDEX_t matvecTrans(self, + REAL_t[::1] x, + REAL_t[::1] y) except -1: + self.assure_constructed() + return self.A.matvecTrans(x, y) + + cdef INDEX_t matvecTrans_no_overwrite(self, + REAL_t[::1] x, + REAL_t[::1] y) except -1: + self.assure_constructed() + return self.A.matvecTrans_no_overwrite(x, y) def toarray(self): self.assure_constructed() diff --git a/base/PyNucleus_base/utilsFem.py b/base/PyNucleus_base/utilsFem.py index 47057d5..f02e30c 100644 --- a/base/PyNucleus_base/utilsFem.py +++ b/base/PyNucleus_base/utilsFem.py @@ -646,7 +646,7 @@ def diff(self, d): result[p.label] = (p.value, d[p.label]) elif isinstance(p.value, (int, INDEX, REAL, float)): if not np.allclose(p.value, d[p.label], - rtol=rTol, atol=aTol): + rtol=rTol, atol=aTol) and not (np.isnan(p.value) and np.isnan(d[p.label])): print(p.label, p.value, d[p.label], rTol, aTol, p.rTol, p.aTol) result[p.label] = (p.value, d[p.label]) else: diff --git a/compose.yaml b/compose.yaml index 7cf4574..9ecb7db 100644 --- a/compose.yaml +++ b/compose.yaml @@ -25,7 +25,7 @@ services: # Launch with: # docker compose up - # Then open localhost:8888 in your browser + # Then open localhost:8889 in your browser pynucleus-jupyter: image: ghcr.io/sandialabs/pynucleus:latest build: . 
diff --git a/docs/installation.rst b/docs/installation.rst index c2df9cf..8a5d37f 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -28,7 +28,7 @@ For example, on Ubuntu podman can be installed with Instructions for other platforms can be found here: https://podman.io/docs/installation -Once podman is installed, we can download a copy of https://github.com/sandialabs/PyNucleus/compose.yaml and save it to an empty directory. +Once podman is installed, we can download a copy of https://github.com/sandialabs/PyNucleus/blob/master/compose.yaml and save it to an empty directory. In that directory we then run .. code-block:: shell @@ -50,7 +50,7 @@ For development using PyNucleus it can be useful to launch a Jupyter notebook se podman compose up pynucleus-jupyter -and then open the access the Jupyter notebook interface at https://localhost:8889 +and then open the Jupyter notebook interface at https://localhost:8889 Spack install