Commit 9e3d952: Merge branch 'main' of https://github.com/ladisk/pyuff

klemengit committed Dec 3, 2024
2 parents eda6a97 + 3681182
Showing 9 changed files with 204 additions and 97 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/python-package.yml
@@ -8,7 +8,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
 
     steps:
     - uses: actions/checkout@v3
@@ -29,4 +29,4 @@ jobs:
         flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
     - name: Test with pytest
       run: |
-        pytest
\ No newline at end of file
+        pytest
9 changes: 2 additions & 7 deletions README.rst
@@ -1,4 +1,4 @@
-|pytest| |documentation| |binder|
+|pytest| |documentation|
 
 pyuff
 =====
@@ -138,13 +138,8 @@ Or we can use support function ``prepare_58`` to prepare the dictionary for creating
         ordinate_spec_data_type=12,
         orddenom_spec_data_type=13)
-|binder| to test the *pyuff Showcase.ipynb* online.
-
-.. |binder| image:: http://mybinder.org/badge.svg
-   :target: http://mybinder.org:/repo/ladisk/pyuff
 .. |pytest| image:: https://github.com/ladisk/pyuff/actions/workflows/python-package.yml/badge.svg
    :target: https://github.com/ladisk/pyuff/actions
 .. |documentation| image:: https://readthedocs.org/projects/pyuff/badge/?version=latest
    :target: https://pyuff.readthedocs.io/en/latest/?badge=latest
-   :alt: Documentation Status
\ No newline at end of file
+   :alt: Documentation Status
Binary file added data/MPSTD#Set001_2024_10_08_10_27_07.uff
Binary file not shown.
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pyuff"
-version = "2.4.3"
+version = "2.4.4"
 authors = [{name = "Primož Čermelj, Janko Slavič", email = "[email protected]"}]
 maintainers = [{name = "Janko Slavič et al.", email = "[email protected]"}]
 license = "MIT"
@@ -40,4 +40,4 @@ documentation = "https://pyuff.readthedocs.io/en/latest/"
 source = "https://github.com/ladisk/pyuff"
 
 [tool.hatch.build.targets.sdist]
-include = ["pyuff/*"]
\ No newline at end of file
+include = ["pyuff/*"]
2 changes: 1 addition & 1 deletion pyuff/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.4.3"
+__version__ = "2.4.4"
 from .pyuff import *
 from .datasets import *

209 changes: 145 additions & 64 deletions pyuff/datasets/dataset_58.py
@@ -1021,8 +1021,18 @@ def _write58(fh, dset, mode='add', _filename=None, force_double=True):
         raise Exception('Error writing data-set #58')
 
 
-def _extract58(block_data):
-    """Extract function at nodal DOF - data-set 58."""
+def _extract58(block_data, header_only=False):
+    """
+    Extract function at nodal DOF - data-set 58.
+
+    :param header_only: False (default). If True, only the header data is
+                        extracted (useful with large files).
+    """
     dset = {'type': 58, 'binary': 0}
     try:
         binary = False
@@ -1067,70 +1077,78 @@ def _extract58(block_data):
                                             'z_axis_axis_units_lab']))
         # Body
         # split_data = ''.join(split_data[13:])
-        if binary:
-            split_data = b''.join(block_data.splitlines(True)[13:])
-            if dset['byte_ordering'] == 1:
-                bo = '<'
-            else:
-                bo = '>'
-            if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 5):
-                # single precision - 4 bytes
-                values = np.asarray(struct.unpack('%c%sf' % (bo, int(len(split_data) / 4)), split_data), 'd')
-            else:
-                # double precision - 8 bytes
-                values = np.asarray(struct.unpack('%c%sd' % (bo, int(len(split_data) / 8)), split_data), 'd')
+        if header_only:
+            # If not reading data, just set placeholders
+            dset['x'] = None
+            dset['data'] = None
         else:
-            values = []
-            split_data = block_data.decode('utf-8', errors='replace').splitlines(True)[13:]
-            if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 5):
-                for line in split_data[:-1]:  # '6E13.5'
-                    values.extend([float(line[13 * i:13 * (i + 1)]) for i in range(len(line) // 13)])
-                else:
-                    line = split_data[-1]
-                    values.extend([float(line[13 * i:13 * (i + 1)]) for i in range(len(line) // 13) if line[13 * i:13 * (i + 1)] != '             '])
-            elif ((dset['ord_data_type'] == 4) or (dset['ord_data_type'] == 6)) and (dset['abscissa_spacing'] == 1):
-                for line in split_data:  # '4E20.12'
-                    values.extend([float(line[20 * i:20 * (i + 1)]) for i in range(len(line) // 20)])
-            elif (dset['ord_data_type'] == 4) and (dset['abscissa_spacing'] == 0):
-                for line in split_data:  # 2(E13.5,E20.12)
-                    values.extend(
-                        [float(line[13 * (i + j) + 20 * (i):13 * (i + 1) + 20 * (i + j)]) \
-                            for i in range(len(line) // 33) for j in [0, 1]])
-            elif (dset['ord_data_type'] == 6) and (dset['abscissa_spacing'] == 0):
-                for line in split_data:  # 1E13.5,2E20.12
-                    values.extend([float(line[0:13]), float(line[13:33]), float(line[33:53])])
-            else:
-                raise Exception('Error reading data-set #58b; not proper data case.')
-
-        values = np.asarray(values)
-        # values = np.asarray([float(str) for str in split_data],'d')
-        if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 4):
-            # Non-complex ordinate data
-            if (dset['abscissa_spacing'] == 0):
-                # Uneven abscissa
-                dset['x'] = values[:-1:2].copy()
-                dset['data'] = values[1::2].copy()
-            else:
-                # Even abscissa
-                n_val = len(values)
-                min_val = dset['abscissa_min']
-                d = dset['abscissa_inc']
-                dset['x'] = min_val + np.arange(n_val) * d
-                dset['data'] = values.copy()
-        elif (dset['ord_data_type'] == 5) or (dset['ord_data_type'] == 6):
-            # Complex ordinate data
-            if (dset['abscissa_spacing'] == 0):
-                # Uneven abscissa
-                dset['x'] = values[:-2:3].copy()
-                dset['data'] = values[1:-1:3] + 1.j * values[2::3]
-            else:
-                # Even abscissa
-                n_val = len(values) / 2
-                min_val = dset['abscissa_min']
-                d = dset['abscissa_inc']
-                dset['x'] = min_val + np.arange(n_val) * d
-                dset['data'] = values[0:-1:2] + 1.j * values[1::2]
-        del values
+            if binary:
+                try:
+                    split_data = b''.join(block_data.splitlines(True)[13:])
+                    if dset['byte_ordering'] == 1:
+                        bo = '<'
+                    else:
+                        bo = '>'
+                    if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 5):
+                        # single precision - 4 bytes
+                        values = np.asarray(struct.unpack('%c%sf' % (bo, int(len(split_data) / 4)), split_data), 'd')
+                    else:
+                        # double precision - 8 bytes
+                        values = np.asarray(struct.unpack('%c%sd' % (bo, int(len(split_data) / 8)), split_data), 'd')
+                except:
+                    raise Exception('Potentially wrong data format (common with binary files from some commercial software). Try using pyuff.fix_58b() to fix your file. For more information, see https://github.com/ladisk/pyuff/issues/61')
+            else:
+                values = []
+                split_data = block_data.decode('utf-8', errors='replace').splitlines(True)[13:]
+                if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 5):
+                    for line in split_data[:-1]:  # '6E13.5'
+                        values.extend([float(line[13 * i:13 * (i + 1)]) for i in range(len(line) // 13)])
+                    else:
+                        line = split_data[-1]
+                        values.extend([float(line[13 * i:13 * (i + 1)]) for i in range(len(line) // 13) if line[13 * i:13 * (i + 1)] != '             '])
+                elif ((dset['ord_data_type'] == 4) or (dset['ord_data_type'] == 6)) and (dset['abscissa_spacing'] == 1):
+                    for line in split_data:  # '4E20.12'
+                        values.extend([float(line[20 * i:20 * (i + 1)]) for i in range(len(line) // 20)])
+                elif (dset['ord_data_type'] == 4) and (dset['abscissa_spacing'] == 0):
+                    for line in split_data:  # 2(E13.5,E20.12)
+                        values.extend(
+                            [float(line[13 * (i + j) + 20 * (i):13 * (i + 1) + 20 * (i + j)]) \
+                                for i in range(len(line) // 33) for j in [0, 1]])
+                elif (dset['ord_data_type'] == 6) and (dset['abscissa_spacing'] == 0):
+                    for line in split_data:  # 1E13.5,2E20.12
+                        values.extend([float(line[0:13]), float(line[13:33]), float(line[33:53])])
+                else:
+                    raise Exception('Error reading data-set #58b; not proper data case.')
+
+            values = np.asarray(values)
+            # values = np.asarray([float(str) for str in split_data],'d')
+            if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 4):
+                # Non-complex ordinate data
+                if (dset['abscissa_spacing'] == 0):
+                    # Uneven abscissa
+                    dset['x'] = values[:-1:2].copy()
+                    dset['data'] = values[1::2].copy()
+                else:
+                    # Even abscissa
+                    n_val = len(values)
+                    min_val = dset['abscissa_min']
+                    d = dset['abscissa_inc']
+                    dset['x'] = min_val + np.arange(n_val) * d
+                    dset['data'] = values.copy()
+            elif (dset['ord_data_type'] == 5) or (dset['ord_data_type'] == 6):
+                # Complex ordinate data
+                if (dset['abscissa_spacing'] == 0):
+                    # Uneven abscissa
+                    dset['x'] = values[:-2:3].copy()
+                    dset['data'] = values[1:-1:3] + 1.j * values[2::3]
+                else:
+                    # Even abscissa
+                    n_val = len(values) / 2
+                    min_val = dset['abscissa_min']
+                    d = dset['abscissa_inc']
+                    dset['x'] = min_val + np.arange(n_val) * d
+                    dset['data'] = values[0:-1:2] + 1.j * values[1::2]
+            del values
     except:
         raise Exception('Error reading data-set #58b')
     return dset
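
In both even-abscissa branches the x axis is not stored in the file; it is rebuilt from the header fields ``abscissa_min`` and ``abscissa_inc``. A minimal sketch of that reconstruction, with hypothetical header values:

import numpy as np

# Hypothetical header values and already-parsed ordinate samples
abscissa_min = 0.0   # would come from dset['abscissa_min']
abscissa_inc = 0.5   # would come from dset['abscissa_inc']
values = np.asarray([1.0, 2.0, 3.0, 4.0])

# Even abscissa, non-complex ordinate: x is implied by the minimum and increment
x = abscissa_min + np.arange(len(values)) * abscissa_inc
data = values.copy()
# x is now [0., 0.5, 1., 1.5] and data is a copy of the samples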
@@ -1484,3 +1502,66 @@ def prepare_58(
 
 
     return dataset
+
+
+def fix_58b(filename, fixed_filename=None):
+    """
+    Opens the UFF file, fixes a common formatting issue and saves the fixed file.
+    Specifically, it fixes the case where the closing ' -1' of a dataset is on
+    its own line, and not right after the data.
+
+    :param filename: filename of the UFF file to be fixed
+    :param fixed_filename: filename to write the fixed UFF file; if None, the
+                           fixed file is saved as 'filename_fixed.uff'
+    """
+
+    if not os.path.exists(filename):
+        raise Exception('Filename does not exist')
+    try:
+        # Open the file in binary read mode
+        with open(filename, 'rb') as fh:
+            data = fh.read()
+    except Exception as e:
+        raise Exception(f'Cannot access the file {filename}: {e}')
+    else:
+        try:
+            lines = data.splitlines(keepends=True)
+
+            # Fix 1: Adjust ending ' -1' line
+            if len(lines) >= 1 and lines[-1].strip() == b'-1':
+                if len(lines) >= 2:
+                    # Move ' -1' up to the end of the previous line
+                    prev_line = lines[-2].rstrip(b'\r\n')
+                    prev_line += b' -1' + lines[-1][-1:]  # Keep the newline character
+                    lines[-2] = prev_line
+                    lines.pop()  # Remove the last line
+                else:
+                    pass
+
+            # Fix 2: Adjust 'data\n -1\n -1\n data' patterns
+            i = 0
+            while i < len(lines) - 3:
+                if (lines[i+1].strip() == b'-1' and lines[i+2].strip() == b'-1'):
+                    # Move ' -1' from lines[i+1] to the end of lines[i]
+                    data_line = lines[i].rstrip(b'\r\n')  # Remove newline characters
+                    data_line += b' -1' + lines[i+1][-1:]  # Add ' -1' and newline
+                    lines[i] = data_line
+                    del lines[i+1]  # Remove the now-empty line
+                    # Do not increment i to recheck the new line at position i
+                else:
+                    i += 1  # Move to the next line
+
+            # Reassemble the data
+            data = b''.join(lines)
+
+            # Write the fixed data back to the file
+            if fixed_filename is None:
+                base, ext = os.path.splitext(filename)
+                new_filename = f"{base}_fixed{ext}"  # default filename
+            else:
+                new_filename = fixed_filename  # custom filename
+            with open(new_filename, 'wb') as fh:
+                fh.write(data)
+            print('fixed file saved as:', new_filename)
+        except Exception as e:
+            raise Exception(f'Error fixing UFF file: {filename}: {e}')
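
A minimal usage sketch of the ``fix_58b`` helper referenced by the new exception message above (the file names are hypothetical):

import pyuff

# Repair a binary data-set 58 file whose closing ' -1' markers sit on their own lines
pyuff.fix_58b('measurement.uff')                  # saves measurement_fixed.uff
pyuff.fix_58b('measurement.uff', 'repaired.uff')  # or give an explicit output name

Both fixes move a stray delimiter up to the end of the preceding data line, so the repaired file matches the layout ``_extract58`` expects for binary records.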

35 changes: 22 additions & 13 deletions pyuff/pyuff.py
@@ -49,7 +49,7 @@
 from .datasets.dataset_15 import _write15, _extract15, get_structure_15
 from .datasets.dataset_18 import _extract18, get_structure_18
 from .datasets.dataset_55 import _write55, _extract55, get_structure_55
-from .datasets.dataset_58 import _write58, _extract58, get_structure_58
+from .datasets.dataset_58 import _write58, _extract58, get_structure_58, fix_58b
 from .datasets.dataset_82 import _write82, _extract82, get_structure_82
 from .datasets.dataset_151 import _write151, _extract151, get_structure_151
 from .datasets.dataset_164 import _write164, _extract164, get_structure_164
@@ -230,17 +230,20 @@ def refresh(self):
             fh.close()
         return self._refreshed
 
-    def read_sets(self, setn=None):
+    def read_sets(self, setn=None, header_only=False):
         """
-        Reads sets from the list or array ``setn``. If ``setn=None``, all
-        sets are read (default). Sets are numbered starting at 0, ending at
-        n-1. The method returns a list of dset dictionaries - as
-        many dictionaries as there are sets. Unknown data-sets are returned
-        empty.
+        Reads sets.
+
+        The method returns a list of dset dictionaries - as many dictionaries
+        as there are sets. Unknown data-sets are returned empty.
 
         User must be sure that, since the last reading/writing/refreshing,
         the data has not changed by some other means than through the
         UFF object.
+
+        :param setn: None (default); if None, all sets are read. Sets are
+                     numbered starting at 0, ending at n-1. If a number is
+                     given, then only that particular set is read.
+        :param header_only: False (default); if True, only the header is
+                            read. This is useful for large files.
         """
         dset = []
         if setn is None:
@@ -257,11 +260,12 @@ def read_sets(self, setn=None):
                 raise Exception('Cannot read from the file: ' + self._filename)
         try:
             for ii in read_range:
-                dset.append(self._read_set(ii))
+                dset.append(self._read_set(ii, header_only=header_only))
         except Exception as msg:
-            raise Exception('Error when reading ' + str(ii) + '-th data-set: ' + msg.value)
-        except:
-            raise Exception('Error when reading data-set(s)')
+            if hasattr(msg, 'value'):
+                raise Exception('Error when reading ' + str(ii) + '-th data-set: ' + msg.value)
+            else:
+                raise Exception('Error when reading data-set(s).')
         if len(dset) == 1:
             dset = dset[0]
         return dset
Expand Down Expand Up @@ -306,13 +310,18 @@ def write_sets(self, dsets, mode='add', force_double=True):
         else:
             raise Exception('Unknown mode: ' + mode)
 
-    def _read_set(self, n):
+    def _read_set(self, n, header_only=False):
         """
         Reads n-th set from UFF file.
         n can be an integer between 0 and n_sets-1.
 
         User must be sure that, since the last reading/writing/refreshing,
         the data has not changed by some other means than through the
         UFF object. The method returns dset dictionary.
+
+        :param header_only: False (default); if True, only the header is
+                            read. This is useful for large files.
         """
 
         dset = {}
Expand Down Expand Up @@ -351,7 +360,7 @@ def _read_set(self, n):
         elif self._set_types[int(n)] == 55:
             dset = _extract55(block_data)
         elif self._set_types[int(n)] == 58:
-            dset = _extract58(block_data)
+            dset = _extract58(block_data, header_only=header_only)
         elif self._set_types[int(n)] == 82:
             dset = _extract82(block_data)
         elif self._set_types[int(n)] == 151:
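
Taken together with the dataset_58.py changes, a minimal sketch of the new header-only workflow from the user's side (the file name and the selection logic are hypothetical):

import pyuff

uff_file = pyuff.UFF('large_measurement.uff')

# Fast pass: for data-set 58 only the header is parsed; 'x' and 'data' are None
headers = uff_file.read_sets(header_only=True)

# Read in full only the sets that are actually needed, e.g. all type-58 records
wanted = [i for i, h in enumerate(headers) if h.get('type') == 58]
data = uff_file.read_sets(setn=wanted)

Note that only type-58 blocks honour ``header_only``; the other extract functions are called without it, so those sets are read in full either way.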