
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Source: Lennard/messdatensilo (Git revisions: main, latest)

Target: Lennard/messdatensilo (Git revisions: main, latest)
Commits on Source: 12

@@ -2,5 +2,6 @@
out/*
CHANGELOG.md
profiling.log
logs/log
data/data
data.zip
__pycache__/**
\ No newline at end of file
@@ -10,7 +10,7 @@ variables:
workflow:
  rules:
-    - if: $CI_PIPELINE_SOURCE == "schedule"
+    - if: $CI_PIPELINE_SOURCE == "schedule" # only run this pipeline upon a schedule event
convert:
  stage: convert
@@ -27,9 +27,6 @@ convert:
upload:
  stage: upload
  image: curlimages/curl:latest
-  script:
-    - echo "Compiling the code..."
-    - echo "Compile complete."
  script:
    - 'curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file data.zip "${PACKAGE_REGISTRY_URL}/data.zip"'
...
# DMS measurements from the silo
+## Data
+- The measurement data up to the previous day can be downloaded from [Releases](https://gitlab.cvh-server.de/Lennard/messdatensilo/-/releases/latest).
+- The data is stored as `.mat` files, one file per week (see the sketch after this list).
+## How it works
+- A cron job runs the `scripts/run.bash` script after every reboot and every day at 0:00.
+- `run.bash` then runs `python3 main.py`, which collects the data for one day and afterwards uploads it to gitlab.cvh-server.de.
+- The `main.py` program continuously reads the data from the Arduinos, averages it over a set interval and stores it in `data/data`.
+- Shortly before midnight `main.py` stops, renames `data/data` to `log.Year-Month-Day_Hour.log` and deletes the oldest file if too many files exist.
+- The interval, the number of files to keep and further parameters of the program can be changed in `config.yml`.
+- In addition, further log files are kept:
+  - Logs from `python3 main.py` are written to `logs/*`.
+  - Logs from `run.bash` are written to `bash.log`.
\ No newline at end of file
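The weekly `.mat` exports can be inspected directly in Python; a minimal sketch, assuming a file name like `data.2023-01-02.mat` (as produced by the convert script shown further down) and using the variable names from the logger's CSV header (`Timestamp`, `dms1`..`dms4`, `temp1`..`temp4`, `n`):

```python
import scipy.io

# Minimal sketch: load one weekly export from the release (file name is hypothetical).
mat = scipy.io.loadmat("data.2023-01-02.mat")
dms1 = mat["dms1"].ravel()    # strain gauge 1, one averaged value per logging interval
temp1 = mat["temp1"].ravel()  # temperature 1
n = mat["n"].ravel()          # number of raw samples averaged into each row
print(dms1[:5], temp1[:5], n[:5])
```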
File added
Source diff could not be displayed: it is too large.
DataLogger:
  backupCount: 70 # number of datalogs to keep
-  filename: data # filename for datalogs
  levels: # log level for outputting to file and to stdout respectively
    - INFO
-    - INFO
+    - WARNING
InfoLogger:
  backupCount: 10 # number of logs to keep
  maxBytes: 100000 # size of single log in bytes
-  filename: log # filename for logs
  levels: # log level for outputting to file and to stdout respectively
    - INFO
    - WARNING
Data:
  factors: [1.855, 0, 0.923, -1] # factors for the 4 dms
-  delta_time: 10 # time between logging data
+  delta_time: 30 # time between logging data
  smoothing: false # whether to smooth the logged data
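For orientation, a minimal sketch of how these values are consumed; the `yaml.safe_load` call and the key paths mirror the entry point shown later in this diff:

```python
from pathlib import Path
import yaml

# Minimal sketch: load config.yml the way the entry point in this diff does
# and pull out the parameters discussed above.
config = yaml.safe_load(open(f"{Path(__file__).parent}/config.yml"))
delta_time = config["Data"]["delta_time"]            # seconds between logged rows (now 30)
factors = config["Data"]["factors"]                  # calibration factors for the 4 DMS
backup_count = config["DataLogger"]["backupCount"]   # rotated datalogs to keep
print(delta_time, factors, backup_count)
```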
"""Convert csv data into mat files to read into matlab. """Convert csv data into mat files to read into matlab.
Combines the files from one weak and converts it into a single '.mat' file. Combines the files from one week and converts it into a single '.mat' file.
""" """
from datetime import datetime, timedelta from datetime import datetime, timedelta
...@@ -41,14 +41,14 @@ for file in files: ...@@ -41,14 +41,14 @@ for file in files:
Path(f"{Path(__file__).parent}/out").mkdir(parents=True, exist_ok=True) Path(f"{Path(__file__).parent}/out").mkdir(parents=True, exist_ok=True)
# save each week as seperate '.mat' file # save each week as seperate '.mat' file in 'out' folder
for week_start, arr in data.items(): for week_start, arr in data.items():
scipy.io.savemat( scipy.io.savemat(
f"{Path(__file__).parent}/out/data.{week_start}.mat", f"{Path(__file__).parent}/out/data.{week_start}.mat",
mdict={name: column for name, column in zip(header, np.split(arr, arr.shape[1], axis=1))}, mdict={name: column for name, column in zip(header, np.split(arr, arr.shape[1], axis=1))},
) )
# zip folder # zip 'out' folder
shutil.make_archive("data", "zip", "out") shutil.make_archive("data", "zip", "out")
# Update CHANGELOG.md # Update CHANGELOG.md
......
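The grouping of daily logs into weeks is not visible in this hunk; below is a rough, hypothetical sketch of how a `week_start` key could be derived from the rotated datalog names described in the README (`log.Year-Month-Day_Hour.log`), assuming weeks start on Monday:

```python
from datetime import datetime, timedelta

# Hypothetical helper: map a rotated datalog name to the Monday of its week,
# which could serve as the week_start key used when saving the .mat files.
def week_start_from_name(name: str) -> str:
    day = datetime.strptime(name.split(".")[1].split("_")[0], "%Y-%m-%d")
    monday = day - timedelta(days=day.weekday())
    return monday.strftime("%Y-%m-%d")

print(week_start_from_name("log.2023-05-03_23.log"))  # -> 2023-05-01
```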
Source diffs could not be displayed for six files stored in LFS and four files that are too large.
@@ -25,6 +25,7 @@ class TimedRotatingFileHandlerWithHeader(logging.handlers.TimedRotatingFileHandl
        self.first = True
        super().__init__(filename, when=when, interval=interval, backupCount=backupCount, atTime=atTime)
        self.namer = self._namer
+        print(datetime.datetime.fromtimestamp(self.rolloverAt).strftime('%Y-%m-%d %H:%M:%S'))
    @staticmethod
    def _namer(filename: str) -> str:
@@ -32,11 +33,8 @@ class TimedRotatingFileHandlerWithHeader(logging.handlers.TimedRotatingFileHandl
    def emit(self, record):
        try:
-            if self.shouldRollover(record) or self.first:
+            if self.first and self._header:
-                if self.shouldRollover(record):
-                    self.doRollover()
                stream = self.stream
-                if self._header:
                stream.write(self._header + self.terminator)
            else:
                stream = self.stream
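The change above removes the rollover handling from `emit()` and writes the CSV header only once, before the first record. A self-contained sketch of that pattern (illustrative only, not the project's exact class):

```python
import logging
import logging.handlers

# Illustrative sketch: write a header line once, before the first emitted record,
# and leave rollover to the base TimedRotatingFileHandler.
class HeaderOnFirstEmitHandler(logging.handlers.TimedRotatingFileHandler):
    def __init__(self, header: str, *args, **kwargs):
        self._header = header
        self.first = True
        super().__init__(*args, **kwargs)

    def emit(self, record):
        if self.first and self._header:
            if self.stream is None:
                self.stream = self._open()
            self.stream.write(self._header + self.terminator)
            self.first = False
        super().emit(record)

demo = logging.getLogger("demo")
demo.addHandler(HeaderOnFirstEmitHandler("Timestamp,dms1", filename="demo.log", when="h"))
demo.warning("2023-05-03 12:00:00,1.23456")
```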
@@ -63,7 +61,10 @@ def convert(data) -> str:
def get_offset() -> np.ndarray:
-    files = sorted(glob.glob(str(Path.joinpath(Path(__file__).parent, "data", "data.*.log"))))
+    """
+    Try and read the last logged value from the previous datalog file and use that as an offset.
+    """
+    files = sorted(glob.glob(str(Path.joinpath(Path(__file__).parent, "data", "log.*.log"))))
    if files:
        for file in files[::-1]:
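The remainder of `get_offset` is not shown in this hunk; the following is a hedged sketch of the idea the new docstring describes, assuming the CSV layout written by the data logger (`Timestamp,dms1..dms4,temp1..temp4,n`):

```python
import csv

# Hypothetical helper: take the last data row of the newest rotated datalog and
# use its four DMS columns as the zero offset for the next run.
def last_dms_row(logfile: str) -> list[float]:
    with open(logfile, newline="") as f:
        rows = [row for row in csv.reader(f) if row]
    last = rows[-1]                        # rows[0] is the header line
    return [float(v) for v in last[1:5]]   # columns 1..4 are dms1..dms4
```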
@@ -86,7 +87,10 @@ def get_offset() -> np.ndarray:
sys.excepthook = handle_exception
-def setup_loggers(config: Any) -> None:
+def setup_loggers(config: Any, data_folder='data', info_folder='logs') -> None:
+    """
+    Configure the two loggers. DataLogger for logging the data and InfoLogger for logging various information.
+    """
    global data_logger, logger, fh
    data_logger = logging.getLogger("data_logger")
    data_logger.setLevel(logging.DEBUG)
@@ -94,9 +98,9 @@ def setup_loggers(config: Any) -> None:
    fh.append(
        TimedRotatingFileHandlerWithHeader(
            header=f"Timestamp,{','.join([f'dms{i+1}' for i in range(4)])},{','.join([f'temp{i+1}' for i in range(4)])},n",
-            filename=f"{Path(__file__).parent}/data/{config['DataLogger']['filename']}",
+            filename=f"{Path(__file__).parent}/{data_folder}/data",
            when="h",
-            interval=25,
+            interval=23,
            backupCount=config["DataLogger"]["backupCount"],
        )
    )
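For context on the `interval=25` to `interval=23` change: with `when="h"`, the standard library schedules the next automatic rollover a fixed number of hours after the handler is created. A small sketch for inspecting that time:

```python
import datetime
import logging.handlers

# Sketch: a TimedRotatingFileHandler with when="h", interval=23 schedules its next
# automatic rollover roughly 23 hours after creation (this diff also triggers a
# manual doRollover() at the end of each run).
handler = logging.handlers.TimedRotatingFileHandler("demo.log", when="h", interval=23)
print(datetime.datetime.fromtimestamp(handler.rolloverAt))
handler.close()
```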
@@ -112,7 +116,7 @@ def setup_loggers(config: Any) -> None:
    bf = logging.Formatter("{asctime}, {levelname}, [{name}.{funcName}:{lineno}]\t{message}", datefmt=r"%Y-%m-%d %H:%M:%S", style="{")
    fh.append(
        logging.handlers.RotatingFileHandler(
-            filename=f"{Path(__file__).parent}/logs/{config['InfoLogger']['filename']}",
+            filename=f"{Path(__file__).parent}/{info_folder}/log",
            maxBytes=config["InfoLogger"]["maxBytes"],
            backupCount=config["InfoLogger"]["backupCount"],
        )
@@ -148,37 +152,32 @@ def main(config: Any) -> None:
    last_write = time.time()
    data = np.zeros((8,))
    n = 0
-    recv1, recv2 = None, None
-    off1, off2 = None, None
    while datetime.datetime.now() + datetime.timedelta(seconds=delta_time) < end_time:
        try:
            new_data = data.copy()
-            # offsets for writing data of each arduino in correct column
            con1.write(1)
-            off1 = 0 if int(convert(con1.readline())) == 1.0 else 4
+            # offsets for writing data in correct column
+            off = 0 if int(convert(con1.readline())) == 1.0 else 4
            # read data
            for i in range(4):
-                recv1 = con1.readline()
+                new_data[i + off] += float(convert(con1.readline()))
-                new_data[i + off1] += float(convert(recv1))
-                recv1 = None
-            con2.write(2)
+            con2.write(1)
-            off2 = 4 if int(convert(con2.readline())) == 2.0 else 0
+            # offsets for writing data in correct column
+            off = 0 if int(convert(con2.readline())) == 1.0 else 4
            for i in range(4):
-                recv2 = con2.readline()
+                new_data[i + off] += float(convert(con2.readline()))
-                new_data[i + off2] += float(convert(recv2))
-                recv2 = None
            n += 1
            data = new_data
        except (TypeError, ValueError):
            # may occur if no data was read over serial
-            logger.info(f"Didn't receive data from arduino, off1: {off1}, off2: {off2}, recv1: {recv1}, recv2: {recv2}")
+            logger.info("Didnt receive data from arduino", exc_info=True)
        if time.time() - last_write > delta_time:
            # write data
@@ -188,7 +187,7 @@ def main(config: Any) -> None:
            data = np.zeros((8,))
            last_write = time.time()
-    fh[0].doRollover()
+    fh[0].doRollover() # rollover the current data log file
    logger.warning("Finished")
...
@@ -12,15 +12,18 @@ import os
import time
import threading
import traceback
+from typing import Any
import serial
import serial.serialutil
+import sys
+import datetime
+import yaml
+from pathlib import Path
+import numpy as np
from multiprocessing_logging import install_mp_handler
-# we want to be able to log from multiple processes
+from main import logger, data_logger, fh, get_offset, setup_loggers
-logging.basicConfig(level=logging.DEBUG)
-logger = logging.getLogger()
-install_mp_handler()
# separate logger that only stores events into a file
prof_logger = logging.getLogger("profiling")
@@ -84,10 +87,10 @@ def read_value(connection: serial.Serial) -> bytes:
@log_profile("read")
-def read(connection: serial.Serial):
+def read(connection: serial.Serial, data: np.ndarray, off: int):
-    for _ in range(4):
+    for i in range(4):
-        recv1 = read_value(connection)
+        recv = read_value(connection)
-        float(convert(recv1))
+        data[i + off] += float(convert(recv))
@log_profile("write")
@@ -96,14 +99,14 @@ def write(connection: serial.Serial):
@log_profile("offset")
-def offset(connection: serial.Serial):
+def offset(connection: serial.Serial) -> int:
    return 0 if int(convert(connection.readline())) == 1.0 else 4
@log_profile("write_data")
-def write_data(n: int):
+def write_data(data: np.ndarray, n: int, factors: np.ndarray, offsets: np.ndarray):
-    print(f"writing data, {n}")
+    data_logger.info(",".join([f"{(value/n) * factors[i] - offsets[i]:.5f}" for i, value in enumerate(data)]) + f",{n}")
-    time.sleep(10e-3)
+    logger.debug("Wrote data")
def convert(data) -> str:
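The new `write_data` emits one CSV row in which each channel is the accumulated sum averaged over `n` reads, scaled by its factor and shifted by its offset. A worked example with made-up sums (the factors are the ones from `config.yml`; the offsets are hypothetical):

```python
import numpy as np

# Worked example of the row format produced by the new write_data().
data = np.array([12.4, 0.0, 9.1, -3.3, 21.0, 20.9, 21.2, 21.1])          # summed readings (made up)
n = 7                                                                     # number of reads summed
factors = np.hstack((np.array([1.855, 0, 0.923, -1]), np.ones(4)))        # dms factors, 1.0 for temps
offsets = np.hstack((np.array([0.10, 0.00, -0.20, 0.05]), np.zeros(4)))   # hypothetical dms offsets
row = ",".join(f"{(value / n) * factors[i] - offsets[i]:.5f}" for i, value in enumerate(data))
print(row + f",{n}")
```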
@@ -111,42 +114,62 @@ def convert(data) -> str:
@log_profile("get_data")
-def get_data(con1: serial.Serial, con2: serial.Serial):
+def get_data(con1: serial.Serial, con2: serial.Serial) -> np.ndarray:
+    data = np.zeros((8,))
    try:
        for connection in [con1, con2]:
            write(connection)
-            offset(connection)
+            off = offset(connection)
-            read(connection)
+            read(connection, data, off)
    except (TypeError, ValueError):
        # may occur if no data was read over serial
-        log_event(ph="I", ts=time_usec(), name="NoData", cat="NoData", **base_info)
+        logger.info(f"Didn't receive data from arduino", exc_info=True)
-        print("Didn't receive data from arduino")
+    return data
@log_profile("loop")
-def loop(con1: serial.Serial, con2: serial.Serial):
+def loop(con1: serial.Serial, con2: serial.Serial, factors: np.ndarray, offsets: np.ndarray):
    last_write = time.time()
    delta_time = 30
    n = 0
+    data = np.zeros((8,))
    while time.time() - last_write < delta_time:
-        get_data(con1, con2)
+        data += get_data(con1, con2)
        n += 1
-    write_data(n)
+    write_data(data, n, factors, offsets)
@log_profile("main")
-def main() -> None:
+def main(config: Any) -> None:
    print("Starting")
-    try:
+    Path(f"{Path(__file__).parent}/test_data").mkdir(parents=True, exist_ok=True)
+    Path(f"{Path(__file__).parent}/test_logs").mkdir(parents=True, exist_ok=True)
+    setup_loggers(config, "test_data", "test_logs")
+    delta_time = config["Data"]["delta_time"] # log averaged out data every n seconds
+    end_time = datetime.datetime.combine(datetime.date.today(), datetime.time(1, 0, 0, 0))
+    logger.warning("Starting")
+    factors: np.ndarray = np.hstack((np.array(config["Data"]["factors"]), np.ones((4,))))
+    offsets: np.ndarray = np.hstack((get_offset(), np.zeros((4,))))
+    logger.info(
+        f"Factors: {', '.join(f'{factor:.3f}' for factor in factors[:4])}, Offset: {', '.join(f'{offset:.3f}' for offset in offsets[:4])}"
+    )
    with serial.Serial("/dev/ttyACM0", 9600, timeout=3) as con1, serial.Serial("/dev/ttyACM1", 9600, timeout=3) as con2:
-        for _ in range(50):
+        for _ in range(100):
-            loop(con1, con2)
+            loop(con1, con2, factors, offsets)
-    except serial.serialutil.SerialException:
-        print(traceback.format_exc())
+    fh[0].doRollover() # rollover the current data log file
-    print("Finished")
+    logger.warning("Finished")
if __name__ == "__main__":
-    main()
+    main(yaml.safe_load(open(f"{Path(__file__).parent}/config.yml")))
    convert_log_to_trace("profiling.log", "profiling_trace.json")
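The profiling log is converted to `profiling_trace.json` by `convert_log_to_trace`; the `ph`, `ts`, `name`, `cat` fields seen in this diff match the Chrome trace-event format, so the result can be opened in a viewer such as chrome://tracing or Perfetto. A minimal sketch of one such event (not the project's converter):

```python
import json
import time

# Minimal sketch: one complete ("X") event in Chrome trace-event format.
event = {
    "name": "read",                 # span name, e.g. one profiled function
    "cat": "serial",                # category (illustrative)
    "ph": "X",                      # complete event with a duration
    "ts": int(time.time() * 1e6),   # timestamp in microseconds
    "dur": 1500,                    # duration in microseconds
    "pid": 1,
    "tid": 1,
}
with open("profiling_trace.json", "w") as f:
    json.dump({"traceEvents": [event]}, f, indent=2)
```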
Source diff could not be displayed: it is too large.