Compare revisions: Lennard/messdatensilo
Commits on Source (12)
......@@ -2,5 +2,6 @@
out/*
CHANGELOG.md
profiling.log
logs/log
data/data
data.zip
__pycache__/**
\ No newline at end of file
......@@ -10,7 +10,7 @@ variables:
workflow:
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_PIPELINE_SOURCE == "schedule" # only run this pipeline upon a schedule event
convert:
stage: convert
......@@ -27,9 +27,6 @@ convert:
upload:
stage: upload
image: curlimages/curl:latest
script:
- echo "Compiling the code..."
- echo "Compile complete."
script:
- 'curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file data.zip "${PACKAGE_REGISTRY_URL}/data.zip"'
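As a side note (not part of the pipeline), the same upload could be performed from Python; a minimal sketch, assuming `PACKAGE_REGISTRY_URL` and `CI_JOB_TOKEN` are available as environment variables, mirroring the curl call above:

```python
# minimal sketch: PUT data.zip to the GitLab generic package registry,
# equivalent to the `curl --upload-file` call in the upload job above
import os
import requests

url = f"{os.environ['PACKAGE_REGISTRY_URL']}/data.zip"  # assumption: same variable the CI job uses
with open("data.zip", "rb") as archive:
    response = requests.put(url, data=archive, headers={"JOB-TOKEN": os.environ["CI_JOB_TOKEN"]})
response.raise_for_status()
```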
......
# DMS Measurements from the Silo
## Data
- The measurement data up to the previous day can be downloaded under [Releases](https://gitlab.cvh-server.de/Lennard/messdatensilo/-/releases/latest).
- The data is stored as `.mat` files, each covering one week (see the loading sketch below).
## How it works
- A cron job runs the `scripts/run.bash` script after every reboot and every day at 0:00.
- `run.bash` then runs `python3 main.py`, which collects the data for one day and afterwards uploads it to gitlab.cvh-server.de.
- The `main.py` program continuously reads the data from the Arduinos, averages it over a configurable interval and stores it in `data/data`.
- Shortly before midnight, `main.py` stops, renames `data/data` to `log.year-month-day_hour.log` and deletes the oldest file if too many files are present.
- The interval, the number of files to keep and further program parameters can be changed in `config.yml`.
- In addition, further log files are kept:
    - Logs from `python3 main.py` are written to `logs/*`.
    - Logs from `run.bash` are written to `bash.log`.
\ No newline at end of file
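As a quick illustration (not part of the repository), a minimal sketch of how one of the released weekly `.mat` files could be read back in Python, assuming the column names from the data header (`Timestamp`, `dms1`…`dms4`, `temp1`…`temp4`, `n`); the filename is hypothetical:

```python
# minimal sketch: load a weekly .mat file downloaded from the Releases page,
# assuming the convert script stored one array per CSV column
import scipy.io

mat = scipy.io.loadmat("data.2023-05-15.mat")  # hypothetical filename
dms1 = mat["dms1"]                             # averaged readings of DMS channel 1
timestamps = mat["Timestamp"]
print(dms1.shape, timestamps.shape)
```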
File added
DataLogger:
backupCount: 70 # number of datalogs to keep
filename: data # filename for datalogs
levels: # log level for outputting to file and to stdout respectively
- INFO
- INFO
- WARNING
InfoLogger:
backupCount: 10 # number of logs to keep
maxBytes: 100000 # size of single log in bytes
filename: log # filename for logs
levels: # log level for outputting to file and to stdout respectively
- INFO
- WARNING
Data:
factors: [1.855, 0, 0.923, -1] # factors for the 4 dms
delta_time: 10 # time between logging data
smoothing: false # whether to smooth the logged data
delta_time: 30 # time between logging data
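For orientation, a minimal sketch of how these values are consumed, mirroring the `yaml.safe_load` call in `main.py`; the keys are the ones from the config above:

```python
# minimal sketch: load config.yml the same way main.py does and read a few values
from pathlib import Path
import yaml

config = yaml.safe_load(open(f"{Path(__file__).parent}/config.yml"))
delta_time = config["Data"]["delta_time"]      # seconds between logged (averaged) rows
factors = config["Data"]["factors"]            # scaling factors for the 4 DMS channels
backups = config["DataLogger"]["backupCount"]  # number of daily datalogs to keep
print(delta_time, factors, backups)
```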
"""Convert csv data into mat files to read into matlab.
Combines the files from one weak and converts it into a single '.mat' file.
Combines the files from one week and converts it into a single '.mat' file.
"""
from datetime import datetime, timedelta
......@@ -41,14 +41,14 @@ for file in files:
Path(f"{Path(__file__).parent}/out").mkdir(parents=True, exist_ok=True)
# save each week as separate '.mat' file
# save each week as separate '.mat' file in 'out' folder
for week_start, arr in data.items():
scipy.io.savemat(
f"{Path(__file__).parent}/out/data.{week_start}.mat",
mdict={name: column for name, column in zip(header, np.split(arr, arr.shape[1], axis=1))},
)
# zip folder
# zip 'out' folder
shutil.make_archive("data", "zip", "out")
# Update CHANGELOG.md
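To illustrate the weekly grouping: each daily datalog name carries a date, and files sharing the same week start are combined into one `.mat` file. A minimal sketch with a hypothetical helper (not taken from the script), assuming weeks start on Monday:

```python
import datetime

# hypothetical helper: map a datalog's date to the Monday of its ISO week;
# files sharing this key would be combined into one weekly .mat file
def week_start(date_str: str) -> str:
    day = datetime.datetime.strptime(date_str, "%Y-%m-%d").date()
    monday = day - datetime.timedelta(days=day.weekday())
    return monday.isoformat()

print(week_start("2023-05-17"))  # -> 2023-05-15 (the Monday of that week)
```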
......
Source diff could not be displayed for six files stored in LFS and four files that are too large.
......@@ -25,6 +25,7 @@ class TimedRotatingFileHandlerWithHeader(logging.handlers.TimedRotatingFileHandl
self.first = True
super().__init__(filename, when=when, interval=interval, backupCount=backupCount, atTime=atTime)
self.namer = self._namer
print(datetime.datetime.fromtimestamp(self.rolloverAt).strftime('%Y-%m-%d %H:%M:%S'))
@staticmethod
def _namer(filename: str) -> str:
......@@ -32,11 +33,8 @@ class TimedRotatingFileHandlerWithHeader(logging.handlers.TimedRotatingFileHandl
def emit(self, record):
try:
if self.shouldRollover(record) or self.first:
if self.shouldRollover(record):
self.doRollover()
if self.first and self._header:
stream = self.stream
if self._header:
stream.write(self._header + self.terminator)
else:
stream = self.stream
......@@ -63,7 +61,10 @@ def convert(data) -> str:
def get_offset() -> np.ndarray:
files = sorted(glob.glob(str(Path.joinpath(Path(__file__).parent, "data", "data.*.log"))))
"""
Try and read the last logged value from the previous datalog file and use that as an offset.
"""
files = sorted(glob.glob(str(Path.joinpath(Path(__file__).parent, "data", "log.*.log"))))
if files:
for file in files[::-1]:
......@@ -86,7 +87,10 @@ def get_offset() -> np.ndarray:
sys.excepthook = handle_exception
def setup_loggers(config: Any) -> None:
def setup_loggers(config: Any, data_folder='data', info_folder='logs') -> None:
"""
Configure the two loggers. DataLogger for logging the data and InfoLogger for logging various information.
"""
global data_logger, logger, fh
data_logger = logging.getLogger("data_logger")
data_logger.setLevel(logging.DEBUG)
......@@ -94,9 +98,9 @@ def setup_loggers(config: Any) -> None:
fh.append(
TimedRotatingFileHandlerWithHeader(
header=f"Timestamp,{','.join([f'dms{i+1}' for i in range(4)])},{','.join([f'temp{i+1}' for i in range(4)])},n",
filename=f"{Path(__file__).parent}/data/{config['DataLogger']['filename']}",
filename=f"{Path(__file__).parent}/{data_folder}/data",
when="h",
interval=25,
interval=23,
backupCount=config["DataLogger"]["backupCount"],
)
)
......@@ -112,7 +116,7 @@ def setup_loggers(config: Any) -> None:
bf = logging.Formatter("{asctime}, {levelname}, [{name}.{funcName}:{lineno}]\t{message}", datefmt=r"%Y-%m-%d %H:%M:%S", style="{")
fh.append(
logging.handlers.RotatingFileHandler(
filename=f"{Path(__file__).parent}/logs/{config['InfoLogger']['filename']}",
filename=f"{Path(__file__).parent}/{info_folder}/log",
maxBytes=config["InfoLogger"]["maxBytes"],
backupCount=config["InfoLogger"]["backupCount"],
)
......@@ -148,37 +152,32 @@ def main(config: Any) -> None:
last_write = time.time()
data = np.zeros((8,))
n = 0
recv1, recv2 = None, None
off1, off2 = None, None
while datetime.datetime.now() + datetime.timedelta(seconds=delta_time) < end_time:
try:
new_data = data.copy()
# offsets for writing data of each arduino in correct column
con1.write(1)
off1 = 0 if int(convert(con1.readline())) == 1.0 else 4
# offsets for writing data in correct column
off = 0 if int(convert(con1.readline())) == 1.0 else 4
# read data
for i in range(4):
recv1 = con1.readline()
new_data[i + off1] += float(convert(recv1))
recv1 = None
new_data[i + off] += float(convert(con1.readline()))
con2.write(2)
off2 = 4 if int(convert(con2.readline())) == 2.0 else 0
con2.write(1)
# offsets for writing data in correct column
off = 0 if int(convert(con2.readline())) == 1.0 else 4
for i in range(4):
recv2 = con2.readline()
new_data[i + off2] += float(convert(recv2))
recv2 = None
new_data[i + off] += float(convert(con2.readline()))
n += 1
data = new_data
except (TypeError, ValueError):
# may occur if no data was read over serial
logger.info(f"Didn't receive data from arduino, off1: {off1}, off2: {off2}, recv1: {recv1}, recv2: {recv2}")
logger.info("Didnt receive data from arduino", exc_info=True)
if time.time() - last_write > delta_time:
# write data
......@@ -188,7 +187,7 @@ def main(config: Any) -> None:
data = np.zeros((8,))
last_write = time.time()
fh[0].doRollover()
fh[0].doRollover() # rollover the current data log file
logger.warning("Finished")
......
......@@ -12,15 +12,18 @@ import os
import time
import threading
import traceback
from typing import Any
import serial
import serial.serialutil
import sys
import datetime
import yaml
from pathlib import Path
import numpy as np
from multiprocessing_logging import install_mp_handler
# we want to be able to log from multiple processes
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
install_mp_handler()
from main import logger, data_logger, fh, get_offset, setup_loggers
# separate logger that only stores events into a file
prof_logger = logging.getLogger("profiling")
......@@ -84,10 +87,10 @@ def read_value(connection: serial.Serial) -> bytes:
@log_profile("read")
def read(connection: serial.Serial):
for _ in range(4):
recv1 = read_value(connection)
float(convert(recv1))
def read(connection: serial.Serial, data: np.ndarray, off: int):
for i in range(4):
recv = read_value(connection)
data[i + off] += float(convert(recv))
@log_profile("write")
......@@ -96,14 +99,14 @@ def write(connection: serial.Serial):
@log_profile("offset")
def offset(connection: serial.Serial):
def offset(connection: serial.Serial) -> int:
return 0 if int(convert(connection.readline())) == 1.0 else 4
@log_profile("write_data")
def write_data(n: int):
print(f"writing data, {n}")
time.sleep(10e-3)
def write_data(data: np.ndarray, n: int, factors: np.ndarray, offsets: np.ndarray):
data_logger.info(",".join([f"{(value/n) * factors[i] - offsets[i]:.5f}" for i, value in enumerate(data)]) + f",{n}")
logger.debug("Wrote data")
def convert(data) -> str:
......@@ -111,42 +114,62 @@ def convert(data) -> str:
@log_profile("get_data")
def get_data(con1: serial.Serial, con2: serial.Serial):
def get_data(con1: serial.Serial, con2: serial.Serial) -> np.ndarray:
data = np.zeros((8,))
try:
for connection in [con1, con2]:
write(connection)
offset(connection)
read(connection)
off = offset(connection)
read(connection, data, off)
except (TypeError, ValueError):
# may occur if no data was read over serial
log_event(ph="I", ts=time_usec(), name="NoData", cat="NoData", **base_info)
print("Didn't receive data from arduino")
logger.info(f"Didn't receive data from arduino", exc_info=True)
return data
@log_profile("loop")
def loop(con1: serial.Serial, con2: serial.Serial):
def loop(con1: serial.Serial, con2: serial.Serial, factors: np.ndarray, offsets: np.ndarray):
last_write = time.time()
delta_time = 30
n = 0
data = np.zeros((8,))
while time.time() - last_write < delta_time:
get_data(con1, con2)
data += get_data(con1, con2)
n += 1
write_data(n)
write_data(data, n, factors, offsets)
@log_profile("main")
def main() -> None:
def main(config: Any) -> None:
print("Starting")
try:
Path(f"{Path(__file__).parent}/test_data").mkdir(parents=True, exist_ok=True)
Path(f"{Path(__file__).parent}/test_logs").mkdir(parents=True, exist_ok=True)
setup_loggers(config, "test_data", "test_logs")
delta_time = config["Data"]["delta_time"] # log averaged out data every n seconds
end_time = datetime.datetime.combine(datetime.date.today(), datetime.time(1, 0, 0, 0))
logger.warning("Starting")
factors: np.ndarray = np.hstack((np.array(config["Data"]["factors"]), np.ones((4,))))
offsets: np.ndarray = np.hstack((get_offset(), np.zeros((4,))))
logger.info(
f"Factors: {', '.join(f'{factor:.3f}' for factor in factors[:4])}, Offset: {', '.join(f'{offset:.3f}' for offset in offsets[:4])}"
)
with serial.Serial("/dev/ttyACM0", 9600, timeout=3) as con1, serial.Serial("/dev/ttyACM1", 9600, timeout=3) as con2:
for _ in range(50):
loop(con1, con2)
except serial.serialutil.SerialException:
print(traceback.format_exc())
print("Finished")
for _ in range(100):
loop(con1, con2, factors, offsets)
fh[0].doRollover() # rollover the current data log file
logger.warning("Finished")
if __name__ == "__main__":
main()
main(yaml.safe_load(open(f"{Path(__file__).parent}/config.yml")))
convert_log_to_trace("profiling.log", "profiling_trace.json")