- """
- @author: Juegen Pannosch (welser project), modified by Tanja Zolotareva
- @description: Here we define a data-structure that contains arguments
- used throughout the project. Arguments (like data locations) that can differ
- from person to person are loaded from the ./.config file, arguments that should
- be the same fro everyone are defined directly in the data structure. All
- all changes in this script should be committed to git.
- """
- # -*- coding: utf-8 -*-
- import os
- import configparser
class Configuration:
    """Project-wide configuration container.

    Machine-specific arguments (such as data locations) are read from an
    ini-style config file (default: ``./.env`` in the current working
    directory); arguments that must be identical for everyone are defined
    directly on this class as properties.
    """

    def __init__(self,
                 config_file: str = os.path.join(os.getcwd(), ".env")):
        """Load and parse the ini-style configuration file.

        :param config_file: path to the ini file holding machine-specific
            settings (e.g. the data directory).
        :raises TypeError: if ``config_file`` is not a string.
        :raises FileNotFoundError: if ``config_file`` does not exist.
        """
        # Explicit raises instead of assert: asserts vanish under `python -O`.
        if not isinstance(config_file, str):
            raise TypeError("the config_file must be a string")
        if not os.path.isfile(config_file):
            raise FileNotFoundError("config file was not found")
        self._parse_ini_file(config_file)

    def __getitem__(self, item):
        """Return the parsed section ``item``, or None if it is missing."""
        # dict.get already yields None for unknown sections — no LBYL needed.
        return self._config.get(item)

    def _parse_ini_file(self, config_file: str) -> None:
        """Parse ``config_file`` into the nested dict ``self._config``.

        Section names are kept as configparser reports them (this includes
        the implicit "DEFAULT" section); option names are upper-cased and
        empty values are normalized to None.
        """
        parser = configparser.ConfigParser()
        parser.read(config_file)
        self._config = {
            section: {
                option.upper(): (value if value != '' else None)
                for option, value in parser[section].items()
            }
            for section in parser
        }

    @property
    def labeled_history_folder(self) -> str:
        """Folder with the 2018 extract of serviceable (labeled) wheelsets."""
        return os.path.join(self._config["LOCATIONS"]["DATA_DIR"],
                            "Aufarbeitungsdaten/2018/Datenextrakt einsatzfähige Radsätze 2018")

    @property
    def unlabeled_history_yearly_folders(self) -> list:
        """Per-year folders (2016-2018) with extracts of all wheelsets.

        Each folder holds the extract of every wheelset that passed
        station 110 in that year.
        """
        # NOTE: .format is applied to the year placeholder in the folder-name
        # literal (the original code formatted the joined path, which only
        # worked because the "{}" survived os.path.join unchanged).
        return [
            os.path.join(self._config["LOCATIONS"]["DATA_DIR"],
                         "Aufarbeitungsdaten",
                         year,
                         "Datenextrakt alle Radsätze {} ausgehend von der Station 110".format(year))
            for year in ["2016", "2017", "2018"]
        ]

    @property
    def additional_data_folder(self) -> str:
        """Folder with supplementary data shared between project members."""
        return os.path.join(self._config["LOCATIONS"]["DATA_DIR"],
                            "Info-Austausch")

    @property
    def columns_rs516(self) -> dict:
        """Mapping of column position to column name for the table rs516."""
        return {0: "radsatznummer",
                1: "positionsnummer",
                2: "status",
                3: "taetigkeitsname",
                4: "datum",
                5: "presskrafdiagram_min",
                6: "presskrafdiagram_max",
                7: "presskrafdiagram_wert"}

    @property
    def ihs_labels(self) -> dict:
        """Integer encoding of the IHS string labels.

        For analysis we replace the string IHS by an integer value; useful
        for comparing the IHS of two wheelsets.
        """
        return {"null": -1,
                "IS1": 0,
                "IS1L": 1,
                "IS2": 2,
                "IS3": 3}

    @property
    def schrott_schadcodes(self) -> list:
        """Damage codes that mark a wheelset as scrap.

        If during the process one of these schadcodes is assigned, the
        wheelset is scrap and is removed from the process. This should
        correspond to aufarbeitungstyp = 2 in rs0, but if there was a delay
        (or a mistake) in the maintenance of the table rs0, this might not
        be the case. Count as scrap anyway.
        """
        return ["RSAUS"]

    @property
    def schrott_taetigkeiten(self) -> list:
        """Activities that mark a wheelset as scrap.

        If during the process one of the following Tätigkeiten is assigned,
        the wheelset is scrap and is removed from the process. This should
        correspond to aufarbeitungstyp = 2 in rs0 and (or) to assignment of
        a corresponding schadcode. Data might contain inconsistencies. If
        such an activity is assigned, count as scrap.
        """
        return ["RADSATZ AUSSCHEIDEN"]

    @property
    def status_labels(self) -> dict:
        """Integer encoding of the "Status" column of the table rs1.

        Used to uniformize the column; integer values are convenient for
        analysis.
        """
        return {"Scheiden": 2,
                "Schlecht": 1,
                "Fertig": 0,
                "Gut": 0}

    @property
    def process_stages(self):
        """Directed graph of the coarse process stages.

        For machine-learning predictions we divide the process into big
        stages; stages can be skipped depending on the IHS of the wheelset.
        We use all information gathered during the previous process stages
        to make predictions for the next stage.

        :returns: ``networkx.DiGraph`` whose nodes carry a ``stations``
            attribute listing the station numbers of that stage.
        """
        # Imported lazily so the rest of the configuration works without
        # the third-party networkx dependency installed.
        import networkx as nx

        critical_stations = {
            "A": [421, 110],
            "B": [130, 131],
            "C": [140, 141, 142, 150],
            "D": [410, 420],
            "E": [510, 511, 520, 521, 535,
                  530, 531, 516, 550],
            "F": [490, 480, 430, 170],
            "G": [595, 190, 630],
            "H": [640, 641],
            "I": [650, 560],
            "J": [675],
            "K": [690],
            "L": [710, 670],
        }
        stages_graph = nx.DiGraph()
        for stage, stations in critical_stations.items():
            stages_graph.add_node(stage, stations=stations)
        # E (pressing line) and F are alternative branches out of D that
        # re-join at G; the remainder of the process is linear.
        stages_graph.add_edges_from([("A", "B"), ("B", "C"), ("C", "D"),
                                     ("D", "E"), ("D", "F"),
                                     ("E", "G"), ("F", "G"),
                                     ("G", "H"), ("H", "I"), ("I", "J"),
                                     ("J", "K"), ("K", "L")])
        return stages_graph
# Singleton instance shared across the project.  NOTE: constructing it here
# reads the ./.env file from the current working directory at import time,
# so importing this module fails if that file is absent.
default = Configuration()